@@ -158,6 +158,10 @@ async fn run_daemon(config_path: &std::path::Path) -> anyhow::Result<()> { |
| 158 | 158 | // Track daemon start time for uptime reporting |
| 159 | 159 | let daemon_start_time = std::time::Instant::now(); |
| 160 | 160 | |
| 161 | + // State flags for CLI control |
| 162 | + let barrier_enabled = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)); |
| 163 | + let shutdown_requested = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)); |
| 164 | + |
| 161 | 165 | // Connect to Hyprland |
| 162 | 166 | info!("Connecting to Hyprland..."); |
| 163 | 167 | let hypr_client = hyprland::ipc::HyprlandClient::new().await?; |
@@ -1469,14 +1473,289 @@ async fn run_daemon(config_path: &std::path::Path) -> anyhow::Result<()> { |
| 1469 | 1473 | } |
| 1470 | 1474 | } |
| 1471 | 1475 | } |
| 1476 | + |
| 1477 | + // ================================================================ |
| 1478 | + // CLI Expansion: Control Transfer |
| 1479 | + // ================================================================ |
| 1480 | + |
| 1481 | + IpcRequest::Switch { target } => { |
| 1482 | + use hyprkvm_common::protocol::SwitchTarget; |
| 1483 | + |
| 1484 | + // Resolve target to a direction |
| 1485 | + let direction = match &target { |
| 1486 | + SwitchTarget::Direction(dir) => Some(*dir), |
| 1487 | + SwitchTarget::MachineName(name) => { |
| 1488 | + config.machines.neighbors |
| 1489 | + .iter() |
| 1490 | + .find(|n| &n.name == name) |
| 1491 | + .map(|n| n.direction) |
| 1492 | + } |
| 1493 | + }; |
| 1494 | + |
| 1495 | + match direction { |
| 1496 | + Some(dir) => { |
| 1497 | + let peers_guard = peers.read().await; |
| 1498 | + if peers_guard.get(&dir).is_some() { |
| 1499 | + drop(peers_guard); |
| 1500 | + |
| 1501 | + // Get cursor position and screen size from Hyprland |
| 1502 | + let (cursor_pos, screen_width, screen_height) = match hypr_client.monitors().await { |
| 1503 | + Ok(monitors) => { |
| 1504 | + if let Some(focused) = monitors.iter().find(|m| m.focused) { |
| 1505 | + // Use center of screen as cursor position for switch |
| 1506 | + let cx = focused.x + focused.width as i32 / 2; |
| 1507 | + let cy = focused.y + focused.height as i32 / 2; |
| 1508 | + ((cx, cy), focused.width, focused.height) |
| 1509 | + } else { |
| 1510 | + ((0, 0), 1920, 1080) // Fallback |
| 1511 | + } |
| 1512 | + } |
| 1513 | + Err(_) => ((0, 0), 1920, 1080), // Fallback |
| 1514 | + }; |
| 1515 | + |
| 1516 | + // Initiate transfer |
| 1517 | + match transfer_manager.initiate_transfer(dir, cursor_pos, screen_width, screen_height).await {
| 1518 | + Ok(()) => { |
| 1519 | + let machine_name = config.machines.neighbors |
| 1520 | + .iter() |
| 1521 | + .find(|n| n.direction == dir) |
| 1522 | + .map(|n| n.name.clone()) |
| 1523 | + .unwrap_or_else(|| format!("{:?}", dir)); |
| 1524 | + IpcResponse::Transferred { to_machine: machine_name } |
| 1525 | + } |
| 1526 | + Err(e) => IpcResponse::Error { |
| 1527 | + message: format!("Transfer failed: {}", e), |
| 1528 | + } |
| 1529 | + } |
| 1530 | + } else { |
| 1531 | + IpcResponse::Error { |
| 1532 | + message: format!("Peer not connected in direction {:?}", dir), |
| 1533 | + } |
| 1534 | + } |
| 1535 | + } |
| 1536 | + None => { |
| 1537 | + let name = match target { |
| 1538 | + SwitchTarget::MachineName(n) => n, |
| 1539 | + _ => "unknown".to_string(), |
| 1540 | + }; |
| 1541 | + IpcResponse::Error { |
| 1542 | + message: format!("Unknown machine: {}", name), |
| 1543 | + } |
| 1544 | + } |
| 1545 | + } |
| 1546 | + } |
| 1547 | + |
| 1548 | + IpcRequest::Return => { |
| 1549 | + match transfer_manager.return_control().await { |
| 1550 | + Ok(()) => IpcResponse::Ok { |
| 1551 | + message: "Control returned".to_string(), |
| 1552 | + }, |
| 1553 | + Err(e) => IpcResponse::Error { |
| 1554 | + message: format!("Return failed: {}", e), |
| 1555 | + } |
| 1556 | + } |
| 1557 | + } |
| 1558 | + |
| 1559 | + // ================================================================ |
| 1560 | + // CLI Expansion: Input Management |
| 1561 | + // ================================================================ |
| 1562 | + |
| 1563 | + IpcRequest::Release => { |
| 1564 | + // Stop input grabbing |
| 1565 | + input_grabber.stop(None); |
| 1566 | + // Abort any pending transfer |
| 1567 | + transfer_manager.abort().await; |
| 1568 | + IpcResponse::Ok { |
| 1569 | + message: "Input released".to_string(), |
| 1570 | + } |
| 1571 | + } |
| 1572 | + |
| 1573 | + IpcRequest::SetBarrier { enabled } => { |
| 1574 | + barrier_enabled.store(enabled, std::sync::atomic::Ordering::SeqCst); |
| 1575 | + let status = if enabled { "enabled" } else { "disabled" }; |
| 1576 | + IpcResponse::Ok { |
| 1577 | + message: format!("Barrier {}", status), |
| 1578 | + } |
| 1579 | + } |
| 1580 | + |
| 1581 | + // ================================================================ |
| 1582 | + // CLI Expansion: Connection Management |
| 1583 | + // ================================================================ |
| 1584 | + |
| 1585 | + IpcRequest::Disconnect { peer_name } => { |
| 1586 | + let neighbor = config.machines.neighbors |
| 1587 | + .iter() |
| 1588 | + .find(|n| n.name == peer_name); |
| 1589 | + |
| 1590 | + match neighbor { |
| 1591 | + Some(n) => { |
| 1592 | + let direction = n.direction; |
| 1593 | + let mut peers_guard = peers.write().await; |
| 1594 | + if let Some(mut peer_conn) = peers_guard.remove(&direction) { |
| 1595 | + // Send goodbye before disconnecting |
| 1596 | + let _ = peer_conn.send(&Message::Goodbye).await; |
| 1597 | + IpcResponse::Ok { |
| 1598 | + message: format!("Disconnected from {}", peer_name), |
| 1599 | + } |
| 1600 | + } else { |
| 1601 | + IpcResponse::Error { |
| 1602 | + message: format!("Peer {} not connected", peer_name), |
| 1603 | + } |
| 1604 | + } |
| 1605 | + } |
| 1606 | + None => IpcResponse::Error { |
| 1607 | + message: format!("Unknown peer: {}", peer_name), |
| 1608 | + } |
| 1609 | + } |
| 1610 | + } |
| 1611 | + |
| 1612 | + IpcRequest::Reconnect { peer_name } => { |
| 1613 | + let neighbor = config.machines.neighbors |
| 1614 | + .iter() |
| 1615 | + .find(|n| n.name == peer_name) |
| 1616 | + .cloned(); |
| 1617 | + |
| 1618 | + match neighbor { |
| 1619 | + Some(n) => { |
| 1620 | + let direction = n.direction; |
| 1621 | + let addr = n.address; |
| 1622 | + // Remove existing connection if any |
| 1623 | + { |
| 1624 | + let mut peers_guard = peers.write().await; |
| 1625 | + if let Some(mut peer_conn) = peers_guard.remove(&direction) { |
| 1626 | + let _ = peer_conn.send(&Message::Goodbye).await; |
| 1627 | + } |
| 1628 | + } |
| 1629 | + // Spawn reconnection task (same logic as initial connection) |
| 1630 | + let peers_clone = peers.clone(); |
| 1631 | + let machine_name = config.machines.self_name.clone(); |
| 1632 | + let neighbor_name = n.name.clone(); |
| 1633 | + tokio::spawn(async move { |
| 1634 | + match network::connect(addr).await { |
| 1635 | + Ok(mut conn) => { |
| 1636 | + // Send Hello |
| 1637 | + let hello = Message::Hello(HelloPayload { |
| 1638 | + protocol_version: PROTOCOL_VERSION, |
| 1639 | + machine_name, |
| 1640 | + capabilities: vec![], |
| 1641 | + }); |
| 1642 | + if let Err(e) = conn.send(&hello).await { |
| 1643 | + tracing::error!("Reconnect: failed to send Hello: {}", e); |
| 1644 | + return; |
| 1645 | + } |
| 1646 | + // Wait for HelloAck |
| 1647 | + match conn.recv().await { |
| 1648 | + Ok(Some(Message::HelloAck(ack))) if ack.accepted => { |
| 1649 | + let mut peers_guard = peers_clone.write().await; |
| 1650 | + peers_guard.insert(direction, conn); |
| 1651 | + info!("Reconnected to {}", neighbor_name); |
| 1652 | + } |
| 1653 | + Ok(Some(Message::HelloAck(ack))) => { |
| 1654 | + tracing::error!("Reconnect rejected: {:?}", ack.error); |
| 1655 | + } |
| 1656 | + _ => { |
| 1657 | + tracing::error!("Reconnect: handshake failed"); |
| 1658 | + } |
| 1659 | + } |
| 1660 | + } |
| 1661 | + Err(e) => { |
| 1662 | + tracing::error!("Reconnect: connection failed: {}", e); |
| 1663 | + } |
| 1664 | + } |
| 1665 | + }); |
| 1666 | + IpcResponse::Ok { |
| 1667 | + message: format!("Reconnecting to {}", peer_name), |
| 1668 | + } |
| 1669 | + } |
| 1670 | + None => IpcResponse::Error { |
| 1671 | + message: format!("Unknown peer: {}", peer_name), |
| 1672 | + } |
| 1673 | + } |
| 1674 | + } |
| 1675 | + |
| 1676 | + // ================================================================ |
| 1677 | + // CLI Expansion: Configuration |
| 1678 | + // ================================================================ |
| 1679 | + |
| 1680 | + IpcRequest::GetConfig => { |
| 1681 | + match toml::to_string_pretty(&config) { |
| 1682 | + Ok(toml_str) => IpcResponse::Config { toml: toml_str }, |
| 1683 | + Err(e) => IpcResponse::Error { |
| 1684 | + message: format!("Failed to serialize config: {}", e), |
| 1685 | + } |
| 1686 | + } |
| 1687 | + } |
| 1688 | + |
| 1689 | + IpcRequest::Reload => { |
| 1690 | + // TODO: Implement config hot-reload |
| 1691 | + IpcResponse::Error { |
| 1692 | + message: "Config reload not yet implemented".to_string(), |
| 1693 | + } |
| 1694 | + } |
| 1695 | + |
| 1696 | + // ================================================================ |
| 1697 | + // CLI Expansion: Daemon Control |
| 1698 | + // ================================================================ |
| 1699 | + |
| 1700 | + IpcRequest::Shutdown => { |
| 1701 | + info!("Shutdown requested via IPC"); |
| 1702 | + shutdown_requested.store(true, std::sync::atomic::Ordering::SeqCst); |
| 1703 | + IpcResponse::Ok { |
| 1704 | + message: "Shutting down...".to_string(), |
| 1705 | + } |
| 1706 | + } |
| 1707 | + |
| 1708 | + IpcRequest::GetLogs { lines, follow: _ } => { |
| 1709 | + // Read from log file |
| 1710 | + let log_path = dirs::data_local_dir() |
| 1711 | + .unwrap_or_else(|| std::path::PathBuf::from("/tmp")) |
| 1712 | + .join("hyprkvm") |
| 1713 | + .join("daemon.log"); |
| 1714 | + |
| 1715 | + if log_path.exists() { |
| 1716 | + match std::fs::read_to_string(&log_path) { |
| 1717 | + Ok(content) => { |
| 1718 | + let n = lines.unwrap_or(50) as usize; |
| 1719 | + let log_lines: Vec<String> = content |
| 1720 | + .lines() |
| 1721 | + .rev() |
| 1722 | + .take(n) |
| 1723 | + .map(|s| s.to_string()) |
| 1724 | + .collect::<Vec<_>>() |
| 1725 | + .into_iter() |
| 1726 | + .rev() |
| 1727 | + .collect(); |
| 1728 | + IpcResponse::Logs { lines: log_lines } |
| 1729 | + } |
| 1730 | + Err(e) => IpcResponse::Error { |
| 1731 | + message: format!("Failed to read log file: {}", e), |
| 1732 | + } |
| 1733 | + } |
| 1734 | + } else { |
| 1735 | + IpcResponse::Logs { |
| 1736 | + lines: vec!["Log file not found. File logging may not be configured.".to_string()], |
| 1737 | + } |
| 1738 | + } |
| 1739 | + } |
| 1472 | 1740 | }; |
| 1473 | 1741 | |
| 1474 | 1742 | let _ = response_tx.send(response); |
| 1475 | 1743 | } |
| 1476 | 1744 | |
| 1477 | | - // Shutdown |
| 1745 | + // Shutdown (Ctrl+C or IPC request) |
| 1478 | 1746 | _ = tokio::signal::ctrl_c() => { |
| 1479 | | - info!("Shutting down..."); |
| 1747 | + info!("Shutting down (Ctrl+C)..."); |
| 1748 | + accept_handle.abort(); |
| 1749 | + break; |
| 1750 | + } |
| 1751 | + |
| 1752 | + // Check for IPC shutdown request (100 ms poll; TODO: replace with tokio::sync::Notify to avoid busy-wait)
| 1753 | + _ = async { |
| 1754 | + while !shutdown_requested.load(std::sync::atomic::Ordering::SeqCst) { |
| 1755 | + tokio::time::sleep(std::time::Duration::from_millis(100)).await; |
| 1756 | + } |
| 1757 | + } => { |
| 1758 | + info!("Shutting down (IPC request)..."); |
| 1480 | 1759 | accept_handle.abort(); |
| 1481 | 1760 | break; |
| 1482 | 1761 | } |