//! Tauri command façade — the boundary between the pure-Rust `core`
//! module and the webview. All heavy lifting lives in `core`; these
//! commands only coerce types, hand off to the cache, and convert
//! errors to strings for the IPC edge.

use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, RwLock};

use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use base64::Engine as _;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use portable_pty::MasterPty;
use serde::Serialize;
use tauri::{AppHandle, Emitter, Manager, State};
use tokio::sync::oneshot;

use crate::core::cache::SummaryCache;
use crate::core::chat::{spawn_turn, TurnEvent, TurnHandle, TurnRequest};
use crate::core::claude_mem::{default_db_path, load_title_map};
use crate::core::discovery::scan_projects;
use crate::core::grouping::{archive_session_id, build_archive_projects, build_projects};
use crate::core::history_log::{default_history_path, HistoryLog};
use crate::core::pty::{
    resize_pty as core_resize_pty, spawn_pty as core_spawn_pty, write_pty as core_write_pty,
    PtyHandle, PtyRequest, RingBuffer,
};
use crate::core::reader::read_session_limited as core_read_session_limited;
use crate::core::schema::{
    Message, PermissionMode, Project, SessionDetail, SessionSource, SessionSummary,
};
use crate::core::watcher::{spawn_watcher, SessionChange, WatcherHandle};

/// Shared state handed to every command. Registered once via
/// Tauri's `.manage()`; the `Arc`-wrapped fields keep clones cheap.
pub struct AppState {
    pub projects_root: PathBuf,
    pub cache: Arc<SummaryCache>,
    pub cache_path: PathBuf,
    /// Parsed `~/.claude/history.jsonl`, used to reconstruct
    /// archive projects for paths whose on-disk transcripts were
    /// deleted (most often by a Claude Code major version upgrade).
    /// Reloaded on every `rescan`.
    pub history_log: Arc<RwLock<HistoryLog>>,
    /// Currently-running chat turns, keyed by the client-generated
    /// `turn_id`. Holding the `kill_tx` in here lets `cancel_turn`
    /// take ownership and fire it. Forwarder tasks `remove()` their
    /// own entry when emitting the terminal event; whichever side
    /// wins the `remove` race cancels or reaps the other.
    pub active_turns: Arc<DashMap<String, ActiveTurn>>,
    /// Currently-running PTY subprocesses, keyed by client-generated
    /// `pty_id`. **Unlike `active_turns`, PTY entries survive session
    /// switches.** They only leave the map when: (1) the user clicks
    /// the close ✕ button (triggering [`close_pty`]), (2) the
    /// subprocess exits on its own and the forwarder task removes
    /// the entry, or (3) the app quits and [`shutdown_active_ptys`]
    /// drains everything. This is what enables the codex-style
    /// parallel-thread behavior.
    pub active_ptys: Arc<DashMap<String, PtyEntry>>,
    /// Absolute path to the `claude` binary, resolved once at
    /// startup by walking `$PATH`. `None` means chat is disabled —
    /// `start_turn` returns an error and the frontend shows a toast.
    pub claude_bin: Option<PathBuf>,
    /// Holding this keeps the watcher alive for the lifetime of the app.
    pub _watcher: WatcherHandle,
}

/// One entry in [`AppState::active_turns`]. Owning the `kill_tx`
/// here lets `cancel_turn` pull it out and send.
pub struct ActiveTurn {
    pub kill_tx: oneshot::Sender<()>,
    pub cwd: PathBuf,
    pub started_at: DateTime<Utc>,
}

/// One entry in [`AppState::active_ptys`]. The writer and master
/// live here so the commands layer can mutate them from any
/// later invocation (`write_pty`, `resize_pty`). `kill_tx` is
/// consumed by [`close_pty`] (or [`shutdown_active_ptys`] on app
/// quit) — whichever removes the entry first owns the signal.
pub struct PtyEntry {
    /// Which claudex session this PTY is bound to, if any. Used by
    /// the sidebar's "is this session live?" lookup. `None` means
    /// the PTY is detached from any session (not currently used but
    /// reserved for future tooling like one-off terminals).
    pub session_id: Option<String>,
    pub writer: Arc<Mutex<Box<dyn Write + Send>>>,
    pub master: Arc<Mutex<Box<dyn MasterPty + Send>>>,
    pub kill_tx: oneshot::Sender<()>,
    pub ring_buffer: Arc<Mutex<RingBuffer>>,
    pub cwd: PathBuf,
    pub started_at: DateTime<Utc>,
}

pub type IpcResult<T> = Result<T, String>;

/// List every project directory under `~/.claude/projects/` with its
/// aggregate metadata. Uses the summary cache so repeat calls are
/// near-instant, then runs the grouping layer to collapse
/// encoded-dir-split projects back together via git-root detection
/// and path-prefix fallback. Finally enriches titles from the
/// claude-mem SQLite database if the plugin is installed — this is a
/// best-effort nice-to-have and never a hard dependency.
#[tauri::command]
pub fn list_projects(state: State<'_, AppState>) -> IpcResult<Vec<Project>> {
    let dirs = scan_projects(&state.projects_root).map_err(|e| e.to_string())?;

    // Collect (summary, encoded_dir) pairs across every discovered
    // session, skipping sessions we fail to summarize (logged only).
    let mut pairs: Vec<(SessionSummary, String)> = Vec::new();
    for dir in dirs {
        for session in &dir.sessions {
            match state.cache.get_or_compute(session, &dir.id) {
                Ok(summary) => pairs.push((summary, dir.id.clone())),
                Err(err) => {
                    tracing::warn!(session = ?session, error = %err, "summarize failed");
                }
            }
        }
    }

    // Best-effort title enrichment from ~/.claude-mem/claude-mem.db.
    // If the plugin isn't installed, the map is empty and this is a
    // no-op — we fall back to whatever `metadata::summarize` picked
    // via the local sanitizer.
    if let Some(db_path) = default_db_path() {
        let title_map = load_title_map(&db_path);
        if !title_map.is_empty() {
            for (summary, _encoded_dir) in pairs.iter_mut() {
                // Preserve the user's explicit /title override.
                if summary.custom_title.is_some() {
                    continue;
                }
                if let Some(ai_title) = title_map.get(&summary.id).and_then(|t| t.best()) {
                    summary.title = ai_title.to_string();
                }
            }
        }
    }

    let disk_projects = build_projects(pairs);

    // Archive discovery from ~/.claude/history.jsonl. Any project
    // path with prompt history whose transcripts are no longer on
    // disk becomes a ghost [`ProjectCategory::Archive`] project.
    let archive_projects = {
        let log = state
            .history_log
            .read()
            .expect("poisoned history_log lock");
        build_archive_projects(&log, &disk_projects)
    };

    let mut all = disk_projects;
    all.extend(archive_projects);

    // Re-sort so archive sits at the bottom — build_projects sorted
    // only its own inputs.
    all.sort_by(
        |a, b| match rank_category(a.category).cmp(&rank_category(b.category)) {
            std::cmp::Ordering::Equal => b.last_activity.cmp(&a.last_activity),
            other => other,
        },
    );

    Ok(all)
}

fn rank_category(c: crate::core::schema::ProjectCategory) -> u8 {
    use crate::core::schema::ProjectCategory as PC;
    match c {
        PC::Regular => 0,
        PC::Observer => 1,
        PC::Archive => 2,
    }
}

// ============================================================================
// Chat turn commands
// ============================================================================

/// Wire format for `chat:event` payloads emitted to the webview.
/// Mirrored on the frontend as `ChatEvent` in `lib/ipc/types.ts`.
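///
/// For example, a `SessionBound` event serializes as (illustrative
/// IDs; the shape follows from the serde attributes below — the
/// `kind` tag is snake_case, fields are camelCase):
///
/// ```text
/// {"kind":"session_bound","turnId":"t-1","sessionId":"s-1"}
/// ```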
#[derive(Debug, Clone, Serialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
enum ChatEventWire {
    #[serde(rename_all = "camelCase")]
    TurnStarted {
        turn_id: String,
        resume_session_id: Option<String>,
        new_session_id: Option<String>,
    },
    #[serde(rename_all = "camelCase")]
    SessionBound { turn_id: String, session_id: String },
    #[serde(rename_all = "camelCase")]
    Message { turn_id: String, message: Message },
    #[serde(rename_all = "camelCase")]
    Stderr { turn_id: String, line: String },
    #[serde(rename_all = "camelCase")]
    TurnCompleted { turn_id: String, exit_code: i32 },
    #[serde(rename_all = "camelCase")]
    TurnFailed { turn_id: String, reason: String },
    #[serde(rename_all = "camelCase")]
    TurnCancelled { turn_id: String },
}

/// Zero-dep PATH walker: returns the first `claude` executable on
/// `$PATH`, or `None`. Called once from [`initialize`].
fn resolve_claude_bin() -> Option<PathBuf> {
    let path_var = std::env::var_os("PATH")?;
    for dir in std::env::split_paths(&path_var) {
        let candidate = dir.join("claude");
        if candidate.is_file() {
            return Some(candidate);
        }
    }
    None
}

/// Start a new chat turn. The frontend generates `turn_id` and (for
/// new sessions) `new_session_id` client-side before invoking. We
/// return immediately after spawning — the ongoing turn surfaces
/// via `chat:event` payloads.
///
/// **This command must be `async`.** `tokio::process::Command::spawn`
/// (called inside `core::chat::spawn_turn`) internally wraps the
/// child's stdout/stderr pipes in `PollEvented`, which calls
/// `Handle::current()` to register them with the tokio I/O driver.
/// Tauri routes sync commands to a non-tokio thread pool where
/// `Handle::try_current()` returns `None` → immediate panic:
/// `"there is no reactor running, must be called from the context
/// of a Tokio 1.x runtime"`. Async commands run on
/// `tauri::async_runtime`, which IS a tokio runtime, so the handle
/// is present. Do not regress this to a sync fn.
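///
/// A minimal sketch of the failure mode (hypothetical command, kept
/// out of compilation with `ignore`):
///
/// ```ignore
/// #[tauri::command]
/// fn bad_start_turn() {
///     // Sync commands run on Tauri's non-tokio pool — no reactor
///     // here, so this spawn panics at runtime.
///     let _child = tokio::process::Command::new("claude").spawn();
/// }
/// ```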
#[tauri::command]
pub async fn start_turn(
    app: AppHandle,
    state: State<'_, AppState>,
    turn_id: String,
    cwd: String,
    resume_session_id: Option<String>,
    new_session_id: Option<String>,
    prompt: String,
    permission_mode: Option<PermissionMode>,
) -> IpcResult<()> {
    let claude_bin = state
        .claude_bin
        .clone()
        .ok_or_else(|| "claude CLI not found on PATH".to_string())?;

    let cwd_path = PathBuf::from(&cwd);
    if !cwd_path.is_dir() {
        return Err(format!("cwd does not exist: {cwd}"));
    }

    let req = TurnRequest {
        turn_id: turn_id.clone(),
        cwd: cwd_path.clone(),
        resume_session_id: resume_session_id.clone(),
        new_session_id: new_session_id.clone(),
        prompt,
        permission_mode: permission_mode.unwrap_or_default(),
        claude_bin,
        env: Vec::new(),
    };

    let TurnHandle { receiver, kill_tx } =
        spawn_turn(req).map_err(|e| format!("spawn failed: {e}"))?;

    state.active_turns.insert(
        turn_id.clone(),
        ActiveTurn {
            kill_tx,
            cwd: cwd_path,
            started_at: Utc::now(),
        },
    );

    // Fire turn_started synchronously so the frontend has the turn
    // registered before any other events could race in.
    let _ = app.emit(
        "chat:event",
        ChatEventWire::TurnStarted {
            turn_id: turn_id.clone(),
            resume_session_id,
            new_session_id,
        },
    );

    // Forwarder task: pulls TurnEvents off the receiver and emits
    // them over the event bus until a terminal variant arrives.
    let app_clone = app.clone();
    let active_turns = state.active_turns.clone();
    let tid = turn_id.clone();
    tauri::async_runtime::spawn(async move {
        let mut rx = receiver;
        while let Some(ev) = rx.recv().await {
            let wire = match ev {
                TurnEvent::SessionBound { session_id } => ChatEventWire::SessionBound {
                    turn_id: tid.clone(),
                    session_id,
                },
                TurnEvent::Message(message) => ChatEventWire::Message {
                    turn_id: tid.clone(),
                    message,
                },
                TurnEvent::Stderr(line) => ChatEventWire::Stderr {
                    turn_id: tid.clone(),
                    line,
                },
                TurnEvent::Completed { exit_code } => {
                    active_turns.remove(&tid);
                    ChatEventWire::TurnCompleted {
                        turn_id: tid.clone(),
                        exit_code,
                    }
                }
                TurnEvent::Failed { reason } => {
                    active_turns.remove(&tid);
                    ChatEventWire::TurnFailed {
                        turn_id: tid.clone(),
                        reason,
                    }
                }
                TurnEvent::Cancelled => {
                    active_turns.remove(&tid);
                    ChatEventWire::TurnCancelled {
                        turn_id: tid.clone(),
                    }
                }
            };
            if let Err(err) = app_clone.emit("chat:event", &wire) {
                tracing::warn!(error = %err, "failed to emit chat:event");
            }
        }
        // Receiver closed — make sure we don't leak an entry.
        active_turns.remove(&tid);
    });

    Ok(())
}

/// Cancel an in-flight turn. No-op if the turn id is unknown
/// (already completed, or never existed).
#[tauri::command]
pub fn cancel_turn(state: State<'_, AppState>, turn_id: String) -> IpcResult<()> {
    if let Some((_, active)) = state.active_turns.remove(&turn_id) {
        // Best-effort — sending can fail only if the receiver was
        // already dropped, which means the turn is already wrapping
        // up and will emit its own terminal event.
        let _ = active.kill_tx.send(());
    }
    Ok(())
}

/// Return the turn IDs currently active. Used by the frontend on
/// resume / reload to reconcile its in-memory `inFlightTurns` map.
#[tauri::command]
pub fn list_active_turns(state: State<'_, AppState>) -> IpcResult<Vec<String>> {
    Ok(state.active_turns.iter().map(|e| e.key().clone()).collect())
}

// ============================================================================
// PTY commands
// ============================================================================

/// Wire format for `pty:data:<pty_id>` event payloads. `base64` is
/// the standard-alphabet encoding of one coalesced batch of PTY
/// output (the reader task produces chunks of ≤ 4 KB; the forwarder
/// in [`spawn_pty`] combines them into larger batches). The frontend
/// decodes via `atob` → `Uint8Array.from` → `xterm.write`.
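///
/// An illustrative payload (values made up; the base64 here decodes
/// to the `ESC[H ESC[2J` clear-screen sequence):
///
/// ```text
/// {"ptyId":"pty-1","base64":"G1tIG1sySg=="}
/// ```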
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct PtyDataPayload {
    pty_id: String,
    base64: String,
}

/// Wire format for `pty:exit` event payloads. Fires exactly once
/// per PTY. `exit_code` is `None` when the subprocess was killed by
/// a signal (no clean exit code available).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct PtyExitPayload {
    pty_id: String,
    exit_code: Option<i32>,
}

/// Spawn a PTY-backed claude subprocess. Must be `async` for the
/// same reason [`start_turn`] must be — the underlying
/// [`core::pty::spawn_pty`] calls `tauri::async_runtime::spawn_blocking`
/// and hands out tokio channels, both of which require a tokio
/// runtime handle in the calling context. See the
/// `tauri_tokio_runtime_rule` memory.
#[tauri::command]
pub async fn spawn_pty(
    app: AppHandle,
    state: State<'_, AppState>,
    pty_id: String,
    session_id: Option<String>,
    cwd: String,
    args: Vec<String>,
    cols: u16,
    rows: u16,
) -> IpcResult<()> {
    let claude_bin = state
        .claude_bin
        .clone()
        .ok_or_else(|| "claude CLI not found on PATH".to_string())?;

    let cwd_path = PathBuf::from(&cwd);
    if !cwd_path.is_dir() {
        return Err(format!("cwd does not exist: {cwd}"));
    }

    let req = PtyRequest {
        pty_id: pty_id.clone(),
        cwd: cwd_path.clone(),
        claude_bin,
        args,
        cols,
        rows,
        env: Vec::new(),
    };

    let PtyHandle {
        mut data_rx,
        exit_rx,
        writer,
        master,
        kill_tx,
        ring_buffer,
    } = core_spawn_pty(req).map_err(|e| format!("spawn pty failed: {e}"))?;

    state.active_ptys.insert(
        pty_id.clone(),
        PtyEntry {
            session_id,
            writer,
            master,
            kill_tx,
            ring_buffer,
            cwd: cwd_path,
            started_at: Utc::now(),
        },
    );

    // Forwarder task: write-combines stdout chunks off `data_rx`
    // into ~frame-sized batches and emits them on a per-PTY event
    // `pty:data:<pty_id>`. Write combining matters because:
    //
    // * Each emit is a Tauri IPC call that dispatches a DOM
    //   event on the webview's main thread.
    // * Each frontend callback does base64 decode + `term.write`
    //   which is non-trivial main-thread work.
    //
    // Without coalescing, a claude TUI spinner + token stream
    // easily pushed 30-60 discrete chunks per second through the
    // bridge, stalling the main thread enough to beachball mouse
    // input. We now buffer up to `MAX_COALESCE_BYTES` or until
    // `MAX_COALESCE_MS` elapses since the first queued chunk,
    // whichever comes first.
    const MAX_COALESCE_BYTES: usize = 16 * 1024;
    const MAX_COALESCE_MS: u64 = 16;

    let app_clone = app.clone();
    let active_ptys = state.active_ptys.clone();
    let pid = pty_id.clone();
    let data_event = format!("pty:data:{pid}");
    tauri::async_runtime::spawn(async move {
        use std::time::Duration;

        let mut buffer: Vec<u8> = Vec::with_capacity(MAX_COALESCE_BYTES);
        loop {
            // Block for the first chunk of a batch. If the channel
            // has closed, we're done (the buffer is always empty here).
            let first = match data_rx.recv().await {
                Some(chunk) => chunk,
                None => break,
            };
            buffer.extend_from_slice(&first);

            // Pull additional chunks until we hit the byte or time
            // budget. `time::timeout` on `recv` bounds each wait to
            // whatever remains of the coalescing window.
            let deadline = tokio::time::Instant::now()
                + Duration::from_millis(MAX_COALESCE_MS);
            while buffer.len() < MAX_COALESCE_BYTES {
                let now = tokio::time::Instant::now();
                if now >= deadline {
                    break;
                }
                let remaining = deadline - now;
                match tokio::time::timeout(remaining, data_rx.recv()).await {
                    Ok(Some(chunk)) => buffer.extend_from_slice(&chunk),
                    Ok(None) => {
                        // Sender closed — flush this batch; the next
                        // outer `recv` returns `None` and exits the loop.
                        break;
                    }
                    Err(_) => break, // timeout elapsed
                }
            }

            let b64 = BASE64_STANDARD.encode(&buffer);
            let payload = PtyDataPayload {
                pty_id: pid.clone(),
                base64: b64,
            };
            if let Err(err) = app_clone.emit(&data_event, &payload) {
                tracing::warn!(error = %err, "failed to emit pty:data");
            }
            buffer.clear();
        }

        // data_rx closed — wait on the exit code and emit pty:exit.
        let exit_code = exit_rx.await.ok().flatten();
        let payload = PtyExitPayload {
            pty_id: pid.clone(),
            exit_code,
        };
        if let Err(err) = app_clone.emit("pty:exit", &payload) {
            tracing::warn!(error = %err, "failed to emit pty:exit");
        }
        active_ptys.remove(&pid);
    });

    Ok(())
}

/// Forward a keystroke / paste string from xterm into the PTY
/// master. The frontend sends UTF-8 text; we pass the bytes through
/// verbatim so escape sequences, control codes, and multi-byte
/// characters all round-trip correctly.
#[tauri::command]
pub fn write_pty(
    state: State<'_, AppState>,
    pty_id: String,
    data: String,
) -> IpcResult<()> {
    let entry = state
        .active_ptys
        .get(&pty_id)
        .ok_or_else(|| format!("no such pty: {pty_id}"))?;
    core_write_pty(&entry.writer, data.as_bytes()).map_err(|e| e.to_string())
}

/// Propagate a terminal resize (from xterm's `onResize`) down to
/// the PTY master. On Unix this triggers `SIGWINCH` in the
/// subprocess and claude redraws automatically.
#[tauri::command]
pub fn resize_pty(
    state: State<'_, AppState>,
    pty_id: String,
    cols: u16,
    rows: u16,
) -> IpcResult<()> {
    let entry = state
        .active_ptys
        .get(&pty_id)
        .ok_or_else(|| format!("no such pty: {pty_id}"))?;
    core_resize_pty(&entry.master, cols, rows).map_err(|e| e.to_string())
}

/// Explicit teardown: remove the entry from `active_ptys` and send
/// the kill signal. The subprocess dies, the wait task reaps, the
/// forwarder emits `pty:exit`, and its own `remove()` becomes a
/// no-op because we already removed here.
///
/// No-op if the pty id is unknown (already exited or never existed).
#[tauri::command]
pub fn close_pty(state: State<'_, AppState>, pty_id: String) -> IpcResult<()> {
    if let Some((_, entry)) = state.active_ptys.remove(&pty_id) {
        let _ = entry.kill_tx.send(());
    }
    Ok(())
}

/// Snapshot the ring buffer and return it as a base64-encoded
/// string. Called by the frontend on reattach to replay recent
/// stdout into a freshly-mounted xterm instance.
#[tauri::command]
pub fn get_pty_buffer(state: State<'_, AppState>, pty_id: String) -> IpcResult<String> {
    let entry = state
        .active_ptys
        .get(&pty_id)
        .ok_or_else(|| format!("no such pty: {pty_id}"))?;
    let snapshot = entry
        .ring_buffer
        .lock()
        .map_err(|_| "ring buffer poisoned".to_string())?
        .snapshot();
    Ok(BASE64_STANDARD.encode(&snapshot))
}

/// Wire format for `list_ptys` responses. Mirrors the shape the
/// titlebar popover wants for its "N terminals" list.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PtyInfo {
    pub pty_id: String,
    pub session_id: Option<String>,
    pub cwd: String,
    pub started_at: DateTime<Utc>,
}

/// List every live PTY with enough metadata for the titlebar
/// popover ("3 terminals") and the sidebar indicators. Called on
/// mount and after `pty:data` / `pty:exit` events to reconcile.
#[tauri::command]
pub fn list_ptys(state: State<'_, AppState>) -> IpcResult<Vec<PtyInfo>> {
    Ok(state
        .active_ptys
        .iter()
        .map(|entry| PtyInfo {
            pty_id: entry.key().clone(),
            session_id: entry.session_id.clone(),
            cwd: entry.cwd.to_string_lossy().into_owned(),
            started_at: entry.started_at,
        })
        .collect())
}

/// Drain every live PTY, sending kill to each. Called from the
/// window-destroy hook in `lib.rs::run` so we don't leak claude
/// subprocesses across an app quit.
///
/// We collect keys first and remove by id because `DashMap`'s
/// iterator doesn't permit in-place removal, and `clear()` would
/// drop the `kill_tx` senders without firing them.
pub fn shutdown_active_ptys(state: &AppState) {
    let ids: Vec<String> = state
        .active_ptys
        .iter()
        .map(|e| e.key().clone())
        .collect();
    for id in ids {
        if let Some((_, entry)) = state.active_ptys.remove(&id) {
            let _ = entry.kill_tx.send(());
        }
    }
}

// ============================================================================
// Frontend log bridge
// ============================================================================

/// Write a structured log entry from the webview into Rust tracing.
/// Wired up from `src/lib/debug.ts` to capture `window.onerror`,
/// `unhandledrejection`, and `console.error` — anything that would
/// otherwise silently crash the React tree without leaving a trace
/// on disk.
///
/// `level` is one of `"error"`, `"warn"`, `"info"`, `"debug"`; any
/// other value is treated as `"info"`. `source` is a free-form tag
/// (`"window.onerror"`, `"react"`, `"console.error"`, etc.) and
/// `message` is the payload. The optional `stack` argument carries
/// a JS stack trace when available.
#[tauri::command]
pub fn log_frontend(
    level: String,
    source: String,
    message: String,
    stack: Option<String>,
) -> IpcResult<()> {
    let stack_fmt = stack.as_deref().unwrap_or("");
    match level.as_str() {
        "error" => {
            tracing::error!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
        }
        "warn" => {
            tracing::warn!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
        }
        "debug" => {
            tracing::debug!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
        }
        _ => {
            tracing::info!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
        }
    }
    Ok(())
}

/// All session summaries for a single project (by **encoded source
/// dir**, not the merged project id), newest first.
///
/// The tree-shaped sidebar no longer calls this — projects returned by
/// `list_projects` already carry their sessions embedded. Kept as a
/// fallback for debugging and for future callers that want to
/// re-fetch summaries without re-running the full project sweep.
#[tauri::command]
pub fn list_sessions(
    encoded_dir: String,
    state: State<'_, AppState>,
) -> IpcResult<Vec<SessionSummary>> {
    let project_dir = state.projects_root.join(&encoded_dir);
    if !project_dir.exists() {
        return Err(format!("encoded dir not found: {encoded_dir}"));
    }

    let mut sessions = crate::core::discovery::list_session_files(&project_dir)
        .map_err(|e| e.to_string())?
        .into_iter()
        .filter_map(|path| {
            state
                .cache
                .get_or_compute(&path, &encoded_dir)
                .map_err(|e| {
                    tracing::warn!(path = ?path, error = %e, "summarize failed");
                    e
                })
                .ok()
        })
        .collect::<Vec<_>>();

    sessions.sort_by(|a, b| b.last_activity_at.cmp(&a.last_activity_at));
    Ok(sessions)
}

/// Default tail cap for disk session reads. Bounded so a 100 MB+
/// session file can't hang the main thread during IPC serialize /
/// deserialize. The frontend can opt in to the full transcript by
/// passing an explicit `messageLimit` (future API) once we have
/// lazy-scroll in the viewer.
const DEFAULT_SESSION_MESSAGE_LIMIT: usize = 2000;

/// Full viewer payload for one session. Dispatches on `source`:
///
/// - `Disk` (the default): `encoded_dir` is the physical source-dir
///   name (the `projectId` field inside a disk SessionSummary), and
///   we read the real jsonl.
/// - `Archive`: `encoded_dir` is the absolute project path (also the
///   `projectId` field on an archive SessionSummary), and we
///   synthesize a prompt-only [`SessionDetail`] from the in-memory
///   `history.jsonl` log.
///
/// **Async** so the file read + JSONL parse + serialize-to-IPC
/// pipeline runs on the tokio pool rather than Tauri's sync
/// command pool; this prevents a slow 170 MB read from blocking
/// other commands (`list_ptys`, `write_pty`, etc.) while it runs.
#[tauri::command]
pub async fn read_session(
    encoded_dir: String,
    session_id: String,
    source: Option<SessionSource>,
    state: State<'_, AppState>,
) -> IpcResult<SessionDetail> {
    match source.unwrap_or(SessionSource::Disk) {
        SessionSource::Disk => {
            let projects_root = state.projects_root.clone();
            // Move the blocking file read off the async runtime.
            let encoded_dir_owned = encoded_dir;
            let session_id_owned = session_id;
            tauri::async_runtime::spawn_blocking(move || {
                read_disk_session_blocking(
                    &projects_root,
                    &encoded_dir_owned,
                    &session_id_owned,
                )
            })
            .await
            .map_err(|e| format!("read_session join error: {e}"))?
        }
        SessionSource::Archive => read_archive_session(&encoded_dir, &state),
    }
}

fn read_disk_session_blocking(
    projects_root: &Path,
    encoded_dir: &str,
    session_id: &str,
) -> IpcResult<SessionDetail> {
    let path = projects_root
        .join(encoded_dir)
        .join(format!("{session_id}.jsonl"));
    if !path.exists() {
        return Err(format!("session not found: {encoded_dir}/{session_id}"));
    }
    core_read_session_limited(&path, encoded_dir, Some(DEFAULT_SESSION_MESSAGE_LIMIT))
        .map_err(|e| e.to_string())
}

fn read_archive_session(project_path: &str, state: &AppState) -> IpcResult<SessionDetail> {
    let log = state
        .history_log
        .read()
        .expect("poisoned history_log lock");
    let entries = log.entries_for(project_path);
    if entries.is_empty() {
        return Err(format!("no archive history for: {project_path}"));
    }

    let earliest = entries.first().map(|e| e.timestamp_ms).unwrap_or(0);
    let latest = entries.last().map(|e| e.timestamp_ms).unwrap_or(0);
    let started_at = DateTime::<Utc>::from_timestamp_millis(earliest);
    let last_activity = DateTime::<Utc>::from_timestamp_millis(latest);

    // Synthesize the summary to match what `build_archive_projects`
    // returned for this project. The frontend has already seen this
    // shape from list_projects so we mirror it.
    let title_candidate = entries
        .iter()
        .map(|e| e.display.as_str())
        .find(|d| !d.trim().is_empty() && !d.trim().starts_with('/'))
        .unwrap_or_default();
    let title = crate::core::title::sanitize_title(title_candidate)
        .unwrap_or_else(|| "(archived)".to_string());

    let summary = SessionSummary {
        id: archive_session_id(project_path),
        project_id: project_path.to_string(),
        title,
        started_at,
        last_activity_at: last_activity,
        model: None,
        message_count: entries.len() as u32,
        // Archive sessions are reconstructed from ~/.claude/history.jsonl
        // which stores only human prompts — every entry IS a prompt.
        prompt_count: entries.len() as u32,
        git_branch: None,
        version: None,
        slug: None,
        cwd: Some(project_path.to_string()),
        custom_title: None,
        entrypoint: Some("archive".to_string()),
        source: SessionSource::Archive,
    };

    let messages = entries
        .iter()
        .map(|e| {
            let at = DateTime::<Utc>::from_timestamp_millis(e.timestamp_ms)
                .unwrap_or_else(Utc::now);
            Message::User {
                id: format!("archive-{}", e.timestamp_ms),
                at,
                text: e.display.clone(),
                is_meta: false,
            }
        })
        .collect();

    Ok(SessionDetail { summary, messages })
}

/// Force a full rescan, bypassing the cache for every session. Used
/// by the "refresh" button and by tests.
#[tauri::command]
pub fn rescan(state: State<'_, AppState>) -> IpcResult<Vec<Project>> {
    // Simplest implementation: drop the cache entry for every
    // discovered session, then delegate to list_projects. (Smarter
    // later: bust only the sessions the watcher flagged as changed.)
    let dirs = scan_projects(&state.projects_root).map_err(|e| e.to_string())?;
    for dir in &dirs {
        for s in &dir.sessions {
            state.cache.remove(s);
        }
    }
    // Reload the history log in case the user has typed new prompts
    // since we last loaded.
    if let Some(path) = default_history_path() {
        let fresh = HistoryLog::load(&path);
        *state
            .history_log
            .write()
            .expect("poisoned history_log lock") = fresh;
    }
    list_projects(state)
}

/// Initialize shared state and the FS watcher. Called once from
/// `setup()` in `lib.rs`.
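///
/// A sketch of the expected wiring (assuming a conventional Tauri v2
/// builder; the handler list here is illustrative, not lifted from
/// `lib.rs`):
///
/// ```ignore
/// tauri::Builder::default()
///     .setup(|app| {
///         // Build shared state once, then hand it to Tauri's
///         // state map so commands can take `State<'_, AppState>`.
///         let state = commands::initialize(app.handle())?;
///         app.manage(state);
///         Ok(())
///     })
///     .invoke_handler(tauri::generate_handler![
///         commands::list_projects,
///         commands::start_turn,
///         // ...
///     ])
///     .run(tauri::generate_context!())
///     .expect("error while running claudex");
/// ```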
pub fn initialize(app: &AppHandle) -> Result<AppState, Box<dyn std::error::Error>> {
    let projects_root = crate::core::paths::projects_dir()?;

    // Cache lives in the Tauri app data dir.
    let app_data = app
        .path()
        .app_data_dir()
        .map_err(|e| format!("failed to resolve app data dir: {e}"))?;
    std::fs::create_dir_all(&app_data)?;
    let cache_path = app_data.join("summaries.bin");
    let cache = Arc::new(SummaryCache::load(&cache_path));

    // Load the prompt-input history log once at startup so archive
    // discovery and reads are zero-latency. This is rebuilt on
    // `rescan` too.
    let history_log = Arc::new(RwLock::new(
        default_history_path()
            .map(|p| HistoryLog::load(&p))
            .unwrap_or_else(HistoryLog::empty),
    ));

    // Resolve the claude CLI once. `None` means chat is disabled;
    // start_turn will return an error and the frontend shows a toast.
    let claude_bin = resolve_claude_bin();
    if claude_bin.is_none() {
        tracing::warn!("claude CLI not found on PATH; chat features disabled");
    }

    let active_turns = Arc::new(DashMap::new());
    let active_ptys: Arc<DashMap<String, PtyEntry>> = Arc::new(DashMap::new());

    // Start the watcher. `spawn_watcher` errors if the watch root
    // doesn't exist, so when `projects_root` isn't there yet we fall
    // back to watching its parent (usually ~/.claude) and notice if
    // the directory appears later.
    let watch_root = if projects_root.exists() {
        projects_root.clone()
    } else {
        projects_root
            .parent()
            .map(PathBuf::from)
            .unwrap_or_else(|| projects_root.clone())
    };
    let (watcher, mut rx) = spawn_watcher(&watch_root)?;
    let app_clone = app.clone();
    let cache_clone = cache.clone();
    let ptys_clone = active_ptys.clone();
    tauri::async_runtime::spawn(async move {
        while let Some(change) = rx.recv().await {
            handle_change(&app_clone, &cache_clone, &ptys_clone, change);
        }
    });

    Ok(AppState {
        projects_root,
        cache,
        cache_path,
        history_log,
        active_turns,
        active_ptys,
        claude_bin,
        _watcher: watcher,
    })
}

fn handle_change(
    app: &AppHandle,
    cache: &SummaryCache,
    active_ptys: &Arc<DashMap<String, PtyEntry>>,
    change: SessionChange,
) {
    // When a PTY is actively writing to this session's JSONL file,
    // we'd otherwise re-enter the rescan → list_projects →
    // summarize → sidebar re-render cycle every few hundred ms for
    // the duration of the subprocess's run. The PTY is already the
    // canonical writer and the user sees it via the xterm view, so
    // there's nothing new for the sidebar to learn. Skip the event
    // entirely for PTY-owned sessions to cut the write-storm at the
    // source. Cache invalidation still runs so non-PTY callers
    // (e.g. a manual rescan) see fresh data.
    let skip_emit = path_matches_live_pty(&change, active_ptys);
    match &change {
        SessionChange::Removed(path) => cache.remove(path),
        SessionChange::Added(path) | SessionChange::Modified(path) => {
            // Drop stale entry; next list_* call will repopulate.
            cache.remove(path);
        }
    }
    if skip_emit {
        return;
    }
    // Tell the webview something changed. The frontend debounces
    // and responds with a `rescan` invocation.
    let payload = ChangePayload::from(&change);
    if let Err(e) = app.emit("sessions:changed", payload) {
        tracing::warn!(error = %e, "failed to emit sessions:changed");
    }
}

/// Return true if the file path carrying this change is the JSONL
/// transcript of a session currently being driven by a live PTY.
/// Used by `handle_change` to suppress file-watcher events for
/// actively-written sessions so they don't thrash the sidebar.
fn path_matches_live_pty(
    change: &SessionChange,
    active_ptys: &Arc<DashMap<String, PtyEntry>>,
) -> bool {
    let path = match change {
        SessionChange::Added(p)
        | SessionChange::Modified(p)
        | SessionChange::Removed(p) => p,
    };
    let Some(stem) = path.file_stem().and_then(|s| s.to_str()) else {
        return false;
    };
    active_ptys
        .iter()
        .any(|entry| entry.session_id.as_deref() == Some(stem))
}

#[derive(Debug, Clone, Serialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
enum ChangePayload {
    Added { path: String },
    Modified { path: String },
    Removed { path: String },
}

impl From<&SessionChange> for ChangePayload {
    fn from(c: &SessionChange) -> Self {
        match c {
            SessionChange::Added(p) => ChangePayload::Added {
                path: p.to_string_lossy().into_owned(),
            },
            SessionChange::Modified(p) => ChangePayload::Modified {
                path: p.to_string_lossy().into_owned(),
            },
            SessionChange::Removed(p) => ChangePayload::Removed {
                path: p.to_string_lossy().into_owned(),
            },
        }
    }
}

/// Persist the cache to disk — called on shutdown.
pub fn persist_cache_on_exit(state: &AppState) {
    if let Err(e) = state.cache.save(&state.cache_path) {
        tracing::warn!(error = %e, path = ?state.cache_path, "failed to save cache");
    }
}