Rust · 36813 bytes Raw Blame History
1 //! Tauri command façade — the boundary between the pure-Rust `core`
2 //! module and the webview. All heavy lifting lives in `core`; these
3 //! commands only coerce types, hand off to the cache, and convert
4 //! errors to strings for the IPC edge.
5
6 use std::io::Write;
7 use std::path::{Path, PathBuf};
8 use std::sync::{Arc, Mutex, RwLock};
9
10 use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
11 use base64::Engine as _;
12 use chrono::{DateTime, Utc};
13 use dashmap::DashMap;
14 use portable_pty::MasterPty;
15 use serde::Serialize;
16 use tauri::{AppHandle, Emitter, Manager, State};
17 use tokio::sync::oneshot;
18
19 use crate::core::cache::SummaryCache;
20 use crate::core::chat::{spawn_turn, TurnEvent, TurnHandle, TurnRequest};
21 use crate::core::claude_mem::{default_db_path, load_title_map};
22 use crate::core::discovery::scan_projects;
23 use crate::core::grouping::{archive_session_id, build_archive_projects, build_projects};
24 use crate::core::history_log::{default_history_path, HistoryLog};
25 use crate::core::pty::{
26 resize_pty as core_resize_pty, spawn_pty as core_spawn_pty, write_pty as core_write_pty,
27 PtyHandle, PtyRequest, RingBuffer,
28 };
29 use crate::core::reader::read_session_limited as core_read_session_limited;
30 use crate::core::schema::{
31 Message, PermissionMode, Project, SessionDetail, SessionSource, SessionSummary,
32 };
33 use crate::core::watcher::{spawn_watcher, SessionChange, WatcherHandle};
34
35
/// Shared state handed to every command. Wrapped in an `Arc` via
/// Tauri's `.manage()` so clones are cheap.
pub struct AppState {
    /// Root directory that session discovery scans (the on-disk
    /// projects dir — see `scan_projects` calls below).
    pub projects_root: PathBuf,
    /// Session-summary cache consulted by [`list_projects`] /
    /// [`list_sessions`]; entries are invalidated by the watcher and
    /// by [`rescan`].
    pub cache: Arc<SummaryCache>,
    /// Where the cache is persisted on exit ([`persist_cache_on_exit`]).
    pub cache_path: PathBuf,
    /// Parsed `~/.claude/history.jsonl`, used to reconstruct
    /// archive projects for paths whose on-disk transcripts were
    /// deleted (most often by a Claude Code major version upgrade).
    /// Reloaded on every `rescan`.
    pub history_log: Arc<RwLock<HistoryLog>>,
    /// Currently-running chat turns, keyed by the client-generated
    /// `turn_id`. Holding the `kill_tx` in here lets `cancel_turn`
    /// take ownership and fire it. Forwarder tasks `remove()` their
    /// own entry when emitting the terminal event; whichever side
    /// wins the `remove` race cancels or reaps the other.
    pub active_turns: Arc<DashMap<String, ActiveTurn>>,
    /// Currently-running PTY subprocesses, keyed by client-generated
    /// `pty_id`. **Unlike `active_turns`, PTY entries survive session
    /// switches.** They only leave the map when: (1) the user clicks
    /// the close ✕ button (triggering [`close_pty`]), (2) the
    /// subprocess exits on its own and the forwarder task removes
    /// the entry, or (3) the app quits and
    /// [`shutdown_active_ptys`] drains everything. This is what
    /// enables the codex-style parallel-thread behavior.
    pub active_ptys: Arc<DashMap<String, PtyEntry>>,
    /// Absolute path to the `claude` binary, resolved once at
    /// startup by walking `$PATH`. `None` means chat is disabled —
    /// `start_turn` returns an error and the frontend shows a toast.
    pub claude_bin: Option<PathBuf>,
    /// Holding this keeps the watcher alive for the lifetime of the app.
    pub _watcher: WatcherHandle,
}
69
/// One entry in [`AppState::active_turns`]. Owning the `kill_tx`
/// here lets `cancel_turn` pull it out and send.
pub struct ActiveTurn {
    /// One-shot kill signal; consumed (moved out) by [`cancel_turn`].
    pub kill_tx: oneshot::Sender<()>,
    /// Working directory the turn's subprocess was spawned in.
    pub cwd: PathBuf,
    /// When the turn was registered in the map (UTC).
    pub started_at: DateTime<Utc>,
}
77
/// One entry in [`AppState::active_ptys`]. The writer and master
/// live here so the commands layer can mutate them from any
/// later invocation (`write_pty`, `resize_pty`). `kill_tx` is
/// consumed by [`close_pty`] (or [`shutdown_active_ptys`] on app
/// quit) — whichever removes the entry first owns the signal.
pub struct PtyEntry {
    /// Which claudex session this PTY is bound to, if any. Used by
    /// the sidebar's "is this session live?" lookup. `None` means
    /// the PTY is detached from any session (not currently used but
    /// reserved for future tooling like one-off terminals).
    pub session_id: Option<String>,
    /// Handle for forwarding keystrokes into the subprocess
    /// ([`write_pty`]).
    pub writer: Arc<Mutex<Box<dyn Write + Send>>>,
    /// PTY master side, needed for resizes ([`resize_pty`]).
    pub master: Arc<Mutex<Box<dyn MasterPty + Send>>>,
    /// One-shot kill signal; consumed by whoever removes the entry.
    pub kill_tx: oneshot::Sender<()>,
    /// Recent-stdout ring buffer, replayed on reattach via
    /// [`get_pty_buffer`].
    pub ring_buffer: Arc<Mutex<RingBuffer>>,
    /// Working directory the subprocess was spawned in.
    pub cwd: PathBuf,
    /// When the PTY was registered in the map (UTC).
    pub started_at: DateTime<Utc>,
}
96
97 pub type IpcResult<T> = Result<T, String>;
98
99 /// List every project directory under `~/.claude/projects/` with its
100 /// aggregate metadata. Uses the summary cache so repeat calls are
101 /// near-instant, then runs the grouping layer to collapse
102 /// encoded-dir-split projects back together via git-root detection
103 /// and path-prefix fallback. Finally enriches titles from the
104 /// claude-mem SQLite database if the plugin is installed — this is a
105 /// best-effort nice-to-have and never a hard dependency.
106 #[tauri::command]
107 pub fn list_projects(state: State<'_, AppState>) -> IpcResult<Vec<Project>> {
108 let dirs = scan_projects(&state.projects_root).map_err(|e| e.to_string())?;
109
110 // Collect (summary, encoded_dir) pairs across every discovered
111 // session, skipping sessions we fail to summarize (logged only).
112 let mut pairs: Vec<(SessionSummary, String)> = Vec::new();
113 for dir in dirs {
114 for session in &dir.sessions {
115 match state.cache.get_or_compute(session, &dir.id) {
116 Ok(summary) => pairs.push((summary, dir.id.clone())),
117 Err(err) => {
118 tracing::warn!(session = ?session, error = %err, "summarize failed");
119 }
120 }
121 }
122 }
123
124 // Best-effort title enrichment from ~/.claude-mem/claude-mem.db.
125 // If the plugin isn't installed, the map is empty and this is a
126 // no-op — we fall back to whatever `metadata::summarize` picked
127 // via the local sanitizer.
128 if let Some(db_path) = default_db_path() {
129 let title_map = load_title_map(&db_path);
130 if !title_map.is_empty() {
131 for (summary, _encoded_dir) in pairs.iter_mut() {
132 // Preserve the user's explicit /title override.
133 if summary.custom_title.is_some() {
134 continue;
135 }
136 if let Some(ai_title) = title_map.get(&summary.id).and_then(|t| t.best())
137 {
138 summary.title = ai_title.to_string();
139 }
140 }
141 }
142 }
143
144 let disk_projects = build_projects(pairs);
145
146 // Archive discovery from ~/.claude/history.jsonl. Any project
147 // path with prompt history whose transcripts are no longer on
148 // disk becomes a ghost [`ProjectCategory::Archive`] project.
149 let archive_projects = {
150 let log = state
151 .history_log
152 .read()
153 .expect("poisoned history_log lock");
154 build_archive_projects(&log, &disk_projects)
155 };
156
157 let mut all = disk_projects;
158 all.extend(archive_projects);
159
160 // Re-sort so archive sits at the bottom — build_projects sorted
161 // only its own inputs.
162 all.sort_by(
163 |a, b| match rank_category(a.category).cmp(&rank_category(b.category)) {
164 std::cmp::Ordering::Equal => b.last_activity.cmp(&a.last_activity),
165 other => other,
166 },
167 );
168
169 Ok(all)
170 }
171
172 fn rank_category(c: crate::core::schema::ProjectCategory) -> u8 {
173 match c {
174 crate::core::schema::ProjectCategory::Regular => 0,
175 crate::core::schema::ProjectCategory::Observer => 1,
176 crate::core::schema::ProjectCategory::Archive => 2,
177 }
178 }
179
180 // ============================================================================
181 // Chat turn commands
182 // ============================================================================
183
/// Wire format for `chat:event` payloads emitted to the webview.
/// Mirrored on the frontend as `ChatEvent` in `lib/ipc/types.ts`.
#[derive(Debug, Clone, Serialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
enum ChatEventWire {
    /// Emitted synchronously from [`start_turn`] before any stream
    /// events, so the frontend can register the turn first.
    #[serde(rename_all = "camelCase")]
    TurnStarted {
        turn_id: String,
        resume_session_id: Option<String>,
        new_session_id: Option<String>,
    },
    /// The backend learned which session id the turn belongs to.
    #[serde(rename_all = "camelCase")]
    SessionBound { turn_id: String, session_id: String },
    /// One streamed message from the subprocess.
    #[serde(rename_all = "camelCase")]
    Message { turn_id: String, message: Message },
    /// One line of subprocess stderr, forwarded verbatim.
    #[serde(rename_all = "camelCase")]
    Stderr { turn_id: String, line: String },
    // The three variants below are terminal: the forwarder removes
    // the turn from `active_turns` before emitting them.
    #[serde(rename_all = "camelCase")]
    TurnCompleted { turn_id: String, exit_code: i32 },
    #[serde(rename_all = "camelCase")]
    TurnFailed { turn_id: String, reason: String },
    #[serde(rename_all = "camelCase")]
    TurnCancelled { turn_id: String },
}
208
/// Zero-dep PATH walker: returns the first `claude` executable on
/// `$PATH`, or `None`. Called once at [`initialize`].
///
/// Note: "executable" here means `is_file()` — the execute bit is
/// not checked, matching the original behavior.
fn resolve_claude_bin() -> Option<PathBuf> {
    let path_var = std::env::var_os("PATH")?;
    std::env::split_paths(&path_var)
        .map(|dir| dir.join("claude"))
        .find(|candidate| candidate.is_file())
}
221
/// Start a new chat turn. The frontend generates `turn_id` and (for
/// new sessions) `new_session_id` client-side before invoking. We
/// return immediately after spawning — the ongoing turn surfaces
/// via `chat:event` payloads.
///
/// **This command must be `async`.** `tokio::process::Command::spawn`
/// (called inside `core::chat::spawn_turn`) internally wraps the
/// child's stdout/stderr pipes in `PollEvented`, which calls
/// `Handle::current()` to register them with the tokio I/O driver.
/// Tauri routes sync commands to a non-tokio thread pool where
/// `Handle::try_current()` returns `None` → immediate panic:
/// `"there is no reactor running, must be called from the context
/// of a Tokio 1.x runtime"`. Async commands run on
/// `tauri::async_runtime` which IS a tokio runtime, so the handle
/// is present. Do not regress this to a sync fn.
#[tauri::command]
pub async fn start_turn(
    app: AppHandle,
    state: State<'_, AppState>,
    turn_id: String,
    cwd: String,
    resume_session_id: Option<String>,
    new_session_id: Option<String>,
    prompt: String,
    permission_mode: Option<PermissionMode>,
) -> IpcResult<()> {
    // Chat is disabled entirely when the claude CLI wasn't found at
    // startup (see `resolve_claude_bin`).
    let claude_bin = state
        .claude_bin
        .clone()
        .ok_or_else(|| "claude CLI not found on PATH".to_string())?;

    // Validate the working directory up front so the error reaches
    // the caller instead of surfacing as a confusing spawn failure.
    let cwd_path = PathBuf::from(&cwd);
    if !cwd_path.is_dir() {
        return Err(format!("cwd does not exist: {cwd}"));
    }

    let req = TurnRequest {
        turn_id: turn_id.clone(),
        cwd: cwd_path.clone(),
        resume_session_id: resume_session_id.clone(),
        new_session_id: new_session_id.clone(),
        prompt,
        permission_mode: permission_mode.unwrap_or_default(),
        claude_bin,
        env: Vec::new(),
    };

    let TurnHandle { receiver, kill_tx } =
        spawn_turn(req).map_err(|e| format!("spawn failed: {e}"))?;

    // Register before emitting anything so `cancel_turn` can find
    // the entry as soon as the frontend learns the turn exists.
    state.active_turns.insert(
        turn_id.clone(),
        ActiveTurn {
            kill_tx,
            cwd: cwd_path,
            started_at: Utc::now(),
        },
    );

    // Fire turn_started synchronously so the frontend has the turn
    // registered before any other events could race in.
    let _ = app.emit(
        "chat:event",
        ChatEventWire::TurnStarted {
            turn_id: turn_id.clone(),
            resume_session_id,
            new_session_id,
        },
    );

    // Forwarder task: pulls TurnEvents off the receiver and emits
    // them over the event bus until a terminal variant arrives.
    let app_clone = app.clone();
    let active_turns = state.active_turns.clone();
    let tid = turn_id.clone();
    tauri::async_runtime::spawn(async move {
        let mut rx = receiver;
        while let Some(ev) = rx.recv().await {
            let wire = match ev {
                TurnEvent::SessionBound { session_id } => {
                    ChatEventWire::SessionBound {
                        turn_id: tid.clone(),
                        session_id,
                    }
                }
                TurnEvent::Message(message) => ChatEventWire::Message {
                    turn_id: tid.clone(),
                    message,
                },
                TurnEvent::Stderr(line) => ChatEventWire::Stderr {
                    turn_id: tid.clone(),
                    line,
                },
                // Terminal variants: remove the map entry *before*
                // emitting so a concurrent `cancel_turn` can't fire
                // a kill at an already-finished turn.
                TurnEvent::Completed { exit_code } => {
                    active_turns.remove(&tid);
                    ChatEventWire::TurnCompleted {
                        turn_id: tid.clone(),
                        exit_code,
                    }
                }
                TurnEvent::Failed { reason } => {
                    active_turns.remove(&tid);
                    ChatEventWire::TurnFailed {
                        turn_id: tid.clone(),
                        reason,
                    }
                }
                TurnEvent::Cancelled => {
                    active_turns.remove(&tid);
                    ChatEventWire::TurnCancelled {
                        turn_id: tid.clone(),
                    }
                }
            };
            if let Err(err) = app_clone.emit("chat:event", &wire) {
                tracing::warn!(error = %err, "failed to emit chat:event");
            }
        }
        // Receiver closed — make sure we don't leak an entry.
        active_turns.remove(&tid);
    });

    Ok(())
}
346
347 /// Cancel an in-flight turn. No-op if the turn id is unknown
348 /// (already completed, or never existed).
349 #[tauri::command]
350 pub fn cancel_turn(state: State<'_, AppState>, turn_id: String) -> IpcResult<()> {
351 if let Some((_, active)) = state.active_turns.remove(&turn_id) {
352 // Best-effort — sending can fail only if the receiver was
353 // already dropped, which means the turn is already wrapping
354 // up and will emit its own terminal event.
355 let _ = active.kill_tx.send(());
356 }
357 Ok(())
358 }
359
360 /// Return the turn IDs currently active. Used by the frontend on
361 /// resume / reload to reconcile its in-memory `inFlightTurns` map.
362 #[tauri::command]
363 pub fn list_active_turns(state: State<'_, AppState>) -> IpcResult<Vec<String>> {
364 Ok(state.active_turns.iter().map(|e| e.key().clone()).collect())
365 }
366
// NOTE(review): a `_path_import_anchor(_p: &Path)` shim used to live
// here solely to silence an unused-import warning for `Path`. The
// import is genuinely used by `read_disk_session_blocking` below, so
// the dead-code anchor was redundant and has been removed.
371
372 // ============================================================================
373 // PTY commands
374 // ============================================================================
375
/// Wire format for `pty:data` event payloads. `base64` is the
/// standard-alphabet encoding of a single stdout chunk (≤ 4 KB from
/// the reader task). The frontend decodes via `atob` →
/// `Uint8Array.from` → `xterm.write`.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct PtyDataPayload {
    // Redundant with the per-PTY event name (`pty:data:<pty_id>`)
    // but kept in the payload so handlers don't have to parse it out.
    pty_id: String,
    // One base64-encoded stdout chunk.
    base64: String,
}
386
/// Wire format for `pty:exit` event payloads. Fires exactly once
/// per PTY. `exit_code` is `None` when the subprocess was killed by
/// a signal (no clean exit code available).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct PtyExitPayload {
    // Which PTY exited — `pty:exit` is a single global event, unlike
    // the per-PTY `pty:data:<id>` channel, so the id must travel in
    // the payload.
    pty_id: String,
    exit_code: Option<i32>,
}
396
/// Spawn a PTY-backed claude subprocess. Must be `async` for the
/// same reason [`start_turn`] must be — the underlying
/// [`core::pty::spawn_pty`] calls `tauri::async_runtime::spawn_blocking`
/// and hands out tokio channels, both of which require a tokio
/// runtime handle in the calling context. See
/// `tauri_tokio_runtime_rule` memory.
#[tauri::command]
pub async fn spawn_pty(
    app: AppHandle,
    state: State<'_, AppState>,
    pty_id: String,
    session_id: Option<String>,
    cwd: String,
    args: Vec<String>,
    cols: u16,
    rows: u16,
) -> IpcResult<()> {
    // PTY features share the same "no claude binary" kill switch as
    // chat (resolved once in `initialize`).
    let claude_bin = state
        .claude_bin
        .clone()
        .ok_or_else(|| "claude CLI not found on PATH".to_string())?;

    // Fail fast on a bad working directory rather than letting the
    // subprocess spawn fail with a less actionable error.
    let cwd_path = PathBuf::from(&cwd);
    if !cwd_path.is_dir() {
        return Err(format!("cwd does not exist: {cwd}"));
    }

    let req = PtyRequest {
        pty_id: pty_id.clone(),
        cwd: cwd_path.clone(),
        claude_bin,
        args,
        cols,
        rows,
        env: Vec::new(),
    };

    let PtyHandle {
        mut data_rx,
        exit_rx,
        writer,
        master,
        kill_tx,
        ring_buffer,
    } = core_spawn_pty(req).map_err(|e| format!("spawn pty failed: {e}"))?;

    // Register the entry before the forwarder starts so write/resize
    // commands issued immediately after spawn can find it.
    state.active_ptys.insert(
        pty_id.clone(),
        PtyEntry {
            session_id,
            writer,
            master,
            kill_tx,
            ring_buffer,
            cwd: cwd_path,
            started_at: Utc::now(),
        },
    );

    // Forwarder task: base64-encode each chunk off `data_rx` and emit
    // a per-PTY event named `pty:data:<pty_id>`. Per-PTY events
    // avoid the O(N) fan-out that the previous single `pty:data`
    // channel had — every TerminalPane subscribes only to events
    // for its own ptyId, so one PTY's stdout chunk doesn't wake up
    // every listener in the webview.
    //
    // When `data_rx` closes (reader task hit EOF), wait on
    // `exit_rx` for the final exit code and emit `pty:exit`
    // (globally, since the store subscribes to it to clean up
    // `ptyIds` and doesn't want to listen per-pty). Then remove
    // our entry from active_ptys — if close_pty already removed
    // it, the remove here is a harmless no-op.
    let app_clone = app.clone();
    let active_ptys = state.active_ptys.clone();
    let pid = pty_id.clone();
    let data_event = format!("pty:data:{pid}");
    tauri::async_runtime::spawn(async move {
        while let Some(chunk) = data_rx.recv().await {
            let b64 = BASE64_STANDARD.encode(&chunk);
            let payload = PtyDataPayload {
                pty_id: pid.clone(),
                base64: b64,
            };
            if let Err(err) = app_clone.emit(&data_event, &payload) {
                tracing::warn!(error = %err, "failed to emit pty:data");
            }
        }

        // data_rx closed — now block on the wait task's exit code.
        // `.ok().flatten()`: a dropped sender (reap task died) is
        // reported the same as a signal-kill, i.e. `exit_code: None`.
        let exit_code = exit_rx.await.ok().flatten();
        let payload = PtyExitPayload {
            pty_id: pid.clone(),
            exit_code,
        };
        if let Err(err) = app_clone.emit("pty:exit", &payload) {
            tracing::warn!(error = %err, "failed to emit pty:exit");
        }
        active_ptys.remove(&pid);
    });

    Ok(())
}
499
500 /// Forward a keystroke / paste string from xterm into the PTY
501 /// master. The frontend sends utf-8 text; we pass the bytes through
502 /// verbatim so escape sequences, control codes, and multi-byte
503 /// characters all round-trip correctly.
504 #[tauri::command]
505 pub fn write_pty(
506 state: State<'_, AppState>,
507 pty_id: String,
508 data: String,
509 ) -> IpcResult<()> {
510 let entry = state
511 .active_ptys
512 .get(&pty_id)
513 .ok_or_else(|| format!("no such pty: {pty_id}"))?;
514 core_write_pty(&entry.writer, data.as_bytes()).map_err(|e| e.to_string())
515 }
516
517 /// Propagate a terminal resize (from xterm's `onResize`) down to
518 /// the PTY master. On unix this triggers `SIGWINCH` in the
519 /// subprocess and claude redraws automatically.
520 #[tauri::command]
521 pub fn resize_pty(
522 state: State<'_, AppState>,
523 pty_id: String,
524 cols: u16,
525 rows: u16,
526 ) -> IpcResult<()> {
527 let entry = state
528 .active_ptys
529 .get(&pty_id)
530 .ok_or_else(|| format!("no such pty: {pty_id}"))?;
531 core_resize_pty(&entry.master, cols, rows).map_err(|e| e.to_string())
532 }
533
534 /// Explicit teardown: remove the entry from `active_ptys` and send
535 /// the kill signal. The subprocess dies, the wait task reaps, the
536 /// forwarder emits `pty:exit`, and its own `remove()` becomes a
537 /// no-op because we already removed here.
538 ///
539 /// No-op if the pty id is unknown (already exited or never existed).
540 #[tauri::command]
541 pub fn close_pty(state: State<'_, AppState>, pty_id: String) -> IpcResult<()> {
542 if let Some((_, entry)) = state.active_ptys.remove(&pty_id) {
543 let _ = entry.kill_tx.send(());
544 }
545 Ok(())
546 }
547
548 /// Snapshot the ring buffer and return it as a base64-encoded
549 /// string. Called by the frontend on reattach to replay recent
550 /// stdout into a freshly-mounted xterm instance.
551 #[tauri::command]
552 pub fn get_pty_buffer(state: State<'_, AppState>, pty_id: String) -> IpcResult<String> {
553 let entry = state
554 .active_ptys
555 .get(&pty_id)
556 .ok_or_else(|| format!("no such pty: {pty_id}"))?;
557 let snapshot = entry
558 .ring_buffer
559 .lock()
560 .map_err(|_| "ring buffer poisoned".to_string())?
561 .snapshot();
562 Ok(BASE64_STANDARD.encode(&snapshot))
563 }
564
/// Wire format for `list_ptys` responses. Mirrors the shape the
/// titlebar popover wants for its "N terminals" list.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PtyInfo {
    pub pty_id: String,
    /// Bound claudex session, if any (see [`PtyEntry::session_id`]).
    pub session_id: Option<String>,
    /// Lossy UTF-8 rendering of the spawn directory.
    pub cwd: String,
    pub started_at: DateTime<Utc>,
}
575
576 /// List every live PTY with enough metadata for the titlebar
577 /// popover ("3 terminals") and the sidebar indicators. Called on
578 /// mount and after `pty:data` / `pty:exit` events to reconcile.
579 #[tauri::command]
580 pub fn list_ptys(state: State<'_, AppState>) -> IpcResult<Vec<PtyInfo>> {
581 Ok(state
582 .active_ptys
583 .iter()
584 .map(|entry| PtyInfo {
585 pty_id: entry.key().clone(),
586 session_id: entry.session_id.clone(),
587 cwd: entry.cwd.to_string_lossy().into_owned(),
588 started_at: entry.started_at,
589 })
590 .collect())
591 }
592
593 /// Drain every live PTY, sending kill to each. Called from the
594 /// window-destroy hook in `lib.rs::run` so we don't leak claude
595 /// subprocesses across an app quit.
596 ///
597 /// We collect keys first and remove by id because `DashMap`'s
598 /// iterator doesn't permit in-place removal, and `clear()` would
599 /// drop the `kill_tx` senders without firing them.
600 pub fn shutdown_active_ptys(state: &AppState) {
601 let ids: Vec<String> = state
602 .active_ptys
603 .iter()
604 .map(|e| e.key().clone())
605 .collect();
606 for id in ids {
607 if let Some((_, entry)) = state.active_ptys.remove(&id) {
608 let _ = entry.kill_tx.send(());
609 }
610 }
611 }
612
613 // ============================================================================
614 // Frontend log bridge
615 // ============================================================================
616
617 /// Write a structured log entry from the webview into Rust tracing.
618 /// Wired up from `src/lib/debug.ts` to capture `window.onerror`,
619 /// `unhandledrejection`, and `console.error` — anything that would
620 /// otherwise silently crash the React tree without leaving a trace
621 /// on disk.
622 ///
623 /// `level` is one of `"error"`, `"warn"`, `"info"`, `"debug"`; any
624 /// other value is treated as `"info"`. `source` is a free-form tag
625 /// (`"window.onerror"`, `"react"`, `"console.error"`, etc.) and
626 /// `message` is the payload. The optional `stack` argument carries
627 /// a JS stack trace when available.
628 #[tauri::command]
629 pub fn log_frontend(
630 level: String,
631 source: String,
632 message: String,
633 stack: Option<String>,
634 ) -> IpcResult<()> {
635 let stack_fmt = stack.as_deref().unwrap_or("");
636 match level.as_str() {
637 "error" => {
638 tracing::error!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
639 }
640 "warn" => {
641 tracing::warn!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
642 }
643 "debug" => {
644 tracing::debug!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
645 }
646 _ => {
647 tracing::info!(target: "claudex::frontend", source = %source, stack = %stack_fmt, "{message}");
648 }
649 }
650 Ok(())
651 }
652
653 /// All session summaries for a single project (by **encoded source
654 /// dir**, not the merged project id), newest first.
655 ///
656 /// The tree-shaped sidebar no longer calls this — projects returned by
657 /// `list_projects` already carry their sessions embedded. Kept as a
658 /// fallback for debugging and for future callers that want to
659 /// re-fetch summaries without re-running the full project sweep.
660 #[tauri::command]
661 pub fn list_sessions(
662 encoded_dir: String,
663 state: State<'_, AppState>,
664 ) -> IpcResult<Vec<SessionSummary>> {
665 let project_dir = state.projects_root.join(&encoded_dir);
666 if !project_dir.exists() {
667 return Err(format!("encoded dir not found: {encoded_dir}"));
668 }
669
670 let mut sessions = crate::core::discovery::list_session_files(&project_dir)
671 .map_err(|e| e.to_string())?
672 .into_iter()
673 .filter_map(|path| {
674 state
675 .cache
676 .get_or_compute(&path, &encoded_dir)
677 .map_err(|e| {
678 tracing::warn!(path = ?path, error = %e, "summarize failed");
679 e
680 })
681 .ok()
682 })
683 .collect::<Vec<_>>();
684
685 sessions.sort_by(|a, b| b.last_activity_at.cmp(&a.last_activity_at));
686 Ok(sessions)
687 }
688
/// Default tail cap for disk session reads. Bounded so a 100 MB+
/// session file can't hang the main thread during IPC serialize /
/// deserialize. The frontend can opt in to the full transcript by
/// passing an explicit `messageLimit` (future API) once we have
/// lazy-scroll in the viewer.
///
/// Consumed by [`read_disk_session_blocking`] via
/// `core_read_session_limited`.
const DEFAULT_SESSION_MESSAGE_LIMIT: usize = 2000;
695
696 /// Full viewer payload for one session. Dispatches on `source`:
697 ///
698 /// - `Disk` (the default): `encoded_dir` is the physical source-dir
699 /// name (the `projectId` field inside a disk SessionSummary), and
700 /// we read the real jsonl.
701 /// - `Archive`: `encoded_dir` is the absolute project path (also the
702 /// `projectId` field on an archive SessionSummary), and we
703 /// synthesize a prompt-only [`SessionDetail`] from the in-memory
704 /// `history.jsonl` log.
705 ///
706 /// **Async** so the file read + JSONL parse + serialize-to-IPC
707 /// pipeline runs on the tokio pool rather than Tauri's sync
708 /// command pool; this prevents a slow 170 MB read from blocking
709 /// other commands (`list_ptys`, `write_pty`, etc.) while it runs.
710 #[tauri::command]
711 pub async fn read_session(
712 encoded_dir: String,
713 session_id: String,
714 #[allow(non_snake_case)] source: Option<SessionSource>,
715 state: State<'_, AppState>,
716 ) -> IpcResult<SessionDetail> {
717 match source.unwrap_or(SessionSource::Disk) {
718 SessionSource::Disk => {
719 let projects_root = state.projects_root.clone();
720 // Move the blocking file read off the async runtime.
721 let encoded_dir_owned = encoded_dir;
722 let session_id_owned = session_id;
723 tauri::async_runtime::spawn_blocking(move || {
724 read_disk_session_blocking(
725 &projects_root,
726 &encoded_dir_owned,
727 &session_id_owned,
728 )
729 })
730 .await
731 .map_err(|e| format!("read_session join error: {e}"))?
732 }
733 SessionSource::Archive => read_archive_session(&encoded_dir, &state),
734 }
735 }
736
737 fn read_disk_session_blocking(
738 projects_root: &Path,
739 encoded_dir: &str,
740 session_id: &str,
741 ) -> IpcResult<SessionDetail> {
742 let path = projects_root
743 .join(encoded_dir)
744 .join(format!("{session_id}.jsonl"));
745 if !path.exists() {
746 return Err(format!("session not found: {encoded_dir}/{session_id}"));
747 }
748 core_read_session_limited(&path, encoded_dir, Some(DEFAULT_SESSION_MESSAGE_LIMIT))
749 .map_err(|e| e.to_string())
750 }
751
752 fn read_archive_session(project_path: &str, state: &AppState) -> IpcResult<SessionDetail> {
753 let log = state
754 .history_log
755 .read()
756 .expect("poisoned history_log lock");
757 let entries = log.entries_for(project_path);
758 if entries.is_empty() {
759 return Err(format!("no archive history for: {project_path}"));
760 }
761
762 let earliest = entries.first().map(|e| e.timestamp_ms).unwrap_or(0);
763 let latest = entries.last().map(|e| e.timestamp_ms).unwrap_or(0);
764 let started_at = DateTime::<Utc>::from_timestamp_millis(earliest);
765 let last_activity = DateTime::<Utc>::from_timestamp_millis(latest);
766
767 // Synthesize the summary to match what `build_archive_projects`
768 // returned for this project. The frontend has already seen this
769 // shape from list_projects so we mirror it.
770 let title_candidate = entries
771 .iter()
772 .map(|e| e.display.as_str())
773 .find(|d| !d.trim().is_empty() && !d.trim().starts_with('/'))
774 .unwrap_or_default();
775 let title = crate::core::title::sanitize_title(title_candidate)
776 .unwrap_or_else(|| "(archived)".to_string());
777
778 let summary = SessionSummary {
779 id: archive_session_id(project_path),
780 project_id: project_path.to_string(),
781 title,
782 started_at,
783 last_activity_at: last_activity,
784 model: None,
785 message_count: entries.len() as u32,
786 git_branch: None,
787 version: None,
788 slug: None,
789 cwd: Some(project_path.to_string()),
790 custom_title: None,
791 entrypoint: Some("archive".to_string()),
792 source: SessionSource::Archive,
793 };
794
795 let messages = entries
796 .iter()
797 .map(|e| {
798 let at = DateTime::<Utc>::from_timestamp_millis(e.timestamp_ms)
799 .unwrap_or_else(Utc::now);
800 Message::User {
801 id: format!("archive-{}", e.timestamp_ms),
802 at,
803 text: e.display.clone(),
804 is_meta: false,
805 }
806 })
807 .collect();
808
809 Ok(SessionDetail { summary, messages })
810 }
811
812 /// Force a full rescan, bypassing the cache for every session. Used
813 /// by the "refresh" button and by tests.
814 #[tauri::command]
815 pub fn rescan(state: State<'_, AppState>) -> IpcResult<Vec<Project>> {
816 // Simplest implementation: clear cache, then call list_projects.
817 // (We can do something smarter later — per-session cache busts.)
818 let dirs = scan_projects(&state.projects_root).map_err(|e| e.to_string())?;
819 for dir in &dirs {
820 for s in &dir.sessions {
821 state.cache.remove(s);
822 }
823 }
824 // Reload the history log in case the user has typed new prompts
825 // since we last loaded.
826 if let Some(path) = default_history_path() {
827 let fresh = HistoryLog::load(&path);
828 *state
829 .history_log
830 .write()
831 .expect("poisoned history_log lock") = fresh;
832 }
833 list_projects(state)
834 }
835
836 /// Initialise shared state and the FS watcher. Called once from
837 /// `setup()` in `lib.rs`.
838 pub fn initialize(app: &AppHandle) -> Result<AppState, Box<dyn std::error::Error>> {
839 let projects_root = crate::core::paths::projects_dir()?;
840
841 // Cache lives in the Tauri app data dir.
842 let app_data = app
843 .path()
844 .app_data_dir()
845 .map_err(|e| format!("failed to resolve app data dir: {e}"))?;
846 std::fs::create_dir_all(&app_data)?;
847 let cache_path = app_data.join("summaries.bin");
848 let cache = Arc::new(SummaryCache::load(&cache_path));
849
850 // Load the prompt-input history log once at startup so archive
851 // discovery and reads are zero-latency. This is rebuilt on
852 // `rescan` too.
853 let history_log = Arc::new(RwLock::new(
854 default_history_path()
855 .map(|p| HistoryLog::load(&p))
856 .unwrap_or_else(HistoryLog::empty),
857 ));
858
859 // Resolve the claude CLI once. `None` means chat is disabled;
860 // start_turn will return an error and the frontend shows a toast.
861 let claude_bin = resolve_claude_bin();
862 if claude_bin.is_none() {
863 tracing::warn!("claude CLI not found on PATH; chat features disabled");
864 }
865
866 let active_turns = Arc::new(DashMap::new());
867 let active_ptys: Arc<DashMap<String, PtyEntry>> = Arc::new(DashMap::new());
868
869 // Start the watcher, even if projects_root doesn't exist yet —
870 // `spawn_watcher` errors in that case so we handle it.
871 let watcher = if projects_root.exists() {
872 let (handle, mut rx) = spawn_watcher(&projects_root)?;
873 let app_clone = app.clone();
874 let cache_clone = cache.clone();
875 let ptys_clone = active_ptys.clone();
876 tauri::async_runtime::spawn(async move {
877 while let Some(change) = rx.recv().await {
878 handle_change(&app_clone, &cache_clone, &ptys_clone, change);
879 }
880 });
881 handle
882 } else {
883 // No projects dir yet — spin up a dummy watcher on the parent
884 // of projects_root (usually ~/.claude) so we notice if the
885 // directory appears later.
886 let fallback_root = projects_root
887 .parent()
888 .map(PathBuf::from)
889 .unwrap_or_else(|| projects_root.clone());
890 let (handle, mut rx) = spawn_watcher(&fallback_root)?;
891 let app_clone = app.clone();
892 let cache_clone = cache.clone();
893 let ptys_clone = active_ptys.clone();
894 tauri::async_runtime::spawn(async move {
895 while let Some(change) = rx.recv().await {
896 handle_change(&app_clone, &cache_clone, &ptys_clone, change);
897 }
898 });
899 handle
900 };
901
902 Ok(AppState {
903 projects_root,
904 cache,
905 cache_path,
906 history_log,
907 active_turns,
908 active_ptys,
909 claude_bin,
910 _watcher: watcher,
911 })
912 }
913
914 fn handle_change(
915 app: &AppHandle,
916 cache: &SummaryCache,
917 active_ptys: &Arc<DashMap<String, PtyEntry>>,
918 change: SessionChange,
919 ) {
920 // When a PTY is actively writing to this session's JSONL file,
921 // we'd otherwise re-enter the rescan → list_projects →
922 // summarize → sidebar re-render cycle every few hundred ms for
923 // the duration of the subprocess's run. The PTY is already the
924 // canonical writer and the user sees it via the xterm view, so
925 // there's nothing new for the sidebar to learn. Skip the event
926 // entirely for PTY-owned sessions to cut the write-storm at the
927 // source. Cache invalidation still runs so non-PTY callers
928 // (e.g. a manual rescan) see fresh data.
929 let skip_emit = path_matches_live_pty(&change, active_ptys);
930 match &change {
931 SessionChange::Removed(path) => cache.remove(path),
932 SessionChange::Added(path) | SessionChange::Modified(path) => {
933 // Drop stale entry; next list_* call will repopulate.
934 cache.remove(path);
935 }
936 }
937 if skip_emit {
938 return;
939 }
940 // Tell the webview something changed. The frontend debounces
941 // and responds with a `rescan` invocation.
942 let payload = ChangePayload::from(&change);
943 if let Err(e) = app.emit("sessions:changed", payload) {
944 tracing::warn!(error = %e, "failed to emit sessions:changed");
945 }
946 }
947
948 /// Return true if the file path carrying this change is the JSONL
949 /// transcript of a session currently being driven by a live PTY.
950 /// Used by `handle_change` to suppress file-watcher events for
951 /// actively-written sessions so they don't thrash the sidebar.
952 fn path_matches_live_pty(
953 change: &SessionChange,
954 active_ptys: &Arc<DashMap<String, PtyEntry>>,
955 ) -> bool {
956 let path = match change {
957 SessionChange::Added(p)
958 | SessionChange::Modified(p)
959 | SessionChange::Removed(p) => p,
960 };
961 let Some(stem) = path.file_stem().and_then(|s| s.to_str()) else {
962 return false;
963 };
964 for entry in active_ptys.iter() {
965 if entry.session_id.as_deref() == Some(stem) {
966 return true;
967 }
968 }
969 false
970 }
971
/// Wire format for `sessions:changed` payloads — a [`SessionChange`]
/// with its path flattened to a lossy UTF-8 string for JS.
#[derive(Debug, Clone, serde::Serialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
enum ChangePayload {
    Added { path: String },
    Modified { path: String },
    Removed { path: String },
}
979
980 impl From<&SessionChange> for ChangePayload {
981 fn from(c: &SessionChange) -> Self {
982 match c {
983 SessionChange::Added(p) => ChangePayload::Added {
984 path: p.to_string_lossy().into_owned(),
985 },
986 SessionChange::Modified(p) => ChangePayload::Modified {
987 path: p.to_string_lossy().into_owned(),
988 },
989 SessionChange::Removed(p) => ChangePayload::Removed {
990 path: p.to_string_lossy().into_owned(),
991 },
992 }
993 }
994 }
995
996 /// Persist the cache to disk — called on shutdown.
997 pub fn persist_cache_on_exit(state: &AppState) {
998 if let Err(e) = state.cache.save(&state.cache_path) {
999 tracing::warn!(error = %e, path = ?state.cache_path, "failed to save cache");
1000 }
1001 }
1002