@@ -44,6 +44,103 @@ const trace = (msg: string, extra?: Record<string, unknown>) => { |
| 44 | 44 | void logFrontend("debug", "TerminalPane", payload); |
| 45 | 45 | }; |
| 46 | 46 | |
| 47 | +/** Module-level dedup lock for concurrent spawn attempts on the |
| 48 | + * same `sessionId`. React 19 StrictMode double-invokes effects |
| 49 | + * (mount → cleanup → mount), and both invocations used to race |
| 50 | + * into `spawn_pty`, producing TWO claude subprocesses per click. |
| 51 | + * The first one was then orphaned — its ptyId never landed in |
| 52 | + * the store so the frontend lost track of it, but the backend |
| 53 | + * kept the subprocess alive until the window was destroyed. |
| 54 | + * |
| 55 | + * With this map, the second mount awaits the first mount's |
| 56 | + * promise and reuses the same ptyId. Exactly one subprocess per |
| 57 | + * session, StrictMode-safe. */ |
| 58 | +const spawnLocks = new Map<string, Promise<string>>(); |
| 59 | + |
| 60 | +async function getOrSpawnPty( |
| 61 | + sessionId: string, |
| 62 | + cwd: string, |
| 63 | + claudeArgs: string[], |
| 64 | + cols: number, |
| 65 | + rows: number, |
| 66 | +): Promise<string> { |
| 67 | + const existing = useSessionStore.getState().ptyIds.get(sessionId); |
| 68 | + if (existing) return existing; |
| 69 | + |
| 70 | + const inflight = spawnLocks.get(sessionId); |
| 71 | + if (inflight) return inflight; |
| 72 | + |
| 73 | + const promise = (async () => { |
| 74 | + const newId = crypto.randomUUID(); |
| 75 | + trace("spawn path", { ptyId: newId, cols, rows }); |
| 76 | + await spawnPty({ |
| 77 | + ptyId: newId, |
| 78 | + sessionId, |
| 79 | + cwd, |
| 80 | + args: claudeArgs, |
| 81 | + cols, |
| 82 | + rows, |
| 83 | + }); |
| 84 | + // Register in the store before the promise resolves so any |
| 85 | + // later getOrSpawnPty call takes the `existing` fast path |
| 86 | + // above instead of attempting a second spawn. |
| 87 | + useSessionStore.getState().registerPty(sessionId, { |
| 88 | + ptyId: newId, |
| 89 | + sessionId, |
| 90 | + cwd, |
| 91 | + startedAt: new Date().toISOString(), |
| 92 | + }); |
| 93 | + trace("spawn_pty ok", { ptyId: newId }); |
| 94 | + return newId; |
| 95 | + })(); |
| 96 | + |
| 97 | + spawnLocks.set(sessionId, promise); |
| 98 | + try { |
| 99 | + return await promise; |
| 100 | + } finally { |
| 101 | + spawnLocks.delete(sessionId); |
| 102 | + } |
| 103 | +} |
| 104 | + |
| 105 | +/** Write a large base64 payload into xterm in paced chunks so we |
| 106 | + * don't block the UI thread. xterm.write accepts a callback that |
| 107 | + * fires once that chunk has been parsed, which we use to |
| 108 | + * schedule the next chunk. For a reattach that replays a 200 KB |
| 109 | + * ring buffer this keeps the main thread responsive |
| 110 | + * (keystrokes still register) instead of incurring a multi-second |
| 111 | + * stall while xterm parses the whole blob. */ |
| 112 | +const REPLAY_CHUNK_BYTES = 8 * 1024; |
| 113 | + |
| 114 | +function writeBase64Chunked(term: Terminal, b64: string): void { |
| 115 | + const binary = atob(b64); |
| 116 | + const total = binary.length; |
| 117 | + if (total === 0) return; |
| 118 | + if (total <= REPLAY_CHUNK_BYTES) { |
| 119 | + term.write(decodeBinary(binary)); |
| 120 | + return; |
| 121 | + } |
| 122 | + let offset = 0; |
| 123 | + const step = () => { |
| 124 | + const end = Math.min(offset + REPLAY_CHUNK_BYTES, total); |
| 125 | + const slice = decodeBinary(binary.slice(offset, end)); |
| 126 | + offset = end; |
| 127 | + if (offset >= total) { |
| 128 | + term.write(slice); |
| 129 | + } else { |
| 130 | + term.write(slice, step); |
| 131 | + } |
| 132 | + }; |
| 133 | + step(); |
| 134 | +} |
| 135 | + |
| 136 | +function decodeBinary(binary: string): Uint8Array { |
| 137 | + const bytes = new Uint8Array(binary.length); |
| 138 | + for (let i = 0; i < binary.length; i++) { |
| 139 | + bytes[i] = binary.charCodeAt(i); |
| 140 | + } |
| 141 | + return bytes; |
| 142 | +} |
| 143 | + |
| 47 | 144 | /** Theme matching the claudex design tokens in `src/index.css`. */ |
| 48 | 145 | const TERMINAL_THEME = { |
| 49 | 146 | foreground: "#d4d4d8", |
@@ -85,7 +182,6 @@ export interface TerminalPaneProps { |
| 85 | 182 | |
| 86 | 183 | export function TerminalPane({ sessionId, cwd, claudeArgs }: TerminalPaneProps) { |
| 87 | 184 | const containerRef = useRef<HTMLDivElement | null>(null); |
| 88 | | - const registerPty = useSessionStore((s) => s.registerPty); |
| 89 | 185 | |
| 90 | 186 | useEffect(() => { |
| 91 | 187 | const container = containerRef.current; |
@@ -166,72 +262,57 @@ export function TerminalPane({ sessionId, cwd, claudeArgs }: TerminalPaneProps) |
| 166 | 262 | let cancelled = false; |
| 167 | 263 | let disposeListeners: Array<() => void> = []; |
| 168 | 264 | |
| 169 | | - const writeBase64 = (b64: string) => { |
| 170 | | - const binary = atob(b64); |
| 171 | | - const bytes = new Uint8Array(binary.length); |
| 172 | | - for (let i = 0; i < binary.length; i++) { |
| 173 | | - bytes[i] = binary.charCodeAt(i); |
| 174 | | - } |
| 175 | | - term.write(bytes); |
| 176 | | - }; |
| 177 | | - |
| 178 | 265 | const attach = async () => { |
| 266 | + // Resolve a ptyId — either reattach to an already-running |
| 267 | + // subprocess or spawn a new one exactly once. The spawn |
| 268 | + // lock guarantees at most one spawn per sessionId even under |
| 269 | + // React 19 StrictMode's double-mount. |
| 179 | 270 | const existing = useSessionStore.getState().ptyIds.get(sessionId); |
| 180 | | - if (existing) { |
| 181 | | - trace("reattach path", { ptyId: existing }); |
| 182 | | - ptyId = existing; |
| 271 | + const reattach = !!existing; |
| 272 | + let resolved: string; |
| 273 | + try { |
| 274 | + resolved = await getOrSpawnPty( |
| 275 | + sessionId, |
| 276 | + cwd, |
| 277 | + claudeArgs, |
| 278 | + term.cols, |
| 279 | + term.rows, |
| 280 | + ); |
| 281 | + } catch (err) { |
| 282 | + trace("spawn_pty failed", { error: String(err) }); |
| 283 | + if (!cancelled) { |
| 284 | + term.write( |
| 285 | + `\r\n\x1b[31m[claudex] spawn_pty failed: ${formatErr(err)}\x1b[0m\r\n`, |
| 286 | + ); |
| 287 | + } |
| 288 | + return; |
| 289 | + } |
| 290 | + if (cancelled) return; |
| 291 | + ptyId = resolved; |
| 292 | + |
| 293 | + // On reattach, replay the ring buffer into xterm in paced |
| 294 | + // chunks so a 200 KB scrollback doesn't stall the main |
| 295 | + // thread for multiple seconds. |
| 296 | + if (reattach) { |
| 297 | + trace("reattach path", { ptyId: resolved }); |
| 183 | 298 | try { |
| 184 | | - const snapshot = await getPtyBuffer(existing); |
| 299 | + const snapshot = await getPtyBuffer(resolved); |
| 185 | 300 | if (cancelled) return; |
| 186 | | - if (snapshot.length > 0) writeBase64(snapshot); |
| 301 | + if (snapshot.length > 0) writeBase64Chunked(term, snapshot); |
| 187 | 302 | trace("ring buffer replayed", { bytes: snapshot.length }); |
| 188 | 303 | } catch (err) { |
| 189 | 304 | trace("get_pty_buffer failed during reattach", { |
| 190 | 305 | error: String(err), |
| 191 | 306 | }); |
| 192 | 307 | } |
| 193 | | - } else { |
| 194 | | - const newId = crypto.randomUUID(); |
| 195 | | - trace("spawn path", { |
| 196 | | - ptyId: newId, |
| 197 | | - cols: term.cols, |
| 198 | | - rows: term.rows, |
| 199 | | - }); |
| 200 | | - try { |
| 201 | | - await spawnPty({ |
| 202 | | - ptyId: newId, |
| 203 | | - sessionId, |
| 204 | | - cwd, |
| 205 | | - args: claudeArgs, |
| 206 | | - cols: term.cols, |
| 207 | | - rows: term.rows, |
| 208 | | - }); |
| 209 | | - trace("spawn_pty ok", { ptyId: newId }); |
| 210 | | - } catch (err) { |
| 211 | | - trace("spawn_pty failed", { error: String(err) }); |
| 212 | | - if (!cancelled) { |
| 213 | | - term.write( |
| 214 | | - `\r\n\x1b[31m[claudex] spawn_pty failed: ${formatErr(err)}\x1b[0m\r\n`, |
| 215 | | - ); |
| 216 | | - } |
| 217 | | - return; |
| 218 | | - } |
| 219 | | - if (cancelled) return; |
| 220 | | - ptyId = newId; |
| 221 | | - registerPty(sessionId, { |
| 222 | | - ptyId: newId, |
| 223 | | - sessionId, |
| 224 | | - cwd, |
| 225 | | - startedAt: new Date().toISOString(), |
| 226 | | - }); |
| 227 | 308 | } |
| 228 | 309 | |
| 229 | | - // Listen for stdout. `pty:data` is a single shared channel, so |
| 230 | | - // we filter by our own pty id inside the callback. |
| 310 | + // Listen for stdout — per-pty event name, so this listener |
| 311 | + // only wakes up for our own terminal's bytes. |
| 231 | 312 | try { |
| 232 | | - const un = await onPtyData((ev: PtyDataEvent) => { |
| 233 | | - if (ev.ptyId !== ptyId) return; |
| 234 | | - writeBase64(ev.base64); |
| 313 | + const pid = resolved; |
| 314 | + const un = await onPtyData(pid, (ev: PtyDataEvent) => { |
| 315 | + writeBase64Chunked(term, ev.base64); |
| 235 | 316 | }); |
| 236 | 317 | if (cancelled) { |
| 237 | 318 | un(); |
@@ -239,24 +320,25 @@ export function TerminalPane({ sessionId, cwd, claudeArgs }: TerminalPaneProps) |
| 239 | 320 | unlistenData = un; |
| 240 | 321 | } |
| 241 | 322 | } catch (err) { |
| 242 | | - console.warn("[pty] onPtyData listen failed", err); |
| 323 | + trace("onPtyData listen failed", { error: String(err) }); |
| 243 | 324 | } |
| 244 | 325 | |
| 245 | 326 | // Wire keystrokes → PTY writer. |
| 246 | 327 | const dataDispose = term.onData((data) => { |
| 247 | 328 | if (!ptyId) return; |
| 248 | 329 | void writePty(ptyId, data).catch((err) => { |
| 249 | | - console.warn("[pty] write_pty failed", err); |
| 330 | + trace("write_pty failed", { error: String(err) }); |
| 250 | 331 | }); |
| 251 | 332 | }); |
| 252 | 333 | disposeListeners.push(() => dataDispose.dispose()); |
| 253 | 334 | |
| 254 | | - // Propagate xterm resize events (triggered by fit.fit() below) |
| 255 | | - // down to the PTY master so claude's TUI redraws properly. |
| 335 | + // Propagate xterm resize events (triggered by fit.fit() or |
| 336 | + // container ResizeObserver) down to the PTY master so |
| 337 | + // claude's TUI redraws properly. |
| 256 | 338 | const resizeDispose = term.onResize(({ cols, rows }) => { |
| 257 | 339 | if (!ptyId) return; |
| 258 | 340 | void resizePty(ptyId, cols, rows).catch((err) => { |
| 259 | | - console.warn("[pty] resize_pty failed", err); |
| 341 | + trace("resize_pty failed", { error: String(err) }); |
| 260 | 342 | }); |
| 261 | 343 | }); |
| 262 | 344 | disposeListeners.push(() => resizeDispose.dispose()); |