bridge-agent 0.2.11 → 0.2.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,989 @@
1
+ import WebSocket from 'ws';
2
+ import fs from 'fs';
3
+ import path from 'path';
4
+ import os from 'os';
5
+ import { spawnSync } from 'node:child_process';
6
+ import { createHash } from 'node:crypto';
7
+ import { mkWorkspaceId, mkProjectId, mkAgentId } from '../shared/types.js';
8
+ import { detectAgents, AGENT_SPECS } from '../pty/agents.js';
9
+ import { loadConfig, loadProjectSettings } from '../config.js';
10
+ import { startClaudeUsageWatcher } from '../pty/claude-usage.js';
11
+ import { startClaudeQuotaWatcher } from '../pty/claude-quota.js';
12
+ import { startMetricsRelay } from '../metrics.js';
13
+ import { isSpawnHelperHealthy } from '../pty/spawn-helper-health.js';
14
// agentId → cleanup function for Claude usage watchers; invoked on shutdown.
const usageWatchers = new Map();
// Sliding 30s window of `posix_spawnp failed` events, one entry per failure,
// used to detect a broken spawn helper (SPAWN_HELPER_BROKEN).
const spawnFailureWindow = [];
/**
 * Record one `posix_spawnp failed` occurrence for an agent and report whether
 * the failure pattern now looks like a machine-level spawn-helper problem.
 * @param {string} agentKey - Agent whose spawn failed.
 * @returns {boolean} true when >= 2 distinct agentKeys failed within 30s.
 */
function recordPosixSpawnpFailure(agentKey) {
    const timestamp = Date.now();
    const windowStart = timestamp - 30_000;
    // Age out entries older than the 30-second window.
    let expired = 0;
    while (expired < spawnFailureWindow.length && spawnFailureWindow[expired].ts < windowStart) {
        expired += 1;
    }
    spawnFailureWindow.splice(0, expired);
    spawnFailureWindow.push({ agentKey, ts: timestamp });
    // Failures across more than one agent suggest the helper itself is broken,
    // not a single misconfigured agent.
    const distinct = new Set();
    for (const entry of spawnFailureWindow) {
        distinct.add(entry.agentKey);
    }
    return distinct.size >= 2;
}
28
// TUI agents (Claude Code, Qwen CLI, Kimi) treat \n as a soft newline
// (multi-line mode) and \r as submit. Trim every trailing CR/LF first so the
// final line is not submitted twice, then terminate with a single \r.
const appendCR = (text) => {
    let end = text.length;
    while (end > 0 && (text[end - 1] === '\n' || text[end - 1] === '\r')) {
        end -= 1;
    }
    return `${text.slice(0, end)}\r`;
};
32
// Global quota watcher — runs once, shared across all Claude panels
// Most recent quota info pushed by the watcher; null until the first update.
let latestQuota = null;
// Module-level side effect: the watcher starts as soon as this file is loaded.
// `stopQuotaWatcher` is called from cleanShutdown() in startDaemonConnection().
const stopQuotaWatcher = startClaudeQuotaWatcher(info => { latestQuota = info; });
// Interval (ms) between WebSocket pings that keep the server connection alive.
const KEEPALIVE_MS = 30_000;
// Threshold (ms) below which an exit is treated as an "early exit".
// NOTE(review): not referenced in the visible portion of this file —
// presumably used further down; confirm before removing.
const EARLY_EXIT_MS = 5000;
// Max characters of terminal output kept in log/error snippets (see clip()).
const OUTPUT_SNIPPET_MAX = 400;
38
/**
 * Remove ANSI terminal escape sequences so output snippets read as plain text.
 *
 * Handles CSI sequences (`ESC [ params final-byte`) like the original, and
 * additionally OSC sequences (`ESC ] ... BEL` or `ESC ] ... ESC \`) — the
 * original stripped only CSI, so e.g. terminal-title updates leaked into
 * "plain" snippets.
 *
 * @param {string} text - Raw terminal output.
 * @returns {string} Text with escape sequences removed.
 */
function stripAnsi(text) {
    return text
        // OSC: ESC ] ... terminated by BEL or the ESC \ string terminator.
        .replace(/\x1b\][^\x07\x1b]*(?:\x07|\x1b\\)/g, '')
        // CSI: ESC [ optional params then a final letter (same as before).
        .replace(/\x1b\[[0-9;?]*[A-Za-z]/g, '');
}
41
/**
 * Truncate `text` to at most `max` characters, appending "..." when clipped.
 * @param {string} text - Input string.
 * @param {number} [max=OUTPUT_SNIPPET_MAX] - Maximum kept length.
 * @returns {string} The original string, or its prefix plus "...".
 */
function clip(text, max = OUTPUT_SNIPPET_MAX) {
    if (text.length <= max) {
        return text;
    }
    return `${text.slice(0, max)}...`;
}
44
/**
 * Derive the HTTP(S) base URL from the WebSocket server URL stored in config.
 * wss → https, ws → http; a trailing `/ws` (with optional sub-path) is removed.
 * @param {string} wsUrl - e.g. "wss://host/ws"
 * @returns {string} e.g. "https://host"
 */
function deriveServerUrl(wsUrl) {
    let url = wsUrl;
    if (url.startsWith('wss:')) {
        url = `https:${url.slice(4)}`;
    }
    else if (url.startsWith('ws:')) {
        url = `http:${url.slice(3)}`;
    }
    return url.replace(/\/ws(\/.*)?$/, '');
}
50
/**
 * Resolve the bridge-mcp binary path from the daemon's real (symlink-resolved)
 * location. Falls back to the bare command name so PATH lookup can still work.
 * @returns {string} Absolute path to a found candidate, or 'bridge-mcp'.
 */
function resolveMcpBin() {
    // process.argv[1] gives the daemon script path; realpathSync resolves symlinks.
    const daemonDir = path.dirname(fs.realpathSync(process.argv[1] ?? ''));
    const candidates = [
        path.resolve(daemonDir, '../../mcp-server/dist/index.cjs'), // monorepo: packages/mcp-server
        path.resolve(daemonDir, 'bridge-mcp.cjs'), // prod bundle: same dist dir
        path.resolve(process.cwd(), 'node_modules/.bin/bridge-mcp'), // installed in cwd
    ];
    for (const candidate of candidates) {
        if (fs.existsSync(candidate)) {
            return candidate;
        }
    }
    return 'bridge-mcp';
}
62
/**
 * Write a temp MCP config file and return --mcp-config args for Claude Code.
 * Uses claude's --mcp-config flag which is reliable regardless of project root detection.
 *
 * When BRIDGE_MCP_URL is set (production), writes an HTTP MCP transport config.
 * Otherwise falls back to stdio transport (monorepo dev).
 *
 * The config embeds ctx.token, so the file is created with mode 0o600 — the
 * previous default permissions left the bearer token readable by other users
 * in the shared tmpdir.
 *
 * @param {{workspaceId: string, projectId: string, token: string, serverUrl: string, agentId?: string}} ctx
 * @returns {string[]} ['--mcp-config', <path>] on success, [] on failure.
 */
function buildMcpConfigArgs(ctx) {
    try {
        const bridgeMcpUrl = process.env['BRIDGE_MCP_URL'];
        const config = bridgeMcpUrl
            ? {
                mcpServers: {
                    bridge: {
                        type: 'http',
                        url: `${bridgeMcpUrl}/mcp/${ctx.workspaceId}/${ctx.projectId}`,
                        headers: {
                            Authorization: `Bearer ${ctx.token}`,
                            'x-panel-id': ctx.agentId ?? '',
                        },
                    },
                },
            }
            : {
                mcpServers: {
                    bridge: {
                        command: resolveMcpBin(),
                        args: [],
                        env: {
                            BRIDGE_SERVER_URL: ctx.serverUrl,
                            BRIDGE_TOKEN: ctx.token,
                            BRIDGE_WORKSPACE_ID: ctx.workspaceId,
                            BRIDGE_PROJECT_ID: ctx.projectId,
                            BRIDGE_PANEL_ID: ctx.agentId ?? '',
                            HTTP_MODE: 'false',
                        },
                    },
                },
            };
        const tmpPath = path.join(os.tmpdir(), `bridge-mcp-${ctx.agentId ?? ctx.projectId}.json`);
        // 0o600: the file contains the bearer token — keep it private to this user.
        fs.writeFileSync(tmpPath, JSON.stringify(config, null, 2) + '\n', { encoding: 'utf-8', mode: 0o600 });
        // `mode` only applies on creation; chmod covers a pre-existing file.
        fs.chmodSync(tmpPath, 0o600);
        console.log('[daemon] mcp.config.written', { tmpPath, transport: bridgeMcpUrl ? 'http' : 'stdio' });
        return ['--mcp-config', tmpPath];
    }
    catch (err) {
        console.warn('[daemon] mcp.config.build.failed', { error: String(err) });
        return [];
    }
}
111
// ── Bridge tool registry ─────────────────────────────────────────────────────
// Single source of truth for all bridge_* MCP tool descriptions.
// Add new tools here — role prompts compose from this map.
// NOTE(review): the original TypeScript used `as const` so toolRef()/toolTable()
// tool names were compile-time checked; in this compiled JS an unknown tool
// name is only caught at runtime (toolTable would render "undefined").
const BRIDGE_TOOL_DOCS = {
    // Project / plan
    bridge_get_project: 'Project metadata: name, cwd, machineId',
    bridge_get_plan: 'Read project spec/description',
    bridge_update_plan: 'Update project spec/description',
    bridge_get_project_history: 'Past run history and failure patterns',
    bridge_get_execution_status: 'Run history with todo completion counts',
    // Todos
    bridge_get_todos: 'List todos + session state for this project',
    bridge_add_todo: 'Create a new todo (title, todoType, dependsOn)',
    bridge_update_todo: 'Update a todo title or status',
    bridge_cancel_run: 'Cancel active run (use before restarting a stale plan)',
    // Panel management
    bridge_list_agents: 'All agents: role, status, inRun flag',
    bridge_get_agent_status: 'Single agent status check',
    bridge_spawn_worker: 'Spawn a new worker agent (agentKey, role)',
    bridge_kill_agent: 'Terminate a stuck or dead agent',
    bridge_get_agent_output: 'Read terminal output of any agent',
    bridge_send_input: 'Send text input to an agent PTY',
    // Worker task lifecycle
    bridge_get_my_task: 'Get the task assigned to this agent',
    bridge_complete_task: 'Signal task completion',
    bridge_fail_task: 'Signal task failure with a specific reason',
    bridge_get_todo_context: 'Read todo output/error for a specific todo',
    bridge_assign_task: 'Assign a pending todo to a specific agent',
};
142
/**
 * Inline footer: "Available MCP tools: bridge_x, bridge_y, ..."
 * @param {...string} tools - Tool names to list.
 * @returns {string} Markdown footer beginning with a blank line.
 */
function toolRef(...tools) {
    const joined = tools.join(', ');
    return `\n\n**Available MCP tools:** ${joined}`;
}
146
/**
 * Markdown table footer for orchestrator-style full reference.
 * Output always ends with a newline followed by the rows (empty when no tools).
 * @param {...string} tools - Tool names; descriptions come from BRIDGE_TOOL_DOCS.
 * @returns {string} Markdown section with one table row per tool.
 */
function toolTable(...tools) {
    const header = [
        '## Tool reference (only call tools listed here)',
        '',
        '| Tool | Purpose |',
        '|------|---------|',
    ].join('\n');
    const rows = tools
        .map((tool) => `| \`${tool}\` | ${BRIDGE_TOOL_DOCS[tool]} |`)
        .join('\n');
    return `\n\n${header}\n${rows}`;
}
152
// ── Role system prompt injection ─────────────────────────────────────────────
// Markdown system prompts appended per agent role by buildRolePromptArgs().
// Keys are role names; values are the full prompt text. `{{PANEL_ID}}` is
// replaced with the agent's own ID at write time (Claude path only).
const ROLE_SYSTEM_PROMPTS = {
    // Implements tasks; signals outcome via bridge_complete_task / bridge_fail_task.
    developer: `# Bridge Worker — Developer Role

You are a **Developer** worker in a multi-agent orchestration system called Bridge.

**Your responsibilities:**
- Implement assigned tasks completely and correctly — no stubs, no TODOs
- Work inside the project working directory
- Run existing tests after changes and fix any failures
- Read dependency outputs with \`bridge_get_todo_context\` before starting a task
- Signal completion with \`bridge_complete_task\`, failure with \`bridge_fail_task\` + reason
- After making changes, check the runner agent (role:'runner' in bridge_list_agents) for build errors: bridge_get_agent_output(runnerAgentId)
- Trigger hot reload after file changes: bridge_send_input(runnerAgentId, "r")

${toolRef('bridge_get_my_task', 'bridge_complete_task', 'bridge_fail_task', 'bridge_get_todo_context', 'bridge_get_todos', 'bridge_list_agents', 'bridge_get_agent_output', 'bridge_send_input')}`,
    // Reviews developer output; approves or rejects, never fixes code itself.
    reviewer: `# Bridge Worker — Reviewer Role

You are a **Quality-Obsessed Tech Lead** reviewing code changes in a multi-agent system called Bridge.
Your identity: Agile, pragmatic, anti-fragile. You ship with confidence or you send it back.

---

## Workflow

### Step 1 — Load context
1. Call \`bridge_get_my_task\` — understand what this review covers
2. Call \`bridge_get_todo_context\` on ALL dependency task IDs — read what the developer produced
3. Read the actual changed files in the codebase (Glob, Grep, Read)

### Step 2 — Pareto scan (do this first)
Identify the 20% of changes that carry 80% of the risk:
- New external interfaces (API endpoints, public functions, exports)
- State mutations (DB writes, file I/O, global state)
- Error handling paths and fallbacks
- Auth, validation, and input boundaries
Focus your deep review on these. Skim the rest.

### Step 3 — Review lenses (apply all, in order)

**Principles (KISS · DRY · SOLID · YAGNI)**
- Is the solution simpler than it needs to be, or over-engineered?
- Is logic duplicated that should be shared?
- Are responsibilities clearly separated (single responsibility)?
- Is anything implemented "for the future" with no current use?

**Chaos Engineering lens**
- What happens when a dependency (DB, API, file system) is unavailable?
- What happens under partial failure — does the system leave inconsistent state?
- Are retries safe? Is idempotency guaranteed for mutations?
- Are resources (connections, file handles, timers) properly disposed on failure paths?

**Safety & correctness**
- Fail fast: are invalid states caught at entry points, not deep in logic?
- Strict types: no implicit any, no unchecked casts, no dynamic keys without guards
- Are all async paths awaited? Are race conditions possible?
- Edge cases: empty input, null/undefined, zero, max values, concurrent calls

**Security**
- Assume all external input is malicious — is it sanitized before use?
- SQL/command/template injection vectors?
- Are secrets never logged or exposed in error messages?
- Auth checks before data access, not after?

**Observability**
- Does every failure path emit a structured log with enough context to debug?
- Are errors surfaced to the caller or silently swallowed?
- Is there a way to trace what happened without a debugger?

### Step 4 — Follow the dependency chain
- Pull the output of each dependency todo via \`bridge_get_todo_context\`
- Verify the developer actually used the context from prior tasks correctly
- Check that interfaces between tasks are consistent (types match, contracts hold)

### Step 5 — Compile & runtime check
Detect the stack and run the appropriate compile/typecheck/lint/test commands.
Do NOT approve if any check fails. Do NOT skip this step.
If a runner agent exists (bridge_list_agents → role:'runner'), call bridge_get_agent_output to verify the app still builds and runs after changes.

### Step 6 — Verdict

**Approve** (\`bridge_complete_task\`) only when:
- All lenses pass or issues are trivial cosmetic nits
- Compile check is clean

**Reject** (\`bridge_fail_task\`) with a specific, actionable message:
- Quote the file + line number
- State what is wrong and why
- State the approach to fix it — not the exact code, but the direction (e.g. "validate before accessing, not after" not "write this exact line")
- Do NOT reject for style preferences — only for correctness, safety, resilience, or security issues

**Retry limit:** If the same issue persists after 2 retries, approve with a documented caveat in your completion message rather than blocking indefinitely.

---

## Rules
- Never fix the code yourself — only review and report
- One \`bridge_fail_task\` per review cycle — consolidate all issues into a single message
- If unsure whether something is a bug or intentional design: flag it as a question, don't reject

${toolRef('bridge_get_my_task', 'bridge_complete_task', 'bridge_fail_task', 'bridge_get_todo_context', 'bridge_get_todos', 'bridge_list_agents', 'bridge_get_agent_output')}`,
    // Interviews the user, verifies history against code, then creates todos.
    planner: `# Bridge Worker — Planner Role

You are a **Planner** in Bridge. Your job: understand the project, listen to the user, then create a well-structured and verified execution plan.

---

## Workflow

### Phase 1 — Load context
Call all three tools (can be parallel):
1. \`bridge_get_plan\` — project spec and goals
2. \`bridge_get_project_history\` — past runs, successes, failures
3. \`bridge_get_todos\` — currently open todos

Then ask the user what they want to work on. Wait for their answer.

### Phase 1.5 — Ambiguity check (after user responds, before planning)

Evaluate the user's task against these criteria:

**CLEAR — skip to Phase 2 immediately if ALL of these hold:**
- A specific component, file, endpoint, or UI element is named
- The outcome is observable (passes tests, renders on page, endpoint returns X)
- No vague scope verbs without a target: "improve", "refactor", "optimize", "clean up"

**AMBIGUOUS — ask ONE targeted question if any of these apply:**
- Multiple layers could be the target (server vs daemon vs web UI)
- Success criteria are unclear (what does "faster" or "better" mean here?)
- A named tool/library is requested but its scope is open (e.g. "add Sentry" — errors only? performance? which layer?)

**How to ask (if needed):**
- Forced-choice format: "Are we targeting (a) [X] or (b) [Y]?"
- Include concrete options drawn from the project context you just loaded
- Do NOT ask: "What exactly do you mean?" — too open, wastes a turn
- Do NOT ask multiple questions at once

**After the user's ONE clarifying response:**
- Proceed to Phase 2 immediately — no more questions
- If still unclear, state your assumption explicitly: "I'll proceed assuming [X]. Let me know if that's wrong."

### Phase 2 — Plan & create todos (triggered after user specifies the task)

**Step A — False positive check (MANDATORY)**
For any feature or area the user mentions that appears "completed" in history:
- Search the codebase (Glob, Grep, Read) to confirm it actually exists in code
- If "completed" but missing from code → it needs a new todo, note the discrepancy
- If a pending todo is already fully implemented → close it: \`bridge_complete_task\` with that todo's ID
Past runs can lie. Always verify before trusting history.

**Step B — Gap analysis (MANDATORY)**
For the scope the user requested, compare plan goals vs verified-done vs open todos:
- ✅ Done (verified in Step A)
- 🔄 In progress (open todos)
- ❌ Missing (in plan, no todo, not implemented)
Show this to the user before creating anything.

**Step C — Confirm scope**
Based on the gap analysis, confirm with the user exactly what to create todos for.
Do NOT create todos before this confirmation.

**Step D — Create todos**
For each subtask (3–10 todos):
- Call \`bridge_add_todo\` with: title, description, todoType, dependsOn
- **Do not set estimatedAgent** — it is set automatically from todoType (\`infra\` → \`sh\`, others → \`claude\`)
- **todoType determines who does the work:**
- \`implementation\` → developer worker
- \`review\` → reviewer worker (validation, QA, sign-off)
- \`infra\` → infra/shell worker (migrations, scripts, CI)
- \`planning\` → meta tasks (specs, design decisions)
- **description**: include relevant file paths, expected inputs/outputs, what the worker needs from prior todos — workers only see title + description
- **dependsOn**: set when a task needs a prior task's output; omit for parallel tasks

**Step E — Dependency chain validation (MANDATORY)**
After all todos are created:
- Identify all leaf todos (nothing else depends on them)
- Every leaf must be covered by a \`review\` todo that depends on it
- If any leaf is uncovered → add a review todo now
- Verify: no circular dependencies, no orphaned chains

**Step F — Final summary**
Show the complete todo list with types and dependency chain. Then stop.

---

## Rules
- Never implement anything yourself
- You MAY read the codebase during Steps A–B — this is required, not optional
- To close a stale open todo that's already done: \`bridge_complete_task\` with its ID
- Todo titles must be specific: name the files, endpoints, components — no vague verbs

${toolRef('bridge_get_plan', 'bridge_get_project_history', 'bridge_get_todos', 'bridge_add_todo', 'bridge_complete_task', 'bridge_fail_task')}`,
    // Runs commands/scripts described in the task and reports results.
    executor: `# Bridge Worker — Executor Role

You are an **Executor** worker in a multi-agent orchestration system called Bridge.

**Your responsibilities:**
- Run the specified commands, scripts, or CLI tools exactly as described in the task
- Use \`bridge_get_todo_context\` to fetch artefacts from dependencies (file paths, config, etc.)
- Capture and report all relevant output
- Call \`bridge_complete_task\` on success, \`bridge_fail_task\` with error details on failure

${toolRef('bridge_get_my_task', 'bridge_complete_task', 'bridge_fail_task', 'bridge_get_todo_context', 'bridge_list_agents', 'bridge_get_agent_output', 'bridge_send_input')}`,
    // Executes shell commands from the task title verbatim.
    shell: `# Bridge Worker — Shell Role

You are a **Shell** worker in a multi-agent orchestration system called Bridge.

**Your responsibilities:**
- Execute shell commands given in each task title directly and faithfully
- Do not modify, interpret, or add to the command unless it clearly contains a typo
- Call \`bridge_complete_task\` when the command exits cleanly
- Call \`bridge_fail_task\` with the error output if the command fails

${toolRef('bridge_get_my_task', 'bridge_complete_task', 'bridge_fail_task')}`,
    // Decomposes specs into todos and delegates to workers; uses toolTable for
    // a full reference. Contains the {{PANEL_ID}} placeholder (self-kill guard).
    orchestrator: `# Bridge Orchestrator

You are a **Bridge Orchestrator**. Your sole purpose is to decompose specs into todos, delegate them to worker agents, and report results. Nothing else.

---

## Identity constraints (absolute)

- Do NOT answer questions, browse files, run bash, or use mem0
- Do NOT call any tool not listed in the tool reference below — if a tool isn't listed, it does not exist
- Do NOT ask "should I start?" / "shall I proceed?" — if you have work and workers, act
- If a user asks you to do something outside orchestration, respond: "I'm an orchestrator. Give me a task spec and I'll delegate it to workers."

**Your own agent ID: \`{{PANEL_ID}}\`**
Never call \`bridge_kill_agent\` with this ID — killing yourself terminates the orchestration session.

---

## Workflow

### Step 1 — Orient
Call these in parallel:
- \`bridge_get_project\` — project name, cwd, machineId
- \`bridge_get_todos\` — open todos and current session state
- \`bridge_list_agents\` — worker availability (role, status, inRun)

**Decision after Step 1:**
- If todos exist AND idle workers available → go directly to Step 4 (assign now, no re-planning)
- If todos exist but no workers → go to Step 4 (spawn first)
- If no todos exist → go to Step 2
- If session is stale or wrong → call \`bridge_cancel_run\`, then go to Step 2

### Step 2 — Clarify (if needed)
If the spec is genuinely ambiguous, ask ONE question. Otherwise skip.

### Step 3 — Plan
Create 3–10 todos with \`bridge_add_todo\`:
- \`todoType\`: \`implementation\` → developer, \`review\` → reviewer, \`infra\` → shell/executor
- \`dependsOn\`: list todo IDs that must complete first
- Every implementation block must have a \`review\` todo depending on it
- Descriptions: include file paths, expected inputs/outputs, relevant context

Verify plan with \`bridge_get_todos\` before proceeding.

### Step 4 — Spawn workers if needed
\`bridge_list_agents\` — check for idle agents first (previous sessions may have them).
Missing a required role → \`bridge_spawn_worker\` (role: developer, reviewer, shell, executor).

### Step 5 — Assign initial work
For each idle agent matching a needed role:
- Get the todo ID from \`bridge_list_agents\` (assignedTodo field shows what's already running)
- \`bridge_assign_task\` — pin the todo to the agent (\`inRun:false\` agents are valid targets)
- Server dispatches remaining todos automatically as workers complete — you do NOT need to manually assign every todo

After assigning, call \`bridge_get_execution_status\` and report current progress to the user:
- How many todos are running / done / total
- Which workers are active

Then go idle. The server handles dispatch automatically.

### Step 6 — Monitor (only when user explicitly requests it)

**Default behavior: stop after Step 5 and go idle.**

Only enter this step if the user explicitly asks to "monitor", "watch this", "drive to completion", or similar.

**Bounded monitoring loop — maximum 5 poll iterations:**

Each iteration:
1. \`bridge_get_execution_status\`
- run status = \`completed\` → DONE: go to Step 7
- run status = \`circuit_broken\` → all retries exhausted: go to Step 7 with failure summary
- done = total → DONE: go to Step 7
2. \`bridge_list_agents\`
- Any agent \`status: busy\` → work in progress, continue to next iteration
- **ALL agents idle + done < total = DEADLOCK:**
- \`bridge_get_agent_output\` on recently-idle agents to diagnose
- Missing a required role? → \`bridge_spawn_worker\` **once** (do NOT spawn repeatedly)
- Report to user: what is stuck and why. Exit loop.
3. Continue to next iteration (max 5 total)

After 5 iterations: report current \`bridge_get_execution_status\` snapshot and go idle. Tell the user to re-engage you for another status check.

**Hard limits in monitoring mode:**
- Max 5 poll iterations — never exceed this
- Max 1 \`bridge_spawn_worker\` call per deadlock — never spawn repeatedly
- If \`bridge_get_execution_status\` returns an error → exit loop, report to user

### Step 7 — Wrap up
Summarize: todos completed, failed, any blockers. List what workers did.

---

${toolTable('bridge_get_project', 'bridge_get_plan', 'bridge_get_todos', 'bridge_add_todo', 'bridge_cancel_run', 'bridge_get_project_history', 'bridge_get_execution_status', 'bridge_list_agents', 'bridge_get_agent_status', 'bridge_spawn_worker', 'bridge_assign_task', 'bridge_get_todo_context', 'bridge_get_agent_output', 'bridge_kill_agent')}`,
};
461
/**
 * Build CLI args that inject the role system prompt for an agent.
 *
 * Claude: writes the resolved prompt to a tmp file and returns
 * ['--append-system-prompt-file', path]. Qwen: passes the prompt inline as
 * ['--append-system-prompt', prompt]. Anything else (no role, unknown role,
 * unsupported agent, or write failure) returns [].
 *
 * @param {string} agentKey - Agent binary key ('claude', 'qwen', ...).
 * @param {string|undefined} role - Role name keyed into ROLE_SYSTEM_PROMPTS.
 * @param {string} agentId - Panel/agent ID, substituted for {{PANEL_ID}}.
 * @returns {string[]} CLI args, or [] when unsupported.
 */
function buildRolePromptArgs(agentKey, role, agentId) {
    if (!role) {
        return [];
    }
    const content = ROLE_SYSTEM_PROMPTS[role];
    if (!content) {
        return [];
    }
    switch (agentKey) {
        case 'claude': {
            // Claude takes the prompt from a file, so persist it to tmp first.
            try {
                const tmpPath = path.join(os.tmpdir(), `bridge-role-${agentId}.md`);
                const resolved = content.replaceAll('{{PANEL_ID}}', agentId);
                fs.writeFileSync(tmpPath, resolved + '\n', 'utf-8');
                console.log('[daemon] role.prompt.written', { agentId, role, tmpPath });
                return ['--append-system-prompt-file', tmpPath];
            }
            catch (err) {
                console.warn('[daemon] role.prompt.write.failed', { agentId, role, error: String(err) });
                return [];
            }
        }
        case 'qwen':
            // Qwen supports --append-system-prompt as a CLI flag.
            return ['--append-system-prompt', content];
        default:
            // Kimi does not support a --system-prompt-file flag;
            // role prompts are not supported for other agents at this time.
            return [];
    }
}
494
/**
 * Serialize a JS string as a TOML basic string literal.
 *
 * Escapes backslashes and double quotes, and — unlike the previous naive
 * quote-wrap — control characters (U+0000–U+001F, U+007F), which TOML basic
 * strings require escaped. Without this, a value containing e.g. a newline
 * produced an unparseable `-c` override for codex.
 *
 * @param {string} v - Raw value.
 * @returns {string} Double-quoted TOML basic string.
 */
function toTomlString(v) {
    const escaped = v
        .replace(/\\/g, '\\\\')
        .replace(/"/g, '\\"')
        .replace(/[\u0000-\u001f\u007f]/g, (ch) => {
            switch (ch) {
                case '\b': return '\\b';
                case '\t': return '\\t';
                case '\n': return '\\n';
                case '\f': return '\\f';
                case '\r': return '\\r';
                // TOML allows \uXXXX for any other control character.
                default: return `\\u${ch.charCodeAt(0).toString(16).padStart(4, '0').toUpperCase()}`;
            }
        });
    return `"${escaped}"`;
}
497
/**
 * Write a temp MCP config file for Kimi and return --mcp-config-file args.
 * Mirrors buildMcpConfigArgs(): HTTP transport when BRIDGE_MCP_URL is set,
 * stdio transport otherwise.
 *
 * The config embeds ctx.token, so the file is created with mode 0o600 — the
 * previous default permissions left the bearer token readable by other users
 * in the shared tmpdir.
 *
 * @param {{workspaceId: string, projectId: string, token: string, serverUrl: string, agentId?: string}} ctx
 * @returns {string[]} ['--mcp-config-file', <path>] on success, [] on failure.
 */
function buildKimiMcpConfigArgs(ctx) {
    try {
        const bridgeMcpUrl = process.env['BRIDGE_MCP_URL'];
        const config = bridgeMcpUrl
            ? {
                mcpServers: {
                    bridge: {
                        type: 'http',
                        url: `${bridgeMcpUrl}/mcp/${ctx.workspaceId}/${ctx.projectId}`,
                        headers: {
                            Authorization: `Bearer ${ctx.token}`,
                            'x-panel-id': ctx.agentId ?? '',
                        },
                    },
                },
            }
            : {
                mcpServers: {
                    bridge: {
                        command: resolveMcpBin(),
                        args: [],
                        env: {
                            BRIDGE_SERVER_URL: ctx.serverUrl,
                            BRIDGE_TOKEN: ctx.token,
                            BRIDGE_WORKSPACE_ID: ctx.workspaceId,
                            BRIDGE_PROJECT_ID: ctx.projectId,
                            BRIDGE_PANEL_ID: ctx.agentId ?? '',
                            HTTP_MODE: 'false',
                        },
                    },
                },
            };
        const tmpPath = path.join(os.tmpdir(), `bridge-mcp-kimi-${ctx.agentId ?? ctx.projectId}.json`);
        // 0o600: the file contains the bearer token — keep it private to this user.
        fs.writeFileSync(tmpPath, JSON.stringify(config, null, 2) + '\n', { encoding: 'utf-8', mode: 0o600 });
        // `mode` only applies on creation; chmod covers a pre-existing file.
        fs.chmodSync(tmpPath, 0o600);
        console.log('[daemon] kimi.mcp.config.written', { tmpPath, transport: bridgeMcpUrl ? 'http' : 'stdio' });
        return ['--mcp-config-file', tmpPath];
    }
    catch (err) {
        console.warn('[daemon] kimi.mcp.config.build.failed', { error: String(err) });
        return [];
    }
}
539
/**
 * Build codex `-c` override args that register the bridge MCP server over
 * stdio, passing connection details as an inline TOML env table.
 * @param {{serverUrl: string, token: string, workspaceId: string, projectId: string}} ctx
 * @returns {string[]} Alternating ['-c', override] pairs, or [] on failure.
 */
function buildCodexMcpConfigArgs(ctx) {
    try {
        const mcpBin = resolveMcpBin();
        const envPairs = [
            `BRIDGE_SERVER_URL=${toTomlString(ctx.serverUrl)}`,
            `BRIDGE_TOKEN=${toTomlString(ctx.token)}`,
            `BRIDGE_WORKSPACE_ID=${toTomlString(ctx.workspaceId)}`,
            `BRIDGE_PROJECT_ID=${toTomlString(ctx.projectId)}`,
            'HTTP_MODE="false"',
        ];
        const overrides = [
            'mcp_servers.bridge.transport="stdio"',
            `mcp_servers.bridge.command=${toTomlString(mcpBin)}`,
            'mcp_servers.bridge.args=[]',
            `mcp_servers.bridge.env={${envPairs.join(',')}}`,
        ];
        const args = [];
        for (const override of overrides) {
            args.push('-c', override);
        }
        return args;
    }
    catch (err) {
        console.warn('[daemon] codex.mcp.config.build.failed', { error: String(err) });
        return [];
    }
}
555
/**
 * Register the bridge MCP server in qwen's project-scoped MCP config by
 * shelling out to `qwen mcp remove` followed by `qwen mcp add` in the
 * project's working directory.
 * @param {{cwd?: string, projectId: string, serverUrl: string, token: string, workspaceId: string, agentId?: string}} ctx
 * @returns {boolean} true when the `add` command exited with status 0.
 */
function ensureQwenProjectMcp(ctx) {
    try {
        if (!ctx.cwd) {
            console.warn('[daemon] qwen.mcp.setup.skipped', { reason: 'missing_cwd', projectId: ctx.projectId });
            return false;
        }
        const mcpBin = resolveMcpBin();
        const spawnOpts = {
            cwd: ctx.cwd,
            encoding: 'utf-8',
            timeout: 5000,
            stdio: 'pipe',
        };
        // Idempotent reset to avoid stale env from prior projects.
        spawnSync('qwen', ['mcp', 'remove', '--scope', 'project', 'bridge'], spawnOpts);
        const envFlags = [
            `BRIDGE_SERVER_URL=${ctx.serverUrl}`,
            `BRIDGE_TOKEN=${ctx.token}`,
            `BRIDGE_WORKSPACE_ID=${ctx.workspaceId}`,
            `BRIDGE_PROJECT_ID=${ctx.projectId}`,
            `BRIDGE_PANEL_ID=${ctx.agentId ?? ''}`,
            'HTTP_MODE=false',
        ].flatMap((pair) => ['-e', pair]);
        const add = spawnSync('qwen', [
            'mcp', 'add', '--scope', 'project',
            '-t', 'stdio',
            ...envFlags,
            'bridge', mcpBin,
        ], spawnOpts);
        if (add.status === 0) {
            console.log('[daemon] qwen.mcp.setup.ok', { cwd: ctx.cwd, projectId: ctx.projectId });
            return true;
        }
        console.warn('[daemon] qwen.mcp.setup.failed', {
            cwd: ctx.cwd,
            projectId: ctx.projectId,
            status: add.status,
            stderr: (add.stderr ?? '').toString().slice(0, 300),
        });
        return false;
    }
    catch (err) {
        console.warn('[daemon] qwen.mcp.setup.error', { error: String(err), projectId: ctx.projectId });
        return false;
    }
}
598
// Agent list from the most recent detectAgents() scan; sent to the server on
// connect and consulted when handling 'spawn' messages.
let cachedAgents = [];
// True while the daemon WebSocket is open (set on 'open', cleared on 'close').
let _isConnected = false;
// Guards against a second startDaemonConnection() call in the same process.
let _started = false;
/** Whether the daemon's WebSocket connection to the server is currently open. */
export function isDaemonWsConnected() { return _isConnected; }
602
+ export function startDaemonConnection(manager) {
603
+ if (_started)
604
+ throw new Error('[daemon] startDaemonConnection called twice — only one connection manager allowed');
605
+ _started = true;
606
+ const config = loadConfig();
607
+ const daemonId = createHash('sha256').update(config.token).digest('hex');
608
+ let ws = null;
609
+ let reconnectTimer = null;
610
+ let heartbeatTimer = null;
611
+ let consecutive1008 = 0; // exit only after 2 consecutive auth failures — avoids transient proxy 1008s
612
+ function connect() {
613
+ if (reconnectTimer) {
614
+ clearTimeout(reconnectTimer);
615
+ reconnectTimer = null;
616
+ }
617
+ ws = new WebSocket(config.server, {
618
+ headers: { Authorization: `Bearer ${config.token}` },
619
+ });
620
+ const currentWs = ws;
621
+ let stopMetrics = null;
622
+ currentWs.on('open', () => {
623
+ _isConnected = true;
624
+ console.log('[daemon] ws.connected', { server: config.server });
625
+ heartbeatTimer = setInterval(() => {
626
+ if (currentWs.readyState === WebSocket.OPEN)
627
+ currentWs.ping();
628
+ }, KEEPALIVE_MS);
629
+ const spawnHelperBroken = !isSpawnHelperHealthy();
630
+ currentWs.send(JSON.stringify({ type: 'ready', version: '1.1', name: config.name, spawnHelperBroken }));
631
+ void detectAgents(config.agentPaths).then(list => {
632
+ cachedAgents = list;
633
+ if (currentWs.readyState === WebSocket.OPEN) {
634
+ currentWs.send(JSON.stringify({ type: 'agents', list: cachedAgents }));
635
+ }
636
+ });
637
+ stopMetrics = startMetricsRelay((metrics) => {
638
+ if (currentWs.readyState === WebSocket.OPEN) {
639
+ currentWs.send(JSON.stringify({ type: 'system_metrics', daemonId, ...metrics }));
640
+ }
641
+ });
642
+ });
643
+ currentWs.on('message', (raw) => {
644
+ let msg;
645
+ try {
646
+ msg = JSON.parse(raw.toString());
647
+ }
648
+ catch {
649
+ console.warn('[daemon] Invalid JSON from server, ignoring');
650
+ return;
651
+ }
652
+ handleMessage(msg, currentWs, manager, config);
653
+ });
654
+ currentWs.on('close', (code) => {
655
+ _isConnected = false;
656
+ if (heartbeatTimer) {
657
+ clearInterval(heartbeatTimer);
658
+ heartbeatTimer = null;
659
+ }
660
+ stopMetrics?.();
661
+ stopMetrics = null;
662
+ if (code === 1008) {
663
+ consecutive1008++;
664
+ if (consecutive1008 >= 2) {
665
+ console.error('[daemon] ws.auth_failed — token invalid or expired (2 consecutive rejections), stopping. Re-run: bridge-agent auth');
666
+ process.exit(1);
667
+ }
668
+ console.warn('[daemon] ws.auth_rejected — transient 1008, will retry once', { attempt: consecutive1008 });
669
+ }
670
+ else {
671
+ consecutive1008 = 0;
672
+ }
673
+ if (reconnectTimer)
674
+ return;
675
+ console.log('[daemon] ws.reconnecting', { attempt: 1 });
676
+ reconnectTimer = setTimeout(connect, 3000);
677
+ });
678
currentWs.on('error', (socketErr) => {
    // Log only — the paired 'close' event is what drives reconnection.
    console.error('[daemon] ws.error', { message: socketErr.message });
});
681
+ }
682
// Best-effort teardown before process exit: cancel the heartbeat, stop the quota
// and per-panel usage watchers, kill all managed PTYs, and close the server socket.
function cleanShutdown() {
    if (heartbeatTimer) {
        clearInterval(heartbeatTimer);
        heartbeatTimer = null;
    }
    stopQuotaWatcher();
    usageWatchers.forEach((stop) => stop());
    manager.killAll();
    ws?.close();
}
693
// Exit cleanly on the standard termination signals.
for (const sig of ['SIGINT', 'SIGTERM', 'SIGHUP']) {
    process.on(sig, () => { cleanShutdown(); process.exit(0); });
}
// Last resort: make sure spawned PTYs do not outlive a crashed daemon.
process.on('uncaughtException', (err) => {
    console.error('[daemon] uncaughtException', { error: err.message });
    manager.killAll();
    process.exit(1);
});
connect();
702
+ }
703
/**
 * Dispatch a single control message received from the server.
 *
 * @param {object} msg - Parsed server message; `msg.type` selects the action
 *   ('spawn' | 'input' | 'kill' | 'resize' | 'detect_agents' | 'dir_list').
 * @param {WebSocket} ws - Open server connection; every send is guarded by a
 *   readyState check because the socket may close at any time.
 * @param {object} manager - PTY manager (spawn/write/kill/resize/getLastError).
 * @param {object} config - Daemon config (server URL, token, agentPaths).
 */
function handleMessage(msg, ws, manager, config) {
    switch (msg.type) {
        // --- spawn: start an agent CLI in a PTY, wiring sessions, MCP and role prompts ---
        case 'spawn': {
            console.log('[daemon] pty.spawn.start', { agentId: msg.agentId, agentKey: msg.agentKey, sessionId: msg.sessionId, projectId: msg.projectId, workspaceId: msg.workspaceId, role: msg.role });
            // The agent binary must have been found by a prior detectAgents() pass.
            const agent = cachedAgents.find(a => a.key === msg.agentKey);
            if (!agent) {
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({
                        type: 'error',
                        code: 'AGENT_NOT_FOUND',
                        message: `Agent '${msg.agentKey}' is not installed on this machine`,
                    }));
                }
                return;
            }
            const spec = AGENT_SPECS.find(s => s.key === msg.agentKey);
            let args = [];
            // Resume only when the server supplied a sessionId AND the spec supports it.
            let shouldResume = !!(msg.sessionId && spec?.resumeArgs);
            if (shouldResume && msg.agentKey === 'claude') {
                // Claude keeps resumable sessions under ~/.claude/sessions; if the cache
                // is gone (either bare or .json form), fall back to a fresh spawn.
                const sessionPath = path.join(os.homedir(), '.claude', 'sessions', msg.sessionId);
                if (!fs.existsSync(sessionPath) && !fs.existsSync(sessionPath + '.json')) {
                    console.warn('[daemon] claude resume cache missing — fresh spawning', { agentId: msg.agentId, sessionId: msg.sessionId });
                    shouldResume = false;
                }
            }
            if (shouldResume) {
                // Resuming a specific session
                args = spec.resumeArgs(msg.sessionId);
                console.log('[daemon] pty.spawn.resume', { agentId: msg.agentId, sessionId: msg.sessionId });
                // (Re)start usage watcher for resumed Claude session
                if (msg.agentKey === 'claude') {
                    // Calling the stored cleanup first avoids double watchers per panel.
                    usageWatchers.get(msg.agentId)?.();
                    usageWatchers.set(msg.agentId, startClaudeUsageWatcher(msg.agentId, msg.sessionId, (agentId, usedPct, usedTokens) => {
                        if (ws.readyState === WebSocket.OPEN) {
                            ws.send(JSON.stringify({ type: 'panel_token_usage', agentId, usedPct, usedTokens, ...latestQuota }));
                        }
                    }));
                }
            }
            else if (spec?.assignSessionId) {
                // Fresh spawn for session-capable agent — assign stable UUID now
                const newSessionId = crypto.randomUUID();
                // Kimi uses --session (or -r) instead of --session-id
                const sessionArg = msg.agentKey === 'kimi' ? '--session' : '--session-id';
                args = [...(spec.spawnArgs ?? []), sessionArg, newSessionId];
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({ type: 'session_started', agentId: msg.agentId, sessionId: newSessionId }));
                    console.log('[daemon] session.assigned', { agentId: msg.agentId, sessionId: newSessionId });
                }
                // Start usage watcher for fresh Claude session
                if (msg.agentKey === 'claude') {
                    usageWatchers.get(msg.agentId)?.();
                    usageWatchers.set(msg.agentId, startClaudeUsageWatcher(msg.agentId, newSessionId, (agentId, usedPct, usedTokens) => {
                        if (ws.readyState === WebSocket.OPEN) {
                            ws.send(JSON.stringify({ type: 'panel_token_usage', agentId, usedPct, usedTokens, ...latestQuota }));
                        }
                    }));
                }
            }
            else {
                // Non-session agent (e.g. sh) — still notify browser so panel transitions to 'running'
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({ type: 'session_started', agentId: msg.agentId, sessionId: crypto.randomUUID() }));
                }
            }
            // Build MCP spawn context if the spawn message includes project info
            let spawnCtx;
            let mcpConfigured = false;
            let mcpTransport;
            if (msg.projectId && msg.workspaceId) {
                const serverUrl = deriveServerUrl(config.server);
                const projectSettings = loadProjectSettings(msg.cwd);
                spawnCtx = {
                    serverUrl,
                    token: config.token,
                    workspaceId: mkWorkspaceId(msg.workspaceId),
                    projectId: mkProjectId(msg.projectId),
                    agentId: msg.agentId ? mkAgentId(msg.agentId) : undefined,
                    cwd: msg.cwd,
                    projectEnv: projectSettings.env,
                };
                // Agent-specific MCP wiring: different CLIs use different config surfaces.
                if (msg.agentKey === 'claude') {
                    // Claude takes MCP config via CLI args; transport follows BRIDGE_MCP_URL.
                    const mcpArgs = buildMcpConfigArgs(spawnCtx);
                    mcpConfigured = mcpArgs.length > 0;
                    mcpTransport = process.env['BRIDGE_MCP_URL'] ? 'http' : 'stdio';
                    args = [...args, ...mcpArgs];
                }
                else if (msg.agentKey === 'codex') {
                    const codexArgs = buildCodexMcpConfigArgs(spawnCtx);
                    mcpConfigured = codexArgs.length > 0;
                    mcpTransport = 'stdio';
                    args = [...args, ...codexArgs];
                }
                else if (msg.agentKey === 'qwen') {
                    // Qwen is configured via a per-project file, not CLI args.
                    mcpConfigured = ensureQwenProjectMcp(spawnCtx);
                    mcpTransport = mcpConfigured ? 'stdio' : undefined;
                }
                else if (msg.agentKey === 'kimi') {
                    const kimiMcpArgs = buildKimiMcpConfigArgs(spawnCtx);
                    mcpConfigured = kimiMcpArgs.length > 0;
                    mcpTransport = process.env['BRIDGE_MCP_URL'] ? 'http' : 'stdio';
                    args = [...args, ...kimiMcpArgs];
                }
                else {
                    mcpConfigured = false;
                    console.log('[daemon] mcp.config.skipped', { agentId: msg.agentId, agentKey: msg.agentKey, reason: 'unsupported_agent_path' });
                }
            }
            // Inject role-specific system prompt regardless of MCP setup —
            // role identity must work even without project context
            const roleArgs = buildRolePromptArgs(msg.agentKey, msg.role, msg.agentId);
            if (roleArgs.length > 0)
                args = [...args, ...roleArgs];
            // Keep terminal dimensions within sane PTY bounds (1..500).
            const clampedCols = Math.max(1, Math.min(500, msg.cols));
            const clampedRows = Math.max(1, Math.min(500, msg.rows));
            const spawnStartedAt = Date.now();
            let firstOutputSnippet = '';
            let outputBytes = 0;
            let codexOnboardingAcked = false;
            let kimiRoleInjected = false; // Track if we've injected role prompt for Kimi
            // onData callback: PTY output arrives base64-encoded in `data`.
            const ok = manager.spawn(msg.agentId, msg.agentKey, agent.binaryPath, args, clampedCols, clampedRows, (data) => {
                outputBytes += data.length;
                // Kimi: Check every chunk for TUI readiness (outside firstOutputSnippet block)
                try {
                    const decoded = Buffer.from(data, 'base64').toString('utf-8');
                    // Readiness heuristics: banner text or TUI status glyphs, within a
                    // 30s window after spawn. One-shot, guarded by kimiRoleInjected.
                    if (msg.agentKey === 'kimi' &&
                        msg.role &&
                        !kimiRoleInjected &&
                        (Date.now() - spawnStartedAt) < 30_000 &&
                        (/yolo agent/.test(decoded) || /●/.test(decoded) || /○/.test(decoded))) {
                        kimiRoleInjected = true;
                        const roleContent = ROLE_SYSTEM_PROMPTS[msg.role];
                        if (roleContent) {
                            const injectionText = roleContent.replaceAll('{{PANEL_ID}}', msg.agentId);
                            // appendCR strips trailing newlines and adds \r so the TUI submits once.
                            const formatted = appendCR(injectionText);
                            const base64data = Buffer.from(formatted).toString('base64');
                            manager.write(msg.agentId, base64data, 'orchestrator');
                            console.log('[daemon] kimi.role.injected', { agentId: msg.agentId, role: msg.role });
                        }
                    }
                }
                catch {
                    // ignore
                }
                // Capture the first non-empty, ANSI-stripped output chunk for diagnostics.
                if (!firstOutputSnippet) {
                    try {
                        const decoded = Buffer.from(data, 'base64').toString('utf-8');
                        const cleaned = stripAnsi(decoded).replace(/\x00/g, '').trim();
                        if (cleaned)
                            firstOutputSnippet = clip(cleaned);
                        // Codex may block on a one-time onboarding yes/no prompt.
                        // Auto-ack only for this specific startup banner pattern.
                        if (msg.agentKey === 'codex' &&
                            !codexOnboardingAcked &&
                            (Date.now() - spawnStartedAt) < 20_000 &&
                            /included in your plan for free|let[’']s build together/i.test(cleaned) &&
                            /yes|no|\[y\/n\]|\(y\/n\)|y\/n/i.test(cleaned)) {
                            codexOnboardingAcked = true;
                            const yes = Buffer.from('y').toString('base64');
                            manager.write(msg.agentId, yes, 'orchestrator');
                            console.log('[daemon] codex.onboarding.auto_ack', { agentId: msg.agentId });
                        }
                    }
                    catch {
                        // ignore decode errors for logging
                    }
                }
                // Forward raw (still base64) output to the browser.
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({ type: 'output', agentId: msg.agentId, data }));
                }
            }, (exitCode, signal) => {
                // onExit callback: classify quick deaths (<= EARLY_EXIT_MS) as spawn failures.
                const uptimeMs = Date.now() - spawnStartedAt;
                const earlyExit = uptimeMs <= EARLY_EXIT_MS;
                console.log('[daemon] pty.spawn.result', {
                    agentId: msg.agentId,
                    agentKey: msg.agentKey,
                    daemonId: msg.daemonId,
                    exitCode,
                    signal,
                    uptimeMs,
                    earlyExit,
                    outputBytes,
                    firstOutputSnippet: firstOutputSnippet || undefined,
                });
                if (earlyExit && ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({
                        type: 'error',
                        code: 'SPAWN_FAILED',
                        message: `Early exit: agent=${msg.agentKey} code=${exitCode ?? 'null'} signal=${signal ?? 'null'} snippet="${firstOutputSnippet || 'no output'}"`,
                    }));
                }
                // The 'exit' frame is sent in all cases, early exit or not.
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({ type: 'exit', agentId: msg.agentId, exitCode, signal }));
                }
            }, spawnCtx);
            if (!ok) {
                // Spawn failed synchronously. Distinguish a broken node-pty spawn-helper
                // (posix_spawnp failures across >= 2 distinct agentKeys within 30s)
                // from an ordinary failure.
                const lastError = manager.getLastError(msg.agentId);
                const isSpawnHelperBroken = lastError?.includes('posix_spawnp failed') && recordPosixSpawnpFailure(msg.agentKey);
                if (ws.readyState === WebSocket.OPEN) {
                    if (isSpawnHelperBroken) {
                        ws.send(JSON.stringify({
                            type: 'error',
                            code: 'SPAWN_HELPER_BROKEN',
                            message: 'node-pty spawn-helper is not executable. Upgrade bridge-agent to v0.2.10+.',
                        }));
                    }
                    else {
                        ws.send(JSON.stringify({
                            type: 'error',
                            code: 'SPAWN_FAILED',
                            message: `Failed to spawn panel ${msg.agentId}`,
                        }));
                    }
                }
            }
            else {
                // Successful spawn: report whether/how MCP was wired for this panel.
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({
                        type: 'mcp_status',
                        agentId: msg.agentId,
                        mcpConfigured,
                        transport: mcpConfigured ? mcpTransport : undefined,
                        projectId: spawnCtx?.projectId,
                    }));
                }
            }
            break;
        }
        // --- input: forward keystrokes/data to the panel's PTY ---
        case 'input':
            manager.write(msg.agentId, msg.data, msg.source);
            break;
        // --- kill: stop usage watcher for the panel, then terminate its PTY ---
        case 'kill':
            usageWatchers.get(msg.agentId)?.();
            usageWatchers.delete(msg.agentId);
            manager.kill(msg.agentId, msg.force);
            break;
        // --- resize: propagate new terminal dimensions (no clamping on this path) ---
        case 'resize':
            manager.resize(msg.agentId, msg.cols, msg.rows);
            break;
        // --- detect_agents: re-scan installed agent binaries and report back ---
        case 'detect_agents':
            // Fire-and-forget: result is cached and sent when the scan completes.
            void detectAgents(config.agentPaths).then(list => {
                cachedAgents = list;
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({ type: 'agents', list }));
                }
            });
            break;
        // --- dir_list: list subdirectories, restricted to the user's home tree ---
        case 'dir_list': {
            const homeDir = os.homedir();
            // Expand leading ~ to home directory before resolving
            const expanded = (msg.path || '~').replace(/^~/, homeDir);
            const safePath = path.resolve(expanded);
            // Reject any resolved path outside $HOME (path.resolve also collapses ../).
            if (safePath !== homeDir && !safePath.startsWith(homeDir + path.sep)) {
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({ type: 'error', code: 'INVALID_MSG', message: 'Path outside home directory' }));
                }
                return;
            }
            try {
                // Only non-hidden directories, sorted by name.
                const entries = fs.readdirSync(safePath, { withFileTypes: true })
                    .filter(e => e.isDirectory() && !e.name.startsWith('.'))
                    .map(e => ({ name: e.name, path: path.join(safePath, e.name) }))
                    .sort((a, b) => a.name.localeCompare(b.name));
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({ type: 'dir_list_result', requestId: msg.requestId, path: safePath, entries }));
                }
            }
            catch (err) {
                // Unreadable directory: reply with an empty listing plus the error text.
                if (ws.readyState === WebSocket.OPEN) {
                    ws.send(JSON.stringify({
                        type: 'dir_list_result',
                        requestId: msg.requestId,
                        path: safePath,
                        entries: [],
                        error: err instanceof Error ? err.message : 'Cannot read directory',
                    }));
                }
            }
            break;
        }
        default: {
            // Exhaustiveness placeholder (compiled-away TS never-check); unknown types are ignored.
            const _ = msg;
            void _;
        }
    }
}