thepopebot 1.2.75-beta.2 → 1.2.75-beta.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/bin/cli.js +24 -4
  2. package/bin/docker-build.js +5 -0
  3. package/bin/sync.js +84 -0
  4. package/lib/ai/async-channel.js +51 -0
  5. package/lib/ai/index.js +154 -153
  6. package/lib/ai/tools.js +50 -30
  7. package/lib/chat/actions.js +28 -1
  8. package/lib/chat/components/chat-header.js +4 -0
  9. package/lib/chat/components/chat-header.jsx +4 -0
  10. package/lib/chat/components/settings-coding-agents-page.js +139 -1
  11. package/lib/chat/components/settings-coding-agents-page.jsx +160 -0
  12. package/lib/chat/components/settings-jobs-page.js +13 -2
  13. package/lib/chat/components/settings-jobs-page.jsx +15 -1
  14. package/lib/chat/components/settings-secrets-layout.js +1 -1
  15. package/lib/chat/components/settings-secrets-layout.jsx +1 -1
  16. package/lib/code/actions.js +41 -10
  17. package/lib/code/port-forwards.js +17 -3
  18. package/lib/config.js +4 -0
  19. package/lib/db/config.js +23 -0
  20. package/lib/maintenance.js +8 -1
  21. package/lib/tools/docker.js +15 -5
  22. package/package.json +1 -1
  23. package/setup/setup-ssl.mjs +414 -0
  24. package/templates/.gitignore.template +1 -0
  25. package/templates/CLAUDE.md.template +3 -4
  26. package/templates/README.md +1 -1
  27. package/templates/docker-compose.custom.yml +39 -58
  28. package/templates/docker-compose.yml +12 -17
  29. package/templates/docs/CLI.md +3 -3
  30. package/templates/docs/CONFIGURATION.md +31 -65
  31. package/templates/docs/GETTING_STARTED.md +1 -1
  32. package/templates/docs/SECURITY.md +3 -3
  33. package/templates/docs/SKILLS.md +2 -1
  34. package/templates/docker-compose.litellm.yml +0 -82
  35. package/templates/traefik-dynamic.yml.example +0 -7
package/bin/cli.js CHANGED
@@ -52,11 +52,13 @@ Commands:
52
52
  init Scaffold a new thepopebot project
53
53
  upgrade|update [@beta|version] Upgrade thepopebot (install, init, build, commit, push)
54
54
  setup Run interactive setup wizard
55
+ setup-ssl Configure SSL with Let's Encrypt wildcard cert
55
56
  setup-telegram Reconfigure Telegram webhook
56
57
  reset-auth Regenerate AUTH_SECRET (invalidates all sessions)
57
58
  reset [file] Restore a template file (or list available templates)
58
59
  diff [file] Show differences between project files and package templates
59
60
  sync <path> Sync local package to a test install (build, pack, Docker)
61
+ sync --fast <path> Fast sync — copy source into running container, rebuild .next
60
62
  set-var <KEY> [VALUE] Set a GitHub repository variable
61
63
  user:password <email> Change a user's password
62
64
  `);
@@ -337,8 +339,7 @@ AUTH_TRUST_HOST=true
337
339
  DATABASE_PATH=data/db/thepopebot.sqlite
338
340
  THEPOPEBOT_VERSION=${version}
339
341
 
340
- # Uncomment to use a custom docker-compose file that won't be overwritten by upgrades.
341
- # Edit docker-compose.custom.yml with your changes, then uncomment:
342
+ # To enable SSL with Let's Encrypt, run: npx thepopebot setup-ssl
342
343
  # COMPOSE_FILE=docker-compose.custom.yml
343
344
  `;
344
345
  fs.writeFileSync(envPath, seedEnv);
@@ -490,6 +491,15 @@ function setup() {
490
491
  }
491
492
  }
492
493
 
494
+ function setupSsl() {
495
+ const setupScript = path.join(__dirname, '..', 'setup', 'setup-ssl.mjs');
496
+ try {
497
+ execFileSync(process.execPath, [setupScript], { stdio: 'inherit', cwd: process.cwd() });
498
+ } catch {
499
+ process.exit(1);
500
+ }
501
+ }
502
+
493
503
  function setupTelegram() {
494
504
  const setupScript = path.join(__dirname, '..', 'setup', 'setup-telegram.mjs');
495
505
  try {
@@ -770,6 +780,9 @@ switch (command) {
770
780
  case 'setup':
771
781
  setup();
772
782
  break;
783
+ case 'setup-ssl':
784
+ setupSsl();
785
+ break;
773
786
  case 'setup-telegram':
774
787
  setupTelegram();
775
788
  break;
@@ -787,8 +800,15 @@ switch (command) {
787
800
  await upgrade();
788
801
  break;
789
802
  case 'sync': {
790
- const { sync } = await import('./sync.js');
791
- await sync(args[0]);
803
+ const fast = args.includes('--fast');
804
+ const syncArgs = args.filter(a => a !== '--fast');
805
+ if (fast) {
806
+ const { syncFast } = await import('./sync.js');
807
+ await syncFast(syncArgs[0]);
808
+ } else {
809
+ const { sync } = await import('./sync.js');
810
+ await sync(syncArgs[0]);
811
+ }
792
812
  break;
793
813
  }
794
814
  case 'set-var':
@@ -62,6 +62,11 @@ const CODING_AGENTS = [
62
62
  context: 'docker/coding-agent',
63
63
  dockerfile: 'docker/coding-agent/Dockerfile.opencode',
64
64
  },
65
+ {
66
+ name: 'coding-agent-kimi-cli',
67
+ context: 'docker/coding-agent',
68
+ dockerfile: 'docker/coding-agent/Dockerfile.kimi-cli',
69
+ },
65
70
  ];
66
71
 
67
72
  // Non-coding-agent images (independent, built in parallel)
package/bin/sync.js CHANGED
@@ -306,6 +306,90 @@ function buildDockerImage(projectPath) {
306
306
  }
307
307
  }
308
308
 
309
+ /**
310
+ * Fast sync — skip Docker image rebuild entirely.
311
+ *
312
+ * 1. Build package JSX (npm run build)
313
+ * 2. mirrorTemplates() — scaffold using init's managed-path logic
314
+ * 3. docker cp package source (lib/, api/, config/, package.json) into
315
+ * the running container's /app/node_modules/thepopebot/
316
+ * 4. docker cp web/app/ + web/postcss.config.mjs into container
317
+ * 5. docker exec next build inside the container (tailwindcss already there)
318
+ * 6. Clean up copied source from container
319
+ * 7. docker exec pm2 restart all
320
+ */
321
+ export async function syncFast(projectPath) {
322
+ if (!projectPath) {
323
+ console.error('\n Usage: thepopebot sync --fast <path-to-project>\n');
324
+ process.exit(1);
325
+ }
326
+
327
+ projectPath = path.resolve(projectPath);
328
+
329
+ if (!fs.existsSync(path.join(projectPath, 'package.json'))) {
330
+ console.error(`\n Not a project directory (no package.json): ${projectPath}\n`);
331
+ process.exit(1);
332
+ }
333
+
334
+ // 1. Build JSX
335
+ console.log('\n Building package...');
336
+ execSync('npm run build', { stdio: 'inherit', cwd: PACKAGE_DIR });
337
+
338
+ // 2. Mirror templates
339
+ console.log('\n Mirroring templates...');
340
+ mirrorTemplates(projectPath);
341
+
342
+ // 3. Get running container ID
343
+ const container = execSync('docker compose ps -q event-handler', {
344
+ encoding: 'utf8',
345
+ cwd: projectPath,
346
+ }).trim();
347
+
348
+ if (!container) {
349
+ console.error('\n event-handler container is not running. Use full sync instead.\n');
350
+ process.exit(1);
351
+ }
352
+
353
+ // 4. Copy package source into container's node_modules/thepopebot/
354
+ const PKG_DEST = '/app/node_modules/thepopebot';
355
+ const PACKAGE_DIRS = ['lib', 'api', 'config'];
356
+
357
+ console.log('\n Copying package source into container...');
358
+ for (const dir of PACKAGE_DIRS) {
359
+ execSync(`docker exec ${container} rm -rf ${PKG_DEST}/${dir}`, { stdio: 'inherit' });
360
+ execSync(`docker cp ${path.join(PACKAGE_DIR, dir)} ${container}:${PKG_DEST}/${dir}`, { stdio: 'inherit' });
361
+ }
362
+ // Also copy package.json for exports resolution
363
+ execSync(`docker cp ${path.join(PACKAGE_DIR, 'package.json')} ${container}:${PKG_DEST}/package.json`, { stdio: 'inherit' });
364
+
365
+ // 5. Copy web/app/ source into container for next build
366
+ const webDir = path.join(PACKAGE_DIR, 'web');
367
+ console.log('\n Copying web source into container...');
368
+ execSync(`docker cp ${path.join(webDir, 'app')} ${container}:/app/app`, { stdio: 'inherit' });
369
+ execSync(`docker cp ${path.join(webDir, 'postcss.config.mjs')} ${container}:/app/postcss.config.mjs`, { stdio: 'inherit' });
370
+ execSync(`docker cp ${path.join(webDir, 'next.config.mjs')} ${container}:/app/next.config.mjs`, { stdio: 'inherit' });
371
+
372
+ // 6. Run next build inside the container
373
+ // Hide data/logs dirs so webpack's FileSystemInfo doesn't crawl them (causes OOM/RangeError
374
+ // when workspaces contain thousands of files). Restored immediately after build.
375
+ console.log('\n Building Next.js inside container...');
376
+ execSync(`docker exec ${container} sh -c 'mv /app/data /app/.data-build-tmp 2>/dev/null; mv /app/logs /app/.logs-build-tmp 2>/dev/null; true'`, { stdio: 'inherit' });
377
+ try {
378
+ execSync(`docker exec ${container} ./node_modules/.bin/next build`, { stdio: 'inherit' });
379
+ } finally {
380
+ execSync(`docker exec ${container} sh -c 'mv /app/.data-build-tmp /app/data 2>/dev/null; mv /app/.logs-build-tmp /app/logs 2>/dev/null; true'`, { stdio: 'inherit' });
381
+ }
382
+
383
+ // 7. Clean up web source from container (not needed at runtime)
384
+ execSync(`docker exec ${container} rm -rf /app/app`, { stdio: 'inherit' });
385
+
386
+ // 8. Restart PM2
387
+ console.log('\n Restarting server...');
388
+ execSync(`docker exec ${container} pm2 restart all`, { stdio: 'inherit' });
389
+
390
+ console.log('\n Fast synced!\n');
391
+ }
392
+
309
393
  export async function sync(projectPath) {
310
394
  if (!projectPath) {
311
395
  console.error('\n Usage: thepopebot sync <path-to-project>\n');
@@ -0,0 +1,51 @@
1
+ /**
2
+ * Async push/pull queue. Producer calls push()/done(), consumer uses for-await.
3
+ */
4
+ export function createChannel() {
5
+ const queue = [];
6
+ const waiters = [];
7
+ let isDone = false;
8
+
9
+ return {
10
+ push(value) {
11
+ if (waiters.length > 0) waiters.shift()(value);
12
+ else queue.push(value);
13
+ },
14
+ done() {
15
+ isDone = true;
16
+ while (waiters.length > 0) waiters.shift()(Symbol.for('done'));
17
+ },
18
+ async *[Symbol.asyncIterator]() {
19
+ while (true) {
20
+ if (queue.length > 0) {
21
+ yield queue.shift();
22
+ } else if (isDone) {
23
+ return;
24
+ } else {
25
+ const value = await new Promise(resolve => waiters.push(resolve));
26
+ if (value === Symbol.for('done')) return;
27
+ yield value;
28
+ }
29
+ }
30
+ }
31
+ };
32
+ }
33
+
34
+ /**
35
+ * Merge two async iterables — yields from whichever has data first.
36
+ * Completes when BOTH are exhausted.
37
+ */
38
+ export async function* mergeAsyncIterables(iter1, iter2) {
39
+ const channel = createChannel();
40
+ let active = 2;
41
+
42
+ const consume = async (iter) => {
43
+ for await (const item of iter) channel.push(item);
44
+ if (--active === 0) channel.done();
45
+ };
46
+
47
+ consume(iter1);
48
+ consume(iter2);
49
+
50
+ yield* channel;
51
+ }
package/lib/ai/index.js CHANGED
@@ -1,4 +1,5 @@
1
1
  import { HumanMessage, AIMessage } from '@langchain/core/messages';
2
+ import { createChannel, mergeAsyncIterables } from './async-channel.js';
2
3
  import { z } from 'zod';
3
4
  import { getAgentChat, getCodeChat } from './agent.js';
4
5
  import { createModel } from './model.js';
@@ -178,16 +179,22 @@ async function* chatStream(threadId, message, attachments = [], options = {}) {
178
179
  }
179
180
  }
180
181
 
182
+ // Side channel: bridges the tool's live container output to this generator
183
+ const sideChannel = createChannel();
184
+ const streamCallback = (chunk) => {
185
+ if (chunk === null) sideChannel.done();
186
+ else sideChannel.push(chunk);
187
+ };
188
+
181
189
  try {
182
190
  const stream = await agent.stream(
183
191
  { messages: [new HumanMessage({ content: messageContent })] },
184
- { configurable: { thread_id: threadId, workspaceId, repo, branch, codeModeType }, streamMode: 'messages' }
192
+ { configurable: { thread_id: threadId, workspaceId, repo, branch, codeModeType, streamCallback }, streamMode: 'messages' }
185
193
  );
186
194
 
187
195
  let fullText = '';
188
196
  const toolCallNames = {};
189
197
  const pendingToolCalls = new Map();
190
- let headlessContainer = null;
191
198
 
192
199
  // Accumulate raw tool call arg fragments across streaming chunks.
193
200
  // Each AIMessageChunk only carries its own delta — the first chunk
@@ -195,120 +202,127 @@ async function* chatStream(threadId, message, attachments = [], options = {}) {
195
202
  // chunks (input_json_delta) have only index with the partial JSON delta.
196
203
  const toolCallRawArgs = {}; // tool_call_id → accumulated args string
197
204
  const indexToToolCallId = {}; // chunk index → tool_call_id
205
+ const toolCallArgsEmitted = new Set(); // tool_call_ids whose complete args have been yielded
206
+
207
+ // Headless container streaming state
208
+ const memoryParts = [];
209
+ const headlessPendingToolCalls = new Map();
210
+ let pendingText = ''; // channel text, flushed to DB at tool boundaries
211
+ let llmTextAccum = ''; // langgraph text (direct response or LLM follow-up after container)
212
+ let resultSummary = '';
213
+
214
+ // Tag helper so mergeAsyncIterables can tell the two sources apart.
215
+ // The LangGraph wrapper also closes sideChannel when the agent stream
216
+ // finishes — this prevents a deadlock when no tool calls streamCallback.
217
+ async function* tagged(iter, source) {
218
+ for await (const item of iter) yield { _src: source, item };
219
+ if (source === 'lg') sideChannel.done();
220
+ }
198
221
 
199
- for await (const event of stream) {
200
- // streamMode: 'messages' yields [message, metadata] tuples
201
- const msg = Array.isArray(event) ? event[0] : event;
202
- const msgType = msg._getType?.();
203
-
204
- if (msgType === 'ai') {
205
- // Tool calls AIMessage.tool_calls is an array of { id, name, args }
206
- if (msg.tool_calls?.length > 0) {
207
- for (const tc of msg.tool_calls) {
208
- toolCallNames[tc.id] = tc.name;
209
- pendingToolCalls.set(tc.id, { toolName: tc.name, args: tc.args });
210
- yield {
211
- type: 'tool-call',
212
- toolCallId: tc.id,
213
- toolName: tc.name,
214
- args: tc.args,
215
- };
216
- }
217
- }
218
-
219
- // Accumulate raw tool call arg strings from streaming chunks
220
- if (msg.tool_call_chunks?.length > 0) {
221
- for (const c of msg.tool_call_chunks) {
222
- if (c.id) {
223
- indexToToolCallId[c.index] = c.id;
224
- toolCallRawArgs[c.id] = (toolCallRawArgs[c.id] || '') + (c.args || '');
225
- } else if (c.index != null && indexToToolCallId[c.index]) {
226
- const id = indexToToolCallId[c.index];
227
- toolCallRawArgs[id] = (toolCallRawArgs[id] || '') + (c.args || '');
222
+ try {
223
+ for await (const { _src, item } of mergeAsyncIterables(
224
+ tagged(stream, 'lg'),
225
+ tagged(sideChannel, 'ch')
226
+ )) {
227
+ if (_src === 'lg') {
228
+ // ── LangGraph agent stream ────────────────────────────────────────
229
+ const msg = Array.isArray(item) ? item[0] : item;
230
+ const msgType = msg._getType?.();
231
+
232
+ if (msgType === 'ai') {
233
+ // Tool calls — AIMessage.tool_calls is an array of { id, name, args }
234
+ if (msg.tool_calls?.length > 0) {
235
+ for (const tc of msg.tool_calls) {
236
+ toolCallNames[tc.id] = tc.name;
237
+ pendingToolCalls.set(tc.id, { toolName: tc.name, args: tc.args });
238
+ yield {
239
+ type: 'tool-call',
240
+ toolCallId: tc.id,
241
+ toolName: tc.name,
242
+ args: tc.args,
243
+ };
244
+ }
228
245
  }
229
- }
230
- }
231
246
 
232
- // Text content (wrapped in structured object)
233
- let text = '';
234
- if (typeof msg.content === 'string') {
235
- text = msg.content;
236
- } else if (Array.isArray(msg.content)) {
237
- text = msg.content
238
- .filter((b) => b.type === 'text' && b.text)
239
- .map((b) => b.text)
240
- .join('');
241
- }
242
-
243
- if (text) {
244
- fullText += text;
245
- yield { type: 'text', text };
246
- }
247
- } else if (msgType === 'tool') {
248
- // Parse complete args from accumulated raw fragments
249
- const tc = pendingToolCalls.get(msg.tool_call_id);
250
- const rawArgs = toolCallRawArgs[msg.tool_call_id];
251
- let completeArgs;
252
- try { completeArgs = rawArgs ? JSON.parse(rawArgs) : {}; } catch { completeArgs = {}; }
253
-
254
- // Tool result — ToolMessage has tool_call_id and content
255
- yield {
256
- type: 'tool-result',
257
- toolCallId: msg.tool_call_id,
258
- toolName: tc?.toolName,
259
- args: completeArgs,
260
- result: msg.content,
261
- };
262
-
263
- // Save complete tool invocation as JSON
264
- if (tc) {
265
- persistMessage(threadId, 'assistant', JSON.stringify({
266
- type: 'tool-invocation',
267
- toolCallId: msg.tool_call_id,
268
- toolName: tc.toolName,
269
- state: 'output-available',
270
- input: completeArgs,
271
- output: msg.content,
272
- }), options);
273
- pendingToolCalls.delete(msg.tool_call_id);
274
- }
275
-
276
- // Detect headless container tool result for Phase 2 streaming
277
- const headlessToolName = toolCallNames[msg.tool_call_id];
278
- if (headlessToolName === 'coding_agent') {
279
- try {
280
- const parsed = JSON.parse(msg.content);
281
- if (parsed.status === 'started' && parsed.containerName) {
282
- headlessContainer = { ...parsed, toolName: headlessToolName };
247
+ // Accumulate raw tool call arg strings from streaming chunks
248
+ if (msg.tool_call_chunks?.length > 0) {
249
+ for (const c of msg.tool_call_chunks) {
250
+ if (c.id) {
251
+ indexToToolCallId[c.index] = c.id;
252
+ toolCallRawArgs[c.id] = (toolCallRawArgs[c.id] || '') + (c.args || '');
253
+ } else if (c.index != null && indexToToolCallId[c.index]) {
254
+ const id = indexToToolCallId[c.index];
255
+ toolCallRawArgs[id] = (toolCallRawArgs[id] || '') + (c.args || '');
256
+ }
257
+ }
258
+ // Re-yield tool-call with complete args once the JSON is fully streamed
259
+ for (const c of msg.tool_call_chunks) {
260
+ const id = c.id || indexToToolCallId[c.index];
261
+ if (id && toolCallRawArgs[id] && !toolCallArgsEmitted.has(id)) {
262
+ try {
263
+ const parsed = JSON.parse(toolCallRawArgs[id]);
264
+ toolCallArgsEmitted.add(id);
265
+ const tc = pendingToolCalls.get(id);
266
+ if (tc) {
267
+ tc.args = parsed;
268
+ yield { type: 'tool-call', toolCallId: id, toolName: tc.toolName, args: parsed };
269
+ }
270
+ } catch {} // args not complete yet, keep accumulating
271
+ }
272
+ }
283
273
  }
284
- } catch {}
285
- }
286
- }
287
- // Skip other message types (human, system)
288
- }
289
274
 
290
- // Save assistant response to DB (defer if headless streaming follows)
291
- if (fullText && !headlessContainer) {
292
- persistMessage(threadId, 'assistant', fullText, options);
293
- }
275
+ // Text content (wrapped in structured object)
276
+ let text = '';
277
+ if (typeof msg.content === 'string') {
278
+ text = msg.content;
279
+ } else if (Array.isArray(msg.content)) {
280
+ text = msg.content
281
+ .filter((b) => b.type === 'text' && b.text)
282
+ .map((b) => b.text)
283
+ .join('');
284
+ }
294
285
 
295
- // Phase 2: Stream headless container output live
296
- if (headlessContainer) {
297
- try {
298
- const { tailContainerLogs, waitForContainer, removeContainer } =
299
- await import('../tools/docker.js');
300
- const { parseHeadlessStream } = await import('./headless-stream.js');
286
+ if (text) {
287
+ fullText += text;
288
+ llmTextAccum += text;
289
+ yield { type: 'text', text };
290
+ }
291
+ } else if (msgType === 'tool') {
292
+ // Parse complete args from accumulated raw fragments
293
+ const tc = pendingToolCalls.get(msg.tool_call_id);
294
+ const rawArgs = toolCallRawArgs[msg.tool_call_id];
295
+ let completeArgs;
296
+ try { completeArgs = rawArgs ? JSON.parse(rawArgs) : {}; } catch { completeArgs = {}; }
297
+
298
+ // Tool result — ToolMessage has tool_call_id and content
299
+ yield {
300
+ type: 'tool-result',
301
+ toolCallId: msg.tool_call_id,
302
+ toolName: tc?.toolName,
303
+ args: completeArgs,
304
+ result: msg.content,
305
+ };
301
306
 
302
- const logStream = await tailContainerLogs(headlessContainer.containerName);
307
+ // Save complete tool invocation as JSON
308
+ if (tc) {
309
+ persistMessage(threadId, 'assistant', JSON.stringify({
310
+ type: 'tool-invocation',
311
+ toolCallId: msg.tool_call_id,
312
+ toolName: tc.toolName,
313
+ state: 'output-available',
314
+ input: completeArgs,
315
+ output: msg.content,
316
+ }), options);
317
+ pendingToolCalls.delete(msg.tool_call_id);
318
+ }
319
+ }
320
+ // Skip other message types (human, system)
303
321
 
304
- // Collect conversation parts during streaming, add to memory in one batch at the end
305
- let resultSummary = '';
306
- const memoryParts = [];
307
- const headlessPendingToolCalls = new Map();
308
- let pendingText = '';
322
+ } else {
323
+ // ── Side channel: headless container chunks ───────────────────────
324
+ let chunk = item;
309
325
 
310
- let lastEmittedText = '';
311
- for await (const chunk of parseHeadlessStream(logStream, headlessContainer.codingAgent)) {
312
326
  // Result summary: skip if duplicate, otherwise ensure it starts on a new line
313
327
  if (chunk._resultSummary && chunk.type === 'text') {
314
328
  resultSummary = chunk._resultSummary;
@@ -317,11 +331,12 @@ async function* chatStream(threadId, message, attachments = [], options = {}) {
317
331
  }
318
332
  chunk = { ...chunk, text: '\n\n' + chunk.text };
319
333
  }
320
- yield chunk;
334
+
321
335
  if (chunk.type === 'text') {
322
336
  fullText += chunk.text;
323
337
  memoryParts.push(chunk.text);
324
338
  pendingText += chunk.text;
339
+ yield chunk;
325
340
  } else if (chunk.type === 'tool-call') {
326
341
  // Flush accumulated text before tool call
327
342
  if (pendingText) {
@@ -330,67 +345,53 @@ async function* chatStream(threadId, message, attachments = [], options = {}) {
330
345
  }
331
346
  memoryParts.push('[tool-call] ' + chunk.toolName + ': ' + JSON.stringify(chunk.args));
332
347
  headlessPendingToolCalls.set(chunk.toolCallId, { toolName: chunk.toolName, args: chunk.args });
348
+ yield chunk;
333
349
  } else if (chunk.type === 'tool-result') {
350
+ // Enrich with args from matching tool-call (required by api.js tool-input-available update)
351
+ const htc = headlessPendingToolCalls.get(chunk.toolCallId);
352
+ const enriched = htc ? { ...chunk, args: htc.args, toolName: htc.toolName } : chunk;
353
+ yield enriched;
334
354
  memoryParts.push('[tool-result] ' + chunk.result);
335
- const tc = headlessPendingToolCalls.get(chunk.toolCallId);
336
- if (tc) {
355
+ if (htc) {
337
356
  persistMessage(threadId, 'assistant', JSON.stringify({
338
357
  type: 'tool-invocation',
339
358
  toolCallId: chunk.toolCallId,
340
- toolName: tc.toolName,
359
+ toolName: htc.toolName,
341
360
  state: 'output-available',
342
- input: tc.args,
361
+ input: htc.args,
343
362
  output: chunk.result,
344
363
  }), options);
345
364
  headlessPendingToolCalls.delete(chunk.toolCallId);
346
365
  }
366
+ } else {
367
+ // unknown events pass through unchanged
368
+ yield chunk;
347
369
  }
348
- if (chunk._resultSummary) resultSummary = chunk._resultSummary;
349
- }
350
370
 
351
- // Flush remaining accumulated text
352
- if (pendingText) {
353
- persistMessage(threadId, 'assistant', pendingText, options);
354
- pendingText = '';
371
+ if (chunk._resultSummary) resultSummary = chunk._resultSummary;
355
372
  }
373
+ }
374
+ } finally {
375
+ // Ensure no dangling promise when tool was never called
376
+ sideChannel.done();
377
+ }
356
378
 
357
- // Container has exited by now (tailContainerLogs follows until EOF)
358
- const exitCode = await waitForContainer(headlessContainer.containerName);
359
- await removeContainer(headlessContainer.containerName);
360
-
361
- if (exitCode === 0) {
362
- const completionMsg = codeModeType === 'plan'
363
- ? '\n\nPlanning complete.'
364
- : '\n\nCoding complete.';
365
- yield { type: 'text', text: completionMsg };
366
- fullText += completionMsg;
367
- persistMessage(threadId, 'assistant', completionMsg, options);
368
- } else {
369
- const failureMsg = '\n\nTask exited with errors.';
370
- yield { type: 'text', text: failureMsg };
371
- fullText += failureMsg;
372
- persistMessage(threadId, 'assistant', failureMsg, options);
373
- }
379
+ // Flush remaining channel text
380
+ if (pendingText) {
381
+ persistMessage(threadId, 'assistant', pendingText, options);
382
+ }
374
383
 
375
- // Inject full conversation into LangGraph memory using the correct agent
376
- if (memoryParts.length > 0) {
377
- await agent.updateState(
378
- { configurable: { thread_id: threadId } },
379
- { messages: [new AIMessage(memoryParts.join('\n'))] }
380
- );
381
- }
382
- // Also inject the summary separately for concise follow-up context
383
- if (resultSummary) {
384
- await agent.updateState(
385
- { configurable: { thread_id: threadId } },
386
- { messages: [new AIMessage(resultSummary)] }
387
- );
388
- }
384
+ // Persist LLM text (direct response with no tool, or LLM follow-up after container)
385
+ if (llmTextAccum) {
386
+ persistMessage(threadId, 'assistant', llmTextAccum, options);
387
+ }
389
388
 
390
- } catch (err) {
391
- console.error('[chatStream] headless stream error:', err);
392
- yield { type: 'text', text: '\n\nError streaming headless output: ' + err.message };
393
- }
389
+ // Inject full headless conversation detail into LangGraph memory for follow-up turns
390
+ if (memoryParts.length > 0) {
391
+ await agent.updateState(
392
+ { configurable: { thread_id: threadId } },
393
+ { messages: [new AIMessage(memoryParts.join('\n'))] }
394
+ );
394
395
  }
395
396
 
396
397
  } catch (err) {