@syntero/orca-cli 1.2.0 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/dist/assistant/anthropic.d.ts +23 -0
  2. package/dist/assistant/anthropic.d.ts.map +1 -0
  3. package/dist/assistant/anthropic.js +186 -0
  4. package/dist/assistant/anthropic.js.map +1 -0
  5. package/dist/assistant/cache.d.ts +20 -0
  6. package/dist/assistant/cache.d.ts.map +1 -0
  7. package/dist/assistant/cache.js +55 -0
  8. package/dist/assistant/cache.js.map +1 -0
  9. package/dist/assistant/helpers.d.ts +10 -0
  10. package/dist/assistant/helpers.d.ts.map +1 -0
  11. package/dist/assistant/helpers.js +74 -0
  12. package/dist/assistant/helpers.js.map +1 -0
  13. package/dist/assistant/index.d.ts +13 -0
  14. package/dist/assistant/index.d.ts.map +1 -0
  15. package/dist/assistant/index.js +40 -0
  16. package/dist/assistant/index.js.map +1 -0
  17. package/dist/assistant/openai.d.ts +34 -0
  18. package/dist/assistant/openai.d.ts.map +1 -0
  19. package/dist/assistant/openai.js +303 -0
  20. package/dist/assistant/openai.js.map +1 -0
  21. package/dist/assistant/prompts.d.ts +11 -0
  22. package/dist/assistant/prompts.d.ts.map +1 -0
  23. package/dist/assistant/prompts.js +203 -0
  24. package/dist/assistant/prompts.js.map +1 -0
  25. package/dist/assistant/summarize.d.ts +32 -0
  26. package/dist/assistant/summarize.d.ts.map +1 -0
  27. package/dist/assistant/summarize.js +134 -0
  28. package/dist/assistant/summarize.js.map +1 -0
  29. package/dist/assistant/types.d.ts +62 -0
  30. package/dist/assistant/types.d.ts.map +1 -0
  31. package/dist/assistant/types.js +2 -0
  32. package/dist/assistant/types.js.map +1 -0
  33. package/dist/assistant.d.ts.map +1 -1
  34. package/dist/assistant.js +163 -384
  35. package/dist/assistant.js.map +1 -1
  36. package/dist/components/ChatApp.js +1 -1
  37. package/dist/components/ChatApp.js.map +1 -1
  38. package/dist/conversations/storage.d.ts +1 -1
  39. package/dist/conversations/storage.d.ts.map +1 -1
  40. package/dist/conversations/types.d.ts +1 -1
  41. package/dist/conversations/types.d.ts.map +1 -1
  42. package/dist/index.js +1 -1
  43. package/dist/index.js.map +1 -1
  44. package/dist/tokens.d.ts +1 -1
  45. package/dist/tokens.d.ts.map +1 -1
  46. package/dist/tools.js +4 -4
  47. package/package.json +1 -1
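
Note on the assistant.js refactor below: the duplicated tool-execution loop is extracted into a shared async generator, executeToolCalls, and each call site captures its return value with `const { anthropicResults } = yield* executeToolCalls(...)`. If that idiom is unfamiliar: `yield*` delegation forwards every value the inner generator yields to the outer generator's consumer, and the `yield*` expression itself evaluates to the inner generator's return value. A minimal standalone sketch of the pattern (hypothetical names, not code from the package):

    // worker yields progress strings and *returns* a summary object
    async function* worker() {
        yield 'step 1\n';
        yield 'step 2\n';
        return { steps: 2 };
    }
    async function* outer() {
        // yield* streams worker's output through; its return value lands here
        const summary = yield* worker();
        yield `done: ${summary.steps} steps\n`;
    }
    // Consumer sees: 'step 1\n', 'step 2\n', 'done: 2 steps\n'
    for await (const chunk of outer()) process.stdout.write(chunk);

This is why the streamed output is unchanged while the tool-result bookkeeping is centralized in one helper.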
package/dist/assistant.js CHANGED
@@ -138,9 +138,10 @@ When a user asks to "update" Orca:
  2. If the deployment directory is not found, ask the user for the path
  3. Once you have the path, cd to that directory and run these steps:
  a. docker rm -f orca-sandbox-keeper 2>/dev/null (remove sandbox keeper if running)
- b. docker compose up -d --pull always --remove-orphans (pull stable images and restart)
+ b. docker compose up -d --pull always --force-recreate --remove-orphans (pull stable images and force-recreate containers)
  c. Wait for all services to be healthy by checking: docker ps --filter "name=orca-"
  d. docker system prune -af (clean up old images)
+ e. Use rebuild_all_sandboxes to rebuild sandbox containers with the new image
  4. If the deployment directory was provided by the user (not from settings), save it using save_deployment_dir tool for future updates

  ## Post-Update Container Management
@@ -233,6 +234,145 @@ function addToolsCacheControl(tools) {
  };
  return toolsCopy;
  }
+ /**
+ * Resolve the OpenAI/Azure model name and registry ID.
+ */
+ function resolveOpenAIModel(settings, isAzure) {
+ if (isAzure) {
+ const modelIdForRegistry = settings.azure.deployment;
+ const creds = loadCredentials();
+ const model = creds?.llm
+ ? resolveAzureDeployment(settings.azure.deployment, creds.llm)
+ : settings.azure.deployment;
+ return { model, modelIdForRegistry };
+ }
+ return { model: settings.openai.model, modelIdForRegistry: settings.openai.model };
+ }
+ /**
+ * Convert Anthropic message history to OpenAI message format.
+ */
+ function convertMessagesToOpenAI(messages) {
+ const openaiMessages = [{ role: 'system', content: SYSTEM_PROMPT }];
+ for (const msg of messages) {
+ if (msg.role === 'user') {
+ if (Array.isArray(msg.content)) {
+ for (const item of msg.content) {
+ if (item.type === 'tool_result') {
+ openaiMessages.push({
+ role: 'tool',
+ tool_call_id: item.tool_use_id || '',
+ content: item.content || '',
+ });
+ }
+ }
+ }
+ else {
+ openaiMessages.push({ role: 'user', content: msg.content });
+ }
+ }
+ else if (msg.role === 'assistant') {
+ if (Array.isArray(msg.content)) {
+ const toolCallsArr = [];
+ let textContent = '';
+ for (const item of msg.content) {
+ if (item.type === 'text') {
+ textContent = item.text || '';
+ }
+ else if (item.type === 'tool_use') {
+ toolCallsArr.push({
+ id: item.id || '',
+ type: 'function',
+ function: {
+ name: item.name || '',
+ arguments: JSON.stringify(item.input || {}),
+ },
+ });
+ }
+ }
+ const assistantMsg = {
+ role: 'assistant',
+ content: textContent || null,
+ };
+ if (toolCallsArr.length > 0) {
+ assistantMsg.tool_calls = toolCallsArr;
+ }
+ openaiMessages.push(assistantMsg);
+ }
+ }
+ }
+ return openaiMessages;
+ }
+ /**
+ * Execute tool calls: display headers, check dangerous commands, run tools, collect results.
+ */
+ async function* executeToolCalls(toolCalls, confirmCommand, approvedCategories) {
+ const anthropicResults = [];
+ const openaiResults = [];
+ for (const tc of toolCalls) {
+ const input = tc.input;
+ // Show tool header
+ if (tc.name === 'run_command') {
+ yield color(`\n\n[${tc.name}] `, Colors.dim);
+ yield color(`${input.command}\n`, Colors.cyan);
+ if (input.gist) {
+ yield color(`${input.gist}\n`, Colors.dim);
+ }
+ }
+ else {
+ yield color(`\n\n[${tc.name}]\n`, Colors.dim);
+ }
+ let result;
+ if (tc.name === 'run_command' && confirmCommand) {
+ const command = input.command;
+ const gist = input.gist;
+ const dangerCheck = checkCommand(command);
+ if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
+ yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
+ const confirmResult = await confirmCommand(command, dangerCheck, gist);
+ if (!confirmResult.confirmed) {
+ result = 'Command cancelled by user.';
+ yield color(`${result}\n`, Colors.dim);
+ }
+ else {
+ result = await handleToolCall(tc.name, tc.input);
+ const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
+ yield color(`${preview}\n`, Colors.dim);
+ }
+ }
+ else {
+ result = await handleToolCall(tc.name, tc.input);
+ const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
+ yield color(`${preview}\n`, Colors.dim);
+ }
+ }
+ else {
+ result = await handleToolCall(tc.name, tc.input);
+ const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
+ yield color(`${preview}\n`, Colors.dim);
+ }
+ anthropicResults.push({
+ type: 'tool_result',
+ tool_use_id: tc.id,
+ content: result,
+ });
+ openaiResults.push({
+ tool_call_id: tc.id,
+ content: result,
+ });
+ }
+ return { anthropicResults, openaiResults };
+ }
+ /**
+ * Inject a queued user message into conversation history.
+ */
+ function* injectQueuedMessage(msg, messages, openaiMessages) {
+ const injectedContent = `[User update while AI was working]: ${msg}`;
+ messages.push({ role: 'user', content: injectedContent });
+ if (openaiMessages) {
+ openaiMessages.push({ role: 'user', content: injectedContent });
+ }
+ yield color(`\n[Queued message injected: "${msg.length > 50 ? msg.slice(0, 50) + '...' : msg}"]\n`, Colors.dim);
+ }
  /**
  * Run assistant with Anthropic API with streaming.
  * @param getQueuedMessage - Optional callback to check for queued messages at each iteration
@@ -245,20 +385,11 @@ export async function* runAssistantAnthropic(client, settings, userMessage, mess
  // Check for queued messages at the start of each iteration
  const queuedMessage = getQueuedMessage?.();
  if (queuedMessage) {
- // Inject the queued message into the conversation
- messages.push({
- role: 'user',
- content: `[User update while AI was working]: ${queuedMessage}`,
- });
- yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+ yield* injectQueuedMessage(queuedMessage, messages);
  }
  let responseText = '';
  const toolCalls = [];
  // Anthropic prompt caching: Add cache_control markers to reduce token costs
- // - System prompt: cached as content block array
- // - Tools: cache_control on last tool
- // - Messages: cache_control on last content block of last message
- // This enables incremental caching - each turn builds on the cached prefix
  const cachedSystem = [
  { type: 'text', text: SYSTEM_PROMPT, cache_control: { type: 'ephemeral' } },
  ];
@@ -315,51 +446,8 @@ export async function* runAssistantAnthropic(client, settings, userMessage, mess
  }
  messages.push({ role: 'assistant', content: assistantContent });
  if (finalMessage.stop_reason === 'tool_use') {
- const toolResults = [];
- for (const tc of toolCalls) {
- yield color(`\n\n[${tc.name}]\n`, Colors.dim);
- let result;
- // Check for dangerous commands that need confirmation
- if (tc.name === 'run_command' && confirmCommand) {
- const input = tc.input;
- const command = input.command;
- const gist = input.gist;
- const dangerCheck = checkCommand(command);
- if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
- // Request confirmation from user
- yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
- const confirmResult = await confirmCommand(command, dangerCheck, gist);
- if (!confirmResult.confirmed) {
- result = 'Command cancelled by user.';
- yield color(`${result}\n`, Colors.dim);
- }
- else {
- // User confirmed - execute the command
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not dangerous or already approved - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not run_command or no confirmation callback - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- toolResults.push({
- type: 'tool_result',
- tool_use_id: tc.id,
- content: result,
- });
- }
- messages.push({ role: 'user', content: toolResults });
+ const { anthropicResults } = yield* executeToolCalls(toolCalls, confirmCommand, approvedCategories);
+ messages.push({ role: 'user', content: anthropicResults });
  }
  else {
  break;
@@ -380,12 +468,7 @@ export async function* runAssistantAnthropicNonStreaming(client, settings, userM
  // Check for queued messages at the start of each iteration
  const queuedMessage = getQueuedMessage?.();
  if (queuedMessage) {
- // Inject the queued message into the conversation
- messages.push({
- role: 'user',
- content: `[User update while AI was working]: ${queuedMessage}`,
- });
- yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+ yield* injectQueuedMessage(queuedMessage, messages);
  }
  // Anthropic prompt caching: Add cache_control markers to reduce token costs
  const cachedSystem = [
@@ -435,51 +518,8 @@ export async function* runAssistantAnthropicNonStreaming(client, settings, userM
  }
  messages.push({ role: 'assistant', content: assistantContent });
  if (response.stop_reason === 'tool_use') {
- const toolResults = [];
- for (const tc of toolCalls) {
- yield color(`\n\n[${tc.name}]\n`, Colors.dim);
- let result;
- // Check for dangerous commands that need confirmation
- if (tc.name === 'run_command' && confirmCommand) {
- const input = tc.input;
- const command = input.command;
- const gist = input.gist;
- const dangerCheck = checkCommand(command);
- if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
- // Request confirmation from user
- yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
- const confirmResult = await confirmCommand(command, dangerCheck, gist);
- if (!confirmResult.confirmed) {
- result = 'Command cancelled by user.';
- yield color(`${result}\n`, Colors.dim);
- }
- else {
- // User confirmed - execute the command
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not dangerous or already approved - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not run_command or no confirmation callback - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- toolResults.push({
- type: 'tool_result',
- tool_use_id: tc.id,
- content: result,
- });
- }
- messages.push({ role: 'user', content: toolResults });
+ const { anthropicResults } = yield* executeToolCalls(toolCalls, confirmCommand, approvedCategories);
+ messages.push({ role: 'user', content: anthropicResults });
  }
  else {
  break;
@@ -494,85 +534,15 @@ export async function* runAssistantAnthropicNonStreaming(client, settings, userM
  * @param approvedCategories - Set of command categories approved for the session
  */
  export async function* runAssistantOpenAI(client, settings, userMessage, messages, isAzure = false, signal, getQueuedMessage, confirmCommand, approvedCategories) {
- // For Azure, resolve the model ID (azure-large/azure-small) to the actual deployment name
- let model;
- let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
- if (isAzure) {
- modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
- const creds = loadCredentials();
- if (creds?.llm) {
- model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
- }
- else {
- // Fallback to settings if no cloud credentials (local Azure config)
- model = settings.azure.deployment;
- }
- }
- else {
- model = settings.openai.model;
- modelIdForRegistry = model;
- }
- // Convert messages format for OpenAI
- const openaiMessages = [{ role: 'system', content: SYSTEM_PROMPT }];
- for (const msg of messages) {
- if (msg.role === 'user') {
- if (Array.isArray(msg.content)) {
- // Tool results
- for (const item of msg.content) {
- if (item.type === 'tool_result') {
- openaiMessages.push({
- role: 'tool',
- tool_call_id: item.tool_use_id || '',
- content: item.content || '',
- });
- }
- }
- }
- else {
- openaiMessages.push({ role: 'user', content: msg.content });
- }
- }
- else if (msg.role === 'assistant') {
- if (Array.isArray(msg.content)) {
- const toolCallsArr = [];
- let textContent = '';
- for (const item of msg.content) {
- if (item.type === 'text') {
- textContent = item.text || '';
- }
- else if (item.type === 'tool_use') {
- toolCallsArr.push({
- id: item.id || '',
- type: 'function',
- function: {
- name: item.name || '',
- arguments: JSON.stringify(item.input || {}),
- },
- });
- }
- }
- const assistantMsg = {
- role: 'assistant',
- content: textContent || null,
- };
- if (toolCallsArr.length > 0) {
- assistantMsg.tool_calls = toolCallsArr;
- }
- openaiMessages.push(assistantMsg);
- }
- }
- }
+ const { model, modelIdForRegistry } = resolveOpenAIModel(settings, isAzure);
+ const openaiMessages = convertMessagesToOpenAI(messages);
  openaiMessages.push({ role: 'user', content: userMessage });
  messages.push({ role: 'user', content: userMessage });
  while (true) {
  // Check for queued messages at the start of each iteration
  const queuedMessage = getQueuedMessage?.();
  if (queuedMessage) {
- // Inject the queued message into both message formats
- const injectedContent = `[User update while AI was working]: ${queuedMessage}`;
- messages.push({ role: 'user', content: injectedContent });
- openaiMessages.push({ role: 'user', content: injectedContent });
- yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+ yield* injectQueuedMessage(queuedMessage, messages, openaiMessages);
  }
  let responseText = '';
  const toolCalls = {};
@@ -653,56 +623,11 @@ export async function* runAssistantOpenAI(client, settings, userMessage, message
  openaiMessages.push(openaiAssistantMsg);
  messages.push({ role: 'assistant', content: assistantContent });
  if (finishReason === 'tool_calls' && Object.keys(toolCalls).length > 0) {
- const toolResults = [];
- for (const tc of Object.values(toolCalls)) {
- yield color(`\n\n[${tc.name}]\n`, Colors.dim);
- let result;
- // Check for dangerous commands that need confirmation
- if (tc.name === 'run_command' && confirmCommand) {
- const input = tc.input;
- const command = input.command;
- const gist = input.gist;
- const dangerCheck = checkCommand(command);
- if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
- // Request confirmation from user
- yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
- const confirmResult = await confirmCommand(command, dangerCheck, gist);
- if (!confirmResult.confirmed) {
- result = 'Command cancelled by user.';
- yield color(`${result}\n`, Colors.dim);
- }
- else {
- // User confirmed - execute the command
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not dangerous or already approved - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not run_command or no confirmation callback - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- openaiMessages.push({
- role: 'tool',
- tool_call_id: tc.id,
- content: result,
- });
- toolResults.push({
- type: 'tool_result',
- tool_use_id: tc.id,
- content: result,
- });
+ const { anthropicResults, openaiResults } = yield* executeToolCalls(Object.values(toolCalls), confirmCommand, approvedCategories);
+ for (const r of openaiResults) {
+ openaiMessages.push({ role: 'tool', tool_call_id: r.tool_call_id, content: r.content });
  }
- messages.push({ role: 'user', content: toolResults });
+ messages.push({ role: 'user', content: anthropicResults });
  }
  else {
  break;
@@ -718,85 +643,15 @@ export async function* runAssistantOpenAI(client, settings, userMessage, message
  * @param approvedCategories - Set of command categories approved for the session
  */
  export async function* runAssistantOpenAINonStreaming(client, settings, userMessage, messages, isAzure = false, signal, getQueuedMessage, confirmCommand, approvedCategories) {
- // For Azure, resolve the model ID (azure-large/azure-small) to the actual deployment name
- let model;
- let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
- if (isAzure) {
- modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
- const creds = loadCredentials();
- if (creds?.llm) {
- model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
- }
- else {
- // Fallback to settings if no cloud credentials (local Azure config)
- model = settings.azure.deployment;
- }
- }
- else {
- model = settings.openai.model;
- modelIdForRegistry = model;
- }
- // Convert messages format for OpenAI
- const openaiMessages = [{ role: 'system', content: SYSTEM_PROMPT }];
- for (const msg of messages) {
- if (msg.role === 'user') {
- if (Array.isArray(msg.content)) {
- // Tool results
- for (const item of msg.content) {
- if (item.type === 'tool_result') {
- openaiMessages.push({
- role: 'tool',
- tool_call_id: item.tool_use_id || '',
- content: item.content || '',
- });
- }
- }
- }
- else {
- openaiMessages.push({ role: 'user', content: msg.content });
- }
- }
- else if (msg.role === 'assistant') {
- if (Array.isArray(msg.content)) {
- const toolCallsArr = [];
- let textContent = '';
- for (const item of msg.content) {
- if (item.type === 'text') {
- textContent = item.text || '';
- }
- else if (item.type === 'tool_use') {
- toolCallsArr.push({
- id: item.id || '',
- type: 'function',
- function: {
- name: item.name || '',
- arguments: JSON.stringify(item.input || {}),
- },
- });
- }
- }
- const assistantMsg = {
- role: 'assistant',
- content: textContent || null,
- };
- if (toolCallsArr.length > 0) {
- assistantMsg.tool_calls = toolCallsArr;
- }
- openaiMessages.push(assistantMsg);
- }
- }
- }
+ const { model, modelIdForRegistry } = resolveOpenAIModel(settings, isAzure);
+ const openaiMessages = convertMessagesToOpenAI(messages);
  openaiMessages.push({ role: 'user', content: userMessage });
  messages.push({ role: 'user', content: userMessage });
  while (true) {
  // Check for queued messages at the start of each iteration
  const queuedMessage = getQueuedMessage?.();
  if (queuedMessage) {
- // Inject the queued message into both message formats
- const injectedContent = `[User update while AI was working]: ${queuedMessage}`;
- messages.push({ role: 'user', content: injectedContent });
- openaiMessages.push({ role: 'user', content: injectedContent });
- yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+ yield* injectQueuedMessage(queuedMessage, messages, openaiMessages);
  }
  // Build request options with dynamic max_tokens parameter based on model ID (not deployment name)
  const maxTokensParam = ModelRegistry.getMaxTokensParam(modelIdForRegistry);
@@ -865,56 +720,11 @@ export async function* runAssistantOpenAINonStreaming(client, settings, userMess
  openaiMessages.push(openaiAssistantMsg);
  messages.push({ role: 'assistant', content: assistantContent });
  if (finishReason === 'tool_calls' && Object.keys(toolCalls).length > 0) {
- const toolResults = [];
- for (const tc of Object.values(toolCalls)) {
- yield color(`\n\n[${tc.name}]\n`, Colors.dim);
- let result;
- // Check for dangerous commands that need confirmation
- if (tc.name === 'run_command' && confirmCommand) {
- const input = tc.input;
- const command = input.command;
- const gist = input.gist;
- const dangerCheck = checkCommand(command);
- if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
- // Request confirmation from user
- yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
- const confirmResult = await confirmCommand(command, dangerCheck, gist);
- if (!confirmResult.confirmed) {
- result = 'Command cancelled by user.';
- yield color(`${result}\n`, Colors.dim);
- }
- else {
- // User confirmed - execute the command
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not dangerous or already approved - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- }
- else {
- // Not run_command or no confirmation callback - execute normally
- result = await handleToolCall(tc.name, tc.input);
- const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
- yield color(`${preview}\n`, Colors.dim);
- }
- openaiMessages.push({
- role: 'tool',
- tool_call_id: tc.id,
- content: result,
- });
- toolResults.push({
- type: 'tool_result',
- tool_use_id: tc.id,
- content: result,
- });
+ const { anthropicResults, openaiResults } = yield* executeToolCalls(Object.values(toolCalls), confirmCommand, approvedCategories);
+ for (const r of openaiResults) {
+ openaiMessages.push({ role: 'tool', tool_call_id: r.tool_call_id, content: r.content });
  }
- messages.push({ role: 'user', content: toolResults });
+ messages.push({ role: 'user', content: anthropicResults });
  }
  else {
  break;
@@ -1046,23 +856,7 @@ async function summarizeWithAnthropic(client, settings, conversationText) {
  * Summarize a conversation using the OpenAI/Azure API.
  */
  async function summarizeWithOpenAI(client, settings, conversationText, isAzure) {
- // For Azure, resolve the model ID to the actual deployment name
- let model;
- let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
- if (isAzure) {
- modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
- const creds = loadCredentials();
- if (creds?.llm) {
- model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
- }
- else {
- model = settings.azure.deployment;
- }
- }
- else {
- model = settings.openai.model;
- modelIdForRegistry = model;
- }
+ const { model, modelIdForRegistry } = resolveOpenAIModel(settings, isAzure);
  // Build request options with dynamic max_tokens parameter based on model ID (not deployment name)
  const maxTokensParam = ModelRegistry.getMaxTokensParam(modelIdForRegistry);
  const baseOptions = {
@@ -1154,22 +948,7 @@ export async function generateAITitle(client, settings, messages) {
  }
  else if (settings.provider === Provider.OPENAI || settings.provider === Provider.AZURE) {
  const openaiClient = client;
- let model;
- let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
- if (settings.provider === Provider.AZURE) {
- modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
- const creds = loadCredentials();
- if (creds?.llm) {
- model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
- }
- else {
- model = settings.azure.deployment;
- }
- }
- else {
- model = settings.openai.model;
- modelIdForRegistry = model;
- }
+ const { model, modelIdForRegistry } = resolveOpenAIModel(settings, settings.provider === Provider.AZURE);
  // Build request options with dynamic max_tokens parameter based on model ID (not deployment name)
  const maxTokensParam = ModelRegistry.getMaxTokensParam(modelIdForRegistry);
  const baseOptions = {