@syntero/orca-cli 1.2.1 → 1.2.2

Files changed (46)
  1. package/dist/assistant/anthropic.d.ts +23 -0
  2. package/dist/assistant/anthropic.d.ts.map +1 -0
  3. package/dist/assistant/anthropic.js +186 -0
  4. package/dist/assistant/anthropic.js.map +1 -0
  5. package/dist/assistant/cache.d.ts +20 -0
  6. package/dist/assistant/cache.d.ts.map +1 -0
  7. package/dist/assistant/cache.js +55 -0
  8. package/dist/assistant/cache.js.map +1 -0
  9. package/dist/assistant/helpers.d.ts +10 -0
  10. package/dist/assistant/helpers.d.ts.map +1 -0
  11. package/dist/assistant/helpers.js +74 -0
  12. package/dist/assistant/helpers.js.map +1 -0
  13. package/dist/assistant/index.d.ts +13 -0
  14. package/dist/assistant/index.d.ts.map +1 -0
  15. package/dist/assistant/index.js +40 -0
  16. package/dist/assistant/index.js.map +1 -0
  17. package/dist/assistant/openai.d.ts +34 -0
  18. package/dist/assistant/openai.d.ts.map +1 -0
  19. package/dist/assistant/openai.js +303 -0
  20. package/dist/assistant/openai.js.map +1 -0
  21. package/dist/assistant/prompts.d.ts +11 -0
  22. package/dist/assistant/prompts.d.ts.map +1 -0
  23. package/dist/assistant/prompts.js +203 -0
  24. package/dist/assistant/prompts.js.map +1 -0
  25. package/dist/assistant/summarize.d.ts +32 -0
  26. package/dist/assistant/summarize.d.ts.map +1 -0
  27. package/dist/assistant/summarize.js +134 -0
  28. package/dist/assistant/summarize.js.map +1 -0
  29. package/dist/assistant/types.d.ts +62 -0
  30. package/dist/assistant/types.d.ts.map +1 -0
  31. package/dist/assistant/types.js +2 -0
  32. package/dist/assistant/types.js.map +1 -0
  33. package/dist/assistant.d.ts.map +1 -1
  34. package/dist/assistant.js +161 -383
  35. package/dist/assistant.js.map +1 -1
  36. package/dist/components/ChatApp.js +1 -1
  37. package/dist/components/ChatApp.js.map +1 -1
  38. package/dist/conversations/storage.d.ts +1 -1
  39. package/dist/conversations/storage.d.ts.map +1 -1
  40. package/dist/conversations/types.d.ts +1 -1
  41. package/dist/conversations/types.d.ts.map +1 -1
  42. package/dist/index.js +1 -1
  43. package/dist/index.js.map +1 -1
  44. package/dist/tokens.d.ts +1 -1
  45. package/dist/tokens.d.ts.map +1 -1
  46. package/package.json +1 -1
package/dist/assistant.js CHANGED
@@ -234,6 +234,145 @@ function addToolsCacheControl(tools) {
     };
     return toolsCopy;
 }
+/**
+ * Resolve the OpenAI/Azure model name and registry ID.
+ */
+function resolveOpenAIModel(settings, isAzure) {
+    if (isAzure) {
+        const modelIdForRegistry = settings.azure.deployment;
+        const creds = loadCredentials();
+        const model = creds?.llm
+            ? resolveAzureDeployment(settings.azure.deployment, creds.llm)
+            : settings.azure.deployment;
+        return { model, modelIdForRegistry };
+    }
+    return { model: settings.openai.model, modelIdForRegistry: settings.openai.model };
+}
+/**
+ * Convert Anthropic message history to OpenAI message format.
+ */
+function convertMessagesToOpenAI(messages) {
+    const openaiMessages = [{ role: 'system', content: SYSTEM_PROMPT }];
+    for (const msg of messages) {
+        if (msg.role === 'user') {
+            if (Array.isArray(msg.content)) {
+                for (const item of msg.content) {
+                    if (item.type === 'tool_result') {
+                        openaiMessages.push({
+                            role: 'tool',
+                            tool_call_id: item.tool_use_id || '',
+                            content: item.content || '',
+                        });
+                    }
+                }
+            }
+            else {
+                openaiMessages.push({ role: 'user', content: msg.content });
+            }
+        }
+        else if (msg.role === 'assistant') {
+            if (Array.isArray(msg.content)) {
+                const toolCallsArr = [];
+                let textContent = '';
+                for (const item of msg.content) {
+                    if (item.type === 'text') {
+                        textContent = item.text || '';
+                    }
+                    else if (item.type === 'tool_use') {
+                        toolCallsArr.push({
+                            id: item.id || '',
+                            type: 'function',
+                            function: {
+                                name: item.name || '',
+                                arguments: JSON.stringify(item.input || {}),
+                            },
+                        });
+                    }
+                }
+                const assistantMsg = {
+                    role: 'assistant',
+                    content: textContent || null,
+                };
+                if (toolCallsArr.length > 0) {
+                    assistantMsg.tool_calls = toolCallsArr;
+                }
+                openaiMessages.push(assistantMsg);
+            }
+        }
+    }
+    return openaiMessages;
+}
+/**
+ * Execute tool calls: display headers, check dangerous commands, run tools, collect results.
+ */
+async function* executeToolCalls(toolCalls, confirmCommand, approvedCategories) {
+    const anthropicResults = [];
+    const openaiResults = [];
+    for (const tc of toolCalls) {
+        const input = tc.input;
+        // Show tool header
+        if (tc.name === 'run_command') {
+            yield color(`\n\n[${tc.name}] `, Colors.dim);
+            yield color(`${input.command}\n`, Colors.cyan);
+            if (input.gist) {
+                yield color(`${input.gist}\n`, Colors.dim);
+            }
+        }
+        else {
+            yield color(`\n\n[${tc.name}]\n`, Colors.dim);
+        }
+        let result;
+        if (tc.name === 'run_command' && confirmCommand) {
+            const command = input.command;
+            const gist = input.gist;
+            const dangerCheck = checkCommand(command);
+            if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
+                yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
+                const confirmResult = await confirmCommand(command, dangerCheck, gist);
+                if (!confirmResult.confirmed) {
+                    result = 'Command cancelled by user.';
+                    yield color(`${result}\n`, Colors.dim);
+                }
+                else {
+                    result = await handleToolCall(tc.name, tc.input);
+                    const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
+                    yield color(`${preview}\n`, Colors.dim);
+                }
+            }
+            else {
+                result = await handleToolCall(tc.name, tc.input);
+                const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
+                yield color(`${preview}\n`, Colors.dim);
+            }
+        }
+        else {
+            result = await handleToolCall(tc.name, tc.input);
+            const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
+            yield color(`${preview}\n`, Colors.dim);
+        }
+        anthropicResults.push({
+            type: 'tool_result',
+            tool_use_id: tc.id,
+            content: result,
+        });
+        openaiResults.push({
+            tool_call_id: tc.id,
+            content: result,
+        });
+    }
+    return { anthropicResults, openaiResults };
+}
+/**
+ * Inject a queued user message into conversation history.
+ */
+function* injectQueuedMessage(msg, messages, openaiMessages) {
+    const injectedContent = `[User update while AI was working]: ${msg}`;
+    messages.push({ role: 'user', content: injectedContent });
+    if (openaiMessages) {
+        openaiMessages.push({ role: 'user', content: injectedContent });
+    }
+    yield color(`\n[Queued message injected: "${msg.length > 50 ? msg.slice(0, 50) + '...' : msg}"]\n`, Colors.dim);
+}
 /**
  * Run assistant with Anthropic API with streaming.
  * @param getQueuedMessage - Optional callback to check for queued messages at each iteration
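
The new convertMessagesToOpenAI helper keeps the Anthropic content-block format as the canonical history and derives the OpenAI shape from it on each request: tool_use blocks become tool_calls entries on the assistant message, and tool_result blocks become role 'tool' messages. A minimal sketch of the mapping with an invented transcript (the sample IDs and strings are illustrative, not from the package):

    const history = [
        { role: 'user', content: 'List the files here.' },
        { role: 'assistant', content: [
            { type: 'text', text: 'Running ls.' },
            { type: 'tool_use', id: 'tool_1', name: 'run_command', input: { command: 'ls' } },
        ] },
        { role: 'user', content: [
            { type: 'tool_result', tool_use_id: 'tool_1', content: 'a.txt  b.txt' },
        ] },
    ];
    const openaiMessages = convertMessagesToOpenAI(history);
    // [
    //   { role: 'system', content: SYSTEM_PROMPT },
    //   { role: 'user', content: 'List the files here.' },
    //   { role: 'assistant', content: 'Running ls.',
    //     tool_calls: [{ id: 'tool_1', type: 'function',
    //       function: { name: 'run_command', arguments: '{"command":"ls"}' } }] },
    //   { role: 'tool', tool_call_id: 'tool_1', content: 'a.txt  b.txt' },
    // ]
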
@@ -246,20 +385,11 @@ export async function* runAssistantAnthropic(client, settings, userMessage, mess
         // Check for queued messages at the start of each iteration
         const queuedMessage = getQueuedMessage?.();
         if (queuedMessage) {
-            // Inject the queued message into the conversation
-            messages.push({
-                role: 'user',
-                content: `[User update while AI was working]: ${queuedMessage}`,
-            });
-            yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+            yield* injectQueuedMessage(queuedMessage, messages);
         }
         let responseText = '';
         const toolCalls = [];
         // Anthropic prompt caching: Add cache_control markers to reduce token costs
-        // - System prompt: cached as content block array
-        // - Tools: cache_control on last tool
-        // - Messages: cache_control on last content block of last message
-        // This enables incremental caching - each turn builds on the cached prefix
         const cachedSystem = [
             { type: 'text', text: SYSTEM_PROMPT, cache_control: { type: 'ephemeral' } },
         ];
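
The removed bullet comments summarized the caching layout: the system prompt is cached as a content-block array, the last tool definition carries a cache_control marker, and so does the last content block of the last message, so each turn extends a previously cached prefix. A sketch of that "mark the last element" pattern, assuming the Messages API's ephemeral cache convention (markLastEphemeral is a hypothetical helper, not the package's addToolsCacheControl):

    function markLastEphemeral(blocks) {
        if (blocks.length === 0) return blocks;
        const copy = blocks.map((b) => ({ ...b }));
        // A cache_control marker caches everything up to and including that element.
        copy[copy.length - 1] = { ...copy[copy.length - 1], cache_control: { type: 'ephemeral' } };
        return copy;
    }
    const cachedTools = markLastEphemeral(tools);      // marker on the last tool
    const lastMsg = messages[messages.length - 1];     // marker on the last block of the last message
    if (Array.isArray(lastMsg.content)) {
        lastMsg.content = markLastEphemeral(lastMsg.content);
    }
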
@@ -316,51 +446,8 @@ export async function* runAssistantAnthropic(client, settings, userMessage, mess
         }
         messages.push({ role: 'assistant', content: assistantContent });
         if (finalMessage.stop_reason === 'tool_use') {
-            const toolResults = [];
-            for (const tc of toolCalls) {
-                yield color(`\n\n[${tc.name}]\n`, Colors.dim);
-                let result;
-                // Check for dangerous commands that need confirmation
-                if (tc.name === 'run_command' && confirmCommand) {
-                    const input = tc.input;
-                    const command = input.command;
-                    const gist = input.gist;
-                    const dangerCheck = checkCommand(command);
-                    if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
-                        // Request confirmation from user
-                        yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
-                        const confirmResult = await confirmCommand(command, dangerCheck, gist);
-                        if (!confirmResult.confirmed) {
-                            result = 'Command cancelled by user.';
-                            yield color(`${result}\n`, Colors.dim);
-                        }
-                        else {
-                            // User confirmed - execute the command
-                            result = await handleToolCall(tc.name, tc.input);
-                            const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                            yield color(`${preview}\n`, Colors.dim);
-                        }
-                    }
-                    else {
-                        // Not dangerous or already approved - execute normally
-                        result = await handleToolCall(tc.name, tc.input);
-                        const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                        yield color(`${preview}\n`, Colors.dim);
-                    }
-                }
-                else {
-                    // Not run_command or no confirmation callback - execute normally
-                    result = await handleToolCall(tc.name, tc.input);
-                    const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                    yield color(`${preview}\n`, Colors.dim);
-                }
-                toolResults.push({
-                    type: 'tool_result',
-                    tool_use_id: tc.id,
-                    content: result,
-                });
-            }
-            messages.push({ role: 'user', content: toolResults });
+            const { anthropicResults } = yield* executeToolCalls(toolCalls, confirmCommand, approvedCategories);
+            messages.push({ role: 'user', content: anthropicResults });
         }
         else {
             break;
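
This call site leans on a detail of generator delegation: yield* forwards every chunk that executeToolCalls yields to the consumer, then evaluates to the generator's return value, which is what the destructuring on the left receives. A self-contained illustration:

    async function* inner() {
        yield 'streamed progress...';
        return { anthropicResults: ['r1'] };   // reaches the yield* expression, not the consumer
    }
    async function* outer() {
        const { anthropicResults } = yield* inner();
        yield `collected ${anthropicResults.length} result(s)`;
    }
    // for await (const chunk of outer()) console.log(chunk);
    //   streamed progress...
    //   collected 1 result(s)
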
@@ -381,12 +468,7 @@ export async function* runAssistantAnthropicNonStreaming(client, settings, userM
         // Check for queued messages at the start of each iteration
         const queuedMessage = getQueuedMessage?.();
         if (queuedMessage) {
-            // Inject the queued message into the conversation
-            messages.push({
-                role: 'user',
-                content: `[User update while AI was working]: ${queuedMessage}`,
-            });
-            yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+            yield* injectQueuedMessage(queuedMessage, messages);
         }
         // Anthropic prompt caching: Add cache_control markers to reduce token costs
         const cachedSystem = [
@@ -436,51 +518,8 @@ export async function* runAssistantAnthropicNonStreaming(client, settings, userM
         }
         messages.push({ role: 'assistant', content: assistantContent });
         if (response.stop_reason === 'tool_use') {
-            const toolResults = [];
-            for (const tc of toolCalls) {
-                yield color(`\n\n[${tc.name}]\n`, Colors.dim);
-                let result;
-                // Check for dangerous commands that need confirmation
-                if (tc.name === 'run_command' && confirmCommand) {
-                    const input = tc.input;
-                    const command = input.command;
-                    const gist = input.gist;
-                    const dangerCheck = checkCommand(command);
-                    if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
-                        // Request confirmation from user
-                        yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
-                        const confirmResult = await confirmCommand(command, dangerCheck, gist);
-                        if (!confirmResult.confirmed) {
-                            result = 'Command cancelled by user.';
-                            yield color(`${result}\n`, Colors.dim);
-                        }
-                        else {
-                            // User confirmed - execute the command
-                            result = await handleToolCall(tc.name, tc.input);
-                            const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                            yield color(`${preview}\n`, Colors.dim);
-                        }
-                    }
-                    else {
-                        // Not dangerous or already approved - execute normally
-                        result = await handleToolCall(tc.name, tc.input);
-                        const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                        yield color(`${preview}\n`, Colors.dim);
-                    }
-                }
-                else {
-                    // Not run_command or no confirmation callback - execute normally
-                    result = await handleToolCall(tc.name, tc.input);
-                    const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                    yield color(`${preview}\n`, Colors.dim);
-                }
-                toolResults.push({
-                    type: 'tool_result',
-                    tool_use_id: tc.id,
-                    content: result,
-                });
-            }
-            messages.push({ role: 'user', content: toolResults });
+            const { anthropicResults } = yield* executeToolCalls(toolCalls, confirmCommand, approvedCategories);
+            messages.push({ role: 'user', content: anthropicResults });
         }
         else {
             break;
@@ -495,85 +534,15 @@ export async function* runAssistantAnthropicNonStreaming(client, settings, userM
  * @param approvedCategories - Set of command categories approved for the session
  */
 export async function* runAssistantOpenAI(client, settings, userMessage, messages, isAzure = false, signal, getQueuedMessage, confirmCommand, approvedCategories) {
-    // For Azure, resolve the model ID (azure-large/azure-small) to the actual deployment name
-    let model;
-    let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
-    if (isAzure) {
-        modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
-        const creds = loadCredentials();
-        if (creds?.llm) {
-            model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
-        }
-        else {
-            // Fallback to settings if no cloud credentials (local Azure config)
-            model = settings.azure.deployment;
-        }
-    }
-    else {
-        model = settings.openai.model;
-        modelIdForRegistry = model;
-    }
-    // Convert messages format for OpenAI
-    const openaiMessages = [{ role: 'system', content: SYSTEM_PROMPT }];
-    for (const msg of messages) {
-        if (msg.role === 'user') {
-            if (Array.isArray(msg.content)) {
-                // Tool results
-                for (const item of msg.content) {
-                    if (item.type === 'tool_result') {
-                        openaiMessages.push({
-                            role: 'tool',
-                            tool_call_id: item.tool_use_id || '',
-                            content: item.content || '',
-                        });
-                    }
-                }
-            }
-            else {
-                openaiMessages.push({ role: 'user', content: msg.content });
-            }
-        }
-        else if (msg.role === 'assistant') {
-            if (Array.isArray(msg.content)) {
-                const toolCallsArr = [];
-                let textContent = '';
-                for (const item of msg.content) {
-                    if (item.type === 'text') {
-                        textContent = item.text || '';
-                    }
-                    else if (item.type === 'tool_use') {
-                        toolCallsArr.push({
-                            id: item.id || '',
-                            type: 'function',
-                            function: {
-                                name: item.name || '',
-                                arguments: JSON.stringify(item.input || {}),
-                            },
-                        });
-                    }
-                }
-                const assistantMsg = {
-                    role: 'assistant',
-                    content: textContent || null,
-                };
-                if (toolCallsArr.length > 0) {
-                    assistantMsg.tool_calls = toolCallsArr;
-                }
-                openaiMessages.push(assistantMsg);
-            }
-        }
-    }
+    const { model, modelIdForRegistry } = resolveOpenAIModel(settings, isAzure);
+    const openaiMessages = convertMessagesToOpenAI(messages);
     openaiMessages.push({ role: 'user', content: userMessage });
     messages.push({ role: 'user', content: userMessage });
     while (true) {
         // Check for queued messages at the start of each iteration
         const queuedMessage = getQueuedMessage?.();
         if (queuedMessage) {
-            // Inject the queued message into both message formats
-            const injectedContent = `[User update while AI was working]: ${queuedMessage}`;
-            messages.push({ role: 'user', content: injectedContent });
-            openaiMessages.push({ role: 'user', content: injectedContent });
-            yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+            yield* injectQueuedMessage(queuedMessage, messages, openaiMessages);
         }
         let responseText = '';
         const toolCalls = {};
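
resolveOpenAIModel returns two names on purpose: model is what the request sends (for Azure, the deployment name resolved from cloud credentials), while modelIdForRegistry keeps the stable alias such as 'azure-large' so ModelRegistry lookups do not depend on per-tenant deployment names. A hedged sketch of the two paths (the settings values here are invented for illustration):

    const settings = {
        azure: { deployment: 'azure-large' },   // registry alias, per the original comments
        openai: { model: 'gpt-4o' },            // hypothetical model name
    };
    // Azure path: alias kept for the registry; wire name resolved from credentials,
    // falling back to the alias itself when no cloud credentials are configured.
    const a = resolveOpenAIModel(settings, true);
    // a.modelIdForRegistry === 'azure-large'
    // a.model === resolveAzureDeployment('azure-large', creds.llm), or 'azure-large'
    // OpenAI path: both values are the configured model name.
    const b = resolveOpenAIModel(settings, false);
    // b.model === 'gpt-4o' && b.modelIdForRegistry === 'gpt-4o'
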
@@ -654,56 +623,11 @@ export async function* runAssistantOpenAI(client, settings, userMessage, message
         openaiMessages.push(openaiAssistantMsg);
         messages.push({ role: 'assistant', content: assistantContent });
         if (finishReason === 'tool_calls' && Object.keys(toolCalls).length > 0) {
-            const toolResults = [];
-            for (const tc of Object.values(toolCalls)) {
-                yield color(`\n\n[${tc.name}]\n`, Colors.dim);
-                let result;
-                // Check for dangerous commands that need confirmation
-                if (tc.name === 'run_command' && confirmCommand) {
-                    const input = tc.input;
-                    const command = input.command;
-                    const gist = input.gist;
-                    const dangerCheck = checkCommand(command);
-                    if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
-                        // Request confirmation from user
-                        yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
-                        const confirmResult = await confirmCommand(command, dangerCheck, gist);
-                        if (!confirmResult.confirmed) {
-                            result = 'Command cancelled by user.';
-                            yield color(`${result}\n`, Colors.dim);
-                        }
-                        else {
-                            // User confirmed - execute the command
-                            result = await handleToolCall(tc.name, tc.input);
-                            const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                            yield color(`${preview}\n`, Colors.dim);
-                        }
-                    }
-                    else {
-                        // Not dangerous or already approved - execute normally
-                        result = await handleToolCall(tc.name, tc.input);
-                        const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                        yield color(`${preview}\n`, Colors.dim);
-                    }
-                }
-                else {
-                    // Not run_command or no confirmation callback - execute normally
-                    result = await handleToolCall(tc.name, tc.input);
-                    const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                    yield color(`${preview}\n`, Colors.dim);
-                }
-                openaiMessages.push({
-                    role: 'tool',
-                    tool_call_id: tc.id,
-                    content: result,
-                });
-                toolResults.push({
-                    type: 'tool_result',
-                    tool_use_id: tc.id,
-                    content: result,
-                });
+            const { anthropicResults, openaiResults } = yield* executeToolCalls(Object.values(toolCalls), confirmCommand, approvedCategories);
+            for (const r of openaiResults) {
+                openaiMessages.push({ role: 'tool', tool_call_id: r.tool_call_id, content: r.content });
             }
-            messages.push({ role: 'user', content: toolResults });
+            messages.push({ role: 'user', content: anthropicResults });
         }
         else {
             break;
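
For context on how these parameters line up at a call site: confirmCommand resolves to an object with a confirmed flag, getQueuedMessage is polled once per loop iteration, and approvedCategories suppresses re-prompting for command categories the user already approved. A sketch assembled from the parameter docs rather than from the package's actual UI code (pendingUpdates and the always-confirm callback are hypothetical):

    const approvedCategories = new Set();
    const pendingUpdates = [];                   // hypothetical queue of mid-run user inputs
    async function confirmCommand(command, dangerCheck, gist) {
        // ...prompt the user here; { confirmed: false } turns the tool result
        // into 'Command cancelled by user.'
        return { confirmed: true };
    }
    for await (const chunk of runAssistantOpenAI(
        client, settings, userMessage, messages,
        false,                                   // isAzure
        undefined,                               // signal
        () => pendingUpdates.shift(),            // getQueuedMessage
        confirmCommand,
        approvedCategories,
    )) {
        process.stdout.write(chunk);
    }
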
@@ -719,85 +643,15 @@ export async function* runAssistantOpenAINonStreaming(client, settings, userMess
  * @param approvedCategories - Set of command categories approved for the session
  */
 export async function* runAssistantOpenAINonStreaming(client, settings, userMessage, messages, isAzure = false, signal, getQueuedMessage, confirmCommand, approvedCategories) {
-    // For Azure, resolve the model ID (azure-large/azure-small) to the actual deployment name
-    let model;
-    let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
-    if (isAzure) {
-        modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
-        const creds = loadCredentials();
-        if (creds?.llm) {
-            model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
-        }
-        else {
-            // Fallback to settings if no cloud credentials (local Azure config)
-            model = settings.azure.deployment;
-        }
-    }
-    else {
-        model = settings.openai.model;
-        modelIdForRegistry = model;
-    }
-    // Convert messages format for OpenAI
-    const openaiMessages = [{ role: 'system', content: SYSTEM_PROMPT }];
-    for (const msg of messages) {
-        if (msg.role === 'user') {
-            if (Array.isArray(msg.content)) {
-                // Tool results
-                for (const item of msg.content) {
-                    if (item.type === 'tool_result') {
-                        openaiMessages.push({
-                            role: 'tool',
-                            tool_call_id: item.tool_use_id || '',
-                            content: item.content || '',
-                        });
-                    }
-                }
-            }
-            else {
-                openaiMessages.push({ role: 'user', content: msg.content });
-            }
-        }
-        else if (msg.role === 'assistant') {
-            if (Array.isArray(msg.content)) {
-                const toolCallsArr = [];
-                let textContent = '';
-                for (const item of msg.content) {
-                    if (item.type === 'text') {
-                        textContent = item.text || '';
-                    }
-                    else if (item.type === 'tool_use') {
-                        toolCallsArr.push({
-                            id: item.id || '',
-                            type: 'function',
-                            function: {
-                                name: item.name || '',
-                                arguments: JSON.stringify(item.input || {}),
-                            },
-                        });
-                    }
-                }
-                const assistantMsg = {
-                    role: 'assistant',
-                    content: textContent || null,
-                };
-                if (toolCallsArr.length > 0) {
-                    assistantMsg.tool_calls = toolCallsArr;
-                }
-                openaiMessages.push(assistantMsg);
-            }
-        }
-    }
+    const { model, modelIdForRegistry } = resolveOpenAIModel(settings, isAzure);
+    const openaiMessages = convertMessagesToOpenAI(messages);
     openaiMessages.push({ role: 'user', content: userMessage });
     messages.push({ role: 'user', content: userMessage });
     while (true) {
         // Check for queued messages at the start of each iteration
         const queuedMessage = getQueuedMessage?.();
         if (queuedMessage) {
-            // Inject the queued message into both message formats
-            const injectedContent = `[User update while AI was working]: ${queuedMessage}`;
-            messages.push({ role: 'user', content: injectedContent });
-            openaiMessages.push({ role: 'user', content: injectedContent });
-            yield color(`\n[Queued message injected: "${queuedMessage.length > 50 ? queuedMessage.slice(0, 50) + '...' : queuedMessage}"]\n`, Colors.dim);
+            yield* injectQueuedMessage(queuedMessage, messages, openaiMessages);
         }
         // Build request options with dynamic max_tokens parameter based on model ID (not deployment name)
         const maxTokensParam = ModelRegistry.getMaxTokensParam(modelIdForRegistry);
@@ -866,56 +720,11 @@ export async function* runAssistantOpenAINonStreaming(client, settings, userMess
         openaiMessages.push(openaiAssistantMsg);
         messages.push({ role: 'assistant', content: assistantContent });
         if (finishReason === 'tool_calls' && Object.keys(toolCalls).length > 0) {
-            const toolResults = [];
-            for (const tc of Object.values(toolCalls)) {
-                yield color(`\n\n[${tc.name}]\n`, Colors.dim);
-                let result;
-                // Check for dangerous commands that need confirmation
-                if (tc.name === 'run_command' && confirmCommand) {
-                    const input = tc.input;
-                    const command = input.command;
-                    const gist = input.gist;
-                    const dangerCheck = checkCommand(command);
-                    if (dangerCheck.isDangerous && !allCategoriesApproved(dangerCheck, approvedCategories || new Set())) {
-                        // Request confirmation from user
-                        yield color(`Dangerous command detected. Waiting for confirmation...\n`, Colors.yellow);
-                        const confirmResult = await confirmCommand(command, dangerCheck, gist);
-                        if (!confirmResult.confirmed) {
-                            result = 'Command cancelled by user.';
-                            yield color(`${result}\n`, Colors.dim);
-                        }
-                        else {
-                            // User confirmed - execute the command
-                            result = await handleToolCall(tc.name, tc.input);
-                            const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                            yield color(`${preview}\n`, Colors.dim);
-                        }
-                    }
-                    else {
-                        // Not dangerous or already approved - execute normally
-                        result = await handleToolCall(tc.name, tc.input);
-                        const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                        yield color(`${preview}\n`, Colors.dim);
-                    }
-                }
-                else {
-                    // Not run_command or no confirmation callback - execute normally
-                    result = await handleToolCall(tc.name, tc.input);
-                    const preview = result.length > 500 ? result.slice(0, 500) + '...' : result;
-                    yield color(`${preview}\n`, Colors.dim);
-                }
-                openaiMessages.push({
-                    role: 'tool',
-                    tool_call_id: tc.id,
-                    content: result,
-                });
-                toolResults.push({
-                    type: 'tool_result',
-                    tool_use_id: tc.id,
-                    content: result,
-                });
+            const { anthropicResults, openaiResults } = yield* executeToolCalls(Object.values(toolCalls), confirmCommand, approvedCategories);
+            for (const r of openaiResults) {
+                openaiMessages.push({ role: 'tool', tool_call_id: r.tool_call_id, content: r.content });
             }
-            messages.push({ role: 'user', content: toolResults });
+            messages.push({ role: 'user', content: anthropicResults });
         }
         else {
             break;
@@ -1047,23 +856,7 @@ async function summarizeWithAnthropic(client, settings, conversationText) {
  * Summarize a conversation using the OpenAI/Azure API.
  */
 async function summarizeWithOpenAI(client, settings, conversationText, isAzure) {
-    // For Azure, resolve the model ID to the actual deployment name
-    let model;
-    let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
-    if (isAzure) {
-        modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
-        const creds = loadCredentials();
-        if (creds?.llm) {
-            model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
-        }
-        else {
-            model = settings.azure.deployment;
-        }
-    }
-    else {
-        model = settings.openai.model;
-        modelIdForRegistry = model;
-    }
+    const { model, modelIdForRegistry } = resolveOpenAIModel(settings, isAzure);
     // Build request options with dynamic max_tokens parameter based on model ID (not deployment name)
     const maxTokensParam = ModelRegistry.getMaxTokensParam(modelIdForRegistry);
     const baseOptions = {
@@ -1155,22 +948,7 @@ export async function generateAITitle(client, settings, messages) {
     }
     else if (settings.provider === Provider.OPENAI || settings.provider === Provider.AZURE) {
         const openaiClient = client;
-        let model;
-        let modelIdForRegistry; // The model ID to use for ModelRegistry lookups
-        if (settings.provider === Provider.AZURE) {
-            modelIdForRegistry = settings.azure.deployment; // e.g., 'azure-large'
-            const creds = loadCredentials();
-            if (creds?.llm) {
-                model = resolveAzureDeployment(settings.azure.deployment, creds.llm);
-            }
-            else {
-                model = settings.azure.deployment;
-            }
-        }
-        else {
-            model = settings.openai.model;
-            modelIdForRegistry = model;
-        }
+        const { model, modelIdForRegistry } = resolveOpenAIModel(settings, settings.provider === Provider.AZURE);
         // Build request options with dynamic max_tokens parameter based on model ID (not deployment name)
         const maxTokensParam = ModelRegistry.getMaxTokensParam(modelIdForRegistry);
         const baseOptions = {
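
The "dynamic max_tokens parameter" comments refer to building the request body with a computed property name, since ModelRegistry.getMaxTokensParam decides which token-limit key a given model accepts. A minimal sketch (the concrete key names and the limit are assumptions, not values read from ModelRegistry):

    const maxTokensParam = ModelRegistry.getMaxTokensParam(modelIdForRegistry);
    // e.g. 'max_tokens' on most chat models vs. 'max_completion_tokens' on models
    // that renamed the parameter; both names are assumed here for illustration.
    const baseOptions = {
        model,
        messages: openaiMessages,
        [maxTokensParam]: 1024,                  // hypothetical limit
    };
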