sam-coder-cli 1.0.9 → 1.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/bin/agi-cli.js +142 -129
  2. package/package.json +1 -1
package/bin/agi-cli.js CHANGED
@@ -387,14 +387,14 @@ async function callOpenRouter(messages, currentModel, useJson = false) {
387
387
  const response = await fetch(`${API_BASE_URL}/chat/completions`, {
388
388
  method: 'POST',
389
389
  headers: {
390
- 'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
390
+ 'Authorization': `Bearer ${apiKey}`,
391
391
  'Content-Type': 'application/json'
392
392
  },
393
393
  body: JSON.stringify({
394
394
  model: currentModel,
395
395
  messages: messages,
396
- tools: tools,
397
- tool_choice: 'auto'
396
+ tools: useJson ? undefined : tools,
397
+ tool_choice: useJson ? undefined : 'auto'
398
398
  })
399
399
  });
400
400
 
@@ -416,122 +416,177 @@ async function callOpenRouter(messages, currentModel, useJson = false) {
416
416
 
417
417
  // Process a query with tool calling
418
418
  async function processQueryWithTools(query, conversation = [], currentModel) {
419
- // Add user message to conversation
420
419
  const userMessage = { role: 'user', content: query };
421
420
  const messages = [...conversation, userMessage];
422
421
 
423
- // Add system message if this is the first message
424
- if (conversation.length === 0) {
425
- messages.unshift({
426
- role: 'system',
427
- content: `You are a helpful AI assistant with access to tools. Use the tools when needed.
428
- You can use multiple tools in sequence if needed to complete the task.
429
- When using tools, make sure to provide all required parameters.
430
- If a tool fails, you can try again with different parameters or a different approach.`
431
- });
432
- }
433
-
434
422
  ui.startThinking();
435
423
 
436
424
  try {
437
- const response = await callOpenRouterWithTools(messages, currentModel);
425
+ const response = await callOpenRouter(messages, currentModel);
438
426
  const assistantMessage = response.choices[0].message;
439
427
  messages.push(assistantMessage);
440
428
 
441
- // If there are no tool calls, we're done
442
- if (!assistantMessage.tool_calls || assistantMessage.tool_calls.length === 0) {
429
+ if (assistantMessage.tool_calls) {
430
+ const toolResults = await handleToolCalls(assistantMessage.tool_calls, messages);
431
+ messages.push(...toolResults);
432
+
433
+ ui.startThinking();
434
+ const finalResponseObj = await callOpenRouter(messages, currentModel);
435
+ const finalAssistantMessage = finalResponseObj.choices[0].message;
436
+ messages.push(finalAssistantMessage);
443
437
  ui.stopThinking();
438
+
444
439
  return {
445
- response: assistantMessage.content || 'No content in response',
440
+ response: finalAssistantMessage.content,
441
+ conversation: messages
442
+ };
443
+ } else {
444
+ ui.stopThinking();
445
+ return {
446
+ response: assistantMessage.content,
446
447
  conversation: messages
447
448
  };
448
449
  }
449
-
450
- // Process tool calls
451
- ui.stopThinking(); // Stop thinking while tools execute
452
- console.log(`🛠️ Executing ${assistantMessage.tool_calls.length} tools...`);
453
- const toolResults = await handleToolCalls(assistantMessage.tool_calls, messages);
454
-
455
- // Add tool results to messages
456
- messages.push(...toolResults);
457
-
458
- // Now, get the AI's response to the tool results
459
- ui.startThinking();
460
- const finalResponseObj = await callOpenRouterWithTools(messages, currentModel);
461
- const finalAssistantMessage = finalResponseObj.choices[0].message;
462
- messages.push(finalAssistantMessage);
463
- ui.stopThinking();
464
-
465
- return {
466
- response: finalAssistantMessage.content || 'Actions executed. What is the next step?',
467
- conversation: messages
468
- };
469
-
470
450
  } catch (error) {
471
451
  ui.stopThinking();
472
- console.error('❌ Error during processing:', error);
452
+ ui.showError(`Error processing query: ${error.message}`);
473
453
  return {
474
- response: `An error occurred: ${error.message}`,
454
+ response: `Error: ${error.message}`,
475
455
  conversation: messages
476
456
  };
477
457
  }
478
458
  }
479
459
 
480
- // Process a query with action handling (legacy function calling)
460
+ async function handleToolCalls(toolCalls, messages) {
461
+ const results = [];
462
+
463
+ for (const toolCall of toolCalls) {
464
+ const functionName = toolCall.function.name;
465
+ let args;
466
+
467
+ try {
468
+ args = JSON.parse(toolCall.function.arguments);
469
+ } catch (error) {
470
+ console.error('❌ Failed to parse tool arguments:', error);
471
+ results.push({
472
+ tool_call_id: toolCall.id,
473
+ role: 'tool',
474
+ name: functionName,
475
+ content: JSON.stringify({ error: `Invalid arguments format: ${error.message}` })
476
+ });
477
+ continue;
478
+ }
479
+
480
+ console.log(`🔧 Executing ${functionName} with args:`, args);
481
+
482
+ try {
483
+ if (!agentUtils[functionName]) {
484
+ throw new Error(`Tool '${functionName}' not found`);
485
+ }
486
+
487
+ const result = await agentUtils[functionName](...Object.values(args));
488
+ console.log('✅ Tool executed successfully');
489
+
490
+ const resultContent = typeof result === 'string' ? result : JSON.stringify(result);
491
+
492
+ results.push({
493
+ tool_call_id: toolCall.id,
494
+ role: 'tool',
495
+ name: functionName,
496
+ content: resultContent
497
+ });
498
+ } catch (error) {
499
+ console.error('❌ Tool execution failed:', error);
500
+
501
+ results.push({
502
+ tool_call_id: toolCall.id,
503
+ role: 'tool',
504
+ name: functionName,
505
+ content: JSON.stringify({
506
+ error: error.message,
507
+ stack: process.env.DEBUG ? error.stack : undefined
508
+ })
509
+ });
510
+ }
511
+ }
512
+
513
+ return results;
514
+ }
515
+
516
+ async function executeAction(action) {
517
+ const { type, data } = action;
518
+
519
+ switch (type) {
520
+ case 'read':
521
+ return await agentUtils.readFile(data.path);
522
+ case 'write':
523
+ return await agentUtils.writeFile(data.path, data.content);
524
+ case 'edit':
525
+ return await agentUtils.editFile(data.path, data.edits);
526
+ case 'command':
527
+ return await agentUtils.runCommand(data.command);
528
+ case 'search':
529
+ if (data.type === 'files') {
530
+ return await agentUtils.searchFiles(data.pattern);
531
+ }
532
+ throw new Error('Text search is not implemented yet');
533
+ case 'execute':
534
+ const cmd = data.language === 'bash'
535
+ ? data.code
536
+ : `node -e "${data.code.replace(/"/g, '\"')}"`;
537
+ return await agentUtils.runCommand(cmd);
538
+ case 'browse':
539
+ throw new Error('Web browsing is not implemented yet');
540
+ case 'analyze':
541
+ return `Analysis requested for code: ${data.code}\nQuestion: ${data.question}`;
542
+ case 'stop':
543
+ return 'Stopping action execution';
544
+ default:
545
+ throw new Error(`Unknown action type: ${type}`);
546
+ }
547
+ }
548
+
481
549
  async function processQuery(query, conversation = [], currentModel) {
482
550
  try {
483
- // Add user message to conversation
484
551
  const userMessage = { role: 'user', content: query };
485
- const messages = [...conversation, userMessage];
486
-
487
- // Add system message if this is the first message
488
- if (conversation.length === 0) {
489
- messages.unshift({
490
- role: 'system',
491
- content: FUNCTION_CALLING_PROMPT
492
- });
493
- }
552
+ let messages = [...conversation, userMessage];
553
+ let actionCount = 0;
554
+ const MAX_ACTIONS = 10;
494
555
 
495
556
  ui.startThinking();
496
557
 
497
- const response = await callOpenRouterWithFunctions(messages, currentModel);
498
- const assistantMessage = response.choices[0].message;
558
+ const responseText = await callOpenRouter(messages, currentModel, true);
559
+ const assistantMessage = responseText.choices[0].message;
499
560
  messages.push(assistantMessage);
500
-
501
- // Try to extract JSON from the response
561
+
502
562
  const actionData = extractJsonFromMarkdown(assistantMessage.content);
503
563
 
504
- if (actionData && actionData.actions) {
564
+ if (!actionData || actionData.type === 'stop') {
505
565
  ui.stopThinking();
506
- ui.showAction('Executing actions...');
507
- const results = [];
508
-
509
- for (const action of actionData.actions) {
510
- ui.showAction(` → ${action.type} action`);
511
- try {
512
- const result = await executeAction(action);
513
- results.push({ type: action.type, success: true, result });
514
- } catch (error) {
515
- results.push({ type: action.type, success: false, error: error.message });
516
- }
517
- }
518
-
519
- // Add action results to the conversation
520
- messages.push({
521
- role: 'system',
522
- content: `Action results: ${JSON.stringify(results, null, 2)}`
523
- });
524
-
525
- // Continue the conversation with the results
526
566
  return {
527
- response: `Actions executed. Results: ${JSON.stringify(results, null, 2)}`,
567
+ response: assistantMessage.content,
528
568
  conversation: messages
529
569
  };
530
570
  }
531
-
571
+
572
+ let currentAction = actionData;
573
+ while (currentAction && currentAction.type !== 'stop' && actionCount < MAX_ACTIONS) {
574
+ actionCount++;
575
+ const result = await executeAction(currentAction);
576
+
577
+ messages.push({ role: 'assistant', content: `Action result: ${result}` });
578
+
579
+ ui.startThinking();
580
+ const nextResponse = await callOpenRouter(messages, currentModel, true);
581
+ const nextAssistantMessage = nextResponse.choices[0].message;
582
+ messages.push(nextAssistantMessage);
583
+
584
+ currentAction = extractJsonFromMarkdown(nextAssistantMessage.content);
585
+ }
586
+
532
587
  ui.stopThinking();
533
588
  return {
534
- response: assistantMessage.content || 'No content in response',
589
+ response: messages[messages.length - 1].content,
535
590
  conversation: messages
536
591
  };
537
592
  } catch (error) {
@@ -539,12 +594,11 @@ async function processQuery(query, conversation = [], currentModel) {
539
594
  console.error('❌ Error during processing:', error);
540
595
  return {
541
596
  response: `An error occurred: ${error.message}`,
542
- conversation: messages
597
+ conversation: conversation
543
598
  };
544
599
  }
545
600
  }
546
601
 
547
- // Main chat loop
548
602
  async function chat(rl, useToolCalling, initialModel) {
549
603
  let currentModel = initialModel;
550
604
  const conversation = [];
@@ -558,7 +612,7 @@ async function chat(rl, useToolCalling, initialModel) {
558
612
  const newModel = input.split(' ')[1];
559
613
  if (newModel) {
560
614
  currentModel = newModel;
561
- let config = await readConfig();
615
+ let config = await readConfig() || {};
562
616
  config.MODEL = currentModel;
563
617
  await writeConfig(config);
564
618
  console.log(`Model changed to: ${currentModel}`);
@@ -571,7 +625,7 @@ async function chat(rl, useToolCalling, initialModel) {
571
625
 
572
626
  if (input.toLowerCase() === '/default-model') {
573
627
  currentModel = 'deepseek/deepseek-chat-v3-0324:free';
574
- let config = await readConfig();
628
+ let config = await readConfig() || {};
575
629
  config.MODEL = currentModel;
576
630
  await writeConfig(config);
577
631
  console.log(`Model reset to default: ${currentModel}`);
@@ -597,8 +651,7 @@ async function chat(rl, useToolCalling, initialModel) {
597
651
  ui.stopThinking();
598
652
  ui.showResponse(result.response);
599
653
 
600
- // Update conversation with the full context
601
- conversation.length = 0; // Clear the array
654
+ conversation.length = 0;
602
655
  result.conversation.forEach(msg => conversation.push(msg));
603
656
 
604
657
  rl.prompt();
@@ -608,7 +661,6 @@ async function chat(rl, useToolCalling, initialModel) {
608
661
  });
609
662
  }
610
663
 
611
- // Ask user for mode selection
612
664
  function askForMode(rl) {
613
665
  return new Promise((resolve) => {
614
666
  rl.question('Select mode (1 for tool calling, 2 for function calling): ', (answer) => {
@@ -617,14 +669,13 @@ function askForMode(rl) {
617
669
  });
618
670
  }
619
671
 
620
- // Start the application
621
672
  async function readConfig() {
622
673
  try {
623
674
  const data = await fs.readFile(CONFIG_PATH, 'utf-8');
624
675
  return JSON.parse(data);
625
676
  } catch (error) {
626
677
  if (error.code === 'ENOENT') {
627
- return null; // Config file doesn't exist
678
+ return null;
628
679
  }
629
680
  throw error;
630
681
  }
@@ -671,7 +722,6 @@ async function runSetup(rl, isReconfig = false) {
671
722
  return config;
672
723
  }
673
724
 
674
- // Start the application
675
725
  async function start() {
676
726
  const rl = readline.createInterface({
677
727
  input: process.stdin,
@@ -684,42 +734,7 @@ async function start() {
684
734
  config = await runSetup(rl);
685
735
  }
686
736
 
687
- MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free'; // Load model from config or use default
688
-
689
- OPENROUTER_API_KEY = config.OPENROUTER_API_KEY;
690
- if (config.isPro && config.customApiBase) {
691
- API_BASE_URL = config.customApiBase;
692
- console.log(`🚀 Using Pro Plan custom endpoint: ${API_BASE_URL}`);
693
- }
694
-
695
- ui.showHeader();
696
- console.log('Select Mode:');
697
- console.log('1. Tool Calling (for models that support it)');
698
- console.log('2. Function Calling (legacy)');
699
-
700
- const useToolCalling = await askForMode(rl);
701
- ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
702
-
703
- // Start the chat with the selected mode
704
- await chat(rl, useToolCalling, MODEL);
705
- } catch (error) {
706
- ui.showError(error);
707
- rl.close();
708
- process.exit(1);
709
- }
710
- }
711
- const rl = readline.createInterface({
712
- input: process.stdin,
713
- output: process.stdout
714
- });
715
-
716
- try {
717
- let config = await readConfig();
718
- if (!config || !config.OPENROUTER_API_KEY) {
719
- config = await runSetup(rl);
720
- }
721
-
722
- MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free'; // Load model from config or use default
737
+ MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free';
723
738
 
724
739
  OPENROUTER_API_KEY = config.OPENROUTER_API_KEY;
725
740
  if (config.isPro && config.customApiBase) {
@@ -735,7 +750,6 @@ async function start() {
735
750
  const useToolCalling = await askForMode(rl);
736
751
  ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
737
752
 
738
- // Start the chat with the selected mode
739
753
  await chat(rl, useToolCalling, MODEL);
740
754
  } catch (error) {
741
755
  ui.showError(error);
@@ -744,5 +758,4 @@ async function start() {
744
758
  }
745
759
  }
746
760
 
747
- // Start the application
748
761
  start().catch(console.error);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "sam-coder-cli",
3
- "version": "1.0.9",
3
+ "version": "1.0.10",
4
4
  "description": "SAM-CODER: An animated command-line AI assistant with agency capabilities.",
5
5
  "main": "bin/agi-cli.js",
6
6
  "bin": {