sam-coder-cli 1.0.9 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/bin/agi-cli.js +157 -133
  2. package/package.json +1 -1
package/bin/agi-cli.js CHANGED
@@ -382,20 +382,31 @@ function extractJsonFromMarkdown(text) {
382
382
  // Call OpenRouter API with tool calling
383
383
  async function callOpenRouter(messages, currentModel, useJson = false) {
384
384
  const apiKey = OPENROUTER_API_KEY;
385
+ const isCustomEndpoint = API_BASE_URL !== 'https://openrouter.ai/api/v1';
386
+
387
+ let body = {
388
+ model: currentModel,
389
+ messages: messages,
390
+ };
391
+
392
+ // For standard OpenRouter calls that are not legacy, add tool parameters.
393
+ if (!isCustomEndpoint && !useJson) {
394
+ body.tools = tools;
395
+ body.tool_choice = 'auto';
396
+ }
397
+ // For custom endpoints (like vllm), ensure no tool-related parameters are sent.
398
+ else if (isCustomEndpoint) {
399
+ // The body is already clean for vLLM, containing only model and messages.
400
+ }
385
401
 
386
402
  try {
387
403
  const response = await fetch(`${API_BASE_URL}/chat/completions`, {
388
404
  method: 'POST',
389
405
  headers: {
390
- 'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
406
+ 'Authorization': `Bearer ${apiKey}`,
391
407
  'Content-Type': 'application/json'
392
408
  },
393
- body: JSON.stringify({
394
- model: currentModel,
395
- messages: messages,
396
- tools: tools,
397
- tool_choice: 'auto'
398
- })
409
+ body: JSON.stringify(body)
399
410
  });
400
411
 
401
412
  if (!response.ok) {
@@ -416,122 +427,177 @@ async function callOpenRouter(messages, currentModel, useJson = false) {
416
427
 
417
428
  // Process a query with tool calling
418
429
  async function processQueryWithTools(query, conversation = [], currentModel) {
419
- // Add user message to conversation
420
430
  const userMessage = { role: 'user', content: query };
421
431
  const messages = [...conversation, userMessage];
422
432
 
423
- // Add system message if this is the first message
424
- if (conversation.length === 0) {
425
- messages.unshift({
426
- role: 'system',
427
- content: `You are a helpful AI assistant with access to tools. Use the tools when needed.
428
- You can use multiple tools in sequence if needed to complete the task.
429
- When using tools, make sure to provide all required parameters.
430
- If a tool fails, you can try again with different parameters or a different approach.`
431
- });
432
- }
433
-
434
433
  ui.startThinking();
435
434
 
436
435
  try {
437
- const response = await callOpenRouterWithTools(messages, currentModel);
436
+ const response = await callOpenRouter(messages, currentModel);
438
437
  const assistantMessage = response.choices[0].message;
439
438
  messages.push(assistantMessage);
440
439
 
441
- // If there are no tool calls, we're done
442
- if (!assistantMessage.tool_calls || assistantMessage.tool_calls.length === 0) {
440
+ if (assistantMessage.tool_calls) {
441
+ const toolResults = await handleToolCalls(assistantMessage.tool_calls, messages);
442
+ messages.push(...toolResults);
443
+
444
+ ui.startThinking();
445
+ const finalResponseObj = await callOpenRouter(messages, currentModel);
446
+ const finalAssistantMessage = finalResponseObj.choices[0].message;
447
+ messages.push(finalAssistantMessage);
443
448
  ui.stopThinking();
449
+
444
450
  return {
445
- response: assistantMessage.content || 'No content in response',
451
+ response: finalAssistantMessage.content,
452
+ conversation: messages
453
+ };
454
+ } else {
455
+ ui.stopThinking();
456
+ return {
457
+ response: assistantMessage.content,
446
458
  conversation: messages
447
459
  };
448
460
  }
449
-
450
- // Process tool calls
451
- ui.stopThinking(); // Stop thinking while tools execute
452
- console.log(`🛠️ Executing ${assistantMessage.tool_calls.length} tools...`);
453
- const toolResults = await handleToolCalls(assistantMessage.tool_calls, messages);
454
-
455
- // Add tool results to messages
456
- messages.push(...toolResults);
457
-
458
- // Now, get the AI's response to the tool results
459
- ui.startThinking();
460
- const finalResponseObj = await callOpenRouterWithTools(messages, currentModel);
461
- const finalAssistantMessage = finalResponseObj.choices[0].message;
462
- messages.push(finalAssistantMessage);
463
- ui.stopThinking();
464
-
465
- return {
466
- response: finalAssistantMessage.content || 'Actions executed. What is the next step?',
467
- conversation: messages
468
- };
469
-
470
461
  } catch (error) {
471
462
  ui.stopThinking();
472
- console.error('❌ Error during processing:', error);
463
+ ui.showError(`Error processing query: ${error.message}`);
473
464
  return {
474
- response: `An error occurred: ${error.message}`,
465
+ response: `Error: ${error.message}`,
475
466
  conversation: messages
476
467
  };
477
468
  }
478
469
  }
479
470
 
480
- // Process a query with action handling (legacy function calling)
471
+ async function handleToolCalls(toolCalls, messages) {
472
+ const results = [];
473
+
474
+ for (const toolCall of toolCalls) {
475
+ const functionName = toolCall.function.name;
476
+ let args;
477
+
478
+ try {
479
+ args = JSON.parse(toolCall.function.arguments);
480
+ } catch (error) {
481
+ console.error('❌ Failed to parse tool arguments:', error);
482
+ results.push({
483
+ tool_call_id: toolCall.id,
484
+ role: 'tool',
485
+ name: functionName,
486
+ content: JSON.stringify({ error: `Invalid arguments format: ${error.message}` })
487
+ });
488
+ continue;
489
+ }
490
+
491
+ console.log(`🔧 Executing ${functionName} with args:`, args);
492
+
493
+ try {
494
+ if (!agentUtils[functionName]) {
495
+ throw new Error(`Tool '${functionName}' not found`);
496
+ }
497
+
498
+ const result = await agentUtils[functionName](...Object.values(args));
499
+ console.log('✅ Tool executed successfully');
500
+
501
+ const resultContent = typeof result === 'string' ? result : JSON.stringify(result);
502
+
503
+ results.push({
504
+ tool_call_id: toolCall.id,
505
+ role: 'tool',
506
+ name: functionName,
507
+ content: resultContent
508
+ });
509
+ } catch (error) {
510
+ console.error('❌ Tool execution failed:', error);
511
+
512
+ results.push({
513
+ tool_call_id: toolCall.id,
514
+ role: 'tool',
515
+ name: functionName,
516
+ content: JSON.stringify({
517
+ error: error.message,
518
+ stack: process.env.DEBUG ? error.stack : undefined
519
+ })
520
+ });
521
+ }
522
+ }
523
+
524
+ return results;
525
+ }
526
+
527
+ async function executeAction(action) {
528
+ const { type, data } = action;
529
+
530
+ switch (type) {
531
+ case 'read':
532
+ return await agentUtils.readFile(data.path);
533
+ case 'write':
534
+ return await agentUtils.writeFile(data.path, data.content);
535
+ case 'edit':
536
+ return await agentUtils.editFile(data.path, data.edits);
537
+ case 'command':
538
+ return await agentUtils.runCommand(data.command);
539
+ case 'search':
540
+ if (data.type === 'files') {
541
+ return await agentUtils.searchFiles(data.pattern);
542
+ }
543
+ throw new Error('Text search is not implemented yet');
544
+ case 'execute':
545
+ const cmd = data.language === 'bash'
546
+ ? data.code
547
+ : `node -e "${data.code.replace(/"/g, '\"')}"`;
548
+ return await agentUtils.runCommand(cmd);
549
+ case 'browse':
550
+ throw new Error('Web browsing is not implemented yet');
551
+ case 'analyze':
552
+ return `Analysis requested for code: ${data.code}\nQuestion: ${data.question}`;
553
+ case 'stop':
554
+ return 'Stopping action execution';
555
+ default:
556
+ throw new Error(`Unknown action type: ${type}`);
557
+ }
558
+ }
559
+
481
560
  async function processQuery(query, conversation = [], currentModel) {
482
561
  try {
483
- // Add user message to conversation
484
562
  const userMessage = { role: 'user', content: query };
485
- const messages = [...conversation, userMessage];
486
-
487
- // Add system message if this is the first message
488
- if (conversation.length === 0) {
489
- messages.unshift({
490
- role: 'system',
491
- content: FUNCTION_CALLING_PROMPT
492
- });
493
- }
563
+ let messages = [...conversation, userMessage];
564
+ let actionCount = 0;
565
+ const MAX_ACTIONS = 10;
494
566
 
495
567
  ui.startThinking();
496
568
 
497
- const response = await callOpenRouterWithFunctions(messages, currentModel);
498
- const assistantMessage = response.choices[0].message;
569
+ const responseText = await callOpenRouter(messages, currentModel, true);
570
+ const assistantMessage = responseText.choices[0].message;
499
571
  messages.push(assistantMessage);
500
-
501
- // Try to extract JSON from the response
572
+
502
573
  const actionData = extractJsonFromMarkdown(assistantMessage.content);
503
574
 
504
- if (actionData && actionData.actions) {
575
+ if (!actionData || actionData.type === 'stop') {
505
576
  ui.stopThinking();
506
- ui.showAction('Executing actions...');
507
- const results = [];
508
-
509
- for (const action of actionData.actions) {
510
- ui.showAction(` → ${action.type} action`);
511
- try {
512
- const result = await executeAction(action);
513
- results.push({ type: action.type, success: true, result });
514
- } catch (error) {
515
- results.push({ type: action.type, success: false, error: error.message });
516
- }
517
- }
518
-
519
- // Add action results to the conversation
520
- messages.push({
521
- role: 'system',
522
- content: `Action results: ${JSON.stringify(results, null, 2)}`
523
- });
524
-
525
- // Continue the conversation with the results
526
577
  return {
527
- response: `Actions executed. Results: ${JSON.stringify(results, null, 2)}`,
578
+ response: assistantMessage.content,
528
579
  conversation: messages
529
580
  };
530
581
  }
531
-
582
+
583
+ let currentAction = actionData;
584
+ while (currentAction && currentAction.type !== 'stop' && actionCount < MAX_ACTIONS) {
585
+ actionCount++;
586
+ const result = await executeAction(currentAction);
587
+
588
+ messages.push({ role: 'assistant', content: `Action result: ${result}` });
589
+
590
+ ui.startThinking();
591
+ const nextResponse = await callOpenRouter(messages, currentModel, true);
592
+ const nextAssistantMessage = nextResponse.choices[0].message;
593
+ messages.push(nextAssistantMessage);
594
+
595
+ currentAction = extractJsonFromMarkdown(nextAssistantMessage.content);
596
+ }
597
+
532
598
  ui.stopThinking();
533
599
  return {
534
- response: assistantMessage.content || 'No content in response',
600
+ response: messages[messages.length - 1].content,
535
601
  conversation: messages
536
602
  };
537
603
  } catch (error) {
@@ -539,12 +605,11 @@ async function processQuery(query, conversation = [], currentModel) {
539
605
  console.error('❌ Error during processing:', error);
540
606
  return {
541
607
  response: `An error occurred: ${error.message}`,
542
- conversation: messages
608
+ conversation: conversation
543
609
  };
544
610
  }
545
611
  }
546
612
 
547
- // Main chat loop
548
613
  async function chat(rl, useToolCalling, initialModel) {
549
614
  let currentModel = initialModel;
550
615
  const conversation = [];
@@ -558,7 +623,7 @@ async function chat(rl, useToolCalling, initialModel) {
558
623
  const newModel = input.split(' ')[1];
559
624
  if (newModel) {
560
625
  currentModel = newModel;
561
- let config = await readConfig();
626
+ let config = await readConfig() || {};
562
627
  config.MODEL = currentModel;
563
628
  await writeConfig(config);
564
629
  console.log(`Model changed to: ${currentModel}`);
@@ -571,7 +636,7 @@ async function chat(rl, useToolCalling, initialModel) {
571
636
 
572
637
  if (input.toLowerCase() === '/default-model') {
573
638
  currentModel = 'deepseek/deepseek-chat-v3-0324:free';
574
- let config = await readConfig();
639
+ let config = await readConfig() || {};
575
640
  config.MODEL = currentModel;
576
641
  await writeConfig(config);
577
642
  console.log(`Model reset to default: ${currentModel}`);
@@ -597,8 +662,7 @@ async function chat(rl, useToolCalling, initialModel) {
597
662
  ui.stopThinking();
598
663
  ui.showResponse(result.response);
599
664
 
600
- // Update conversation with the full context
601
- conversation.length = 0; // Clear the array
665
+ conversation.length = 0;
602
666
  result.conversation.forEach(msg => conversation.push(msg));
603
667
 
604
668
  rl.prompt();
@@ -608,7 +672,6 @@ async function chat(rl, useToolCalling, initialModel) {
608
672
  });
609
673
  }
610
674
 
611
- // Ask user for mode selection
612
675
  function askForMode(rl) {
613
676
  return new Promise((resolve) => {
614
677
  rl.question('Select mode (1 for tool calling, 2 for function calling): ', (answer) => {
@@ -617,14 +680,13 @@ function askForMode(rl) {
617
680
  });
618
681
  }
619
682
 
620
- // Start the application
621
683
  async function readConfig() {
622
684
  try {
623
685
  const data = await fs.readFile(CONFIG_PATH, 'utf-8');
624
686
  return JSON.parse(data);
625
687
  } catch (error) {
626
688
  if (error.code === 'ENOENT') {
627
- return null; // Config file doesn't exist
689
+ return null;
628
690
  }
629
691
  throw error;
630
692
  }
@@ -671,7 +733,6 @@ async function runSetup(rl, isReconfig = false) {
671
733
  return config;
672
734
  }
673
735
 
674
- // Start the application
675
736
  async function start() {
676
737
  const rl = readline.createInterface({
677
738
  input: process.stdin,
@@ -684,42 +745,7 @@ async function start() {
684
745
  config = await runSetup(rl);
685
746
  }
686
747
 
687
- MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free'; // Load model from config or use default
688
-
689
- OPENROUTER_API_KEY = config.OPENROUTER_API_KEY;
690
- if (config.isPro && config.customApiBase) {
691
- API_BASE_URL = config.customApiBase;
692
- console.log(`🚀 Using Pro Plan custom endpoint: ${API_BASE_URL}`);
693
- }
694
-
695
- ui.showHeader();
696
- console.log('Select Mode:');
697
- console.log('1. Tool Calling (for models that support it)');
698
- console.log('2. Function Calling (legacy)');
699
-
700
- const useToolCalling = await askForMode(rl);
701
- ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
702
-
703
- // Start the chat with the selected mode
704
- await chat(rl, useToolCalling, MODEL);
705
- } catch (error) {
706
- ui.showError(error);
707
- rl.close();
708
- process.exit(1);
709
- }
710
- }
711
- const rl = readline.createInterface({
712
- input: process.stdin,
713
- output: process.stdout
714
- });
715
-
716
- try {
717
- let config = await readConfig();
718
- if (!config || !config.OPENROUTER_API_KEY) {
719
- config = await runSetup(rl);
720
- }
721
-
722
- MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free'; // Load model from config or use default
748
+ MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free';
723
749
 
724
750
  OPENROUTER_API_KEY = config.OPENROUTER_API_KEY;
725
751
  if (config.isPro && config.customApiBase) {
@@ -735,7 +761,6 @@ async function start() {
735
761
  const useToolCalling = await askForMode(rl);
736
762
  ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
737
763
 
738
- // Start the chat with the selected mode
739
764
  await chat(rl, useToolCalling, MODEL);
740
765
  } catch (error) {
741
766
  ui.showError(error);
@@ -744,5 +769,4 @@ async function start() {
744
769
  }
745
770
  }
746
771
 
747
- // Start the application
748
772
  start().catch(console.error);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "sam-coder-cli",
3
- "version": "1.0.9",
3
+ "version": "1.0.11",
4
4
  "description": "SAM-CODER: An animated command-line AI assistant with agency capabilities.",
5
5
  "main": "bin/agi-cli.js",
6
6
  "bin": {