sam-coder-cli 1.0.8 → 1.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/bin/agi-cli.js +82 -157
  2. package/package.json +1 -1
package/bin/agi-cli.js CHANGED
@@ -12,7 +12,7 @@ const execAsync = util.promisify(exec);
12
12
  // Configuration
13
13
  const CONFIG_PATH = path.join(os.homedir(), '.sam-coder-config.json');
14
14
  let OPENROUTER_API_KEY;
15
- const MODEL = 'deepseek/deepseek-chat-v3-0324:free';
15
+ let MODEL = 'deepseek/deepseek-chat-v3-0324:free';
16
16
  let API_BASE_URL = 'https://openrouter.ai/api/v1';
17
17
 
18
18
  // Tool/Function definitions for the AI
@@ -380,19 +380,18 @@ function extractJsonFromMarkdown(text) {
380
380
  }
381
381
 
382
382
  // Call OpenRouter API with tool calling
383
- async function callOpenRouterWithTools(messages) {
383
+ async function callOpenRouter(messages, currentModel, useJson = false) {
384
384
  const apiKey = OPENROUTER_API_KEY;
385
385
 
386
386
  try {
387
- const response = await fetch(API_BASE_URL + '/chat/completions', {
387
+ const response = await fetch(`${API_BASE_URL}/chat/completions`, {
388
388
  method: 'POST',
389
389
  headers: {
390
- 'Content-Type': 'application/json',
391
- 'Authorization': `Bearer ${apiKey}`,
392
- 'HTTP-Referer': 'https://github.com/yourusername/agi-cli'
390
+ 'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
391
+ 'Content-Type': 'application/json'
393
392
  },
394
393
  body: JSON.stringify({
395
- model: MODEL,
394
+ model: currentModel,
396
395
  messages: messages,
397
396
  tools: tools,
398
397
  tool_choice: 'auto'
@@ -415,100 +414,8 @@ async function callOpenRouterWithTools(messages) {
415
414
  }
416
415
  }
417
416
 
418
- // Call OpenRouter API with function calling (legacy)
419
- async function callOpenRouterWithFunctions(messages) {
420
- const apiKey = OPENROUTER_API_KEY;
421
-
422
- try {
423
- const response = await fetch(API_BASE_URL + '/chat/completions', {
424
- method: 'POST',
425
- headers: {
426
- 'Content-Type': 'application/json',
427
- 'Authorization': `Bearer ${apiKey}`,
428
- 'HTTP-Referer': 'https://github.com/yourusername/agi-cli'
429
- },
430
- body: JSON.stringify({
431
- model: MODEL,
432
- messages: messages
433
- })
434
- });
435
-
436
- if (!response.ok) {
437
- if (response.status === 401) {
438
- throw new Error('AuthenticationError: Invalid API key. Please run /setup to reconfigure.');
439
- }
440
- const error = await response.json();
441
- throw new Error(`API error: ${error.error?.message || response.statusText}`);
442
- }
443
-
444
- return await response.json();
445
- } catch (error) {
446
- console.error('API call failed:', error);
447
- ui.stopThinking();
448
- throw new Error(`Failed to call OpenRouter API: ${error.message}`);
449
- }
450
- }
451
-
452
- // Process tool calls from the AI response
453
- async function handleToolCalls(toolCalls, messages) {
454
- const results = [];
455
-
456
- for (const toolCall of toolCalls) {
457
- const functionName = toolCall.function.name;
458
- let args;
459
-
460
- try {
461
- args = JSON.parse(toolCall.function.arguments);
462
- } catch (error) {
463
- console.error('❌ Failed to parse tool arguments:', error);
464
- results.push({
465
- tool_call_id: toolCall.id,
466
- role: 'tool',
467
- name: functionName,
468
- content: JSON.stringify({ error: `Invalid arguments format: ${error.message}` })
469
- });
470
- continue;
471
- }
472
-
473
- console.log(`🔧 Executing ${functionName} with args:`, args);
474
-
475
- try {
476
- if (!agentUtils[functionName]) {
477
- throw new Error(`Tool '${functionName}' not found`);
478
- }
479
-
480
- const result = await agentUtils[functionName](...Object.values(args));
481
- console.log('✅ Tool executed successfully');
482
-
483
- // Stringify the result if it's not already a string
484
- const resultContent = typeof result === 'string' ? result : JSON.stringify(result);
485
-
486
- results.push({
487
- tool_call_id: toolCall.id,
488
- role: 'tool',
489
- name: functionName,
490
- content: resultContent
491
- });
492
- } catch (error) {
493
- console.error('❌ Tool execution failed:', error);
494
-
495
- results.push({
496
- tool_call_id: toolCall.id,
497
- role: 'tool',
498
- name: functionName,
499
- content: JSON.stringify({
500
- error: error.message,
501
- stack: process.env.DEBUG ? error.stack : undefined
502
- })
503
- });
504
- }
505
- }
506
-
507
- return results;
508
- }
509
-
510
417
  // Process a query with tool calling
511
- async function processQueryWithTools(query, conversation = []) {
418
+ async function processQueryWithTools(query, conversation = [], currentModel) {
512
419
  // Add user message to conversation
513
420
  const userMessage = { role: 'user', content: query };
514
421
  const messages = [...conversation, userMessage];
@@ -527,7 +434,7 @@ If a tool fails, you can try again with different parameters or a different appr
527
434
  ui.startThinking();
528
435
 
529
436
  try {
530
- const response = await callOpenRouterWithTools(messages);
437
+ const response = await callOpenRouterWithTools(messages, currentModel);
531
438
  const assistantMessage = response.choices[0].message;
532
439
  messages.push(assistantMessage);
533
440
 
@@ -550,7 +457,7 @@ If a tool fails, you can try again with different parameters or a different appr
550
457
 
551
458
  // Now, get the AI's response to the tool results
552
459
  ui.startThinking();
553
- const finalResponseObj = await callOpenRouterWithTools(messages);
460
+ const finalResponseObj = await callOpenRouterWithTools(messages, currentModel);
554
461
  const finalAssistantMessage = finalResponseObj.choices[0].message;
555
462
  messages.push(finalAssistantMessage);
556
463
  ui.stopThinking();
@@ -570,53 +477,8 @@ If a tool fails, you can try again with different parameters or a different appr
570
477
  }
571
478
  }
572
479
 
573
- // Execute a single action from the action system
574
- async function executeAction(action) {
575
- const { type, data } = action;
576
-
577
- switch (type) {
578
- case 'read':
579
- return await agentUtils.readFile(data.path);
580
-
581
- case 'write':
582
- return await agentUtils.writeFile(data.path, data.content);
583
-
584
- case 'edit':
585
- return await agentUtils.editFile(data.path, data.edits);
586
-
587
- case 'command':
588
- return await agentUtils.runCommand(data.command);
589
-
590
- case 'search':
591
- if (data.type === 'files') {
592
- return await agentUtils.searchFiles(data.pattern);
593
- }
594
- throw new Error('Text search is not implemented yet');
595
-
596
- case 'execute':
597
- // For execute action, we'll run it as a command
598
- const cmd = data.language === 'bash'
599
- ? data.code
600
- : `node -e "${data.code.replace(/"/g, '\\"')}"`;
601
- return await agentUtils.runCommand(cmd);
602
-
603
- case 'browse':
604
- throw new Error('Web browsing is not implemented yet');
605
-
606
- case 'analyze':
607
- // For analyze action, we'll just return the question for now
608
- return `Analysis requested for code: ${data.code}\nQuestion: ${data.question}`;
609
-
610
- case 'stop':
611
- return 'Stopping action execution';
612
-
613
- default:
614
- throw new Error(`Unknown action type: ${type}`);
615
- }
616
- }
617
-
618
480
  // Process a query with action handling (legacy function calling)
619
- async function processQuery(query, conversation = []) {
481
+ async function processQuery(query, conversation = [], currentModel) {
620
482
  try {
621
483
  // Add user message to conversation
622
484
  const userMessage = { role: 'user', content: query };
@@ -632,10 +494,10 @@ async function processQuery(query, conversation = []) {
632
494
 
633
495
  ui.startThinking();
634
496
 
635
- const response = await callOpenRouterWithFunctions(messages);
497
+ const response = await callOpenRouterWithFunctions(messages, currentModel);
636
498
  const assistantMessage = response.choices[0].message;
637
499
  messages.push(assistantMessage);
638
-
500
+
639
501
  // Try to extract JSON from the response
640
502
  const actionData = extractJsonFromMarkdown(assistantMessage.content);
641
503
 
@@ -674,16 +536,17 @@ async function processQuery(query, conversation = []) {
674
536
  };
675
537
  } catch (error) {
676
538
  ui.stopThinking();
677
- ui.showError(`Error processing query: ${error.message}`);
539
+ console.error('❌ Error during processing:', error);
678
540
  return {
679
- response: `Error: ${error.message}`,
680
- conversation: conversation || []
541
+ response: `An error occurred: ${error.message}`,
542
+ conversation: messages
681
543
  };
682
544
  }
683
545
  }
684
546
 
685
547
  // Main chat loop
686
- async function chat(rl, useToolCalling) {
548
+ async function chat(rl, useToolCalling, initialModel) {
549
+ let currentModel = initialModel;
687
550
  const conversation = [];
688
551
  console.log('Type your message, or "exit" to quit.');
689
552
 
@@ -691,6 +554,31 @@ async function chat(rl, useToolCalling) {
691
554
  rl.prompt();
692
555
 
693
556
  rl.on('line', async (input) => {
557
+ if (input.toLowerCase().startsWith('/model')) {
558
+ const newModel = input.split(' ')[1];
559
+ if (newModel) {
560
+ currentModel = newModel;
561
+ let config = await readConfig();
562
+ config.MODEL = currentModel;
563
+ await writeConfig(config);
564
+ console.log(`Model changed to: ${currentModel}`);
565
+ } else {
566
+ console.log('Please specify a model. Usage: /model <model_name>');
567
+ }
568
+ rl.prompt();
569
+ return;
570
+ }
571
+
572
+ if (input.toLowerCase() === '/default-model') {
573
+ currentModel = 'deepseek/deepseek-chat-v3-0324:free';
574
+ let config = await readConfig();
575
+ config.MODEL = currentModel;
576
+ await writeConfig(config);
577
+ console.log(`Model reset to default: ${currentModel}`);
578
+ rl.prompt();
579
+ return;
580
+ }
581
+
694
582
  if (input.toLowerCase() === '/setup') {
695
583
  await runSetup(rl, true);
696
584
  console.log('\nSetup complete. Please restart the application to apply changes.');
@@ -704,8 +592,8 @@ async function chat(rl, useToolCalling) {
704
592
  }
705
593
 
706
594
  const result = useToolCalling
707
- ? await processQueryWithTools(input, conversation)
708
- : await processQuery(input, conversation);
595
+ ? await processQueryWithTools(input, conversation, currentModel)
596
+ : await processQuery(input, conversation, currentModel);
709
597
  ui.stopThinking();
710
598
  ui.showResponse(result.response);
711
599
 
@@ -796,6 +684,43 @@ async function start() {
796
684
  config = await runSetup(rl);
797
685
  }
798
686
 
687
+ MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free'; // Load model from config or use default
688
+
689
+ OPENROUTER_API_KEY = config.OPENROUTER_API_KEY;
690
+ if (config.isPro && config.customApiBase) {
691
+ API_BASE_URL = config.customApiBase;
692
+ console.log(`🚀 Using Pro Plan custom endpoint: ${API_BASE_URL}`);
693
+ }
694
+
695
+ ui.showHeader();
696
+ console.log('Select Mode:');
697
+ console.log('1. Tool Calling (for models that support it)');
698
+ console.log('2. Function Calling (legacy)');
699
+
700
+ const useToolCalling = await askForMode(rl);
701
+ ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
702
+
703
+ // Start the chat with the selected mode
704
+ await chat(rl, useToolCalling, MODEL);
705
+ } catch (error) {
706
+ ui.showError(error);
707
+ rl.close();
708
+ process.exit(1);
709
+ }
710
+ }
711
+ const rl = readline.createInterface({
712
+ input: process.stdin,
713
+ output: process.stdout
714
+ });
715
+
716
+ try {
717
+ let config = await readConfig();
718
+ if (!config || !config.OPENROUTER_API_KEY) {
719
+ config = await runSetup(rl);
720
+ }
721
+
722
+ MODEL = config.MODEL || 'deepseek/deepseek-chat-v3-0324:free'; // Load model from config or use default
723
+
799
724
  OPENROUTER_API_KEY = config.OPENROUTER_API_KEY;
800
725
  if (config.isPro && config.customApiBase) {
801
726
  API_BASE_URL = config.customApiBase;
@@ -811,7 +736,7 @@ async function start() {
811
736
  ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
812
737
 
813
738
  // Start the chat with the selected mode
814
- await chat(rl, useToolCalling);
739
+ await chat(rl, useToolCalling, MODEL);
815
740
  } catch (error) {
816
741
  ui.showError(error);
817
742
  rl.close();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "sam-coder-cli",
3
- "version": "1.0.8",
3
+ "version": "1.0.9",
4
4
  "description": "SAM-CODER: An animated command-line AI assistant with agency capabilities.",
5
5
  "main": "bin/agi-cli.js",
6
6
  "bin": {