sam-coder-cli 1.0.59 → 1.0.61

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/bin/agi-cli.js +300 -2
  2. package/package.json +1 -1
package/bin/agi-cli.js CHANGED
@@ -17,6 +17,7 @@ const CONFIG_PATH = path.join(os.homedir(), '.sam-coder-config.json');
17
17
  let OPENROUTER_API_KEY;
18
18
  let MODEL = 'deepseek/deepseek-chat-v3-0324:free';
19
19
  let API_BASE_URL = 'https://openrouter.ai/api/v1';
20
+ let SHOW_THOUGHTS = false; // Optional: reveal <think> content in console
20
21
 
21
22
  // Tool/Function definitions for the AI
22
23
  const tools = [
@@ -619,6 +620,164 @@ function extractJsonFromMarkdown(text) {
619
620
  return null;
620
621
  }
621
622
 
623
// Extract and strip <think>...</think> blocks from model output.
// Returns { thought, content }: `thought` is every captured think block's
// trimmed text joined with blank lines; `content` is the remaining visible
// text, trimmed. Non-string/empty input yields an empty thought and the
// input coerced through `|| ''` (matching the previous behavior).
// Single-pass rewrite: the old version looped match/replace per block
// (rescanning the string each time) and then ran a second, redundant
// strip regex over text that no longer contained any blocks.
function splitThinking(text) {
  if (!text || typeof text !== 'string') {
    return { thought: '', content: text || '' };
  }
  const thoughts = [];
  // Capture each block's inner text while deleting it in one traversal.
  const visible = text.replace(/<think>([\s\S]*?)<\/think>/gi, (_, inner) => {
    const trimmed = (inner || '').trim();
    if (trimmed) thoughts.push(trimmed);
    return '';
  });
  return { thought: thoughts.join('\n\n').trim(), content: visible.trim() };
}
641
+
642
// Try to recover tool/function calls embedded in assistant text for thinking models.
// Scans (1) fenced ```json blocks, (2) <tool_call>/<function_call> tags, and
// (3) balanced {...} substrings as a last resort, parses each candidate as JSON,
// and normalizes any recognized shape (OpenAI function_call, tool_call wrapper,
// tool_calls array, or a bare {name, arguments}) into OpenAI-style tool_calls.
// Returns an array of tool-call objects, or null when nothing is recoverable.
// Fixes vs. previous version: the old fallback used the non-greedy regex
// /\{[\s\S]*?\}/g, which truncates any nested JSON at the first '}', so nested
// `arguments` objects could never be recovered; and fenced blocks were matched
// again by the brace scan, yielding duplicate calls. We now track brace depth
// and de-duplicate candidates with a Set.
function parseInlineToolCalls(text) {
  if (!text || typeof text !== 'string') return null;

  const candidates = new Set(); // de-duped: a fenced block also matches the brace scan

  const collect = (regex) => {
    let m;
    while ((m = regex.exec(text)) !== null) {
      const inner = (m[1] || '').trim();
      if (inner) candidates.add(inner);
    }
  };

  // 1) JSON code blocks
  collect(/```(?:json)?\s*([\s\S]*?)\s*```/gi);
  // 2) <tool_call>...</tool_call> and <function_call>...</function_call>
  collect(/<tool_call>([\s\S]*?)<\/tool_call>/gi);
  collect(/<function_call>([\s\S]*?)<\/function_call>/gi);

  // 3) Balanced {...} substrings as a last resort. Depth tracking keeps
  // nested objects intact where a non-greedy regex would cut them short.
  // (Braces inside JSON string values can still confuse this scan; such
  // snippets simply fail JSON.parse below and are ignored.)
  let depth = 0;
  let start = -1;
  for (let i = 0; i < text.length; i++) {
    const ch = text[i];
    if (ch === '{') {
      if (depth === 0) start = i;
      depth++;
    } else if (ch === '}' && depth > 0) {
      depth--;
      if (depth === 0) candidates.add(text.slice(start, i + 1));
    }
  }

  const toolCalls = [];

  // Normalize one recognized call into the OpenAI tool_calls shape.
  const pushCall = (name, rawArgs, id) => {
    const args = rawArgs ?? {};
    toolCalls.push({
      id: id || `inline-${toolCalls.length + 1}`,
      type: 'function',
      function: {
        name,
        arguments: typeof args === 'string' ? args : JSON.stringify(args)
      }
    });
  };

  for (const candidate of candidates) {
    try {
      const obj = JSON.parse(candidate);
      // OpenAI-style single function_call
      if (obj?.function_call?.name) {
        pushCall(obj.function_call.name, obj.function_call.arguments);
        continue;
      }
      // Anthropic-like tool_call wrapper
      if (obj?.tool_call?.name) {
        pushCall(obj.tool_call.name, obj.tool_call.arguments);
        continue;
      }
      // Array of tool_calls
      if (Array.isArray(obj?.tool_calls)) {
        obj.tool_calls.forEach((tc) => {
          if (tc?.function?.name) pushCall(tc.function.name, tc.function.arguments, tc.id);
        });
        if (toolCalls.length) continue;
      }
      // Direct function structure: { name, arguments|args }
      if (obj?.name && (obj.arguments !== undefined || obj.args !== undefined)) {
        pushCall(obj.name, obj.arguments ?? obj.args);
      }
    } catch (_) {
      // ignore parse failures — most brace snippets are not valid JSON
    }
  }

  return toolCalls.length ? toolCalls : null;
}
742
+
743
// Normalize a legacy OpenAI `function_call` on a chat message into the newer
// `tool_calls` array form, mutating the message in place. Messages that are
// not objects, already carry tool_calls, or lack a named function_call are
// returned untouched. Always returns the (possibly mutated) message.
function normalizeToolCallsFromMessage(message) {
  if (!message || typeof message !== 'object') return message;

  const fc = message.function_call;
  const needsNormalization = !message.tool_calls && fc && fc.name;
  if (!needsNormalization) return message;

  // Arguments may arrive as an object or a pre-serialized JSON string.
  const rawArgs = fc.arguments ?? {};
  const serialized = typeof rawArgs === 'string' ? rawArgs : JSON.stringify(rawArgs);

  message.tool_calls = [
    {
      id: 'fc-1',
      type: 'function',
      function: { name: fc.name, arguments: serialized }
    }
  ];
  return message;
}
759
+
760
// Ask the model for a structured follow-up: think inside <think></think>,
// then either emit tool_calls (when preferTools) or a single ```json action
// block. Strips/optionally surfaces the thinking, normalizes legacy
// function_call, and returns both the cleaned assistant message and the
// conversation extended with the instruction + reply.
async function requestStructuredFollowup(messages, currentModel, preferTools = true) {
  const toolInstruction =
    'Think step-by-step inside <think></think>. Then, if any tools are needed, call them via tool_calls. If no tools are required, output a single markdown ```json code block with an action object (type, data, reasoning) per the schema. Do not include any other text.';
  const jsonInstruction =
    'Think step-by-step inside <think></think>, then output a single markdown ```json code block with an action object (type, data, reasoning) per the schema. Do not include any other text.';

  const followupMessages = [
    ...messages,
    { role: 'user', content: preferTools ? toolInstruction : jsonInstruction }
  ];

  // JSON response mode is requested only when we are not expecting tool calls.
  const responseObj = await callOpenRouter(followupMessages, currentModel, !preferTools);
  const assistantMessage = responseObj.choices[0].message;

  if (assistantMessage && typeof assistantMessage.content === 'string') {
    const { thought, content } = splitThinking(assistantMessage.content);
    if (thought && SHOW_THOUGHTS) {
      ui.showInfo(`Thinking:\n${thought}`);
    }
    assistantMessage.content = content;
  }
  normalizeToolCallsFromMessage(assistantMessage);

  return { assistantMessage, updatedMessages: [...followupMessages, assistantMessage] };
}
780
+
622
781
  // Call OpenRouter API with tool calling
623
782
  async function callOpenRouter(messages, currentModel, useJson = false) {
624
783
  const apiKey = OPENROUTER_API_KEY;
@@ -675,8 +834,24 @@ async function processQueryWithTools(query, conversation = [], currentModel) {
675
834
  try {
676
835
  const response = await callOpenRouter(messages, currentModel);
677
836
  const assistantMessage = response.choices[0].message;
837
+ // Handle thinking tags and optionally display them
838
+ if (assistantMessage && typeof assistantMessage.content === 'string') {
839
+ const { thought, content } = splitThinking(assistantMessage.content);
840
+ if (thought && SHOW_THOUGHTS) {
841
+ ui.showInfo(`Thinking:\n${thought}`);
842
+ }
843
+ assistantMessage.content = content;
844
+ }
845
+ normalizeToolCallsFromMessage(assistantMessage);
678
846
  messages.push(assistantMessage);
679
847
 
848
+ // Try inline recovery for thinking models that embed tool calls inside content
849
+ if (!assistantMessage.tool_calls && assistantMessage.content) {
850
+ const recovered = parseInlineToolCalls(assistantMessage.content);
851
+ if (recovered && recovered.length) {
852
+ assistantMessage.tool_calls = recovered;
853
+ }
854
+ }
680
855
  if (assistantMessage.tool_calls) {
681
856
  const toolResults = await handleToolCalls(assistantMessage.tool_calls, messages);
682
857
  messages.push(...toolResults);
@@ -684,6 +859,14 @@ async function processQueryWithTools(query, conversation = [], currentModel) {
684
859
  ui.startThinking();
685
860
  const finalResponseObj = await callOpenRouter(messages, currentModel);
686
861
  const finalAssistantMessage = finalResponseObj.choices[0].message;
862
+ if (finalAssistantMessage && typeof finalAssistantMessage.content === 'string') {
863
+ const { thought, content } = splitThinking(finalAssistantMessage.content);
864
+ if (thought && SHOW_THOUGHTS) {
865
+ ui.showInfo(`Thinking:\n${thought}`);
866
+ }
867
+ finalAssistantMessage.content = content;
868
+ }
869
+ normalizeToolCallsFromMessage(finalAssistantMessage);
687
870
  messages.push(finalAssistantMessage);
688
871
  ui.stopThinking();
689
872
 
@@ -692,6 +875,82 @@ async function processQueryWithTools(query, conversation = [], currentModel) {
692
875
  conversation: messages
693
876
  };
694
877
  } else {
878
+ // Fallback: if no tool_calls were returned, try to parse a JSON action from content (thinking models may embed later)
879
+ const fallbackAction = extractJsonFromMarkdown(assistantMessage.content);
880
+ if (fallbackAction && fallbackAction.type) {
881
+ try {
882
+ const result = await executeAction(fallbackAction);
883
+ messages.push({ role: 'user', content: `Action result (${fallbackAction.type}): ${result}` });
884
+ ui.startThinking();
885
+ const finalResponseObj = await callOpenRouter(messages, currentModel);
886
+ const finalAssistantMessage = finalResponseObj.choices[0].message;
887
+ if (finalAssistantMessage && typeof finalAssistantMessage.content === 'string') {
888
+ const { thought, content } = splitThinking(finalAssistantMessage.content);
889
+ if (thought && SHOW_THOUGHTS) {
890
+ ui.showInfo(`Thinking:\n${thought}`);
891
+ }
892
+ finalAssistantMessage.content = content;
893
+ }
894
+ normalizeToolCallsFromMessage(finalAssistantMessage);
895
+ messages.push(finalAssistantMessage);
896
+ ui.stopThinking();
897
+ return { response: finalAssistantMessage.content, conversation: messages };
898
+ } catch (e) {
899
+ ui.stopThinking();
900
+ // If fallback execution fails, just return original assistant content
901
+ }
902
+ }
903
+ // Final attempt: request a structured follow-up that includes thinking and tool calls
904
+ try {
905
+ ui.startThinking();
906
+ const { assistantMessage: structuredMsg, updatedMessages } = await requestStructuredFollowup(messages, currentModel, true);
907
+ messages.length = 0; updatedMessages.forEach(m => messages.push(m));
908
+
909
+ // If tool calls present now, execute them
910
+ if (structuredMsg.tool_calls && structuredMsg.tool_calls.length) {
911
+ const toolResults2 = await handleToolCalls(structuredMsg.tool_calls, messages);
912
+ messages.push(...toolResults2);
913
+ const finalResponseObj2 = await callOpenRouter(messages, currentModel);
914
+ const finalAssistantMessage2 = finalResponseObj2.choices[0].message;
915
+ if (finalAssistantMessage2 && typeof finalAssistantMessage2.content === 'string') {
916
+ const { thought, content } = splitThinking(finalAssistantMessage2.content);
917
+ if (thought && SHOW_THOUGHTS) ui.showInfo(`Thinking:\n${thought}`);
918
+ finalAssistantMessage2.content = content;
919
+ }
920
+ messages.push(finalAssistantMessage2);
921
+ ui.stopThinking();
922
+ return { response: finalAssistantMessage2.content, conversation: messages };
923
+ }
924
+
925
+ // Else try JSON action again from the structured response
926
+ const structuredAction = extractJsonFromMarkdown(structuredMsg.content);
927
+ if (structuredAction && structuredAction.type) {
928
+ try {
929
+ const result2 = await executeAction(structuredAction);
930
+ messages.push({ role: 'user', content: `Action result (${structuredAction.type}): ${result2}` });
931
+ const finalResponseObj3 = await callOpenRouter(messages, currentModel);
932
+ const finalAssistantMessage3 = finalResponseObj3.choices[0].message;
933
+ if (finalAssistantMessage3 && typeof finalAssistantMessage3.content === 'string') {
934
+ const { thought, content } = splitThinking(finalAssistantMessage3.content);
935
+ if (thought && SHOW_THOUGHTS) ui.showInfo(`Thinking:\n${thought}`);
936
+ finalAssistantMessage3.content = content;
937
+ }
938
+ messages.push(finalAssistantMessage3);
939
+ ui.stopThinking();
940
+ return { response: finalAssistantMessage3.content, conversation: messages };
941
+ } catch (_) {
942
+ ui.stopThinking();
943
+ }
944
+ }
945
+
946
+ ui.stopThinking();
947
+ return {
948
+ response: structuredMsg.content,
949
+ conversation: messages
950
+ };
951
+ } catch (_) {
952
+ // ignore and fall back to original assistant message
953
+ }
695
954
  ui.stopThinking();
696
955
  return {
697
956
  response: assistantMessage.content,
@@ -818,6 +1077,14 @@ async function processQuery(query, conversation = [], currentModel) {
818
1077
  while (actionCount < MAX_ACTIONS) {
819
1078
  const responseObj = await callOpenRouter(messages, currentModel, true);
820
1079
  const assistantMessage = responseObj.choices[0].message;
1080
+ if (assistantMessage && typeof assistantMessage.content === 'string') {
1081
+ const { thought, content } = splitThinking(assistantMessage.content);
1082
+ if (thought && SHOW_THOUGHTS) {
1083
+ ui.showInfo(`Thinking:\n${thought}`);
1084
+ }
1085
+ assistantMessage.content = content;
1086
+ }
1087
+ normalizeToolCallsFromMessage(assistantMessage);
821
1088
  messages.push(assistantMessage);
822
1089
 
823
1090
  const actionData = extractJsonFromMarkdown(assistantMessage.content);
@@ -869,8 +1136,17 @@ async function processQuery(query, conversation = [], currentModel) {
869
1136
  messages.push(finalMsg);
870
1137
 
871
1138
  const finalResponseObj = await callOpenRouter(messages, currentModel, true);
872
- finalResponse = finalResponseObj.choices[0].message.content;
873
- messages.push(finalResponseObj.choices[0].message);
1139
+ const finalAssistantMessage = finalResponseObj.choices[0].message;
1140
+ if (finalAssistantMessage && typeof finalAssistantMessage.content === 'string') {
1141
+ const { thought, content } = splitThinking(finalAssistantMessage.content);
1142
+ if (thought && SHOW_THOUGHTS) {
1143
+ ui.showInfo(`Thinking:\n${thought}`);
1144
+ }
1145
+ finalResponse = content;
1146
+ } else {
1147
+ finalResponse = finalResponseObj.choices[0].message.content;
1148
+ }
1149
+ messages.push(finalAssistantMessage);
874
1150
  }
875
1151
 
876
1152
  ui.stopThinking();
@@ -920,6 +1196,25 @@ async function chat(rl, useToolCalling, initialModel) {
920
1196
  return;
921
1197
  }
922
1198
 
1199
+ if (input.toLowerCase().startsWith('/thoughts')) {
1200
+ const parts = input.trim().split(/\s+/);
1201
+ const arg = parts[1] ? parts[1].toLowerCase() : '';
1202
+ if (arg !== 'on' && arg !== 'off') {
1203
+ const state = SHOW_THOUGHTS ? 'on' : 'off';
1204
+ ui.showInfo(`Usage: /thoughts on|off (currently ${state})`);
1205
+ rl.prompt();
1206
+ return;
1207
+ }
1208
+ const enable = arg === 'on';
1209
+ SHOW_THOUGHTS = enable;
1210
+ let config = await readConfig() || {};
1211
+ config.showThoughts = enable;
1212
+ await writeConfig(config);
1213
+ ui.showResponse(`Hidden thoughts ${enable ? 'enabled' : 'disabled'}.`);
1214
+ rl.prompt();
1215
+ return;
1216
+ }
1217
+
923
1218
  if (input.toLowerCase() === '/default-model') {
924
1219
  currentModel = 'deepseek/deepseek-chat-v3-0324:free';
925
1220
  let config = await readConfig() || {};
@@ -1039,6 +1334,9 @@ async function start() {
1039
1334
  console.log(`🚀 Using Pro Plan custom endpoint: ${API_BASE_URL}`);
1040
1335
  }
1041
1336
 
1337
+ // Optional: reveal <think> thoughts if enabled in config or env
1338
+ SHOW_THOUGHTS = (typeof config.showThoughts === 'boolean') ? config.showThoughts : (process.env.SHOW_THOUGHTS === '1');
1339
+
1042
1340
  // Check if animation should be shown (can be disabled via config)
1043
1341
  const showAnimation = config.showAnimation !== false; // Default to true
1044
1342
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "sam-coder-cli",
3
- "version": "1.0.59",
3
+ "version": "1.0.61",
4
4
  "description": "SAM-CODER: An animated command-line AI assistant with agency capabilities.",
5
5
  "main": "bin/agi-cli.js",
6
6
  "bin": {