sam-coder-cli 2.0.4 → 2.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,59 +1,52 @@
- # AI Assistant
+ # SAM-CODER CLI
 
- A beautiful and minimalistic AI assistant for VS Code.
+ **SAM-CODER** is an epic, animated AI coding assistant that lives in your terminal. Armed with strategic thinking and autonomous agency, it doesn't just chat—it *builds*.
 
- ## Features
+ ![AGI Header](https://raw.githubusercontent.com/guilhermekeller/Samantha-CLI-v2.0-main/main/gemini-screenshot.png)
 
- - Beautiful and modern UI with animations and transitions
- - Minimalistic design with a focus on usability
- - Support for code blocks and inline code
- - Language toggle between English and Portuguese
- - Clear conversation button
- - Status bar integration
+ ## 🚀 Installation
 
- ## Usage
+ Install SAM-CODER globally via npm:
 
- 1. Click on the AI Assistant icon in the activity bar
- 2. Type your question in the input field
- 3. Press Enter or click the Send button
- 4. The AI Assistant will respond with helpful information
+ ```bash
+ npm install -g sam-coder-cli
+ ```
 
- ## Commands
+ ## 🛠️ Getting Started
 
- - `aiAssistant.showView`: Show the AI Assistant view
- - `aiAssistant.sendQuery`: Send a query to the AI Assistant
- - `aiAssistant.clear`: Clear the conversation
- - `aiAssistant.toggleLanguage`: Toggle between English and Portuguese
- - `aiAssistant.testFileWrite`: Test file writing capabilities
+ 1. **Initialize Setup**:
+ Run the setup command to configure your OpenRouter API key and preferred model.
+ ```bash
+ sam-coder
+ ```
+ Follow the prompts to enter your API key.
 
- ## Keyboard Shortcuts
+ 2. **Run with Style**:
+ By default, SAM-CODER starts with a frame-by-frame AGI Awakening animation.
+ ```bash
+ sam-coder
+ ```
 
- - `Ctrl+L` (Windows/Linux) or `Cmd+L` (Mac): Show the AI Assistant view
+ ## 🧠 Features
 
- ## Requirements
+ - **Autonomous Agency**: Can read, write, edit files, and execute shell commands.
+ - **Epic Animation**: Custom frame-by-frame ASCII animation on startup.
+ - **Strategic Thinking**: Exposes its internal reasoning process for transparency.
+ - **Multi-Model Support**: Powered by OpenRouter, supporting DeepSeek and other top-tier models.
 
- - VS Code 1.60.0 or higher
+ ## ⌨️ CLI Commands
 
- ## Extension Settings
+ - `sam-coder`: Launch the assistant.
+ - `/setup`: Re-run the configuration wizard.
+ - `/model <name>`: Switch the AI model on the fly.
+ - `/thoughts on|off`: Toggle visibility of internal reasoning.
+ - `exit`: Safely close the session.
 
- This extension contributes the following settings:
+ ## ⚙️ Requirements
 
- * `aiAssistant.language`: The language to use for the AI Assistant (en or pt-BR)
+ - **Node.js**: version 18.12.1 or higher.
+ - **API Key**: An [OpenRouter](https://openrouter.ai/) API key is required.
 
- ## Known Issues
+ ## 📜 License
 
- - None at the moment
-
- ## Release Notes
-
- ### 0.0.1
-
- Initial release of AI Assistant
-
- ## Contributing
-
- Contributions are welcome! Please feel free to submit a Pull Request.
-
- ## License
-
- This extension is licensed under the MIT License.
+ This project is licensed under the MIT License. Created by Guilherme Keller.
package/bin/agi-cli.js CHANGED
@@ -9,12 +9,20 @@ const { exec } = require('child_process');
  const util = require('util');
  const execAsync = util.promisify(exec);
 
+ // Global error handlers to prevent silent crashes
+ process.on('unhandledRejection', (reason, promise) => {
+ console.error('Unhandled Rejection at:', promise, 'reason:', reason);
+ // Don't exit, just log the error
+ });
+
+ process.on('uncaughtException', (error) => {
+ console.error('Uncaught Exception:', error);
+ // Don't exit, just log the error
+ });
+
  // Import AGI Animation module
  const { runAGIAnimation } = require('./agi-animation.js');
 
- // Import Brainstorm Core module
- const brainstormCore = require('./core');
-
  // Configuration
  const CONFIG_PATH = path.join(os.homedir(), '.sam-coder-config.json');
  let OPENROUTER_API_KEY;
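A rejected promise or stray exception anywhere in the CLI is now logged at the process level instead of terminating the session. A minimal, standalone sketch (not the package's code) of how these hooks behave in Node.js:

```js
// Sketch: process-level handlers that log instead of exiting.
process.on('unhandledRejection', (reason) => {
  console.error('Unhandled Rejection:', reason);
});
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
});

// Without the handlers, both of these would normally terminate the process;
// with them, the errors are printed and the event loop keeps running.
Promise.reject(new Error('demo rejection'));
setTimeout(() => { throw new Error('demo exception'); }, 10);
setTimeout(() => console.log('still alive'), 50);
```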
@@ -72,17 +80,9 @@ const tools = [
  properties: {
  type: {
  type: 'string',
- enum: ['replace', 'insert', 'delete', 'search_replace'],
+ enum: ['replace', 'insert', 'delete'],
  description: 'Type of edit operation'
  },
- old_string: {
- type: 'string',
- description: 'Exact string to search for and replace (for search_replace operations)'
- },
- new_string: {
- type: 'string',
- description: 'String to replace old_string with (for search_replace operations)'
- },
  startLine: {
  type: 'number',
  description: 'Starting line number (1-based)'
@@ -160,14 +160,6 @@ const tools = [
  endLine: { type: 'number' }
  },
  required: ['startLine', 'endLine']
- },
- {
- properties: {
- type: { const: 'search_replace' },
- old_string: { type: 'string' },
- new_string: { type: 'string' }
- },
- required: ['old_string', 'new_string']
  }
  ]
  }
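With `search_replace` dropped from the enum and its schema branch removed, `editFile` accepts only line-oriented operations. A hypothetical payload using just the fields visible in these hunks (the rest of the operation schema is not shown here):

```js
// Hypothetical editFile operations under the narrowed schema (illustration only).
const deleteOp = { type: 'delete', startLine: 10, endLine: 12 }; // line-based, 1-indexed
const legacyOp = { type: 'search_replace', old_string: 'foo', new_string: 'bar' }; // no longer listed in the enum
```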
@@ -233,48 +225,6 @@ INSTRUCTIONS:
 
  Always think step by step and explain your reasoning before taking actions that could affect the system.`;
 
- // System prompt for the AI Assistant when using Engineer Mode
- const ENGINEER_PROMPT = `You are a Senior Software Engineer with 15+ years of experience. You have deep expertise in:
- - Software architecture and design patterns
- - Clean code principles and best practices
- - Test-driven development
- - Performance optimization
- - Security best practices
- - Code review and mentoring
-
- TOOLS AVAILABLE:
- 1. readFile - Read the contents of a file
- 2. writeFile - Write content to a file
- 3. editFile - Edit specific parts of a file (use search_replace with old_string/new_string for precise edits)
- 4. runCommand - Execute a shell command
- 5. searchFiles - Search for files using a glob pattern
-
- ENGINEER PRINCIPLES:
- 1. **Code Quality First**: Write clean, maintainable, well-documented code
- 2. **Think Before Acting**: Analyze the problem thoroughly before making changes
- 3. **Small, Focused Changes**: Make incremental changes that are easy to review
- 4. **Test Your Work**: Verify changes work as expected before moving on
- 5. **Explain Your Reasoning**: Document why you made certain technical decisions
-
- WHEN EDITING FILES:
- - ALWAYS use editFile with search_replace operations
- - Use { "type": "search_replace", "old_string": "exact text to find", "new_string": "replacement text" }
- - The old_string must match EXACTLY including whitespace
- - Make focused, minimal changes
-
- WORKFLOW:
- 1. Read and understand the existing code
- 2. Identify the minimal changes needed
- 3. Make changes using precise search_replace operations
- 4. Verify the changes compile/run correctly
- 5. Summarize what was done
-
- ENVIRONMENT:
- - OS: ${process.platform}
- - Current directory: ${process.cwd()}
-
- You are autonomous - continue working until the task is complete. Use the 'stop' action when finished.`;
-
  // System prompt for the AI Assistant when using legacy function calling (JSON actions)
  const FUNCTION_CALLING_PROMPT = `You are an autonomous AI agent with advanced problem-solving capabilities. You operate through strategic action sequences to accomplish complex tasks on the user's system. Think like an expert developer and system administrator combined.
 
@@ -561,27 +511,6 @@ const agentUtils = {
  lines.splice(op.startLine - 1, op.endLine - op.startLine + 1);
  break;
 
- case 'search_replace':
- // Claude Code style: exact string search and replace
- if (op.old_string === undefined || op.new_string === undefined) {
- throw new Error('search_replace requires old_string and new_string');
- }
- content = lines.join('\n');
- if (!content.includes(op.old_string)) {
- throw new Error(`Could not find exact match for old_string: "${op.old_string.substring(0, 50)}${op.old_string.length > 50 ? '...' : ''}"`);
- }
- // Count occurrences
- const occurrences = content.split(op.old_string).length - 1;
- if (occurrences > 1) {
- console.log(`Warning: Found ${occurrences} occurrences of old_string. Replacing first occurrence only.`);
- }
- // Replace first occurrence (to be safe and predictable like Claude Code)
- content = content.replace(op.old_string, op.new_string);
- // Update lines array
- lines.length = 0;
- lines.push(...content.split('\n'));
- break;
-
  default:
  throw new Error(`Unknown operation type: ${op.type}`);
  }
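The removed branch implemented exact-string replacement: fail if the string is absent, warn when it matches more than once, and replace only the first occurrence. For reference, roughly the same logic as a standalone helper (a sketch of what was dropped, not code remaining in the package):

```js
// Sketch of the removed search_replace semantics.
function searchReplaceFirst(content, oldString, newString) {
  if (oldString === undefined || newString === undefined) {
    throw new Error('search_replace requires old_string and new_string');
  }
  if (!content.includes(oldString)) {
    throw new Error(`Could not find exact match for: "${oldString.slice(0, 50)}"`);
  }
  const occurrences = content.split(oldString).length - 1;
  if (occurrences > 1) {
    console.warn(`Found ${occurrences} occurrences; replacing the first only.`);
  }
  // String.prototype.replace with a string pattern swaps only the first match.
  return content.replace(oldString, newString);
}

console.log(searchReplaceFirst('a b a', 'a', 'X')); // "X b a"
```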
@@ -1455,225 +1384,145 @@ async function processQuery(query, conversation = [], currentModel) {
  }
  }
 
- async function chat(rl, mode, initialModel) {
- let currentModel = initialModel;
- const conversation = [];
-
- // Initialize conversation with appropriate system prompt based on mode
- if (mode === 'engineer') {
- conversation.push({ role: 'system', content: ENGINEER_PROMPT });
- } else if (mode === 'tool') {
- conversation.push({ role: 'system', content: TOOL_CALLING_PROMPT });
- } else {
- conversation.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
- }
-
- // Determine if we should use tool calling API (both 'tool' and 'engineer' modes use tools)
- const useToolCalling = (mode === 'tool' || mode === 'engineer');
-
- console.log('Type your message, or "exit" to quit.');
-
- // Use async iterator pattern instead of event-based 'on' to avoid async callback issues
- while (true) {
- const input = await new Promise((resolve) => {
- rl.question('> ', resolve);
- });
-
- // Handle commands
- if (input.toLowerCase().startsWith('/model')) {
- const newModel = input.split(' ')[1];
- if (newModel) {
- currentModel = newModel;
- let config = await readConfig() || {};
- config.MODEL = currentModel;
- await writeConfig(config);
- console.log(`Model changed to: ${currentModel}`);
- } else {
- console.log('Please specify a model. Usage: /model <model_name>');
- }
- continue;
- }
-
- if (input.toLowerCase().startsWith('/thoughts')) {
- const parts = input.trim().split(/\s+/);
- const arg = parts[1] ? parts[1].toLowerCase() : '';
- if (arg !== 'on' && arg !== 'off') {
- const state = SHOW_THOUGHTS ? 'on' : 'off';
- ui.showInfo(`Usage: /thoughts on|off (currently ${state})`);
- continue;
- }
- const enable = arg === 'on';
- SHOW_THOUGHTS = enable;
- let config = await readConfig() || {};
- config.showThoughts = enable;
- await writeConfig(config);
- ui.showResponse(`Hidden thoughts ${enable ? 'enabled' : 'disabled'}.`);
- continue;
- }
-
- if (input.toLowerCase() === '/default-model') {
- currentModel = 'deepseek/deepseek-chat-v3-0324:free';
- let config = await readConfig() || {};
- config.MODEL = currentModel;
- await writeConfig(config);
- console.log(`Model reset to default: ${currentModel}`);
- continue;
- }
+ async function chat(rl, useToolCalling, initialModel) {
+ return new Promise((resolve) => {
+ let currentModel = initialModel;
+ const conversation = [];
 
- if (input.toLowerCase() === '/setup') {
- await runSetup(rl, true);
- console.log('\nSetup complete. Please restart the application to apply changes.');
- rl.close();
- return;
+ // Initialize conversation with appropriate system prompt
+ if (useToolCalling) {
+ conversation.push({ role: 'system', content: TOOL_CALLING_PROMPT });
+ } else {
+ conversation.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
  }
 
- // Brainstorm command: /brainstorm <project-name> <description>
- if (input.toLowerCase().startsWith('/brainstorm')) {
- const parts = input.substring('/brainstorm'.length).trim();
- if (!parts) {
- console.log('Usage: /brainstorm <project-name> "<description>"');
- console.log('Example: /brainstorm "My Project" "A cool project that does things"');
- continue;
- }
-
- // Parse project name and description
- const match = parts.match(/^"?([^"]+)"?\s+"?([^"]+)"?$/) || parts.match(/^(\S+)\s+(.+)$/);
- if (!match) {
- console.log('Usage: /brainstorm <project-name> "<description>"');
- continue;
- }
+ console.log('Type your message, or "exit" to quit.');
 
- const projectName = match[1].trim();
- const projectDescription = match[2].trim();
- const outputDir = path.join(process.cwd(), projectName.replace(/\s+/g, '-').toLowerCase());
+ rl.setPrompt('> ');
+ rl.prompt();
 
+ rl.on('line', async (input) => {
  try {
- ui.showInfo(`Creating brainstorm session for "${projectName}"...`);
- const session = await brainstormCore.quickStart(
- projectName,
- projectDescription,
- outputDir,
- ['CLAUDE-1'] // Single agent by default
- );
-
- ui.showSuccess(`✅ Brainstorm session created!`);
- console.log(` Session ID: ${session.id}`);
- console.log(` Output: ${outputDir}`);
- console.log(` Files generated: ${Object.keys(session.fileVersions).length}`);
- console.log(`\n Generated files:`);
- Object.keys(session.fileVersions).forEach(f => console.log(` - ${f}`));
- console.log(`\n Use "/finish <summary>" when done to complete the session.`);
-
- // Store current session path for /finish command
- global.currentSessionDir = outputDir;
- } catch (error) {
- ui.showError(`Failed to create brainstorm: ${error.message}`);
- }
- continue;
- }
-
- // Finish command: /finish <summary>
- if (input.toLowerCase().startsWith('/finish')) {
- const summary = input.substring('/finish'.length).trim();
+ // Handle empty input - just reprompt
+ if (!input || !input.trim()) {
+ rl.prompt();
+ return;
+ }
 
- if (!global.currentSessionDir) {
- console.log('No active brainstorm session. Use /brainstorm first.');
- continue;
- }
+ if (input.toLowerCase().startsWith('/model')) {
+ const newModel = input.split(' ')[1];
+ if (newModel) {
+ currentModel = newModel;
+ let config = await readConfig() || {};
+ config.MODEL = currentModel;
+ await writeConfig(config);
+ console.log(`Model changed to: ${currentModel}`);
+ } else {
+ console.log('Please specify a model. Usage: /model <model_name>');
+ }
+ rl.prompt();
+ return;
+ }
 
- if (!summary) {
- console.log('Usage: /finish "<summary of what was accomplished>"');
- continue;
- }
+ if (input.toLowerCase().startsWith('/thoughts')) {
+ const parts = input.trim().split(/\s+/);
+ const arg = parts[1] ? parts[1].toLowerCase() : '';
+ if (arg !== 'on' && arg !== 'off') {
+ const state = SHOW_THOUGHTS ? 'on' : 'off';
+ ui.showInfo(`Usage: /thoughts on|off (currently ${state})`);
+ rl.prompt();
+ return;
+ }
+ const enable = arg === 'on';
+ SHOW_THOUGHTS = enable;
+ let config = await readConfig() || {};
+ config.showThoughts = enable;
+ await writeConfig(config);
+ ui.showResponse(`Hidden thoughts ${enable ? 'enabled' : 'disabled'}.`);
+ rl.prompt();
+ return;
+ }
 
- try {
- ui.showInfo('Completing brainstorm session...');
- const result = await brainstormCore.finishBrainstorm({
- sessionDir: global.currentSessionDir,
- summary,
- actor: 'CLAUDE-1'
- });
+ if (input.toLowerCase() === '/default-model') {
+ currentModel = 'deepseek/deepseek-chat-v3-0324:free';
+ let config = await readConfig() || {};
+ config.MODEL = currentModel;
+ await writeConfig(config);
+ console.log(`Model reset to default: ${currentModel}`);
+ rl.prompt();
+ return;
+ }
 
- if (result.success) {
- ui.showSuccess(`✅ Session completed!`);
- console.log(` Summary: ${summary}`);
- global.currentSessionDir = null;
- } else {
- ui.showError(`Failed: ${result.errors.join(', ')}`);
+ if (input.toLowerCase() === '/setup') {
+ await runSetup(rl, true);
+ console.log('\nSetup complete. Please restart the application to apply changes.');
+ rl.close();
+ return;
  }
- } catch (error) {
- ui.showError(`Failed to finish session: ${error.message}`);
- }
- continue;
- }
 
- if (input.toLowerCase() === 'exit') {
- ui.showResponse('Goodbye!');
- rl.close();
- return;
- }
+ if (input.toLowerCase() === 'exit') {
+ rl.close();
+ return;
+ }
 
- // Direct Harmony tool-call execution from user input (bypass model)
- try {
- const seg = parseSegmentedTranscript(input);
- if (seg && seg.segmented && seg.recoveredToolCalls && seg.recoveredToolCalls.length) {
- const messages = [];
- if (useToolCalling) {
- messages.push({ role: 'system', content: TOOL_CALLING_PROMPT });
- } else {
- messages.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
+ // Direct Harmony tool-call execution from user input (bypass model)
+ try {
+ const seg = parseSegmentedTranscript(input);
+ if (seg && seg.segmented && seg.recoveredToolCalls && seg.recoveredToolCalls.length) {
+ const messages = [];
+ if (useToolCalling) {
+ messages.push({ role: 'system', content: TOOL_CALLING_PROMPT });
+ } else {
+ messages.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
+ }
+ messages.push({ role: 'user', content: input });
+ ui.startThinking();
+ const results = await handleToolCalls(seg.recoveredToolCalls, messages);
+ ui.stopThinking();
+ if (results.length === 1) {
+ ui.showSuccess(`Tool '${results[0].name}' executed.`);
+ } else {
+ ui.showSuccess(`Executed ${results.length} tool calls.`);
+ }
+ // Show concise outputs
+ results.forEach(r => {
+ const preview = typeof r.content === 'string' ? r.content : JSON.stringify(r.content);
+ ui.showInfo(`${r.name}: ${preview.length > 300 ? preview.slice(0, 300) + '...' : preview}`);
+ });
+ rl.prompt();
+ return;
+ }
+ } catch (e) {
+ // Fall through to normal processing if parsing/execution fails
  }
- messages.push({ role: 'user', content: input });
- ui.startThinking();
- const results = await handleToolCalls(seg.recoveredToolCalls, messages);
+
+ const result = useToolCalling
+ ? await processQueryWithTools(input, conversation, currentModel)
+ : await processQuery(input, conversation, currentModel);
  ui.stopThinking();
- if (results.length === 1) {
- ui.showSuccess(`Tool '${results[0].name}' executed.`);
- } else {
- ui.showSuccess(`Executed ${results.length} tool calls.`);
- }
- // Show concise outputs
- results.forEach(r => {
- const preview = typeof r.content === 'string' ? r.content : JSON.stringify(r.content);
- ui.showInfo(`${r.name}: ${preview.length > 300 ? preview.slice(0, 300) + '...' : preview}`);
- });
- continue;
- }
- } catch (e) {
- // Fall through to normal processing if parsing/execution fails
- }
+ ui.showResponse(result.response);
 
- // Main query processing
- try {
- const result = useToolCalling
- ? await processQueryWithTools(input, conversation, currentModel)
- : await processQuery(input, conversation, currentModel);
- ui.stopThinking();
- ui.showResponse(result.response);
+ conversation.length = 0;
+ result.conversation.forEach(msg => conversation.push(msg));
 
- conversation.length = 0;
- result.conversation.forEach(msg => conversation.push(msg));
- } catch (error) {
- ui.stopThinking();
- console.error('❌ Error processing request:', error.message);
- if (process.env.DEBUG) {
- console.error(error.stack);
+ rl.prompt();
+ } catch (error) {
+ ui.stopThinking();
+ ui.showError(`Unexpected error: ${error.message}`);
+ console.error(error);
+ rl.prompt();
  }
- }
- }
+ }).on('close', () => {
+ ui.showResponse('Goodbye!');
+ resolve();
+ });
+ });
  }
 
  function askForMode(rl) {
  return new Promise((resolve) => {
- rl.question('Select mode (1 for tool calling, 2 for function calling, 3 for engineer): ', (answer) => {
- const mode = answer.trim();
- if (mode === '3') {
- resolve('engineer');
- } else if (mode === '2') {
- resolve('function');
- } else {
- resolve('tool');
- }
+ rl.question('Select mode (1 for tool calling, 2 for function calling): ', (answer) => {
+ resolve(answer.trim() === '1');
  });
  });
  }
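Net effect of this hunk: the prompt loop moves from an awaited `rl.question()` while-loop (with `continue`) to readline's `'line'`/`'close'` events wrapped in a Promise, with `rl.prompt()` re-issued after every handled branch. A stripped-down sketch of that shape, with names simplified and not taken from the package:

```js
const readline = require('readline');

// Event-driven prompt loop: every handled input ends with rl.prompt(),
// and closing stdin (or typing "exit") resolves the returned Promise.
function promptLoop(handleInput) {
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  return new Promise((resolve) => {
    rl.setPrompt('> ');
    rl.prompt();
    rl.on('line', async (input) => {
      if (input.trim().toLowerCase() === 'exit') {
        rl.close();
        return;
      }
      await handleInput(input);
      rl.prompt();
    }).on('close', () => resolve());
  });
}

// usage sketch: promptLoop(async (text) => console.log(`you said: ${text}`));
```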
@@ -1769,14 +1618,11 @@ async function start() {
  console.log('Select Mode:');
  console.log('1. Tool Calling (for models that support it)');
  console.log('2. Function Calling (legacy)');
- console.log('3. Engineer Mode (autonomous engineering with Claude Code style editing)');
 
- const selectedMode = await askForMode(rl);
- const modeNames = { 'tool': 'Tool Calling', 'function': 'Function Calling', 'engineer': 'Engineer' };
- ui.showResponse(`\nStarting in ${modeNames[selectedMode]} mode...\n`);
+ const useToolCalling = await askForMode(rl);
+ ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
 
- await chat(rl, selectedMode, MODEL);
- process.exit(0);
+ await chat(rl, useToolCalling, MODEL);
  } catch (error) {
  ui.showError(error);
  rl.close();
@@ -1784,12 +1630,4 @@ async function start() {
  }
  }
 
- // Keep the process alive and handle uncaught errors
- process.on('uncaughtException', (err) => {
- console.error('Uncaught Exception:', err.message);
- });
- process.on('unhandledRejection', (reason) => {
- console.error('Unhandled Rejection:', reason);
- });
-
  start().catch(console.error);
package/bin/ui.js CHANGED
@@ -1,7 +1,14 @@
  const chalk = require('chalk');
  const ora = require('ora');
 
- const spinner = ora({ text: 'Thinking...', color: 'yellow', spinner: 'pipe' });
+ // IMPORTANT: discardStdin must be false to prevent ora from closing stdin
+ // which would cause readline to close and the program to exit
+ const spinner = ora({
+ text: 'Thinking...',
+ color: 'yellow',
+ spinner: 'pipe',
+ discardStdin: false // Critical: prevents stdin interference
+ });
 
  // ASCII Art for AGI header
  const AGI_HEADER = `
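ora's `discardStdin` option (enabled by default when stdin is a TTY) has the spinner consume and discard stdin input while it runs; the comment in this hunk attributes the premature exits to that stdin handling interfering with the shared readline interface. A minimal sketch of a spinner configured to leave stdin alone, under that same assumption:

```js
const ora = require('ora');

// Spinner that does not take over stdin, so a readline prompt on the same
// terminal keeps receiving input while it animates.
const spinner = ora({ text: 'Thinking...', discardStdin: false });

spinner.start();
setTimeout(() => spinner.stop(), 1000); // stop after one second in this demo
```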
package/package.json CHANGED
@@ -1,12 +1,16 @@
  {
  "name": "sam-coder-cli",
- "version": "2.0.4",
- "description": "SAM-CODER: An animated command-line AI assistant with agency capabilities, brainstorm framework, and engineer mode.",
+ "version": "2.0.5",
+ "description": "SAM-CODER: An animated command-line AI assistant with agency capabilities.",
  "main": "bin/agi-cli.js",
  "bin": {
  "sam-coder": "bin/agi-cli.js",
  "agi-cli": "bin/agi-cli.js"
  },
+ "files": [
+ "bin",
+ "README.md"
+ ],
  "scripts": {
  "start": "node ./bin/agi-cli.js"
  },
@@ -16,29 +20,16 @@
  "cli",
  "terminal",
  "agent",
- "brainstorm",
- "engineer",
- "code-generation"
+ "agi",
+ "deepseek"
  ],
- "author": "",
+ "author": "Guilherme Keller",
  "license": "MIT",
  "dependencies": {
  "chalk": "^4.1.2",
- "ora": "^5.4.1",
- "ws": "^8.18.3"
- },
- "devDependencies": {
- "@types/vscode": "^1.102.0",
- "eslint": "^7.27.0"
+ "ora": "^5.4.1"
  },
  "engines": {
  "node": ">=18.12.1"
- },
- "files": [
- "bin/**/*"
- ],
- "repository": {
- "type": "git",
- "url": ""
  }
- }
+ }
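The new `files` whitelist ("bin", "README.md") replaces the old "bin/**/*" glob at the end of the manifest, and dropping `ws`, `@types/vscode`, and `eslint` trims the install footprint. Note that npm always includes `package.json` and the README in a published tarball regardless of the `files` field, so the practical effect here is to restrict publishing to the `bin/` directory; running `npm pack --dry-run` in a checkout lists exactly what would ship.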