sam-coder-cli 2.0.3 β†’ 2.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,59 +1,52 @@
1
- # AI Assistant
1
+ # SAM-CODER CLI
2
2
 
3
- A beautiful and minimalistic AI assistant for VS Code.
3
+ **SAM-CODER** is an epic, animated AI coding assistant that lives in your terminal. Armed with strategic thinking and autonomous agency, it doesn't just chat—it *builds*.
4
4
 
5
- ## Features
5
+ ![AGI Header](https://raw.githubusercontent.com/guilhermekeller/Samantha-CLI-v2.0-main/main/gemini-screenshot.png)
6
6
 
7
- - Beautiful and modern UI with animations and transitions
8
- - Minimalistic design with a focus on usability
9
- - Support for code blocks and inline code
10
- - Language toggle between English and Portuguese
11
- - Clear conversation button
12
- - Status bar integration
7
+ ## 🚀 Installation
13
8
 
14
- ## Usage
9
+ Install SAM-CODER globally via npm:
15
10
 
16
- 1. Click on the AI Assistant icon in the activity bar
17
- 2. Type your question in the input field
18
- 3. Press Enter or click the Send button
19
- 4. The AI Assistant will respond with helpful information
11
+ ```bash
12
+ npm install -g sam-coder-cli
13
+ ```
20
14
 
21
- ## Commands
15
+ ## 🛠️ Getting Started
22
16
 
23
- - `aiAssistant.showView`: Show the AI Assistant view
24
- - `aiAssistant.sendQuery`: Send a query to the AI Assistant
25
- - `aiAssistant.clear`: Clear the conversation
26
- - `aiAssistant.toggleLanguage`: Toggle between English and Portuguese
27
- - `aiAssistant.testFileWrite`: Test file writing capabilities
17
+ 1. **Initialize Setup**:
18
+ Run the setup command to configure your OpenRouter API key and preferred model.
19
+ ```bash
20
+ sam-coder
21
+ ```
22
+ Follow the prompts to enter your API key.
28
23
 
29
- ## Keyboard Shortcuts
24
+ 2. **Run with Style**:
25
+ By default, SAM-CODER starts with a frame-by-frame AGI Awakening animation.
26
+ ```bash
27
+ sam-coder
28
+ ```
30
29
 
31
- - `Ctrl+L` (Windows/Linux) or `Cmd+L` (Mac): Show the AI Assistant view
30
+ ## 🧠 Features
32
31
 
33
- ## Requirements
32
+ - **Autonomous Agency**: Can read, write, edit files, and execute shell commands.
33
+ - **Epic Animation**: Custom frame-by-frame ASCII animation on startup.
34
+ - **Strategic Thinking**: Exposes its internal reasoning process for transparency.
35
+ - **Multi-Model Support**: Powered by OpenRouter, supporting DeepSeek and other top-tier models.
34
36
 
35
- - VS Code 1.60.0 or higher
37
+ ## ⌨️ CLI Commands
36
38
 
37
- ## Extension Settings
39
+ - `sam-coder`: Launch the assistant.
40
+ - `/setup`: Re-run the configuration wizard.
41
+ - `/model <name>`: Switch the AI model on the fly.
42
+ - `/thoughts on|off`: Toggle visibility of internal reasoning.
43
+ - `exit`: Safely close the session.
38
44
 
39
- This extension contributes the following settings:
45
+ ## ⚙️ Requirements
40
46
 
41
- * `aiAssistant.language`: The language to use for the AI Assistant (en or pt-BR)
47
+ - **Node.js**: version 18.12.1 or higher.
48
+ - **API Key**: An [OpenRouter](https://openrouter.ai/) API key is required.
42
49
 
43
- ## Known Issues
50
+ ## 📜 License
44
51
 
45
- - None at the moment
46
-
47
- ## Release Notes
48
-
49
- ### 0.0.1
50
-
51
- Initial release of AI Assistant
52
-
53
- ## Contributing
54
-
55
- Contributions are welcome! Please feel free to submit a Pull Request.
56
-
57
- ## License
58
-
59
- This extension is licensed under the MIT License.
52
+ This project is licensed under the MIT License. Created by Guilherme Keller.
package/bin/agi-cli.js CHANGED
@@ -9,12 +9,20 @@ const { exec } = require('child_process');
9
9
  const util = require('util');
10
10
  const execAsync = util.promisify(exec);
11
11
 
12
+ // Global error handlers to prevent silent crashes
13
+ process.on('unhandledRejection', (reason, promise) => {
14
+ console.error('Unhandled Rejection at:', promise, 'reason:', reason);
15
+ // Don't exit, just log the error
16
+ });
17
+
18
+ process.on('uncaughtException', (error) => {
19
+ console.error('Uncaught Exception:', error);
20
+ // Don't exit, just log the error
21
+ });
22
+
12
23
  // Import AGI Animation module
13
24
  const { runAGIAnimation } = require('./agi-animation.js');
14
25
 
15
- // Import Brainstorm Core module
16
- const brainstormCore = require('./core');
17
-
18
26
  // Configuration
19
27
  const CONFIG_PATH = path.join(os.homedir(), '.sam-coder-config.json');
20
28
  let OPENROUTER_API_KEY;
@@ -72,17 +80,9 @@ const tools = [
72
80
  properties: {
73
81
  type: {
74
82
  type: 'string',
75
- enum: ['replace', 'insert', 'delete', 'search_replace'],
83
+ enum: ['replace', 'insert', 'delete'],
76
84
  description: 'Type of edit operation'
77
85
  },
78
- old_string: {
79
- type: 'string',
80
- description: 'Exact string to search for and replace (for search_replace operations)'
81
- },
82
- new_string: {
83
- type: 'string',
84
- description: 'String to replace old_string with (for search_replace operations)'
85
- },
86
86
  startLine: {
87
87
  type: 'number',
88
88
  description: 'Starting line number (1-based)'
@@ -160,14 +160,6 @@ const tools = [
160
160
  endLine: { type: 'number' }
161
161
  },
162
162
  required: ['startLine', 'endLine']
163
- },
164
- {
165
- properties: {
166
- type: { const: 'search_replace' },
167
- old_string: { type: 'string' },
168
- new_string: { type: 'string' }
169
- },
170
- required: ['old_string', 'new_string']
171
163
  }
172
164
  ]
173
165
  }
@@ -233,48 +225,6 @@ INSTRUCTIONS:
233
225
 
234
226
  Always think step by step and explain your reasoning before taking actions that could affect the system.`;
235
227
 
236
- // System prompt for the AI Assistant when using Engineer Mode
237
- const ENGINEER_PROMPT = `You are a Senior Software Engineer with 15+ years of experience. You have deep expertise in:
238
- - Software architecture and design patterns
239
- - Clean code principles and best practices
240
- - Test-driven development
241
- - Performance optimization
242
- - Security best practices
243
- - Code review and mentoring
244
-
245
- TOOLS AVAILABLE:
246
- 1. readFile - Read the contents of a file
247
- 2. writeFile - Write content to a file
248
- 3. editFile - Edit specific parts of a file (use search_replace with old_string/new_string for precise edits)
249
- 4. runCommand - Execute a shell command
250
- 5. searchFiles - Search for files using a glob pattern
251
-
252
- ENGINEER PRINCIPLES:
253
- 1. **Code Quality First**: Write clean, maintainable, well-documented code
254
- 2. **Think Before Acting**: Analyze the problem thoroughly before making changes
255
- 3. **Small, Focused Changes**: Make incremental changes that are easy to review
256
- 4. **Test Your Work**: Verify changes work as expected before moving on
257
- 5. **Explain Your Reasoning**: Document why you made certain technical decisions
258
-
259
- WHEN EDITING FILES:
260
- - ALWAYS use editFile with search_replace operations
261
- - Use { "type": "search_replace", "old_string": "exact text to find", "new_string": "replacement text" }
262
- - The old_string must match EXACTLY including whitespace
263
- - Make focused, minimal changes
264
-
265
- WORKFLOW:
266
- 1. Read and understand the existing code
267
- 2. Identify the minimal changes needed
268
- 3. Make changes using precise search_replace operations
269
- 4. Verify the changes compile/run correctly
270
- 5. Summarize what was done
271
-
272
- ENVIRONMENT:
273
- - OS: ${process.platform}
274
- - Current directory: ${process.cwd()}
275
-
276
- You are autonomous - continue working until the task is complete. Use the 'stop' action when finished.`;
277
-
278
228
  // System prompt for the AI Assistant when using legacy function calling (JSON actions)
279
229
  const FUNCTION_CALLING_PROMPT = `You are an autonomous AI agent with advanced problem-solving capabilities. You operate through strategic action sequences to accomplish complex tasks on the user's system. Think like an expert developer and system administrator combined.
280
230
 
@@ -561,27 +511,6 @@ const agentUtils = {
561
511
  lines.splice(op.startLine - 1, op.endLine - op.startLine + 1);
562
512
  break;
563
513
 
564
- case 'search_replace':
565
- // Claude Code style: exact string search and replace
566
- if (op.old_string === undefined || op.new_string === undefined) {
567
- throw new Error('search_replace requires old_string and new_string');
568
- }
569
- content = lines.join('\n');
570
- if (!content.includes(op.old_string)) {
571
- throw new Error(`Could not find exact match for old_string: "${op.old_string.substring(0, 50)}${op.old_string.length > 50 ? '...' : ''}"`);
572
- }
573
- // Count occurrences
574
- const occurrences = content.split(op.old_string).length - 1;
575
- if (occurrences > 1) {
576
- console.log(`Warning: Found ${occurrences} occurrences of old_string. Replacing first occurrence only.`);
577
- }
578
- // Replace first occurrence (to be safe and predictable like Claude Code)
579
- content = content.replace(op.old_string, op.new_string);
580
- // Update lines array
581
- lines.length = 0;
582
- lines.push(...content.split('\n'));
583
- break;
584
-
585
514
  default:
586
515
  throw new Error(`Unknown operation type: ${op.type}`);
587
516
  }
@@ -1455,226 +1384,135 @@ async function processQuery(query, conversation = [], currentModel) {
1455
1384
  }
1456
1385
  }
1457
1386
 
1458
- async function chat(rl, mode, initialModel) {
1459
- let currentModel = initialModel;
1460
- const conversation = [];
1461
-
1462
- // Initialize conversation with appropriate system prompt based on mode
1463
- if (mode === 'engineer') {
1464
- conversation.push({ role: 'system', content: ENGINEER_PROMPT });
1465
- } else if (mode === 'tool') {
1466
- conversation.push({ role: 'system', content: TOOL_CALLING_PROMPT });
1467
- } else {
1468
- conversation.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
1469
- }
1470
-
1471
- // Determine if we should use tool calling API (both 'tool' and 'engineer' modes use tools)
1472
- const useToolCalling = (mode === 'tool' || mode === 'engineer');
1473
-
1474
- console.log('Type your message, or "exit" to quit.');
1475
-
1476
- rl.setPrompt('> ');
1477
- rl.prompt();
1478
-
1479
- rl.on('line', async (input) => {
1480
- if (input.toLowerCase().startsWith('/model')) {
1481
- const newModel = input.split(' ')[1];
1482
- if (newModel) {
1483
- currentModel = newModel;
1484
- let config = await readConfig() || {};
1485
- config.MODEL = currentModel;
1486
- await writeConfig(config);
1487
- console.log(`Model changed to: ${currentModel}`);
1488
- } else {
1489
- console.log('Please specify a model. Usage: /model <model_name>');
1490
- }
1491
- rl.prompt();
1492
- return;
1493
- }
1494
-
1495
- if (input.toLowerCase().startsWith('/thoughts')) {
1496
- const parts = input.trim().split(/\s+/);
1497
- const arg = parts[1] ? parts[1].toLowerCase() : '';
1498
- if (arg !== 'on' && arg !== 'off') {
1499
- const state = SHOW_THOUGHTS ? 'on' : 'off';
1500
- ui.showInfo(`Usage: /thoughts on|off (currently ${state})`);
1501
- rl.prompt();
1502
- return;
1503
- }
1504
- const enable = arg === 'on';
1505
- SHOW_THOUGHTS = enable;
1506
- let config = await readConfig() || {};
1507
- config.showThoughts = enable;
1508
- await writeConfig(config);
1509
- ui.showResponse(`Hidden thoughts ${enable ? 'enabled' : 'disabled'}.`);
1510
- rl.prompt();
1511
- return;
1512
- }
1513
-
1514
- if (input.toLowerCase() === '/default-model') {
1515
- currentModel = 'deepseek/deepseek-chat-v3-0324:free';
1516
- let config = await readConfig() || {};
1517
- config.MODEL = currentModel;
1518
- await writeConfig(config);
1519
- console.log(`Model reset to default: ${currentModel}`);
1520
- rl.prompt();
1521
- return;
1522
- }
1387
+ async function chat(rl, useToolCalling, initialModel) {
1388
+ return new Promise((resolve) => {
1389
+ let currentModel = initialModel;
1390
+ const conversation = [];
1523
1391
 
1524
- if (input.toLowerCase() === '/setup') {
1525
- await runSetup(rl, true);
1526
- console.log('\nSetup complete. Please restart the application to apply changes.');
1527
- rl.close();
1528
- return;
1392
+ // Initialize conversation with appropriate system prompt
1393
+ if (useToolCalling) {
1394
+ conversation.push({ role: 'system', content: TOOL_CALLING_PROMPT });
1395
+ } else {
1396
+ conversation.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
1529
1397
  }
1530
1398
 
1531
- // Brainstorm command: /brainstorm <project-name> <description>
1532
- if (input.toLowerCase().startsWith('/brainstorm')) {
1533
- const parts = input.substring('/brainstorm'.length).trim();
1534
- if (!parts) {
1535
- console.log('Usage: /brainstorm <project-name> "<description>"');
1536
- console.log('Example: /brainstorm "My Project" "A cool project that does things"');
1537
- rl.prompt();
1538
- return;
1539
- }
1399
+ console.log('Type your message, or "exit" to quit.');
1540
1400
 
1541
- // Parse project name and description
1542
- const match = parts.match(/^"?([^"]+)"?\s+"?([^"]+)"?$/) || parts.match(/^(\S+)\s+(.+)$/);
1543
- if (!match) {
1544
- console.log('Usage: /brainstorm <project-name> "<description>"');
1545
- rl.prompt();
1546
- return;
1547
- }
1548
-
1549
- const projectName = match[1].trim();
1550
- const projectDescription = match[2].trim();
1551
- const outputDir = path.join(process.cwd(), projectName.replace(/\s+/g, '-').toLowerCase());
1401
+ rl.setPrompt('> ');
1402
+ rl.prompt();
1552
1403
 
1404
+ rl.on('line', async (input) => {
1553
1405
  try {
1554
- ui.showInfo(`Creating brainstorm session for "${projectName}"...`);
1555
- const session = await brainstormCore.quickStart(
1556
- projectName,
1557
- projectDescription,
1558
- outputDir,
1559
- ['CLAUDE-1'] // Single agent by default
1560
- );
1561
-
1562
- ui.showSuccess(`βœ… Brainstorm session created!`);
1563
- console.log(` Session ID: ${session.id}`);
1564
- console.log(` Output: ${outputDir}`);
1565
- console.log(` Files generated: ${Object.keys(session.fileVersions).length}`);
1566
- console.log(`\n Generated files:`);
1567
- Object.keys(session.fileVersions).forEach(f => console.log(` - ${f}`));
1568
- console.log(`\n Use "/finish <summary>" when done to complete the session.`);
1569
-
1570
- // Store current session path for /finish command
1571
- global.currentSessionDir = outputDir;
1572
- } catch (error) {
1573
- ui.showError(`Failed to create brainstorm: ${error.message}`);
1574
- }
1406
+ // Handle empty input - just reprompt
1407
+ if (!input || !input.trim()) {
1408
+ rl.prompt();
1409
+ return;
1410
+ }
1575
1411
 
1576
- rl.prompt();
1577
- return;
1578
- }
1412
+ if (input.toLowerCase().startsWith('/model')) {
1413
+ const newModel = input.split(' ')[1];
1414
+ if (newModel) {
1415
+ currentModel = newModel;
1416
+ let config = await readConfig() || {};
1417
+ config.MODEL = currentModel;
1418
+ await writeConfig(config);
1419
+ console.log(`Model changed to: ${currentModel}`);
1420
+ } else {
1421
+ console.log('Please specify a model. Usage: /model <model_name>');
1422
+ }
1423
+ rl.prompt();
1424
+ return;
1425
+ }
1579
1426
 
1580
- // Finish command: /finish <summary>
1581
- if (input.toLowerCase().startsWith('/finish')) {
1582
- const summary = input.substring('/finish'.length).trim();
1427
+ if (input.toLowerCase().startsWith('/thoughts')) {
1428
+ const parts = input.trim().split(/\s+/);
1429
+ const arg = parts[1] ? parts[1].toLowerCase() : '';
1430
+ if (arg !== 'on' && arg !== 'off') {
1431
+ const state = SHOW_THOUGHTS ? 'on' : 'off';
1432
+ ui.showInfo(`Usage: /thoughts on|off (currently ${state})`);
1433
+ rl.prompt();
1434
+ return;
1435
+ }
1436
+ const enable = arg === 'on';
1437
+ SHOW_THOUGHTS = enable;
1438
+ let config = await readConfig() || {};
1439
+ config.showThoughts = enable;
1440
+ await writeConfig(config);
1441
+ ui.showResponse(`Hidden thoughts ${enable ? 'enabled' : 'disabled'}.`);
1442
+ rl.prompt();
1443
+ return;
1444
+ }
1583
1445
 
1584
- if (!global.currentSessionDir) {
1585
- console.log('No active brainstorm session. Use /brainstorm first.');
1586
- rl.prompt();
1587
- return;
1588
- }
1446
+ if (input.toLowerCase() === '/default-model') {
1447
+ currentModel = 'deepseek/deepseek-chat-v3-0324:free';
1448
+ let config = await readConfig() || {};
1449
+ config.MODEL = currentModel;
1450
+ await writeConfig(config);
1451
+ console.log(`Model reset to default: ${currentModel}`);
1452
+ rl.prompt();
1453
+ return;
1454
+ }
1589
1455
 
1590
- if (!summary) {
1591
- console.log('Usage: /finish "<summary of what was accomplished>"');
1592
- rl.prompt();
1593
- return;
1594
- }
1456
+ if (input.toLowerCase() === '/setup') {
1457
+ await runSetup(rl, true);
1458
+ console.log('\nSetup complete. Please restart the application to apply changes.');
1459
+ rl.close();
1460
+ return;
1461
+ }
1595
1462
 
1596
- try {
1597
- ui.showInfo('Completing brainstorm session...');
1598
- const result = await brainstormCore.finishBrainstorm({
1599
- sessionDir: global.currentSessionDir,
1600
- summary,
1601
- actor: 'CLAUDE-1'
1602
- });
1463
+ if (input.toLowerCase() === 'exit') {
1464
+ rl.close();
1465
+ return;
1466
+ }
1603
1467
 
1604
- if (result.success) {
1605
- ui.showSuccess(`βœ… Session completed!`);
1606
- console.log(` Summary: ${summary}`);
1607
- global.currentSessionDir = null;
1608
- } else {
1609
- ui.showError(`Failed: ${result.errors.join(', ')}`);
1468
+ // Direct Harmony tool-call execution from user input (bypass model)
1469
+ try {
1470
+ const seg = parseSegmentedTranscript(input);
1471
+ if (seg && seg.segmented && seg.recoveredToolCalls && seg.recoveredToolCalls.length) {
1472
+ const messages = [];
1473
+ if (useToolCalling) {
1474
+ messages.push({ role: 'system', content: TOOL_CALLING_PROMPT });
1475
+ } else {
1476
+ messages.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
1477
+ }
1478
+ messages.push({ role: 'user', content: input });
1479
+ ui.startThinking();
1480
+ const results = await handleToolCalls(seg.recoveredToolCalls, messages);
1481
+ ui.stopThinking();
1482
+ if (results.length === 1) {
1483
+ ui.showSuccess(`Tool '${results[0].name}' executed.`);
1484
+ } else {
1485
+ ui.showSuccess(`Executed ${results.length} tool calls.`);
1486
+ }
1487
+ // Show concise outputs
1488
+ results.forEach(r => {
1489
+ const preview = typeof r.content === 'string' ? r.content : JSON.stringify(r.content);
1490
+ ui.showInfo(`${r.name}: ${preview.length > 300 ? preview.slice(0, 300) + '...' : preview}`);
1491
+ });
1492
+ rl.prompt();
1493
+ return;
1494
+ }
1495
+ } catch (e) {
1496
+ // Fall through to normal processing if parsing/execution fails
1610
1497
  }
1611
- } catch (error) {
1612
- ui.showError(`Failed to finish session: ${error.message}`);
1613
- }
1614
1498
 
1615
- rl.prompt();
1616
- return;
1617
- }
1499
+ const result = useToolCalling
1500
+ ? await processQueryWithTools(input, conversation, currentModel)
1501
+ : await processQuery(input, conversation, currentModel);
1502
+ ui.stopThinking();
1503
+ ui.showResponse(result.response);
1618
1504
 
1619
- if (input.toLowerCase() === 'exit') {
1620
- rl.close();
1621
- return;
1622
- }
1505
+ conversation.length = 0;
1506
+ result.conversation.forEach(msg => conversation.push(msg));
1623
1507
 
1624
- // Direct Harmony tool-call execution from user input (bypass model)
1625
- try {
1626
- const seg = parseSegmentedTranscript(input);
1627
- if (seg && seg.segmented && seg.recoveredToolCalls && seg.recoveredToolCalls.length) {
1628
- const messages = [];
1629
- if (useToolCalling) {
1630
- messages.push({ role: 'system', content: TOOL_CALLING_PROMPT });
1631
- } else {
1632
- messages.push({ role: 'system', content: FUNCTION_CALLING_PROMPT });
1633
- }
1634
- messages.push({ role: 'user', content: input });
1635
- ui.startThinking();
1636
- const results = await handleToolCalls(seg.recoveredToolCalls, messages);
1508
+ rl.prompt();
1509
+ } catch (error) {
1637
1510
  ui.stopThinking();
1638
- if (results.length === 1) {
1639
- ui.showSuccess(`Tool '${results[0].name}' executed.`);
1640
- } else {
1641
- ui.showSuccess(`Executed ${results.length} tool calls.`);
1642
- }
1643
- // Show concise outputs
1644
- results.forEach(r => {
1645
- const preview = typeof r.content === 'string' ? r.content : JSON.stringify(r.content);
1646
- ui.showInfo(`${r.name}: ${preview.length > 300 ? preview.slice(0, 300) + '...' : preview}`);
1647
- });
1511
+ ui.showError(`Unexpected error: ${error.message}`);
1512
+ console.error(error);
1648
1513
  rl.prompt();
1649
- return;
1650
- }
1651
- } catch (e) {
1652
- // Fall through to normal processing if parsing/execution fails
1653
- }
1654
-
1655
- try {
1656
- const result = useToolCalling
1657
- ? await processQueryWithTools(input, conversation, currentModel)
1658
- : await processQuery(input, conversation, currentModel);
1659
- ui.stopThinking();
1660
- ui.showResponse(result.response);
1661
-
1662
- conversation.length = 0;
1663
- result.conversation.forEach(msg => conversation.push(msg));
1664
- } catch (error) {
1665
- ui.stopThinking();
1666
- console.error('❌ Error processing request:', error.message);
1667
- if (process.env.DEBUG) {
1668
- console.error(error.stack);
1669
1514
  }
1670
- }
1671
-
1672
- rl.prompt();
1673
- });
1674
-
1675
- // Return a Promise that resolves when readline closes
1676
- return new Promise((resolve) => {
1677
- rl.on('close', () => {
1515
+ }).on('close', () => {
1678
1516
  ui.showResponse('Goodbye!');
1679
1517
  resolve();
1680
1518
  });
@@ -1683,15 +1521,8 @@ async function chat(rl, mode, initialModel) {
1683
1521
 
1684
1522
  function askForMode(rl) {
1685
1523
  return new Promise((resolve) => {
1686
- rl.question('Select mode (1 for tool calling, 2 for function calling, 3 for engineer): ', (answer) => {
1687
- const mode = answer.trim();
1688
- if (mode === '3') {
1689
- resolve('engineer');
1690
- } else if (mode === '2') {
1691
- resolve('function');
1692
- } else {
1693
- resolve('tool');
1694
- }
1524
+ rl.question('Select mode (1 for tool calling, 2 for function calling): ', (answer) => {
1525
+ resolve(answer.trim() === '1');
1695
1526
  });
1696
1527
  });
1697
1528
  }
@@ -1787,14 +1618,11 @@ async function start() {
1787
1618
  console.log('Select Mode:');
1788
1619
  console.log('1. Tool Calling (for models that support it)');
1789
1620
  console.log('2. Function Calling (legacy)');
1790
- console.log('3. Engineer Mode (autonomous engineering with Claude Code style editing)');
1791
1621
 
1792
- const selectedMode = await askForMode(rl);
1793
- const modeNames = { 'tool': 'Tool Calling', 'function': 'Function Calling', 'engineer': 'Engineer' };
1794
- ui.showResponse(`\nStarting in ${modeNames[selectedMode]} mode...\n`);
1622
+ const useToolCalling = await askForMode(rl);
1623
+ ui.showResponse(`\nStarting in ${useToolCalling ? 'Tool Calling' : 'Function Calling'} mode...\n`);
1795
1624
 
1796
- await chat(rl, selectedMode, MODEL);
1797
- process.exit(0);
1625
+ await chat(rl, useToolCalling, MODEL);
1798
1626
  } catch (error) {
1799
1627
  ui.showError(error);
1800
1628
  rl.close();
@@ -1802,12 +1630,4 @@ async function start() {
1802
1630
  }
1803
1631
  }
1804
1632
 
1805
- // Keep the process alive and handle uncaught errors
1806
- process.on('uncaughtException', (err) => {
1807
- console.error('Uncaught Exception:', err.message);
1808
- });
1809
- process.on('unhandledRejection', (reason) => {
1810
- console.error('Unhandled Rejection:', reason);
1811
- });
1812
-
1813
1633
  start().catch(console.error);
package/bin/ui.js CHANGED
@@ -1,7 +1,14 @@
1
1
  const chalk = require('chalk');
2
2
  const ora = require('ora');
3
3
 
4
- const spinner = ora({ text: 'Thinking...', color: 'yellow', spinner: 'pipe' });
4
+ // IMPORTANT: discardStdin must be false to prevent ora from closing stdin
5
+ // which would cause readline to close and the program to exit
6
+ const spinner = ora({
7
+ text: 'Thinking...',
8
+ color: 'yellow',
9
+ spinner: 'pipe',
10
+ discardStdin: false // Critical: prevents stdin interference
11
+ });
5
12
 
6
13
  // ASCII Art for AGI header
7
14
  const AGI_HEADER = `