navada-edge-cli 4.0.0 → 4.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/agent.js CHANGED
@@ -1,6 +1,7 @@
1
1
  'use strict';
2
2
 
3
3
  const { execSync, execFileSync } = require('child_process');
4
+ const crypto = require('crypto');
4
5
  const fs = require('fs');
5
6
  const path = require('path');
6
7
  const os = require('os');
@@ -9,24 +10,64 @@ const http = require('http');
9
10
  const navada = require('navada-edge-sdk');
10
11
  const ui = require('./ui');
11
12
  const config = require('./config');
13
+ const memory = require('./memory');
12
14
 
13
15
  // ---------------------------------------------------------------------------
14
- // NAVADA Edge Agent personality + tools + routing
16
+ // Request helpers — auth, tracing, rate-limit headers
15
17
  // ---------------------------------------------------------------------------
18
+ function generateRequestId() { return `nv_${crypto.randomUUID()}`; }
19
+
20
+ function navadaAuthHeaders() {
21
+ const edgeKey = config.get('edgeKey') || '';
22
+ const headers = {
23
+ 'Content-Type': 'application/json',
24
+ 'X-Request-ID': generateRequestId(),
25
+ 'X-Client-Version': require('../package.json').version,
26
+ };
27
+ if (edgeKey) headers['Authorization'] = `Bearer ${edgeKey}`;
28
+ return headers;
29
+ }
16
30
 
17
- // Strip markdown formatting for clean terminal output
18
- function cleanOutput(text) {
19
- if (typeof text !== 'string') return text;
20
- return text
21
- .replace(/\*\*(.+?)\*\*/g, '$1') // **bold** → plain
22
- .replace(/\*(.+?)\*/g, '$1') // *italic* → plain
23
- .replace(/__(.+?)__/g, '$1') // __underline__ → plain
24
- .replace(/^#{1,6}\s+/gm, '') // ### headers → plain
25
- .replace(/---+/g, '') // horizontal rules → remove
26
- .replace(/\s--\s/g, ' ') // spaced -- → space
27
- .replace(/--/g, ', '); // remaining -- → comma
31
+ // ---------------------------------------------------------------------------
32
+ // Edge Gateway — authenticated requests to NAVADA Azure compute
33
+ // ---------------------------------------------------------------------------
34
+ const EDGE_GATEWAY = 'https://edge-compute.navada-edge-server.uk';
35
+
36
+ async function navadaEdgeRequest(method, reqPath, body) {
37
+ const edgeKey = config.get('edgeKey');
38
+ if (!edgeKey) throw new Error('Not connected. /edge login <key>');
39
+
40
+ const url = new URL(EDGE_GATEWAY + reqPath);
41
+ const transport = url.protocol === 'https:' ? https : http;
42
+ const payload = body ? JSON.stringify(body) : '';
43
+
44
+ return new Promise((resolve, reject) => {
45
+ const headers = {
46
+ ...navadaAuthHeaders(),
47
+ 'Authorization': `Bearer ${edgeKey}`,
48
+ };
49
+ if (payload) headers['Content-Length'] = Buffer.byteLength(payload);
50
+
51
+ const req = transport.request(url, { method, headers, timeout: 60000 }, (res) => {
52
+ let data = '';
53
+ res.on('data', c => data += c);
54
+ res.on('end', () => {
55
+ rateTracker.updateFromServer(res.headers);
56
+ try { resolve({ status: res.statusCode, data: JSON.parse(data), headers: res.headers }); }
57
+ catch { resolve({ status: res.statusCode, data, headers: res.headers }); }
58
+ });
59
+ });
60
+ req.on('error', reject);
61
+ req.on('timeout', () => { req.destroy(); reject(new Error('Timeout')); });
62
+ if (payload) req.write(payload);
63
+ req.end();
64
+ });
28
65
  }
29
66
 
67
+ // ---------------------------------------------------------------------------
68
+ // NAVADA Edge Agent — personality + tools + routing
69
+ // ---------------------------------------------------------------------------
70
+
30
71
  const IDENTITY = {
31
72
  name: 'NAVADA Edge',
32
73
  role: 'AI Infrastructure Agent',
@@ -34,22 +75,20 @@ const IDENTITY = {
34
75
  You are professional, technical, concise, and helpful. You speak with authority about distributed systems, Docker, AI, and cloud infrastructure.
35
76
  You have FULL ACCESS to the user's computer — you CAN and SHOULD use your tools to execute tasks:
36
77
  - shell: run ANY bash, PowerShell, or system command on the user's machine
37
- - read_file / write_file / edit_file / delete_file / list_files: full filesystem CRUD — create, read, edit, delete any file
78
+ - read_file / write_file / list_files: full filesystem access — create, read, modify any file
38
79
  - python_exec / python_pip / python_script: run Python code directly
39
80
  - sandbox_run: run code with syntax-highlighted output
40
81
  - system_info: check CPU, RAM, disk, OS
41
- You also connect to the NAVADA Edge Network (4 nodes via Tailscale VPN):
42
- - lucas_exec / lucas_ssh / lucas_docker: execute commands on remote nodes (EC2, HP, Oracle)
43
- - mcp_call: access 18 MCP tools on the ASUS server
44
- - docker_registry: manage the private Docker registry
45
- - send_email / generate_image: communications and AI image generation
46
- - founder_info: ALWAYS use this tool when asked about Lee Akpareva, his career, education, projects, or NAVADA's founder. It has his full CV. Never guess about Lee, always call this tool.
82
+ You also connect to the NAVADA Edge Network cloud:
83
+ - automation_request: submit automation requests (emails, marketing, builds, schedules)
84
+ - web_search: search the web for information
85
+ - save_memory / recall_memory: persistent memory across sessions
86
+ - screenshot / describe_image: visual perception tools
87
+ - founder_info: information about Lee Akpareva, the creator of NAVADA
47
88
  When users ask you to DO something — DO IT. Use write_file to create files. Use shell to run commands. Never say "I can't" when you have a tool for it.
48
89
  When asked to generate diagrams — use write_file to create Mermaid (.mmd), SVG, or HTML files. You can also use python_exec with matplotlib/graphviz for complex diagrams.
49
90
  When asked to create, edit, or delete files — use the file tools directly. You are a terminal agent with FULL access.
50
- PLATFORM: This machine runs ` + (process.platform === 'win32' ? `Windows. Use Windows paths. Desktop = ${fs.existsSync(path.join(os.homedir(), 'OneDrive', 'Desktop')) ? path.join(os.homedir(), 'OneDrive', 'Desktop') : path.join(os.homedir(), 'Desktop')}. Home = ${os.homedir()}.` : `${process.platform}. Home = ${os.homedir()}.`) + `
51
- Keep responses short. Code blocks when needed. No fluff.
52
- FORMATTING: Never use markdown formatting like **bold**, *italic*, ### headers, or -- dashes. Write plain text only. This is a terminal, not a web page.`,
91
+ Keep responses short. Code blocks when needed. No fluff.`,
53
92
  founder: {
54
93
  name: 'Leslie (Lee) Akpareva',
55
94
  title: 'Principal AI Consultant & Founder, NAVADA Edge Network',
@@ -91,15 +130,6 @@ function getSystemPrompt() {
91
130
  } catch {}
92
131
  }
93
132
 
94
- // Load user's agent.md customisation if it exists
95
- const agentMdPath = path.join(config.CONFIG_DIR, 'agent.md');
96
- let userPrompt = '';
97
- try {
98
- if (fs.existsSync(agentMdPath)) {
99
- userPrompt = fs.readFileSync(agentMdPath, 'utf-8').trim();
100
- }
101
- } catch {}
102
-
103
133
  // Load active sub-agent if selected
104
134
  if (sessionState.subAgent) {
105
135
  const subPath = path.join(config.CONFIG_DIR, 'agents', `${sessionState.subAgent}.md`);
@@ -110,11 +140,42 @@ function getSystemPrompt() {
110
140
  } catch {}
111
141
  }
112
142
 
113
- // Combine: base personality + user customisation
114
- if (userPrompt) {
115
- return `${IDENTITY.personality}\n\n--- USER CUSTOMISATION (from agent.md) ---\n${userPrompt}`;
143
+ let prompt = IDENTITY.personality;
144
+
145
+ // Load soul.md user identity and preferences
146
+ const soulPath = path.join(config.CONFIG_DIR, 'soul.md');
147
+ try {
148
+ if (fs.existsSync(soulPath)) {
149
+ const soul = fs.readFileSync(soulPath, 'utf-8').trim();
150
+ if (soul) prompt += `\n\n--- USER IDENTITY (from soul.md) ---\n${soul}`;
151
+ }
152
+ } catch {}
153
+
154
+ // Load guardrail.md — safety boundaries
155
+ const guardrailPath = path.join(config.CONFIG_DIR, 'guardrail.md');
156
+ try {
157
+ if (fs.existsSync(guardrailPath)) {
158
+ const guardrail = fs.readFileSync(guardrailPath, 'utf-8').trim();
159
+ if (guardrail) prompt += `\n\n--- GUARDRAILS (from guardrail.md) ---\n${guardrail}`;
160
+ }
161
+ } catch {}
162
+
163
+ // Load agent.md — legacy customisation (backwards compat)
164
+ const agentMdPath = path.join(config.CONFIG_DIR, 'agent.md');
165
+ try {
166
+ if (fs.existsSync(agentMdPath)) {
167
+ const userPrompt = fs.readFileSync(agentMdPath, 'utf-8').trim();
168
+ if (userPrompt) prompt += `\n\n--- AGENT CUSTOMISATION (from agent.md) ---\n${userPrompt}`;
169
+ }
170
+ } catch {}
171
+
172
+ // Inject memory context (Tier 2 episodes + Tier 3 knowledge)
173
+ const memoryContext = memory.manager.loadSessionContext();
174
+ if (memoryContext) {
175
+ prompt += `\n\n--- MEMORY (auto-loaded) ---\n${memoryContext}`;
116
176
  }
117
- return IDENTITY.personality;
177
+
178
+ return prompt;
118
179
  }
119
180
 
120
181
  function listSubAgents() {
@@ -135,27 +196,33 @@ const sessionState = {
135
196
  cost: 0,
136
197
  messages: 0,
137
198
  startTime: Date.now(),
138
- learningMode: null, // 'python' | 'csharp' | 'node' | null
139
- subAgent: null, // active sub-agent name (loads from ~/.navada/agents/<name>.md)
140
- history: [], // conversation history for context continuity
199
+ learningMode: null,
200
+ subAgent: null,
201
+ get history() { return memory.working.recentMessages; },
141
202
  };
142
203
 
143
- // Conversation history management
204
+ // Conversation history — powered by 3-tier memory system
144
205
  function addToHistory(role, content) {
145
- sessionState.history.push({ role, content });
146
- // Keep last 40 turns to avoid token overflow
147
- if (sessionState.history.length > 40) {
148
- sessionState.history = sessionState.history.slice(-40);
149
- }
206
+ memory.working.add(role, content);
150
207
  sessionState.messages++;
208
+
209
+ // Auto-extract knowledge from user messages (Tier 3)
210
+ if (role === 'user') {
211
+ const lastAssistant = memory.working.recentMessages
212
+ .filter(m => m.role === 'assistant')
213
+ .pop();
214
+ memory.manager.autoExtract(content, lastAssistant?.content || '');
215
+ }
151
216
  }
152
217
 
153
218
  function getConversationHistory() {
154
- return sessionState.history;
219
+ return memory.working.getContextMessages();
155
220
  }
156
221
 
157
222
  function clearHistory() {
158
- sessionState.history = [];
223
+ // Save episode before clearing (Tier 2)
224
+ memory.manager.saveSessionEpisode();
225
+ memory.working.clear();
159
226
  sessionState.messages = 0;
160
227
  sessionState.tokens = { input: 0, output: 0, total: 0 };
161
228
  sessionState.cost = 0;
@@ -182,6 +249,7 @@ const rateTracker = {
182
249
 
183
250
  remaining() {
184
251
  this.cleanup();
252
+ if (this.serverRemaining !== null && this.serverRemaining !== undefined) return this.serverRemaining;
185
253
  return Math.max(0, this.limit - this.requests.length);
186
254
  },
187
255
 
@@ -189,6 +257,18 @@ const rateTracker = {
189
257
  this.cleanup();
190
258
  return this.requests.length;
191
259
  },
260
+
261
+ updateFromServer(headers) {
262
+ const limit = parseInt(headers['x-ratelimit-limit']);
263
+ const remaining = parseInt(headers['x-ratelimit-remaining']);
264
+ const reset = headers['x-ratelimit-reset'];
265
+ if (!isNaN(limit)) this.limit = limit;
266
+ this.serverRemaining = isNaN(remaining) ? null : remaining;
267
+ this.serverReset = reset || null;
268
+ },
269
+
270
+ serverRemaining: null,
271
+ serverReset: null,
192
272
  };
193
273
 
194
274
  // ---------------------------------------------------------------------------
@@ -239,36 +319,6 @@ const localTools = {
239
319
  },
240
320
  },
241
321
 
242
- editFile: {
243
- description: 'Edit a file by replacing a search string with new content',
244
- execute: (filePath, search, replace) => {
245
- try {
246
- const resolved = path.resolve(filePath);
247
- const content = fs.readFileSync(resolved, 'utf-8');
248
- if (!content.includes(search)) return `Error: search string not found in ${resolved}`;
249
- const updated = content.replace(search, replace);
250
- fs.writeFileSync(resolved, updated);
251
- return `Edited: ${resolved} (replaced ${search.length} chars)`;
252
- } catch (e) { return `Error: ${e.message}`; }
253
- },
254
- },
255
-
256
- deleteFile: {
257
- description: 'Delete a file or empty directory from this machine',
258
- execute: (filePath) => {
259
- try {
260
- const resolved = path.resolve(filePath);
261
- const stat = fs.statSync(resolved);
262
- if (stat.isDirectory()) {
263
- fs.rmdirSync(resolved);
264
- } else {
265
- fs.unlinkSync(resolved);
266
- }
267
- return `Deleted: ${resolved}`;
268
- } catch (e) { return `Error: ${e.message}`; }
269
- },
270
- },
271
-
272
322
  systemInfo: {
273
323
  description: 'Get system information',
274
324
  execute: () => {
@@ -342,21 +392,8 @@ const localTools = {
342
392
  },
343
393
 
344
394
  founderInfo: {
345
- description: 'Answer questions about Lee Akpareva, founder of NAVADA Edge, using his full CV and career history',
346
- execute: (question) => {
347
- try {
348
- const knowledgePath = path.join(__dirname, 'knowledge.py');
349
- const py = process.platform === 'win32' ? 'python' : 'python3';
350
- const openaiKey = config.get('openaiKey') || process.env.OPENAI_API_KEY || '';
351
- const output = execFileSync(py, [knowledgePath, question || 'Who is Lee Akpareva?'], {
352
- timeout: 30000, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'],
353
- env: { ...process.env, OPENAI_API_KEY: openaiKey },
354
- });
355
- return output.trim();
356
- } catch (e) {
357
- return `Error: ${e.stderr?.trim() || e.message}`;
358
- }
359
- },
395
+ description: 'Get information about the NAVADA Edge founder',
396
+ execute: () => JSON.stringify(IDENTITY.founder, null, 2),
360
397
  },
361
398
  };
362
399
 
@@ -377,6 +414,7 @@ async function callFreeTier(messages, stream = false) {
377
414
  const r = await navada.request(endpoint, {
378
415
  method: 'POST',
379
416
  body: { messages },
417
+ headers: navadaAuthHeaders(),
380
418
  timeout: endpoint.includes('navada-edge-server.uk') ? 30000 : 5000,
381
419
  });
382
420
 
@@ -426,11 +464,13 @@ function streamFreeTier(endpoint, messages) {
426
464
  const transport = url.protocol === 'https:' ? https : http;
427
465
  const body = JSON.stringify({ messages, stream: true });
428
466
 
467
+ const authHeaders = navadaAuthHeaders();
429
468
  const req = transport.request(url, {
430
469
  method: 'POST',
431
- headers: { 'Content-Type': 'application/json', 'Content-Length': Buffer.byteLength(body) },
470
+ headers: { ...authHeaders, 'Content-Length': Buffer.byteLength(body) },
432
471
  timeout: endpoint.includes('navada-edge-server.uk') ? 120000 : 10000,
433
472
  }, (res) => {
473
+ rateTracker.updateFromServer(res.headers);
434
474
  // If server doesn't support streaming, collect full response
435
475
  if (!res.headers['content-type']?.includes('text/event-stream')) {
436
476
  let data = '';
@@ -470,7 +510,7 @@ function streamFreeTier(endpoint, messages) {
470
510
  const delta = parsed.choices?.[0]?.delta;
471
511
  // Grok-3-mini streams reasoning_content first, then content — skip reasoning
472
512
  if (delta?.reasoning_content && !delta?.content) continue;
473
- const text = cleanOutput(delta?.content || '');
513
+ const text = delta?.content || '';
474
514
  if (text) {
475
515
  process.stdout.write(text);
476
516
  fullContent += text;
@@ -481,7 +521,6 @@ function streamFreeTier(endpoint, messages) {
481
521
 
482
522
  res.on('end', () => {
483
523
  if (fullContent) process.stdout.write('\n');
484
- sessionState._lastStreamed = true;
485
524
  resolve({ content: fullContent, isRateLimit: false, streamed: true });
486
525
  });
487
526
  });
@@ -550,9 +589,8 @@ function streamAnthropic(key, messages, tools, system) {
550
589
 
551
590
  case 'content_block_delta':
552
591
  if (event.delta?.type === 'text_delta') {
553
- const clean = cleanOutput(event.delta.text);
554
- process.stdout.write(clean);
555
- currentText += clean;
592
+ process.stdout.write(event.delta.text);
593
+ currentText += event.delta.text;
556
594
  } else if (event.delta?.type === 'input_json_delta') {
557
595
  const last = contentBlocks[contentBlocks.length - 1];
558
596
  if (last?.type === 'tool_use') last.input += event.delta.partial_json;
@@ -581,7 +619,6 @@ function streamAnthropic(key, messages, tools, system) {
581
619
 
582
620
  res.on('end', () => {
583
621
  if (contentBlocks.some(b => b.type === 'text')) process.stdout.write('\n');
584
- sessionState._lastStreamed = true;
585
622
  resolve({ content: contentBlocks, stop_reason: stopReason });
586
623
  });
587
624
  });
@@ -644,9 +681,8 @@ function streamOpenAI(key, messages, model = 'gpt-4o') {
644
681
  if (finish) finishReason = finish;
645
682
 
646
683
  if (delta?.content) {
647
- const clean = cleanOutput(delta.content);
648
- process.stdout.write(clean);
649
- fullContent += clean;
684
+ process.stdout.write(delta.content);
685
+ fullContent += delta.content;
650
686
  }
651
687
 
652
688
  // Accumulate tool calls
@@ -668,7 +704,6 @@ function streamOpenAI(key, messages, model = 'gpt-4o') {
668
704
 
669
705
  res.on('end', () => {
670
706
  if (fullContent) process.stdout.write('\n');
671
- sessionState._lastStreamed = true;
672
707
  toolCalls = toolCalls.filter(Boolean);
673
708
  resolve({ content: fullContent, tool_calls: toolCalls, finish_reason: finishReason });
674
709
  });
@@ -684,7 +719,7 @@ function streamOpenAI(key, messages, model = 'gpt-4o') {
684
719
  // ---------------------------------------------------------------------------
685
720
  // Streaming — Google Gemini API (gemini-2.0-flash)
686
721
  // ---------------------------------------------------------------------------
687
- function streamGemini(key, messages, model = 'gemini-2.0-flash') {
722
+ function streamGemini(key, messages, model = 'gemini-2.0-flash', systemPrompt = null) {
688
723
  return new Promise((resolve, reject) => {
689
724
  const contents = messages.map(m => ({
690
725
  role: m.role === 'assistant' ? 'model' : 'user',
@@ -694,7 +729,7 @@ function streamGemini(key, messages, model = 'gemini-2.0-flash') {
694
729
  const body = JSON.stringify({
695
730
  contents,
696
731
  generationConfig: { maxOutputTokens: 4096 },
697
- systemInstruction: { parts: [{ text: getSystemPrompt() }] },
732
+ systemInstruction: { parts: [{ text: systemPrompt || getSystemPrompt() }] },
698
733
  });
699
734
 
700
735
  const url = new URL(`https://generativelanguage.googleapis.com/v1beta/models/${model}:streamGenerateContent?alt=sse&key=${key}`);
@@ -725,7 +760,7 @@ function streamGemini(key, messages, model = 'gemini-2.0-flash') {
725
760
  if (!data) continue;
726
761
  try {
727
762
  const parsed = JSON.parse(data);
728
- const text = cleanOutput(parsed.candidates?.[0]?.content?.parts?.[0]?.text || '');
763
+ const text = parsed.candidates?.[0]?.content?.parts?.[0]?.text || '';
729
764
  if (text) {
730
765
  process.stdout.write(text);
731
766
  fullContent += text;
@@ -736,7 +771,6 @@ function streamGemini(key, messages, model = 'gemini-2.0-flash') {
736
771
 
737
772
  res.on('end', () => {
738
773
  if (fullContent) process.stdout.write('\n');
739
- sessionState._lastStreamed = true;
740
774
  resolve({ content: fullContent });
741
775
  });
742
776
  });
@@ -754,14 +788,18 @@ function openAITools() {
754
788
  { name: 'read_file', description: 'Read the contents of a file on the user\'s machine.', parameters: { type: 'object', properties: { path: { type: 'string', description: 'Absolute or relative file path' } }, required: ['path'] } },
755
789
  { name: 'write_file', description: 'Write content to a file. Creates parent directories if needed. Use for creating new files, scripts, configs, diagrams (Mermaid, SVG, HTML), code files.', parameters: { type: 'object', properties: { path: { type: 'string', description: 'File path to write' }, content: { type: 'string', description: 'Full content to write to the file' } }, required: ['path', 'content'] } },
756
790
  { name: 'list_files', description: 'List files and directories.', parameters: { type: 'object', properties: { path: { type: 'string', description: 'Directory path (default: current dir)' } } } },
757
- { name: 'edit_file', description: 'Edit a file by finding and replacing text. Use for targeted edits.', parameters: { type: 'object', properties: { path: { type: 'string', description: 'File path' }, search: { type: 'string', description: 'Exact text to find' }, replace: { type: 'string', description: 'Replacement text' } }, required: ['path', 'search', 'replace'] } },
758
- { name: 'delete_file', description: 'Delete a file or empty directory.', parameters: { type: 'object', properties: { path: { type: 'string', description: 'Path to delete' } }, required: ['path'] } },
759
791
  { name: 'system_info', description: 'Get local system information (CPU, RAM, disk, OS, hostname).', parameters: { type: 'object', properties: {} } },
760
792
  { name: 'python_exec', description: 'Execute Python code inline. Use for data analysis, calculations, generating content, processing files, ML tasks.', parameters: { type: 'object', properties: { code: { type: 'string', description: 'Python code to execute' } }, required: ['code'] } },
761
793
  { name: 'python_pip', description: 'Install a Python package via pip.', parameters: { type: 'object', properties: { package: { type: 'string', description: 'Package name' } }, required: ['package'] } },
762
794
  { name: 'python_script', description: 'Run a Python script file.', parameters: { type: 'object', properties: { path: { type: 'string', description: 'Path to .py file' } }, required: ['path'] } },
763
795
  { name: 'sandbox_run', description: 'Run code in an isolated sandbox with syntax highlighting. Supports javascript, python, typescript.', parameters: { type: 'object', properties: { code: { type: 'string' }, language: { type: 'string', description: 'javascript, python, or typescript' } }, required: ['code'] } },
764
- { name: 'founder_info', description: 'Answer questions about Lee Akpareva (founder of NAVADA Edge) using his full CV and career history. Always use this tool when asked about Lee, his career, experience, education, or projects.', parameters: { type: 'object', properties: { question: { type: 'string', description: 'The question about Lee' } }, required: ['question'] } },
796
+ { name: 'automation_request', description: 'Submit automation request for review. Types: email, marketing, build, data, schedule.', parameters: { type: 'object', properties: { title: { type: 'string' }, description: { type: 'string' }, type: { type: 'string' }, schedule: { type: 'string' } }, required: ['title', 'description'] } },
797
+ { name: 'web_search', description: 'Search the web.', parameters: { type: 'object', properties: { query: { type: 'string' } }, required: ['query'] } },
798
+ { name: 'save_memory', description: 'Save to persistent memory.', parameters: { type: 'object', properties: { key: { type: 'string' }, value: { type: 'string' } }, required: ['key', 'value'] } },
799
+ { name: 'recall_memory', description: 'Recall saved memories.', parameters: { type: 'object', properties: { key: { type: 'string' } } } },
800
+ { name: 'screenshot', description: 'Take a screenshot.', parameters: { type: 'object', properties: { output: { type: 'string' } } } },
801
+ { name: 'describe_image', description: 'Analyze an image with AI vision.', parameters: { type: 'object', properties: { path: { type: 'string' }, question: { type: 'string' } }, required: ['path'] } },
802
+ { name: 'founder_info', description: 'Get information about Lee Akpareva, founder of NAVADA Edge.', parameters: { type: 'object', properties: {} } },
765
803
  ];
766
804
  return defs.map(d => ({ type: 'function', function: d }));
767
805
  }
@@ -779,7 +817,10 @@ async function openAIChat(key, userMessage, conversationHistory = []) {
779
817
  response = await streamOpenAI(key, messages, model);
780
818
  } catch (e) {
781
819
  if (e.message.includes('401') || e.message.includes('429') || e.message.includes('billing')) {
782
- sessionState._openaiWarned = true;
820
+ if (!sessionState._openaiWarned) {
821
+ console.log(ui.warn('OpenAI API unavailable, using Grok free tier. /login with a valid key to switch.'));
822
+ sessionState._openaiWarned = true;
823
+ }
783
824
  return grokChat(userMessage, conversationHistory);
784
825
  }
785
826
  throw e;
@@ -827,15 +868,65 @@ function detectIntent(message) {
827
868
  }
828
869
 
829
870
  // ---------------------------------------------------------------------------
830
- // Anthropic Claude API conversational agent with tool use
871
+ // Prompt-based tool calling for providers without native function calling
831
872
  // ---------------------------------------------------------------------------
832
- async function chat(userMessage, conversationHistory = []) {
833
- // Local action interceptor — file/folder ops work on ALL tiers without LLM
834
- const localResult = tryLocalAction(userMessage);
835
- if (localResult) {
836
- return `${localResult}\n\nWhat would you like to do next?`;
873
+ const TOOL_PROMPT_SUFFIX = `
874
+
875
+ You have access to these tools. To use a tool, respond with a JSON block:
876
+ \`\`\`tool
877
+ {"name": "tool_name", "input": {"param": "value"}}
878
+ \`\`\`
879
+
880
+ Available tools:
881
+ - shell: Execute shell command. Input: {"command": "string"}
882
+ - read_file: Read file. Input: {"path": "string"}
883
+ - write_file: Write file. Input: {"path": "string", "content": "string"}
884
+ - list_files: List directory. Input: {"path": "string"}
885
+ - system_info: System info. Input: {}
886
+ - python_exec: Run Python. Input: {"code": "string"}
887
+ - python_pip: Install pip package. Input: {"package": "string"}
888
+ - python_script: Run .py file. Input: {"path": "string"}
889
+ - sandbox_run: Run code in sandbox. Input: {"code": "string", "language": "javascript|python|typescript"}
890
+ - automation_request: Submit automation request. Input: {"title": "string", "description": "string", "type": "email|marketing|build|data|schedule|custom", "schedule": "daily|weekly|cron|on-demand"}
891
+ - web_search: Web search. Input: {"query": "string"}
892
+ - save_memory: Save memory. Input: {"key": "string", "value": "string"}
893
+ - recall_memory: Recall memory. Input: {"key": "string"} (key optional)
894
+ - screenshot: Screenshot. Input: {"output": "filepath"}
895
+ - describe_image: Analyze image. Input: {"path": "string", "question": "string"}
896
+ - founder_info: NAVADA founder info. Input: {}
897
+
898
+ After receiving a tool result, continue your response. Use multiple tools in sequence if needed.
899
+ If no tool needed, respond normally without the tool block.`;
900
+
901
+ function getToolEnhancedSystemPrompt() {
902
+ return getSystemPrompt() + TOOL_PROMPT_SUFFIX;
903
+ }
904
+
905
+ async function parseAndExecuteTools(content) {
906
+ const toolPattern = /```tool\s*\n?([\s\S]*?)\n?```/g;
907
+ let match;
908
+ let hasTools = false;
909
+ const toolResults = [];
910
+
911
+ while ((match = toolPattern.exec(content)) !== null) {
912
+ hasTools = true;
913
+ try {
914
+ const toolCall = JSON.parse(match[1].trim());
915
+ console.log(ui.dim(` [${toolCall.name}] ${JSON.stringify(toolCall.input || {}).slice(0, 80)}`));
916
+ const result = await executeTool(toolCall.name, toolCall.input || {});
917
+ toolResults.push({ name: toolCall.name, result: typeof result === 'string' ? result : JSON.stringify(result) });
918
+ } catch (e) {
919
+ toolResults.push({ name: 'error', result: `Tool parse error: ${e.message}` });
920
+ }
837
921
  }
838
922
 
923
+ return { hasTools, toolResults };
924
+ }
925
+
926
+ // ---------------------------------------------------------------------------
927
+ // Anthropic Claude API — conversational agent with tool use
928
+ // ---------------------------------------------------------------------------
929
+ async function chat(userMessage, conversationHistory = []) {
839
930
  const anthropicKey = config.get('anthropicKey') || process.env.ANTHROPIC_API_KEY || '';
840
931
  const openaiKey = config.get('openaiKey') || process.env.OPENAI_API_KEY || '';
841
932
  const nvidiaKey = config.get('nvidiaKey') || process.env.NVIDIA_API_KEY || '';
@@ -878,12 +969,29 @@ async function chat(userMessage, conversationHistory = []) {
878
969
  ...conversationHistory.map(m => ({ role: m.role, content: typeof m.content === 'string' ? m.content : JSON.stringify(m.content) })),
879
970
  { role: 'user', content: userMessage },
880
971
  ];
972
+ process.stdout.write(ui.dim(' NAVADA > '));
881
973
  try {
882
- process.stdout.write(ui.dim(' '));
883
- const result = await streamGemini(effectiveGeminiKey, messages, geminiModel);
974
+ let result = await streamGemini(effectiveGeminiKey, messages, geminiModel, getToolEnhancedSystemPrompt());
975
+
976
+ // Prompt-based tool calling loop
977
+ let iterations = 0;
978
+ while (iterations < 5) {
979
+ const { hasTools, toolResults } = await parseAndExecuteTools(result.content);
980
+ if (!hasTools) break;
981
+ iterations++;
982
+ const toolResultText = toolResults.map(t => `Tool "${t.name}" returned:\n${t.result}`).join('\n\n');
983
+ messages.push({ role: 'assistant', content: result.content });
984
+ messages.push({ role: 'user', content: `Tool results:\n${toolResultText}\n\nContinue your response.` });
985
+ process.stdout.write(ui.dim(' NAVADA > '));
986
+ result = await streamGemini(effectiveGeminiKey, messages, geminiModel, getToolEnhancedSystemPrompt());
987
+ }
988
+
884
989
  return result.content;
885
990
  } catch (e) {
886
- sessionState._geminiWarned = true;
991
+ if (!sessionState._geminiWarned) {
992
+ console.log(ui.warn('Gemini API unavailable, using Grok free tier.'));
993
+ sessionState._geminiWarned = true;
994
+ }
887
995
  return grokChat(userMessage, conversationHistory);
888
996
  }
889
997
  }
@@ -898,8 +1006,22 @@ async function chat(userMessage, conversationHistory = []) {
898
1006
  ...conversationHistory.map(m => ({ role: m.role, content: typeof m.content === 'string' ? m.content : JSON.stringify(m.content) })),
899
1007
  { role: 'user', content: userMessage },
900
1008
  ];
901
- process.stdout.write(ui.dim(' '));
902
- const result = await streamNvidia(effectiveNvidiaKey, messages, nvidiaModel);
1009
+ process.stdout.write(ui.dim(' NAVADA > '));
1010
+ let result = await streamNvidia(effectiveNvidiaKey, messages, nvidiaModel, getToolEnhancedSystemPrompt());
1011
+
1012
+ // Prompt-based tool calling loop
1013
+ let iterations = 0;
1014
+ while (iterations < 5) {
1015
+ const { hasTools, toolResults } = await parseAndExecuteTools(result.content);
1016
+ if (!hasTools) break;
1017
+ iterations++;
1018
+ const toolResultText = toolResults.map(t => `Tool "${t.name}" returned:\n${t.result}`).join('\n\n');
1019
+ messages.push({ role: 'assistant', content: result.content });
1020
+ messages.push({ role: 'user', content: `Tool results:\n${toolResultText}\n\nContinue your response based on these results.` });
1021
+ process.stdout.write(ui.dim(' NAVADA > '));
1022
+ result = await streamNvidia(effectiveNvidiaKey, messages, nvidiaModel, getToolEnhancedSystemPrompt());
1023
+ }
1024
+
903
1025
  return result.content;
904
1026
  }
905
1027
 
@@ -937,60 +1059,48 @@ async function chat(userMessage, conversationHistory = []) {
937
1059
  description: 'List files and directories.',
938
1060
  input_schema: { type: 'object', properties: { path: { type: 'string', description: 'Directory path (default: current dir)' } } },
939
1061
  },
940
- {
941
- name: 'edit_file',
942
- description: 'Edit a file by finding and replacing text. Use for targeted edits without rewriting the whole file.',
943
- input_schema: { type: 'object', properties: { path: { type: 'string', description: 'File path' }, search: { type: 'string', description: 'Exact text to find' }, replace: { type: 'string', description: 'Text to replace it with' } }, required: ['path', 'search', 'replace'] },
944
- },
945
- {
946
- name: 'delete_file',
947
- description: 'Delete a file or empty directory from the user\'s machine.',
948
- input_schema: { type: 'object', properties: { path: { type: 'string', description: 'File or directory path to delete' } }, required: ['path'] },
949
- },
950
1062
  {
951
1063
  name: 'system_info',
952
1064
  description: 'Get local system information (CPU, RAM, disk, OS, hostname).',
953
1065
  input_schema: { type: 'object', properties: {} },
954
1066
  },
1067
+ // ── Automation Pipeline ──
955
1068
  {
956
- name: 'network_status',
957
- description: 'Ping all NAVADA Edge Network nodes and cloud services.',
958
- input_schema: { type: 'object', properties: {} },
959
- },
960
- {
961
- name: 'lucas_exec',
962
- description: 'Run a bash command on EC2 via Lucas CTO agent.',
963
- input_schema: { type: 'object', properties: { command: { type: 'string' } }, required: ['command'] },
964
- },
965
- {
966
- name: 'lucas_ssh',
967
- description: 'SSH to a NAVADA Edge node (hp, ec2, oracle) and run a command via Lucas CTO.',
968
- input_schema: { type: 'object', properties: { node: { type: 'string' }, command: { type: 'string' } }, required: ['node', 'command'] },
1069
+ name: 'automation_request',
1070
+ description: 'Submit an automation request to the NAVADA Edge team. Requests are queued for review and setup. Use for: scheduled emails, marketing campaigns, recurring tasks, app builds, data pipelines.',
1071
+ input_schema: { type: 'object', properties: {
1072
+ title: { type: 'string', description: 'Short title for the automation' },
1073
+ description: { type: 'string', description: 'Detailed description of what to automate' },
1074
+ type: { type: 'string', description: 'Type: email, marketing, build, data, schedule, custom' },
1075
+ schedule: { type: 'string', description: 'When/how often: daily, weekly, cron expression, one-time' },
1076
+ }, required: ['title', 'description'] },
969
1077
  },
970
1078
  {
971
- name: 'lucas_docker',
972
- description: 'Run a command inside a Docker container on EC2 via Lucas CTO.',
973
- input_schema: { type: 'object', properties: { container: { type: 'string' }, command: { type: 'string' } }, required: ['container', 'command'] },
1079
+ name: 'web_search',
1080
+ description: 'Search the web for information.',
1081
+ input_schema: { type: 'object', properties: { query: { type: 'string', description: 'Search query' } }, required: ['query'] },
974
1082
  },
1083
+ // ── Memory Tools ──
975
1084
  {
976
- name: 'mcp_call',
977
- description: 'Call a tool on the NAVADA Edge MCP server (18 tools: docker, ssh, files, database, monitoring).',
978
- input_schema: { type: 'object', properties: { tool: { type: 'string' }, args: { type: 'object' } }, required: ['tool'] },
1085
+ name: 'save_memory',
1086
+ description: 'Save information to persistent memory for future sessions. Use for important context, preferences, or facts the user wants remembered.',
1087
+ input_schema: { type: 'object', properties: { key: { type: 'string', description: 'Memory key (e.g. "preferred_language", "project_name")' }, value: { type: 'string', description: 'The information to remember' } }, required: ['key', 'value'] },
979
1088
  },
980
1089
  {
981
- name: 'docker_registry',
982
- description: 'List images or tags in the NAVADA private Docker registry.',
983
- input_schema: { type: 'object', properties: { image: { type: 'string', description: 'Image name for tags (optional — omit to list all)' } } },
1090
+ name: 'recall_memory',
1091
+ description: 'Recall previously saved memories. Use when user references past conversations or saved context.',
1092
+ input_schema: { type: 'object', properties: { key: { type: 'string', description: 'Specific key to recall (optional — omit to list all)' } } },
984
1093
  },
1094
+ // ── Perception Tools ──
985
1095
  {
986
- name: 'send_email',
987
- description: 'Send an email via the NAVADA Edge MCP email tool.',
988
- input_schema: { type: 'object', properties: { to: { type: 'string' }, subject: { type: 'string' }, body: { type: 'string' } }, required: ['to', 'subject', 'body'] },
1096
+ name: 'screenshot',
1097
+ description: 'Take a screenshot of the current screen and save it.',
1098
+ input_schema: { type: 'object', properties: { output: { type: 'string', description: 'Output file path (default: screenshot.png)' } } },
989
1099
  },
990
1100
  {
991
- name: 'generate_image',
992
- description: 'Generate an image using Cloudflare Flux (FREE) or DALL-E.',
993
- input_schema: { type: 'object', properties: { prompt: { type: 'string' }, provider: { type: 'string', description: 'flux (default, free) or dalle' } }, required: ['prompt'] },
1101
+ name: 'describe_image',
1102
+ description: 'Describe or analyze an image file using AI vision.',
1103
+ input_schema: { type: 'object', properties: { path: { type: 'string', description: 'Path to image file' }, question: { type: 'string', description: 'What to analyze about the image' } }, required: ['path'] },
994
1104
  },
995
1105
  {
996
1106
  name: 'python_exec',
@@ -1014,8 +1124,8 @@ async function chat(userMessage, conversationHistory = []) {
1014
1124
  },
1015
1125
  {
1016
1126
  name: 'founder_info',
1017
- description: 'Answer questions about Lee Akpareva (founder of NAVADA Edge) using his full CV and career history. Always use this tool when asked about Lee, his career, experience, education, certifications, or projects.',
1018
- input_schema: { type: 'object', properties: { question: { type: 'string', description: 'The question about Lee' } }, required: ['question'] },
1127
+ description: 'Get information about Lee Akpareva, founder of NAVADA Edge Network. Use when asked about the creator, founder, Lee, or who made NAVADA.',
1128
+ input_schema: { type: 'object', properties: {} },
1019
1129
  },
1020
1130
  ];
1021
1131
 
@@ -1032,7 +1142,10 @@ async function chat(userMessage, conversationHistory = []) {
1032
1142
  const errMsg = e.message || '';
1033
1143
  // If billing/rate limit/auth error, fall back to free tier
1034
1144
  if (errMsg.includes('400') || errMsg.includes('401') || errMsg.includes('429') || errMsg.includes('usage limits')) {
1035
- sessionState._anthropicWarned = true;
1145
+ if (!sessionState._anthropicWarned) {
1146
+ console.log(ui.warn('Anthropic API unavailable, using Grok free tier. /login with a valid key to switch.'));
1147
+ sessionState._anthropicWarned = true;
1148
+ }
1036
1149
  return grokChat(userMessage, conversationHistory);
1037
1150
  }
1038
1151
  throw e;
@@ -1068,22 +1181,100 @@ async function executeTool(name, input) {
1068
1181
  case 'read_file': return localTools.readFile.execute(input.path);
1069
1182
  case 'write_file': return localTools.writeFile.execute(input.path, input.content);
1070
1183
  case 'list_files': return localTools.listFiles.execute(input.path);
1071
- case 'edit_file': return localTools.editFile.execute(input.path, input.search, input.replace);
1072
- case 'delete_file': return localTools.deleteFile.execute(input.path);
1073
1184
  case 'system_info': return localTools.systemInfo.execute();
1074
- case 'network_status': return JSON.stringify(await navada.network.ping());
1075
- case 'lucas_exec': return JSON.stringify(await navada.lucas.exec(input.command));
1076
- case 'lucas_ssh': return JSON.stringify(await navada.lucas.ssh(input.node, input.command));
1077
- case 'lucas_docker': return JSON.stringify(await navada.lucas.docker(input.container, input.command));
1078
- case 'mcp_call': return JSON.stringify(await navada.mcp.call(input.tool, input.args || {}));
1079
- case 'docker_registry':
1080
- if (input.image) return JSON.stringify(await navada.registry.tags(input.image));
1081
- return JSON.stringify(await navada.registry.catalog());
1082
- case 'send_email': return JSON.stringify(await navada.mcp.call('send-email', input));
1083
- case 'generate_image':
1084
- if (input.provider === 'dalle') return JSON.stringify(await navada.ai.openai.image(input.prompt));
1085
- const { size } = await navada.cloudflare.flux.generate(input.prompt, { savePath: `navada-${Date.now()}.png` });
1086
- return `Image generated: ${size} bytes`;
1185
+ case 'automation_request': {
1186
+ try {
1187
+ const edgeKey = config.get('edgeKey');
1188
+ const userId = config.get('edgeUserId') || 'anonymous';
1189
+ const email = config.get('edgeEmail') || '';
1190
+ const requestId = `req_${crypto.randomUUID().slice(0, 8)}`;
1191
+ const request = {
1192
+ id: requestId,
1193
+ title: input.title,
1194
+ description: input.description,
1195
+ type: input.type || 'custom',
1196
+ schedule: input.schedule || 'on-demand',
1197
+ userId,
1198
+ email,
1199
+ status: 'pending',
1200
+ submittedAt: new Date().toISOString(),
1201
+ };
1202
+ // Submit to NAVADA queue API
1203
+ const r = await navadaEdgeRequest('POST', '/api/v1/queue/automation', request);
1204
+ if (r.status === 201 || r.status === 200) {
1205
+ return `Automation request submitted!\n ID: ${requestId}\n Title: ${input.title}\n Status: Pending review\n\nYou'll receive an email once your automation is set up.`;
1206
+ }
1207
+ return `Request submitted locally (ID: ${requestId}). Server confirmation pending.`;
1208
+ } catch (e) {
1209
+ // Save locally if API unavailable
1210
+ const reqDir = path.join(config.CONFIG_DIR, 'requests');
1211
+ if (!fs.existsSync(reqDir)) fs.mkdirSync(reqDir, { recursive: true });
1212
+ const requestId = `req_${Date.now()}`;
1213
+ const request = { id: requestId, title: input.title, description: input.description, type: input.type || 'custom', schedule: input.schedule || 'on-demand', status: 'queued_locally', submittedAt: new Date().toISOString() };
1214
+ fs.writeFileSync(path.join(reqDir, `${requestId}.json`), JSON.stringify(request, null, 2));
1215
+ return `Request saved locally (ID: ${requestId}). Will sync when connected.\nCheck status: /requests`;
1216
+ }
1217
+ }
1218
+ case 'web_search': {
1219
+ try {
1220
+ const r = await navadaEdgeRequest('POST', '/search', { query: input.query });
1221
+ return r.data?.results ? JSON.stringify(r.data.results) : JSON.stringify(r.data);
1222
+ } catch (e) { return `Search error: ${e.message}`; }
1223
+ }
1224
+ // ── Memory tools ──
1225
+ case 'save_memory': {
1226
+ // Tier 3 — save to semantic knowledge base
1227
+ const category = input.key?.includes('pref') ? 'preferences'
1228
+ : input.key?.includes('person') || input.key?.includes('name') ? 'people'
1229
+ : input.key?.includes('decision') ? 'decisions'
1230
+ : 'facts';
1231
+ memory.knowledge.add(category, `${input.key}: ${input.value}`);
1232
+ return `Remembered: "${input.key}" → "${input.value}"`;
1233
+ }
1234
+ case 'recall_memory': {
1235
+ if (input.key) {
1236
+ // Search across all knowledge
1237
+ const results = memory.knowledge.search(input.key, 5);
1238
+ if (results.length === 0) {
1239
+ // Also check episodes
1240
+ const episodes = memory.episodic.search(input.key);
1241
+ if (episodes.length > 0) {
1242
+ return episodes.map(e => `[${e.date}] ${e.summary}`).join('\n');
1243
+ }
1244
+ return `No memories found for: "${input.key}"`;
1245
+ }
1246
+ return results.map(r => `[${r.category}] ${r.content}`).join('\n');
1247
+ }
1248
+ // No key — return knowledge summary + episode count
1249
+ const stats = memory.knowledge.stats();
1250
+ const epCount = memory.episodic.count();
1251
+ const summary = memory.knowledge.getSummary();
1252
+ const statsLine = Object.entries(stats).map(([k, v]) => `${k}: ${v}`).join(', ');
1253
+ return `Memory: ${statsLine}, episodes: ${epCount}\n${summary || 'No knowledge stored yet.'}`;
1254
+ }
1255
+ // ── Perception tools ──
1256
+ case 'screenshot': {
1257
+ try {
1258
+ const outPath = path.resolve(input.output || 'screenshot.png');
1259
+ const py = process.platform === 'win32' ? 'python' : 'python3';
1260
+ execFileSync(py, ['-c', `from PIL import ImageGrab; img = ImageGrab.grab(); img.save(r'${outPath}')`], { timeout: 15000, encoding: 'utf-8' });
1261
+ return `Screenshot saved: ${outPath}`;
1262
+ } catch (e) { return `Screenshot failed: ${e.message}. Install Pillow: pip install Pillow`; }
1263
+ }
1264
+ case 'describe_image': {
1265
+ try {
1266
+ const imgPath = path.resolve(input.path);
1267
+ if (!fs.existsSync(imgPath)) return `Image not found: ${imgPath}`;
1268
+ const imgData = fs.readFileSync(imgPath).toString('base64');
1269
+ const mimeType = imgPath.endsWith('.png') ? 'image/png' : 'image/jpeg';
1270
+ const edgeKey = config.get('edgeKey');
1271
+ if (edgeKey) {
1272
+ const r = await navadaEdgeRequest('POST', '/vision', { image: imgData, mimeType, question: input.question || 'Describe this image.' });
1273
+ if (r.status === 200) return r.data?.description || JSON.stringify(r.data);
1274
+ }
1275
+ return `Image loaded (${(imgData.length / 1024).toFixed(0)}KB). Vision API requires Edge connection (/edge login).`;
1276
+ } catch (e) { return `Vision error: ${e.message}`; }
1277
+ }
1087
1278
  case 'python_exec': return localTools.pythonExec.execute(input.code);
1088
1279
  case 'python_pip': return localTools.pythonPip.execute(input.package);
1089
1280
  case 'python_script': return localTools.pythonScript.execute(input.path);
@@ -1094,7 +1285,7 @@ async function executeTool(name, input) {
1094
1285
  displayOutput(result);
1095
1286
  return result.error ? `Error (exit ${result.exitCode}): ${result.error}` : result.output;
1096
1287
  }
1097
- case 'founder_info': return localTools.founderInfo.execute(input.question);
1288
+ case 'founder_info': return localTools.founderInfo.execute();
1098
1289
  default: return `Unknown tool: ${name}`;
1099
1290
  }
1100
1291
  } catch (e) {
@@ -1102,110 +1293,6 @@ async function executeTool(name, input) {
1102
1293
  }
1103
1294
  }
1104
1295
 
1105
- // ---------------------------------------------------------------------------
1106
- // Local action interceptor — executes file/shell actions WITHOUT needing LLM tool use
1107
- // This ensures free tier users can still create, read, edit, delete files
1108
- // ---------------------------------------------------------------------------
1109
- function tryLocalAction(userMessage) {
1110
- const msg = userMessage.trim();
1111
- const home = os.homedir();
1112
- // Windows OneDrive redirects Desktop — check OneDrive first
1113
- const oneDriveDesktop = path.join(home, 'OneDrive', 'Desktop');
1114
- const desktop = (process.platform === 'win32' && fs.existsSync(oneDriveDesktop)) ? oneDriveDesktop : path.join(home, 'Desktop');
1115
-
1116
- // Resolve a location phrase to an absolute path (use ORIGINAL case, not lowered)
1117
- function resolveLocation(phrase) {
1118
- const p = phrase.trim().replace(/[""'.,!]/g, '');
1119
- const low = p.toLowerCase();
1120
- if (low === 'my desktop' || low === 'the desktop' || low === 'desktop') return desktop;
1121
- if (low === 'home' || low === 'my home' || low === 'home directory') return home;
1122
- if (p.startsWith('~')) return p.replace(/^~[/\\]?/, home + path.sep);
1123
- if (path.isAbsolute(p)) return p;
1124
- return path.join(process.cwd(), p);
1125
- }
1126
-
1127
- // Extract the ORIGINAL-CASE name from the original message using a case-insensitive match
1128
- // We match on the original message to preserve casing
1129
- let m;
1130
-
1131
- // ── Create folder/directory ──
1132
- // Pattern: "create a folder called NAME on my desktop"
1133
- m = msg.match(/(?:create|make|new)\s+(?:a\s+)?(?:new\s+)?(?:folder|directory|dir)\s+(?:called|named)\s+[""']?([^""']+?)[""']?\s+(?:on|at|in)\s+(.+?)$/i);
1134
- if (m) {
1135
- const name = m[1].trim();
1136
- const loc = resolveLocation(m[2]);
1137
- const resolved = path.join(loc, name);
1138
- try { fs.mkdirSync(resolved, { recursive: true }); return `Created folder: ${resolved}`; }
1139
- catch (e) { return null; }
1140
- }
1141
-
1142
- // Pattern: "create a folder on my desktop called NAME"
1143
- m = msg.match(/(?:create|make|new)\s+(?:a\s+)?(?:new\s+)?(?:folder|directory|dir)\s+(?:on|at|in)\s+(.+?)\s+(?:called|named)\s+[""']?([^""']+?)[""']?\s*$/i);
1144
- if (m) {
1145
- const loc = resolveLocation(m[1]);
1146
- const name = m[2].trim();
1147
- const resolved = path.join(loc, name);
1148
- try { fs.mkdirSync(resolved, { recursive: true }); return `Created folder: ${resolved}`; }
1149
- catch (e) { return null; }
1150
- }
1151
-
1152
- // Pattern: "create a folder called NAME" (no location — use cwd, or desktop if mentioned earlier)
1153
- m = msg.match(/(?:create|make|new)\s+(?:a\s+)?(?:new\s+)?(?:folder|directory|dir)\s+(?:called|named)\s+[""']?([^""']+?)[""']?\s*$/i);
1154
- if (m) {
1155
- const name = m[1].trim();
1156
- const loc = msg.toLowerCase().includes('desktop') ? desktop : process.cwd();
1157
- const resolved = path.join(loc, name);
1158
- try { fs.mkdirSync(resolved, { recursive: true }); return `Created folder: ${resolved}`; }
1159
- catch (e) { return null; }
1160
- }
1161
-
1162
- // Pattern: "create a new folder NAME on my desktop" (no "called/named")
1163
- m = msg.match(/(?:create|make|new)\s+(?:a\s+)?(?:new\s+)?(?:folder|directory|dir)\s+([A-Za-z0-9_\-. ]+?)\s+(?:on|at|in)\s+(.+?)$/i);
1164
- if (m) {
1165
- const name = m[1].trim();
1166
- const loc = resolveLocation(m[2]);
1167
- const resolved = path.join(loc, name);
1168
- try { fs.mkdirSync(resolved, { recursive: true }); return `Created folder: ${resolved}`; }
1169
- catch (e) { return null; }
1170
- }
1171
-
1172
- // ── Create file ──
1173
- m = msg.match(/(?:create|make|new|touch)\s+(?:a\s+)?(?:new\s+)?(?:file)\s+(?:called|named)\s+[""']?([^""']+?)[""']?\s+(?:on|at|in)\s+(.+?)$/i);
1174
- if (m) {
1175
- const resolved = path.join(resolveLocation(m[2]), m[1].trim());
1176
- return localTools.writeFile.execute(resolved, '');
1177
- }
1178
- m = msg.match(/(?:create|make|new|touch)\s+(?:a\s+)?(?:new\s+)?(?:file)\s+(?:called|named)\s+[""']?([^""']+?)[""']?\s*$/i);
1179
- if (m) {
1180
- const loc = msg.toLowerCase().includes('desktop') ? desktop : process.cwd();
1181
- return localTools.writeFile.execute(path.join(loc, m[1].trim()), '');
1182
- }
1183
-
1184
- // ── Read file ──
1185
- m = msg.match(/(?:read|show|display|cat|open)\s+(?:the\s+)?(?:file\s+)?[""']?([^""']+\.\w{1,5})[""']?/i);
1186
- if (m) {
1187
- const p = m[1].trim();
1188
- const filePath = path.isAbsolute(p) ? p : path.join(process.cwd(), p);
1189
- return localTools.readFile.execute(filePath);
1190
- }
1191
-
1192
- // ── Delete file/folder ──
1193
- m = msg.match(/(?:delete|remove|rm)\s+(?:the\s+)?(?:file|folder|directory)\s+[""']?([^""']+?)[""']?\s*$/i);
1194
- if (m) {
1195
- const p = m[1].trim();
1196
- const filePath = path.isAbsolute(p) ? p : path.join(process.cwd(), p);
1197
- return localTools.deleteFile.execute(filePath);
1198
- }
1199
-
1200
- // ── List files ──
1201
- m = msg.match(/(?:list|show|ls|dir)\s+(?:the\s+)?(?:files|contents|items)\s+(?:in|on|at|of)\s+(.+)/i);
1202
- if (m) {
1203
- return localTools.listFiles.execute(resolveLocation(m[1]));
1204
- }
1205
-
1206
- return null;
1207
- }
1208
-
1209
1296
  async function grokChat(userMessage, conversationHistory = []) {
1210
1297
  const messages = [
1211
1298
  ...conversationHistory.slice(-20).map(m => ({
@@ -1225,6 +1312,7 @@ async function grokChat(userMessage, conversationHistory = []) {
1225
1312
  const r = await navada.request(endpoint, {
1226
1313
  method: 'POST',
1227
1314
  body: { messages, tools },
1315
+ headers: navadaAuthHeaders(),
1228
1316
  timeout: 120000,
1229
1317
  });
1230
1318
 
@@ -1269,6 +1357,7 @@ async function grokChat(userMessage, conversationHistory = []) {
1269
1357
  const r = await navada.request(endpoint, {
1270
1358
  method: 'POST',
1271
1359
  body: { messages, tools },
1360
+ headers: navadaAuthHeaders(),
1272
1361
  timeout: 120000,
1273
1362
  });
1274
1363
  if (r.status !== 200) break;
@@ -1279,7 +1368,8 @@ async function grokChat(userMessage, conversationHistory = []) {
1279
1368
 
1280
1369
  // Extract final text
1281
1370
  const content = response?.choices?.[0]?.message?.content || '';
1282
- return cleanOutput(content) || 'No response.';
1371
+ if (content) console.log(` ${content}`);
1372
+ return content || 'No response.';
1283
1373
  }
1284
1374
 
1285
1375
  async function fallbackChat(msg) {
@@ -1310,6 +1400,14 @@ async function fallbackChat(msg) {
1310
1400
  // ---------------------------------------------------------------------------
1311
1401
  let _updateInfo = null;
1312
1402
 
1403
+ // Auto-save session episode on exit
1404
+ process.on('beforeExit', () => {
1405
+ try { memory.manager.saveSessionEpisode(); } catch {}
1406
+ });
1407
+ process.on('SIGINT', () => {
1408
+ try { memory.manager.saveSessionEpisode(); } catch {}
1409
+ });
1410
+
1313
1411
  async function checkForUpdate() {
1314
1412
  try {
1315
1413
  const pkg = require('../package.json');
@@ -1335,6 +1433,7 @@ async function reportTelemetry(event, data = {}) {
1335
1433
  try {
1336
1434
  await navada.request(base + '/api/agent-heartbeat', {
1337
1435
  method: 'POST',
1436
+ headers: navadaAuthHeaders(),
1338
1437
  body: {
1339
1438
  agent: 'navada-edge-cli',
1340
1439
  event,
@@ -1354,4 +1453,4 @@ async function reportTelemetry(event, data = {}) {
1354
1453
  }
1355
1454
  }
1356
1455
 
1357
- module.exports = { IDENTITY, chat, localTools, reportTelemetry, fallbackChat, checkForUpdate, getUpdateInfo, rateTracker, sessionState, addToHistory, getConversationHistory, clearHistory, listSubAgents };
1456
+ module.exports = { IDENTITY, chat, localTools, reportTelemetry, fallbackChat, checkForUpdate, getUpdateInfo, rateTracker, sessionState, addToHistory, getConversationHistory, clearHistory, listSubAgents, memory };