0agent 1.0.45 → 1.0.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/bin/chat.js +168 -21
  2. package/dist/daemon.mjs +50 -26
  3. package/package.json +1 -1
package/bin/chat.js CHANGED
@@ -130,6 +130,117 @@ const C = {
130
130
  const fmt = (color, text) => `${color}${text}${C.reset}`;
131
131
  const clearLine = () => process.stdout.write('\r\x1b[2K');
132
132
 
133
+ // ─── Markdown renderer ────────────────────────────────────────────────────────
134
+ // Applied to the full streamed response at session.completed — rewrites raw
135
+ // LLM output with ANSI formatting (bold, code, headers, bullets).
136
+ function renderMarkdown(text) {
137
+ const lines = text.split('\n');
138
+ const out = [];
139
+ let inCode = false;
140
+ let codeLang = '';
141
+
142
+ for (const raw of lines) {
143
+ if (raw.startsWith('```')) {
144
+ inCode = !inCode;
145
+ codeLang = inCode ? raw.slice(3).trim() : '';
146
+ if (!inCode) out.push(''); // blank after code block
147
+ continue;
148
+ }
149
+ if (inCode) {
150
+ out.push(` \x1b[36m${raw}\x1b[0m`);
151
+ continue;
152
+ }
153
+
154
+ let line = raw;
155
+
156
+ // Headers
157
+ if (line.startsWith('### ')) line = `\x1b[1m${line.slice(4)}\x1b[0m`;
158
+ else if (line.startsWith('## ')) line = `\x1b[1;4m${line.slice(3)}\x1b[0m`;
159
+ else if (line.startsWith('# ')) line = `\x1b[1;4m${line.slice(2)}\x1b[0m`;
160
+ // Bullets
161
+ else if (/^[-*] /.test(line)) line = ` \x1b[36m·\x1b[0m ${line.slice(2)}`;
162
+ else if (/^\d+\. /.test(line)) line = ` ${line}`;
163
+ // Horizontal rule
164
+ else if (/^-{3,}$/.test(line)) line = `\x1b[2m${'─'.repeat(54)}\x1b[0m`;
165
+
166
+ // Inline: bold, code, italic
167
+ line = line
168
+ .replace(/\*\*([^*\n]+)\*\*/g, '\x1b[1m$1\x1b[0m')
169
+ .replace(/`([^`\n]+)`/g, '\x1b[36m$1\x1b[0m')
170
+ .replace(/\*([^*\s][^*\n]*)\*/g,'\x1b[3m$1\x1b[0m');
171
+
172
+ out.push(' ' + line);
173
+ }
174
+ return out.join('\n');
175
+ }
176
+
177
+ // ─── Step formatter ───────────────────────────────────────────────────────────
178
+ // Converts raw step labels from AgentExecutor into icon + clean readable form.
179
+ function formatStep(step) {
180
+ const ICONS = {
181
+ shell_exec: `\x1b[33m⚡\x1b[0m`,
182
+ file_op: `\x1b[34m◈\x1b[0m`,
183
+ web_search: `\x1b[35m⌕\x1b[0m`,
184
+ scrape_url: `\x1b[35m◎\x1b[0m`,
185
+ memory_write: `\x1b[32m◆\x1b[0m`,
186
+ browser_open: `\x1b[34m◉\x1b[0m`,
187
+ };
188
+
189
+ // Tool call: "▶ shell_exec("cmd")"
190
+ const toolMatch = step.match(/^▶\s+(\w+)\((.{0,100})\)/);
191
+ if (toolMatch) {
192
+ const [, tool, args] = toolMatch;
193
+ const icon = ICONS[tool] ?? fmt(C.dim, '›');
194
+ const clean = args.replace(/^["'](.*)["']$/, '$1').replace(/\\n/g, ' ').slice(0, 72);
195
+ return ` ${icon} \x1b[2m${clean}\x1b[0m`;
196
+ }
197
+
198
+ // Result: " ↳ text"
199
+ if (/^\s*↳/.test(step)) {
200
+ const text = step.replace(/^\s*↳\s*/, '');
201
+ return ` \x1b[2m↳ ${text.slice(0, 100)}\x1b[0m`;
202
+ }
203
+
204
+ // Thinking / Continuing (suppress — replaced by startSession static status)
205
+ if (/^(Thinking|Continuing)/.test(step)) return null;
206
+
207
+ // Summary lines (Done, Files written, Commands run)
208
+ if (/^(Done|Files|Commands)/.test(step))
209
+ return ` \x1b[2m${step}\x1b[0m`;
210
+
211
+ return ` \x1b[2m› ${step}\x1b[0m`;
212
+ }
213
+
214
+ // ─── Cost estimator ───────────────────────────────────────────────────────────
215
+ function estimateCost(model, tokens) {
216
+ const RATES = { // $ per 1M tokens (blended in+out)
217
+ 'claude-sonnet-4-6': 4.0, 'claude-opus-4-6': 22.0, 'claude-haiku-4-5': 0.5,
218
+ 'gpt-4o': 5.0, 'gpt-4o-mini': 0.2, 'grok-3': 3.0,
219
+ 'gemini-2.0-flash': 0.12, 'gemini-2.0-pro': 3.5,
220
+ };
221
+ if (!model || !tokens) return '';
222
+ const key = Object.keys(RATES).find(k => String(model).includes(k));
223
+ if (!key) return '';
224
+ const usd = (tokens / 1_000_000) * RATES[key];
225
+ return usd < 0.01 ? ' · <$0.01' : ` · $${usd.toFixed(3)}`;
226
+ }
227
+
228
+ // ─── OS notification ──────────────────────────────────────────────────────────
229
+ async function notifyDone(task, success) {
230
+ try {
231
+ const { execSync } = await import('node:child_process');
232
+ const title = success ? '0agent ✓' : '0agent ✗';
233
+ const body = task.replace(/'/g, '').slice(0, 80);
234
+ if (process.platform === 'darwin') {
235
+ execSync(`osascript -e 'display notification "${body}" with title "${title}"'`,
236
+ { stdio: 'ignore', timeout: 3000 });
237
+ } else if (process.platform === 'linux') {
238
+ execSync(`notify-send "${title}" "${body}" 2>/dev/null`,
239
+ { stdio: 'ignore', timeout: 3000 });
240
+ }
241
+ } catch {}
242
+ }
243
+
133
244
  // ─── LLM ping — direct 1-token call, bypasses daemon, instant ────────────────
134
245
  async function pingLLM(provider) {
135
246
  const key = provider.api_key ?? '';
@@ -195,25 +306,30 @@ function getCurrentProvider(cfg) {
195
306
  }
196
307
 
197
308
  // ─── State ────────────────────────────────────────────────────────────────────
198
- let cfg = loadConfig();
309
+ let cfg = loadConfig();
199
310
  let sessionId = null;
200
311
  const messageQueue = []; // queued tasks while session is running
201
312
  let lastFailedTask = null; // for retry-on-abort
202
313
  let streaming = false;
314
+ let streamLineCount = 0; // newlines printed during streaming (for re-render)
203
315
  let ws = null;
204
316
  let wsReady = false;
205
317
  let pendingResolve = null;
206
318
  let lineBuffer = '';
319
+ let currentTask = ''; // task being executed (for notifications)
320
+ let sessionStartMs = 0; // when current session started (for elapsed time)
207
321
  const spinner = new Spinner('Thinking');
208
- const history = []; // command history for arrow keys
322
+ const history = []; // command history for arrow keys
209
323
 
210
324
  // ─── Header ──────────────────────────────────────────────────────────────────
211
325
  function printHeader() {
212
326
  const provider = getCurrentProvider(cfg);
213
327
  const modelStr = provider ? `${provider.provider}/${provider.model}` : 'no model';
328
+ const ws = cfg?.workspace?.path ?? null;
214
329
  console.log();
215
- console.log(fmt(C.bold, ' 0agent') + fmt(C.dim, ` ${modelStr}`));
216
- console.log(fmt(C.dim, ' Type a task or /command — press Tab to browse, / to see all.\n'));
330
+ console.log(` ${fmt(C.bold, '0agent')} ${fmt(C.dim, '·')} ${fmt(C.cyan, modelStr)}`);
331
+ if (ws) console.log(fmt(C.dim, ` ${ws}`));
332
+ console.log(fmt(C.dim, '\n Type a task, or / for commands.\n'));
217
333
  }
218
334
 
219
335
  function printInsights() {
@@ -252,23 +368,26 @@ function handleWsEvent(event) {
252
368
  switch (event.type) {
253
369
  case 'session.step': {
254
370
  spinner.stop();
255
- if (streaming) { process.stdout.write('\n'); streaming = false; }
256
- // Clear current readline line, print step, then restore › prompt
257
- process.stdout.write('\r\x1b[2K');
258
- console.log(` ${fmt(C.dim, '')} ${event.step}`);
371
+ if (streaming) { process.stdout.write('\n'); streaming = false; streamLineCount = 0; }
372
+ const formatted = formatStep(event.step);
373
+ if (formatted !== null) {
374
+ process.stdout.write('\r\x1b[2K');
375
+ console.log(formatted);
376
+ }
259
377
  spinner.startSession(event.step.slice(0, 50));
260
- rl.prompt(true); // restore › so user can keep typing
378
+ rl.prompt(true);
261
379
  break;
262
380
  }
263
381
  case 'session.token': {
264
382
  spinner.stop();
265
383
  if (!streaming) {
266
- // Clear › prompt line before streaming response
267
- process.stdout.write('\r\x1b[2K\n ');
384
+ process.stdout.write('\r\x1b[2K\n');
268
385
  streaming = true;
386
+ streamLineCount = 1;
269
387
  }
270
388
  process.stdout.write(event.token);
271
389
  lineBuffer += event.token;
390
+ streamLineCount += (event.token.match(/\n/g) || []).length;
272
391
  break;
273
392
  }
274
393
  case 'runtime.heal_proposal': {
@@ -353,10 +472,29 @@ function handleWsEvent(event) {
353
472
  }
354
473
  case 'session.completed': {
355
474
  spinner.stop();
356
- if (streaming) { process.stdout.write('\n'); streaming = false; }
357
- const r = event.result ?? {};
358
- if (r.files_written?.length) console.log(`\n ${fmt(C.green, '✓')} ${r.files_written.join(', ')}`);
359
- if (r.tokens_used) process.stdout.write(fmt(C.dim, `\n ${r.tokens_used} tokens · ${r.model ?? ''}\n`));
475
+
476
+ // Re-render streamed response with markdown (rewind cursor, clear, reprint)
477
+ if (streaming) {
478
+ const rendered = renderMarkdown(lineBuffer.trim());
479
+ const rewound = streamLineCount + 1;
480
+ process.stdout.write(`\x1b[${rewound}A\x1b[0J`); // move up + clear to end
481
+ process.stdout.write(rendered + '\n');
482
+ streaming = false;
483
+ streamLineCount = 0;
484
+ }
485
+
486
+ const r = event.result ?? {};
487
+ const elapsed = sessionStartMs ? `${((Date.now() - sessionStartMs) / 1000).toFixed(1)}s` : '';
488
+ const cost = estimateCost(r.model, r.tokens_used);
489
+
490
+ if (r.files_written?.length)
491
+ console.log(`\n ${fmt(C.green, '✓')} ${r.files_written.join(', ')}`);
492
+
493
+ // Stats line: tokens · model · elapsed · cost
494
+ if (r.tokens_used) {
495
+ process.stdout.write(fmt(C.dim,
496
+ `\n ${r.tokens_used.toLocaleString()} tokens · ${r.model ?? ''}${elapsed ? ` · ${elapsed}` : ''}${cost}\n`));
497
+ }
360
498
 
361
499
  // Contextual next-step suggestions
362
500
  const suggestions = _suggestNext(lineBuffer, r);
@@ -366,18 +504,22 @@ function handleWsEvent(event) {
366
504
  );
367
505
  }
368
506
 
369
- // Confirm server if port mentioned
507
+ // OS notification for tasks that took > 8s (user may have switched windows)
508
+ if (sessionStartMs && Date.now() - sessionStartMs > 8000) {
509
+ notifyDone(currentTask, true);
510
+ }
511
+
370
512
  confirmServer(r, lineBuffer);
371
513
  lineBuffer = '';
372
514
  if (pendingResolve) { pendingResolve(); pendingResolve = null; }
373
515
  sessionId = null;
374
- // auto-drain queued messages
375
516
  drainQueue();
376
517
  break;
377
518
  }
378
519
  case 'session.failed': {
379
520
  spinner.stop();
380
- if (streaming) { process.stdout.write('\n'); streaming = false; }
521
+ if (streaming) { process.stdout.write('\n'); streaming = false; streamLineCount = 0; }
522
+ if (sessionStartMs && Date.now() - sessionStartMs > 8000) notifyDone(currentTask, false);
381
523
  const isAbort = /aborted|timeout|AbortError/i.test(event.error ?? '');
382
524
  console.log(`\n ${fmt(C.red, '✗')} ${event.error}\n`);
383
525
  // Offer retry if it was a timeout/abort
@@ -478,7 +620,9 @@ async function runTask(input) {
478
620
  body: JSON.stringify(body),
479
621
  });
480
622
  const s = await res.json();
481
- sessionId = s.session_id ?? s.id;
623
+ sessionId = s.session_id ?? s.id;
624
+ sessionStartMs = Date.now();
625
+ currentTask = task;
482
626
  // Start session-mode status (no \r animation) then restore › so user can type
483
627
  process.stdout.write('\n');
484
628
  spinner.startSession('Thinking');
@@ -513,8 +657,11 @@ async function runTask(input) {
513
657
  const steps = session.steps ?? [];
514
658
  for (let j = lastPolledStep; j < steps.length; j++) {
515
659
  spinner.stop();
516
- process.stdout.write('\r\x1b[2K');
517
- console.log(` \x1b[2m›\x1b[0m ${steps[j].description}`);
660
+ const formatted = formatStep(steps[j].description);
661
+ if (formatted !== null) {
662
+ process.stdout.write('\r\x1b[2K');
663
+ console.log(formatted);
664
+ }
518
665
  spinner.startSession(steps[j].description.slice(0, 50));
519
666
  rl.prompt(true);
520
667
  }
package/dist/daemon.mjs CHANGED
@@ -2493,8 +2493,9 @@ var init_MemoryCapability = __esm({
2493
2493
  "use strict";
2494
2494
  init_src();
2495
2495
  MemoryCapability = class {
2496
- constructor(graph) {
2496
+ constructor(graph, onWrite) {
2497
2497
  this.graph = graph;
2498
+ this.onWrite = onWrite;
2498
2499
  }
2499
2500
  name = "memory_write";
2500
2501
  description = "Persist a discovered fact to long-term memory so it survives across sessions.";
@@ -2537,11 +2538,13 @@ var init_MemoryCapability = __esm({
2537
2538
  });
2538
2539
  this.graph.addNode(node);
2539
2540
  }
2540
- return {
2541
+ const result = {
2541
2542
  success: true,
2542
2543
  output: `Remembered: "${label}" = ${content.slice(0, 120)}${content.length > 120 ? "\u2026" : ""}`,
2543
2544
  duration_ms: Date.now() - start
2544
2545
  };
2546
+ this.onWrite?.();
2547
+ return result;
2545
2548
  } catch (err) {
2546
2549
  return {
2547
2550
  success: false,
@@ -2651,7 +2654,7 @@ var init_CapabilityRegistry = __esm({
2651
2654
  * task_type: browser_task). The main agent does NOT have direct access
2652
2655
  * to browser_open without going through a subagent spawn.
2653
2656
  */
2654
- constructor(codespaceManager, graph) {
2657
+ constructor(codespaceManager, graph, onMemoryWrite) {
2655
2658
  this.register(new WebSearchCapability());
2656
2659
  if (codespaceManager) {
2657
2660
  try {
@@ -2667,7 +2670,7 @@ var init_CapabilityRegistry = __esm({
2667
2670
  this.register(new ShellCapability());
2668
2671
  this.register(new FileCapability());
2669
2672
  if (graph) {
2670
- this.register(new MemoryCapability(graph));
2673
+ this.register(new MemoryCapability(graph, onMemoryWrite));
2671
2674
  }
2672
2675
  }
2673
2676
  register(cap) {
@@ -2734,7 +2737,7 @@ var init_AgentExecutor = __esm({
2734
2737
  this.maxIterations = config.max_iterations ?? 20;
2735
2738
  this.maxCommandMs = config.max_command_ms ?? 3e4;
2736
2739
  this.agentRoot = config.agent_root;
2737
- this.registry = new CapabilityRegistry(void 0, config.graph);
2740
+ this.registry = new CapabilityRegistry(void 0, config.graph, config.onMemoryWrite);
2738
2741
  }
2739
2742
  cwd;
2740
2743
  maxIterations;
@@ -4735,7 +4738,7 @@ var SessionManager = class {
4735
4738
  if (activeLLM?.isConfigured) {
4736
4739
  const executor = new AgentExecutor(
4737
4740
  activeLLM,
4738
- { cwd: this.cwd, agent_root: this.agentRoot, graph: this.graph },
4741
+ { cwd: this.cwd, agent_root: this.agentRoot, graph: this.graph, onMemoryWrite: this.onMemoryWritten },
4739
4742
  // step callback → emit session.step events
4740
4743
  (step) => this.addStep(sessionId, step),
4741
4744
  // token callback → emit session.token events
@@ -4901,33 +4904,48 @@ Current task:`;
4901
4904
  */
4902
4905
  async _extractAndPersistFacts(task, output, llm) {
4903
4906
  if (!this.graph || !llm.isConfigured) return;
4907
+ const combined = `${task} ${output}`;
4908
+ if (combined.trim().length < 20) return;
4904
4909
  const prompt = `Extract factual entities from this conversation that should be remembered long-term.
4905
- Return ONLY a JSON array, no other text, max 12 items.
4910
+ Return ONLY a valid JSON array (no markdown, no explanation), max 12 items.
4911
+ If nothing worth remembering, return [].
4906
4912
 
4907
4913
  Types: identity (name/role), project (apps/products), tech (stack/tools), preference, url, path, config, outcome
4908
4914
 
4909
- Format: [{"label":"snake_case_key","content":"value to remember","type":"type"}]
4915
+ Format: [{"label":"snake_case_key","content":"value","type":"type"}]
4910
4916
 
4911
4917
  Examples:
4912
- - User says "my name is Sahil" \u2192 {"label":"user_name","content":"Sahil","type":"identity"}
4913
- - User says "we have a telegram bot" \u2192 {"label":"project_telegram_bot","content":"user has a Telegram bot project","type":"project"}
4914
- - User says "I use React and Next.js" \u2192 {"label":"tech_stack","content":"React, Next.js","type":"tech"}
4918
+ - "my name is Sahil" \u2192 {"label":"user_name","content":"Sahil","type":"identity"}
4919
+ - "we have a telegram bot" \u2192 {"label":"project_telegram_bot","content":"user has a Telegram bot","type":"project"}
4920
+ - "I use React and Next.js" \u2192 {"label":"tech_stack","content":"React, Next.js","type":"tech"}
4921
+ - ngrok URL found \u2192 {"label":"ngrok_url","content":"https://abc.ngrok.io","type":"url"}
4915
4922
 
4916
4923
  Conversation:
4917
4924
  User: ${task.slice(0, 600)}
4918
- Agent: ${output.slice(0, 400)}`;
4925
+ Agent: ${output.slice(0, 500)}`;
4919
4926
  try {
4920
4927
  const resp = await llm.complete(
4921
4928
  [{ role: "user", content: prompt }],
4922
- "You are a concise memory extraction system. Extract only factual, durable information. Skip generic statements."
4929
+ "You are a memory extraction system. Be concise. Extract only factual, durable information. Return valid JSON only."
4923
4930
  );
4924
- const jsonMatch = resp.content.match(/\[[\s\S]*?\]/);
4925
- if (!jsonMatch) return;
4926
- const entities = JSON.parse(jsonMatch[0]);
4931
+ let entities = [];
4932
+ const raw = resp.content.trim();
4933
+ try {
4934
+ const parsed = JSON.parse(raw);
4935
+ if (Array.isArray(parsed)) entities = parsed;
4936
+ } catch {
4937
+ const match = raw.match(/\[\s*\{[\s\S]*?\}\s*\]/);
4938
+ if (match) {
4939
+ try {
4940
+ entities = JSON.parse(match[0]);
4941
+ } catch {
4942
+ }
4943
+ }
4944
+ }
4927
4945
  if (!Array.isArray(entities) || entities.length === 0) return;
4928
4946
  let wrote = 0;
4929
4947
  for (const e of entities.slice(0, 12)) {
4930
- if (!e.label?.trim() || !e.content?.trim()) continue;
4948
+ if (!e?.label?.trim() || !e?.content?.trim()) continue;
4931
4949
  const nodeId = `memory:${e.label.toLowerCase().replace(/[^a-z0-9_]/g, "_")}`;
4932
4950
  try {
4933
4951
  const existing = this.graph.getNode(nodeId);
@@ -4946,14 +4964,16 @@ Agent: ${output.slice(0, 400)}`;
4946
4964
  }));
4947
4965
  }
4948
4966
  wrote++;
4949
- } catch {
4967
+ } catch (err) {
4968
+ console.warn(`[0agent] Memory write failed for "${e.label}":`, err instanceof Error ? err.message : err);
4950
4969
  }
4951
4970
  }
4952
4971
  if (wrote > 0) {
4953
- console.log(`[0agent] Memory: extracted ${wrote} facts from session`);
4972
+ console.log(`[0agent] Memory: persisted ${wrote} facts \u2192 graph`);
4954
4973
  this.onMemoryWritten?.();
4955
4974
  }
4956
- } catch {
4975
+ } catch (err) {
4976
+ console.warn("[0agent] Memory extraction failed:", err instanceof Error ? err.message : String(err));
4957
4977
  }
4958
4978
  }
4959
4979
  /**
@@ -7303,20 +7323,24 @@ var ZeroAgentDaemon = class {
7303
7323
  adapter: this.adapter,
7304
7324
  agentRoot,
7305
7325
  // agent source path — self-improvement tasks read the right files
7306
- // Mark GitHub memory dirty immediately when facts are extracted pushes within 2min
7326
+ // Push to GitHub immediately when facts are extracted from a session
7307
7327
  onMemoryWritten: () => {
7308
7328
  this.githubMemorySync?.markDirty();
7329
+ if (this.githubMemorySync) {
7330
+ this.githubMemorySync.push("sync: new facts learned").then((r) => {
7331
+ if (r.pushed) console.log(`[0agent] Memory pushed: ${r.nodes_synced} nodes \u2192 github`);
7332
+ }).catch(() => {
7333
+ });
7334
+ }
7309
7335
  }
7310
7336
  });
7311
7337
  const teamSync = identity && teams.length > 0 ? new TeamSync(teamManager, this.adapter, identity.entity_node_id) : null;
7312
7338
  if (this.githubMemorySync) {
7313
7339
  const memSync = this.githubMemorySync;
7314
7340
  this.memorySyncTimer = setInterval(async () => {
7315
- if (memSync.hasPendingChanges()) {
7316
- const result = await memSync.push().catch(() => null);
7317
- if (result?.pushed) {
7318
- console.log(`[0agent] Memory auto-synced: ${result.nodes_synced} nodes`);
7319
- }
7341
+ const result = await memSync.push().catch(() => null);
7342
+ if (result?.pushed && result.nodes_synced > 0) {
7343
+ console.log(`[0agent] Memory synced: ${result.nodes_synced} nodes \u2192 github`);
7320
7344
  }
7321
7345
  }, 2 * 60 * 1e3);
7322
7346
  if (typeof this.memorySyncTimer === "object") this.memorySyncTimer.unref?.();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "0agent",
3
- "version": "1.0.45",
3
+ "version": "1.0.47",
4
4
  "description": "A persistent, learning AI agent that runs on your machine. An agent that learns.",
5
5
  "private": false,
6
6
  "license": "Apache-2.0",