@openanonymity/nanomem 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. package/README.md +194 -0
  2. package/package.json +85 -0
  3. package/src/backends/BaseStorage.js +177 -0
  4. package/src/backends/filesystem.js +177 -0
  5. package/src/backends/indexeddb.js +208 -0
  6. package/src/backends/ram.js +113 -0
  7. package/src/backends/schema.js +42 -0
  8. package/src/bullets/bulletIndex.js +125 -0
  9. package/src/bullets/compaction.js +109 -0
  10. package/src/bullets/index.js +16 -0
  11. package/src/bullets/normalize.js +241 -0
  12. package/src/bullets/parser.js +199 -0
  13. package/src/bullets/scoring.js +53 -0
  14. package/src/cli/auth.js +323 -0
  15. package/src/cli/commands.js +411 -0
  16. package/src/cli/config.js +120 -0
  17. package/src/cli/diff.js +68 -0
  18. package/src/cli/help.js +84 -0
  19. package/src/cli/output.js +269 -0
  20. package/src/cli/spinner.js +54 -0
  21. package/src/cli.js +178 -0
  22. package/src/engine/compactor.js +247 -0
  23. package/src/engine/executors.js +152 -0
  24. package/src/engine/ingester.js +229 -0
  25. package/src/engine/retriever.js +414 -0
  26. package/src/engine/toolLoop.js +176 -0
  27. package/src/imports/chatgpt.js +160 -0
  28. package/src/imports/index.js +14 -0
  29. package/src/imports/markdown.js +104 -0
  30. package/src/imports/oaFastchat.js +124 -0
  31. package/src/index.js +199 -0
  32. package/src/llm/anthropic.js +264 -0
  33. package/src/llm/openai.js +179 -0
  34. package/src/prompt_sets/conversation/ingestion.js +51 -0
  35. package/src/prompt_sets/document/ingestion.js +43 -0
  36. package/src/prompt_sets/index.js +31 -0
  37. package/src/types.js +382 -0
  38. package/src/utils/portability.js +174 -0
  39. package/types/backends/BaseStorage.d.ts +42 -0
  40. package/types/backends/filesystem.d.ts +11 -0
  41. package/types/backends/indexeddb.d.ts +12 -0
  42. package/types/backends/ram.d.ts +8 -0
  43. package/types/backends/schema.d.ts +14 -0
  44. package/types/bullets/bulletIndex.d.ts +47 -0
  45. package/types/bullets/compaction.d.ts +10 -0
  46. package/types/bullets/index.d.ts +36 -0
  47. package/types/bullets/normalize.d.ts +95 -0
  48. package/types/bullets/parser.d.ts +31 -0
  49. package/types/bullets/scoring.d.ts +12 -0
  50. package/types/engine/compactor.d.ts +27 -0
  51. package/types/engine/executors.d.ts +46 -0
  52. package/types/engine/ingester.d.ts +29 -0
  53. package/types/engine/retriever.d.ts +50 -0
  54. package/types/engine/toolLoop.d.ts +9 -0
  55. package/types/imports/chatgpt.d.ts +14 -0
  56. package/types/imports/index.d.ts +3 -0
  57. package/types/imports/markdown.d.ts +31 -0
  58. package/types/imports/oaFastchat.d.ts +30 -0
  59. package/types/index.d.ts +21 -0
  60. package/types/llm/anthropic.d.ts +16 -0
  61. package/types/llm/openai.d.ts +16 -0
  62. package/types/prompt_sets/conversation/ingestion.d.ts +7 -0
  63. package/types/prompt_sets/document/ingestion.d.ts +7 -0
  64. package/types/prompt_sets/index.d.ts +11 -0
  65. package/types/types.d.ts +293 -0
  66. package/types/utils/portability.d.ts +33 -0
@@ -0,0 +1,411 @@
1
+ /**
2
+ * CLI command implementations — thin wrappers around the library API.
3
+ */
4
+
5
+ import { readFile, writeFile, readdir, stat } from 'node:fs/promises';
6
+ import { resolve, join } from 'node:path';
7
+ import { serialize, toZip } from '../utils/portability.js';
8
+ import { safeDateIso } from '../bullets/normalize.js';
9
+ import { extractSessionsFromOAFastchatExport } from '../imports/oaFastchat.js';
10
+ import { isChatGptExport, parseChatGptExport } from '../imports/chatgpt.js';
11
+ import { parseMarkdownFiles } from '../imports/markdown.js';
12
+ import { loginInteractive } from './auth.js';
13
+ import { writeConfigFile, CONFIG_PATH } from './config.js';
14
+ import { createSpinner } from './spinner.js';
15
+ import { printFileDiff } from './diff.js';
16
+
17
+ // ─── Helpers ─────────────────────────────────────────────────────
18
+
19
/**
 * Read all of stdin and return it as a single UTF-8 string.
 * Buffers are concatenated before decoding so multi-byte characters
 * split across chunks are decoded correctly.
 * @returns {Promise<string>}
 */
async function readStdin() {
  const buffers = [];
  for await (const piece of process.stdin) {
    buffers.push(piece);
  }
  return Buffer.concat(buffers).toString('utf-8');
}
24
+
25
/**
 * Recursively collect every `.md` file under `dirPath`.
 * @param {string} dirPath
 * @returns {Promise<Array<{path: string, content: string}>>} relative path + file text
 */
async function readMarkdownDir(dirPath) {
  const collected = [];
  for (const relPath of await readdir(dirPath, { recursive: true })) {
    if (!relPath.endsWith('.md')) continue;
    const absPath = join(dirPath, relPath);
    // `recursive: true` also yields directory entries; keep regular files only.
    if (!(await stat(absPath)).isFile()) continue;
    collected.push({ path: relPath, content: await readFile(absPath, 'utf-8') });
  }
  return collected;
}
38
+
39
/**
 * Build a timestamped export file path in the current working directory.
 * @param {string} format - 'zip' for a ZIP archive; anything else gets '.txt'.
 * @returns {string} absolute path like .../memory-export-<timestamp>.<ext>
 */
function buildExportPath(format) {
  // ':' and '.' are not filename-safe on every platform; swap them for '-'.
  const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
  const extension = format === 'zip' ? 'zip' : 'txt';
  return resolve(process.cwd(), `memory-export-${timestamp}.${extension}`);
}
44
+
45
+ /**
46
+ * Parse input into one or more conversations.
47
+ * Returns [{ title, messages }] to handle both single and multi-session inputs.
48
+ *
49
+ * Auto-detects:
50
+ * 1. OA Fastchat export → { data: { chats: { sessions, messages } } }
51
+ * 2. ChatGPT export → [{ mapping, current_node, title, ... }]
52
+ * 3. JSON messages array → [{ role, content }]
53
+ * 4. Plain text → User: / Assistant: line format
54
+ */
55
+ function parseConversations(input, flags) {
56
+ const trimmed = input.trim();
57
+
58
+ // Try JSON
59
+ try {
60
+ const parsed = JSON.parse(trimmed);
61
+
62
+ // OA Fastchat export
63
+ if (parsed?.data?.chats?.sessions) {
64
+ const sessions = extractSessionsFromOAFastchatExport(parsed, {
65
+ sessionId: flags['session-id'],
66
+ sessionTitle: flags['session-title'],
67
+ });
68
+ return sessions.map(s => ({
69
+ title: s.session.title || s.session.id || 'untitled',
70
+ messages: s.conversation,
71
+ updatedAt: safeDateIso(s.session.updatedAt),
72
+ }));
73
+ }
74
+
75
+ // ChatGPT export (conversations.json)
76
+ if (isChatGptExport(parsed)) {
77
+ return parseChatGptExport(parsed);
78
+ }
79
+
80
+ // Plain messages array
81
+ if (Array.isArray(parsed)) {
82
+ return [{ title: null, messages: parsed }];
83
+ }
84
+ } catch { /* fall through to text parsing */ }
85
+
86
+ // Parse User: / Assistant: format
87
+ const messages = [];
88
+ const lines = trimmed.split('\n');
89
+ let current = null;
90
+ for (const line of lines) {
91
+ const userMatch = line.match(/^User:\s*(.*)/i);
92
+ const asstMatch = line.match(/^Assistant:\s*(.*)/i);
93
+ if (userMatch) {
94
+ if (current) messages.push(current);
95
+ current = { role: 'user', content: userMatch[1] };
96
+ } else if (asstMatch) {
97
+ if (current) messages.push(current);
98
+ current = { role: 'assistant', content: asstMatch[1] };
99
+ } else if (current) {
100
+ current.content += '\n' + line;
101
+ }
102
+ }
103
+ if (current) messages.push(current);
104
+
105
+ if (messages.length === 0) {
106
+ // Fallback: treat as plain markdown notes to extract facts from
107
+ return parseMarkdownFiles(trimmed);
108
+ }
109
+ return [{ title: null, messages }];
110
+ }
111
+
112
+ // ─── Commands ────────────────────────────────────────────────────
113
+
114
/**
 * `init` command — open/create the memory bank so its storage exists.
 */
export async function init(positionals, flags, mem, config) {
  await mem.init();
  return {
    status: 'initialized',
    storage: config.storage,
    path: config.storagePath,
  };
}
118
+
119
/**
 * `retrieve` command — fetch relevant memory context for a query.
 * An optional --context flag points at a conversation transcript file.
 */
export async function retrieve(positionals, flags, mem) {
  const [query] = positionals;
  if (!query) throw new Error('Usage: memory retrieve <query>');

  await mem.init();

  const conversationText = flags.context
    ? await readFile(flags.context, 'utf-8')
    : null;

  const result = await mem.retrieve(query, conversationText);
  if (!result?.assembledContext) {
    return { assembledContext: null, message: 'No relevant context found.' };
  }
  return result;
}
136
+
137
/**
 * `import` command — load conversations/notes from a file, directory, or
 * stdin ('-') and run fact extraction over them.
 */
export async function importCmd(positionals, flags, mem, config, { showProgress, spinnerHolder } = {}) {
  const source = positionals[0];
  let conversations;
  let extractionMode = 'conversation';

  const useStdin = source === '-' || (!source && !process.stdin.isTTY);
  if (useStdin) {
    conversations = parseConversations(await readStdin(), flags);
  } else if (!source) {
    throw new Error('Usage: memory import <file|dir|->');
  } else {
    const info = await stat(source);
    if (info.isDirectory()) {
      // A directory means markdown notes, imported recursively.
      const files = await readMarkdownDir(source);
      if (files.length === 0) throw new Error(`No .md files found in ${source}`);
      conversations = parseMarkdownFiles(files);
      extractionMode = 'document';
    } else {
      conversations = parseConversations(await readFile(source, 'utf-8'), flags);
      if (flags.format === 'markdown') extractionMode = 'document';
    }
  }

  return ingestConversations(conversations, extractionMode, mem, {
    showProgress,
    spinnerHolder,
    status: 'imported',
  });
}
161
+
162
/**
 * `add` command — ingest raw text (argument or piped stdin) as a conversation.
 */
export async function add(positionals, flags, mem, config, { showProgress, spinnerHolder } = {}) {
  let input = positionals[0];
  if (input == null && !process.stdin.isTTY) input = await readStdin();
  if (!input) throw new Error('Usage: memory add <text>');

  const conversations = parseConversations(input, flags);
  return ingestConversations(conversations, 'conversation', mem, {
    showProgress,
    spinnerHolder,
    status: 'added',
    showDiff: true,
  });
}
169
+
170
/**
 * Run ingestion over each parsed conversation, with optional progress UI.
 *
 * @param {Array} conversations - [{ title, messages, updatedAt? }]
 * @param {string} extractionMode - 'conversation' | 'document'
 * @param {object} mem - memory bank (init() is called here)
 * @param {object} opts
 * @param {boolean} [opts.showProgress] - emit per-session progress to stderr
 * @param {object}  [opts.spinnerHolder] - shared slot so the caller can stop the active spinner
 * @param {string}  opts.status - status string echoed in the summary
 * @param {boolean} [opts.showDiff=false] - render file diffs for each write
 * @returns {Promise<{status: string, sessions: number, totalWriteCalls: number, details: Array}>}
 */
async function ingestConversations(conversations, extractionMode, mem, { showProgress, spinnerHolder, status, showDiff = false }) {
  await mem.init();

  const isTTY = process.stderr.isTTY;
  // ANSI codes collapse to '' when stderr is not a terminal.
  const palette = isTTY
    ? { green: '\x1b[32m', yellow: '\x1b[33m', dim: '\x1b[2m', bold: '\x1b[1m', reset: '\x1b[0m', gray: '\x1b[90m' }
    : { green: '', yellow: '', dim: '', bold: '', reset: '', gray: '' };

  const details = [];
  let totalWriteCalls = 0;

  for (let index = 0; index < conversations.length; index++) {
    const conv = conversations[index];
    const label = conv.title || `conversation ${index + 1}`;

    if (showProgress) {
      const counter = conversations.length > 1
        ? `${palette.gray}(${index + 1}/${conversations.length})${palette.reset} `
        : '';
      process.stderr.write(`\n ${counter}${palette.bold}"${label}"${palette.reset}\n`);
    }

    let spinner = null;
    if (showProgress && isTTY) {
      spinner = createSpinner('thinking…');
      if (spinnerHolder) spinnerHolder.current = spinner;
    }

    const result = await mem.ingest(conv.messages, { updatedAt: conv.updatedAt, extractionMode });

    if (spinnerHolder) spinnerHolder.current = null;
    if (showProgress) {
      if (result.status === 'error') {
        spinner?.stop(` ${palette.yellow}⚠ ${result.error}${palette.reset}`);
      } else if (result.writeCalls > 0) {
        spinner?.stop(` ${palette.green}✓ ${result.writeCalls} fact${result.writeCalls === 1 ? '' : 's'} saved${palette.reset}`);
      } else {
        spinner?.stop(` ${palette.dim}– nothing to save${palette.reset}`);
      }
      if (showDiff && result.writes?.length) {
        for (const { path, before, after } of result.writes) {
          printFileDiff(path, before, after);
        }
      }
    }

    totalWriteCalls += result.writeCalls || 0;
    details.push({ session: label, messages: conv.messages.length, writeCalls: result.writeCalls, error: result.error });
  }

  return { status, sessions: details.length, totalWriteCalls, details };
}
220
+
221
/**
 * `compact` command — deduplicate and archive stale facts across all files.
 * Tolerates a backend that returns no stats object.
 */
export async function compact(positionals, flags, mem) {
  await mem.init();
  const stats = await mem.compact();
  return {
    status: 'compacted',
    filesChanged: stats?.filesChanged ?? 0,
    filesTotal: stats?.filesTotal ?? 0,
  };
}
226
+
227
/**
 * `ls` command — list storage entries under an optional path prefix.
 */
export async function ls(positionals, flags, mem) {
  await mem.init();
  const prefix = positionals[0] || '';
  return mem.storage.ls(prefix);
}
231
+
232
/**
 * `read` command — return a single file from storage.
 * @throws when the path argument is missing or the file does not exist
 */
export async function read(positionals, flags, mem) {
  const [path] = positionals;
  if (!path) throw new Error('Usage: memory read <path>');

  await mem.init();
  const content = await mem.storage.read(path);
  // Storage backends signal "missing" with null/undefined rather than throwing.
  if (content == null) throw new Error(`File not found: ${path}`);
  return { path, content };
}
243
+
244
/**
 * `write` command — store content at a path, taken from --content or
 * piped stdin.
 */
export async function write(positionals, flags, mem) {
  const [path] = positionals;
  if (!path) throw new Error('Usage: memory write <path>');

  let content;
  if (flags.content != null) {
    content = flags.content;
  } else if (process.stdin.isTTY) {
    throw new Error('Provide content via --content or stdin.');
  } else {
    content = await readStdin();
  }

  await mem.init();
  await mem.storage.write(path, content);
  return { status: 'written', path };
}
261
+
262
/**
 * `delete` command — remove a file from storage.
 */
export async function del(positionals, flags, mem) {
  const [path] = positionals;
  if (!path) throw new Error('Usage: memory delete <path>');

  await mem.init();
  await mem.storage.delete(path);
  return { status: 'deleted', path };
}
270
+
271
/**
 * `search` command — keyword search across storage.
 * The query is attached to the result as `_query` for the output layer.
 */
export async function search(positionals, flags, mem) {
  const [query] = positionals;
  if (!query) throw new Error('Usage: memory search <query>');

  await mem.init();
  const results = await mem.storage.search(query);
  results._query = query; // NOTE: mutates the backend's return value on purpose
  return results;
}
280
+
281
/**
 * `export` command — dump all memory to a timestamped file in the cwd.
 * Supported formats: 'txt' (default, line-delimited) and 'zip'.
 */
export async function exportCmd(positionals, flags, mem) {
  const format = flags.format || 'txt';
  await mem.init();

  const all = await mem.storage.exportAll();
  // _tree.md index files are exported but excluded from the reported count.
  const files = all.filter(f => !f.path.endsWith('_tree.md'));
  const exportPath = buildExportPath(format);

  if (format === 'zip') {
    await writeFile(exportPath, toZip(all));
  } else {
    await writeFile(exportPath, serialize(all), 'utf-8');
  }

  return { status: 'exported', files: files.length, format, path: exportPath };
}
297
+
298
+
299
+
300
/**
 * `clear` command — delete every memory file. Requires --confirm so a bare
 * invocation only reports what would be removed.
 */
export async function clear(positionals, flags, mem, config) {
  await mem.init();
  const all = await mem.storage.exportAll();
  const files = all.filter(f => !f.path.endsWith('_tree.md'));

  if (!flags.confirm) {
    throw new Error(`This will delete ${files.length} file${files.length === 1 ? '' : 's'} in ${config.storagePath}. Run with --confirm to proceed.`);
  }

  await mem.storage.clear();
  return { status: 'cleared', filesDeleted: files.length, path: config.storagePath };
}
312
+
313
/**
 * `status` command — resolved config plus storage statistics.
 */
export async function status(positionals, flags, mem, config) {
  await mem.init();
  const all = await mem.storage.exportAll();
  const files = all.filter(f => !f.path.endsWith('_tree.md'));

  // Unique top-level directory names across all files.
  const dirs = new Set();
  for (const { path } of files) {
    const slash = path.indexOf('/');
    if (slash !== -1) dirs.add(path.slice(0, slash));
  }

  return {
    provider: config.provider,
    model: config.model,
    baseUrl: config.baseUrl,
    storage: config.storage,
    storagePath: config.storagePath,
    configFile: CONFIG_PATH,
    files: files.length,
    directories: [...dirs].sort(),
  };
}
335
+
336
/**
 * `tree` command — render the memory layout as indented, colorized lines.
 * Returns { treeLines } for the output layer to print.
 */
export async function tree(positionals, flags, mem, config) {
  await mem.init();
  const all = await mem.storage.exportAll();
  const files = all
    .filter(f => !f.path.endsWith('_tree.md'))
    .sort((a, b) => a.path.localeCompare(b.path));

  if (files.length === 0) return { treeLines: [] };

  // Bucket files by their top-level directory ('' for root-level files).
  const grouped = new Map();
  for (const f of files) {
    const slash = f.path.indexOf('/');
    const dir = slash === -1 ? '' : f.path.slice(0, slash);
    const name = slash === -1 ? f.path : f.path.slice(slash + 1);
    const bucket = grouped.get(dir) ?? [];
    bucket.push({ ...f, name });
    grouped.set(dir, bucket);
  }

  // ANSI codes collapse to '' when stdout is not a terminal.
  const isTTY = process.stdout.isTTY;
  const codes = {
    reset: '\x1b[0m', bold: '\x1b[1m', dim: '\x1b[2m',
    cyan: '\x1b[36m', green: '\x1b[32m', gray: '\x1b[90m', yellow: '\x1b[33m',
  };
  const c = Object.fromEntries(Object.keys(codes).map(k => [k, isTTY ? codes[k] : '']));

  const lines = ['', ` ${c.bold}Memory${c.reset} ${c.dim}${config.storagePath}${c.reset}`, ''];

  for (const dir of [...grouped.keys()].sort()) {
    const entries = grouped.get(dir);
    if (dir) lines.push(` ${c.cyan}${dir}/${c.reset}`);

    const prefix = dir ? ' ' : '';
    entries.forEach((f, i) => {
      const isLast = i === entries.length - 1;
      const branch = entries.length === 1 ? '──' : isLast ? '└─' : '├─';
      const count = f.itemCount != null ? `${c.green}${String(f.itemCount).padStart(3)} facts${c.reset}` : '';
      const hint = f.oneLiner ? ` ${c.dim}${f.oneLiner.slice(0, 60)}${f.oneLiner.length > 60 ? '…' : ''}${c.reset}` : '';
      const fname = f.name.replace(/\.md$/, '');
      lines.push(` ${prefix}${c.gray}${branch}${c.reset} ${c.bold}${fname}${c.reset} ${count}${hint}`);
    });
    lines.push('');
  }

  const totalFacts = files.reduce((n, f) => n + (f.itemCount || 0), 0);
  lines.push(` ${c.dim}${files.length} file${files.length === 1 ? '' : 's'} · ${totalFacts} facts total${c.reset}`);
  lines.push('');

  return { treeLines: lines };
}
395
+
396
/**
 * `login` command — save provider credentials.
 * With --api-key: non-interactive, config written directly.
 * Otherwise: hands off to the interactive wizard.
 */
export async function login(positionals, flags, mem, config) {
  const apiKey = flags['api-key'];
  if (!apiKey) return loginInteractive();

  const toSave = {
    provider: flags.provider || config.provider,
    apiKey,
    // A --path implies filesystem storage unless --storage says otherwise.
    storage: flags.storage || (flags.path ? 'filesystem' : null) || config.storage || 'filesystem',
  };
  if (flags.model) toSave.model = flags.model;
  if (flags.path) toSave.storagePath = flags.path;

  await writeConfigFile(toSave);
  return { status: 'logged_in', provider: toSave.provider, configFile: CONFIG_PATH };
}
@@ -0,0 +1,120 @@
1
+ /**
2
+ * CLI config resolution — flags + config file + env vars → createMemoryBank config.
3
+ *
4
+ * Config lives at ~/.config/nanomem/config.json (fixed location).
5
+ * Memory data lives at ~/nanomem by default (configurable via login or --path).
6
+ *
7
+ * Priority (highest wins):
8
+ * CLI flags > config file > env vars > preset defaults
9
+ */
10
+
11
+ import { homedir } from 'node:os';
12
+ import { join } from 'node:path';
13
+ import { readFile, writeFile, mkdir } from 'node:fs/promises';
14
+ import { createMemoryBank } from '../index.js';
15
+
16
// ─── Paths ──────────────────────────────────────────────────────

// Config always lives at ~/.config/nanomem/config.json (fixed location).
export const CONFIG_DIR = join(homedir(), '.config', 'nanomem');
export const CONFIG_PATH = join(CONFIG_DIR, 'config.json');
// Default directory for memory data; overridable via `login` or --path.
export const DEFAULT_STORAGE_PATH = join(homedir(), 'nanomem');
21
+
22
+ // ─── Config file ────────────────────────────────────────────────
23
+
24
/**
 * Read the saved CLI config, returning {} when the file is missing or
 * cannot be parsed.
 * @returns {Promise<object>}
 */
export async function readConfigFile() {
  try {
    return JSON.parse(await readFile(CONFIG_PATH, 'utf-8'));
  } catch {
    // A missing file and invalid JSON both mean "no saved config".
    return {};
  }
}
32
+
33
/**
 * Merge `data` into the existing config and persist it (pretty-printed,
 * trailing newline). Creates the config directory on first use.
 * @param {object} data - keys to add or overwrite
 */
export async function writeConfigFile(data) {
  await mkdir(CONFIG_DIR, { recursive: true });
  const merged = { ...(await readConfigFile()), ...data };
  await writeFile(CONFIG_PATH, `${JSON.stringify(merged, null, 2)}\n`, 'utf-8');
}
39
+
40
// ─── Provider presets ───────────────────────────────────────────

// Each preset: env var holding the API key, default endpoint, default model.
// `isAnthropic` switches the client to the Anthropic wire format; `headers`
// adds provider-required extra HTTP headers. 'custom' requires explicit
// --base-url / --model / --api-key.
const PRESETS = {
  tinfoil: { envKey: 'TINFOIL_API_KEY', baseUrl: 'https://inference.tinfoil.sh/v1', model: 'kimi-k2-5' },
  openai: { envKey: 'OPENAI_API_KEY', baseUrl: 'https://api.openai.com/v1', model: 'gpt-5.4-mini' },
  anthropic: { envKey: 'ANTHROPIC_API_KEY', baseUrl: 'https://api.anthropic.com', model: 'claude-sonnet-4-6', isAnthropic: true },
  openrouter: {
    envKey: 'OPENROUTER_API_KEY', baseUrl: 'https://openrouter.ai/api/v1', model: 'openai/gpt-4o',
    headers: { 'HTTP-Referer': 'https://github.com/openanonymity/nanomem', 'X-Title': 'nanomem' }
  },
  custom: { envKey: null, baseUrl: null, model: null },
};
52
+
53
+ // ─── Resolve config ─────────────────────────────────────────────
54
+
55
/**
 * Resolve the effective config from flags, config file, env vars, and presets.
 * Priority (highest wins): CLI flags > config file > env vars > preset defaults.
 *
 * @param {Record<string, any>} flags - parsed CLI flags
 * @returns {Promise<object>} { apiKey, baseUrl, model, headers, provider, isAnthropic, storage, storagePath }
 * @throws when an explicitly named provider is not a known preset
 */
export async function resolveConfig(flags) {
  const fileConfig = await readConfigFile();

  // 1. Pick provider: explicit choice, else the first preset whose API-key
  //    env var is set, else fall back to openai.
  let providerName = flags.provider || fileConfig.provider || process.env.LLM_PROVIDER || null;
  let preset;
  if (providerName) {
    preset = PRESETS[providerName];
    if (!preset) {
      throw new Error(`Unknown provider: ${providerName}. Use: ${Object.keys(PRESETS).join(', ')}`);
    }
  } else {
    const match = Object.entries(PRESETS).find(([, p]) => p.envKey && process.env[p.envKey]);
    providerName = match ? match[0] : 'openai';
    preset = match ? match[1] : PRESETS.openai;
  }

  // 2. Resolve fields — flags > config file > env vars > preset defaults
  const apiKey = flags['api-key'] || fileConfig.apiKey || process.env.LLM_API_KEY || (preset.envKey && process.env[preset.envKey]) || null;
  const baseUrl = flags['base-url'] || fileConfig.baseUrl || process.env.LLM_BASE_URL || preset.baseUrl;
  const model = flags.model || fileConfig.model || process.env.LLM_MODEL || preset.model;
  const headers = preset.headers || null;
  const storage = flags.storage || (flags.path ? 'filesystem' : null) || fileConfig.storage || 'filesystem';
  const rawPath = flags.path || fileConfig.storagePath || DEFAULT_STORAGE_PATH;

  // Expand shell-style home paths. The shell only expands unquoted '~', so
  // both a bare '~' and '~/...' can reach us verbatim.
  let storagePath = rawPath;
  if (rawPath === '~') {
    storagePath = homedir();
  } else if (rawPath.startsWith('~/')) {
    storagePath = join(homedir(), rawPath.slice(2));
  }

  return { apiKey, baseUrl, model, headers, provider: providerName, isAnthropic: !!preset.isAnthropic, storage, storagePath };
}
83
+
84
+ // ─── Create a memory instance from resolved config ──────────────
85
+
86
// Commands that talk to the LLM and therefore need an API key.
const LLM_COMMANDS = new Set(['retrieve', 'extract', 'compact', 'import', 'add']);

/**
 * Build a memory bank instance from resolved config for a given CLI command.
 * Storage-only commands get a stub LLM client that throws if used.
 * @throws when the command needs an LLM but no API key is configured
 */
export function createMemoryFromConfig(config, command, { onToolCall, onProgress, onCompactProgress } = {}) {
  const needsLlm = LLM_COMMANDS.has(command);

  if (needsLlm && !config.apiKey) {
    throw new Error(
      'No API key configured. Run `memory login` to get started.'
    );
  }

  const opts = { storage: config.storage, storagePath: config.storagePath };

  if (!needsLlm) {
    // Make accidental LLM use by a storage-only command fail loudly.
    opts.llmClient = {
      createChatCompletion() { throw new Error('This command requires an API key.'); },
    };
    return createMemoryBank(opts);
  }

  opts.llm = {
    apiKey: config.apiKey,
    baseUrl: config.baseUrl,
    model: config.model,
    // Anthropic uses its own wire format regardless of the preset name.
    provider: config.isAnthropic ? 'anthropic' : config.provider,
    ...(config.headers ? { headers: config.headers } : {}),
  };
  if (onToolCall) opts.onToolCall = onToolCall;
  if (onProgress) opts.onProgress = onProgress;
  if (onCompactProgress) opts.onCompactProgress = onCompactProgress;

  return createMemoryBank(opts);
}
@@ -0,0 +1,68 @@
1
+ /**
2
+ * Terminal diff rendering using @pierre/diffs.
3
+ */
4
+
5
+ import { parseDiffFromFile } from '@pierre/diffs';
6
+
7
// ANSI escape codes for colorized diff output (stderr TTY is checked
// before any of these are emitted).
const R = '\x1b[0m';     // reset
const DIM = '\x1b[2m';
const BOLD = '\x1b[1m';
const CYAN = '\x1b[36m';
const GREEN = '\x1b[32m';
const RED = '\x1b[31m';
const GRAY = '\x1b[90m';
14
+
15
+ /**
16
+ * Render a file diff to stderr as ANSI-colored terminal output.
17
+ * Returns early and silently if not a TTY or nothing changed.
18
+ *
19
+ * @param {string} path
20
+ * @param {string} before
21
+ * @param {string} after
22
+ */
23
+ export function printFileDiff(path, before, after) {
24
+ if (!process.stderr.isTTY) return;
25
+ if (before === after) return;
26
+
27
+ let fileMeta;
28
+ try {
29
+ fileMeta = parseDiffFromFile(
30
+ { name: path, contents: before },
31
+ { name: path, contents: after },
32
+ { context: 2 },
33
+ );
34
+ } catch {
35
+ return;
36
+ }
37
+
38
+ const { hunks, additionLines, deletionLines } = fileMeta;
39
+ if (!hunks?.length) return;
40
+
41
+ const isNew = !before;
42
+ const action = isNew ? 'new file' : 'modified';
43
+
44
+ process.stderr.write(`\n ${BOLD}${CYAN}${path}${R} ${DIM}${action}${R}\n`);
45
+
46
+ for (const hunk of hunks) {
47
+ const header = (hunk.hunkSpecs || '').trim();
48
+ process.stderr.write(` ${GRAY}${header}${R}\n`);
49
+
50
+ for (const seg of hunk.hunkContent) {
51
+ if (seg.type === 'context') {
52
+ for (let i = 0; i < seg.lines; i++) {
53
+ const line = (additionLines[seg.additionLineIndex + i] ?? '').replace(/\n$/, '');
54
+ process.stderr.write(` ${DIM} ${line}${R}\n`);
55
+ }
56
+ } else {
57
+ for (let i = 0; i < seg.deletions; i++) {
58
+ const line = (deletionLines[seg.deletionLineIndex + i] ?? '').replace(/\n$/, '');
59
+ process.stderr.write(` ${RED}- ${line}${R}\n`);
60
+ }
61
+ for (let i = 0; i < seg.additions; i++) {
62
+ const line = (additionLines[seg.additionLineIndex + i] ?? '').replace(/\n$/, '');
63
+ process.stderr.write(` ${GREEN}+ ${line}${R}\n`);
64
+ }
65
+ }
66
+ }
67
+ }
68
+ }
@@ -0,0 +1,84 @@
1
+ /**
2
+ * CLI help text.
3
+ */
4
+
5
// Top-level usage text. The provider list must stay in sync with PRESETS in
// cli/config.js (it previously omitted 'openrouter').
export const GLOBAL_HELP = `Usage: nanomem <command> [args] [flags]

Commands:

  Setup:
    login                                Configure provider, model, API key, and storage path
    status                               Show current config and storage stats

  Memory:
    add <text>                           Add raw text directly and extract facts
    import <file|dir|->                  Import conversations or notes and extract facts
    retrieve <query> [--context <file>]  Retrieve relevant context for a query
    compact                              Deduplicate and archive stale facts
    export [--format txt|zip]            Export all memory to a file

  Storage:
    ls [path]                            List files and directories
    read <path>                          Read a file
    write <path> --content <text>        Write content to a file (or pipe stdin)
    delete <path>                        Delete a file
    search <query>                       Search files by keyword
    clear --confirm                      Delete all memory files

Flags:
  --api-key <key>     LLM API key
  --model <model>     Model ID
  --provider <name>   Provider: openai | anthropic | tinfoil | openrouter | custom
  --base-url <url>    Custom API endpoint
  --path <dir>        Storage directory (default: ~/nanomem)
  --json              Force JSON output
  --render            Render markdown for terminal output
  -h, --help          Show help
  -v, --version       Show version

Examples:
  nanomem login
  nanomem add "User: I moved to Seattle."
  nanomem import conversations.json
  nanomem import my-notes.md
  nanomem import ./notes/
  nanomem retrieve "what are my hobbies?"
  nanomem status
  nanomem export --format zip
`;
49
+
50
// Per-command help text. The login entry previously pointed at
// ~/.nanomem/config.json; the actual location (see cli/config.js CONFIG_DIR
// and the module header there) is ~/.config/nanomem/config.json.
export const COMMAND_HELP = {
  add: 'Usage: nanomem add <text>\n\nAdd raw text directly and extract facts into memory.\nAccepts quoted text or piped stdin.\nRequires an LLM API key.',
  retrieve: 'Usage: nanomem retrieve <query> [--context <file>]\n\nRetrieve relevant memory context for a query.\nRequires an LLM API key.',
  compact: 'Usage: nanomem compact\n\nDeduplicate and archive stale facts across all memory files.\nRequires an LLM API key.',
  ls: 'Usage: nanomem ls [path]\n\nList files and directories in storage.',
  read: 'Usage: nanomem read <path>\n\nRead a file from storage.\nUse --render to format markdown files for terminal display.',
  write: 'Usage: nanomem write <path> [--content <text>]\n\nWrite content to a file. Reads from stdin if --content is not provided.',
  delete: 'Usage: nanomem delete <path>\n\nDelete a file from storage.',
  search: 'Usage: nanomem search <query>\n\nSearch files by keyword.',
  export: 'Usage: nanomem export [--format txt|zip]\n\nExport all memory to a timestamped file in the current directory.\nDefault format is txt (line-delimited text). Use --format zip for a ZIP archive.',
  import: `Usage: nanomem import <file|dir|->

Import conversations or notes and extract facts into memory.

Auto-detects format:
  - ChatGPT export (conversations.json from "Export data")
  - OA Fastchat export (JSON with data.chats.sessions)
  - JSON messages array ([{role, content}, ...])
  - Plain text (User:/Assistant: lines)
  - Markdown notes (splits by top-level headings)
  - Directory (imports all .md files recursively)

For multi-session exports, use --session-id or --session-title to filter.
Requires an LLM API key.`,
  clear: 'Usage: nanomem clear --confirm\n\nDelete all memory files. Requires --confirm to prevent accidental data loss.',
  status: 'Usage: nanomem status\n\nShow resolved config and storage statistics.',
  login: `Usage: nanomem login

Walks you through provider, model, API key, and storage path.
Config is saved to ~/.config/nanomem/config.json.

Non-interactive (for agents/scripts):
  nanomem login --provider openai --api-key sk-... --model gpt-5.4-mini
  nanomem login --provider anthropic --api-key sk-ant-... --path ~/project/memory`,
};