@openanonymity/nanomem 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +64 -18
  3. package/package.json +7 -3
  4. package/src/backends/BaseStorage.js +147 -3
  5. package/src/backends/indexeddb.js +21 -8
  6. package/src/browser.js +227 -0
  7. package/src/bullets/parser.js +8 -9
  8. package/src/cli/auth.js +1 -1
  9. package/src/cli/commands.js +58 -9
  10. package/src/cli/config.js +1 -1
  11. package/src/cli/help.js +5 -2
  12. package/src/cli/output.js +4 -0
  13. package/src/cli.js +6 -3
  14. package/src/engine/compactor.js +3 -6
  15. package/src/engine/deleter.js +187 -0
  16. package/src/engine/executors.js +474 -11
  17. package/src/engine/ingester.js +98 -63
  18. package/src/engine/recentConversation.js +110 -0
  19. package/src/engine/retriever.js +243 -37
  20. package/src/engine/toolLoop.js +51 -9
  21. package/src/imports/chatgpt.js +1 -1
  22. package/src/imports/claude.js +85 -0
  23. package/src/imports/importData.js +462 -0
  24. package/src/imports/index.js +10 -0
  25. package/src/index.js +95 -2
  26. package/src/llm/openai.js +204 -58
  27. package/src/llm/tinfoil.js +508 -0
  28. package/src/omf.js +343 -0
  29. package/src/prompt_sets/conversation/ingestion.js +111 -12
  30. package/src/prompt_sets/document/ingestion.js +98 -4
  31. package/src/prompt_sets/index.js +12 -4
  32. package/src/types.js +135 -4
  33. package/src/vendor/tinfoil.browser.d.ts +2 -0
  34. package/src/vendor/tinfoil.browser.js +41596 -0
  35. package/types/backends/BaseStorage.d.ts +19 -0
  36. package/types/backends/indexeddb.d.ts +1 -0
  37. package/types/browser.d.ts +17 -0
  38. package/types/engine/deleter.d.ts +67 -0
  39. package/types/engine/executors.d.ts +56 -2
  40. package/types/engine/recentConversation.d.ts +18 -0
  41. package/types/engine/retriever.d.ts +22 -9
  42. package/types/imports/claude.d.ts +14 -0
  43. package/types/imports/importData.d.ts +29 -0
  44. package/types/imports/index.d.ts +2 -0
  45. package/types/index.d.ts +9 -0
  46. package/types/llm/openai.d.ts +6 -9
  47. package/types/llm/tinfoil.d.ts +13 -0
  48. package/types/omf.d.ts +40 -0
  49. package/types/prompt_sets/conversation/ingestion.d.ts +8 -3
  50. package/types/prompt_sets/document/ingestion.d.ts +8 -3
  51. package/types/types.d.ts +127 -2
  52. package/types/vendor/tinfoil.browser.d.ts +6348 -0
package/src/index.js CHANGED
@@ -11,16 +11,33 @@
11
11
  * rebuildTree, exportAll }
12
12
  * Utilities (portability): mem.serialize(), mem.toZip()
13
13
  */
14
- /** @import { LLMClient, MemoryBank, MemoryBankConfig, MemoryBankLLMConfig, Message, IngestOptions, RetrievalResult, StorageBackend } from './types.js' */
14
+ /** @import { LLMClient, MemoryBank, MemoryBankConfig, MemoryBankLLMConfig, Message, IngestOptions, RetrievalResult, AugmentQueryResult, StorageBackend } from './types.js' */
15
15
 
16
16
  import { createOpenAIClient } from './llm/openai.js';
17
17
  import { createAnthropicClient } from './llm/anthropic.js';
18
+ import { createTinfoilClient } from './llm/tinfoil.js';
18
19
  import { MemoryBulletIndex } from './bullets/bulletIndex.js';
19
20
  import { MemoryRetriever } from './engine/retriever.js';
20
21
  import { MemoryIngester } from './engine/ingester.js';
22
+ import { MemoryDeleter } from './engine/deleter.js';
21
23
  import { MemoryCompactor } from './engine/compactor.js';
22
24
  import { InMemoryStorage } from './backends/ram.js';
25
+ import { importData as importMemoryData } from './imports/importData.js';
23
26
  import { serialize, toZip } from './utils/portability.js';
27
+ import { buildOmfExport, previewOmfImport, importOmf } from './omf.js';
28
+
29
/**
 * Remove review-only [[user_data]] markers before sending the final prompt to
 * the frontier model.
 *
 * Both the opening and closing marker are stripped globally; all other text
 * is returned untouched. Nullish input yields an empty string.
 *
 * @param {string} text
 * @returns {string}
 */
export function stripUserDataTags(text) {
  const OPEN_TAG = /\[\[user_data\]\]/g;
  const CLOSE_TAG = /\[\[\/user_data\]\]/g;
  const value = String(text ?? '');
  // Strip opening markers first, then closing markers, matching the
  // original two-pass order exactly.
  return value.replace(OPEN_TAG, '').replace(CLOSE_TAG, '');
}
24
41
 
25
42
  /**
26
43
  * Create a memory instance.
@@ -44,6 +61,7 @@ export function createMemoryBank(config = {}) {
44
61
  onToolCall: config.onToolCall,
45
62
  });
46
63
  const compactor = new MemoryCompactor({ backend, bulletIndex, llmClient, model, onProgress: config.onCompactProgress });
64
+ const deleter = new MemoryDeleter({ backend, bulletIndex, llmClient, model, onToolCall: config.onToolCall });
47
65
 
48
66
  async function write(path, content) {
49
67
  await backend.write(path, content);
@@ -75,6 +93,14 @@ export function createMemoryBank(config = {}) {
75
93
  */
76
94
  retrieve: (query, conversationText) => retrieval.retrieveForQuery(query, conversationText),
77
95
 
96
+ /**
97
+ * Build a reviewable prompt that augments the user query with memory.
98
+ * @param {string} query
99
+ * @param {string} [conversationText]
100
+ * @returns {Promise<AugmentQueryResult | null>}
101
+ */
102
+ augmentQuery: (query, conversationText) => retrieval.augmentQueryForPrompt(query, conversationText),
103
+
78
104
  /**
79
105
  * Ingest facts from a conversation into memory.
80
106
  * @param {Message[]} messages
@@ -82,13 +108,74 @@ export function createMemoryBank(config = {}) {
82
108
  */
83
109
  ingest: (messages, options) => ingester.ingest(messages, options),
84
110
 
111
+ /**
112
+ * Import supported conversation/document formats into memory.
113
+ */
114
+ importData: (input, options) => importMemoryData({
115
+ init: () => backend.init(),
116
+ ingest: (messages, ingestOptions) => ingester.ingest(messages, ingestOptions)
117
+ }, input, options),
118
+ exportOmf: async () => {
119
+ await backend.init();
120
+ return buildOmfExport({
121
+ read: (path) => backend.read(path),
122
+ write: (path, content) => write(path, content),
123
+ delete: (path) => remove(path),
124
+ exists: (path) => backend.exists(path),
125
+ search: (query) => backend.search(query),
126
+ ls: (dirPath) => backend.ls(dirPath),
127
+ getTree: () => backend.getTree(),
128
+ rebuildTree: () => rebuildTree(),
129
+ exportAll: () => backend.exportAll(),
130
+ clear: () => backend.clear(),
131
+ }, { sourceApp: 'nanomem' });
132
+ },
133
+ previewOmfImport: async (doc, options) => {
134
+ await backend.init();
135
+ return previewOmfImport({
136
+ read: (path) => backend.read(path),
137
+ write: (path, content) => write(path, content),
138
+ delete: (path) => remove(path),
139
+ exists: (path) => backend.exists(path),
140
+ search: (query) => backend.search(query),
141
+ ls: (dirPath) => backend.ls(dirPath),
142
+ getTree: () => backend.getTree(),
143
+ rebuildTree: () => rebuildTree(),
144
+ exportAll: () => backend.exportAll(),
145
+ clear: () => backend.clear(),
146
+ }, doc, options);
147
+ },
148
+ importOmf: async (doc, options) => {
149
+ await backend.init();
150
+ return importOmf({
151
+ read: (path) => backend.read(path),
152
+ write: (path, content) => write(path, content),
153
+ delete: (path) => remove(path),
154
+ exists: (path) => backend.exists(path),
155
+ search: (query) => backend.search(query),
156
+ ls: (dirPath) => backend.ls(dirPath),
157
+ getTree: () => backend.getTree(),
158
+ rebuildTree: () => rebuildTree(),
159
+ exportAll: () => backend.exportAll(),
160
+ clear: () => backend.clear(),
161
+ }, doc, options);
162
+ },
163
+
85
164
  /** Compact all memory files (dedup, archive stale facts). */
86
165
  compact: () => compactor.compactAll(),
87
166
 
167
+ /**
168
+ * Delete memory content matching a plain-text query.
169
+ * @param {string} query
170
+ * @returns {Promise<{ status: string, deleteCalls: number, writes: Array }>}
171
+ */
172
+ deleteContent: (query, options) => deleter.deleteForQuery(query, options),
173
+
88
174
  // ─── Low-level (direct storage ops) ──────────────────────
89
175
 
90
176
  storage: {
91
177
  read: (path) => backend.read(path),
178
+ resolvePath: (path) => backend.resolvePath ? backend.resolvePath(path) : Promise.resolve(null),
92
179
  write: (path, content) => write(path, content),
93
180
  delete: (path) => remove(path),
94
181
  exists: (path) => backend.exists(path),
@@ -128,6 +215,10 @@ function _createLlmClient(llmConfig = /** @type {MemoryBankLLMConfig} */ ({ apiK
128
215
  return createAnthropicClient({ apiKey, baseUrl, headers });
129
216
  }
130
217
 
218
+ if (detectedProvider === 'tinfoil') {
219
+ return createTinfoilClient(llmConfig);
220
+ }
221
+
131
222
  return createOpenAIClient({ apiKey, baseUrl, headers });
132
223
  }
133
224
 
@@ -135,6 +226,7 @@ function _detectProvider(baseUrl) {
135
226
  if (!baseUrl) return 'openai';
136
227
  const lower = baseUrl.toLowerCase();
137
228
  if (lower.includes('anthropic.com')) return 'anthropic';
229
+ if (lower.includes('tinfoil.sh')) return 'tinfoil';
138
230
  return 'openai';
139
231
  }
140
232
 
@@ -167,7 +259,7 @@ function _asyncBackend(loader) {
167
259
  return _loading;
168
260
  }
169
261
 
170
- const methods = ['init', 'read', 'write', 'delete', 'exists', 'ls', 'search', 'getTree', 'rebuildTree', 'exportAll', 'clear'];
262
+ const methods = ['init', 'read', 'resolvePath', 'write', 'delete', 'exists', 'ls', 'search', 'getTree', 'rebuildTree', 'exportAll', 'clear'];
171
263
  const proxy = {};
172
264
  for (const method of methods) {
173
265
  proxy[method] = async (...args) => {
@@ -192,6 +284,7 @@ export { MemoryIngester } from './engine/ingester.js';
192
284
  export { MemoryCompactor } from './engine/compactor.js';
193
285
  export { createRetrievalExecutors, createExtractionExecutors } from './engine/executors.js';
194
286
  export { serialize, deserialize, toZip } from './utils/portability.js';
287
+ export { buildOmfExport, previewOmfImport, importOmf, parseOmfText, validateOmf } from './omf.js';
195
288
  export {
196
289
  extractSessionsFromOAFastchatExport,
197
290
  extractConversationFromOAFastchatExport,
package/src/llm/openai.js CHANGED
@@ -7,6 +7,23 @@
7
7
  * Uses `fetch` (built into Node 18+ and browsers).
8
8
  */
9
9
  /** @import { ChatCompletionParams, ChatCompletionResponse, LLMClient, LLMClientOptions, ToolCall } from '../types.js' */
10
/**
 * Error shape used throughout the retry layer: a standard Error enriched with
 * retry bookkeeping fields (populated by createHttpError / finalizeRetryError).
 * @typedef {Error & { status?: number, retryable?: boolean, retryAfterMs?: number | null, _retryFinalized?: boolean }} ApiError
 */

// HTTP statuses considered transient: request timeout, rate limiting, and
// 5xx server/gateway failures. Checked by isRetryableError/createHttpError.
const RETRYABLE_STATUS = new Set([408, 429, 500, 502, 503, 504]);
// Node / undici error codes that indicate a transient network-level failure
// (matched case-insensitively against error.code or error.cause.code).
const RETRYABLE_ERROR_CODES = new Set([
  'ECONNRESET',
  'ECONNREFUSED',
  'ENOTFOUND',
  'ETIMEDOUT',
  'EAI_AGAIN',
  'UND_ERR_CONNECT_TIMEOUT',
  'UND_ERR_SOCKET',
]);
// Retry policy used by withRetry/getRetryDelay: up to 3 total attempts with
// exponential backoff starting at 400ms and capped at 2.5s.
const MAX_ATTEMPTS = 3;
const BASE_DELAY_MS = 400;
const MAX_DELAY_MS = 2500;
10
27
 
11
28
  /**
12
29
  * @param {LLMClientOptions} [options]
@@ -24,21 +41,20 @@ export function createOpenAIClient({ apiKey, baseUrl = 'https://api.openai.com/v
24
41
  };
25
42
  }
26
43
 
44
/**
 * Build the fetch init object for a JSON POST to the chat completions API.
 * @param {object} body - Request payload; serialized with JSON.stringify.
 * @returns {RequestInit}
 */
function buildRequestInit(body) {
  const payload = JSON.stringify(body);
  return {
    method: 'POST',
    headers: buildHeaders(),
    body: payload,
  };
}
51
+
27
52
  async function createChatCompletion({ model, messages, tools, max_tokens, temperature }) {
28
53
  const body = { model, messages, temperature };
29
54
  if (max_tokens != null) body.max_tokens = max_tokens;
30
55
  if (tools && tools.length > 0) body.tools = tools;
31
56
 
32
- const response = await fetch(`${base}/chat/completions`, {
33
- method: 'POST',
34
- headers: buildHeaders(),
35
- body: JSON.stringify(body),
36
- });
37
-
38
- if (!response.ok) {
39
- const text = await response.text().catch(() => '');
40
- throw new Error(`OpenAI API error ${response.status}: ${text}`);
41
- }
57
+ const response = await fetchWithRetry(`${base}/chat/completions`, buildRequestInit(body), 'chat completion request');
42
58
 
43
59
  const data = await response.json();
44
60
  const choice = data.choices?.[0]?.message || {};
@@ -62,65 +78,72 @@ export function createOpenAIClient({ apiKey, baseUrl = 'https://api.openai.com/v
62
78
  if (max_tokens != null) body.max_tokens = max_tokens;
63
79
  if (tools && tools.length > 0) body.tools = tools;
64
80
 
65
- const response = await fetch(`${base}/chat/completions`, {
66
- method: 'POST',
67
- headers: buildHeaders(),
68
- body: JSON.stringify(body),
69
- });
81
+ return withRetry(async (attempt) => {
82
+ const response = await fetch(`${base}/chat/completions`, buildRequestInit(body));
83
+ if (!response.ok) {
84
+ throw await createHttpError(response, attempt);
85
+ }
70
86
 
71
- if (!response.ok) {
72
- const text = await response.text().catch(() => '');
73
- throw new Error(`OpenAI API error ${response.status}: ${text}`);
74
- }
87
+ // Only retry streaming requests if the connection dies before
88
+ // any SSE data arrives. Once we have surfaced deltas, replaying
89
+ // would duplicate partial reasoning/content.
90
+ let content = '';
91
+ let sawStreamData = false;
92
+ const toolCallAccumulator = new Map();
75
93
 
76
- // Accumulate the full response from SSE deltas
77
- let content = '';
78
- const toolCallAccumulator = new Map();
94
+ try {
95
+ await readSSE(response, (chunk) => {
96
+ sawStreamData = true;
79
97
 
80
- await readSSE(response, (chunk) => {
81
- const delta = chunk.choices?.[0]?.delta;
82
- if (!delta) return;
98
+ const delta = chunk.choices?.[0]?.delta;
99
+ if (!delta) return;
83
100
 
84
- // Content delta
85
- if (delta.content) {
86
- content += delta.content;
87
- onDelta?.(delta.content);
88
- }
101
+ if (delta.content) {
102
+ content += delta.content;
103
+ onDelta?.(delta.content);
104
+ }
89
105
 
90
- // Reasoning delta (some providers send this)
91
- if (delta.reasoning) {
92
- onReasoning?.(delta.reasoning);
93
- }
106
+ if (delta.reasoning) {
107
+ onReasoning?.(delta.reasoning);
108
+ }
94
109
 
95
- // Tool call deltas — accumulate by index
96
- if (delta.tool_calls) {
97
- for (const tc of delta.tool_calls) {
98
- const idx = tc.index ?? 0;
99
- if (!toolCallAccumulator.has(idx)) {
100
- toolCallAccumulator.set(idx, {
101
- id: tc.id || '',
102
- type: 'function',
103
- function: { name: '', arguments: '' },
104
- });
110
+ if (delta.tool_calls) {
111
+ for (const tc of delta.tool_calls) {
112
+ const idx = tc.index ?? 0;
113
+ if (!toolCallAccumulator.has(idx)) {
114
+ toolCallAccumulator.set(idx, {
115
+ id: tc.id || '',
116
+ type: 'function',
117
+ function: { name: '', arguments: '' },
118
+ });
119
+ }
120
+ const acc = toolCallAccumulator.get(idx);
121
+ if (!acc) continue;
122
+ if (tc.id) acc.id = tc.id;
123
+ if (tc.function?.name) acc.function.name += tc.function.name;
124
+ if (tc.function?.arguments) acc.function.arguments += tc.function.arguments;
125
+ }
105
126
  }
106
- const acc = toolCallAccumulator.get(idx);
107
- if (!acc) continue;
108
- if (tc.id) acc.id = tc.id;
109
- if (tc.function?.name) acc.function.name += tc.function.name;
110
- if (tc.function?.arguments) acc.function.arguments += tc.function.arguments;
127
+ });
128
+ } catch (error) {
129
+ if (!sawStreamData && isRetryableNetworkError(error)) {
130
+ const retryError = asError(error);
131
+ retryError.retryable = true;
132
+ throw retryError;
111
133
  }
134
+ throw error;
112
135
  }
113
- });
114
136
 
115
- const tool_calls = [...toolCallAccumulator.entries()]
116
- .sort(([a], [b]) => a - b)
117
- .map(([, tc]) => tc);
137
+ const tool_calls = [...toolCallAccumulator.entries()]
138
+ .sort(([a], [b]) => a - b)
139
+ .map(([, tc]) => tc);
118
140
 
119
- return {
120
- content,
121
- tool_calls,
122
- usage: null,
123
- };
141
+ return {
142
+ content,
143
+ tool_calls,
144
+ usage: null,
145
+ };
146
+ }, 'streaming chat completion');
124
147
  }
125
148
 
126
149
  return { createChatCompletion, streamChatCompletion };
@@ -128,6 +151,129 @@ export function createOpenAIClient({ apiKey, baseUrl = 'https://api.openai.com/v
128
151
 
129
152
  // ─── SSE Parser ──────────────────────────────────────────────
130
153
 
154
+ async function fetchWithRetry(url, init, context) {
155
+ return withRetry(async (attempt) => {
156
+ const response = await fetch(url, init);
157
+ if (!response.ok) {
158
+ throw await createHttpError(response, attempt);
159
+ }
160
+ return response;
161
+ }, context);
162
+ }
163
+
164
+ async function withRetry(fn, context) {
165
+ for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt += 1) {
166
+ try {
167
+ return await fn(attempt);
168
+ } catch (error) {
169
+ const normalized = asError(error);
170
+ const shouldRetry = attempt < MAX_ATTEMPTS && isRetryableError(normalized);
171
+ if (!shouldRetry) {
172
+ throw finalizeRetryError(normalized, attempt);
173
+ }
174
+
175
+ const delay = getRetryDelay(attempt - 1, normalized.retryAfterMs || null);
176
+ console.warn(`[nanomem/openai] ${context} attempt ${attempt}/${MAX_ATTEMPTS} failed: ${normalized.message}. Retrying in ${Math.round(delay)}ms.`);
177
+ await sleep(delay);
178
+ }
179
+ }
180
+
181
+ throw new Error(`OpenAI API ${context} failed after ${MAX_ATTEMPTS} attempts.`);
182
+ }
183
+
184
+ function isRetryableError(error) {
185
+ if (!error) return false;
186
+ if (error.retryable === true) return true;
187
+ if (typeof error.status === 'number') {
188
+ return RETRYABLE_STATUS.has(error.status);
189
+ }
190
+ return isRetryableNetworkError(error);
191
+ }
192
+
193
+ function isRetryableNetworkError(error) {
194
+ if (!error || error.isUserAbort) return false;
195
+ if (error.name === 'TypeError' || error.name === 'AbortError') return true;
196
+
197
+ const code = String(error.code || error.cause?.code || '').toUpperCase();
198
+ if (RETRYABLE_ERROR_CODES.has(code)) return true;
199
+
200
+ const message = String(error.message || '').toLowerCase();
201
+ return message.includes('failed to fetch')
202
+ || message.includes('network')
203
+ || message.includes('timeout')
204
+ || message.includes('err_network_changed')
205
+ || message.includes('econnreset')
206
+ || message.includes('connection');
207
+ }
208
+
209
+ /**
210
+ * @param {number} attempt
211
+ * @param {number | null} [retryAfterMs]
212
+ * @returns {number}
213
+ */
214
+ function getRetryDelay(attempt, retryAfterMs = null) {
215
+ if (retryAfterMs != null && Number.isFinite(retryAfterMs) && retryAfterMs > 0) {
216
+ return Math.min(retryAfterMs, MAX_DELAY_MS);
217
+ }
218
+
219
+ const exponential = BASE_DELAY_MS * Math.pow(2, attempt);
220
+ const jitter = Math.random() * BASE_DELAY_MS;
221
+ return Math.min(exponential + jitter, MAX_DELAY_MS);
222
+ }
223
+
224
+ function sleep(ms) {
225
+ return new Promise((resolve) => setTimeout(resolve, ms));
226
+ }
227
+
228
+ async function createHttpError(response, attempt = 1) {
229
+ const text = await response.text().catch(() => '');
230
+ const suffix = attempt > 1 ? ` after ${attempt} attempts` : '';
231
+ const error = /** @type {ApiError} */ (new Error(`OpenAI API error ${response.status}${suffix}: ${text}`));
232
+ error.status = response.status;
233
+ error.retryable = RETRYABLE_STATUS.has(response.status);
234
+ error.retryAfterMs = parseRetryAfterMs(response);
235
+ return error;
236
+ }
237
+
238
+ function parseRetryAfterMs(response) {
239
+ const value = response?.headers?.get?.('Retry-After');
240
+ if (!value) return null;
241
+
242
+ const seconds = Number.parseInt(value, 10);
243
+ if (Number.isFinite(seconds) && seconds > 0) {
244
+ return seconds * 1000;
245
+ }
246
+
247
+ const date = Date.parse(value);
248
+ if (Number.isFinite(date)) {
249
+ const ms = date - Date.now();
250
+ return ms > 0 ? ms : null;
251
+ }
252
+
253
+ return null;
254
+ }
255
+
256
+ function finalizeRetryError(error, attempts) {
257
+ const normalized = asError(error);
258
+ if (attempts <= 1 || normalized._retryFinalized) {
259
+ return normalized;
260
+ }
261
+
262
+ if (!normalized.message.includes('after ')) {
263
+ normalized.message = `${normalized.message} (after ${attempts} attempts)`;
264
+ }
265
+ normalized._retryFinalized = true;
266
+ return normalized;
267
+ }
268
+
269
+ /**
270
+ * @param {unknown} error
271
+ * @returns {ApiError}
272
+ */
273
+ function asError(error) {
274
+ return /** @type {ApiError} */ (error instanceof Error ? error : new Error(String(error)));
275
+ }
276
+
131
277
  async function readSSE(response, onMessage) {
132
278
  if (!response.body) {
133
279
  throw new Error('Streaming response body is not available.');