@openanonymity/nanomem 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +64 -18
  3. package/package.json +7 -3
  4. package/src/backends/BaseStorage.js +147 -3
  5. package/src/backends/indexeddb.js +21 -8
  6. package/src/browser.js +227 -0
  7. package/src/bullets/parser.js +8 -9
  8. package/src/cli/auth.js +1 -1
  9. package/src/cli/commands.js +58 -9
  10. package/src/cli/config.js +1 -1
  11. package/src/cli/help.js +5 -2
  12. package/src/cli/output.js +4 -0
  13. package/src/cli.js +6 -3
  14. package/src/engine/compactor.js +3 -6
  15. package/src/engine/deleter.js +187 -0
  16. package/src/engine/executors.js +474 -11
  17. package/src/engine/ingester.js +98 -63
  18. package/src/engine/recentConversation.js +110 -0
  19. package/src/engine/retriever.js +243 -37
  20. package/src/engine/toolLoop.js +51 -9
  21. package/src/imports/chatgpt.js +1 -1
  22. package/src/imports/claude.js +85 -0
  23. package/src/imports/importData.js +462 -0
  24. package/src/imports/index.js +10 -0
  25. package/src/index.js +95 -2
  26. package/src/llm/openai.js +204 -58
  27. package/src/llm/tinfoil.js +508 -0
  28. package/src/omf.js +343 -0
  29. package/src/prompt_sets/conversation/ingestion.js +111 -12
  30. package/src/prompt_sets/document/ingestion.js +98 -4
  31. package/src/prompt_sets/index.js +12 -4
  32. package/src/types.js +135 -4
  33. package/src/vendor/tinfoil.browser.d.ts +2 -0
  34. package/src/vendor/tinfoil.browser.js +41596 -0
  35. package/types/backends/BaseStorage.d.ts +19 -0
  36. package/types/backends/indexeddb.d.ts +1 -0
  37. package/types/browser.d.ts +17 -0
  38. package/types/engine/deleter.d.ts +67 -0
  39. package/types/engine/executors.d.ts +56 -2
  40. package/types/engine/recentConversation.d.ts +18 -0
  41. package/types/engine/retriever.d.ts +22 -9
  42. package/types/imports/claude.d.ts +14 -0
  43. package/types/imports/importData.d.ts +29 -0
  44. package/types/imports/index.d.ts +2 -0
  45. package/types/index.d.ts +9 -0
  46. package/types/llm/openai.d.ts +6 -9
  47. package/types/llm/tinfoil.d.ts +13 -0
  48. package/types/omf.d.ts +40 -0
  49. package/types/prompt_sets/conversation/ingestion.d.ts +8 -3
  50. package/types/prompt_sets/document/ingestion.d.ts +8 -3
  51. package/types/types.d.ts +127 -2
  52. package/types/vendor/tinfoil.browser.d.ts +6348 -0
@@ -9,14 +9,186 @@
9
9
  * Each factory takes a storage backend and returns an object mapping
10
10
  * tool names to async functions: { tool_name: async (args) => resultString }
11
11
  */
12
- /** @import { ExtractionExecutorHooks, StorageBackend } from '../types.js' */
12
+ /** @import { ChatCompletionResponse, ExtractionExecutorHooks, LLMClient, StorageBackend, ToolDefinition } from '../types.js' */
13
13
  import {
14
14
  compactBullets,
15
+ ensureBulletMetadata,
15
16
  inferTopicFromPath,
16
17
  normalizeFactText,
17
18
  parseBullets,
18
19
  renderCompactedDocument,
20
+ todayIsoDate,
19
21
  } from '../bullets/index.js';
22
+ import { trimRecentConversation } from './recentConversation.js';
23
+
24
// Tunables for the augment_query flow (consumed by createAugmentQueryExecutor
// and its helpers below).
// Maximum number of selected memory files loaded per augment_query call.
const MAX_AUGMENT_QUERY_FILES = 8;
// Per-file character cap applied when rendering a memory file for the crafter.
const MAX_AUGMENT_FILE_CHARS = 1800;
// Total character budget across all rendered memory files combined.
const MAX_AUGMENT_TOTAL_CHARS = 12000;
// Character cap for the optional recent-conversation context section.
const MAX_AUGMENT_RECENT_CONTEXT_CHARS = 3000;
// Number of attempts for the prompt-crafter LLM call before giving up.
const AUGMENT_CRAFTER_MAX_ATTEMPTS = 3;
// Base delay (ms) for exponential backoff plus jitter between crafter retries.
const AUGMENT_CRAFTER_RETRY_BASE_DELAY_MS = 350;
30
+
31
/**
 * Canonicalize a memory-file path for fuzzy lookup.
 * Separators are unified, accents folded, and the result is passed through
 * normalizeFactText with slashes/underscores treated as word breaks.
 * @param {unknown} value - candidate path
 * @param {{ stripExtension?: boolean }} [options] - drop a trailing ".md" first
 * @returns {string} normalized lookup text
 */
function normalizeLookupPath(value, { stripExtension = false } = {}) {
  // Unify separators: backslashes -> slashes, drop leading "./" and leading
  // slashes, collapse duplicate slashes.
  let path = String(value || '').trim();
  path = path.replace(/\\/g, '/');
  path = path.replace(/^\.\//, '');
  path = path.replace(/^\/+/, '');
  path = path.replace(/\/+/g, '/');

  if (stripExtension) {
    path = path.replace(/\.md$/i, '');
  }

  // Fold accented characters to their base form when the runtime supports it.
  if (typeof path.normalize === 'function') {
    path = path.normalize('NFKD').replace(/[\u0300-\u036f]/g, '');
  }

  // Treat separators and underscores as word breaks before fact normalization.
  return normalizeFactText(path.replace(/[\/_]/g, ' '));
}
49
+
50
/**
 * Decide whether a stored file path is a plausible match for a search query.
 * Tries a cheap case-insensitive substring check first, then falls back to
 * comparing normalized forms (with and without the ".md" extension).
 * @param {unknown} path
 * @param {unknown} query
 * @returns {boolean}
 */
function pathMatchesQuery(path, query) {
  const candidate = String(path || '');
  const needle = String(query || '').trim().toLowerCase();
  if (!candidate || !needle) return false;

  // Fast path: plain case-insensitive substring match on the raw path.
  if (candidate.toLowerCase().includes(needle)) return true;

  const normalizedNeedle = normalizeFactText(needle);
  if (!normalizedNeedle) return false;

  // Slow path: compare against the normalized path, once as-is and once with
  // the markdown extension stripped.
  if (normalizeLookupPath(candidate).includes(normalizedNeedle)) return true;
  return normalizeLookupPath(candidate, { stripExtension: true }).includes(normalizedNeedle);
}
62
+
63
// System prompt for the dedicated "prompt crafter" LLM pass run by
// createAugmentQueryExecutor. It instructs the model to produce a single JSON
// object ({"reviewPrompt": ...}) containing a minimized delegation prompt in
// which memory-derived facts are wrapped in [[user_data]] tags. The executor
// parses that JSON via parseCrafterJson and strips the tags for the API copy.
const AUGMENT_QUERY_EXECUTOR_SYSTEM_PROMPT = `You craft delegation prompts for a frontier model.

Your job is to turn a user's request plus selected memory into a minimized, self-contained prompt with explicit [[user_data]] tagging.

Return JSON only with this exact shape:
{"reviewPrompt":"string"}

Core rules:
- The frontier model has zero prior context. Include everything it actually needs in one pass.
- Include only the minimum user-specific data required to answer well.
- If memory is not actually needed, keep the prompt generic.
- Keep the user's current request in normal prose.
- Every additional fact sourced from memory files or recent conversation that you include must be wrapped in [[user_data]]...[[/user_data]].
- Do not wrap generic instructions, output-format guidance, or your own reasoning in tags.
- Strip personal identifiers unless they are strictly necessary.
- No real names unless the task genuinely requires the specific name.
- No specific location unless the task depends on location.
- Put everything into one final minimized prompt in reviewPrompt.
- Do not include markdown fences or any text outside the JSON object.

Privacy and minimization:
- Every included fact should pass this test: "Does the frontier model need this specific fact to answer well?" If no, leave it out.
- If a memory fact only repeats or confirms what the current query already makes obvious, leave it out.
- Generalize when possible. Prefer "their partner is vegetarian" or just "vegetarian-friendly options" over a partner's real name.
- Open-ended everyday questions usually need less context than planning or personalized analysis questions.
- Do not assume household members are part of the request unless the user's question or the retrieved memory makes that clearly necessary.

Common over-sharing patterns to avoid:
- Do not include background facts that merely restate the topic, interest, or domain already obvious from the user's current query.
- Do not include descriptive biography when the answer only needs concrete constraints, preferences, specs, or requirements.
- Only include memory when it changes the answer: constraints, tradeoffs, personalization, or disambiguation.
- Prefer concise, answer-shaping facts over broad user background.

The user will review the exact prompt before it is sent. Keep it useful, minimal, and explicit.`;
97
+
98
/**
 * Trim a string and cap it at `limit` characters, appending a truncation
 * marker when the cap is applied. Non-string input yields ''.
 * @param {unknown} value
 * @param {number} limit
 * @returns {string}
 */
function clipText(value, limit) {
  if (typeof value !== 'string') return '';
  const trimmed = value.trim();
  if (trimmed === '') return '';
  if (trimmed.length > limit) {
    return `${trimmed.slice(0, limit)}\n...(truncated)`;
  }
  return trimmed;
}
104
+
105
/**
 * Render memory files as markdown sections ("## path" + content) under a
 * shared character budget. Each file is capped at MAX_AUGMENT_FILE_CHARS and
 * the running total at MAX_AUGMENT_TOTAL_CHARS; files past the budget are
 * replaced with a placeholder rather than dropped.
 * @param {Array<{ path?: string, content?: string }>} files
 * @returns {string} sections joined by blank lines
 */
function renderFiles(files) {
  const list = Array.isArray(files) ? files : [];
  const sections = [];
  let budgetUsed = 0;

  for (let i = 0; i < list.length; i += 1) {
    const entry = list[i];
    // Fall back to a synthetic name when the entry has no usable path.
    const heading = typeof entry?.path === 'string' ? entry.path : `memory-${i + 1}.md`;

    let body = typeof entry?.content === 'string' ? entry.content.trim() : '';
    if (!body) body = '(empty)';

    const budgetLeft = MAX_AUGMENT_TOTAL_CHARS - budgetUsed;
    if (budgetLeft <= 0) {
      body = '(omitted for length)';
    } else {
      body = clipText(body, Math.min(MAX_AUGMENT_FILE_CHARS, budgetLeft));
      budgetUsed += body.length;
    }

    sections.push(`## ${heading}\n${body}`);
  }

  return sections.join('\n\n');
}
125
+
126
/**
 * Assemble the user-role message for the prompt-crafter pass: the query,
 * the rendered memory files, optionally a clipped slice of the recent
 * conversation, and a closing reminder of the output contract.
 * @param {{ userQuery: string, files: Array<{path?: string, content?: string}>, conversationText?: string }} input
 * @returns {string}
 */
function buildCrafterInput({ userQuery, files, conversationText }) {
  const parts = [];
  parts.push(`User query:\n${userQuery.trim()}`);
  parts.push(`Retrieved memory files:\n${renderFiles(files)}`);

  // Recent conversation is optional; only add the section when non-empty.
  const recent = trimRecentContext(conversationText);
  if (recent) {
    parts.push(`Recent conversation:\n${recent}`);
  }

  parts.push(`Produce the JSON now. Remember:
- reviewPrompt should be the exact final prompt that will be shown to the user
- keep the current user request in normal prose
- any extra facts injected from memory or recent conversation must stay wrapped in [[user_data]] tags
- if a memory fact only restates the domain already obvious from the query, omit it
- omit names, relationship labels, and locations unless the prompt really needs them`);

  return parts.join('\n\n');
}
146
+
147
/**
 * Pull the text body out of an LLM response object.
 * Returns '' when the response is missing or its `content` is not a string.
 * @param {{ content?: unknown } | null | undefined} response
 * @returns {string}
 */
function extractResponseText(response) {
  if (response && typeof response.content === 'string') {
    return response.content;
  }
  return '';
}
152
+
153
/**
 * Parse the prompt-crafter's raw reply into { reviewPrompt }.
 * Tolerates markdown code fences and leading/trailing noise around the JSON
 * object; an absent or non-string reviewPrompt yields ''.
 * @param {unknown} rawText
 * @returns {{ reviewPrompt: string }}
 * @throws {Error} when the reply is empty or contains no parseable JSON
 */
function parseCrafterJson(rawText) {
  const text = typeof rawText === 'string' ? rawText.trim() : '';
  if (!text) {
    throw new Error('augment_query prompt crafter returned an empty response.');
  }

  // Prefer the contents of a ```json fence when present.
  const fenced = text.match(/```(?:json)?\s*([\s\S]*?)```/i);
  const candidate = fenced?.[1]?.trim() || text;

  // Slice out the outermost {...} span, if one exists, to drop stray prose.
  const open = candidate.indexOf('{');
  const close = candidate.lastIndexOf('}');
  let jsonText = candidate;
  if (open !== -1 && close !== -1 && close >= open) {
    jsonText = candidate.slice(open, close + 1);
  }

  let parsed;
  try {
    parsed = JSON.parse(jsonText);
  } catch (error) {
    throw new Error(`augment_query prompt crafter returned invalid JSON: ${error instanceof Error ? error.message : String(error)}`);
  }

  const reviewPrompt = typeof parsed?.reviewPrompt === 'string' ? parsed.reviewPrompt.trim() : '';
  return { reviewPrompt };
}
178
+
179
/**
 * Resolve after approximately `ms` milliseconds.
 * @param {number} ms
 * @returns {Promise<void>}
 */
function sleep(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
182
+
183
/**
 * Compute the retry delay for a failed crafter attempt: exponential backoff
 * (base * 2^attemptIndex) plus random jitter of up to one base delay.
 * @param {number} attemptIndex - zero-based attempt counter
 * @returns {number} delay in milliseconds
 */
function getCrafterRetryDelay(attemptIndex) {
  const backoff = AUGMENT_CRAFTER_RETRY_BASE_DELAY_MS * 2 ** attemptIndex;
  const jitter = Math.random() * AUGMENT_CRAFTER_RETRY_BASE_DELAY_MS;
  return backoff + jitter;
}
188
+
189
/**
 * Collapse all whitespace runs to single spaces and trim the ends, so two
 * query strings can be compared loosely. Falsy input yields ''.
 * @param {unknown} text
 * @returns {string}
 */
function normalizeQueryText(text) {
  const value = String(text || '');
  return value.replace(/\s+/g, ' ').trim();
}
20
192
 
21
193
  /**
22
194
  * Build tool executors for the retrieval (read) flow.
@@ -33,9 +205,10 @@ export function createRetrievalExecutors(backend) {
33
205
  const contentPaths = results.map(r => r.path);
34
206
 
35
207
  const allFiles = await backend.exportAll();
36
- const queryLower = query.toLowerCase();
37
208
  const pathMatches = allFiles
38
- .filter(f => !f.path.endsWith('_tree.md') && f.path.toLowerCase().includes(queryLower))
209
+ .filter((file) => typeof file?.path === 'string' && typeof file?.content === 'string')
210
+ .filter((file) => !file.path.endsWith('_tree.md'))
211
+ .filter((file) => pathMatchesQuery(file.path, query))
39
212
  .map(f => f.path);
40
213
 
41
214
  const seen = new Set();
@@ -47,20 +220,185 @@ export function createRetrievalExecutors(backend) {
47
220
  return JSON.stringify({ paths: paths.slice(0, 5), count: Math.min(paths.length, 5) });
48
221
  },
49
222
  read_file: async ({ path }) => {
50
- const content = await backend.read(path);
223
+ const resolvedPath = typeof backend.resolvePath === 'function'
224
+ ? await backend.resolvePath(path)
225
+ : null;
226
+ const content = await backend.read(resolvedPath || path);
51
227
  if (content === null) return JSON.stringify({ error: `File not found: ${path}` });
52
228
  return content.length > 1500 ? content.slice(0, 1500) + '...(truncated)' : content;
53
229
  }
54
230
  };
55
231
  }
56
232
 
233
/**
 * Build the executed augment_query tool for the retrieval flow.
 *
 * The outer memory-agent loop chooses relevant files. This executor then runs a
 * dedicated prompt-crafter pass that turns those raw inputs into the final
 * tagged prompt, keeping prompt-crafting fully inside nanomem.
 *
 * @param {object} options
 * @param {StorageBackend} options.backend
 * @param {LLMClient} options.llmClient
 * @param {string} options.model
 * @param {string} options.query
 * @param {string} [options.conversationText]
 * @param {(event: { stage: 'loading', message: string, attempt?: number }) => void} [options.onProgress]
 * @returns {(args: { user_query?: string, memory_files?: string[] }) => Promise<string>}
 *   async tool function returning a JSON string: either
 *   { reviewPrompt, apiPrompt, files }, { noRelevantMemory, files }, or { error }
 */
export function createAugmentQueryExecutor({ backend, llmClient, model, query, conversationText, onProgress }) {
  return async ({ user_query, memory_files }) => {
    // De-duplicate the model-selected paths, keep only non-empty strings, and
    // cap how many files a single call may load.
    const selectedPaths = Array.isArray(memory_files)
      ? [...new Set(memory_files.filter((path) => typeof path === 'string' && path.trim()))].slice(0, MAX_AUGMENT_QUERY_FILES)
      : [];
    // Only accept the tool-call's user_query when it matches the original
    // query (modulo whitespace); otherwise fall back to the trusted original.
    const originalQuery = normalizeQueryText(query);
    const providedQuery = normalizeQueryText(user_query);
    const effectiveQuery = (typeof user_query === 'string' && providedQuery && providedQuery === originalQuery)
      ? user_query.trim()
      : query;

    if (!effectiveQuery || !effectiveQuery.trim()) {
      return JSON.stringify({ error: 'augment_query requires the original user_query.' });
    }

    // No files selected: signal "no relevant memory" rather than erroring.
    if (selectedPaths.length === 0) {
      return JSON.stringify({
        noRelevantMemory: true,
        files: []
      });
    }

    // Load each selected file, resolving aliased paths when the backend
    // supports resolvePath; unreadable/empty files are silently skipped.
    const files = [];
    for (const path of selectedPaths) {
      const resolvedPath = typeof backend.resolvePath === 'function'
        ? await backend.resolvePath(path)
        : null;
      const canonicalPath = resolvedPath || path;
      const raw = await backend.read(canonicalPath);
      if (!raw) continue;
      files.push({ path: canonicalPath, content: raw });
    }

    if (files.length === 0) {
      return JSON.stringify({ error: 'augment_query could not load any selected memory files.' });
    }

    let reviewPrompt = '';
    let crafterError = '';
    // Messages for the crafter pass are built once and reused across retries.
    const messages = /** @type {import('../types.js').LLMMessage[]} */ ([
      { role: 'system', content: AUGMENT_QUERY_EXECUTOR_SYSTEM_PROMPT },
      {
        role: 'user',
        content: buildCrafterInput({
          userQuery: effectiveQuery,
          files,
          conversationText
        })
      }
    ]);

    // Retry loop: transport errors abort immediately; parse/validation
    // failures retry with exponential backoff up to the attempt cap.
    for (let attempt = 1; attempt <= AUGMENT_CRAFTER_MAX_ATTEMPTS; attempt += 1) {
      let response;
      try {
        onProgress?.({
          stage: 'loading',
          message: attempt === 1
            ? 'Crafting minimized prompt...'
            : `Retrying prompt crafting (${attempt}/${AUGMENT_CRAFTER_MAX_ATTEMPTS})...`,
          attempt
        });
        if (typeof llmClient.streamChatCompletion === 'function') {
          // Streaming path: surface one progress event when reasoning starts
          // and one when output starts, then ignore further chunks.
          let emittedReasoningPhase = false;
          let emittedOutputPhase = false;
          response = /** @type {ChatCompletionResponse} */ (await llmClient.streamChatCompletion({
            model,
            messages,
            temperature: 0,
            onDelta: (chunk) => {
              if (!chunk || emittedOutputPhase) return;
              emittedOutputPhase = true;
              onProgress?.({
                stage: 'loading',
                message: 'Finalizing prompt...',
                attempt
              });
            },
            onReasoning: (chunk) => {
              if (!chunk || emittedReasoningPhase) return;
              emittedReasoningPhase = true;
              onProgress?.({
                stage: 'loading',
                message: 'Minimizing personal context...',
                attempt
              });
            }
          }));
        } else {
          // Non-streaming fallback for clients without streamChatCompletion.
          response = /** @type {ChatCompletionResponse} */ (await llmClient.createChatCompletion({
            model,
            messages,
            temperature: 0
          }));
        }
      } catch (error) {
        // Transport/API failure: no retry, report the error to the caller.
        const message = error instanceof Error ? error.message : String(error);
        return JSON.stringify({ error: `augment_query prompt crafting failed: ${message}` });
      }

      try {
        const parsed = parseCrafterJson(extractResponseText(response));
        reviewPrompt = parsed.reviewPrompt;
        if (!reviewPrompt) {
          throw new Error('augment_query did not produce a reviewPrompt.');
        }
        crafterError = '';
        break;
      } catch (error) {
        // Bad/empty JSON from the crafter: back off and retry while attempts
        // remain; the last failure message is kept for the final error.
        crafterError = error instanceof Error ? error.message : String(error);
        if (attempt >= AUGMENT_CRAFTER_MAX_ATTEMPTS) {
          break;
        }
        const delay = getCrafterRetryDelay(attempt - 1);
        onProgress?.({
          stage: 'loading',
          message: `Prompt crafter retry ${attempt + 1}/${AUGMENT_CRAFTER_MAX_ATTEMPTS} after: ${crafterError}`,
          attempt: attempt + 1
        });
        console.warn(`[nanomem/augment_query] prompt crafter attempt ${attempt}/${AUGMENT_CRAFTER_MAX_ATTEMPTS} failed: ${crafterError}. Retrying in ${Math.round(delay)}ms.`);
        await sleep(delay);
      }
    }

    if (crafterError) {
      return JSON.stringify({ error: `${crafterError} (after ${AUGMENT_CRAFTER_MAX_ATTEMPTS} attempts)` });
    }

    // A prompt with no [[user_data]] tags means the crafter used no memory:
    // treat it the same as "no relevant memory" so the caller sends a plain
    // query instead of a crafted one.
    if (!/\[\[user_data\]\]/.test(reviewPrompt)) {
      return JSON.stringify({
        noRelevantMemory: true,
        files: []
      });
    }

    // apiPrompt is the tag-free copy actually sent to the frontier model;
    // reviewPrompt (with tags) is what the user reviews.
    const apiPrompt = stripUserDataTags(reviewPrompt);

    return JSON.stringify({
      reviewPrompt,
      apiPrompt,
      files: files.map((file) => ({
        path: file.path,
        content: clipText(file.content, MAX_AUGMENT_FILE_CHARS)
      }))
    });
  };
}
+
57
395
  /**
58
396
  * Build tool executors for the extraction (write) flow.
59
397
  * @param {StorageBackend} backend
60
398
  * @param {ExtractionExecutorHooks} [hooks]
61
399
  */
62
400
  export function createExtractionExecutors(backend, hooks = {}) {
63
- const { normalizeContent, mergeWithExisting, refreshIndex, onWrite } = hooks;
401
+ const { normalizeContent, mergeWithExisting, refreshIndex, onWrite, updatedAt } = hooks;
64
402
 
65
403
  return {
66
404
  read_file: async ({ path }) => {
@@ -70,7 +408,7 @@ export function createExtractionExecutors(backend, hooks = {}) {
70
408
  },
71
409
  create_new_file: async ({ path, content }) => {
72
410
  const exists = await backend.exists(path);
73
- if (exists) return JSON.stringify({ error: `File already exists: ${path}. Use append_memory or update_memory instead.` });
411
+ if (exists) return JSON.stringify({ error: `File already exists: ${path}. Use append_memory or update_bullets instead.` });
74
412
  const normalized = normalizeContent ? normalizeContent(content, path) : content;
75
413
  await backend.write(path, normalized);
76
414
  if (refreshIndex) await refreshIndex(path);
@@ -87,13 +425,62 @@ export function createExtractionExecutors(backend, hooks = {}) {
87
425
  onWrite?.(path, existing ?? '', newContent);
88
426
  return JSON.stringify({ success: true, path, action: 'appended' });
89
427
  },
90
- update_memory: async ({ path, content }) => {
428
+ update_bullets: async ({ path, updates }) => {
91
429
  const before = await backend.read(path);
92
- const normalized = normalizeContent ? normalizeContent(content, path) : content;
93
- await backend.write(path, normalized);
430
+ if (!before) return JSON.stringify({ error: `File not found: ${path}` });
431
+ if (!Array.isArray(updates) || updates.length === 0) return JSON.stringify({ error: 'updates must be a non-empty array' });
432
+
433
+ const parsed = parseBullets(before);
434
+ const defaultTopic = inferTopicFromPath(path);
435
+ const effectiveUpdatedAt = updatedAt || todayIsoDate();
436
+ let matchedCount = 0;
437
+ const errors = [];
438
+
439
+ for (const { old_fact, new_fact } of updates) {
440
+ const factText = typeof old_fact === 'string' && old_fact.includes('|')
441
+ ? old_fact.split('|')[0].trim()
442
+ : String(old_fact || '').trim();
443
+ const target = normalizeFactText(factText);
444
+ if (!target) { errors.push('empty old_fact'); continue; }
445
+
446
+ const idx = parsed.findIndex((b) => normalizeFactText(b.text) === target);
447
+ if (idx === -1) { errors.push(`No match: ${factText}`); continue; }
448
+
449
+ // Supersede the old bullet and push a new active replacement.
450
+ // Strip any metadata the LLM may have included in new_fact.
451
+ const oldBullet = parsed[idx];
452
+ const rawNewFact = String(new_fact || '').trim();
453
+ const cleanNewFact = rawNewFact.includes('|')
454
+ ? rawNewFact.split('|')[0].trim()
455
+ : rawNewFact;
456
+ parsed[idx] = { ...oldBullet, status: 'superseded', tier: 'history' };
457
+ parsed.push(ensureBulletMetadata(
458
+ {
459
+ text: cleanNewFact,
460
+ topic: oldBullet.topic,
461
+ source: oldBullet.source,
462
+ confidence: oldBullet.confidence,
463
+ },
464
+ { defaultTopic, updatedAt: effectiveUpdatedAt }
465
+ ));
466
+ matchedCount++;
467
+ }
468
+
469
+ if (matchedCount === 0) {
470
+ return JSON.stringify({ error: errors.join('; ') || 'No bullets matched' });
471
+ }
472
+
473
+ const compacted = compactBullets(parsed, { defaultTopic, maxActivePerTopic: 1000 });
474
+ const after = renderCompactedDocument(
475
+ compacted.working, compacted.longTerm, compacted.history,
476
+ { titleTopic: defaultTopic }
477
+ );
478
+ await backend.write(path, after);
94
479
  if (refreshIndex) await refreshIndex(path);
95
- onWrite?.(path, before ?? '', normalized);
96
- return JSON.stringify({ success: true, path, action: 'updated' });
480
+ onWrite?.(path, before, after);
481
+ const result = { success: true, path, action: 'bullets_updated', updated: matchedCount };
482
+ if (errors.length) result.errors = errors;
483
+ return JSON.stringify(result);
97
484
  },
98
485
  archive_memory: async ({ path, item_text }) => {
99
486
  const existing = await backend.read(path);
@@ -117,6 +504,70 @@ export function createExtractionExecutors(backend, hooks = {}) {
117
504
  };
118
505
  }
119
506
 
507
/**
 * Build tool executors for the deletion flow.
 * @param {StorageBackend} backend
 * @param {{ refreshIndex?: Function, onWrite?: Function }} [hooks]
 * @returns {Record<string, (args: any) => Promise<string>>} tool-name -> executor map
 */
export function createDeletionExecutors(backend, hooks = {}) {
  const { refreshIndex, onWrite } = hooks;

  return {
    list_directory: async ({ dir_path }) => {
      const { files, dirs } = await backend.ls(dir_path || '');
      return JSON.stringify({ files, dirs });
    },
    retrieve_file: async ({ query }) => {
      const results = await backend.search(query);
      const contentPaths = results.map(r => r.path);

      const allFiles = await backend.exportAll();
      // Use the same tolerant matcher as the retrieval executors
      // (pathMatchesQuery handles separators, underscores, accents, and the
      // ".md" suffix), and skip malformed entries so a missing path cannot
      // throw on .endsWith.
      const pathMatches = allFiles
        .filter((file) => typeof file?.path === 'string' && typeof file?.content === 'string')
        .filter((file) => !file.path.endsWith('_tree.md'))
        .filter((file) => pathMatchesQuery(file.path, query))
        .map(f => f.path);

      // Path matches first, then content matches, de-duplicated in order.
      const seen = new Set();
      const paths = [];
      for (const p of [...pathMatches, ...contentPaths]) {
        if (!seen.has(p)) { seen.add(p); paths.push(p); }
      }

      return JSON.stringify({ paths: paths.slice(0, 5), count: Math.min(paths.length, 5) });
    },
    read_file: async ({ path }) => {
      // Resolve aliased paths when the backend supports it, matching the
      // retrieval flow's read_file behavior.
      const resolvedPath = typeof backend.resolvePath === 'function'
        ? await backend.resolvePath(path)
        : null;
      const content = await backend.read(resolvedPath || path);
      if (content === null) return JSON.stringify({ error: `File not found: ${path}` });
      return content;
    },
    delete_bullet: async ({ path, bullet_text }) => {
      const before = await backend.read(path);
      if (!before) return JSON.stringify({ error: `File not found: ${path}` });
      // Strip pipe-delimited metadata if present — removeArchivedItem matches
      // against bullet.text (fact text only), not the full line with metadata.
      // Coerce first so a missing/non-string bullet_text cannot throw.
      const rawBulletText = String(bullet_text || '');
      const factText = rawBulletText.includes('|')
        ? rawBulletText.split('|')[0].trim()
        : rawBulletText.trim();
      const after = removeArchivedItem(before, factText, path);
      if (after === null) {
        return JSON.stringify({ error: `No exact match found for the given bullet text in: ${path}` });
      }
      // If no bullets remain, delete the file entirely instead of leaving empty headers.
      const remaining = parseBullets(after);
      if (remaining.length === 0) {
        await backend.delete(path);
        if (refreshIndex) await refreshIndex(path);
        onWrite?.(path, before, null);
        return JSON.stringify({ success: true, path, action: 'file_deleted', removed: factText });
      }
      await backend.write(path, after);
      if (refreshIndex) await refreshIndex(path);
      onWrite?.(path, before, after);
      return JSON.stringify({ success: true, path, action: 'deleted', removed: factText });
    },
  };
}
570
+
120
571
  function removeArchivedItem(content, itemText, path) {
121
572
  const raw = String(content || '');
122
573
  const target = normalizeFactText(itemText);
@@ -150,3 +601,15 @@ function removeArchivedItem(content, itemText, path) {
150
601
  if (!removed) return null;
151
602
  return filtered.join('\n').trim();
152
603
  }
604
+
605
/**
 * Clip recent-conversation text to the augment_query context budget.
 * Thin wrapper around trimRecentConversation with the module's cap applied.
 * @param {string} [conversationText]
 */
function trimRecentContext(conversationText) {
  const options = { maxChars: MAX_AUGMENT_RECENT_CONTEXT_CHARS };
  return trimRecentConversation(conversationText, options);
}
610
+
611
+ function stripUserDataTags(text) {
612
+ return String(text ?? '')
613
+ .replace(/\[\[user_data\]\]/g, '')
614
+ .replace(/\[\[\/user_data\]\]/g, '');
615
+ }