@openanonymity/nanomem 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. package/README.md +194 -0
  2. package/package.json +85 -0
  3. package/src/backends/BaseStorage.js +177 -0
  4. package/src/backends/filesystem.js +177 -0
  5. package/src/backends/indexeddb.js +208 -0
  6. package/src/backends/ram.js +113 -0
  7. package/src/backends/schema.js +42 -0
  8. package/src/bullets/bulletIndex.js +125 -0
  9. package/src/bullets/compaction.js +109 -0
  10. package/src/bullets/index.js +16 -0
  11. package/src/bullets/normalize.js +241 -0
  12. package/src/bullets/parser.js +199 -0
  13. package/src/bullets/scoring.js +53 -0
  14. package/src/cli/auth.js +323 -0
  15. package/src/cli/commands.js +411 -0
  16. package/src/cli/config.js +120 -0
  17. package/src/cli/diff.js +68 -0
  18. package/src/cli/help.js +84 -0
  19. package/src/cli/output.js +269 -0
  20. package/src/cli/spinner.js +54 -0
  21. package/src/cli.js +178 -0
  22. package/src/engine/compactor.js +247 -0
  23. package/src/engine/executors.js +152 -0
  24. package/src/engine/ingester.js +229 -0
  25. package/src/engine/retriever.js +414 -0
  26. package/src/engine/toolLoop.js +176 -0
  27. package/src/imports/chatgpt.js +160 -0
  28. package/src/imports/index.js +14 -0
  29. package/src/imports/markdown.js +104 -0
  30. package/src/imports/oaFastchat.js +124 -0
  31. package/src/index.js +199 -0
  32. package/src/llm/anthropic.js +264 -0
  33. package/src/llm/openai.js +179 -0
  34. package/src/prompt_sets/conversation/ingestion.js +51 -0
  35. package/src/prompt_sets/document/ingestion.js +43 -0
  36. package/src/prompt_sets/index.js +31 -0
  37. package/src/types.js +382 -0
  38. package/src/utils/portability.js +174 -0
  39. package/types/backends/BaseStorage.d.ts +42 -0
  40. package/types/backends/filesystem.d.ts +11 -0
  41. package/types/backends/indexeddb.d.ts +12 -0
  42. package/types/backends/ram.d.ts +8 -0
  43. package/types/backends/schema.d.ts +14 -0
  44. package/types/bullets/bulletIndex.d.ts +47 -0
  45. package/types/bullets/compaction.d.ts +10 -0
  46. package/types/bullets/index.d.ts +36 -0
  47. package/types/bullets/normalize.d.ts +95 -0
  48. package/types/bullets/parser.d.ts +31 -0
  49. package/types/bullets/scoring.d.ts +12 -0
  50. package/types/engine/compactor.d.ts +27 -0
  51. package/types/engine/executors.d.ts +46 -0
  52. package/types/engine/ingester.d.ts +29 -0
  53. package/types/engine/retriever.d.ts +50 -0
  54. package/types/engine/toolLoop.d.ts +9 -0
  55. package/types/imports/chatgpt.d.ts +14 -0
  56. package/types/imports/index.d.ts +3 -0
  57. package/types/imports/markdown.d.ts +31 -0
  58. package/types/imports/oaFastchat.d.ts +30 -0
  59. package/types/index.d.ts +21 -0
  60. package/types/llm/anthropic.d.ts +16 -0
  61. package/types/llm/openai.d.ts +16 -0
  62. package/types/prompt_sets/conversation/ingestion.d.ts +7 -0
  63. package/types/prompt_sets/document/ingestion.d.ts +7 -0
  64. package/types/prompt_sets/index.d.ts +11 -0
  65. package/types/types.d.ts +293 -0
  66. package/types/utils/portability.d.ts +33 -0
@@ -0,0 +1,160 @@
1
/**
 * ChatGPT export parser.
 *
 * Handles conversations.json from ChatGPT's "Export data" feature.
 * Format: array of conversation objects, each with a tree-structured `mapping`.
 *
 * Based on the OA Fastchat chatgptImporter.
 */
/** @import { ChatGptSession } from '../types.js' */
import { safeDateIso } from '../bullets/normalize.js';

// content_type values that never carry user-facing text: hidden context
// blobs and internal reasoning artifacts. Messages with these types are
// dropped during normalization.
const SKIP_CONTENT_TYPES = new Set([
  'user_editable_context',
  'reasoning_recap',
  'thoughts',
]);
17
+
18
/**
 * Detect whether parsed JSON is a ChatGPT export.
 * ChatGPT exports are arrays of objects with `mapping` and `current_node`.
 * @param {unknown} parsed
 * @returns {boolean}
 */
export function isChatGptExport(parsed) {
  if (!Array.isArray(parsed)) return false;
  if (parsed.length === 0) return false;
  const first = parsed[0];
  // Boolean() guards the case where the first element is falsy (null, 0, ''):
  // the bare && chain would leak that falsy value instead of returning false,
  // breaking the documented boolean contract.
  return Boolean(first && typeof first === 'object' && 'mapping' in first);
}
30
+
31
/**
 * Parse a ChatGPT export into normalized sessions.
 * Conversations that yield no visible messages are dropped.
 * @param {unknown[]} conversations — the parsed JSON array
 * @returns {ChatGptSession[]}
 * @throws {Error} when the input is not an array
 */
export function parseChatGptExport(conversations) {
  if (!Array.isArray(conversations)) {
    throw new Error('ChatGPT export should be an array of conversations.');
  }

  const sessions = [];
  for (const conversation of conversations) {
    const session = normalizeChatGptConversation(conversation);
    if (session.messages.length > 0) sessions.push(session);
  }
  return sessions;
}
45
+
46
/**
 * Convert one raw ChatGPT conversation object into a normalized session:
 * { title, messages, updatedAt }. Tool/system/developer turns, visually
 * hidden messages, internal content types, and empty-text messages are
 * all filtered out.
 */
function normalizeChatGptConversation(conversation) {
  // Messages along the active branch of the conversation tree.
  const rawMessages = getConversationPathMessages(conversation);
  const messages = [];

  for (const message of rawMessages) {
    const authorRole = message?.author?.role;

    // Skip tool and system messages
    if (authorRole === 'tool' || authorRole === 'system' || authorRole === 'developer') continue;
    if (message?.metadata?.is_visually_hidden_from_conversation) continue;

    const contentType = message?.content?.content_type || '';
    if (SKIP_CONTENT_TYPES.has(contentType)) continue;

    // Anything that isn't explicitly the assistant is treated as the user.
    /** @type {'assistant' | 'user'} */
    const role = authorRole === 'assistant' ? 'assistant' : 'user';
    const text = extractText(message?.content);

    if (!text.trim()) continue;

    messages.push({ role, content: text });
  }

  const title = (conversation?.title || '').trim() || null;
  // NOTE(review): update_time is epoch seconds in ChatGPT exports; this
  // passes a stringified millisecond value (e.g. "1700000000000") to
  // safeDateIso — confirm safeDateIso accepts numeric-string timestamps,
  // otherwise updatedAt will silently come back null/invalid.
  const updatedAt = conversation?.update_time
    ? safeDateIso(String(conversation.update_time * 1000))
    : null;
  return { title, messages, updatedAt };
}
75
+
76
/**
 * Extract the messages along the active branch of a ChatGPT conversation
 * tree. Walks parent pointers from `current_node` up to the root and
 * returns the messages oldest-first. When `current_node` is missing or
 * dangling, falls back to every message in `mapping` sorted by creation
 * time.
 */
function getConversationPathMessages(conversation) {
  const mapping = conversation?.mapping || {};
  const currentNode = conversation?.current_node;

  if (currentNode && mapping[currentNode]) {
    // Follow parent pointers leaf -> root, guarding against cycles.
    const chain = [];
    const seen = new Set();
    let id = currentNode;
    while (id && mapping[id] && !seen.has(id)) {
      seen.add(id);
      const node = mapping[id];
      if (node?.message) chain.push(node.message);
      id = node.parent;
    }
    chain.reverse();
    return chain;
  }

  // Fallback: no usable current_node — collect everything by time.
  const all = [];
  for (const node of Object.values(mapping)) {
    if (node?.message) all.push(node.message);
  }
  return all.sort((a, b) => (a.create_time || 0) - (b.create_time || 0));
}
102
+
103
/**
 * Heuristic: does this `code` content block look like an internal tool
 * invocation (search queries, page-open payloads) rather than real code
 * the user should see? Such blocks carry no language tag (or "unknown")
 * and contain either a function-call expression or a JSON tool payload.
 */
function isInternalToolCodeBlock(content) {
  if (!content || typeof content !== 'object') return false;
  if ((content.content_type || '') !== 'code') return false;
  if (typeof content.text !== 'string') return false;

  // Genuine code blocks carry a real language tag; tool payloads don't.
  const language = (content.language || '').trim().toLowerCase();
  if (language && language !== 'unknown') return false;

  const body = content.text.trim();

  // Function-call style: search("..."), web.search("..."), etc.
  if (/^[a-z_][a-z0-9_.]*\s*\(/i.test(body)) return true;

  // JSON tool payloads
  if (body.startsWith('{') && body.endsWith('}')) {
    try {
      const payload = JSON.parse(body);
      if (Array.isArray(payload?.search_query) || Array.isArray(payload?.image_query)) return true;
      if (Array.isArray(payload?.open)) return true;
      if (typeof payload?.response_length === 'string' && !payload.text && !payload.content) return true;
    } catch { /* not JSON, keep it */ }
  }

  return false;
}
129
+
130
/**
 * Flatten a ChatGPT message `content` object into plain text.
 * Code blocks become fenced markdown (internal tool payloads are dropped);
 * multipart content collects string parts, then falls back to `text` and
 * appends any string `result`.
 */
function extractText(content) {
  if (!content || typeof content !== 'object') return '';

  const pieces = [];

  if ((content.content_type || '') === 'code' && typeof content.text === 'string') {
    // Drop internal tool invocations; fence genuine code as markdown.
    if (isInternalToolCodeBlock(content)) return '';
    const lang = content.language ? content.language.trim() : '';
    pieces.push('```' + lang + '\n' + content.text + '\n```');
  } else {
    const parts = Array.isArray(content.parts) ? content.parts : [];
    for (const part of parts) {
      if (typeof part === 'string') {
        pieces.push(part);
        continue;
      }
      if (part && typeof part === 'object') {
        if (typeof part.text === 'string') pieces.push(part.text);
        else if (typeof part.content === 'string') pieces.push(part.content);
      }
    }
    // `text` is only used when no parts were collected.
    if (pieces.length === 0 && typeof content.text === 'string') {
      pieces.push(content.text);
    }
    if (typeof content.result === 'string') {
      pieces.push(content.result);
    }
  }

  return pieces.join('\n').trim();
}
@@ -0,0 +1,14 @@
1
// Barrel module: the public importer entry points.

// OA Fastchat JSON exports.
export {
  extractSessionsFromOAFastchatExport,
  extractConversationFromOAFastchatExport,
  listOAFastchatSessions,
} from './oaFastchat.js';

// ChatGPT "Export data" conversations.json.
export {
  isChatGptExport,
  parseChatGptExport,
} from './chatgpt.js';

// Plain markdown files / directories.
export {
  parseMarkdownFiles,
} from './markdown.js';
@@ -0,0 +1,104 @@
1
+ /**
2
+ * Plain markdown file importer.
3
+ *
4
+ * Converts markdown files into sessions that can be fed to ingest().
5
+ * Each file becomes a single-message conversation with the file content
6
+ * as a user message, so the LLM extracts structured facts from it.
7
+ *
8
+ * Usage (library):
9
+ * import { parseMarkdownFiles } from '@openanonymity/nanomem/imports'
10
+ * const sessions = parseMarkdownFiles([{ path: 'notes/health.md', content: '...' }])
11
+ * for (const session of sessions) await mem.ingest(session.messages)
12
+ *
13
+ * Usage (CLI):
14
+ * memory import notes.md --format markdown
15
+ * memory import notes-dir/ --format markdown
16
+ */
17
+ /** @import { Message, ChatGptSession } from '../types.js' */
18
+
19
/**
 * Parse one or more markdown documents into sessions for ingestion.
 *
 * Accepts either a single markdown string or an array of { path, content } records
 * (e.g. from reading a directory of .md files).
 *
 * @param {string | { path: string; content: string }[]} input
 * @returns {ChatGptSession[]}
 * @throws {Error} when input is neither a string nor a non-empty array
 */
export function parseMarkdownFiles(input) {
  if (typeof input === 'string') {
    return parseMarkdownString(input);
  }

  if (!Array.isArray(input) || input.length === 0) {
    throw new Error('Expected a markdown string or an array of { path, content } records.');
  }

  return input
    // typeof guard: a null entry or non-string content previously threw a
    // TypeError on `.trim()`; such records are now skipped like empty ones.
    .filter(f => typeof f?.content === 'string' && f.content.trim())
    .map(f => ({
      title: titleFromPath(f.path),
      messages: /** @type {Message[]} */ ([{ role: 'user', content: f.content.trim() }]),
      updatedAt: null,
    }));
}
45
+
46
/**
 * Split a single markdown string into sections by top-level headings.
 * If the document has multiple `# ` headings, each becomes a separate session.
 * Otherwise the whole string is a single session.
 */
function parseMarkdownString(input) {
  const trimmed = input.trim();
  if (!trimmed) return [];

  const sections = splitByTopHeadings(trimmed);

  // Zero or one section: keep the whole document as a single session.
  if (sections.length <= 1) {
    const single = /** @type {ChatGptSession} */ ({
      title: sections[0]?.heading ?? null,
      messages: /** @type {Message[]} */ ([{ role: 'user', content: trimmed }]),
      updatedAt: null,
    });
    return [single];
  }

  const sessions = [];
  for (const section of sections) {
    const body = section.content.trim();
    if (!body) continue;
    sessions.push(/** @type {ChatGptSession} */ ({
      title: section.heading,
      messages: /** @type {Message[]} */ ([{ role: 'user', content: body }]),
      updatedAt: null,
    }));
  }
  return sessions;
}
73
+
74
/**
 * Partition markdown text into sections at top-level `# ` headings.
 * Content before the first heading becomes an untitled section.
 * A section is emitted only when it has lines or a heading.
 */
function splitByTopHeadings(text) {
  /** @type {{ heading: string | null, content: string }[]} */
  const sections = [];
  /** @type {string | null} */
  let heading = null;
  /** @type {string[]} */
  let buffer = [];

  const flush = () => {
    if (buffer.length > 0 || heading) {
      sections.push({ heading, content: buffer.join('\n') });
    }
  };

  for (const line of text.split('\n')) {
    const match = /^#\s+(.*)/.exec(line);
    if (match) {
      flush();
      heading = match[1].trim() || null;
      buffer = [];
    } else {
      buffer.push(line);
    }
  }
  flush();

  return sections;
}
99
+
100
/**
 * Derive a human-readable title from a file path: basename without the
 * `.md` extension, underscores/hyphens turned into spaces. Returns null
 * when nothing usable remains.
 */
function titleFromPath(filePath) {
  if (!filePath) return null;
  const basename = filePath.split('/').pop() || '';
  const cleaned = basename.replace(/\.md$/i, '').replace(/[_-]/g, ' ').trim();
  return cleaned || null;
}
@@ -0,0 +1,124 @@
1
+ /** @import { Message, SessionSummary, SessionWithConversation } from '../types.js' */
2
+
3
/** Return `value` unchanged if it is an array, otherwise an empty array. */
function toArray(value) {
  if (Array.isArray(value)) return value;
  return [];
}
6
+
7
/** Coerce message content to a string; null/undefined become ''. */
function normalizeContent(content) {
  if (typeof content === 'string') return content;
  return content == null ? '' : String(content);
}
12
+
13
/**
 * Check a session against optional id/title filters.
 * sessionId takes precedence over sessionTitle; with neither set,
 * every session matches.
 */
function matchesSession(session, filters = /** @type {{ sessionId?: string, sessionTitle?: string }} */ ({})) {
  const { sessionId, sessionTitle } = filters;
  if (sessionId) return session?.id === sessionId;
  if (sessionTitle) return session?.title === sessionTitle;
  return true;
}
18
+
19
/**
 * Project a raw export session onto the SessionSummary shape.
 * Missing/falsy fields default to null (title defaults to '').
 */
function toSessionSummary(session) {
  const orNull = (value) => value || null;
  return {
    id: session.id,
    title: session.title || '',
    createdAt: orNull(session.createdAt),
    updatedAt: orNull(session.updatedAt),
    messageCount: orNull(session.messageCountAtGeneration),
    model: orNull(session.model)
  };
}
29
+
30
/**
 * Sort raw export messages into conversation order and map them to
 * normalized { role, content } pairs.
 *
 * Order: owning session's ordinal, then message timestamp, then id as a
 * deterministic tiebreaker. Messages whose normalized content is blank
 * are dropped.
 *
 * @param {any[]} messages
 * @param {Map<string, number>} sessionOrder - session id -> ordinal position
 * @returns {Message[]}
 */
function toConversationMessages(messages, sessionOrder) {
  // Copy before sorting: Array#sort mutates in place and `messages` is a
  // caller-owned array.
  return [...messages]
    .sort((a, b) => {
      const sessionDiff = (sessionOrder.get(a?.sessionId) || 0) - (sessionOrder.get(b?.sessionId) || 0);
      if (sessionDiff !== 0) return sessionDiff;
      const timeDiff = (a?.timestamp || 0) - (b?.timestamp || 0);
      if (timeDiff !== 0) return timeDiff;
      return String(a?.id || '').localeCompare(String(b?.id || ''));
    })
    .map((message) => /** @type {Message} */ ({
      role: message?.role === 'assistant' ? 'assistant' : 'user',
      content: normalizeContent(message?.content)
    }))
    .filter((message) => message.content.trim());
}
45
+
46
/**
 * List the session summaries present in an OA Fastchat export.
 * @param {any} exportJson
 * @returns {SessionSummary[]}
 */
export function listOAFastchatSessions(exportJson) {
  const rawSessions = toArray(exportJson?.data?.chats?.sessions);
  const summaries = [];
  for (const session of rawSessions) {
    summaries.push(toSessionSummary(session));
  }
  return summaries;
}
54
+
55
/**
 * Extract the selected sessions (all by default) from an OA Fastchat
 * export, each paired with its normalized conversation. Sessions with no
 * non-empty messages are dropped.
 *
 * @param {any} exportJson
 * @param {{ sessionId?: string, sessionTitle?: string }} [options]
 * @returns {SessionWithConversation[]}
 * @throws {Error} when the export has no sessions, no session matches the
 *   filter, or the selection contains no messages
 */
export function extractSessionsFromOAFastchatExport(exportJson, options = {}) {
  const allSessions = toArray(exportJson?.data?.chats?.sessions);
  const allMessages = toArray(exportJson?.data?.chats?.messages);

  if (allSessions.length === 0) {
    throw new Error('OAFastChat export does not contain any chat sessions.');
  }

  const selected = allSessions.filter((session) => matchesSession(session, options));
  if (selected.length === 0) {
    const hint = options.sessionId
      ? `sessionId=${options.sessionId}`
      : `sessionTitle=${options.sessionTitle}`;
    throw new Error(`Could not find a chat session matching ${hint}.`);
  }

  // Stable chronological ordering: creation time first, id as tiebreaker.
  const ordered = [...selected].sort((a, b) => {
    const createdDiff = (a?.createdAt || 0) - (b?.createdAt || 0);
    if (createdDiff !== 0) return createdDiff;
    return String(a?.id || '').localeCompare(String(b?.id || ''));
  });
  const sessionOrder = new Map(ordered.map((session, index) => [session.id, index]));

  const result = [];
  for (const session of ordered) {
    const sessionMessages = allMessages.filter((message) => message?.sessionId === session.id);
    const conversation = toConversationMessages(sessionMessages, sessionOrder);
    if (conversation.length > 0) {
      result.push({ session: toSessionSummary(session), conversation });
    }
  }

  if (result.length === 0) {
    throw new Error('The selected chat session set does not contain any messages.');
  }

  return result;
}
97
+
98
/**
 * Extract a single merged conversation across the selected sessions.
 * `session` is non-null only when exactly one session matched.
 *
 * @param {any} exportJson
 * @param {{ sessionId?: string, sessionTitle?: string }} [options]
 * @returns {{ session: SessionSummary | null, sessions: SessionSummary[], conversation: Message[] }}
 */
export function extractConversationFromOAFastchatExport(exportJson, options = {}) {
  const entries = extractSessionsFromOAFastchatExport(exportJson, options);
  const sessionOrder = new Map(entries.map((entry, index) => [entry.session.id, index]));

  // Re-wrap each normalized message with synthetic ids/timestamps so the
  // shared sorter can interleave sessions in their established order.
  const flattened = [];
  for (const entry of entries) {
    entry.conversation.forEach((message, index) => {
      flattened.push({
        id: `${entry.session.id}:${index}`,
        sessionId: entry.session.id,
        timestamp: index,
        role: message.role,
        content: message.content
      });
    });
  }

  const conversation = toConversationMessages(flattened, sessionOrder);

  return {
    session: entries.length === 1 ? entries[0].session : null,
    sessions: entries.map((entry) => entry.session),
    conversation
  };
}
package/src/index.js ADDED
@@ -0,0 +1,199 @@
1
+ /**
2
+ * @openanonymity/nanomem — LLM-driven personal memory.
3
+ *
4
+ * createMemoryBank(config) is the main entry point.
5
+ *
6
+ * Returned object has three named groups:
7
+ *
8
+ * Engine (LLM-driven): init, retrieve, ingest, compact
9
+ * Backends (storage ops): mem.storage.{ read, write, delete, exists,
10
+ * search, ls, getTree,
11
+ * rebuildTree, exportAll }
12
+ * Utilities (portability): mem.serialize(), mem.toZip()
13
+ */
14
+ /** @import { LLMClient, MemoryBank, MemoryBankConfig, MemoryBankLLMConfig, Message, IngestOptions, RetrievalResult, StorageBackend } from './types.js' */
15
+
16
+ import { createOpenAIClient } from './llm/openai.js';
17
+ import { createAnthropicClient } from './llm/anthropic.js';
18
+ import { MemoryBulletIndex } from './bullets/bulletIndex.js';
19
+ import { MemoryRetriever } from './engine/retriever.js';
20
+ import { MemoryIngester } from './engine/ingester.js';
21
+ import { MemoryCompactor } from './engine/compactor.js';
22
+ import { InMemoryStorage } from './backends/ram.js';
23
+ import { serialize, toZip } from './utils/portability.js';
24
+
25
/**
 * Create a memory instance.
 *
 * @param {MemoryBankConfig} [config]
 * @returns {MemoryBank}
 */
export function createMemoryBank(config = {}) {
  // A caller-supplied client wins; otherwise one is built from config.llm.
  const llmClient = config.llmClient || _createLlmClient(config.llm);
  const model = config.model || config.llm?.model || 'gpt-4o';
  const backend = _createBackend(config.storage, config.storagePath);
  const bulletIndex = new MemoryBulletIndex(backend);

  const retrieval = new MemoryRetriever({
    backend, bulletIndex, llmClient, model,
    onProgress: config.onProgress,
    onModelText: config.onModelText,
  });
  const ingester = new MemoryIngester({
    backend, bulletIndex, llmClient, model,
    onToolCall: config.onToolCall,
  });
  const compactor = new MemoryCompactor({ backend, bulletIndex, llmClient, model, onProgress: config.onCompactProgress });

  // These wrappers keep the bullet index in sync with direct storage
  // mutations — callers must not bypass them via the raw backend.
  async function write(path, content) {
    await backend.write(path, content);
    await bulletIndex.refreshPath(path);
  }

  async function remove(path) {
    await backend.delete(path);
    await bulletIndex.refreshPath(path);
  }

  async function rebuildTree() {
    await backend.rebuildTree();
    await bulletIndex.rebuild();
  }

  // ─── Public API ──────────────────────────────────────────────
  return {
    /** Initialize the storage backend (creates seed files if empty). */
    init: () => backend.init(),

    // ─── High-level (LLM-driven) ──────────────────────────────

    /**
     * Retrieve relevant memory context for a query.
     * @param {string} query
     * @param {string} [conversationText] current session text for reference resolution
     * @returns {Promise<RetrievalResult | null>}
     */
    retrieve: (query, conversationText) => retrieval.retrieveForQuery(query, conversationText),

    /**
     * Ingest facts from a conversation into memory.
     * @param {Message[]} messages
     * @param {IngestOptions} [options]
     */
    ingest: (messages, options) => ingester.ingest(messages, options),

    /** Compact all memory files (dedup, archive stale facts). */
    compact: () => compactor.compactAll(),

    // ─── Low-level (direct storage ops) ──────────────────────

    // write/delete/rebuildTree route through the index-syncing wrappers
    // above; the remaining operations delegate straight to the backend.
    storage: {
      read: (path) => backend.read(path),
      write: (path, content) => write(path, content),
      delete: (path) => remove(path),
      exists: (path) => backend.exists(path),
      search: (query) => backend.search(query),
      ls: (dirPath) => backend.ls(dirPath),
      getTree: () => backend.getTree(),
      rebuildTree: () => rebuildTree(),
      exportAll: () => backend.exportAll(),
      clear: () => backend.clear(),
    },

    // ─── Utilities (portability) ──────────────────────────────

    /** Serialize entire memory state to a single portable string. */
    serialize: async () => serialize(await backend.exportAll()),

    /** Serialize entire memory state to a ZIP archive (Uint8Array). */
    toZip: async () => toZip(await backend.exportAll()),

    // ─── Internals (for advanced use / testing) ──────────────
    _backend: backend,
    _bulletIndex: bulletIndex,
  };
}
116
+
117
+ // ─── Internal Helpers ────────────────────────────────────────────
118
+
119
/**
 * Build an LLM client from config.llm. An explicit `provider` wins;
 * otherwise the provider is sniffed from the base URL, defaulting to
 * OpenAI-compatible.
 * @throws {Error} when no API key is configured
 */
function _createLlmClient(llmConfig = /** @type {MemoryBankLLMConfig} */ ({ apiKey: '' })) {
  const { apiKey, baseUrl, headers, provider } = llmConfig;
  if (!apiKey) {
    throw new Error('createMemoryBank: config.llm.apiKey is required (or provide config.llmClient)');
  }

  const resolvedProvider = provider || _detectProvider(baseUrl);
  return resolvedProvider === 'anthropic'
    ? createAnthropicClient({ apiKey, baseUrl, headers })
    : createOpenAIClient({ apiKey, baseUrl, headers });
}
133
+
134
/**
 * Infer the LLM provider from a base URL. Anything pointing at
 * anthropic.com is 'anthropic'; everything else (including no URL)
 * defaults to 'openai'.
 */
function _detectProvider(baseUrl) {
  if (baseUrl && baseUrl.toLowerCase().includes('anthropic.com')) {
    return 'anthropic';
  }
  return 'openai';
}
140
+
141
/**
 * Resolve the storage backend from config. Accepts a duck-typed custom
 * backend object (anything with a read() method), a backend name
 * ('indexeddb' | 'filesystem' | 'ram'), or nothing (defaults to RAM).
 * Named backends are lazy-loaded behind an async proxy.
 */
function _createBackend(storage, storagePath) {
  // Custom backend object
  if (storage && typeof storage === 'object' && typeof storage.read === 'function') {
    return storage;
  }

  const storageType = typeof storage === 'string' ? storage : 'ram';

  if (storageType === 'indexeddb') {
    return _asyncBackend(() => import('./backends/indexeddb.js').then(m => new m.IndexedDBStorage()));
  }
  if (storageType === 'filesystem') {
    return _asyncBackend(() => import('./backends/filesystem.js').then(m => new m.FileSystemStorage(storagePath || 'nanomem')));
  }
  // 'ram' and any unrecognized value fall through to the in-memory store.
  return new InMemoryStorage();
}
159
+
160
/**
 * Wrap a lazily-loaded backend behind a synchronous-looking proxy.
 * The loader runs at most once on success; every proxied method awaits
 * the resolved backend before delegating.
 *
 * @param {() => Promise<any>} loader - async factory for the real backend
 * @returns {StorageBackend}
 */
function _asyncBackend(loader) {
  let _backend = null;
  let _loading = null;

  async function resolve() {
    if (_backend) return _backend;
    if (!_loading) {
      _loading = loader().then(
        (b) => { _backend = b; return b; },
        (err) => {
          // Clear the cached promise so one transient load failure
          // doesn't permanently poison every subsequent call.
          _loading = null;
          throw err;
        }
      );
    }
    return _loading;
  }

  const methods = ['init', 'read', 'write', 'delete', 'exists', 'ls', 'search', 'getTree', 'rebuildTree', 'exportAll', 'clear'];
  const proxy = {};
  for (const method of methods) {
    proxy[method] = async (...args) => {
      const backend = await resolve();
      return backend[method](...args);
    };
  }
  return /** @type {StorageBackend} */ (proxy);
}
180
+
181
// ─── Re-exports ──────────────────────────────────────────────────

/** Re-export all shared type definitions for consumers. */
export * from './types.js';
// LLM client factories.
export { createOpenAIClient } from './llm/openai.js';
export { createAnthropicClient } from './llm/anthropic.js';
// Storage backends: the RAM backend plus the base class for custom ones.
export { InMemoryStorage } from './backends/ram.js';
export { BaseStorage } from './backends/BaseStorage.js';
// Engine building blocks, for advanced composition outside createMemoryBank.
export { MemoryBulletIndex } from './bullets/bulletIndex.js';
export { MemoryRetriever } from './engine/retriever.js';
export { MemoryIngester } from './engine/ingester.js';
export { MemoryCompactor } from './engine/compactor.js';
export { createRetrievalExecutors, createExtractionExecutors } from './engine/executors.js';
// Portability helpers.
export { serialize, deserialize, toZip } from './utils/portability.js';
// OA Fastchat import helpers (see ./imports for the full importer set).
export {
  extractSessionsFromOAFastchatExport,
  extractConversationFromOAFastchatExport,
  listOAFastchatSessions
} from './imports/oaFastchat.js';