ai-exodus 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,95 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * Regenerate output files from raw-analysis.json
5
+ * Optionally consolidates skills via Claude (one call)
6
+ * Usage: node bin/regenerate.js [--consolidate-skills]
7
+ */
8
+
9
+ import { readFile } from 'node:fs/promises';
10
+ import { resolve } from 'node:path';
11
+ import { generate } from '../src/generator.js';
12
+ import { callClaude } from '../src/claude.js';
13
+
14
+ const outputDir = resolve('./exodus-output');
15
+
16
+ async function main() {
17
+ const consolidate = process.argv.includes('--consolidate-skills');
18
+
19
+ console.log(' Loading raw analysis data...');
20
+ const raw = JSON.parse(await readFile(resolve(outputDir, 'raw-analysis.json'), 'utf-8'));
21
+
22
+ const aiName = raw.index?.aiName || 'AI';
23
+ const userName = raw.index?.userName || 'User';
24
+
25
+ console.log(` AI: ${aiName} | User: ${userName}`);
26
+ console.log(` Skills: ${raw.skills?.skills?.length || 0}`);
27
+
28
+ let analysis = {
29
+ index: raw.index,
30
+ personality: raw.personality,
31
+ memory: raw.memory,
32
+ skills: raw.skills,
33
+ // These need to be re-read from the existing files
34
+ relationship: await readFile(resolve(outputDir, 'relationship.md'), 'utf-8').catch(() => ''),
35
+ persona: await readFile(resolve(outputDir, 'persona.md'), 'utf-8').catch(() => ''),
36
+ preferences: await readFile(resolve(outputDir, 'preferences.md'), 'utf-8').catch(() => ''),
37
+ customInstructions: await readFile(resolve(outputDir, 'custom-instructions.txt'), 'utf-8')
38
+ .then(t => t.split('──────────────────────────────────────────\n\n')[1] || t)
39
+ .catch(() => ''),
40
+ source: raw.source || 'chatgpt',
41
+ stats: raw.stats || { conversations: 0, messages: 0, dateRange: {}, chunks: 0 },
42
+ };
43
+
44
+ if (consolidate && analysis.skills?.skills?.length > 30) {
45
+ const rawCount = analysis.skills.skills.length;
46
+ console.log(` Consolidating ${rawCount} skills via Claude (one call)...`);
47
+
48
+ const result = await callClaude({
49
+ model: 'sonnet',
50
+ system: `You are consolidating a list of ${rawCount} extracted skills into a clean, deduplicated list of 15-30 unique skills.
51
+
52
+ AI: ${aiName}
53
+ User: ${userName}
54
+
55
+ Many skills are duplicates with slightly different names (e.g. "AI image prompt engineering" and "Image generation prompt crafting" are the same skill). Merge them.
56
+
57
+ Rules:
58
+ - Combine duplicates into ONE skill with the best name, description, and examples
59
+ - Keep 15-30 unique skills maximum
60
+ - Preserve the category, frequency (pick the highest), and approach fields
61
+ - Pick the most specific and useful description for each
62
+ - primaryRole should be ONE clear sentence
63
+ - secondaryRoles should be 3-5 items max
64
+ - Output ONLY valid JSON with schema: { "skills": [...], "primaryRole": "...", "secondaryRoles": [...] }
65
+ - No markdown fences. No commentary.`,
66
+ prompt: JSON.stringify(analysis.skills, null, 2).slice(0, 100000),
67
+ });
68
+
69
+ try {
70
+ let cleaned = result.trim();
71
+ if (cleaned.startsWith('```')) cleaned = cleaned.replace(/^```(?:json)?\s*\n?/, '').replace(/\n?```\s*$/, '');
72
+ const parsed = JSON.parse(cleaned);
73
+ analysis.skills = parsed;
74
+ console.log(` ${rawCount} → ${parsed.skills?.length || 0} skills`);
75
+ } catch (e) {
76
+ console.error(' Failed to parse consolidated skills, keeping originals');
77
+ }
78
+ }
79
+
80
+ console.log(' Regenerating output files...');
81
+ await generate(analysis, {
82
+ outputDir,
83
+ hearthline: false,
84
+ letta: false,
85
+ aiName,
86
+ userName,
87
+ });
88
+
89
+ console.log(' Done. Files regenerated.');
90
+ }
91
+
92
// Script entry point: any failure is reported and mapped to exit code 1.
// (Top-level await is available — this file is an ES module.)
try {
  await main();
} catch (err) {
  console.error(`Error: ${err.message}`);
  process.exit(1);
}
package/package.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "name": "ai-exodus",
3
+ "version": "2.0.0",
4
+ "description": "Migrate your AI relationship from any platform to Claude. Your AI belongs to you.",
5
+ "type": "module",
6
+ "bin": {
7
+ "ai-exodus": "./bin/cli.js"
8
+ },
9
+ "files": [
10
+ "bin/",
11
+ "src/",
12
+ "portal/",
13
+ "prompts/",
14
+ "README.md",
15
+ "LICENSE"
16
+ ],
17
+ "scripts": {
18
+ "start": "node bin/cli.js",
19
+ "migrate": "node bin/cli.js migrate"
20
+ },
21
+ "keywords": [
22
+ "ai",
23
+ "migration",
24
+ "chatgpt",
25
+ "claude",
26
+ "companion",
27
+ "character-ai",
28
+ "replika",
29
+ "mcp",
30
+ "exodus",
31
+ "ai-relationship",
32
+ "conversation-archive"
33
+ ],
34
+ "author": "Marta Stypulkowska",
35
+ "license": "MIT",
36
+ "engines": {
37
+ "node": ">=18.0.0"
38
+ },
39
+ "repository": {
40
+ "type": "git",
41
+ "url": "https://github.com/martusha89/ai-exodus"
42
+ }
43
+ }
@@ -0,0 +1,300 @@
1
+ """
2
+ AI Exodus Portal — MCP Server
3
+ Connects Claude to your personal chat archive via the portal's API.
4
+
5
+ Tools:
6
+ exodus_search — Search conversation history by keyword
7
+ exodus_conversation — Get a full conversation by ID
8
+ exodus_skills — List all extracted skills with triggers
9
+ exodus_memories — List memories, optionally filtered by category
10
+ exodus_persona — Get the AI persona definition
11
+ exodus_stats — Get archive statistics
12
+ exodus_narrative — Get the relationship narrative
13
+
14
+ Usage:
15
+ python exodus_mcp.py
16
+
17
+ Configure in Claude Desktop / Claude Code MCP settings.
18
+ """
19
+
20
+ import os
21
+ import json
22
+ import urllib.request
23
+ import urllib.parse
24
+ import urllib.error
25
+ from mcp.server.fastmcp import FastMCP
26
+
27
# ── Config ──
# Connection settings come from the environment first...
PORTAL_URL = os.environ.get("EXODUS_PORTAL_URL", "")
MCP_SECRET = os.environ.get("EXODUS_MCP_SECRET", "")
PORTAL_PASSWORD = os.environ.get("EXODUS_PORTAL_PASSWORD", "")

# ...and fall back to ~/.exodus/config.json for any value still missing.
# (Previously the file was only consulted when URL or secret was unset,
# so a password present only in the config file could be silently skipped
# when both PORTAL_URL and MCP_SECRET came from the environment.)
if not PORTAL_URL or not MCP_SECRET or not PORTAL_PASSWORD:
    config_path = os.path.join(
        # USERPROFILE covers Windows; HOME covers POSIX.
        os.environ.get("USERPROFILE", os.environ.get("HOME", "")),
        ".exodus", "config.json"
    )
    if os.path.exists(config_path):
        with open(config_path, "r") as f:
            config = json.load(f)
        # Environment variables always win; the config file only fills gaps.
        if not PORTAL_URL:
            PORTAL_URL = config.get("portalUrl", "")
        if not MCP_SECRET:
            MCP_SECRET = config.get("mcpSecret", "")
        if not PORTAL_PASSWORD:
            PORTAL_PASSWORD = config.get("portalPassword", "")

mcp = FastMCP("AI Exodus Archive")
49
+
50
+
51
def _mcp_url(tool, params=None):
    """Return the portal MCP endpoint URL for *tool*.

    Optional *params* become the query string; entries with falsy values
    are dropped.
    """
    url = f"{PORTAL_URL}/mcp/{MCP_SECRET}/{tool}"
    if params:
        filtered = {key: value for key, value in params.items() if value}
        url = f"{url}?{urllib.parse.urlencode(filtered)}"
    return url
58
+
59
+
60
def _fetch(tool, params=None):
    """GET a portal MCP endpoint and decode the JSON response.

    Never raises: HTTP and transport failures are reported as
    ``{"error": "..."}`` dicts so callers can surface them as text.
    """
    endpoint = _mcp_url(tool, params)
    try:
        request = urllib.request.Request(endpoint)
        with urllib.request.urlopen(request, timeout=30) as response:
            body = response.read().decode("utf-8")
        return json.loads(body)
    except urllib.error.HTTPError as http_err:
        return {"error": f"HTTP {http_err.code}: {http_err.reason}"}
    except Exception as err:
        return {"error": str(err)}
71
+
72
+
73
@mcp.tool()
def exodus_search(query: str, limit: int = 10) -> str:
    """Search your conversation history. Returns matching messages with conversation context.
    Use this to find specific conversations, topics, or things that were discussed."""
    if not query.strip():
        return "Please provide a search query."

    data = _fetch("search", {"q": query, "limit": str(limit)})
    if "error" in data:
        return f"Search error: {data['error']}"

    results = data.get("results", [])
    if not results:
        return f'No results found for "{query}".'

    out = [f'Found {len(results)} results for "{query}":\n']
    for hit in results:
        snippet = hit.get("content", "")
        # Long messages are trimmed to ~300 chars, centred on the first
        # match where possible.
        if len(snippet) > 300:
            pos = snippet.lower().find(query.lower())
            if pos > 0:
                begin = max(0, pos - 100)
                snippet = "..." + snippet[begin:begin + 300] + "..."
            else:
                snippet = snippet[:300] + "..."

        model_name = hit.get("model", "")
        out.append(f"**{hit.get('title', 'Untitled')}** ({hit.get('role', '?')}, {model_name or 'unknown model'})")
        out.append(f" {snippet}")
        out.append(f" [Conversation ID: {hit.get('conversation_id', '?')}]")
        out.append("")

    return "\n".join(out)
111
+
112
+
113
@mcp.tool()
def exodus_conversation(conversation_id: str) -> str:
    """Get the full content of a specific conversation by its ID.
    Use after searching to read the complete conversation."""
    data = _fetch("conversation", {"id": conversation_id})
    if "error" in data:
        return f"Error: {data['error']}"

    title = data.get("title", "Untitled")
    messages = data.get("messages", [])
    if not messages:
        return f"Conversation '{title}' has no messages."

    # Render as a markdown transcript: one bold speaker line per message.
    parts = [f"# {title}\n"]
    for entry in messages:
        speaker = entry.get("role", "?").upper()
        model_name = entry.get("model", "")
        suffix = f" [{model_name}]" if model_name else ""
        parts.append(f"**{speaker}**{suffix}: {entry.get('content', '')}\n")

    return "\n".join(parts)
137
+
138
+
139
@mcp.tool()
def exodus_skills() -> str:
    """List all extracted skills with their activation triggers.
    Shows what the AI could do and when each skill activates."""
    data = _fetch("skills")
    if isinstance(data, dict) and "error" in data:
        return f"Error: {data['error']}"
    if not data:
        return "No skills found. Run an analysis first."

    out = [f"Found {len(data)} skills:\n"]
    for skill in data:
        out.append(f"### {skill.get('name', '?')}")
        out.append(f"Category: {skill.get('category', '?')} | Frequency: {skill.get('frequency', '?')}")

        activation = skill.get("activation_rule", "")
        if activation:
            out.append(f"Activates: {activation}")
        summary = skill.get("description", "")
        if summary:
            out.append(f"Description: {summary}")

        # Trigger columns arrive as JSON-encoded strings; decode defensively.
        for label, field in (("Phrases", "triggers_phrases"),
                             ("Temporal", "triggers_temporal"),
                             ("Emotional", "triggers_emotional")):
            values = _safe_json(skill.get(field))
            if values:
                out.append(f"{label}: {', '.join(values)}")
        out.append("")

    return "\n".join(out)
179
+
180
+
181
@mcp.tool()
def exodus_memories(category: str = "") -> str:
    """List memories about the user. Optionally filter by category.
    Categories: identity, life, preferences, personality, relationship, timeline, emotional, facts"""
    query = {"category": category} if category else {}
    data = _fetch("memories", query)
    if isinstance(data, dict) and "error" in data:
        return f"Error: {data['error']}"
    if not data:
        cat_text = f" in category '{category}'" if category else ""
        return f"No memories found{cat_text}. Run an analysis first."

    # Emit a heading whenever the category changes (rows are assumed to
    # arrive grouped by category — matching the original behaviour).
    out = [f"Found {len(data)} memories:\n"]
    previous = None
    for memory in data:
        group = memory.get("category", "?")
        if group != previous:
            previous = group
            out.append(f"\n## {group.title()}")

        label = memory.get("key", "")
        body = memory.get("value", "")
        out.append(f"- **{label}**: {body}" if label else f"- {body}")

    return "\n".join(out)
214
+
215
+
216
@mcp.tool()
def exodus_persona() -> str:
    """Get the AI persona definition — the personality profile extracted from conversations."""
    data = _fetch("persona")
    if isinstance(data, dict) and "error" in data:
        return f"Error: {data['error']}"
    if not data:
        return "No persona found. Run an analysis first."

    # Each row is a persona section; join their text blocks in order.
    sections = [section.get("content", "") for section in data]
    return "\n\n".join(sections)
228
+
229
+
230
@mcp.tool()
def exodus_stats() -> str:
    """Get archive statistics — conversation count, message count, model breakdown, date range."""
    data = _fetch("stats")
    if "error" in data:
        return f"Error: {data['error']}"

    report = [
        "# Archive Statistics\n",
        f"- **Conversations**: {data.get('conversations', 0):,}",
        f"- **Messages**: {data.get('messages', 0):,}",
        f"- **Skills**: {data.get('skills', 0)}",
        f"- **Memories**: {data.get('memories', 0)}",
        f"- **Analysis runs**: {data.get('analysisRuns', 0)}",
        f"- **AI Name**: {data.get('aiName', '?')}",
        f"- **User Name**: {data.get('userName', '?')}",
    ]

    # Date range line only when a start date exists.
    span = data.get("dateRange", {})
    if span.get("from"):
        report.append(f"- **Date range**: {span['from']} to {span.get('to', '?')}")

    breakdown = data.get("models", [])
    if breakdown:
        report.append("\n## Model Breakdown")
        for entry in breakdown:
            report.append(f"- {entry.get('model', '?')}: {entry.get('count', 0):,} messages")

    return "\n".join(report)
260
+
261
+
262
@mcp.tool()
def exodus_narrative() -> str:
    """Get the relationship narrative — the story of the human-AI relationship."""
    data = _fetch("narrative")
    if isinstance(data, dict) and "error" in data:
        return f"Error: {data['error']}"

    story = data.get("content", "")
    if story:
        return story
    return "No narrative found. Run a full analysis first."
275
+
276
+
277
+ def _safe_json(val):
278
+ """Parse a JSON string or return the value if already parsed."""
279
+ if not val:
280
+ return []
281
+ if isinstance(val, list):
282
+ return val
283
+ try:
284
+ return json.loads(val)
285
+ except (json.JSONDecodeError, TypeError):
286
+ return []
287
+
288
+
289
if __name__ == "__main__":
    # Fail fast with actionable guidance when the portal connection is
    # not configured. `raise SystemExit(1)` is used instead of `exit(1)`:
    # the bare `exit` builtin is injected by the `site` module and is not
    # guaranteed to exist under `python -S` or in frozen interpreters.
    if not PORTAL_URL:
        print("Error: No portal URL configured.")
        print("Set EXODUS_PORTAL_URL environment variable or run 'ai-exodus deploy' first.")
        print("Config is read from ~/.exodus/config.json")
        raise SystemExit(1)
    if not MCP_SECRET:
        print("Error: No MCP secret configured.")
        print("Set EXODUS_MCP_SECRET environment variable or check ~/.exodus/config.json")
        raise SystemExit(1)

    # Serve over stdio, as expected by Claude Desktop / Claude Code MCP clients.
    mcp.run(transport="stdio")
@@ -0,0 +1,158 @@
1
-- AI Exodus Portal — D1 Schema
-- Per-user deployed archive + analysis portal
-- Every statement uses IF NOT EXISTS / INSERT OR IGNORE so the schema can
-- be re-applied idempotently on each deploy.

-- Auth
-- Generic key/value store for portal settings (exact keys are defined by
-- the application layer).
CREATE TABLE IF NOT EXISTS settings (
  key TEXT PRIMARY KEY,
  value TEXT NOT NULL,
  updated_at TEXT DEFAULT (datetime('now'))
);

-- Imported conversations
-- One row per imported conversation; `metadata` holds a JSON blob.
CREATE TABLE IF NOT EXISTS conversations (
  id TEXT PRIMARY KEY,
  title TEXT,
  created_at TEXT,
  updated_at TEXT,
  message_count INTEGER DEFAULT 0,
  model TEXT,
  source TEXT DEFAULT 'chatgpt',
  metadata TEXT DEFAULT '{}',
  imported_at TEXT DEFAULT (datetime('now'))
);

CREATE INDEX IF NOT EXISTS idx_conversations_created ON conversations(created_at);
CREATE INDEX IF NOT EXISTS idx_conversations_model ON conversations(model);
CREATE INDEX IF NOT EXISTS idx_conversations_source ON conversations(source);

-- Individual messages
-- `position` preserves message order within a conversation.
CREATE TABLE IF NOT EXISTS messages (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  conversation_id TEXT NOT NULL,
  role TEXT NOT NULL,
  content TEXT NOT NULL,
  model TEXT,
  created_at TEXT,
  position INTEGER DEFAULT 0,
  -- NOTE(review): no ON DELETE action — deleting a conversation leaves
  -- orphaned messages unless the application cleans up; confirm intended.
  FOREIGN KEY (conversation_id) REFERENCES conversations(id)
);

CREATE INDEX IF NOT EXISTS idx_messages_convo ON messages(conversation_id, position);
-- NOTE(review): a plain B-tree index on the full message text cannot
-- accelerate substring search (LIKE '%…%') and inflates database size;
-- FTS5 may be a better fit — verify against the portal's query patterns.
CREATE INDEX IF NOT EXISTS idx_messages_content ON messages(content);
CREATE INDEX IF NOT EXISTS idx_messages_model ON messages(model);

-- Skill categories (user-editable + defaults)
CREATE TABLE IF NOT EXISTS skill_categories (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  name TEXT NOT NULL UNIQUE,
  color TEXT DEFAULT '#8b5cf6',
  icon TEXT DEFAULT '',
  is_default INTEGER DEFAULT 0,
  sort_order INTEGER DEFAULT 0,
  created_at TEXT DEFAULT (datetime('now'))
);

-- Default skill categories
-- OR IGNORE keeps user edits: re-running the schema never overwrites a
-- category whose name already exists.
INSERT OR IGNORE INTO skill_categories (name, color, icon, is_default, sort_order) VALUES
  ('emotional_support', '#ef4444', '', 1, 1),
  ('creative', '#f59e0b', '', 1, 2),
  ('productivity', '#10b981', '', 1, 3),
  ('coding', '#3b82f6', '', 1, 4),
  ('knowledge', '#8b5cf6', '', 1, 5),
  ('decision_making', '#ec4899', '', 1, 6),
  ('health', '#14b8a6', '', 1, 7),
  ('intimate', '#f43f5e', '', 1, 8),
  ('entertainment', '#f97316', '', 1, 9),
  ('other', '#6b7280', '', 1, 10);

-- Skills (auto-extracted + user-editable)
-- The triggers_* and examples columns store JSON-encoded arrays as TEXT
-- (decoded application-side, e.g. by the MCP server's _safe_json helper).
CREATE TABLE IF NOT EXISTS skills (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  name TEXT NOT NULL,
  category TEXT NOT NULL DEFAULT 'other',
  frequency TEXT DEFAULT 'occasional',
  description TEXT,
  approach TEXT,
  quality TEXT,
  activation_rule TEXT,
  triggers_phrases TEXT DEFAULT '[]',
  triggers_temporal TEXT DEFAULT '[]',
  triggers_emotional TEXT DEFAULT '[]',
  triggers_contextual TEXT DEFAULT '[]',
  examples TEXT DEFAULT '[]',
  source TEXT DEFAULT 'extracted',
  run_id INTEGER,
  created_at TEXT DEFAULT (datetime('now')),
  updated_at TEXT DEFAULT (datetime('now'))
);

-- Memory categories (user-editable + defaults)
CREATE TABLE IF NOT EXISTS memory_categories (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  name TEXT NOT NULL UNIQUE,
  color TEXT DEFAULT '#8b5cf6',
  icon TEXT DEFAULT '',
  is_default INTEGER DEFAULT 0,
  sort_order INTEGER DEFAULT 0,
  created_at TEXT DEFAULT (datetime('now'))
);

-- Default memory categories
INSERT OR IGNORE INTO memory_categories (name, color, icon, is_default, sort_order) VALUES
  ('identity', '#3b82f6', '', 1, 1),
  ('life', '#10b981', '', 1, 2),
  ('preferences', '#f59e0b', '', 1, 3),
  ('personality', '#8b5cf6', '', 1, 4),
  ('relationship', '#ef4444', '', 1, 5),
  ('timeline', '#ec4899', '', 1, 6),
  ('emotional', '#f43f5e', '', 1, 7),
  ('facts', '#6b7280', '', 1, 8);

-- Memories (auto-extracted + user-editable)
CREATE TABLE IF NOT EXISTS memories (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  category TEXT NOT NULL DEFAULT 'facts',
  key TEXT,
  value TEXT NOT NULL,
  confidence TEXT DEFAULT 'extracted',
  source TEXT DEFAULT 'extracted',
  run_id INTEGER,
  created_at TEXT DEFAULT (datetime('now')),
  updated_at TEXT DEFAULT (datetime('now'))
);

CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(category);

-- Persona (editable text blocks)
-- Sections are concatenated in sort_order by consumers.
CREATE TABLE IF NOT EXISTS persona (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  section TEXT NOT NULL,
  content TEXT NOT NULL,
  sort_order INTEGER DEFAULT 0,
  updated_at TEXT DEFAULT (datetime('now'))
);

-- Analysis runs history
-- `passes` and `results` store JSON as TEXT; `status` lifecycle values
-- are defined by the application (starts at 'pending').
CREATE TABLE IF NOT EXISTS analysis_runs (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  status TEXT DEFAULT 'pending',
  passes TEXT DEFAULT '[]',
  model TEXT DEFAULT 'sonnet',
  date_from TEXT,
  date_to TEXT,
  model_filter TEXT,
  conversation_count INTEGER DEFAULT 0,
  message_count INTEGER DEFAULT 0,
  results TEXT DEFAULT '{}',
  started_at TEXT DEFAULT (datetime('now')),
  completed_at TEXT,
  error TEXT
);

-- Relationship narrative
-- One row per generated narrative; `run_id` links back to analysis_runs.
CREATE TABLE IF NOT EXISTS narratives (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  content TEXT NOT NULL,
  run_id INTEGER,
  created_at TEXT DEFAULT (datetime('now'))
);