opencodekit 0.17.13 → 0.18.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/index.js +4 -6
  2. package/dist/template/.opencode/AGENTS.md +57 -0
  3. package/dist/template/.opencode/agent/scout.md +0 -37
  4. package/dist/template/.opencode/command/resume.md +1 -1
  5. package/dist/template/.opencode/command/status.md +7 -14
  6. package/dist/template/.opencode/dcp.jsonc +81 -81
  7. package/dist/template/.opencode/memory/memory.db +0 -0
  8. package/dist/template/.opencode/memory.db +0 -0
  9. package/dist/template/.opencode/memory.db-shm +0 -0
  10. package/dist/template/.opencode/memory.db-wal +0 -0
  11. package/dist/template/.opencode/opencode.json +199 -23
  12. package/dist/template/.opencode/opencode.json.tui-migration.bak +1380 -0
  13. package/dist/template/.opencode/package.json +1 -1
  14. package/dist/template/.opencode/plugin/README.md +37 -25
  15. package/dist/template/.opencode/plugin/lib/capture.ts +177 -0
  16. package/dist/template/.opencode/plugin/lib/context.ts +194 -0
  17. package/dist/template/.opencode/plugin/lib/curator.ts +234 -0
  18. package/dist/template/.opencode/plugin/lib/db/maintenance.ts +312 -0
  19. package/dist/template/.opencode/plugin/lib/db/observations.ts +299 -0
  20. package/dist/template/.opencode/plugin/lib/db/pipeline.ts +520 -0
  21. package/dist/template/.opencode/plugin/lib/db/schema.ts +356 -0
  22. package/dist/template/.opencode/plugin/lib/db/types.ts +211 -0
  23. package/dist/template/.opencode/plugin/lib/distill.ts +376 -0
  24. package/dist/template/.opencode/plugin/lib/inject.ts +126 -0
  25. package/dist/template/.opencode/plugin/lib/memory-admin-tools.ts +188 -0
  26. package/dist/template/.opencode/plugin/lib/memory-db.ts +54 -936
  27. package/dist/template/.opencode/plugin/lib/memory-helpers.ts +202 -0
  28. package/dist/template/.opencode/plugin/lib/memory-hooks.ts +240 -0
  29. package/dist/template/.opencode/plugin/lib/memory-tools.ts +341 -0
  30. package/dist/template/.opencode/plugin/memory.ts +56 -60
  31. package/dist/template/.opencode/plugin/sessions.ts +372 -93
  32. package/dist/template/.opencode/skill/memory-system/SKILL.md +103 -60
  33. package/dist/template/.opencode/skill/session-management/SKILL.md +22 -35
  34. package/dist/template/.opencode/tui.json +15 -0
  35. package/package.json +1 -1
  36. package/dist/template/.opencode/plugin/compaction.ts +0 -190
  37. package/dist/template/.opencode/tool/action-queue.ts +0 -313
  38. package/dist/template/.opencode/tool/memory-admin.ts +0 -445
  39. package/dist/template/.opencode/tool/memory-get.ts +0 -143
  40. package/dist/template/.opencode/tool/memory-read.ts +0 -45
  41. package/dist/template/.opencode/tool/memory-search.ts +0 -264
  42. package/dist/template/.opencode/tool/memory-timeline.ts +0 -105
  43. package/dist/template/.opencode/tool/memory-update.ts +0 -63
  44. package/dist/template/.opencode/tool/observation.ts +0 -357
@@ -0,0 +1,376 @@
1
+ /**
2
+ * Heuristic Distillation Module
3
+ *
4
+ * Compresses batches of temporal messages into distillations using
5
+ * TF-IDF term extraction and key sentence selection.
6
+ * No LLM dependency — pure heuristic, upgradeable later.
7
+ *
8
+ * Pipeline: temporal_messages → TF-IDF terms + key sentences → distillation
9
+ */
10
+
11
+ import {
12
+ type DistillationInput,
13
+ getUndistilledMessageCount,
14
+ getUndistilledMessages,
15
+ MEMORY_CONFIG,
16
+ markMessagesDistilled,
17
+ storeDistillation,
18
+ type TemporalMessageRow,
19
+ } from "./memory-db.js";
20
+
21
+ // ============================================================================
22
+ // TF-IDF Engine
23
+ // ============================================================================
24
+
25
+ /** Stop words to exclude from term extraction */
26
+ const STOP_WORDS = new Set([
27
+ "the",
28
+ "be",
29
+ "to",
30
+ "of",
31
+ "and",
32
+ "a",
33
+ "in",
34
+ "that",
35
+ "have",
36
+ "i",
37
+ "it",
38
+ "for",
39
+ "not",
40
+ "on",
41
+ "with",
42
+ "he",
43
+ "as",
44
+ "you",
45
+ "do",
46
+ "at",
47
+ "this",
48
+ "but",
49
+ "his",
50
+ "by",
51
+ "from",
52
+ "they",
53
+ "we",
54
+ "say",
55
+ "her",
56
+ "she",
57
+ "or",
58
+ "an",
59
+ "will",
60
+ "my",
61
+ "one",
62
+ "all",
63
+ "would",
64
+ "there",
65
+ "their",
66
+ "what",
67
+ "so",
68
+ "up",
69
+ "out",
70
+ "if",
71
+ "about",
72
+ "who",
73
+ "get",
74
+ "which",
75
+ "go",
76
+ "me",
77
+ "when",
78
+ "make",
79
+ "can",
80
+ "like",
81
+ "time",
82
+ "no",
83
+ "just",
84
+ "him",
85
+ "know",
86
+ "take",
87
+ "people",
88
+ "into",
89
+ "year",
90
+ "your",
91
+ "good",
92
+ "some",
93
+ "could",
94
+ "them",
95
+ "see",
96
+ "other",
97
+ "than",
98
+ "then",
99
+ "now",
100
+ "look",
101
+ "only",
102
+ "come",
103
+ "its",
104
+ "over",
105
+ "think",
106
+ "also",
107
+ "back",
108
+ "after",
109
+ "use",
110
+ "two",
111
+ "how",
112
+ "our",
113
+ "work",
114
+ "first",
115
+ "well",
116
+ "way",
117
+ "even",
118
+ "new",
119
+ "want",
120
+ "because",
121
+ "any",
122
+ "these",
123
+ "give",
124
+ "day",
125
+ "most",
126
+ "us",
127
+ "is",
128
+ "are",
129
+ "was",
130
+ "were",
131
+ "been",
132
+ "being",
133
+ "has",
134
+ "had",
135
+ "did",
136
+ "does",
137
+ "doing",
138
+ "am",
139
+ // Code-specific stop words
140
+ "function",
141
+ "const",
142
+ "let",
143
+ "var",
144
+ "return",
145
+ "import",
146
+ "export",
147
+ "true",
148
+ "false",
149
+ "null",
150
+ "undefined",
151
+ "string",
152
+ "number",
153
+ "boolean",
154
+ ]);
155
+
156
+ /**
157
+ * Tokenize text into normalized words.
158
+ */
159
+ function tokenize(text: string): string[] {
160
+ return text
161
+ .toLowerCase()
162
+ .replace(/[^a-z0-9_\-/.]+/g, " ")
163
+ .split(/\s+/)
164
+ .filter((w) => w.length > 2 && !STOP_WORDS.has(w));
165
+ }
166
+
167
+ /**
168
+ * Compute term frequency for a document.
169
+ */
170
+ function computeTF(words: string[]): Map<string, number> {
171
+ const tf = new Map<string, number>();
172
+ for (const word of words) {
173
+ tf.set(word, (tf.get(word) ?? 0) + 1);
174
+ }
175
+ // Normalize by total words
176
+ const total = words.length || 1;
177
+ for (const [word, count] of tf) {
178
+ tf.set(word, count / total);
179
+ }
180
+ return tf;
181
+ }
182
+
183
+ /**
184
+ * Compute inverse document frequency across multiple documents.
185
+ */
186
+ function computeIDF(documents: string[][]): Map<string, number> {
187
+ const idf = new Map<string, number>();
188
+ const N = documents.length || 1;
189
+
190
+ // Count documents containing each term
191
+ const docFreq = new Map<string, number>();
192
+ for (const words of documents) {
193
+ const unique = new Set(words);
194
+ for (const word of unique) {
195
+ docFreq.set(word, (docFreq.get(word) ?? 0) + 1);
196
+ }
197
+ }
198
+
199
+ // IDF = log(N / df)
200
+ for (const [word, df] of docFreq) {
201
+ idf.set(word, Math.log(N / df));
202
+ }
203
+
204
+ return idf;
205
+ }
206
+
207
+ /**
208
+ * Extract top-N TF-IDF terms from a collection of messages.
209
+ */
210
+ function extractTopTerms(
211
+ messages: TemporalMessageRow[],
212
+ topN: number,
213
+ ): string[] {
214
+ // Tokenize each message as a document
215
+ const documents = messages.map((m) => tokenize(m.content));
216
+
217
+ // Compute IDF across all documents
218
+ const idf = computeIDF(documents);
219
+
220
+ // Compute TF-IDF for the merged corpus
221
+ const allWords = documents.flat();
222
+ const tf = computeTF(allWords);
223
+
224
+ // Score each term
225
+ const scores: Array<[string, number]> = [];
226
+ for (const [word, tfScore] of tf) {
227
+ const idfScore = idf.get(word) ?? 0;
228
+ scores.push([word, tfScore * idfScore]);
229
+ }
230
+
231
+ // Sort by score descending, return top N
232
+ scores.sort((a, b) => b[1] - a[1]);
233
+ return scores.slice(0, topN).map(([term]) => term);
234
+ }
235
+
236
+ // ============================================================================
237
+ // Key Sentence Selection
238
+ // ============================================================================
239
+
240
+ /**
241
+ * Select key sentences from messages based on term density.
242
+ * Prefers sentences that contain high-value TF-IDF terms.
243
+ */
244
+ function selectKeySentences(
245
+ messages: TemporalMessageRow[],
246
+ topTerms: string[],
247
+ targetLength: number,
248
+ ): string {
249
+ const termSet = new Set(topTerms);
250
+
251
+ // Split all messages into sentences
252
+ interface ScoredSentence {
253
+ text: string;
254
+ score: number;
255
+ messageIndex: number;
256
+ }
257
+
258
+ const sentences: ScoredSentence[] = [];
259
+
260
+ for (let i = 0; i < messages.length; i++) {
261
+ const msg = messages[i];
262
+ // Split on sentence boundaries
263
+ const msgSentences = msg.content
264
+ .split(/(?<=[.!?])\s+|\n+/)
265
+ .map((s) => s.trim())
266
+ .filter((s) => s.length > 10 && s.length < 500);
267
+
268
+ for (const sentence of msgSentences) {
269
+ const words = tokenize(sentence);
270
+ const termHits = words.filter((w) => termSet.has(w)).length;
271
+ const density = termHits / (words.length || 1);
272
+
273
+ sentences.push({
274
+ text: sentence,
275
+ score: density * (1 + termHits), // Boost sentences with more term hits
276
+ messageIndex: i,
277
+ });
278
+ }
279
+ }
280
+
281
+ // Sort by score descending
282
+ sentences.sort((a, b) => b.score - a.score);
283
+
284
+ // Greedy-pack sentences up to target length
285
+ const selected: ScoredSentence[] = [];
286
+ let currentLength = 0;
287
+
288
+ for (const sentence of sentences) {
289
+ if (currentLength + sentence.text.length + 2 > targetLength) continue;
290
+ selected.push(sentence);
291
+ currentLength += sentence.text.length + 2; // +2 for separator
292
+ }
293
+
294
+ // Re-sort by original message order for coherence
295
+ selected.sort((a, b) => a.messageIndex - b.messageIndex);
296
+
297
+ return selected.map((s) => s.text).join("\n");
298
+ }
299
+
300
+ // ============================================================================
301
+ // Distillation Pipeline
302
+ // ============================================================================
303
+
304
+ /**
305
+ * Run distillation for a session if enough undistilled messages exist.
306
+ *
307
+ * Returns the distillation ID if created, or null if no distillation was needed.
308
+ */
309
+ export function distillSession(sessionId: string): number | null {
310
+ if (!MEMORY_CONFIG.distillation.enabled) return null;
311
+
312
+ const undistilledCount = getUndistilledMessageCount(sessionId);
313
+
314
+ if (undistilledCount < MEMORY_CONFIG.distillation.minMessages) {
315
+ return null; // Not enough messages yet
316
+ }
317
+
318
+ // Get undistilled messages (up to maxMessages)
319
+ const messages = getUndistilledMessages(
320
+ sessionId,
321
+ MEMORY_CONFIG.distillation.maxMessages,
322
+ );
323
+
324
+ if (messages.length < MEMORY_CONFIG.distillation.minMessages) {
325
+ return null;
326
+ }
327
+
328
+ // Extract top TF-IDF terms
329
+ const topTerms = extractTopTerms(
330
+ messages,
331
+ MEMORY_CONFIG.distillation.topTerms,
332
+ );
333
+
334
+ // Compute target length based on compression ratio
335
+ const totalChars = messages.reduce((sum, m) => sum + m.content.length, 0);
336
+ const targetLength = Math.max(
337
+ 200,
338
+ Math.floor(totalChars * MEMORY_CONFIG.distillation.compressionTarget),
339
+ );
340
+
341
+ // Select key sentences
342
+ const distilledContent = selectKeySentences(messages, topTerms, targetLength);
343
+
344
+ if (!distilledContent || distilledContent.length < 50) {
345
+ return null; // Distillation too thin
346
+ }
347
+
348
+ // Compute compression ratio
349
+ const compressionRatio = distilledContent.length / (totalChars || 1);
350
+
351
+ // Time range
352
+ const timeStart = messages[0].time_created;
353
+ const timeEnd = messages[messages.length - 1].time_created;
354
+
355
+ // Store distillation
356
+ const input: DistillationInput = {
357
+ session_id: sessionId,
358
+ content: distilledContent,
359
+ terms: topTerms,
360
+ message_count: messages.length,
361
+ compression_ratio: compressionRatio,
362
+ time_start: timeStart,
363
+ time_end: timeEnd,
364
+ };
365
+
366
+ const distillationId = storeDistillation(input);
367
+
368
+ // Mark messages as distilled
369
+ const messageIds = messages.map((m) => m.id);
370
+ markMessagesDistilled(messageIds, distillationId);
371
+
372
+ return distillationId;
373
+ }
374
+
375
+ // Export term extraction for use by inject.ts
376
+ export { extractTopTerms, tokenize };
@@ -0,0 +1,126 @@
1
+ /**
2
+ * LTM Injection Module
3
+ *
4
+ * Implements system.transform: searches observations + distillations using
5
+ * TF-IDF query terms extracted from recent conversation, scores results by
6
+ * BM25 * recency * confidence, and greedy-packs into a token budget.
7
+ *
8
+ * Injected into the system prompt on every turn for relevant context.
9
+ */
10
+
11
+ import { tokenize } from "./distill.js";
12
+ import {
13
+ estimateTokens,
14
+ getRelevantKnowledge,
15
+ MEMORY_CONFIG,
16
+ } from "./memory-db.js";
17
+
18
+ // ============================================================================
19
+ // Query Term Extraction
20
+ // ============================================================================
21
+
22
+ /**
23
+ * Extract top query terms from the current system prompt context.
24
+ * Uses TF-IDF-like approach on the existing system prompt to find
25
+ * what the conversation is about.
26
+ */
27
+ export function extractQueryTerms(
28
+ systemPrompt: string[],
29
+ topN?: number,
30
+ ): string[] {
31
+ const n = topN ?? MEMORY_CONFIG.injection.topTerms;
32
+
33
+ // Combine all system prompt segments
34
+ const fullText = systemPrompt.join("\n");
35
+
36
+ // Tokenize
37
+ const words = tokenize(fullText);
38
+ if (words.length === 0) return [];
39
+
40
+ // Compute term frequency
41
+ const tf = new Map<string, number>();
42
+ for (const word of words) {
43
+ tf.set(word, (tf.get(word) ?? 0) + 1);
44
+ }
45
+
46
+ // Sort by frequency descending
47
+ const sorted = [...tf.entries()].sort((a, b) => b[1] - a[1]);
48
+
49
+ // Return top N terms
50
+ return sorted.slice(0, n).map(([term]) => term);
51
+ }
52
+
53
+ // ============================================================================
54
+ // Injection
55
+ // ============================================================================
56
+
57
+ /**
58
+ * Build the LTM injection block for system.transform.
59
+ *
60
+ * @param systemPrompt - Current system prompt segments
61
+ * @returns Additional system prompt segment with relevant knowledge, or null if empty
62
+ */
63
+ export function buildInjection(systemPrompt: string[]): string | null {
64
+ if (!MEMORY_CONFIG.injection.enabled) return null;
65
+
66
+ // Extract query terms from current system context
67
+ const queryTerms = extractQueryTerms(systemPrompt);
68
+ if (queryTerms.length === 0) return null;
69
+
70
+ // Get relevant knowledge, scored and packed within budget
71
+ const knowledge = getRelevantKnowledge(queryTerms, {
72
+ tokenBudget: MEMORY_CONFIG.injection.tokenBudget,
73
+ minScore: MEMORY_CONFIG.injection.minScore,
74
+ });
75
+
76
+ if (knowledge.length === 0) return null;
77
+
78
+ // Format as injection block
79
+ const lines: string[] = [
80
+ "<memory_context>",
81
+ "Relevant knowledge from previous sessions:",
82
+ "",
83
+ ];
84
+
85
+ for (const item of knowledge) {
86
+ const sourceTag =
87
+ item.source === "observation" ? `[${item.type}]` : "[distillation]";
88
+ const scoreTag = `(relevance: ${item.score.toFixed(2)})`;
89
+
90
+ lines.push(`### ${sourceTag} ${item.title} ${scoreTag}`);
91
+
92
+ // Truncate content to avoid dominating the injection
93
+ const maxContentChars = Math.floor(
94
+ (MEMORY_CONFIG.injection.tokenBudget * 4) / knowledge.length,
95
+ );
96
+ const content =
97
+ item.content.length > maxContentChars
98
+ ? `${item.content.slice(0, maxContentChars)}...`
99
+ : item.content;
100
+
101
+ lines.push(content);
102
+ lines.push("");
103
+ }
104
+
105
+ lines.push("</memory_context>");
106
+
107
+ const injection = lines.join("\n");
108
+
109
+ // Final check: ensure we're within token budget
110
+ const tokens = estimateTokens(injection);
111
+ if (tokens > MEMORY_CONFIG.injection.tokenBudget * 1.2) {
112
+ // Over budget — trim to just titles
113
+ const trimmed = [
114
+ "<memory_context>",
115
+ "Relevant knowledge (summaries):",
116
+ ...knowledge.map(
117
+ (k) =>
118
+ `- [${k.source === "observation" ? k.type : "distillation"}] ${k.title}`,
119
+ ),
120
+ "</memory_context>",
121
+ ].join("\n");
122
+ return trimmed;
123
+ }
124
+
125
+ return injection;
126
+ }
@@ -0,0 +1,188 @@
1
+ /**
2
+ * Memory Plugin — Admin Tools
3
+ *
4
+ * memory-admin (9 operations).
5
+ *
6
+ * Uses factory pattern: createAdminTools(deps) returns tool definitions.
7
+ */
8
+
9
+ import { readdir, readFile } from "node:fs/promises";
10
+ import path from "node:path";
11
+ import { tool } from "@opencode-ai/plugin/tool";
12
+ import { curateFromDistillations } from "./curator.js";
13
+ import { distillSession } from "./distill.js";
14
+ import {
15
+ archiveOldObservations,
16
+ type ConfidenceLevel,
17
+ checkFTS5Available,
18
+ checkpointWAL,
19
+ getCaptureStats,
20
+ getDatabaseSizes,
21
+ getDistillationStats,
22
+ getMarkdownFilesInSqlite,
23
+ getObservationStats,
24
+ type ObservationType,
25
+ rebuildFTS5,
26
+ runFullMaintenance,
27
+ storeObservation,
28
+ vacuumDatabase,
29
+ } from "./memory-db.js";
30
+
31
/** Dependencies injected into the admin tool factory. */
interface AdminToolDeps {
  // Project root; the "migrate" operation reads markdown from
  // <directory>/.opencode/memory/observations.
  directory: string;
}

/**
 * Build the "memory-admin" tool definition (9 operations).
 *
 * Factory pattern: dependencies are injected so the tool can be registered
 * by the plugin entry point without module-level state.
 *
 * @param deps - Injected dependencies (project root directory).
 * @returns Map of tool name -> tool definition for plugin registration.
 */
export function createAdminTools(deps: AdminToolDeps) {
  const { directory } = deps;

  return {
    "memory-admin": tool({
      description: `Memory system administration: maintenance and migration.\n\nOperations:\n- "status": Storage stats and recommendations\n- "full": Full maintenance cycle (archive + checkpoint + vacuum)\n- "archive": Archive old observations (>90 days default)\n- "checkpoint": Checkpoint WAL file\n- "vacuum": Vacuum database\n- "migrate": Import .opencode/memory/observations/*.md into SQLite\n- "capture-stats": Temporal message capture statistics\n- "distill-now": Force distillation for current session\n- "curate-now": Force curator run\n\nExample:\nmemory-admin({ operation: "status" })\nmemory-admin({ operation: "migrate", dry_run: true })`,
      args: {
        // All args are optional; defaults applied in execute() below.
        operation: tool.schema
          .string()
          .optional()
          .describe("Operation (default: status)"),
        older_than_days: tool.schema
          .number()
          .optional()
          .describe("Archive threshold (default: 90)"),
        dry_run: tool.schema
          .boolean()
          .optional()
          .describe("Preview without executing"),
        force: tool.schema.boolean().optional().describe("Force re-migration"),
      },
      execute: async (args, ctx) => {
        // Defaults: read-only "status", no dry-run, 90-day archive window.
        const op = args.operation ?? "status";
        const dryRun = args.dry_run ?? false;
        const olderThanDays = args.older_than_days ?? 90;

        switch (op) {
          // Read-only overview: sizes, observation/capture/distillation
          // stats, plus a dry-run archive count (nothing is modified).
          case "status": {
            const sizes = getDatabaseSizes();
            const stats = getObservationStats();
            const archivable = archiveOldObservations({
              olderThanDays,
              dryRun: true,
            });
            const captureStats = getCaptureStats();
            const distillStats = getDistillationStats();
            return [
              "## Memory System Status\n",
              `**Database**: ${(sizes.total / 1024).toFixed(1)} KB`,
              `**FTS5**: ${checkFTS5Available() ? "Available (porter stemming)" : "Unavailable"}`,
              `**Schema**: v2 (4-tier storage)\n`,
              "### Observations",
              ...Object.entries(stats).map(([k, v]) => ` ${k}: ${v}`),
              ` Archivable (>${olderThanDays}d): ${archivable}\n`,
              "### Capture Pipeline",
              ` Messages: ${captureStats.total} (undistilled: ${captureStats.undistilled})`,
              ` Sessions: ${captureStats.sessions}\n`,
              "### Distillations",
              ` Total: ${distillStats.total} (${distillStats.sessions} sessions)`,
              ` Avg compression: ${(distillStats.avgCompression * 100).toFixed(1)}%`,
            ].join("\n");
          }
          // Full maintenance cycle (archive + purge + checkpoint + vacuum).
          case "full": {
            if (dryRun)
              return `Dry run: would archive, purge, optimize, checkpoint, vacuum.`;
            const r = runFullMaintenance({
              olderThanDays,
              includeSuperseded: true,
            });
            return `Done: archived ${r.archived}, purged ${r.purgedMessages} msgs, freed ${(r.freedBytes / 1024).toFixed(1)} KB.`;
          }
          // Archive observations older than the threshold (honors dry_run).
          case "archive": {
            const c = archiveOldObservations({
              olderThanDays,
              includeSuperseded: true,
              dryRun,
            });
            return dryRun
              ? `Would archive ${c} observations.`
              : `Archived ${c} observations.`;
          }
          // Flush the SQLite write-ahead log back into the main DB file.
          case "checkpoint": {
            const r = checkpointWAL();
            return r.checkpointed
              ? `WAL checkpointed (${r.walSize} pages).`
              : "Checkpoint failed or busy.";
          }
          case "vacuum":
            return vacuumDatabase() ? "Vacuumed." : "Vacuum failed.";
          case "capture-stats":
            return JSON.stringify(getCaptureStats(), null, 2);
          // Force distillation of the current session's captured messages.
          case "distill-now": {
            const sid = ctx?.sessionID;
            if (!sid) return "Error: No session ID.";
            const did = distillSession(sid);
            return did
              ? `Distillation #${did} created.`
              : "Not enough undistilled messages.";
          }
          // Force the curator to promote distillation patterns.
          case "curate-now": {
            const r = curateFromDistillations();
            return `Created ${r.created}, skipped ${r.skipped}. Patterns: ${JSON.stringify(r.patterns)}`;
          }
          // One-time import of legacy markdown observations into SQLite.
          // Frontmatter (type/title/confidence) is parsed best-effort;
          // already-migrated files are skipped unless force is set.
          case "migrate": {
            const obsDir = path.join(
              directory,
              ".opencode",
              "memory",
              "observations",
            );
            let mdFiles: string[] = [];
            try {
              mdFiles = (await readdir(obsDir)).filter((f) =>
                f.endsWith(".md"),
              );
            } catch {
              return "No observations directory found.";
            }
            if (mdFiles.length === 0) return "No files to migrate.";
            // Skip files already recorded in SQLite unless force is set.
            const existing = new Set(getMarkdownFilesInSqlite());
            const toMigrate = args.force
              ? mdFiles
              : mdFiles.filter((f) => !existing.has(f));
            if (toMigrate.length === 0) return "All files already migrated.";
            if (dryRun) return `Would migrate ${toMigrate.length} files.`;
            let migrated = 0;
            for (const file of toMigrate) {
              try {
                const content = await readFile(
                  path.join(obsDir, file),
                  "utf-8",
                );
                // Split optional YAML frontmatter from the body.
                const fmMatch = content.match(
                  /^---\n([\s\S]*?)\n---\n([\s\S]*)$/,
                );
                const body = fmMatch ? fmMatch[2].trim() : content.trim();
                const fm = fmMatch ? fmMatch[1] : "";
                storeObservation({
                  // NOTE(review): casts assume frontmatter values are valid
                  // ObservationType/ConfidenceLevel members — not validated.
                  type: (fm.match(/type:\s*(\w+)/)?.[1] ??
                    "discovery") as ObservationType,
                  title:
                    fm.match(/title:\s*(.+)/)?.[1]?.trim() ??
                    file.replace(/\.md$/, ""),
                  narrative: body,
                  confidence: (fm.match(/confidence:\s*(\w+)/)?.[1] ??
                    "medium") as ConfidenceLevel,
                  markdown_file: file,
                  source: "imported",
                });
                migrated++;
              } catch {
                /* Skip failed files */
              }
            }
            // Rebuild the full-text index once, after the batch.
            if (migrated > 0) rebuildFTS5();
            return `Migrated ${migrated}/${toMigrate.length} files.`;
          }
          default:
            return `Unknown operation: "${op}".`;
        }
      },
    }),
  };
}