@askexenow/exe-os 0.9.65 → 0.9.67

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. package/deploy/stack-manifests/v0.9.json +54 -5
  2. package/dist/bin/age-ontology-load.js +61 -0
  3. package/dist/bin/agentic-ontology-backfill.js +4708 -0
  4. package/dist/bin/agentic-reflection-backfill.js +4144 -0
  5. package/dist/bin/{exe-link.js → agentic-semantic-label.js} +1532 -2173
  6. package/dist/bin/backfill-conversations.js +528 -20
  7. package/dist/bin/backfill-responses.js +528 -20
  8. package/dist/bin/backfill-vectors.js +255 -20
  9. package/dist/bin/bulk-sync-postgres.js +4876 -0
  10. package/dist/bin/cleanup-stale-review-tasks.js +529 -21
  11. package/dist/bin/cli.js +3471 -1491
  12. package/dist/bin/exe-agent-config.js +4 -0
  13. package/dist/bin/exe-agent.js +16 -0
  14. package/dist/bin/exe-assign.js +528 -20
  15. package/dist/bin/exe-boot.js +492 -54
  16. package/dist/bin/exe-call.js +16 -0
  17. package/dist/bin/exe-cloud.js +7415 -518
  18. package/dist/bin/exe-dispatch.js +540 -22
  19. package/dist/bin/exe-doctor.js +3404 -1225
  20. package/dist/bin/exe-export-behaviors.js +542 -24
  21. package/dist/bin/exe-forget.js +529 -21
  22. package/dist/bin/exe-gateway.js +595 -25
  23. package/dist/bin/exe-heartbeat.js +541 -24
  24. package/dist/bin/exe-kill.js +529 -21
  25. package/dist/bin/exe-launch-agent.js +2334 -1067
  26. package/dist/bin/exe-new-employee.js +324 -166
  27. package/dist/bin/exe-pending-messages.js +529 -21
  28. package/dist/bin/exe-pending-notifications.js +529 -21
  29. package/dist/bin/exe-pending-reviews.js +529 -21
  30. package/dist/bin/exe-rename.js +529 -21
  31. package/dist/bin/exe-review.js +529 -21
  32. package/dist/bin/exe-search.js +542 -24
  33. package/dist/bin/exe-session-cleanup.js +540 -22
  34. package/dist/bin/exe-settings.js +14 -0
  35. package/dist/bin/exe-start-codex.js +817 -144
  36. package/dist/bin/exe-start-opencode.js +776 -80
  37. package/dist/bin/exe-status.js +529 -21
  38. package/dist/bin/exe-team.js +529 -21
  39. package/dist/bin/git-sweep.js +540 -22
  40. package/dist/bin/graph-backfill.js +580 -21
  41. package/dist/bin/graph-export.js +529 -21
  42. package/dist/bin/graph-layer-benchmark.js +109 -0
  43. package/dist/bin/install.js +420 -289
  44. package/dist/bin/intercom-check.js +540 -22
  45. package/dist/bin/postgres-agentic-reflection-backfill.js +187 -0
  46. package/dist/bin/postgres-agentic-semantic-backfill.js +237 -0
  47. package/dist/bin/scan-tasks.js +540 -22
  48. package/dist/bin/setup.js +790 -206
  49. package/dist/bin/shard-migrate.js +528 -20
  50. package/dist/bin/update.js +4 -0
  51. package/dist/gateway/index.js +593 -23
  52. package/dist/hooks/bug-report-worker.js +651 -64
  53. package/dist/hooks/codex-stop-task-finalizer.js +540 -22
  54. package/dist/hooks/commit-complete.js +540 -22
  55. package/dist/hooks/error-recall.js +542 -24
  56. package/dist/hooks/exe-heartbeat-hook.js +4 -0
  57. package/dist/hooks/ingest-worker.js +4 -0
  58. package/dist/hooks/ingest.js +539 -22
  59. package/dist/hooks/instructions-loaded.js +529 -21
  60. package/dist/hooks/notification.js +529 -21
  61. package/dist/hooks/post-compact.js +529 -21
  62. package/dist/hooks/post-tool-combined.js +543 -25
  63. package/dist/hooks/pre-compact.js +772 -127
  64. package/dist/hooks/pre-tool-use.js +529 -21
  65. package/dist/hooks/prompt-submit.js +543 -25
  66. package/dist/hooks/session-end.js +673 -140
  67. package/dist/hooks/session-start.js +662 -26
  68. package/dist/hooks/stop.js +540 -23
  69. package/dist/hooks/subagent-stop.js +529 -21
  70. package/dist/hooks/summary-worker.js +571 -126
  71. package/dist/index.js +593 -23
  72. package/dist/lib/agent-config.js +4 -0
  73. package/dist/lib/cloud-sync.js +408 -47
  74. package/dist/lib/config.js +25 -1
  75. package/dist/lib/consolidation.js +5 -1
  76. package/dist/lib/database.js +128 -0
  77. package/dist/lib/db-daemon-client.js +4 -0
  78. package/dist/lib/db.js +128 -0
  79. package/dist/lib/device-registry.js +128 -0
  80. package/dist/lib/embedder.js +25 -1
  81. package/dist/lib/employee-templates.js +16 -0
  82. package/dist/lib/employees.js +4 -0
  83. package/dist/lib/exe-daemon-client.js +4 -0
  84. package/dist/lib/exe-daemon.js +3158 -930
  85. package/dist/lib/hybrid-search.js +542 -24
  86. package/dist/lib/identity.js +7 -0
  87. package/dist/lib/keychain.js +178 -22
  88. package/dist/lib/license.js +4 -0
  89. package/dist/lib/messaging.js +7 -0
  90. package/dist/lib/reminders.js +7 -0
  91. package/dist/lib/schedules.js +255 -20
  92. package/dist/lib/skill-learning.js +28 -1
  93. package/dist/lib/status-brief.js +39 -0
  94. package/dist/lib/store.js +528 -20
  95. package/dist/lib/task-router.js +4 -0
  96. package/dist/lib/tasks.js +28 -1
  97. package/dist/lib/tmux-routing.js +28 -1
  98. package/dist/lib/token-spend.js +7 -0
  99. package/dist/mcp/server.js +2739 -813
  100. package/dist/mcp/tools/complete-reminder.js +7 -0
  101. package/dist/mcp/tools/create-reminder.js +7 -0
  102. package/dist/mcp/tools/create-task.js +28 -1
  103. package/dist/mcp/tools/deactivate-behavior.js +7 -0
  104. package/dist/mcp/tools/list-reminders.js +7 -0
  105. package/dist/mcp/tools/list-tasks.js +7 -0
  106. package/dist/mcp/tools/send-message.js +7 -0
  107. package/dist/mcp/tools/update-task.js +28 -1
  108. package/dist/runtime/index.js +540 -22
  109. package/dist/tui/App.js +618 -29
  110. package/package.json +9 -5
  111. package/src/commands/exe/cloud.md +11 -8
  112. package/stack.release.json +3 -3
  113. package/src/commands/exe/link.md +0 -17
@@ -0,0 +1,4708 @@
1
+ #!/usr/bin/env node
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropNames = Object.getOwnPropertyNames;
4
+ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
5
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
6
+ }) : x)(function(x) {
7
+ if (typeof require !== "undefined") return require.apply(this, arguments);
8
+ throw Error('Dynamic require of "' + x + '" is not supported');
9
+ });
10
+ var __esm = (fn, res) => function __init() {
11
+ return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
12
+ };
13
+ var __export = (target, all) => {
14
+ for (var name in all)
15
+ __defProp(target, name, { get: all[name], enumerable: true });
16
+ };
17
+
18
// src/types/memory.ts
// Dimensionality of embedding vectors; assigned when init_memory runs.
var EMBEDDING_DIM;
var init_memory = __esm({
  "src/types/memory.ts"() {
    "use strict";
    EMBEDDING_DIM = 1024;
  }
});
26
+
27
+ // src/lib/db-retry.ts
28
// True when `err` looks like SQLite lock contention: an Error whose message
// mentions "sqlite_busy" or "database is locked" (case-insensitive).
function isBusyError(err) {
  if (!(err instanceof Error)) return false;
  const text = err.message.toLowerCase();
  return text.includes("sqlite_busy") || text.includes("database is locked");
}
// Promise-based sleep for `ms` milliseconds.
function delay(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
38
// Retries `fn` with exponential backoff plus random jitter while it fails
// with an SQLite busy/locked error. Non-busy errors are rethrown at once;
// the final busy error is rethrown after MAX_RETRIES+1 attempts.
async function retryOnBusy(fn, label) {
  let lastError;
  for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
    try {
      return await fn();
    } catch (err) {
      lastError = err;
      if (!isBusyError(err) || attempt === MAX_RETRIES) {
        throw err;
      }
      const backoff = BASE_DELAY_MS * Math.pow(2, attempt);
      const jitter = Math.floor(Math.random() * MAX_JITTER_MS);
      process.stderr.write(
        `[exe-os] SQLITE_BUSY ${label} retry ${attempt + 1}/${MAX_RETRIES} \u2014 waiting ${backoff + jitter}ms
`
      );
      await delay(backoff + jitter);
    }
  }
  throw lastError;
}
// Wraps a DB client in a Proxy that retries `execute`/`batch` on
// SQLITE_BUSY. Fix: forward ALL call arguments — the original captured only
// the first parameter of `execute` (and exactly two for `batch`), silently
// dropping bound args in parameterized `execute(sql, args)` calls.
function wrapWithRetry(client) {
  return new Proxy(client, {
    get(target, prop, receiver) {
      if (prop === "execute") {
        return (...args) => retryOnBusy(() => target.execute(...args), "execute");
      }
      if (prop === "batch") {
        return (...args) => retryOnBusy(() => target.batch(...args), "batch");
      }
      return Reflect.get(target, prop, receiver);
    }
  });
}
72
var MAX_RETRIES, BASE_DELAY_MS, MAX_JITTER_MS;
var init_db_retry = __esm({
  "src/lib/db-retry.ts"() {
    "use strict";
    // Retry budget: the loop in retryOnBusy runs attempts 0..MAX_RETRIES,
    // i.e. at most MAX_RETRIES + 1 executions of the wrapped call.
    MAX_RETRIES = 5;
    // Backoff base in ms; doubled on each attempt (250, 500, 1000, ...).
    BASE_DELAY_MS = 250;
    // Upper bound (exclusive) of random jitter added to every wait.
    MAX_JITTER_MS = 400;
  }
});
81
+
82
+ // src/lib/secure-files.ts
83
+ import { chmodSync, existsSync, mkdirSync } from "fs";
84
+ import { chmod, mkdir } from "fs/promises";
85
// Creates `dirPath` (recursively) with owner-only permissions, then
// re-applies the mode in case the directory already existed with looser bits.
async function ensurePrivateDir(dirPath) {
  await mkdir(dirPath, { recursive: true, mode: PRIVATE_DIR_MODE });
  try {
    await chmod(dirPath, PRIVATE_DIR_MODE);
  } catch {
    // best effort: chmod can fail on filesystems without POSIX modes
  }
}
// Synchronous variant of ensurePrivateDir.
function ensurePrivateDirSync(dirPath) {
  mkdirSync(dirPath, { recursive: true, mode: PRIVATE_DIR_MODE });
  try {
    chmodSync(dirPath, PRIVATE_DIR_MODE);
  } catch {
    // best effort
  }
}
// Best-effort chmod of a file to owner read/write only.
async function enforcePrivateFile(filePath) {
  try {
    await chmod(filePath, PRIVATE_FILE_MODE);
  } catch {
    // ignore: the file may not exist yet
  }
}
// Synchronous variant; silently skips missing files.
function enforcePrivateFileSync(filePath) {
  try {
    if (existsSync(filePath)) chmodSync(filePath, PRIVATE_FILE_MODE);
  } catch {
    // ignore
  }
}
var PRIVATE_DIR_MODE, PRIVATE_FILE_MODE;
var init_secure_files = __esm({
  "src/lib/secure-files.ts"() {
    "use strict";
    PRIVATE_DIR_MODE = 448;  // 0o700: owner rwx only
    PRIVATE_FILE_MODE = 384; // 0o600: owner rw only
  }
});
119
+
120
+ // src/lib/config.ts
121
+ import { readFile, writeFile } from "fs/promises";
122
+ import { readFileSync, existsSync as existsSync2, renameSync } from "fs";
123
+ import path from "path";
124
+ import os from "os";
125
// Resolves the exe-os data directory.
// Precedence: $EXE_OS_DIR, then legacy $EXE_MEM_DIR, then ~/.exe-os.
// If only the legacy ~/.exe-mem exists on disk it is renamed to ~/.exe-os;
// when that rename fails the legacy path is used as-is.
function resolveDataDir() {
  const envDir = process.env.EXE_OS_DIR || process.env.EXE_MEM_DIR;
  if (envDir) return envDir;
  const home = os.homedir();
  const preferred = path.join(home, ".exe-os");
  const legacy = path.join(home, ".exe-mem");
  if (existsSync2(preferred) || !existsSync2(legacy)) {
    return preferred;
  }
  try {
    renameSync(legacy, preferred);
    process.stderr.write(`[exe-os] Migrated data directory: ~/.exe-mem \u2192 ~/.exe-os
`);
    return preferred;
  } catch {
    return legacy;
  }
}
141
// Strips v1.0-era fields from a parsed config. Mutates and returns `raw`:
// `r2` is dropped with a stderr warning, `syncIntervalMs` silently.
function migrateLegacyConfig(raw) {
  if ("r2" in raw) {
    process.stderr.write(
      "[exe-os] Warning: config.json contains deprecated 'r2' field from v1.0. R2 sync has been replaced in v1.1. The 'r2' field will be ignored.\n"
    );
    delete raw.r2;
  }
  delete raw.syncIntervalMs;
  return raw;
}
153
// Applies the ordered CONFIG_MIGRATIONS chain to a parsed config.
// Configs newer than CURRENT_CONFIG_VERSION are returned untouched
// (forward compatibility). Returns the (possibly migrated) config, whether
// any step ran, and the version it started from.
function migrateConfig(raw) {
  const fromVersion = typeof raw.config_version === "number" ? raw.config_version : 0;
  if (fromVersion > CURRENT_CONFIG_VERSION) {
    return { config: raw, migrated: false, fromVersion };
  }
  let config = raw;
  let version = fromVersion;
  let migrated = false;
  for (const step of CONFIG_MIGRATIONS) {
    if (version !== step.from || step.to > CURRENT_CONFIG_VERSION) continue;
    config = step.migrate(config);
    version = step.to;
    migrated = true;
  }
  return { config, migrated, fromVersion };
}
169
// Merges the user's scalingRoadmap over the defaults. For backwards
// compatibility, a top-level `rerankerEnabled` seeds
// rerankerAutoTrigger.enabled when the nested flag is absent.
function normalizeScalingRoadmap(raw) {
  const defaults = DEFAULT_CONFIG.scalingRoadmap.rerankerAutoTrigger;
  const roadmap = raw.scalingRoadmap ?? {};
  const auto = roadmap.rerankerAutoTrigger ?? {};
  if (auto.enabled === void 0 && raw.rerankerEnabled !== void 0) {
    auto.enabled = raw.rerankerEnabled;
  }
  raw.scalingRoadmap = {
    ...roadmap,
    rerankerAutoTrigger: { ...defaults, ...auto }
  };
}
// Fills missing sessionLifecycle fields from DEFAULT_CONFIG.
function normalizeSessionLifecycle(raw) {
  raw.sessionLifecycle = { ...DEFAULT_CONFIG.sessionLifecycle, ...(raw.sessionLifecycle ?? {}) };
}
// Fills missing autoUpdate fields from DEFAULT_CONFIG.
function normalizeAutoUpdate(raw) {
  raw.autoUpdate = { ...DEFAULT_CONFIG.autoUpdate, ...(raw.autoUpdate ?? {}) };
}
// Fills missing orchestration fields from DEFAULT_CONFIG.
function normalizeOrchestration(raw) {
  raw.orchestration = { ...DEFAULT_CONFIG.orchestration, ...(raw.orchestration ?? {}) };
}
196
// Loads config.json from the data dir, applying legacy cleanup, versioned
// migrations, and per-section default filling. Any read/parse failure falls
// back to DEFAULT_CONFIG with the DB placed inside the data dir.
async function loadConfig() {
  // NOTE(review): env precedence mirrors resolveDataDir but skips its
  // legacy-rename step — presumably intentional; confirm.
  const dir = process.env.EXE_OS_DIR ?? process.env.EXE_MEM_DIR ?? EXE_AI_DIR;
  await ensurePrivateDir(dir);
  const configPath = path.join(dir, "config.json");
  if (!existsSync2(configPath)) {
    // No config file yet: pure defaults, DB inside the data dir.
    return { ...DEFAULT_CONFIG, dbPath: path.join(dir, "memories.db") };
  }
  const raw = await readFile(configPath, "utf-8");
  try {
    let parsed = JSON.parse(raw);
    parsed = migrateLegacyConfig(parsed);
    const { config: migratedCfg, migrated, fromVersion } = migrateConfig(parsed);
    if (migrated) {
      process.stderr.write(`[exe-os] Config migrated from v${fromVersion} to v${migratedCfg.config_version}
`);
      // Persist the migrated config; best effort (read-only FS tolerated).
      try {
        await writeFile(configPath, JSON.stringify(migratedCfg, null, 2) + "\n");
        await enforcePrivateFile(configPath);
      } catch {
      }
    }
    // Fill missing nested sections with defaults (mutates migratedCfg).
    normalizeScalingRoadmap(migratedCfg);
    normalizeSessionLifecycle(migratedCfg);
    normalizeAutoUpdate(migratedCfg);
    normalizeOrchestration(migratedCfg);
    const config = { ...DEFAULT_CONFIG, dbPath: path.join(dir, "memories.db"), ...migratedCfg };
    // Expand a leading "~" in a user-supplied dbPath.
    if (config.dbPath.startsWith("~")) {
      config.dbPath = config.dbPath.replace(/^~/, os.homedir());
    }
    // When EXE_OS_DIR points at a dir that already holds memories.db but the
    // configured dbPath does not exist, prefer the env-local database.
    const envDbPath = path.join(dir, "memories.db");
    if (process.env.EXE_OS_DIR && config.dbPath !== envDbPath && !existsSync2(config.dbPath) && existsSync2(envDbPath)) {
      config.dbPath = envDbPath;
    }
    return config;
  } catch {
    // Corrupt or unparseable config: fall back to defaults.
    return { ...DEFAULT_CONFIG, dbPath: path.join(dir, "memories.db") };
  }
}
234
var EXE_AI_DIR, DB_PATH, MODELS_DIR, CONFIG_PATH, LEGACY_LANCE_PATH, CURRENT_CONFIG_VERSION, DEFAULT_CONFIG, CONFIG_MIGRATIONS;
// Well-known paths inside the data dir, plus the full default configuration
// every loaded config is merged over.
var init_config = __esm({
  "src/lib/config.ts"() {
    "use strict";
    init_secure_files();
    EXE_AI_DIR = resolveDataDir();
    DB_PATH = path.join(EXE_AI_DIR, "memories.db");
    MODELS_DIR = path.join(EXE_AI_DIR, "models");
    CONFIG_PATH = path.join(EXE_AI_DIR, "config.json");
    LEGACY_LANCE_PATH = path.join(EXE_AI_DIR, "local.lance");
    CURRENT_CONFIG_VERSION = 1;
    DEFAULT_CONFIG = {
      config_version: CURRENT_CONFIG_VERSION,
      dbPath: DB_PATH,
      modelFile: "jina-embeddings-v5-small-q4_k_m.gguf",
      embeddingDim: 1024,
      batchSize: 20,
      flushIntervalMs: 1e4,
      autoIngestion: true,
      autoRetrieval: true,
      searchMode: "hybrid",
      hookSearchMode: "hybrid",
      fileGrepEnabled: true,
      splashEffect: true,
      consolidationEnabled: true,
      consolidationIntervalMs: 6 * 60 * 60 * 1e3, // every 6 hours
      consolidationModel: "claude-haiku-4-5-20251001",
      consolidationMaxCallsPerRun: 20,
      selfQueryRouter: true,
      selfQueryModel: "claude-haiku-4-5-20251001",
      rerankerEnabled: true,
      scalingRoadmap: {
        rerankerAutoTrigger: {
          enabled: true,
          broadQueryMinCardinality: 5e4,
          fetchTopK: 200,
          returnTopK: 20
        }
      },
      graphRagEnabled: true,
      wikiEnabled: false,
      wikiUrl: "",
      wikiApiKey: "",
      wikiSyncIntervalMs: 30 * 60 * 1e3, // every 30 minutes
      wikiWorkspaceMapping: {},
      wikiAutoUpdate: true,
      wikiAutoUpdateThreshold: 0.5,
      wikiAutoUpdateCreateNew: true,
      skillLearning: true,
      skillThreshold: 3,
      skillModel: "claude-haiku-4-5-20251001",
      exeHeartbeat: {
        enabled: true,
        intervalSeconds: 60,
        staleInProgressThresholdHours: 2
      },
      sessionLifecycle: {
        idleKillEnabled: true,
        idleKillTicksRequired: 3,
        idleKillIntercomAckWindowMs: 1e4,
        maxAutoInstances: 10
      },
      autoUpdate: {
        checkOnBoot: true,
        autoInstall: false,
        checkIntervalMs: 24 * 60 * 60 * 1e3 // daily
      },
      orchestration: {
        phase: "phase_1_coo",
        phaseSetBy: "default"
      }
    };
    // Ordered chain consumed by migrateConfig; each step bumps from -> to.
    CONFIG_MIGRATIONS = [
      {
        from: 0,
        to: 1,
        migrate: (cfg) => {
          cfg.config_version = 1;
          return cfg;
        }
      }
    ];
  }
});
318
+
319
+ // src/lib/employees.ts
320
+ import { readFile as readFile2, writeFile as writeFile2, mkdir as mkdir2 } from "fs/promises";
321
+ import { existsSync as existsSync3, symlinkSync, readlinkSync, readFileSync as readFileSync2, renameSync as renameSync2, unlinkSync, writeFileSync } from "fs";
322
+ import { execSync } from "child_process";
323
+ import path2 from "path";
324
+ import os2 from "os";
325
// Canonicalizes a role string for comparison: trimmed and lower-cased;
// null/undefined become "".
function normalizeRole(role) {
  const text = role ?? "";
  return text.trim().toLowerCase();
}
328
// True when `role` matches the coordinator role, case/whitespace-insensitive.
function isCoordinatorRole(role) {
  return normalizeRole(role) === normalizeRole(COORDINATOR_ROLE);
}
// First employee whose role is the coordinator role, or undefined.
function getCoordinatorEmployee(employees) {
  return employees.find((e) => isCoordinatorRole(e.role));
}
// Coordinator's display name, falling back to the default template name.
function getCoordinatorName(employees = loadEmployeesSync()) {
  return getCoordinatorEmployee(employees)?.name ?? DEFAULT_COORDINATOR_TEMPLATE_NAME;
}
// Reads exe-employees.json synchronously; missing or invalid files yield [].
function loadEmployeesSync(employeesPath = EMPLOYEES_PATH) {
  if (!existsSync3(employeesPath)) return [];
  try {
    return JSON.parse(readFileSync2(employeesPath, "utf-8"));
  } catch {
    return [];
  }
}
var EMPLOYEES_PATH, DEFAULT_COORDINATOR_TEMPLATE_NAME, COORDINATOR_ROLE, IDENTITY_DIR;
var init_employees = __esm({
  "src/lib/employees.ts"() {
    "use strict";
    init_config();
    EMPLOYEES_PATH = path2.join(EXE_AI_DIR, "exe-employees.json");
    DEFAULT_COORDINATOR_TEMPLATE_NAME = "exe";
    COORDINATOR_ROLE = "COO";
    IDENTITY_DIR = path2.join(EXE_AI_DIR, "identity");
  }
});
356
+
357
+ // src/lib/database-adapter.ts
358
+ import os3 from "os";
359
+ import path3 from "path";
360
+ import { createRequire } from "module";
361
+ import { pathToFileURL } from "url";
362
// Double-quotes a SQL identifier, escaping embedded double quotes.
function quotedIdentifier(identifier) {
  const escaped = identifier.replace(/"/g, '""');
  return `"${escaped}"`;
}
// Last segment of a possibly schema-qualified, possibly quoted table name,
// lower-cased ('"public"."Users"' -> "users").
function unqualifiedTableName(name) {
  const unquoted = name.trim().replace(/^"|"$/g, "");
  const segments = unquoted.split(".");
  const last = segments[segments.length - 1];
  return last.replace(/^"|"$/g, "").toLowerCase();
}
// Drops trailing semicolons (and surrounding whitespace) from a statement.
function stripTrailingSemicolon(sql) {
  return sql.trim().replace(/;+\s*$/u, "");
}
// Appends `clause` to `sql`, inserting it BEFORE any RETURNING clause so the
// statement stays valid Postgres.
function appendClause(sql, clause) {
  const trimmed = stripTrailingSemicolon(sql);
  const returning = /\sRETURNING\b[\s\S]*$/iu.exec(trimmed);
  if (returning === null) {
    return `${trimmed}${clause}`;
  }
  const at = returning.index;
  return `${trimmed.slice(0, at)}${clause}${trimmed.slice(at)}`;
}
382
// Normalizes a statement (bare string or {sql, args}) into a tagged record:
// positional args become an array; named args keep their object form.
function normalizeStatement(stmt) {
  if (typeof stmt === "string") {
    return { kind: "positional", sql: stmt, args: [] };
  }
  const { sql, args } = stmt;
  if (args === void 0 || Array.isArray(args)) {
    return { kind: "positional", sql, args: args ?? [] };
  }
  return { kind: "named", sql, args };
}
392
// Rewrites SQLite-style boolean comparisons (`col = 0/1`, `!=`, `<>`) into
// Postgres TRUE/FALSE literals for every known boolean column, optionally
// table-qualified (e.g. `t.is_active = 1` -> `t.is_active = TRUE`).
function rewriteBooleanLiterals(sql) {
  let out = sql;
  for (const column of BOOLEAN_COLUMN_NAMES) {
    const scoped = `((?:\\b[a-z_][a-z0-9_]*\\.)?${column})`;
    for (const op of ["=", "!=", "<>"]) {
      out = out.replace(new RegExp(`${scoped}\\s*${op}\\s*0\\b`, "giu"), `$1 ${op} FALSE`);
      out = out.replace(new RegExp(`${scoped}\\s*${op}\\s*1\\b`, "giu"), `$1 ${op} TRUE`);
    }
  }
  return out;
}
// SQLite `INSERT OR IGNORE` -> Postgres `INSERT ... ON CONFLICT DO NOTHING`
// (unless the statement already carries an ON CONFLICT clause).
function rewriteInsertOrIgnore(sql) {
  const prefix = /^\s*INSERT\s+OR\s+IGNORE\s+INTO\b/iu;
  if (!prefix.test(sql)) return sql;
  const rewritten = sql.replace(prefix, "INSERT INTO");
  if (/\bON\s+CONFLICT\b/iu.test(rewritten)) return rewritten;
  return appendClause(rewritten, " ON CONFLICT DO NOTHING");
}
// SQLite `INSERT OR REPLACE` -> Postgres upsert. Requires the table's
// conflict keys to be registered in UPSERT_KEYS; otherwise left untouched.
function rewriteInsertOrReplace(sql) {
  const parsed = /^\s*INSERT\s+OR\s+REPLACE\s+INTO\s+([A-Za-z0-9_."]+)\s*\(([^)]+)\)([\s\S]*)$/iu.exec(sql);
  if (!parsed) return sql;
  const [, rawTable, rawColumns, remainder] = parsed;
  const conflictKeys = UPSERT_KEYS[unqualifiedTableName(rawTable)];
  if (!conflictKeys?.length) return sql;
  const columns = rawColumns.split(",").map((col) => col.trim().replace(/^"|"$/g, ""));
  const nonKeyColumns = columns.filter((col) => !conflictKeys.includes(col));
  const target = conflictKeys.map(quotedIdentifier).join(", ");
  const action = nonKeyColumns.length === 0
    ? " DO NOTHING"
    : ` DO UPDATE SET ${nonKeyColumns.map((col) => `${quotedIdentifier(col)} = EXCLUDED.${quotedIdentifier(col)}`).join(", ")}`;
  return `INSERT INTO ${rawTable} (${rawColumns})${appendClause(remainder, ` ON CONFLICT (${target})${action}`)}`;
}
// Full SQLite -> Postgres statement rewrite pipeline. Order matters:
// OR REPLACE is handled before OR IGNORE, then trailing semicolons go.
function rewriteSql(sql) {
  const steps = [
    (s) => s.replace(/\bdatetime\(\s*['"]now['"]\s*\)/giu, "CURRENT_TIMESTAMP"),
    (s) => s.replace(/\bvector32\s*\(\s*\?\s*\)/giu, "?"),
    rewriteBooleanLiterals,
    rewriteInsertOrReplace,
    rewriteInsertOrIgnore
  ];
  return stripTrailingSemicolon(steps.reduce((acc, step) => step(acc), sql));
}
440
// Loosely coerces SQLite-ish truthy values to real booleans.
// null/undefined pass through unchanged; numbers/bigints compare against
// zero; "0"/"false" and "1"/"true" (trimmed, any case) map explicitly;
// anything else falls back to JS truthiness.
function toBoolean(value) {
  if (value === null || value === void 0) return value;
  switch (typeof value) {
    case "boolean":
      return value;
    case "number":
      return value !== 0;
    case "bigint":
      return value !== 0n;
    case "string": {
      const text = value.trim().toLowerCase();
      if (text === "0" || text === "false") return false;
      if (text === "1" || text === "true") return true;
      return Boolean(value);
    }
    default:
      return Boolean(value);
  }
}
452
// Counts `?` placeholders in sql[0:end), ignoring those inside single- or
// double-quoted strings, `--` line comments, and `/* */` block comments.
// A backslash-escaped quote does not terminate a string.
function countQuestionMarks(sql, end) {
  let total = 0;
  let mode = "code"; // "code" | "single" | "double" | "line" | "block"
  for (let i = 0; i < end; i++) {
    const ch = sql[i];
    const ahead = sql[i + 1];
    if (mode === "line") {
      if (ch === "\n") mode = "code";
      continue;
    }
    if (mode === "block") {
      if (ch === "*" && ahead === "/") {
        mode = "code";
        i += 1;
      }
      continue;
    }
    if (mode === "single") {
      if (ch === "'" && sql[i - 1] !== "\\") mode = "code";
      continue;
    }
    if (mode === "double") {
      if (ch === '"' && sql[i - 1] !== "\\") mode = "code";
      continue;
    }
    if (ch === "-" && ahead === "-") {
      mode = "line";
      i += 1;
    } else if (ch === "/" && ahead === "*") {
      mode = "block";
      i += 1;
    } else if (ch === "'" && sql[i - 1] !== "\\") {
      mode = "single";
    } else if (ch === '"' && sql[i - 1] !== "\\") {
      mode = "double";
    } else if (ch === "?") {
      total += 1;
    }
  }
  return total;
}
496
// 1-based placeholder ordinals whose `col = ?` comparisons involve a known
// boolean column (optionally table-qualified).
function findBooleanPlaceholderIndexes(sql) {
  const ordinals = /* @__PURE__ */ new Set();
  for (const column of BOOLEAN_COLUMN_NAMES) {
    const comparison = new RegExp(`(?:\\b[a-z_][a-z0-9_]*\\.)?${column}\\s*=\\s*\\?`, "giu");
    for (const hit of sql.matchAll(comparison)) {
      const questionPos = hit.index + hit[0].lastIndexOf("?");
      ordinals.add(countQuestionMarks(sql, questionPos + 1));
    }
  }
  return ordinals;
}
// In-place coercion of INSERT argument values bound to boolean columns of
// known tables (positions derive from the column list).
function coerceInsertBooleanArgs(sql, args) {
  const parsed = /^\s*INSERT(?:\s+OR\s+(?:IGNORE|REPLACE))?\s+INTO\s+([A-Za-z0-9_."]+)\s*\(([^)]+)\)/iu.exec(sql);
  if (!parsed) return;
  const boolColumns = BOOLEAN_COLUMNS_BY_TABLE[unqualifiedTableName(parsed[1])];
  if (!boolColumns?.size) return;
  const columns = parsed[2].split(",").map((col) => col.trim().replace(/^"|"$/g, ""));
  columns.forEach((column, index) => {
    if (boolColumns.has(column) && index < args.length) {
      args[index] = toBoolean(args[index]);
    }
  });
}
// In-place coercion of UPDATE ... SET argument values bound via `col = ?`
// assignments to boolean columns of known tables.
function coerceUpdateBooleanArgs(sql, args) {
  const parsed = /^\s*UPDATE\s+([A-Za-z0-9_."]+)\s+SET\s+([\s\S]+?)(?:\s+WHERE\b|$)/iu.exec(sql);
  if (!parsed) return;
  const boolColumns = BOOLEAN_COLUMNS_BY_TABLE[unqualifiedTableName(parsed[1])];
  if (!boolColumns?.size) return;
  let ordinal = 0;
  for (const assignment of parsed[2].split(",")) {
    if (!assignment.includes("?")) continue;
    ordinal += 1;
    const col = /^\s*(?:[A-Za-z_][A-Za-z0-9_]*\.)?([A-Za-z_][A-Za-z0-9_]*)\s*=\s*\?/iu.exec(assignment);
    if (col && boolColumns.has(col[1])) {
      args[ordinal - 1] = toBoolean(args[ordinal - 1]);
    }
  }
}
// Returns a copy of `args` with every boolean-column placeholder coerced,
// combining the INSERT, UPDATE, and comparison-based detectors.
function coerceBooleanArgs(sql, args) {
  const coerced = [...args];
  coerceInsertBooleanArgs(sql, coerced);
  coerceUpdateBooleanArgs(sql, coerced);
  for (const ordinal of findBooleanPlaceholderIndexes(sql)) {
    if (ordinal > 0 && ordinal <= coerced.length) {
      coerced[ordinal - 1] = toBoolean(coerced[ordinal - 1]);
    }
  }
  return coerced;
}
552
// Rewrites positional `?` placeholders as $1, $2, ... for Postgres, leaving
// quoted strings and SQL comments untouched. Backslash-escaped quotes do
// not terminate a string.
function convertQuestionMarksToDollarParams(sql) {
  let result = "";
  let ordinal = 0;
  let mode = "code"; // "code" | "single" | "double" | "line" | "block"
  for (let i = 0; i < sql.length; i++) {
    const ch = sql[i];
    const ahead = sql[i + 1];
    if (mode === "line") {
      result += ch;
      if (ch === "\n") mode = "code";
      continue;
    }
    if (mode === "block") {
      result += ch;
      if (ch === "*" && ahead === "/") {
        result += ahead;
        mode = "code";
        i += 1;
      }
      continue;
    }
    if (mode === "single") {
      result += ch;
      if (ch === "'" && sql[i - 1] !== "\\") mode = "code";
      continue;
    }
    if (mode === "double") {
      result += ch;
      if (ch === '"' && sql[i - 1] !== "\\") mode = "code";
      continue;
    }
    if (ch === "-" && ahead === "-") {
      result += ch + ahead;
      mode = "line";
      i += 1;
    } else if (ch === "/" && ahead === "*") {
      result += ch + ahead;
      mode = "block";
      i += 1;
    } else if (ch === "'" && sql[i - 1] !== "\\") {
      mode = "single";
      result += ch;
    } else if (ch === '"' && sql[i - 1] !== "\\") {
      mode = "double";
      result += ch;
    } else if (ch === "?") {
      ordinal += 1;
      result += `$${ordinal}`;
    } else {
      result += ch;
    }
  }
  return result;
}
607
// Translates a SQLite-dialect statement into Postgres form: rewrites the
// SQL, coerces boolean args against the rewritten SQL, then converts the
// `?` placeholders to $n. Named parameters are rejected (Prisma raw
// queries are positional-only).
function translateStatementForPostgres(stmt) {
  const normalized = normalizeStatement(stmt);
  if (normalized.kind === "named") {
    throw new Error("Named SQL parameters are not supported by the Prisma adapter.");
  }
  const sql = rewriteSql(normalized.sql);
  return {
    sql: convertQuestionMarksToDollarParams(sql),
    args: coerceBooleanArgs(sql, normalized.args)
  };
}
// True when the statement must skip Postgres entirely: named parameters, or
// SQL matching one of the known-incompatible patterns.
function shouldBypassPostgres(stmt) {
  const normalized = normalizeStatement(stmt);
  if (normalized.kind === "named") return true;
  return IMMEDIATE_FALLBACK_PATTERNS.some((pattern) => pattern.test(normalized.sql));
}
626
// True when a Postgres-side failure should be retried against SQLite:
// missing relation/function (42P01/42883), syntax error (42601), or the
// adapter's own "not supported" rejections.
function shouldFallbackOnError(error) {
  const text = error instanceof Error ? error.message : String(error);
  return /42P01|42883|42601|does not exist|syntax error|not supported|Named SQL parameters are not supported/iu.test(text);
}
// Heuristic for row-producing statements (SELECT/WITH/SHOW/EXPLAIN/VALUES,
// or anything containing RETURNING), which must use a query API.
function isReadQuery(sql) {
  const head = sql.trimStart();
  if (/^(SELECT|WITH|SHOW|EXPLAIN|VALUES)\b/iu.test(head)) return true;
  return /\bRETURNING\b/iu.test(head);
}
634
// Builds a libsql-style Row: an array of the column values that also
// carries the row's named properties (index and name access both work).
function buildRow(row, columns) {
  const values = columns.map((column) => row[column]);
  return Object.assign(values, row);
}
// Adapts plain result rows to a libsql-style ResultSet. Column names come
// from the first row (an empty result yields no columns); column types are
// unknown here and reported as "".
function buildResultSet(rows, rowsAffected = 0) {
  const first = rows[0];
  const columns = first ? Object.keys(first) : [];
  const columnTypes = columns.map(() => "");
  return {
    columns,
    columnTypes,
    rows: rows.map((row) => buildRow(row, columns)),
    rowsAffected,
    lastInsertRowid: void 0,
    toJSON() {
      return {
        columns,
        columnTypes: columns.map(() => ""),
        rows,
        rowsAffected,
        lastInsertRowid: void 0
      };
    }
  };
}
658
// Loads a PrismaClient exactly once, caching the promise in the
// module-level `prismaClientPromise`. Resolution order: an explicit
// EXE_OS_PRISMA_CLIENT_PATH module, else @prisma/client resolved from the
// $EXE_DB_ROOT (default ~/exe-db) package.
async function loadPrismaClient() {
  if (!prismaClientPromise) {
    prismaClientPromise = (async () => {
      const explicitPath = process.env.EXE_OS_PRISMA_CLIENT_PATH;
      if (explicitPath) {
        const module2 = await import(pathToFileURL(explicitPath).href);
        const PrismaClient2 = module2.PrismaClient ?? module2.default?.PrismaClient;
        if (!PrismaClient2) {
          throw new Error(`No PrismaClient export found at ${explicitPath}`);
        }
        return new PrismaClient2();
      }
      const exeDbRoot = process.env.EXE_DB_ROOT ?? path3.join(os3.homedir(), "exe-db");
      const requireFromExeDb = createRequire(path3.join(exeDbRoot, "package.json"));
      const prismaEntry = requireFromExeDb.resolve("@prisma/client");
      const module = await import(pathToFileURL(prismaEntry).href);
      const PrismaClient = module.PrismaClient ?? module.default?.PrismaClient;
      if (!PrismaClient) {
        throw new Error(`No PrismaClient export found in ${prismaEntry}`);
      }
      return new PrismaClient();
    })();
  }
  return prismaClientPromise;
}
// One-time bootstrap: for each VIEW_MAPPINGS entry whose source relation
// exists (to_regclass check), (re)creates a public compatibility view over
// it. The promise is cached in `compatibilityBootstrapPromise`.
// NOTE(review): a rejected bootstrap promise stays cached, so a transient
// failure is never retried within this process — confirm that is intended.
async function ensureCompatibilityViews(prisma) {
  if (!compatibilityBootstrapPromise) {
    compatibilityBootstrapPromise = (async () => {
      for (const mapping of VIEW_MAPPINGS) {
        const relation = mapping.source.replace(/"/g, "");
        const rows = await prisma.$queryRawUnsafe(
          "SELECT to_regclass($1)::text AS regclass",
          relation
        );
        if (!rows[0]?.regclass) {
          continue;
        }
        await prisma.$executeRawUnsafe(
          `CREATE OR REPLACE VIEW public.${quotedIdentifier(mapping.view)} AS SELECT * FROM ${mapping.source}`
        );
      }
    })();
  }
  return compatibilityBootstrapPromise;
}
// Runs one translated statement on Prisma: row-producing SQL goes through
// $queryRawUnsafe (rowsAffected = row count only for RETURNING), all other
// statements through $executeRawUnsafe.
async function executeOnPrisma(executor, stmt) {
  const translated = translateStatementForPostgres(stmt);
  if (isReadQuery(translated.sql)) {
    const rows = await executor.$queryRawUnsafe(
      translated.sql,
      ...translated.args
    );
    return buildResultSet(rows, /\bRETURNING\b/iu.test(translated.sql) ? rows.length : 0);
  }
  const rowsAffected = await executor.$executeRawUnsafe(translated.sql, ...translated.args);
  return buildResultSet([], rowsAffected);
}
715
// Splits a multi-statement SQL string on top-level semicolons, leaving
// semicolons inside quoted strings and comments intact. Each returned
// statement is trimmed; empty fragments are dropped.
function splitSqlStatements(sql) {
  const statements = [];
  let buffer = "";
  let mode = "code"; // "code" | "single" | "double" | "line" | "block"
  const flush = () => {
    const trimmed = buffer.trim();
    if (trimmed) statements.push(trimmed);
    buffer = "";
  };
  for (let i = 0; i < sql.length; i++) {
    const ch = sql[i];
    const ahead = sql[i + 1];
    if (mode === "line") {
      buffer += ch;
      if (ch === "\n") mode = "code";
      continue;
    }
    if (mode === "block") {
      buffer += ch;
      if (ch === "*" && ahead === "/") {
        buffer += ahead;
        mode = "code";
        i += 1;
      }
      continue;
    }
    if (mode === "single") {
      buffer += ch;
      if (ch === "'" && sql[i - 1] !== "\\") mode = "code";
      continue;
    }
    if (mode === "double") {
      buffer += ch;
      if (ch === '"' && sql[i - 1] !== "\\") mode = "code";
      continue;
    }
    if (ch === "-" && ahead === "-") {
      buffer += ch + ahead;
      mode = "line";
      i += 1;
    } else if (ch === "/" && ahead === "*") {
      buffer += ch + ahead;
      mode = "block";
      i += 1;
    } else if (ch === "'" && sql[i - 1] !== "\\") {
      mode = "single";
      buffer += ch;
    } else if (ch === '"' && sql[i - 1] !== "\\") {
      mode = "double";
      buffer += ch;
    } else if (ch === ";") {
      flush();
    } else {
      buffer += ch;
    }
  }
  flush();
  return statements;
}
775
// Builds a libsql-compatible client whose statements are routed through
// Prisma/Postgres, with an optional SQLite `fallbackClient` used for
// statements Postgres cannot serve (shouldBypassPostgres) or for recoverable
// errors (shouldFallbackOnError). The returned object mirrors the
// @libsql/client surface: execute/batch/migrate/transaction/executeMultiple/
// sync/close plus `closed` and `protocol` getters.
async function createPrismaDbAdapter(fallbackClient) {
  const prisma = await loadPrismaClient();
  // Ensure the public.* views over memory.*/wiki.* exist before any query.
  await ensureCompatibilityViews(prisma);
  let closed = false;
  let adapter;
  // Routes a statement to the SQLite fallback. When called with an `error`
  // and no fallback exists, the original error is rethrown.
  const fallbackExecute = async (stmt, error) => {
    if (!fallbackClient) {
      if (error) throw error;
      throw new Error("No fallback SQLite client is available for this Prisma-routed query.");
    }
    if (error) {
      process.stderr.write(
        `[database-adapter] Falling back to SQLite: ${error instanceof Error ? error.message : String(error)}
`
      );
    }
    return fallbackClient.execute(stmt);
  };
  adapter = {
    // Single statement: bypass Postgres for SQLite-only SQL, otherwise try
    // Prisma and fall back only on errors classified as recoverable.
    async execute(stmt) {
      if (shouldBypassPostgres(stmt)) {
        return fallbackExecute(stmt);
      }
      try {
        return await executeOnPrisma(prisma, stmt);
      } catch (error) {
        if (shouldFallbackOnError(error)) {
          return fallbackExecute(stmt, error);
        }
        throw error;
      }
    },
    // Batch: all-or-nothing routing — if ANY statement needs SQLite, the
    // whole batch goes to the fallback so it stays atomic on one engine.
    async batch(stmts, mode) {
      if (stmts.some((stmt) => shouldBypassPostgres(stmt))) {
        if (!fallbackClient) {
          throw new Error("Cannot batch unsupported SQLite-only statements without a fallback client.");
        }
        return fallbackClient.batch(stmts, mode);
      }
      try {
        // Prefer a real transaction when the client supports it.
        if (prisma.$transaction) {
          return await prisma.$transaction(async (tx) => {
            const results2 = [];
            for (const stmt of stmts) {
              results2.push(await executeOnPrisma(tx, stmt));
            }
            return results2;
          });
        }
        // Otherwise run sequentially without transactional guarantees.
        const results = [];
        for (const stmt of stmts) {
          results.push(await executeOnPrisma(prisma, stmt));
        }
        return results;
      } catch (error) {
        if (fallbackClient && shouldFallbackOnError(error)) {
          process.stderr.write(
            `[database-adapter] Falling back batch to SQLite: ${error instanceof Error ? error.message : String(error)}
`
          );
          return fallbackClient.batch(stmts, mode);
        }
        throw error;
      }
    },
    // Migrations prefer the SQLite fallback; without one they are replayed
    // as a deferred batch through Prisma.
    async migrate(stmts) {
      if (fallbackClient) {
        return fallbackClient.migrate(stmts);
      }
      return adapter.batch(stmts, "deferred");
    },
    // Interactive transactions are not implemented over Prisma here.
    async transaction(mode) {
      if (!fallbackClient) {
        throw new Error("Interactive transactions are only supported on the SQLite fallback client.");
      }
      return fallbackClient.transaction(mode);
    },
    // Multi-statement scripts: SQLite-only scripts go to the fallback whole;
    // otherwise each split statement is routed through execute() above.
    async executeMultiple(sql) {
      if (fallbackClient && shouldBypassPostgres(sql)) {
        return fallbackClient.executeMultiple(sql);
      }
      for (const statement of splitSqlStatements(sql)) {
        await adapter.execute(statement);
      }
    },
    // sync() is a libsql concept; Postgres has nothing to sync, so report a
    // no-op result when there is no fallback.
    async sync() {
      if (fallbackClient) {
        return fallbackClient.sync();
      }
      return { frame_no: 0, frames_synced: 0 };
    },
    // Resets the module-level memoized client/bootstrap so a later
    // loadPrismaClient() builds a fresh connection. Disconnect is fire-and-forget.
    close() {
      closed = true;
      prismaClientPromise = null;
      compatibilityBootstrapPromise = null;
      void prisma.$disconnect?.();
    },
    get closed() {
      return closed;
    },
    get protocol() {
      return "prisma-postgres";
    }
  };
  return adapter;
}
881
// Module-scope state for src/lib/database-adapter.ts (esbuild lazy-init wrapper).
var VIEW_MAPPINGS, UPSERT_KEYS, BOOLEAN_COLUMNS_BY_TABLE, BOOLEAN_COLUMN_NAMES, IMMEDIATE_FALLBACK_PATTERNS, prismaClientPromise, compatibilityBootstrapPromise;
var init_database_adapter = __esm({
  "src/lib/database-adapter.ts"() {
    "use strict";
    // Legacy unqualified table name -> schema-qualified Postgres relation.
    // ensureCompatibilityViews() materializes each as a public.* view.
    VIEW_MAPPINGS = [
      { view: "memories", source: "memory.memory_records" },
      { view: "tasks", source: "memory.tasks" },
      { view: "behaviors", source: "memory.behaviors" },
      { view: "entities", source: "memory.entities" },
      { view: "relationships", source: "memory.relationships" },
      { view: "entity_memories", source: "memory.entity_memories" },
      { view: "entity_aliases", source: "memory.entity_aliases" },
      { view: "notifications", source: "memory.notifications" },
      { view: "messages", source: "memory.messages" },
      { view: "users", source: "wiki.users" },
      { view: "workspaces", source: "wiki.workspaces" },
      { view: "workspace_users", source: "wiki.workspace_users" },
      { view: "documents", source: "wiki.workspace_documents" },
      { view: "chats", source: "wiki.workspace_chats" }
    ];
    // Conflict-target columns used when translating upserts per table.
    UPSERT_KEYS = {
      memories: ["id"],
      tasks: ["id"],
      behaviors: ["id"],
      entities: ["id"],
      relationships: ["id"],
      entity_aliases: ["alias"],
      notifications: ["id"],
      messages: ["id"],
      users: ["id"],
      workspaces: ["id"],
      workspace_users: ["id"],
      documents: ["id"],
      chats: ["id"]
    };
    // Columns stored as SQLite 0/1 integers that are booleans in Postgres.
    BOOLEAN_COLUMNS_BY_TABLE = {
      memories: /* @__PURE__ */ new Set(["has_error", "draft"]),
      behaviors: /* @__PURE__ */ new Set(["active"]),
      notifications: /* @__PURE__ */ new Set(["read"]),
      users: /* @__PURE__ */ new Set(["has_personal_memory"])
    };
    // Flattened union of all boolean column names across tables.
    BOOLEAN_COLUMN_NAMES = new Set(
      Object.values(BOOLEAN_COLUMNS_BY_TABLE).flatMap((cols) => [...cols])
    );
    // SQL constructs that Postgres cannot serve — such statements are routed
    // straight to the SQLite fallback without attempting Prisma.
    IMMEDIATE_FALLBACK_PATTERNS = [
      /\bPRAGMA\b/i,
      /\bsqlite_master\b/i,
      /(?:^|[.\s])(?:memories|conversations|entities)_fts\b/i,
      /\bMATCH\b/i,
      /\bvector_distance_cos\s*\(/i,
      /\bjson_extract\s*\(/i,
      /\bjulianday\s*\(/i,
      /\bstrftime\s*\(/i,
      /\blast_insert_rowid\s*\(/i
    ];
    // Memoized Prisma client / compatibility-view bootstrap promises.
    prismaClientPromise = null;
    compatibilityBootstrapPromise = null;
  }
});
940
+
941
+ // src/lib/daemon-auth.ts
942
+ import crypto from "crypto";
943
+ import path4 from "path";
944
+ import { existsSync as existsSync4, readFileSync as readFileSync3, writeFileSync as writeFileSync2 } from "fs";
945
// Trims a candidate token string; returns null for null/undefined/empty/
// whitespace-only input so callers get a clean "no token" signal.
function normalizeToken(token) {
  const trimmed = token?.trim() ?? "";
  return trimmed === "" ? null : trimmed;
}
950
// Reads the daemon auth token from disk; returns null if the file is missing,
// unreadable, or contains only whitespace.
function readDaemonToken() {
  try {
    return existsSync4(DAEMON_TOKEN_PATH)
      ? normalizeToken(readFileSync3(DAEMON_TOKEN_PATH, "utf8"))
      : null;
  } catch {
    return null;
  }
}
958
// Returns the persisted daemon token, creating it on first use. `seed` (e.g.
// from the environment) wins over a random token; the file is written with
// a trailing newline into a private directory and locked down afterwards.
function ensureDaemonToken(seed) {
  const current = readDaemonToken();
  if (current !== null) return current;
  const token = normalizeToken(seed) ?? crypto.randomBytes(32).toString("hex");
  ensurePrivateDirSync(EXE_AI_DIR);
  writeFileSync2(DAEMON_TOKEN_PATH, token + "\n", "utf8");
  enforcePrivateFileSync(DAEMON_TOKEN_PATH);
  return token;
}
968
// Module-scope state for src/lib/daemon-auth.ts (esbuild lazy-init wrapper).
var DAEMON_TOKEN_PATH;
var init_daemon_auth = __esm({
  "src/lib/daemon-auth.ts"() {
    "use strict";
    init_config();
    init_secure_files();
    // Location of the shared client/daemon auth token file.
    DAEMON_TOKEN_PATH = path4.join(EXE_AI_DIR, "exed.token");
  }
});
977
+
978
+ // src/lib/exe-daemon-client.ts
979
+ import net from "net";
980
+ import os4 from "os";
981
+ import { spawn } from "child_process";
982
+ import { randomUUID } from "crypto";
983
+ import { existsSync as existsSync5, unlinkSync as unlinkSync2, readFileSync as readFileSync4, openSync, closeSync, statSync } from "fs";
984
+ import path5 from "path";
985
+ import { fileURLToPath } from "url";
986
// Socket data handler: accumulates chunks into the module buffer, then
// resolves pending requests for every complete newline-delimited JSON
// response. An oversized buffer is dropped wholesale as a safety valve;
// unparseable or id-less lines are silently ignored.
function handleData(chunk) {
  _buffer += chunk.toString();
  if (_buffer.length > MAX_BUFFER) {
    _buffer = "";
    return;
  }
  for (let idx = _buffer.indexOf("\n"); idx !== -1; idx = _buffer.indexOf("\n")) {
    const line = _buffer.slice(0, idx).trim();
    _buffer = _buffer.slice(idx + 1);
    if (line === "") continue;
    let response;
    try {
      response = JSON.parse(line);
    } catch {
      continue;
    }
    const id = response.id;
    if (!id) continue;
    const waiter = _pending.get(id);
    if (!waiter) continue;
    clearTimeout(waiter.timer);
    _pending.delete(id);
    waiter.resolve(response);
  }
}
1011
// Removes leftover PID and socket files from a dead daemon. If the recorded
// PID is still alive (signal 0 probe succeeds), the files are left alone.
function cleanupStaleFiles() {
  if (!existsSync5(PID_PATH)) return;
  try {
    const pid = parseInt(readFileSync4(PID_PATH, "utf8").trim(), 10);
    if (pid > 0) {
      try {
        process.kill(pid, 0);
        return; // daemon is alive — keep its files
      } catch {
        // process gone; fall through to cleanup
      }
    }
  } catch {
    // unreadable PID file; treat as stale
  }
  for (const stale of [PID_PATH, SOCKET_PATH]) {
    try {
      unlinkSync2(stale);
    } catch {
      // best-effort removal
    }
  }
}
1034
// Walks up from this module's directory looking for the nearest directory
// containing a package.json; returns null if none is found before the
// filesystem root.
function findPackageRoot() {
  let current = path5.dirname(fileURLToPath(import.meta.url));
  const fsRoot = path5.parse(current).root;
  while (current !== fsRoot) {
    if (existsSync5(path5.join(current, "package.json"))) return current;
    current = path5.dirname(current);
  }
  return null;
}
1043
// Estimates reclaimable memory in GB. On macOS, os.freemem() drastically
// undercounts (inactive/speculative pages are reclaimable), so vm_stat is
// parsed instead; any failure falls back to os.freemem().
function getAvailableMemoryGB() {
  const BYTES_PER_GB = 1024 * 1024 * 1024;
  if (process.platform !== "darwin") {
    return os4.freemem() / BYTES_PER_GB;
  }
  try {
    const { execSync: execSync3 } = __require("child_process");
    const vmstat = execSync3("vm_stat", { encoding: "utf8" });
    // Default page size matches Apple Silicon (16 KiB) if vm_stat omits it.
    const sizeMatch = vmstat.match(/page size of (\d+) bytes/);
    const pageBytes = sizeMatch ? parseInt(sizeMatch[1], 10) : 16384;
    const countOf = (label) => {
      const m = vmstat.match(new RegExp(`Pages ${label}:\\s+(\\d+)`));
      return m ? parseInt(m[1], 10) : 0;
    };
    const reclaimablePages = countOf("free") + countOf("inactive") + countOf("speculative");
    return reclaimablePages * pageBytes / BYTES_PER_GB;
  } catch {
    return os4.freemem() / BYTES_PER_GB;
  }
}
1064
// Spawns the embedding daemon as a detached background Node process, unless
// the machine is too memory-constrained (<=8GB total, or <=16GB total with
// <2GB free). Stderr goes to exed.log next to the socket; the child gets the
// socket/PID paths and auth token via its environment.
function spawnDaemon() {
  const freeGB = getAvailableMemoryGB();
  const totalGB = os4.totalmem() / (1024 * 1024 * 1024);
  if (totalGB <= 8) {
    process.stderr.write(
      `[exed-client] SKIP: ${totalGB.toFixed(0)}GB system \u2014 embedding daemon disabled. Using keyword search only. Minimum 16GB recommended for vector search.
`
    );
    return;
  }
  if (totalGB <= 16 && freeGB < 2) {
    process.stderr.write(
      `[exed-client] SKIP: low memory (${freeGB.toFixed(1)}GB available / ${totalGB.toFixed(0)}GB total). Embedding daemon not started \u2014 using keyword search only.
`
    );
    return;
  }
  const pkgRoot = findPackageRoot();
  if (!pkgRoot) {
    process.stderr.write("[exed-client] WARN: cannot find package root\n");
    return;
  }
  const daemonPath = path5.join(pkgRoot, "dist", "lib", "exe-daemon.js");
  if (!existsSync5(daemonPath)) {
    process.stderr.write(`[exed-client] WARN: daemon script not found at ${daemonPath}
`);
    return;
  }
  const resolvedPath = daemonPath;
  const daemonToken = ensureDaemonToken(process.env[DAEMON_TOKEN_ENV] ?? null);
  process.stderr.write(`[exed-client] Spawning daemon: ${resolvedPath}
`);
  // Daemon stderr is appended to exed.log; "ignore" if the log can't be opened.
  const logPath = path5.join(path5.dirname(SOCKET_PATH), "exed.log");
  let stderrFd = "ignore";
  try {
    stderrFd = openSync(logPath, "a");
  } catch {
  }
  // NOTE(review): totalGB <= 8 already returned above, so this is always 512.
  const heapCapMB = totalGB <= 8 ? 256 : 512;
  const nodeArgs = [`--max-old-space-size=${heapCapMB}`, resolvedPath];
  const child = spawn(process.execPath, nodeArgs, {
    detached: true,
    stdio: ["ignore", "ignore", stderrFd],
    env: {
      ...process.env,
      TMUX: void 0,
      // Daemon is global — must not inherit session scope
      TMUX_PANE: void 0,
      // Prevents resolveExeSession() from scoping to one session
      EXE_DAEMON_SOCK: SOCKET_PATH,
      EXE_DAEMON_PID: PID_PATH,
      [DAEMON_TOKEN_ENV]: daemonToken
    }
  });
  // Detach so this process can exit while the daemon keeps running.
  child.unref();
  // The spawned child holds its own copy of the fd; close ours.
  if (typeof stderrFd === "number") {
    try {
      closeSync(stderrFd);
    } catch {
    }
  }
}
1126
// Attempts to take the exclusive spawn lock via an O_EXCL file create.
// If the lock exists but its mtime is older than SPAWN_LOCK_STALE_MS, it is
// treated as abandoned, removed, and one more acquisition is attempted.
function acquireSpawnLock() {
  const tryCreate = () => {
    try {
      closeSync(openSync(SPAWN_LOCK_PATH, "wx"));
      return true;
    } catch {
      return false;
    }
  };
  if (tryCreate()) return true;
  try {
    const info = statSync(SPAWN_LOCK_PATH);
    if (Date.now() - info.mtimeMs > SPAWN_LOCK_STALE_MS) {
      try {
        unlinkSync2(SPAWN_LOCK_PATH);
      } catch {
        // another process may have removed it first
      }
      if (tryCreate()) return true;
    }
  } catch {
    // stat failed — lock may have vanished; report not acquired
  }
  return false;
}
1151
// Best-effort release of the spawn lock file; ignores a missing file.
function releaseSpawnLock() {
  try {
    unlinkSync2(SPAWN_LOCK_PATH);
  } catch {
    // already gone or not removable — nothing to do
  }
}
1157
// Attempts one connection to the daemon's unix socket. Resolves true on
// success (also when already connected) and false on timeout (2s) or error —
// it never rejects. On connect it installs the shared data handler and a
// close handler that fails every in-flight request.
function connectToSocket() {
  return new Promise((resolve) => {
    if (_socket && _connected) {
      resolve(true);
      return;
    }
    const socket = net.createConnection({ path: SOCKET_PATH });
    // Guard against a socket that never connects nor errors.
    const connectTimeout = setTimeout(() => {
      socket.destroy();
      resolve(false);
    }, 2e3);
    socket.on("connect", () => {
      clearTimeout(connectTimeout);
      _socket = socket;
      _connected = true;
      _buffer = "";
      socket.on("data", handleData);
      socket.on("close", () => {
        _connected = false;
        _socket = null;
        // Fail all pending requests so callers don't hang on a dead socket.
        for (const [id, entry] of _pending) {
          clearTimeout(entry.timer);
          _pending.delete(id);
          entry.resolve({ error: "Connection closed" });
        }
      });
      // Post-connect errors just mark the client disconnected; "close"
      // fires afterwards and drains _pending.
      socket.on("error", () => {
        _connected = false;
        _socket = null;
      });
      resolve(true);
    });
    // Pre-connect error (e.g. socket file missing): report failure.
    socket.on("error", () => {
      clearTimeout(connectTimeout);
      resolve(false);
    });
  });
}
1195
// Ensures a daemon connection: tries the existing socket first, then (under
// the spawn lock) cleans stale files and spawns a fresh daemon, then polls
// with exponential backoff (100ms doubling, capped at 3s) until connected or
// CONNECT_TIMEOUT_MS elapses. Returns whether a connection was established.
async function connectEmbedDaemon() {
  if (_socket && _connected) return true;
  if (await connectToSocket()) return true;
  if (acquireSpawnLock()) {
    try {
      cleanupStaleFiles();
      spawnDaemon();
    } finally {
      releaseSpawnLock();
    }
  }
  const deadline = Date.now() + CONNECT_TIMEOUT_MS;
  let backoff = 100;
  while (Date.now() < deadline) {
    await new Promise((r) => setTimeout(r, backoff));
    if (await connectToSocket()) return true;
    backoff = Math.min(backoff * 2, 3e3);
  }
  return false;
}
1215
// Sends one newline-delimited JSON request over the daemon socket and
// resolves with the matching response (correlated by random UUID via
// handleData), or with an { error } object on disconnect/timeout/write
// failure — it never rejects. The auth token is attached from the
// environment or the token file.
function sendDaemonRequest(payload, timeoutMs = REQUEST_TIMEOUT_MS) {
  return new Promise((resolve) => {
    if (!_socket || !_connected) {
      resolve({ error: "Not connected" });
      return;
    }
    const id = randomUUID();
    const token = process.env[DAEMON_TOKEN_ENV] ?? readDaemonToken();
    // If no response arrives in time, drop the pending entry and report.
    const timer = setTimeout(() => {
      _pending.delete(id);
      resolve({ error: "Request timeout" });
    }, timeoutMs);
    _pending.set(id, { resolve, timer });
    try {
      _socket.write(JSON.stringify({ id, token, ...payload }) + "\n");
    } catch {
      clearTimeout(timer);
      _pending.delete(id);
      resolve({ error: "Write failed" });
    }
  });
}
1237
// True while the daemon socket is connected; the flag is maintained by the
// connect/close/error handlers installed in connectToSocket().
function isClientConnected() {
  return _connected === true;
}
1240
// Module-scope state for src/lib/exe-daemon-client.ts (esbuild lazy-init wrapper).
var SOCKET_PATH, PID_PATH, SPAWN_LOCK_PATH, SPAWN_LOCK_STALE_MS, CONNECT_TIMEOUT_MS, REQUEST_TIMEOUT_MS, DAEMON_TOKEN_ENV, _socket, _connected, _buffer, _pending, MAX_BUFFER;
var init_exe_daemon_client = __esm({
  "src/lib/exe-daemon-client.ts"() {
    "use strict";
    init_config();
    init_daemon_auth();
    // Socket/PID locations; env overrides (new EXE_DAEMON_*, legacy EXE_EMBED_*)
    // win over the defaults under EXE_AI_DIR.
    SOCKET_PATH = process.env.EXE_DAEMON_SOCK ?? process.env.EXE_EMBED_SOCK ?? path5.join(EXE_AI_DIR, "exed.sock");
    PID_PATH = process.env.EXE_DAEMON_PID ?? process.env.EXE_EMBED_PID ?? path5.join(EXE_AI_DIR, "exed.pid");
    SPAWN_LOCK_PATH = path5.join(EXE_AI_DIR, "exed-spawn.lock");
    SPAWN_LOCK_STALE_MS = 3e4;   // 30s — lock older than this is abandoned
    CONNECT_TIMEOUT_MS = 15e3;   // 15s total budget for connect+spawn+retry
    REQUEST_TIMEOUT_MS = 3e4;    // 30s default per-request timeout
    DAEMON_TOKEN_ENV = "EXE_DAEMON_TOKEN";
    // Live connection state shared by handleData/connectToSocket/sendDaemonRequest.
    _socket = null;
    _connected = false;
    _buffer = "";
    _pending = /* @__PURE__ */ new Map();
    MAX_BUFFER = 1e7;            // 10MB cap before the receive buffer is dropped
  }
});
1260
+
1261
+ // src/lib/daemon-protocol.ts
1262
// Converts a bound SQL value into a JSON-safe wire form: null/undefined ->
// null, bigint -> number, boolean -> 0/1, binary data -> { __blob: base64 },
// strings/numbers pass through, anything else is stringified.
function serializeValue(v) {
  if (v == null) return null;
  switch (typeof v) {
    case "bigint":
      return Number(v);
    case "boolean":
      return v ? 1 : 0;
    case "string":
    case "number":
      return v;
    default:
      break;
  }
  if (v instanceof Uint8Array) {
    return { __blob: Buffer.from(v).toString("base64") };
  }
  if (ArrayBuffer.isView(v)) {
    // Other typed arrays / DataView: encode exactly the viewed byte range.
    return { __blob: Buffer.from(v.buffer, v.byteOffset, v.byteLength).toString("base64") };
  }
  if (v instanceof ArrayBuffer) {
    return { __blob: Buffer.from(v).toString("base64") };
  }
  return String(v);
}
1278
// Inverse of serializeValue for the blob case: { __blob: base64 } becomes a
// standalone ArrayBuffer (sliced so it doesn't alias the decode buffer's
// pool); every other value is returned unchanged.
function deserializeValue(v) {
  if (v === null) return null;
  const isBlob = typeof v === "object" && v !== null && "__blob" in v;
  if (!isBlob) return v;
  const decoded = Buffer.from(v.__blob, "base64");
  return decoded.buffer.slice(decoded.byteOffset, decoded.byteOffset + decoded.byteLength);
}
1286
// Rebuilds a libsql-style ResultSet from its serialized wire form. Each row
// is a hybrid array/object: positionally indexed values plus named column
// properties, mimicking @libsql/client rows. lastInsertRowid is revived as a
// BigInt; toJSON re-emits the original serialized shape.
function deserializeResultSet(srs) {
  const rows = srs.rows.map((obj) => {
    // Positional values in column order, with blob revival.
    const values = srs.columns.map(
      (col) => deserializeValue(obj[col] ?? null)
    );
    const row = values;
    // Mirror each value under its column name on the same array object.
    for (let i = 0; i < srs.columns.length; i++) {
      const col = srs.columns[i];
      if (col !== void 0) {
        row[col] = values[i] ?? null;
      }
    }
    // Pin `length` non-enumerably so JSON/iteration see only the data.
    Object.defineProperty(row, "length", {
      value: values.length,
      enumerable: false
    });
    return row;
  });
  return {
    columns: srs.columns,
    columnTypes: srs.columnTypes ?? [],
    rows,
    rowsAffected: srs.rowsAffected,
    // != null matches both null and undefined from the wire.
    lastInsertRowid: srs.lastInsertRowid != null ? BigInt(srs.lastInsertRowid) : void 0,
    toJSON: () => ({
      columns: srs.columns,
      columnTypes: srs.columnTypes ?? [],
      rows: srs.rows,
      rowsAffected: srs.rowsAffected,
      lastInsertRowid: srs.lastInsertRowid
    })
  };
}
1319
// esbuild lazy-init wrapper for src/lib/daemon-protocol.ts (no module state).
var init_daemon_protocol = __esm({
  "src/lib/daemon-protocol.ts"() {
    "use strict";
  }
});
1324
+
1325
+ // src/lib/db-daemon-client.ts
1326
// esbuild export shim for src/lib/db-daemon-client.ts — exposes the factory
// and the async initializer as lazy getters.
var db_daemon_client_exports = {};
__export(db_daemon_client_exports, {
  createDaemonDbClient: () => createDaemonDbClient,
  initDaemonDbClient: () => initDaemonDbClient
});
1331
// Normalizes a statement (string or { sql, args }) into the wire shape
// { sql, args } with every bound value run through serializeValue. Supports
// positional (array) and named (object) argument styles.
function normalizeStatement2(stmt) {
  if (typeof stmt === "string") {
    return { sql: stmt, args: [] };
  }
  const { sql } = stmt;
  if (Array.isArray(stmt.args)) {
    return { sql, args: stmt.args.map((v) => serializeValue(v)) };
  }
  if (stmt.args && typeof stmt.args === "object") {
    const named = Object.fromEntries(
      Object.entries(stmt.args).map(([key, val]) => [key, serializeValue(val)])
    );
    return { sql, args: named };
  }
  return { sql, args: [] };
}
1348
// Wraps a direct libsql client with daemon routing: execute/batch are sent
// over the daemon socket (single-writer) when routing is enabled and the
// socket is connected; everything else — and any transport-level failure —
// delegates to `fallbackClient`. Daemon-side SQL errors are rethrown as-is.
// The returned object adds private _enableDaemon/_isDaemonActive controls.
function createDaemonDbClient(fallbackClient) {
  let _useDaemon = false;
  const client = {
    async execute(stmt) {
      if (!_useDaemon || !isClientConnected()) {
        return fallbackClient.execute(stmt);
      }
      const { sql, args } = normalizeStatement2(stmt);
      const response = await sendDaemonRequest({
        type: "db-execute",
        sql,
        args
      });
      if (response.error) {
        const errMsg = String(response.error);
        // Transport problems fall back to the direct client; real SQL
        // errors from the daemon are surfaced to the caller.
        if (errMsg === "Not connected" || errMsg === "Request timeout" || errMsg === "Write failed" || errMsg === "DB not initialized") {
          process.stderr.write(`[db-daemon] Transport error (${errMsg}), falling back to direct
`);
          return fallbackClient.execute(stmt);
        }
        throw new Error(errMsg);
      }
      if (response.db) {
        return deserializeResultSet(response.db);
      }
      process.stderr.write("[db-daemon] Unexpected response shape, falling back to direct\n");
      return fallbackClient.execute(stmt);
    },
    async batch(stmts, mode) {
      if (!_useDaemon || !isClientConnected()) {
        return fallbackClient.batch(stmts, mode);
      }
      const statements = stmts.map(normalizeStatement2);
      const response = await sendDaemonRequest({
        type: "db-batch",
        statements,
        mode: mode ?? "deferred"
      });
      if (response.error) {
        const errMsg = String(response.error);
        if (errMsg === "Not connected" || errMsg === "Request timeout" || errMsg === "Write failed" || errMsg === "DB not initialized") {
          process.stderr.write(`[db-daemon] Batch transport error (${errMsg}), falling back to direct
`);
          return fallbackClient.batch(stmts, mode);
        }
        throw new Error(errMsg);
      }
      const batchResults = response["db-batch"];
      if (batchResults) {
        return batchResults.map(deserializeResultSet);
      }
      process.stderr.write("[db-daemon] Unexpected batch response shape, falling back to direct\n");
      return fallbackClient.batch(stmts, mode);
    },
    // Transaction support — delegate to fallback (transactions need direct connection)
    async transaction(mode) {
      return fallbackClient.transaction(mode);
    },
    // executeMultiple — delegate to fallback (used only for schema migrations)
    async executeMultiple(sql) {
      return fallbackClient.executeMultiple(sql);
    },
    // migrate — delegate to fallback
    async migrate(stmts) {
      return fallbackClient.migrate(stmts);
    },
    // Sync mode — delegate to fallback
    sync() {
      return fallbackClient.sync();
    },
    // close() only disables routing; the fallback client stays usable.
    close() {
      _useDaemon = false;
    },
    get closed() {
      return fallbackClient.closed;
    },
    get protocol() {
      return fallbackClient.protocol;
    }
  };
  return {
    ...client,
    /** Enable daemon routing (call after confirming daemon is connected) */
    _enableDaemon() {
      _useDaemon = true;
    },
    /** Check if daemon routing is active */
    _isDaemonActive() {
      return _useDaemon && isClientConnected();
    }
  };
}
1440
// Connects to (or spawns) the daemon and returns a routing client wrapped
// around `fallbackClient`, or null when running inside the daemon itself or
// when no daemon connection could be established.
async function initDaemonDbClient(fallbackClient) {
  if (process.env.EXE_IS_DAEMON === "1") return null;
  const connected = await connectEmbedDaemon();
  if (!connected) {
    process.stderr.write("[db-daemon] Daemon unavailable \u2014 using direct SQLite\n");
    return null;
  }
  const routed = createDaemonDbClient(fallbackClient);
  routed._enableDaemon();
  process.stderr.write("[db-daemon] DB routing through daemon (single-writer)\n");
  return routed;
}
1452
// esbuild lazy-init wrapper for src/lib/db-daemon-client.ts; pulls in the
// socket client and wire protocol modules it depends on.
var init_db_daemon_client = __esm({
  "src/lib/db-daemon-client.ts"() {
    "use strict";
    init_exe_daemon_client();
    init_daemon_protocol();
  }
});
1459
+
1460
+ // src/lib/database.ts
1461
// esbuild export shim for src/lib/database.ts — lazy getters over the
// database lifecycle/accessor API.
var database_exports = {};
__export(database_exports, {
  SOFT_DELETE_RETENTION_DAYS: () => SOFT_DELETE_RETENTION_DAYS,
  disposeDatabase: () => disposeDatabase,
  disposeTurso: () => disposeTurso,
  ensureSchema: () => ensureSchema,
  getClient: () => getClient,
  getRawClient: () => getRawClient,
  initDaemonClient: () => initDaemonClient,
  initDatabase: () => initDatabase,
  initTurso: () => initTurso,
  isInitialized: () => isInitialized,
  setExternalClient: () => setExternalClient
});
1475
+ import { createClient } from "@libsql/client";
1476
// (Re)initializes the database stack: tears down any prior timer/clients,
// opens the local libsql file (optionally encrypted), wraps it with retry
// logic, applies WAL pragmas best-effort, starts a periodic passive WAL
// checkpoint, and — when DATABASE_URL + EXE_USE_POSTGRES=1 — layers the
// Prisma/Postgres adapter on top with the retry client as fallback.
// Fix vs. previous revision: removed the dead `if (_walCheckpointTimer)
// clearInterval(...)` before the setInterval — the timer is unconditionally
// cleared and nulled at function entry and never reassigned in between.
async function initDatabase(config) {
  // Tear down previous state so re-init is idempotent.
  if (_walCheckpointTimer) {
    clearInterval(_walCheckpointTimer);
    _walCheckpointTimer = null;
  }
  if (_daemonClient) {
    _daemonClient.close();
    _daemonClient = null;
  }
  // Close a distinct adapter (e.g. the Prisma layer); when the adapter IS
  // the resilient client, closing _client below covers it.
  if (_adapterClient && _adapterClient !== _resilientClient) {
    _adapterClient.close();
  }
  _adapterClient = null;
  if (_client) {
    _client.close();
    _client = null;
    _resilientClient = null;
  }
  const opts = {
    url: `file:${config.dbPath}`
  };
  if (config.encryptionKey) {
    opts.encryptionKey = config.encryptionKey;
  }
  _client = createClient(opts);
  _resilientClient = wrapWithRetry(_client);
  _adapterClient = _resilientClient;
  // Pragmas are best-effort; failures must not break init.
  _client.execute("PRAGMA busy_timeout = 30000").catch(() => {
  });
  _client.execute("PRAGMA journal_mode = WAL").catch(() => {
  });
  // Periodic passive checkpoint keeps the WAL file from growing unbounded;
  // unref() so the timer never keeps the process alive.
  _walCheckpointTimer = setInterval(() => {
    _client?.execute("PRAGMA wal_checkpoint(PASSIVE)").catch(() => {
    });
  }, 3e4);
  _walCheckpointTimer.unref();
  if (process.env.DATABASE_URL && process.env.EXE_USE_POSTGRES === "1") {
    _adapterClient = await createPrismaDbAdapter(_resilientClient);
  }
}
1517
// True once initDatabase()/setExternalClient() has produced either the
// adapter-level client or the raw libsql client.
function isInitialized() {
  return Boolean(_adapterClient ?? _client);
}
1520
// Injects an externally-constructed client as the adapter-level client;
// getClient() returns it when Postgres routing is on or when no resilient
// libsql client exists.
function setExternalClient(client) {
  _adapterClient = client;
}
1523
// Returns the active database client, resolving routing precedence:
// 1) Postgres mode -> the adapter (Prisma layer);
// 2) inside the daemon -> the direct resilient client (avoid self-routing);
// 3) an active daemon routing client, if one was initialized;
// 4) otherwise the resilient retry client (or the adapter if none exists).
// Throws if initDatabase() has not run.
function getClient() {
  if (!_adapterClient) {
    throw new Error("Database client not initialized. Call initDatabase() first.");
  }
  if (process.env.DATABASE_URL && process.env.EXE_USE_POSTGRES === "1") {
    return _adapterClient;
  }
  if (process.env.EXE_IS_DAEMON === "1") {
    return _resilientClient;
  }
  if (_daemonClient && _daemonClient._isDaemonActive()) {
    return _daemonClient;
  }
  // e.g. setExternalClient() was used without initDatabase().
  if (!_resilientClient) {
    return _adapterClient;
  }
  return _resilientClient;
}
1541
// Optionally upgrades DB access to route through the daemon. Skipped in
// Postgres mode, inside the daemon itself, under Vitest, before initDatabase,
// or when already initialized. Failures are logged and swallowed — the direct
// client keeps working.
async function initDaemonClient() {
  if (process.env.DATABASE_URL && process.env.EXE_USE_POSTGRES === "1") return;
  if (process.env.EXE_IS_DAEMON === "1") return;
  if (process.env.VITEST) return;
  if (!_resilientClient) return;
  if (_daemonClient) return;
  try {
    // Lazy-load the daemon client module (esbuild __esm init + exports shim).
    const { initDaemonDbClient: initDaemonDbClient2 } = await Promise.resolve().then(() => (init_db_daemon_client(), db_daemon_client_exports));
    _daemonClient = await initDaemonDbClient2(_resilientClient);
  } catch (err) {
    process.stderr.write(
      `[database] Daemon client init failed (non-fatal): ${err instanceof Error ? err.message : String(err)}
`
    );
  }
}
1557
// Returns the unwrapped libsql client (no retry/daemon/adapter layers),
// e.g. for schema work that must hit the file directly. Throws before init.
function getRawClient() {
  if (_client === null) {
    throw new Error("Database client not initialized. Call initDatabase() first.");
  }
  return _client;
}
1563
+ async function ensureSchema() {
1564
+ const client = getRawClient();
1565
+ await client.execute("PRAGMA journal_mode = WAL");
1566
+ await client.execute("PRAGMA busy_timeout = 30000");
1567
+ await client.execute("PRAGMA wal_autocheckpoint = 1000");
1568
+ try {
1569
+ await client.execute("PRAGMA libsql_vector_search_ef = 128");
1570
+ } catch {
1571
+ }
1572
+ await client.executeMultiple(`
1573
+ CREATE TABLE IF NOT EXISTS memories (
1574
+ id TEXT PRIMARY KEY,
1575
+ agent_id TEXT NOT NULL,
1576
+ agent_role TEXT NOT NULL,
1577
+ session_id TEXT NOT NULL,
1578
+ timestamp TEXT NOT NULL,
1579
+ tool_name TEXT NOT NULL,
1580
+ project_name TEXT NOT NULL,
1581
+ has_error INTEGER NOT NULL DEFAULT 0,
1582
+ raw_text TEXT NOT NULL,
1583
+ vector F32_BLOB(1024),
1584
+ version INTEGER NOT NULL DEFAULT 0
1585
+ );
1586
+
1587
+ CREATE INDEX IF NOT EXISTS idx_memories_agent
1588
+ ON memories(agent_id);
1589
+
1590
+ CREATE INDEX IF NOT EXISTS idx_memories_timestamp
1591
+ ON memories(timestamp);
1592
+
1593
+ CREATE INDEX IF NOT EXISTS idx_memories_session
1594
+ ON memories(session_id);
1595
+
1596
+ CREATE INDEX IF NOT EXISTS idx_memories_project
1597
+ ON memories(project_name);
1598
+
1599
+ CREATE INDEX IF NOT EXISTS idx_memories_tool
1600
+ ON memories(tool_name);
1601
+
1602
+ CREATE INDEX IF NOT EXISTS idx_memories_version
1603
+ ON memories(version);
1604
+
1605
+ CREATE INDEX IF NOT EXISTS idx_memories_agent_project
1606
+ ON memories(agent_id, project_name);
1607
+ `);
1608
+ await client.executeMultiple(`
1609
+ CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts USING fts5(
1610
+ raw_text,
1611
+ content='memories',
1612
+ content_rowid='rowid'
1613
+ );
1614
+
1615
+ CREATE TRIGGER IF NOT EXISTS memories_fts_ai AFTER INSERT ON memories BEGIN
1616
+ INSERT INTO memories_fts(rowid, raw_text) VALUES (new.rowid, new.raw_text);
1617
+ END;
1618
+
1619
+ CREATE TRIGGER IF NOT EXISTS memories_fts_ad AFTER DELETE ON memories BEGIN
1620
+ INSERT INTO memories_fts(memories_fts, rowid, raw_text) VALUES('delete', old.rowid, old.raw_text);
1621
+ END;
1622
+
1623
+ CREATE TRIGGER IF NOT EXISTS memories_fts_au AFTER UPDATE ON memories
1624
+ WHEN new.status IS NULL OR new.status != 'deleted' BEGIN
1625
+ INSERT INTO memories_fts(memories_fts, rowid, raw_text) VALUES('delete', old.rowid, old.raw_text);
1626
+ INSERT INTO memories_fts(rowid, raw_text) VALUES (new.rowid, new.raw_text);
1627
+ END;
1628
+
1629
+ -- Soft-delete trigger: remove from FTS when status changes to 'deleted'
1630
+ CREATE TRIGGER IF NOT EXISTS memories_fts_soft_delete AFTER UPDATE ON memories
1631
+ WHEN new.status = 'deleted' AND (old.status IS NULL OR old.status != 'deleted') BEGIN
1632
+ INSERT INTO memories_fts(memories_fts, rowid, raw_text) VALUES('delete', old.rowid, old.raw_text);
1633
+ END;
1634
+ `);
1635
+ await client.executeMultiple(`
1636
+ CREATE TABLE IF NOT EXISTS sync_meta (
1637
+ key TEXT PRIMARY KEY,
1638
+ value TEXT NOT NULL
1639
+ );
1640
+ `);
1641
+ await client.executeMultiple(`
1642
+ CREATE TABLE IF NOT EXISTS tasks (
1643
+ id TEXT PRIMARY KEY,
1644
+ title TEXT NOT NULL,
1645
+ assigned_to TEXT NOT NULL,
1646
+ assigned_by TEXT NOT NULL,
1647
+ project_name TEXT NOT NULL,
1648
+ priority TEXT NOT NULL DEFAULT 'p1',
1649
+ status TEXT NOT NULL DEFAULT 'open',
1650
+ task_file TEXT,
1651
+ created_at TEXT NOT NULL,
1652
+ updated_at TEXT NOT NULL
1653
+ );
1654
+
1655
+ CREATE INDEX IF NOT EXISTS idx_tasks_assignee_status
1656
+ ON tasks(assigned_to, status);
1657
+ `);
1658
+ await client.executeMultiple(`
1659
+ CREATE TABLE IF NOT EXISTS behaviors (
1660
+ id TEXT PRIMARY KEY,
1661
+ agent_id TEXT NOT NULL,
1662
+ project_name TEXT,
1663
+ domain TEXT,
1664
+ content TEXT NOT NULL,
1665
+ active INTEGER NOT NULL DEFAULT 1,
1666
+ created_at TEXT NOT NULL,
1667
+ updated_at TEXT NOT NULL
1668
+ );
1669
+
1670
+ CREATE INDEX IF NOT EXISTS idx_behaviors_agent
1671
+ ON behaviors(agent_id, active);
1672
+ `);
1673
+ try {
1674
+ const coordinatorName = getCoordinatorName();
1675
+ const existing = await client.execute({
1676
+ sql: "SELECT COUNT(*) as cnt FROM behaviors WHERE agent_id = ?",
1677
+ args: [coordinatorName]
1678
+ });
1679
+ if (Number(existing.rows[0]?.cnt) === 0) {
1680
+ const seededAt = "2026-03-25T00:00:00Z";
1681
+ for (const [domain, content] of [
1682
+ ["workflow", `Don't ask "keep going?" \u2014 just keep executing phases/plans autonomously`],
1683
+ ["tool-use", "Always use create_task MCP tool, never write .md files directly for task creation"],
1684
+ ["workflow", "Auto-start reviewing when idle and reviews are pending \u2014 never ask founder for permission"]
1685
+ ]) {
1686
+ await client.execute({
1687
+ sql: `INSERT INTO behaviors (id, agent_id, project_name, domain, content, active, created_at, updated_at)
1688
+ VALUES (hex(randomblob(16)), ?, NULL, ?, ?, 1, ?, ?)`,
1689
+ args: [coordinatorName, domain, content, seededAt, seededAt]
1690
+ });
1691
+ }
1692
+ }
1693
+ } catch {
1694
+ }
1695
+ try {
1696
+ await client.execute({
1697
+ sql: `ALTER TABLE behaviors ADD COLUMN priority TEXT DEFAULT 'p1'`,
1698
+ args: []
1699
+ });
1700
+ } catch {
1701
+ }
1702
+ try {
1703
+ await client.execute({
1704
+ sql: `ALTER TABLE behaviors ADD COLUMN vector F32_BLOB(${EMBEDDING_DIM})`,
1705
+ args: []
1706
+ });
1707
+ } catch {
1708
+ }
1709
+ try {
1710
+ await client.execute({
1711
+ sql: `ALTER TABLE tasks ADD COLUMN blocked_by TEXT`,
1712
+ args: []
1713
+ });
1714
+ } catch {
1715
+ }
1716
+ try {
1717
+ await client.execute({
1718
+ sql: `ALTER TABLE tasks ADD COLUMN parent_task_id TEXT`,
1719
+ args: []
1720
+ });
1721
+ } catch {
1722
+ }
1723
+ try {
1724
+ await client.execute({
1725
+ sql: `CREATE INDEX IF NOT EXISTS idx_tasks_parent_task_id
1726
+ ON tasks(parent_task_id)
1727
+ WHERE parent_task_id IS NOT NULL`,
1728
+ args: []
1729
+ });
1730
+ } catch {
1731
+ }
1732
+ try {
1733
+ await client.execute({
1734
+ sql: `UPDATE tasks SET status = 'done' WHERE status = 'completed'`,
1735
+ args: []
1736
+ });
1737
+ } catch {
1738
+ }
1739
+ try {
1740
+ await client.execute({
1741
+ sql: `ALTER TABLE tasks ADD COLUMN reviewer TEXT`,
1742
+ args: []
1743
+ });
1744
+ } catch {
1745
+ }
1746
+ try {
1747
+ await client.execute({
1748
+ sql: `ALTER TABLE tasks ADD COLUMN context TEXT`,
1749
+ args: []
1750
+ });
1751
+ } catch {
1752
+ }
1753
+ try {
1754
+ await client.execute({
1755
+ sql: `ALTER TABLE tasks ADD COLUMN result TEXT`,
1756
+ args: []
1757
+ });
1758
+ } catch {
1759
+ }
1760
+ try {
1761
+ await client.execute({
1762
+ sql: `ALTER TABLE tasks ADD COLUMN assigned_tmux TEXT`,
1763
+ args: []
1764
+ });
1765
+ } catch {
1766
+ }
1767
+ try {
1768
+ await client.execute({
1769
+ sql: `ALTER TABLE tasks ADD COLUMN checkpoint TEXT`,
1770
+ args: []
1771
+ });
1772
+ } catch {
1773
+ }
1774
+ try {
1775
+ await client.execute({
1776
+ sql: `ALTER TABLE tasks ADD COLUMN checkpoint_count INTEGER NOT NULL DEFAULT 0`,
1777
+ args: []
1778
+ });
1779
+ } catch {
1780
+ }
1781
+ try {
1782
+ await client.execute({
1783
+ sql: `ALTER TABLE tasks ADD COLUMN complexity TEXT NOT NULL DEFAULT 'standard'`,
1784
+ args: []
1785
+ });
1786
+ } catch {
1787
+ }
1788
+ try {
1789
+ await client.execute({
1790
+ sql: `ALTER TABLE tasks ADD COLUMN session_scope TEXT`,
1791
+ args: []
1792
+ });
1793
+ } catch {
1794
+ }
1795
+ try {
1796
+ await client.execute({
1797
+ sql: `ALTER TABLE memories ADD COLUMN task_id TEXT`,
1798
+ args: []
1799
+ });
1800
+ } catch {
1801
+ }
1802
+ try {
1803
+ await client.execute({
1804
+ sql: `ALTER TABLE memories ADD COLUMN consolidated INTEGER NOT NULL DEFAULT 0`,
1805
+ args: []
1806
+ });
1807
+ } catch {
1808
+ }
1809
+ try {
1810
+ await client.execute({
1811
+ sql: `ALTER TABLE memories ADD COLUMN author_device_id TEXT`,
1812
+ args: []
1813
+ });
1814
+ } catch {
1815
+ }
1816
+ try {
1817
+ await client.execute({
1818
+ sql: `ALTER TABLE memories ADD COLUMN scope TEXT NOT NULL DEFAULT 'business'`,
1819
+ args: []
1820
+ });
1821
+ } catch {
1822
+ }
1823
+ await client.executeMultiple(`
1824
+ CREATE TABLE IF NOT EXISTS consolidations (
1825
+ id TEXT PRIMARY KEY,
1826
+ consolidated_memory_id TEXT NOT NULL,
1827
+ source_memory_id TEXT NOT NULL,
1828
+ created_at TEXT NOT NULL
1829
+ );
1830
+
1831
+ CREATE INDEX IF NOT EXISTS idx_consolidations_source
1832
+ ON consolidations(source_memory_id);
1833
+
1834
+ CREATE INDEX IF NOT EXISTS idx_consolidations_consolidated
1835
+ ON consolidations(consolidated_memory_id);
1836
+ `);
1837
+ await client.executeMultiple(`
1838
+ CREATE TABLE IF NOT EXISTS reminders (
1839
+ id TEXT PRIMARY KEY,
1840
+ text TEXT NOT NULL,
1841
+ created_at TEXT NOT NULL,
1842
+ due_date TEXT,
1843
+ completed_at TEXT
1844
+ );
1845
+ `);
1846
+ await client.executeMultiple(`
1847
+ CREATE TABLE IF NOT EXISTS notifications (
1848
+ id TEXT PRIMARY KEY,
1849
+ agent_id TEXT NOT NULL,
1850
+ agent_role TEXT NOT NULL,
1851
+ event TEXT NOT NULL,
1852
+ project TEXT NOT NULL,
1853
+ summary TEXT NOT NULL,
1854
+ task_file TEXT,
1855
+ session_scope TEXT,
1856
+ read INTEGER NOT NULL DEFAULT 0,
1857
+ created_at TEXT NOT NULL
1858
+ );
1859
+
1860
+ CREATE INDEX IF NOT EXISTS idx_notifications_read
1861
+ ON notifications(read);
1862
+
1863
+ CREATE INDEX IF NOT EXISTS idx_notifications_agent
1864
+ ON notifications(agent_id, session_scope);
1865
+
1866
+ CREATE INDEX IF NOT EXISTS idx_notifications_task_file
1867
+ ON notifications(task_file);
1868
+ `);
1869
+ await client.executeMultiple(`
1870
+ CREATE TABLE IF NOT EXISTS schedules (
1871
+ id TEXT PRIMARY KEY,
1872
+ cron TEXT NOT NULL,
1873
+ description TEXT NOT NULL,
1874
+ job_type TEXT NOT NULL DEFAULT 'report',
1875
+ prompt TEXT,
1876
+ assigned_to TEXT,
1877
+ project_name TEXT,
1878
+ active INTEGER NOT NULL DEFAULT 1,
1879
+ use_crontab INTEGER NOT NULL DEFAULT 0,
1880
+ created_at TEXT NOT NULL
1881
+ );
1882
+ `);
1883
+ await client.executeMultiple(`
1884
+ CREATE TABLE IF NOT EXISTS device_registry (
1885
+ device_id TEXT PRIMARY KEY,
1886
+ friendly_name TEXT NOT NULL,
1887
+ hostname TEXT NOT NULL,
1888
+ projects TEXT NOT NULL DEFAULT '[]',
1889
+ agents TEXT NOT NULL DEFAULT '[]',
1890
+ connected INTEGER DEFAULT 0,
1891
+ last_seen TEXT NOT NULL
1892
+ );
1893
+ `);
1894
+ await client.executeMultiple(`
1895
+ CREATE TABLE IF NOT EXISTS messages (
1896
+ id TEXT PRIMARY KEY,
1897
+ from_agent TEXT NOT NULL,
1898
+ from_device TEXT NOT NULL DEFAULT 'local',
1899
+ target_agent TEXT NOT NULL,
1900
+ target_project TEXT,
1901
+ target_device TEXT NOT NULL DEFAULT 'local',
1902
+ session_scope TEXT,
1903
+ content TEXT NOT NULL,
1904
+ priority TEXT DEFAULT 'normal',
1905
+ status TEXT DEFAULT 'pending',
1906
+ server_seq INTEGER,
1907
+ retry_count INTEGER DEFAULT 0,
1908
+ created_at TEXT NOT NULL,
1909
+ delivered_at TEXT,
1910
+ processed_at TEXT,
1911
+ failed_at TEXT,
1912
+ failure_reason TEXT
1913
+ );
1914
+
1915
+ CREATE INDEX IF NOT EXISTS idx_messages_target
1916
+ ON messages(target_agent, session_scope, status);
1917
+
1918
+ CREATE INDEX IF NOT EXISTS idx_messages_conversation_order
1919
+ ON messages(target_agent, session_scope, from_agent, server_seq);
1920
+ `);
1921
+ try {
1922
+ await client.execute({
1923
+ sql: `ALTER TABLE notifications ADD COLUMN session_scope TEXT`,
1924
+ args: []
1925
+ });
1926
+ } catch {
1927
+ }
1928
+ try {
1929
+ await client.execute({
1930
+ sql: `ALTER TABLE messages ADD COLUMN session_scope TEXT`,
1931
+ args: []
1932
+ });
1933
+ } catch {
1934
+ }
1935
+ await client.executeMultiple(`
1936
+ CREATE INDEX IF NOT EXISTS idx_notifications_agent_scope_read
1937
+ ON notifications(agent_id, session_scope, read, created_at);
1938
+
1939
+ CREATE INDEX IF NOT EXISTS idx_messages_target_scope_status
1940
+ ON messages(target_agent, session_scope, status, created_at);
1941
+ `);
1942
+ try {
1943
+ await client.execute({
1944
+ sql: `UPDATE memories SET project_name = 'exe-create' WHERE project_name = 'web'`,
1945
+ args: []
1946
+ });
1947
+ await client.execute({
1948
+ sql: `UPDATE memories SET project_name = 'exe-os' WHERE project_name = 'worker'`,
1949
+ args: []
1950
+ });
1951
+ await client.execute({
1952
+ sql: `UPDATE tasks SET project_name = 'exe-create' WHERE project_name = 'web'`,
1953
+ args: []
1954
+ });
1955
+ await client.execute({
1956
+ sql: `UPDATE tasks SET project_name = 'exe-os' WHERE project_name = 'worker'`,
1957
+ args: []
1958
+ });
1959
+ } catch {
1960
+ }
1961
+ await client.executeMultiple(`
1962
+ CREATE TABLE IF NOT EXISTS trajectories (
1963
+ id TEXT PRIMARY KEY,
1964
+ task_id TEXT NOT NULL,
1965
+ agent_id TEXT NOT NULL,
1966
+ project_name TEXT NOT NULL,
1967
+ task_title TEXT NOT NULL,
1968
+ signature TEXT NOT NULL,
1969
+ signature_hash TEXT NOT NULL,
1970
+ tool_count INTEGER NOT NULL,
1971
+ skill_id TEXT,
1972
+ created_at TEXT NOT NULL
1973
+ );
1974
+
1975
+ CREATE INDEX IF NOT EXISTS idx_trajectories_hash
1976
+ ON trajectories(signature_hash);
1977
+
1978
+ CREATE INDEX IF NOT EXISTS idx_trajectories_agent
1979
+ ON trajectories(agent_id);
1980
+ `);
1981
+ try {
1982
+ await client.execute("ALTER TABLE trajectories ADD COLUMN skill_id TEXT");
1983
+ } catch {
1984
+ }
1985
+ await client.executeMultiple(`
1986
+ CREATE TABLE IF NOT EXISTS consolidations (
1987
+ id TEXT PRIMARY KEY,
1988
+ consolidated_memory_id TEXT NOT NULL,
1989
+ source_memory_id TEXT NOT NULL,
1990
+ created_at TEXT NOT NULL
1991
+ );
1992
+
1993
+ CREATE INDEX IF NOT EXISTS idx_consolidations_source
1994
+ ON consolidations(source_memory_id);
1995
+ `);
1996
+ await client.executeMultiple(`
1997
+ CREATE TABLE IF NOT EXISTS audit_trail (
1998
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
1999
+ timestamp TEXT NOT NULL,
2000
+ session_id TEXT NOT NULL,
2001
+ agent_id TEXT NOT NULL,
2002
+ tool TEXT NOT NULL,
2003
+ input TEXT,
2004
+ decision TEXT NOT NULL,
2005
+ reason TEXT,
2006
+ is_customer_facing INTEGER NOT NULL DEFAULT 0
2007
+ );
2008
+
2009
+ CREATE INDEX IF NOT EXISTS idx_audit_trail_agent
2010
+ ON audit_trail(agent_id, timestamp);
2011
+
2012
+ CREATE INDEX IF NOT EXISTS idx_audit_trail_session
2013
+ ON audit_trail(session_id);
2014
+ `);
2015
+ try {
2016
+ await client.execute({
2017
+ sql: `ALTER TABLE memories ADD COLUMN consolidated INTEGER NOT NULL DEFAULT 0`,
2018
+ args: []
2019
+ });
2020
+ } catch {
2021
+ }
2022
+ try {
2023
+ await client.execute({
2024
+ sql: `ALTER TABLE memories ADD COLUMN importance INTEGER DEFAULT 5`,
2025
+ args: []
2026
+ });
2027
+ } catch {
2028
+ }
2029
+ try {
2030
+ await client.execute({
2031
+ sql: `ALTER TABLE memories ADD COLUMN status TEXT DEFAULT 'active'`,
2032
+ args: []
2033
+ });
2034
+ } catch {
2035
+ }
2036
+ try {
2037
+ await client.execute({
2038
+ sql: `ALTER TABLE memories ADD COLUMN deleted_at TEXT`,
2039
+ args: []
2040
+ });
2041
+ } catch {
2042
+ }
2043
+ try {
2044
+ await client.execute({
2045
+ sql: `ALTER TABLE memories ADD COLUMN confidence REAL DEFAULT 0.7`,
2046
+ args: []
2047
+ });
2048
+ } catch {
2049
+ }
2050
+ try {
2051
+ await client.execute({
2052
+ sql: `ALTER TABLE memories ADD COLUMN last_accessed TEXT`,
2053
+ args: []
2054
+ });
2055
+ } catch {
2056
+ }
2057
+ try {
2058
+ await client.execute({
2059
+ sql: `UPDATE memories SET last_accessed = timestamp WHERE last_accessed IS NULL`,
2060
+ args: []
2061
+ });
2062
+ } catch {
2063
+ }
2064
+ try {
2065
+ await client.execute({
2066
+ sql: `ALTER TABLE memories ADD COLUMN wiki_synced INTEGER DEFAULT 0`,
2067
+ args: []
2068
+ });
2069
+ } catch {
2070
+ }
2071
+ try {
2072
+ await client.execute({
2073
+ sql: `ALTER TABLE memories ADD COLUMN graph_extracted INTEGER DEFAULT 0`,
2074
+ args: []
2075
+ });
2076
+ } catch {
2077
+ }
2078
+ for (const col of [
2079
+ "ALTER TABLE memories ADD COLUMN content_hash TEXT",
2080
+ "ALTER TABLE memories ADD COLUMN graph_extracted_hash TEXT"
2081
+ ]) {
2082
+ try {
2083
+ await client.execute(col);
2084
+ } catch {
2085
+ }
2086
+ }
2087
+ try {
2088
+ await client.execute(
2089
+ `CREATE INDEX IF NOT EXISTS idx_memories_content_hash ON memories(content_hash, agent_id)`
2090
+ );
2091
+ } catch {
2092
+ }
2093
+ try {
2094
+ await client.execute(
2095
+ `CREATE INDEX IF NOT EXISTS idx_memories_scoped_content_hash
2096
+ ON memories(content_hash, agent_id, project_name, memory_type)
2097
+ WHERE content_hash IS NOT NULL`
2098
+ );
2099
+ } catch {
2100
+ }
2101
+ await client.executeMultiple(`
2102
+ CREATE TABLE IF NOT EXISTS entities (
2103
+ id TEXT PRIMARY KEY,
2104
+ name TEXT NOT NULL,
2105
+ type TEXT NOT NULL,
2106
+ first_seen TEXT NOT NULL,
2107
+ last_seen TEXT NOT NULL,
2108
+ properties TEXT DEFAULT '{}',
2109
+ UNIQUE(name, type)
2110
+ );
2111
+
2112
+ CREATE TABLE IF NOT EXISTS relationships (
2113
+ id TEXT PRIMARY KEY,
2114
+ source_entity_id TEXT NOT NULL,
2115
+ target_entity_id TEXT NOT NULL,
2116
+ type TEXT NOT NULL,
2117
+ weight REAL DEFAULT 1.0,
2118
+ timestamp TEXT NOT NULL,
2119
+ properties TEXT DEFAULT '{}',
2120
+ UNIQUE(source_entity_id, target_entity_id, type)
2121
+ );
2122
+
2123
+ CREATE TABLE IF NOT EXISTS entity_memories (
2124
+ entity_id TEXT NOT NULL,
2125
+ memory_id TEXT NOT NULL,
2126
+ PRIMARY KEY (entity_id, memory_id)
2127
+ );
2128
+
2129
+ CREATE TABLE IF NOT EXISTS relationship_memories (
2130
+ relationship_id TEXT NOT NULL,
2131
+ memory_id TEXT NOT NULL,
2132
+ PRIMARY KEY (relationship_id, memory_id)
2133
+ );
2134
+
2135
+ CREATE INDEX IF NOT EXISTS idx_entities_name ON entities(name);
2136
+ CREATE INDEX IF NOT EXISTS idx_entities_type ON entities(type);
2137
+ CREATE INDEX IF NOT EXISTS idx_relationships_source ON relationships(source_entity_id);
2138
+ CREATE INDEX IF NOT EXISTS idx_relationships_target ON relationships(target_entity_id);
2139
+
2140
+ CREATE TABLE IF NOT EXISTS hyperedges (
2141
+ id TEXT PRIMARY KEY,
2142
+ label TEXT NOT NULL,
2143
+ relation TEXT NOT NULL,
2144
+ confidence REAL DEFAULT 1.0,
2145
+ timestamp TEXT NOT NULL
2146
+ );
2147
+
2148
+ CREATE TABLE IF NOT EXISTS hyperedge_nodes (
2149
+ hyperedge_id TEXT NOT NULL,
2150
+ entity_id TEXT NOT NULL,
2151
+ PRIMARY KEY (hyperedge_id, entity_id)
2152
+ );
2153
+
2154
+ CREATE VIRTUAL TABLE IF NOT EXISTS entities_fts USING fts5(
2155
+ name,
2156
+ content=entities,
2157
+ content_rowid=rowid
2158
+ );
2159
+
2160
+ CREATE TRIGGER IF NOT EXISTS entities_fts_ai AFTER INSERT ON entities BEGIN
2161
+ INSERT INTO entities_fts(rowid, name) VALUES (new.rowid, new.name);
2162
+ END;
2163
+
2164
+ CREATE TRIGGER IF NOT EXISTS entities_fts_ad AFTER DELETE ON entities BEGIN
2165
+ INSERT INTO entities_fts(entities_fts, rowid, name) VALUES('delete', old.rowid, old.name);
2166
+ END;
2167
+
2168
+ CREATE TRIGGER IF NOT EXISTS entities_fts_au AFTER UPDATE ON entities BEGIN
2169
+ INSERT INTO entities_fts(entities_fts, rowid, name) VALUES('delete', old.rowid, old.name);
2170
+ INSERT INTO entities_fts(rowid, name) VALUES (new.rowid, new.name);
2171
+ END;
2172
+ `);
2173
+ try {
2174
+ await client.execute("INSERT INTO entities_fts(entities_fts) VALUES('rebuild')");
2175
+ } catch {
2176
+ }
2177
+ await client.executeMultiple(`
2178
+ CREATE TABLE IF NOT EXISTS entity_aliases (
2179
+ alias TEXT NOT NULL PRIMARY KEY,
2180
+ canonical_entity_id TEXT NOT NULL
2181
+ );
2182
+ CREATE INDEX IF NOT EXISTS idx_entity_aliases_canonical ON entity_aliases(canonical_entity_id);
2183
+ `);
2184
+ for (const col of [
2185
+ "ALTER TABLE relationships ADD COLUMN confidence REAL DEFAULT 1.0",
2186
+ "ALTER TABLE relationships ADD COLUMN confidence_label TEXT DEFAULT 'extracted'"
2187
+ ]) {
2188
+ try {
2189
+ await client.execute(col);
2190
+ } catch {
2191
+ }
2192
+ }
2193
+ try {
2194
+ await client.execute(
2195
+ `CREATE INDEX IF NOT EXISTS idx_memories_status ON memories(status)`
2196
+ );
2197
+ } catch {
2198
+ }
2199
+ await client.executeMultiple(`
2200
+ CREATE TABLE IF NOT EXISTS identity (
2201
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
2202
+ agent_id TEXT NOT NULL UNIQUE,
2203
+ content_hash TEXT NOT NULL,
2204
+ updated_at TEXT NOT NULL,
2205
+ updated_by TEXT NOT NULL
2206
+ );
2207
+
2208
+ CREATE INDEX IF NOT EXISTS idx_identity_agent ON identity(agent_id);
2209
+ `);
2210
+ await client.executeMultiple(`
2211
+ CREATE TABLE IF NOT EXISTS chat_history (
2212
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
2213
+ session_id TEXT NOT NULL,
2214
+ role TEXT NOT NULL,
2215
+ content TEXT NOT NULL,
2216
+ tool_name TEXT,
2217
+ tool_id TEXT,
2218
+ is_error INTEGER NOT NULL DEFAULT 0,
2219
+ timestamp INTEGER NOT NULL
2220
+ );
2221
+
2222
+ CREATE INDEX IF NOT EXISTS idx_chat_history_session
2223
+ ON chat_history(session_id, id);
2224
+ `);
2225
+ await client.executeMultiple(`
2226
+ CREATE TABLE IF NOT EXISTS workspaces (
2227
+ id TEXT PRIMARY KEY,
2228
+ slug TEXT NOT NULL UNIQUE,
2229
+ name TEXT NOT NULL,
2230
+ owner_agent_id TEXT,
2231
+ created_at TEXT NOT NULL,
2232
+ metadata TEXT
2233
+ );
2234
+
2235
+ CREATE INDEX IF NOT EXISTS idx_workspaces_slug
2236
+ ON workspaces(slug);
2237
+ `);
2238
+ await client.executeMultiple(`
2239
+ CREATE TABLE IF NOT EXISTS documents (
2240
+ id TEXT PRIMARY KEY,
2241
+ workspace_id TEXT NOT NULL,
2242
+ filename TEXT NOT NULL,
2243
+ mime TEXT,
2244
+ source_type TEXT,
2245
+ user_id TEXT,
2246
+ uploaded_at TEXT NOT NULL,
2247
+ metadata TEXT,
2248
+ FOREIGN KEY (workspace_id) REFERENCES workspaces(id)
2249
+ );
2250
+
2251
+ CREATE INDEX IF NOT EXISTS idx_documents_workspace
2252
+ ON documents(workspace_id);
2253
+
2254
+ CREATE INDEX IF NOT EXISTS idx_documents_user
2255
+ ON documents(user_id);
2256
+ `);
2257
+ for (const column of [
2258
+ "workspace_id TEXT",
2259
+ "document_id TEXT",
2260
+ "user_id TEXT",
2261
+ "char_offset INTEGER",
2262
+ "page_number INTEGER"
2263
+ ]) {
2264
+ try {
2265
+ await client.execute({
2266
+ sql: `ALTER TABLE memories ADD COLUMN ${column}`,
2267
+ args: []
2268
+ });
2269
+ } catch {
2270
+ }
2271
+ }
2272
+ for (const col of [
2273
+ "ALTER TABLE memories ADD COLUMN source_path TEXT",
2274
+ "ALTER TABLE memories ADD COLUMN source_type TEXT DEFAULT 'text'"
2275
+ ]) {
2276
+ try {
2277
+ await client.execute(col);
2278
+ } catch {
2279
+ }
2280
+ }
2281
+ await client.executeMultiple(`
2282
+ CREATE INDEX IF NOT EXISTS idx_memories_workspace
2283
+ ON memories(workspace_id);
2284
+
2285
+ CREATE INDEX IF NOT EXISTS idx_memories_document
2286
+ ON memories(document_id);
2287
+
2288
+ CREATE INDEX IF NOT EXISTS idx_memories_user
2289
+ ON memories(user_id);
2290
+ `);
2291
+ await client.executeMultiple(`
2292
+ CREATE TABLE IF NOT EXISTS session_kills (
2293
+ id TEXT PRIMARY KEY,
2294
+ session_name TEXT NOT NULL,
2295
+ agent_id TEXT NOT NULL,
2296
+ killed_at TIMESTAMP NOT NULL,
2297
+ reason TEXT NOT NULL,
2298
+ ticks_idle INTEGER,
2299
+ estimated_tokens_saved INTEGER
2300
+ );
2301
+
2302
+ CREATE INDEX IF NOT EXISTS idx_session_kills_killed_at
2303
+ ON session_kills(killed_at);
2304
+
2305
+ CREATE INDEX IF NOT EXISTS idx_session_kills_agent
2306
+ ON session_kills(agent_id);
2307
+ `);
2308
+ await client.execute(`
2309
+ CREATE TABLE IF NOT EXISTS company_procedures (
2310
+ id TEXT PRIMARY KEY,
2311
+ title TEXT NOT NULL,
2312
+ content TEXT NOT NULL,
2313
+ priority TEXT NOT NULL DEFAULT 'p0',
2314
+ domain TEXT,
2315
+ active INTEGER NOT NULL DEFAULT 1,
2316
+ created_at TEXT NOT NULL,
2317
+ updated_at TEXT NOT NULL
2318
+ )
2319
+ `);
2320
+ const legacyProcedureObject = await client.execute({
2321
+ sql: "SELECT type FROM sqlite_master WHERE name = 'global_procedures'",
2322
+ args: []
2323
+ });
2324
+ const legacyProcedureType = legacyProcedureObject.rows[0]?.type == null ? null : String(legacyProcedureObject.rows[0].type);
2325
+ if (legacyProcedureType === "table") {
2326
+ await client.execute(`
2327
+ INSERT OR IGNORE INTO company_procedures
2328
+ (id, title, content, priority, domain, active, created_at, updated_at)
2329
+ SELECT id, title, content, priority, domain, active, created_at, updated_at
2330
+ FROM global_procedures
2331
+ `);
2332
+ await client.executeMultiple(`
2333
+ CREATE TRIGGER IF NOT EXISTS global_procedures_mirror_insert
2334
+ AFTER INSERT ON global_procedures
2335
+ BEGIN
2336
+ INSERT OR IGNORE INTO company_procedures
2337
+ (id, title, content, priority, domain, active, created_at, updated_at)
2338
+ VALUES
2339
+ (NEW.id, NEW.title, NEW.content, NEW.priority, NEW.domain, NEW.active, NEW.created_at, NEW.updated_at);
2340
+ END;
2341
+
2342
+ CREATE TRIGGER IF NOT EXISTS global_procedures_mirror_update
2343
+ AFTER UPDATE ON global_procedures
2344
+ BEGIN
2345
+ UPDATE company_procedures
2346
+ SET title = NEW.title,
2347
+ content = NEW.content,
2348
+ priority = NEW.priority,
2349
+ domain = NEW.domain,
2350
+ active = NEW.active,
2351
+ created_at = NEW.created_at,
2352
+ updated_at = NEW.updated_at
2353
+ WHERE id = OLD.id;
2354
+ END;
2355
+ `);
2356
+ } else {
2357
+ await client.execute(`
2358
+ CREATE VIEW IF NOT EXISTS global_procedures AS
2359
+ SELECT id, title, content, priority, domain, active, created_at, updated_at
2360
+ FROM company_procedures
2361
+ `);
2362
+ await client.executeMultiple(`
2363
+ CREATE TRIGGER IF NOT EXISTS global_procedures_insert
2364
+ INSTEAD OF INSERT ON global_procedures
2365
+ BEGIN
2366
+ INSERT INTO company_procedures
2367
+ (id, title, content, priority, domain, active, created_at, updated_at)
2368
+ VALUES
2369
+ (NEW.id, NEW.title, NEW.content, NEW.priority, NEW.domain, NEW.active, NEW.created_at, NEW.updated_at);
2370
+ END;
2371
+
2372
+ CREATE TRIGGER IF NOT EXISTS global_procedures_update
2373
+ INSTEAD OF UPDATE ON global_procedures
2374
+ BEGIN
2375
+ UPDATE company_procedures
2376
+ SET title = NEW.title,
2377
+ content = NEW.content,
2378
+ priority = NEW.priority,
2379
+ domain = NEW.domain,
2380
+ active = NEW.active,
2381
+ created_at = NEW.created_at,
2382
+ updated_at = NEW.updated_at
2383
+ WHERE id = OLD.id;
2384
+ END;
2385
+ `);
2386
+ }
2387
+ await client.executeMultiple(`
2388
+ CREATE TABLE IF NOT EXISTS conversations (
2389
+ id TEXT PRIMARY KEY,
2390
+ platform TEXT NOT NULL,
2391
+ external_id TEXT,
2392
+ sender_id TEXT NOT NULL,
2393
+ sender_name TEXT,
2394
+ sender_phone TEXT,
2395
+ sender_email TEXT,
2396
+ recipient_id TEXT,
2397
+ channel_id TEXT NOT NULL,
2398
+ thread_id TEXT,
2399
+ reply_to_id TEXT,
2400
+ content_text TEXT,
2401
+ content_media TEXT,
2402
+ content_metadata TEXT,
2403
+ agent_response TEXT,
2404
+ agent_name TEXT,
2405
+ timestamp TEXT NOT NULL,
2406
+ ingested_at TEXT NOT NULL
2407
+ );
2408
+
2409
+ CREATE INDEX IF NOT EXISTS idx_conversations_platform
2410
+ ON conversations(platform);
2411
+
2412
+ CREATE INDEX IF NOT EXISTS idx_conversations_sender
2413
+ ON conversations(sender_id);
2414
+
2415
+ CREATE INDEX IF NOT EXISTS idx_conversations_timestamp
2416
+ ON conversations(timestamp);
2417
+
2418
+ CREATE INDEX IF NOT EXISTS idx_conversations_thread
2419
+ ON conversations(thread_id);
2420
+
2421
+ CREATE INDEX IF NOT EXISTS idx_conversations_channel
2422
+ ON conversations(channel_id);
2423
+ `);
2424
+ await client.executeMultiple(`
2425
+ CREATE TABLE IF NOT EXISTS session_agent_map (
2426
+ session_uuid TEXT PRIMARY KEY,
2427
+ agent_id TEXT NOT NULL,
2428
+ session_name TEXT,
2429
+ task_id TEXT,
2430
+ project_name TEXT,
2431
+ started_at TEXT NOT NULL,
2432
+ cache_cold_count INTEGER NOT NULL DEFAULT 0
2433
+ );
2434
+
2435
+ CREATE INDEX IF NOT EXISTS idx_session_agent_map_agent
2436
+ ON session_agent_map(agent_id);
2437
+ `);
2438
+ await client.executeMultiple(`
2439
+ CREATE TABLE IF NOT EXISTS agent_file_reads (
2440
+ session_uuid TEXT NOT NULL,
2441
+ agent_id TEXT NOT NULL,
2442
+ file_path TEXT NOT NULL,
2443
+ read_at TEXT NOT NULL,
2444
+ commit_hash TEXT,
2445
+ PRIMARY KEY (session_uuid, file_path)
2446
+ );
2447
+
2448
+ CREATE INDEX IF NOT EXISTS idx_agent_file_reads_agent_read_at
2449
+ ON agent_file_reads(agent_id, read_at);
2450
+ `);
2451
+ try {
2452
+ const mapCount = await client.execute({ sql: `SELECT COUNT(*) as cnt FROM session_agent_map`, args: [] });
2453
+ if (Number(mapCount.rows[0]?.cnt ?? 0) === 0) {
2454
+ await client.execute({
2455
+ sql: `INSERT OR IGNORE INTO session_agent_map (session_uuid, agent_id, session_name, started_at)
2456
+ SELECT session_id, agent_id, '', MIN(timestamp)
2457
+ FROM memories
2458
+ WHERE session_id IS NOT NULL AND session_id != '' AND agent_id IS NOT NULL AND agent_id != ''
2459
+ GROUP BY session_id, agent_id`,
2460
+ args: []
2461
+ });
2462
+ }
2463
+ } catch {
2464
+ }
2465
+ try {
2466
+ await client.execute({
2467
+ sql: `ALTER TABLE session_agent_map ADD COLUMN cache_cold_count INTEGER NOT NULL DEFAULT 0`,
2468
+ args: []
2469
+ });
2470
+ } catch {
2471
+ }
2472
+ try {
2473
+ await client.execute({
2474
+ sql: `ALTER TABLE tasks ADD COLUMN budget_tokens INTEGER`,
2475
+ args: []
2476
+ });
2477
+ } catch {
2478
+ }
2479
+ try {
2480
+ await client.execute({
2481
+ sql: `ALTER TABLE tasks ADD COLUMN budget_fallback_model TEXT`,
2482
+ args: []
2483
+ });
2484
+ } catch {
2485
+ }
2486
+ try {
2487
+ await client.execute({
2488
+ sql: `ALTER TABLE tasks ADD COLUMN tokens_used INTEGER DEFAULT 0`,
2489
+ args: []
2490
+ });
2491
+ } catch {
2492
+ }
2493
+ try {
2494
+ await client.execute({
2495
+ sql: `ALTER TABLE tasks ADD COLUMN tokens_warned_at INTEGER`,
2496
+ args: []
2497
+ });
2498
+ } catch {
2499
+ }
2500
+ await client.executeMultiple(`
2501
+ CREATE VIRTUAL TABLE IF NOT EXISTS conversations_fts USING fts5(
2502
+ content_text,
2503
+ sender_name,
2504
+ agent_response,
2505
+ content='conversations',
2506
+ content_rowid='rowid'
2507
+ );
2508
+
2509
+ CREATE TRIGGER IF NOT EXISTS conversations_fts_ai AFTER INSERT ON conversations BEGIN
2510
+ INSERT INTO conversations_fts(rowid, content_text, sender_name, agent_response)
2511
+ VALUES (new.rowid, new.content_text, new.sender_name, new.agent_response);
2512
+ END;
2513
+
2514
+ CREATE TRIGGER IF NOT EXISTS conversations_fts_ad AFTER DELETE ON conversations BEGIN
2515
+ INSERT INTO conversations_fts(conversations_fts, rowid, content_text, sender_name, agent_response)
2516
+ VALUES('delete', old.rowid, old.content_text, old.sender_name, old.agent_response);
2517
+ END;
2518
+
2519
+ CREATE TRIGGER IF NOT EXISTS conversations_fts_au AFTER UPDATE ON conversations BEGIN
2520
+ INSERT INTO conversations_fts(conversations_fts, rowid, content_text, sender_name, agent_response)
2521
+ VALUES('delete', old.rowid, old.content_text, old.sender_name, old.agent_response);
2522
+ INSERT INTO conversations_fts(rowid, content_text, sender_name, agent_response)
2523
+ VALUES (new.rowid, new.content_text, new.sender_name, new.agent_response);
2524
+ END;
2525
+ `);
2526
+ await client.executeMultiple(`
2527
+ CREATE TABLE IF NOT EXISTS memory_cards (
2528
+ id TEXT PRIMARY KEY,
2529
+ memory_id TEXT NOT NULL,
2530
+ agent_id TEXT NOT NULL,
2531
+ session_id TEXT NOT NULL,
2532
+ project_name TEXT,
2533
+ timestamp TEXT NOT NULL,
2534
+ card_type TEXT NOT NULL,
2535
+ subject TEXT,
2536
+ predicate TEXT,
2537
+ object TEXT,
2538
+ content TEXT NOT NULL,
2539
+ source_ref TEXT,
2540
+ confidence REAL DEFAULT 0.6,
2541
+ active INTEGER DEFAULT 1,
2542
+ created_at TEXT NOT NULL
2543
+ );
2544
+
2545
+ CREATE INDEX IF NOT EXISTS idx_memory_cards_agent
2546
+ ON memory_cards(agent_id, active, timestamp);
2547
+
2548
+ CREATE INDEX IF NOT EXISTS idx_memory_cards_memory
2549
+ ON memory_cards(memory_id);
2550
+
2551
+ CREATE VIRTUAL TABLE IF NOT EXISTS memory_cards_fts
2552
+ USING fts5(content, subject, predicate, object, content='memory_cards', content_rowid='rowid');
2553
+
2554
+ CREATE TRIGGER IF NOT EXISTS memory_cards_fts_ai AFTER INSERT ON memory_cards BEGIN
2555
+ INSERT INTO memory_cards_fts(rowid, content, subject, predicate, object)
2556
+ VALUES (new.rowid, new.content, new.subject, new.predicate, new.object);
2557
+ END;
2558
+
2559
+ CREATE TRIGGER IF NOT EXISTS memory_cards_fts_ad AFTER DELETE ON memory_cards BEGIN
2560
+ INSERT INTO memory_cards_fts(memory_cards_fts, rowid, content, subject, predicate, object)
2561
+ VALUES('delete', old.rowid, old.content, old.subject, old.predicate, old.object);
2562
+ END;
2563
+
2564
+ CREATE TRIGGER IF NOT EXISTS memory_cards_fts_au AFTER UPDATE ON memory_cards BEGIN
2565
+ INSERT INTO memory_cards_fts(memory_cards_fts, rowid, content, subject, predicate, object)
2566
+ VALUES('delete', old.rowid, old.content, old.subject, old.predicate, old.object);
2567
+ INSERT INTO memory_cards_fts(rowid, content, subject, predicate, object)
2568
+ VALUES (new.rowid, new.content, new.subject, new.predicate, new.object);
2569
+ END;
2570
+ `);
2571
+ await client.executeMultiple(`
2572
+ CREATE TABLE IF NOT EXISTS agent_sessions (
2573
+ id TEXT PRIMARY KEY,
2574
+ agent_id TEXT NOT NULL,
2575
+ project_name TEXT,
2576
+ started_at TEXT NOT NULL,
2577
+ last_event_at TEXT NOT NULL,
2578
+ event_count INTEGER NOT NULL DEFAULT 0,
2579
+ properties TEXT DEFAULT '{}'
2580
+ );
2581
+
2582
+ CREATE INDEX IF NOT EXISTS idx_agent_sessions_agent_time
2583
+ ON agent_sessions(agent_id, started_at);
2584
+
2585
+ CREATE TABLE IF NOT EXISTS agent_goals (
2586
+ id TEXT PRIMARY KEY,
2587
+ statement TEXT NOT NULL,
2588
+ owner_agent_id TEXT,
2589
+ project_name TEXT,
2590
+ status TEXT NOT NULL DEFAULT 'open',
2591
+ priority INTEGER NOT NULL DEFAULT 5,
2592
+ success_criteria TEXT,
2593
+ parent_goal_id TEXT,
2594
+ due_at TEXT,
2595
+ achieved_at TEXT,
2596
+ supersedes_id TEXT,
2597
+ created_at TEXT NOT NULL,
2598
+ updated_at TEXT NOT NULL,
2599
+ source_memory_id TEXT
2600
+ );
2601
+
2602
+ CREATE INDEX IF NOT EXISTS idx_agent_goals_project_status
2603
+ ON agent_goals(project_name, status, priority);
2604
+
2605
+ CREATE TABLE IF NOT EXISTS agent_events (
2606
+ id TEXT PRIMARY KEY,
2607
+ event_type TEXT NOT NULL,
2608
+ occurred_at TEXT NOT NULL,
2609
+ sequence_index INTEGER NOT NULL,
2610
+ actor_agent_id TEXT,
2611
+ agent_role TEXT,
2612
+ project_name TEXT,
2613
+ session_id TEXT,
2614
+ task_id TEXT,
2615
+ goal_id TEXT,
2616
+ parent_event_id TEXT,
2617
+ intention TEXT,
2618
+ outcome TEXT,
2619
+ evidence_memory_id TEXT,
2620
+ impact TEXT,
2621
+ payload TEXT DEFAULT '{}',
2622
+ created_at TEXT NOT NULL
2623
+ );
2624
+
2625
+ CREATE INDEX IF NOT EXISTS idx_agent_events_time
2626
+ ON agent_events(occurred_at, sequence_index);
2627
+
2628
+ CREATE INDEX IF NOT EXISTS idx_agent_events_session_seq
2629
+ ON agent_events(session_id, sequence_index);
2630
+
2631
+ CREATE INDEX IF NOT EXISTS idx_agent_events_goal_time
2632
+ ON agent_events(goal_id, occurred_at);
2633
+
2634
+ CREATE INDEX IF NOT EXISTS idx_agent_events_memory
2635
+ ON agent_events(evidence_memory_id);
2636
+
2637
+ CREATE TABLE IF NOT EXISTS agent_goal_links (
2638
+ id TEXT PRIMARY KEY,
2639
+ goal_id TEXT NOT NULL,
2640
+ link_type TEXT NOT NULL,
2641
+ target_id TEXT NOT NULL,
2642
+ target_type TEXT NOT NULL,
2643
+ created_at TEXT NOT NULL
2644
+ );
2645
+
2646
+ CREATE INDEX IF NOT EXISTS idx_agent_goal_links_goal
2647
+ ON agent_goal_links(goal_id, target_type);
2648
+
2649
+ CREATE TABLE IF NOT EXISTS agent_semantic_labels (
2650
+ id TEXT PRIMARY KEY,
2651
+ source_memory_id TEXT NOT NULL,
2652
+ event_id TEXT,
2653
+ labeler TEXT NOT NULL,
2654
+ schema_version INTEGER NOT NULL DEFAULT 1,
2655
+ confidence REAL NOT NULL DEFAULT 0,
2656
+ labels TEXT NOT NULL,
2657
+ created_at TEXT NOT NULL,
2658
+ updated_at TEXT NOT NULL
2659
+ );
2660
+
2661
+ CREATE INDEX IF NOT EXISTS idx_agent_semantic_labels_memory
2662
+ ON agent_semantic_labels(source_memory_id, labeler);
2663
+
2664
+ CREATE INDEX IF NOT EXISTS idx_agent_semantic_labels_event
2665
+ ON agent_semantic_labels(event_id);
2666
+
2667
+ CREATE TABLE IF NOT EXISTS agent_reflection_checkpoints (
2668
+ id TEXT PRIMARY KEY,
2669
+ project_name TEXT,
2670
+ session_id TEXT,
2671
+ window_start_at TEXT NOT NULL,
2672
+ window_end_at TEXT NOT NULL,
2673
+ event_count INTEGER NOT NULL DEFAULT 0,
2674
+ goal_count INTEGER NOT NULL DEFAULT 0,
2675
+ success_count INTEGER NOT NULL DEFAULT 0,
2676
+ failure_count INTEGER NOT NULL DEFAULT 0,
2677
+ risk_count INTEGER NOT NULL DEFAULT 0,
2678
+ summary TEXT NOT NULL,
2679
+ learnings TEXT NOT NULL DEFAULT '[]',
2680
+ next_actions TEXT NOT NULL DEFAULT '[]',
2681
+ evidence_event_ids TEXT NOT NULL DEFAULT '[]',
2682
+ confidence REAL NOT NULL DEFAULT 0,
2683
+ created_at TEXT NOT NULL
2684
+ );
2685
+
2686
+ CREATE INDEX IF NOT EXISTS idx_agent_reflection_project_time
2687
+ ON agent_reflection_checkpoints(project_name, window_end_at);
2688
+
2689
+ CREATE INDEX IF NOT EXISTS idx_agent_reflection_session_time
2690
+ ON agent_reflection_checkpoints(session_id, window_end_at);
2691
+ `);
2692
+ try {
2693
+ await client.execute({
2694
+ sql: `ALTER TABLE memories ADD COLUMN tier INTEGER DEFAULT 3`,
2695
+ args: []
2696
+ });
2697
+ } catch {
2698
+ }
2699
+ try {
2700
+ await client.execute(
2701
+ `CREATE INDEX IF NOT EXISTS idx_memories_tier ON memories(tier)`
2702
+ );
2703
+ } catch {
2704
+ }
2705
+ try {
2706
+ await client.execute({
2707
+ sql: `UPDATE memories SET tier = 1 WHERE tool_name = 'commit_to_long_term_memory' AND importance >= 8 AND tier = 3`,
2708
+ args: []
2709
+ });
2710
+ await client.execute({
2711
+ sql: `UPDATE memories SET tier = 2 WHERE tool_name IN ('store_memory', 'manual') AND importance >= 5 AND tier = 3`,
2712
+ args: []
2713
+ });
2714
+ } catch {
2715
+ }
2716
+ try {
2717
+ await client.execute({
2718
+ sql: `ALTER TABLE memories ADD COLUMN supersedes_id TEXT`,
2719
+ args: []
2720
+ });
2721
+ } catch {
2722
+ }
2723
+ try {
2724
+ await client.execute(
2725
+ `CREATE INDEX IF NOT EXISTS idx_memories_supersedes ON memories(supersedes_id) WHERE supersedes_id IS NOT NULL`
2726
+ );
2727
+ } catch {
2728
+ }
2729
+ for (const col of [
2730
+ "ALTER TABLE tasks ADD COLUMN checkpoint TEXT",
2731
+ "ALTER TABLE tasks ADD COLUMN checkpoint_count INTEGER DEFAULT 0"
2732
+ ]) {
2733
+ try {
2734
+ await client.execute(col);
2735
+ } catch {
2736
+ }
2737
+ }
2738
+ try {
2739
+ await client.execute({
2740
+ sql: `ALTER TABLE memories ADD COLUMN draft INTEGER DEFAULT 0`,
2741
+ args: []
2742
+ });
2743
+ } catch {
2744
+ }
2745
+ try {
2746
+ await client.execute(
2747
+ `CREATE INDEX IF NOT EXISTS idx_memories_draft ON memories(draft) WHERE draft = 1`
2748
+ );
2749
+ } catch {
2750
+ }
2751
+ try {
2752
+ await client.execute({
2753
+ sql: `ALTER TABLE memories ADD COLUMN memory_type TEXT DEFAULT 'raw'`,
2754
+ args: []
2755
+ });
2756
+ } catch {
2757
+ }
2758
+ try {
2759
+ await client.execute(
2760
+ `CREATE INDEX IF NOT EXISTS idx_memories_type ON memories(memory_type)`
2761
+ );
2762
+ } catch {
2763
+ }
2764
+ try {
2765
+ await client.execute({
2766
+ sql: `ALTER TABLE memories ADD COLUMN trajectory TEXT`,
2767
+ args: []
2768
+ });
2769
+ } catch {
2770
+ }
2771
+ for (const col of [
2772
+ "ALTER TABLE memories ADD COLUMN intent TEXT",
2773
+ "ALTER TABLE memories ADD COLUMN outcome TEXT",
2774
+ "ALTER TABLE memories ADD COLUMN domain TEXT",
2775
+ "ALTER TABLE memories ADD COLUMN referenced_entities TEXT",
2776
+ "ALTER TABLE memories ADD COLUMN retrieval_count INTEGER DEFAULT 0",
2777
+ "ALTER TABLE memories ADD COLUMN chain_position TEXT",
2778
+ "ALTER TABLE memories ADD COLUMN review_status TEXT",
2779
+ "ALTER TABLE memories ADD COLUMN context_window_pct INTEGER",
2780
+ "ALTER TABLE memories ADD COLUMN file_paths TEXT",
2781
+ "ALTER TABLE memories ADD COLUMN commit_hash TEXT",
2782
+ "ALTER TABLE memories ADD COLUMN duration_ms INTEGER",
2783
+ "ALTER TABLE memories ADD COLUMN token_cost REAL",
2784
+ "ALTER TABLE memories ADD COLUMN audience TEXT",
2785
+ "ALTER TABLE memories ADD COLUMN language_type TEXT",
2786
+ "ALTER TABLE memories ADD COLUMN parent_memory_id TEXT"
2787
+ ]) {
2788
+ try {
2789
+ await client.execute(col);
2790
+ } catch {
2791
+ }
2792
+ }
2793
+ try {
2794
+ await client.execute({
2795
+ sql: `UPDATE tasks SET status = 'closed' WHERE status = 'done' AND result IS NOT NULL`,
2796
+ args: []
2797
+ });
2798
+ } catch {
2799
+ }
2800
+ }
2801
+ async function disposeDatabase() {
2802
+ if (_walCheckpointTimer) {
2803
+ clearInterval(_walCheckpointTimer);
2804
+ _walCheckpointTimer = null;
2805
+ }
2806
+ if (_daemonClient) {
2807
+ _daemonClient.close();
2808
+ _daemonClient = null;
2809
+ }
2810
+ if (_adapterClient && _adapterClient !== _resilientClient) {
2811
+ _adapterClient.close();
2812
+ }
2813
+ _adapterClient = null;
2814
+ if (_client) {
2815
+ _client.close();
2816
+ _client = null;
2817
+ _resilientClient = null;
2818
+ }
2819
+ }
2820
+ var _client, _resilientClient, _walCheckpointTimer, _daemonClient, _adapterClient, initTurso, SOFT_DELETE_RETENTION_DAYS, disposeTurso;
2821
+ var init_database = __esm({
2822
+ "src/lib/database.ts"() {
2823
+ "use strict";
2824
+ init_db_retry();
2825
+ init_employees();
2826
+ init_database_adapter();
2827
+ init_memory();
2828
+ _client = null;
2829
+ _resilientClient = null;
2830
+ _walCheckpointTimer = null;
2831
+ _daemonClient = null;
2832
+ _adapterClient = null;
2833
+ initTurso = initDatabase;
2834
+ SOFT_DELETE_RETENTION_DAYS = 7;
2835
+ disposeTurso = disposeDatabase;
2836
+ }
2837
+ });
2838
+
2839
+ // src/lib/shard-manager.ts
2840
+ var shard_manager_exports = {};
2841
+ __export(shard_manager_exports, {
2842
+ auditShardHealth: () => auditShardHealth,
2843
+ disposeShards: () => disposeShards,
2844
+ ensureShardSchema: () => ensureShardSchema,
2845
+ getOpenShardCount: () => getOpenShardCount,
2846
+ getReadyShardClient: () => getReadyShardClient,
2847
+ getShardClient: () => getShardClient,
2848
+ getShardsDir: () => getShardsDir,
2849
+ initShardManager: () => initShardManager,
2850
+ isShardingEnabled: () => isShardingEnabled,
2851
+ listShards: () => listShards,
2852
+ shardExists: () => shardExists
2853
+ });
2854
+ import path7 from "path";
2855
+ import { existsSync as existsSync7, mkdirSync as mkdirSync2, readdirSync, renameSync as renameSync3, statSync as statSync3 } from "fs";
2856
+ import { createClient as createClient2 } from "@libsql/client";
2857
+ function initShardManager(encryptionKey) {
2858
+ _encryptionKey = encryptionKey;
2859
+ if (!existsSync7(SHARDS_DIR)) {
2860
+ mkdirSync2(SHARDS_DIR, { recursive: true });
2861
+ }
2862
+ _shardingEnabled = true;
2863
+ if (_evictionTimer) clearInterval(_evictionTimer);
2864
+ _evictionTimer = setInterval(evictIdleShards, EVICTION_INTERVAL_MS);
2865
+ _evictionTimer.unref();
2866
+ }
2867
+ function isShardingEnabled() {
2868
+ return _shardingEnabled;
2869
+ }
2870
+ function getShardsDir() {
2871
+ return SHARDS_DIR;
2872
+ }
2873
+ function getShardClient(projectName) {
2874
+ if (!_encryptionKey) {
2875
+ throw new Error("Shard manager not initialized. Call initShardManager() first.");
2876
+ }
2877
+ const safeName = safeShardName(projectName);
2878
+ if (!safeName || safeName === "unknown") {
2879
+ throw new Error(`Invalid project name for shard: "${projectName}" (resolved to "${safeName}")`);
2880
+ }
2881
+ const cached = _shards.get(safeName);
2882
+ if (cached) {
2883
+ _shardLastAccess.set(safeName, Date.now());
2884
+ return cached;
2885
+ }
2886
+ while (_shards.size >= MAX_OPEN_SHARDS) {
2887
+ evictLRU();
2888
+ }
2889
+ const dbPath = path7.join(SHARDS_DIR, `${safeName}.db`);
2890
+ const client = createClient2({
2891
+ url: `file:${dbPath}`,
2892
+ encryptionKey: _encryptionKey
2893
+ });
2894
+ _shards.set(safeName, client);
2895
+ _shardLastAccess.set(safeName, Date.now());
2896
+ return client;
2897
+ }
2898
+ function shardExists(projectName) {
2899
+ const safeName = safeShardName(projectName);
2900
+ return existsSync7(path7.join(SHARDS_DIR, `${safeName}.db`));
2901
+ }
2902
+ function safeShardName(projectName) {
2903
+ return projectName.replace(/[^a-zA-Z0-9_-]/g, "_");
2904
+ }
2905
+ function listShards() {
2906
+ if (!existsSync7(SHARDS_DIR)) return [];
2907
+ return readdirSync(SHARDS_DIR).filter((f) => f.endsWith(".db")).map((f) => f.replace(".db", ""));
2908
+ }
2909
+ async function auditShardHealth(options = {}) {
2910
+ if (!_encryptionKey) {
2911
+ throw new Error("Shard manager not initialized. Call initShardManager() first.");
2912
+ }
2913
+ const repair = options.repair === true;
2914
+ const dryRun = options.dryRun === true;
2915
+ const names = listShards();
2916
+ const shards = [];
2917
+ for (const name of names) {
2918
+ const dbPath = path7.join(SHARDS_DIR, `${name}.db`);
2919
+ const stat = statSync3(dbPath);
2920
+ const item = {
2921
+ name,
2922
+ path: dbPath,
2923
+ ok: false,
2924
+ unreadable: false,
2925
+ error: null,
2926
+ size: stat.size,
2927
+ mtime: stat.mtime.toISOString(),
2928
+ memoryCount: null
2929
+ };
2930
+ const client = createClient2({
2931
+ url: `file:${dbPath}`,
2932
+ encryptionKey: _encryptionKey
2933
+ });
2934
+ try {
2935
+ await client.execute("SELECT COUNT(*) as cnt FROM sqlite_schema");
2936
+ const hasMemories = await client.execute(
2937
+ "SELECT COUNT(*) as cnt FROM sqlite_schema WHERE type = 'table' AND name = 'memories'"
2938
+ );
2939
+ if (Number(hasMemories.rows[0]?.cnt ?? 0) > 0) {
2940
+ const mem = await client.execute("SELECT COUNT(*) as cnt FROM memories");
2941
+ item.memoryCount = Number(mem.rows[0]?.cnt ?? 0);
2942
+ }
2943
+ item.ok = true;
2944
+ } catch (err) {
2945
+ const message = err instanceof Error ? err.message : String(err);
2946
+ item.error = message;
2947
+ item.unreadable = /SQLITE_NOTADB|file is not a database/i.test(message);
2948
+ if (item.unreadable && repair && !dryRun) {
2949
+ client.close();
2950
+ _shards.delete(name);
2951
+ _shardLastAccess.delete(name);
2952
+ const stamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-");
2953
+ const archivedPath = path7.join(SHARDS_DIR, `${name}.db.broken-${stamp}`);
2954
+ renameSync3(dbPath, archivedPath);
2955
+ item.archivedPath = archivedPath;
2956
+ }
2957
+ } finally {
2958
+ try {
2959
+ client.close();
2960
+ } catch {
2961
+ }
2962
+ }
2963
+ shards.push(item);
2964
+ }
2965
+ return {
2966
+ total: shards.length,
2967
+ ok: shards.filter((s) => s.ok).length,
2968
+ unreadable: shards.filter((s) => s.unreadable).length,
2969
+ archived: shards.filter((s) => Boolean(s.archivedPath)).length,
2970
+ shards
2971
+ };
2972
+ }
2973
+ async function ensureShardSchema(client) {
2974
+ await client.execute("PRAGMA journal_mode = WAL");
2975
+ await client.execute("PRAGMA busy_timeout = 30000");
2976
+ try {
2977
+ await client.execute("PRAGMA libsql_vector_search_ef = 128");
2978
+ } catch {
2979
+ }
2980
+ await client.executeMultiple(`
2981
+ CREATE TABLE IF NOT EXISTS memories (
2982
+ id TEXT PRIMARY KEY,
2983
+ agent_id TEXT NOT NULL,
2984
+ agent_role TEXT NOT NULL,
2985
+ session_id TEXT NOT NULL,
2986
+ timestamp TEXT NOT NULL,
2987
+ tool_name TEXT NOT NULL,
2988
+ project_name TEXT NOT NULL,
2989
+ has_error INTEGER NOT NULL DEFAULT 0,
2990
+ raw_text TEXT NOT NULL,
2991
+ vector F32_BLOB(1024),
2992
+ version INTEGER NOT NULL DEFAULT 0
2993
+ );
2994
+
2995
+ CREATE INDEX IF NOT EXISTS idx_memories_agent ON memories(agent_id);
2996
+ CREATE INDEX IF NOT EXISTS idx_memories_timestamp ON memories(timestamp);
2997
+ CREATE INDEX IF NOT EXISTS idx_memories_agent_project ON memories(agent_id, project_name);
2998
+ `);
2999
+ await client.executeMultiple(`
3000
+ CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts USING fts5(
3001
+ raw_text,
3002
+ content='memories',
3003
+ content_rowid='rowid'
3004
+ );
3005
+
3006
+ CREATE TRIGGER IF NOT EXISTS memories_fts_ai AFTER INSERT ON memories BEGIN
3007
+ INSERT INTO memories_fts(rowid, raw_text) VALUES (new.rowid, new.raw_text);
3008
+ END;
3009
+
3010
+ CREATE TRIGGER IF NOT EXISTS memories_fts_ad AFTER DELETE ON memories BEGIN
3011
+ INSERT INTO memories_fts(memories_fts, rowid, raw_text) VALUES('delete', old.rowid, old.raw_text);
3012
+ END;
3013
+
3014
+ CREATE TRIGGER IF NOT EXISTS memories_fts_au AFTER UPDATE ON memories BEGIN
3015
+ INSERT INTO memories_fts(memories_fts, rowid, raw_text) VALUES('delete', old.rowid, old.raw_text);
3016
+ INSERT INTO memories_fts(rowid, raw_text) VALUES (new.rowid, new.raw_text);
3017
+ END;
3018
+ `);
3019
+ for (const col of [
3020
+ "ALTER TABLE memories ADD COLUMN task_id TEXT",
3021
+ "ALTER TABLE memories ADD COLUMN consolidated INTEGER NOT NULL DEFAULT 0",
3022
+ "ALTER TABLE memories ADD COLUMN author_device_id TEXT",
3023
+ "ALTER TABLE memories ADD COLUMN scope TEXT NOT NULL DEFAULT 'business'",
3024
+ "ALTER TABLE memories ADD COLUMN importance INTEGER DEFAULT 5",
3025
+ "ALTER TABLE memories ADD COLUMN status TEXT DEFAULT 'active'",
3026
+ "ALTER TABLE memories ADD COLUMN wiki_synced INTEGER DEFAULT 0",
3027
+ "ALTER TABLE memories ADD COLUMN graph_extracted INTEGER DEFAULT 0",
3028
+ "ALTER TABLE memories ADD COLUMN content_hash TEXT",
3029
+ "ALTER TABLE memories ADD COLUMN graph_extracted_hash TEXT",
3030
+ "ALTER TABLE memories ADD COLUMN confidence REAL DEFAULT 0.7",
3031
+ "ALTER TABLE memories ADD COLUMN last_accessed TEXT",
3032
+ // Wiki linkage columns (must match database.ts)
3033
+ "ALTER TABLE memories ADD COLUMN workspace_id TEXT",
3034
+ "ALTER TABLE memories ADD COLUMN document_id TEXT",
3035
+ "ALTER TABLE memories ADD COLUMN user_id TEXT",
3036
+ "ALTER TABLE memories ADD COLUMN char_offset INTEGER",
3037
+ "ALTER TABLE memories ADD COLUMN page_number INTEGER",
3038
+ // Source provenance columns (must match database.ts)
3039
+ "ALTER TABLE memories ADD COLUMN source_path TEXT",
3040
+ "ALTER TABLE memories ADD COLUMN source_type TEXT DEFAULT 'text'",
3041
+ "ALTER TABLE memories ADD COLUMN tier INTEGER DEFAULT 3",
3042
+ "ALTER TABLE memories ADD COLUMN supersedes_id TEXT",
3043
+ // MS-11: draft staging, MS-6a: memory_type, MS-7: trajectory
3044
+ "ALTER TABLE memories ADD COLUMN draft INTEGER DEFAULT 0",
3045
+ "ALTER TABLE memories ADD COLUMN memory_type TEXT DEFAULT 'raw'",
3046
+ "ALTER TABLE memories ADD COLUMN trajectory TEXT",
3047
+ // Metadata enrichment columns (must match database.ts)
3048
+ "ALTER TABLE memories ADD COLUMN intent TEXT",
3049
+ "ALTER TABLE memories ADD COLUMN outcome TEXT",
3050
+ "ALTER TABLE memories ADD COLUMN domain TEXT",
3051
+ "ALTER TABLE memories ADD COLUMN referenced_entities TEXT",
3052
+ "ALTER TABLE memories ADD COLUMN retrieval_count INTEGER DEFAULT 0",
3053
+ "ALTER TABLE memories ADD COLUMN chain_position TEXT",
3054
+ "ALTER TABLE memories ADD COLUMN review_status TEXT",
3055
+ "ALTER TABLE memories ADD COLUMN context_window_pct INTEGER",
3056
+ "ALTER TABLE memories ADD COLUMN file_paths TEXT",
3057
+ "ALTER TABLE memories ADD COLUMN commit_hash TEXT",
3058
+ "ALTER TABLE memories ADD COLUMN duration_ms INTEGER",
3059
+ "ALTER TABLE memories ADD COLUMN token_cost REAL",
3060
+ "ALTER TABLE memories ADD COLUMN audience TEXT",
3061
+ "ALTER TABLE memories ADD COLUMN language_type TEXT",
3062
+ "ALTER TABLE memories ADD COLUMN parent_memory_id TEXT",
3063
+ "ALTER TABLE memories ADD COLUMN deleted_at TEXT"
3064
+ ]) {
3065
+ try {
3066
+ await client.execute(col);
3067
+ } catch {
3068
+ }
3069
+ }
3070
+ for (const idx of [
3071
+ "CREATE INDEX IF NOT EXISTS idx_memories_tier ON memories(tier)",
3072
+ "CREATE INDEX IF NOT EXISTS idx_memories_supersedes ON memories(supersedes_id) WHERE supersedes_id IS NOT NULL",
3073
+ "CREATE INDEX IF NOT EXISTS idx_memories_scoped_content_hash ON memories(content_hash, agent_id, project_name, memory_type) WHERE content_hash IS NOT NULL"
3074
+ ]) {
3075
+ try {
3076
+ await client.execute(idx);
3077
+ } catch {
3078
+ }
3079
+ }
3080
+ try {
3081
+ await client.execute("CREATE INDEX IF NOT EXISTS idx_memories_status ON memories(status)");
3082
+ } catch {
3083
+ }
3084
+ for (const idx of [
3085
+ "CREATE INDEX IF NOT EXISTS idx_memories_workspace ON memories(workspace_id)",
3086
+ "CREATE INDEX IF NOT EXISTS idx_memories_document ON memories(document_id)",
3087
+ "CREATE INDEX IF NOT EXISTS idx_memories_user ON memories(user_id)"
3088
+ ]) {
3089
+ try {
3090
+ await client.execute(idx);
3091
+ } catch {
3092
+ }
3093
+ }
3094
+ await client.executeMultiple(`
3095
+ CREATE TABLE IF NOT EXISTS entities (
3096
+ id TEXT PRIMARY KEY,
3097
+ name TEXT NOT NULL,
3098
+ type TEXT NOT NULL,
3099
+ first_seen TEXT NOT NULL,
3100
+ last_seen TEXT NOT NULL,
3101
+ properties TEXT DEFAULT '{}',
3102
+ UNIQUE(name, type)
3103
+ );
3104
+
3105
+ CREATE TABLE IF NOT EXISTS relationships (
3106
+ id TEXT PRIMARY KEY,
3107
+ source_entity_id TEXT NOT NULL,
3108
+ target_entity_id TEXT NOT NULL,
3109
+ type TEXT NOT NULL,
3110
+ weight REAL DEFAULT 1.0,
3111
+ timestamp TEXT NOT NULL,
3112
+ properties TEXT DEFAULT '{}',
3113
+ UNIQUE(source_entity_id, target_entity_id, type)
3114
+ );
3115
+
3116
+ CREATE TABLE IF NOT EXISTS entity_memories (
3117
+ entity_id TEXT NOT NULL,
3118
+ memory_id TEXT NOT NULL,
3119
+ PRIMARY KEY (entity_id, memory_id)
3120
+ );
3121
+
3122
+ CREATE TABLE IF NOT EXISTS relationship_memories (
3123
+ relationship_id TEXT NOT NULL,
3124
+ memory_id TEXT NOT NULL,
3125
+ PRIMARY KEY (relationship_id, memory_id)
3126
+ );
3127
+
3128
+ CREATE INDEX IF NOT EXISTS idx_entities_name ON entities(name);
3129
+ CREATE INDEX IF NOT EXISTS idx_entities_type ON entities(type);
3130
+ CREATE INDEX IF NOT EXISTS idx_relationships_source ON relationships(source_entity_id);
3131
+ CREATE INDEX IF NOT EXISTS idx_relationships_target ON relationships(target_entity_id);
3132
+ CREATE INDEX IF NOT EXISTS idx_relationships_type ON relationships(type);
3133
+
3134
+ CREATE TABLE IF NOT EXISTS hyperedges (
3135
+ id TEXT PRIMARY KEY,
3136
+ label TEXT NOT NULL,
3137
+ relation TEXT NOT NULL,
3138
+ confidence REAL DEFAULT 1.0,
3139
+ timestamp TEXT NOT NULL
3140
+ );
3141
+
3142
+ CREATE TABLE IF NOT EXISTS hyperedge_nodes (
3143
+ hyperedge_id TEXT NOT NULL,
3144
+ entity_id TEXT NOT NULL,
3145
+ PRIMARY KEY (hyperedge_id, entity_id)
3146
+ );
3147
+ `);
3148
+ for (const col of [
3149
+ "ALTER TABLE relationships ADD COLUMN confidence REAL DEFAULT 1.0",
3150
+ "ALTER TABLE relationships ADD COLUMN confidence_label TEXT DEFAULT 'extracted'"
3151
+ ]) {
3152
+ try {
3153
+ await client.execute(col);
3154
+ } catch {
3155
+ }
3156
+ }
3157
+ }
3158
+ async function getReadyShardClient(projectName) {
3159
+ const safeName = safeShardName(projectName);
3160
+ let client = getShardClient(projectName);
3161
+ try {
3162
+ await ensureShardSchema(client);
3163
+ return client;
3164
+ } catch (err) {
3165
+ const message = err instanceof Error ? err.message : String(err);
3166
+ if (!/SQLITE_NOTADB|file is not a database/i.test(message)) throw err;
3167
+ client.close();
3168
+ _shards.delete(safeName);
3169
+ _shardLastAccess.delete(safeName);
3170
+ const dbPath = path7.join(SHARDS_DIR, `${safeName}.db`);
3171
+ if (existsSync7(dbPath)) {
3172
+ const stat = statSync3(dbPath);
3173
+ const stamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-");
3174
+ const archivedPath = path7.join(SHARDS_DIR, `${safeName}.db.broken-${stamp}`);
3175
+ renameSync3(dbPath, archivedPath);
3176
+ process.stderr.write(
3177
+ `[shard-manager] Archived unreadable shard ${safeName}: ${archivedPath} (${stat.size} bytes, mtime ${stat.mtime.toISOString()})
3178
+ `
3179
+ );
3180
+ }
3181
+ client = getShardClient(projectName);
3182
+ await ensureShardSchema(client);
3183
+ return client;
3184
+ }
3185
+ }
3186
+ function evictLRU() {
3187
+ let oldest = null;
3188
+ let oldestTime = Infinity;
3189
+ for (const [name, time] of _shardLastAccess) {
3190
+ if (time < oldestTime) {
3191
+ oldestTime = time;
3192
+ oldest = name;
3193
+ }
3194
+ }
3195
+ if (oldest) {
3196
+ const client = _shards.get(oldest);
3197
+ if (client) {
3198
+ client.close();
3199
+ }
3200
+ _shards.delete(oldest);
3201
+ _shardLastAccess.delete(oldest);
3202
+ }
3203
+ }
3204
+ function evictIdleShards() {
3205
+ const now = Date.now();
3206
+ const toEvict = [];
3207
+ for (const [name, lastAccess] of _shardLastAccess) {
3208
+ if (now - lastAccess > SHARD_IDLE_MS) {
3209
+ toEvict.push(name);
3210
+ }
3211
+ }
3212
+ for (const name of toEvict) {
3213
+ const client = _shards.get(name);
3214
+ if (client) {
3215
+ client.close();
3216
+ }
3217
+ _shards.delete(name);
3218
+ _shardLastAccess.delete(name);
3219
+ }
3220
+ }
3221
+ function getOpenShardCount() {
3222
+ return _shards.size;
3223
+ }
3224
+ function disposeShards() {
3225
+ if (_evictionTimer) {
3226
+ clearInterval(_evictionTimer);
3227
+ _evictionTimer = null;
3228
+ }
3229
+ for (const [, client] of _shards) {
3230
+ client.close();
3231
+ }
3232
+ _shards.clear();
3233
+ _shardLastAccess.clear();
3234
+ _shardingEnabled = false;
3235
+ _encryptionKey = null;
3236
+ }
3237
+ var SHARDS_DIR, SHARD_IDLE_MS, MAX_OPEN_SHARDS, EVICTION_INTERVAL_MS, _shards, _shardLastAccess, _evictionTimer, _encryptionKey, _shardingEnabled;
3238
+ var init_shard_manager = __esm({
3239
+ "src/lib/shard-manager.ts"() {
3240
+ "use strict";
3241
+ init_config();
3242
+ SHARDS_DIR = path7.join(EXE_AI_DIR, "shards");
3243
+ SHARD_IDLE_MS = 5 * 60 * 1e3;
3244
+ MAX_OPEN_SHARDS = 10;
3245
+ EVICTION_INTERVAL_MS = 60 * 1e3;
3246
+ _shards = /* @__PURE__ */ new Map();
3247
+ _shardLastAccess = /* @__PURE__ */ new Map();
3248
+ _evictionTimer = null;
3249
+ _encryptionKey = null;
3250
+ _shardingEnabled = false;
3251
+ }
3252
+ });
3253
+
3254
+ // src/lib/platform-procedures.ts
3255
+ var PLATFORM_PROCEDURES, PLATFORM_PROCEDURE_TITLES;
3256
+ var init_platform_procedures = __esm({
3257
+ "src/lib/platform-procedures.ts"() {
3258
+ "use strict";
3259
+ PLATFORM_PROCEDURES = [
3260
+ // --- Foundation: what is exe-os ---
3261
+ {
3262
+ title: "What is exe-os \u2014 the operating model every agent must understand",
3263
+ domain: "architecture",
3264
+ priority: "p0",
3265
+ content: "Exe OS is an AI employee operating system. A founder runs 5-10 AI agents as a real org: COO, CTO, CMO, engineers, and content production specialists. Each agent has identity, expertise, and experience layers \u2014 persistent memory that makes them better over time. All data is local-first, E2EE, owned by the user. The MCP server is the ONLY data interface \u2014 never access the DB directly."
3266
+ },
3267
+ {
3268
+ title: "Mode 1 \u2014 how exe-os runs inside Claude Code",
3269
+ domain: "architecture",
3270
+ priority: "p0",
3271
+ content: "Mode 1: exe-os runs AS hooks + MCP + skills inside Claude Code, Codex, or OpenCode. The founder picks their default tool at setup. The COO manages employees in tmux sessions. Each coordinator session is a separate window/project. Employees run in their own tmux panes via create_task auto-spawn. The founder talks to the COO; the COO orchestrates the team. The tool is the shell, exe-os is the brain."
3272
+ },
3273
+ {
3274
+ title: "Sessions explained \u2014 coordinator session names and projects",
3275
+ domain: "architecture",
3276
+ priority: "p0",
3277
+ content: "Each coordinator session is an isolated project session. One might be exe-os development, another might be exe-wiki. Each session spawns its own employees using {employee}-{coordinatorSession}. Sessions share the same memory DB but tasks are scoped to the session that created them. A founder can run multiple projects simultaneously. Sessions never interfere with each other."
3278
+ },
3279
+ {
3280
+ title: "Runtime settings \u2014 COO can view and change tools per agent",
3281
+ domain: "workflow",
3282
+ priority: "p1",
3283
+ content: "exe-os supports three tools: Claude Code (Anthropic), Codex (OpenAI), and OpenCode (open source, 75+ providers). Each agent can use a different tool and model. COO uses set_agent_config MCP tool to view or change settings. Call with no args to show all agents. Call with agent_id + runtime + model to change. Users can also run `exe-os settings` from terminal for interactive arrow-key selection."
3284
+ },
3285
+ // --- Hierarchy and dispatch ---
3286
+ {
3287
+ title: "Chain of command \u2014 who talks to whom",
3288
+ domain: "workflow",
3289
+ priority: "p0",
3290
+ content: "Founder -> coordinator (the executive agent, internally routed as 'COO') -> CTO/CMO. CTO -> engineers. CMO -> content production. Never skip levels: the coordinator does not bypass managers for specialist work. Specialists report to their manager. If you need cross-team info, use ask_team_memory \u2014 don't read other agents' task folders. Each level owns dispatch downward and review upward."
3291
+ },
3292
+ {
3293
+ title: "Customer orchestration maturity \u2014 recommend, never trap",
3294
+ domain: "workflow",
3295
+ priority: "p1",
3296
+ content: "New customers start best in Phase 1: founder \u2194 coordinator/Chief of Staff, building company context. Suggest Phase 2 executives when domain work repeats; suggest Phase 3 parallel execution only when review/permission gates are ready. This is guidance, not a blocker: users may jump phases anytime. Never overwrite their phase, role titles, identities, or custom org design."
3297
+ },
3298
+ {
3299
+ title: "Single dispatch path \u2014 create_task only",
3300
+ domain: "workflow",
3301
+ priority: "p0",
3302
+ content: "create_task is the ONLY way to dispatch work to another agent. No direct ensureEmployee calls, no manual tmux spawns, no send_message for actionable work. create_task \u2192 system auto-spawns \u2192 session correctly named. ONE PATH. No backdoors. No exceptions."
3303
+ },
3304
+ // --- Session isolation ---
3305
+ {
3306
+ title: "Session scoping \u2014 stay in your coordinator boundary",
3307
+ domain: "security",
3308
+ priority: "p0",
3309
+ content: "Session scoping is mandatory. Managers dispatch to workers within their own coordinator session ONLY. Employee sessions use {employee}-{coordinatorSession}. Cross-session dispatch is blocked by the system. Verify session names before dispatch. Tasks are scoped to the creating coordinator session."
3310
+ },
3311
+ {
3312
+ title: "Session isolation \u2014 never touch another session's work",
3313
+ domain: "workflow",
3314
+ priority: "p0",
3315
+ content: "Sessions are isolated. A coordinator session owns ONLY tasks it dispatched. (1) Never close/update/cancel tasks from another coordinator session. (2) Never review work from a different session \u2014 report that it belongs to another session and skip. (3) Ignore other sessions' items in list_tasks results. (4) Employees inherit session: employee sessions work ONLY on their parent coordinator session's tasks. Cross-session work is a system violation."
3316
+ },
3317
+ // --- Engineering: session scoping in code ---
3318
+ {
3319
+ title: "Three-dimensional scoping \u2014 session, project, role \u2014 enforced in every query",
3320
+ domain: "architecture",
3321
+ priority: "p0",
3322
+ content: "Every DB query, notification, review count, and task operation MUST be scoped on 3 dimensions: (1) Session \u2014 filter by session_scope matching the current coordinator session. (2) Project \u2014 filter by project_name. (3) Role \u2014 agents only see data at their hierarchy level. When writing ANY function that touches tasks, reviews, messages, or notifications: always accept a sessionScope parameter and pass it to the SQL WHERE clause. Unscoped queries are bugs. Test by running 2+ coordinator sessions simultaneously."
3323
+ },
3324
+ // --- Hard constraints ---
3325
+ {
3326
+ title: "What you CANNOT do in exe-os \u2014 hard constraints",
3327
+ domain: "security",
3328
+ priority: "p0",
3329
+ content: "NEVER: (1) Access the database directly \u2014 it's SQLCipher encrypted, always fails. Use MCP tools only. (2) Manually spawn tmux sessions \u2014 create_task handles it. (3) Run git checkout main \u2014 agents work in worktrees. (4) Modify another agent's in-progress task. (5) Push to remote \u2014 the COO reviews and pushes. (6) Skip update_task(done) \u2014 it's the ONLY way your work gets reviewed. (7) Run git init."
3330
+ },
3331
+ {
3332
+ title: "Customer patch triage \u2014 upstream bug vs customization",
3333
+ domain: "support",
3334
+ priority: "p0",
3335
+ content: "When an agent encounters a suspected Exe OS bug, update breakage, MCP/tool failure, installer issue, memory/orchestration defect, or customer-local patch need, it MUST use create_bug_report. Do this before or alongside any local workaround so the report reaches AskExe support directly via the customer's license. Classify first: upstream_bug = reproducible exe-os/platform defect; customer_customization = identity, behavior, procedure, config, branding, workflow preference that belongs in customer-owned layers; emergency_hotfix = temporary local patch. For upstream bugs/emergency hotfixes include version, repro steps, expected/actual, files changed, workaround, and local diff summary. Avoid permanent platform-code patches unless founder approves; if a hotfix is unavoidable, document it in the bug report and re-check after npm update."
3336
+ },
3337
+ // --- Operations ---
3338
+ {
3339
+ title: "Managers must supervise deployed workers",
3340
+ domain: "workflow",
3341
+ priority: "p0",
3342
+ content: `Every manager (COO/CTO/CMO) who dispatches work to a worker MUST actively monitor them. Check tmux capture-pane every 10 minutes. Verify they're working, not stuck. If idle at prompt with in_progress task \u2192 send intercom. If stuck \u2192 unblock or escalate. "Standing by" without checking is negligence.`
3343
+ },
3344
+ {
3345
+ title: "COO boot health check \u2014 memory, cloud sync, daemon on every launch",
3346
+ domain: "workflow",
3347
+ priority: "p0",
3348
+ content: "On every /exe boot, COO MUST check system health BEFORE other work: (1) daemon \u2014 is exed PID alive, (2) cloud sync \u2014 grep workers.log for recent cloud-sync errors, (3) memory count \u2014 total in DB, (4) sync delta \u2014 local vs cloud storage_bytes. Report as 4-line status table. If ANY check fails, surface to founder immediately. Do not proceed to tasks until health confirmed."
3349
+ },
3350
+ {
3351
+ title: "exe-build-adv mandatory for 3+ files",
3352
+ domain: "workflow",
3353
+ priority: "p0",
3354
+ content: "exe-build-adv is MANDATORY for ALL work touching 3+ files. Run /exe-build-adv --auto BEFORE implementation. Pipeline: Spec \u2192 AC \u2192 Tests \u2192 Evaluate \u2192 Fix. No multi-file feature ships without pipeline artifacts. No exceptions \u2014 managers reject work without them."
3355
+ },
3356
+ {
3357
+ title: "Commit discipline \u2014 never leave verified work floating",
3358
+ domain: "workflow",
3359
+ priority: "p1",
3360
+ content: "After any code-change batch passes typecheck/tests/build, run git status, summarize changed files, and commit with a clear message before ending the session. If work must remain uncommitted for review/dogfood, explicitly say so, list the files, and state the blocker. Never imply work is complete while verified changes are still floating locally."
3361
+ },
3362
+ {
3363
+ title: "Desktop and TUI are the same product",
3364
+ domain: "architecture",
3365
+ priority: "p0",
3366
+ content: "Desktop and TUI are the SAME product in different renderers. Same data contracts, same interactions, same acceptance criteria. Desktop tab specs in ARCHITECTURE.md ARE the TUI specs. When building TUI, cross-reference Desktop spec. Different tab names, identical behavior. Never treat them as separate products."
3367
+ },
3368
+ // --- Orchestration golden path ---
3369
+ {
3370
+ title: "Task lifecycle \u2014 the golden path every agent follows",
3371
+ domain: "workflow",
3372
+ priority: "p0",
3373
+ content: "create_task is dispatch + delivery. Task lifecycle: open \u2192 in_progress (you start) \u2192 done (update_task when finished) \u2192 needs_review (reviewer nudged) \u2192 closed (COO only via close_task). DB is the reliable delivery \u2014 intercom is just a speedup nudge. If you finish a task, self-chain: check for next task immediately (step 7). Never wait for a nudge. Never say 'standing by.'"
3374
+ },
3375
+ {
3376
+ title: "Intercom is a speedup, not delivery \u2014 DB is the source of truth",
3377
+ domain: "architecture",
3378
+ priority: "p0",
3379
+ content: "Tasks live in the DB. Intercom (tmux send-keys) is fire-and-forget \u2014 it may fail, get garbled, or arrive mid-work. Never rely on intercom for task delivery. The UserPromptSubmit hook checks the DB for new tasks on every prompt. Your operating procedures step 7 says check for next work. The daemon nudges idle agents as a speedup. If you have no tasks, you found them all."
3380
+ },
3381
+ // --- MCP is the ONLY data interface ---
3382
+ {
3383
+ title: "MCP disconnect \u2014 ask the user, never work around it",
3384
+ domain: "workflow",
3385
+ priority: "p0",
3386
+ content: "If MCP tools are unavailable, disconnected, or returning connection errors: STOP. Tell the user clearly: 'MCP server is disconnected. Please run /mcp to reconnect.' Do NOT attempt workarounds \u2014 no raw Node imports, no direct DB access, no CLI hacks, no daemon socket calls. MCP is the ONLY data interface. Working around it wastes time, hits bundling issues, and bypasses the contract boundary. Ask once, wait, proceed when reconnected."
3387
+ },
3388
+ // --- MCP Tool Catalog (Layer 0 — every agent knows what tools exist) ---
3389
+ {
3390
+ title: "MCP tools \u2014 memory and search",
3391
+ domain: "tool-use",
3392
+ priority: "p1",
3393
+ content: "recall_my_memory: search your own memories (semantic + FTS). ask_team_memory: search a colleague's memories by agent name. store_memory: persist a memory (decisions, summaries, context). commit_memory: high-importance memory that survives consolidation. search_everything: unified search across memories, tasks, entities, conversations. get_session_context: temporal window of memories around a timestamp. consolidate_memories: merge duplicate/related memories into insights. get_memory_cardinality: count memories per agent (health check)."
3394
+ },
3395
+ {
3396
+ title: "MCP tools \u2014 task orchestration",
3397
+ domain: "tool-use",
3398
+ priority: "p1",
3399
+ content: "create_task: dispatch work to an employee (auto-spawns session). The ONLY dispatch path. list_tasks: query tasks by status, assignee, project. get_task: fetch full task details by ID. update_task: change status (in_progress, done, blocked, cancelled) + add result summary. close_task: finalize a reviewed task (COO only). checkpoint_task: save progress state for crash recovery. resume_employee: re-spawn an employee session for an existing task."
3400
+ },
3401
+ {
3402
+ title: "MCP tools \u2014 knowledge graph (GraphRAG)",
3403
+ domain: "tool-use",
3404
+ priority: "p1",
3405
+ content: "query_relationships: find connections between entities in the knowledge graph. get_entity_neighbors: explore an entity's direct connections. get_hot_entities: find most-referenced entities (trending topics). get_graph_stats: graph health \u2014 entity/relationship counts, density. export_graph: export graph data for visualization. merge_entities: deduplicate entities (alias resolution). find_similar_trajectories: match tool-call patterns to past task solutions."
3406
+ },
3407
+ {
3408
+ title: "MCP tools \u2014 identity, behavior, and decisions",
3409
+ domain: "tool-use",
3410
+ priority: "p1",
3411
+ content: "get_identity: read an agent's exe.md (Layer 1 identity). update_identity: write an agent's exe.md. Identity > behavior \u2014 use for permanent rules. store_behavior: record a correction or pattern for an agent (Layer 2 expertise). list_behaviors: view an agent's active behaviors. deactivate_behavior: soft-delete a stale or conflicting behavior. store_decision: record an ADR (architectural decision record). get_decision: retrieve a past decision by query. create_bug_report: customer-facing bug/support intake; use whenever an Exe OS bug or emergency hotfix is encountered so the report reaches AskExe directly. Customers only get report access; internal list/get/triage support tools are AskExe-only."
3412
+ },
3413
+ {
3414
+ title: "MCP tools \u2014 communication and messaging",
3415
+ domain: "tool-use",
3416
+ priority: "p1",
3417
+ content: "send_message: send supplementary context to another agent (NOT for actionable work \u2014 use create_task). acknowledge_messages: mark messages as read. send_whatsapp: send WhatsApp message via gateway (customer-facing alerts). query_conversations: search ingested conversations across all channels (WhatsApp, email, etc.)."
3418
+ },
3419
+ {
3420
+ title: "MCP tools \u2014 wiki, documents, and content",
3421
+ domain: "tool-use",
3422
+ priority: "p1",
3423
+ content: "wiki: read/list wiki pages only. Direct wiki write tools are removed; wiki updates flow through raw-data ingestion/projection into the curated wiki store. Legacy aliases: list_wiki_pages/get_wiki_page. crm: read/list/get CRM records from exe-db. raw_data: read capped raw landing-pad events from exe-db with payload opt-in. ingest_document: import a file (PDF, MD, etc.) into memory as chunks. list_documents: browse ingested documents by workspace. purge_document: remove a document and its memory chunks. set_document_importance: adjust chunk importance scores. rerank_documents: re-score document relevance for a query."
3424
+ },
3425
+ {
3426
+ title: "MCP tools \u2014 system, operations, and admin",
3427
+ domain: "tool-use",
3428
+ priority: "p1",
3429
+ content: "get_agent_spend: token usage per agent/session (cost tracking). list_agent_sessions: view agent session history. get_session_kills: audit log of killed sessions. get_daemon_health: check exed daemon status. get_auto_wake_status: daemon auto-wake configuration. get_worker_gate: check worker deployment gates. run_memory_audit: health check \u2014 duplicates, null vectors, orphaned rows. run_consolidation: trigger sleep-time memory consolidation. cloud_sync: force a cloud sync cycle. backup_vps: trigger VPS backup."
3430
+ },
3431
+ {
3432
+ title: "MCP tools \u2014 config, licensing, and team",
3433
+ domain: "tool-use",
3434
+ priority: "p1",
3435
+ content: "set_agent_config: view/change per-agent runtime and model settings. list_employees: view the employee roster. add_person: add a person to the CRM contacts roster. list_people: browse CRM contacts. get_person: fetch contact details. get_license_status: check license validity. create_license: generate a new license key (admin). list_licenses: view all issued licenses. activate_license: activate a license on a device."
3436
+ },
3437
+ {
3438
+ title: "MCP tools \u2014 advanced (triggers, skills, orchestration)",
3439
+ domain: "tool-use",
3440
+ priority: "p1",
3441
+ content: "create_trigger: set up a scheduled recurring agent job (cron). list_triggers: view active triggers. load_skill: load a slash-command skill dynamically. apply_starter_pack: import a pre-built behavior + identity pack for a role. export_orchestration: export full org state (tasks, behaviors, identities) as portable JSON. import_orchestration: import org state into a new instance. deploy_client: deploy a customer client instance. query_company_brain: unified RAG query across all company knowledge. create_reminder: set a text reminder (shown in boot brief). list_reminders: view pending reminders. complete_reminder: mark a reminder done. company_procedure: manage customer-owned company procedures (Layer 0; actions: store, list, deactivate). Legacy aliases: global_procedure, store_global_procedure, list_global_procedures, deactivate_global_procedure."
3442
+ }
3443
+ ];
3444
+ PLATFORM_PROCEDURE_TITLES = new Set(
3445
+ PLATFORM_PROCEDURES.map((p) => p.title)
3446
+ );
3447
+ }
3448
+ });
3449
+
3450
+ // src/lib/global-procedures.ts
3451
+ var global_procedures_exports = {};
3452
+ __export(global_procedures_exports, {
3453
+ deactivateGlobalProcedure: () => deactivateGlobalProcedure,
3454
+ getGlobalProceduresBlock: () => getGlobalProceduresBlock,
3455
+ loadGlobalProcedures: () => loadGlobalProcedures,
3456
+ storeGlobalProcedure: () => storeGlobalProcedure
3457
+ });
3458
+ import { randomUUID as randomUUID2 } from "crypto";
3459
/**
 * Load active company procedures from the DB, excluding any row whose title
 * collides with a built-in platform procedure (platform text ships in code via
 * PLATFORM_PROCEDURES, so DB copies are treated as stale duplicates).
 * Refreshes the customer-only markdown cache and returns the customer rows.
 */
async function loadGlobalProcedures() {
  const client = getClient();
  const result = await client.execute({
    sql: "SELECT * FROM company_procedures WHERE active = 1 ORDER BY priority ASC, created_at ASC",
    args: []
  });
  const allRows = result.rows;
  // Platform procedures are keyed by exact title match against the static set.
  const customerOnly = allRows.filter((p) => !PLATFORM_PROCEDURE_TITLES.has(p.title));
  if (customerOnly.length > 0) {
    _customerCache = customerOnly.map((p) => `### ${p.title}
${p.content}`).join("\n\n");
  } else {
    _customerCache = "";
  }
  // Mark the cache warm so getGlobalProceduresBlock() will include it.
  _cacheLoaded = true;
  return customerOnly;
}
3476
/**
 * Render the organization-wide procedures markdown block: platform procedures
 * first, then customer procedures (only once loadGlobalProcedures has run).
 * Returns "" when there is nothing to show.
 */
function getGlobalProceduresBlock() {
  const parts = [_platformCache, _cacheLoaded ? _customerCache : ""].filter(Boolean);
  if (parts.length === 0) return "";
  return `## Organization-Wide Procedures (MANDATORY \u2014 supersedes all other rules)

${parts.join("\n\n")}
`;
}
3486
/**
 * Insert a customer-owned company procedure and refresh the in-memory cache.
 * Defaults: priority "p0", no domain, active = 1. Returns the new row id.
 */
async function storeGlobalProcedure(input) {
  const id = randomUUID2();
  const now = (/* @__PURE__ */ new Date()).toISOString();
  const client = getClient();
  await client.execute({
    sql: `INSERT INTO company_procedures (id, title, content, priority, domain, active, created_at, updated_at)
          VALUES (?, ?, ?, ?, ?, 1, ?, ?)`,
    args: [id, input.title, input.content, input.priority ?? "p0", input.domain ?? null, now, now]
  });
  // Reload so getGlobalProceduresBlock() reflects the new procedure immediately.
  await loadGlobalProcedures();
  return id;
}
3498
/**
 * Soft-delete a company procedure: flip active to 0 rather than removing the
 * row, then refresh the cache. Returns true when a row was actually updated.
 */
async function deactivateGlobalProcedure(id) {
  const now = (/* @__PURE__ */ new Date()).toISOString();
  const client = getClient();
  const result = await client.execute({
    sql: "UPDATE company_procedures SET active = 0, updated_at = ? WHERE id = ?",
    args: [now, id]
  });
  await loadGlobalProcedures();
  return result.rowsAffected > 0;
}
3508
// Module caches: customer procedures are loaded lazily from the DB
// (_customerCache/_cacheLoaded), platform procedures are rendered once at
// module init from the static PLATFORM_PROCEDURES list (_platformCache).
var _customerCache, _cacheLoaded, _platformCache;
var init_global_procedures = __esm({
  "src/lib/global-procedures.ts"() {
    "use strict";
    init_database();
    init_platform_procedures();
    _customerCache = "";
    _cacheLoaded = false;
    _platformCache = PLATFORM_PROCEDURES.map((p) => `### ${p.title}
${p.content}`).join("\n\n");
  }
});
3520
+
3521
+ // src/lib/memory-cards.ts
3522
+ var memory_cards_exports = {};
3523
+ __export(memory_cards_exports, {
3524
+ extractMemoryCards: () => extractMemoryCards,
3525
+ insertMemoryCardsForBatch: () => insertMemoryCardsForBatch,
3526
+ searchMemoryCards: () => searchMemoryCards
3527
+ });
3528
+ import { createHash as createHash2 } from "crypto";
3529
/**
 * Content-addressed card id: the same (memory, type, content) triple always
 * hashes to the same 32-hex-char id, enabling idempotent inserts.
 */
function stableId(memoryId, type, content) {
  const hash = createHash2("sha256");
  hash.update(`${memoryId}:${type}:${content}`);
  return hash.digest("hex").slice(0, 32);
}
3532
/**
 * Normalize memory text for sentence extraction: drop fenced code blocks,
 * strip HTML/XML tags, then collapse all whitespace runs to single spaces.
 */
function cleanText(text) {
  let out = text.replace(/```[\s\S]*?```/g, " ");
  out = out.replace(/<[^>]+>/g, " ");
  return out.replace(/\s+/g, " ").trim();
}
3535
/**
 * Sentence-ish segmentation: split cleaned text after terminal punctuation or
 * on newlines, keeping only trimmed fragments in the useful length band
 * (24..MAX_SENTENCE_CHARS chars).
 */
function splitSentences(text) {
  const fragments = cleanText(text).split(/(?<=[.!?])\s+|\n+/);
  return fragments
    .map((fragment) => fragment.trim())
    .filter((fragment) => fragment.length >= 24 && fragment.length <= MAX_SENTENCE_CHARS);
}
3538
/**
 * Classify a sentence into a memory-card type. Rules are ordered — the first
 * match wins — ending in the "fact" catch-all.
 */
function inferCardType(sentence, toolName) {
  const lower = sentence.toLowerCase();
  const rules = [
    [() => toolName === "store_decision" || /\b(decided|decision|adr|approved|rejected)\b/.test(lower), "decision"],
    [() => /\b(prefers|preference|likes|dislikes|wants|doesn't want|does not want)\b/.test(lower), "preference"],
    [() => /\b(changed|updated|replaced|now|no longer|instead|supersedes)\b/.test(lower), "belief_update"],
    [() => Boolean(toolName) && ["Read", "Write", "Edit", "Bash"].includes(toolName), "code"],
    [() => /\b(meeting|deadline|shipped|launched|completed|failed|blocked|assigned|created)\b/.test(lower), "event"]
  ];
  for (const [matches, cardType] of rules) {
    if (matches()) return cardType;
  }
  return "fact";
}
3547
/**
 * Pick the card subject: the first capitalized token (optionally followed by a
 * second capitalized token, e.g. a full name), falling back to the owning
 * agent when nothing in the sentence qualifies.
 */
function extractSubject(sentence, agentId) {
  const found = sentence.match(/\b([A-Z][a-zA-Z0-9_-]{2,}(?:\s+[A-Z][a-zA-Z0-9_-]{2,})?)\b/);
  if (found?.[1]) return found[1];
  return agentId;
}
3551
/**
 * Map a card type to its triple predicate. Unknown types read as plain
 * statements ("states"). A Map avoids prototype-key collisions that a plain
 * object lookup could hit.
 */
function predicateFor(type) {
  const predicates = new Map([
    ["preference", "prefers"],
    ["belief_update", "updated"],
    ["decision", "decided"],
    ["event", "happened"],
    ["code", "implemented"]
  ]);
  return predicates.get(type) ?? "states";
}
3567
/**
 * Derive small typed "memory cards" (subject/predicate/object triples) from a
 * raw memory row by classifying each extracted sentence. Capped at
 * MAX_CARDS_PER_MEMORY cards per row.
 */
function extractMemoryCards(row) {
  const sentences = splitSentences(row.raw_text);
  const cards = [];
  for (const sentence of sentences) {
    const type = inferCardType(sentence, row.tool_name);
    const subject = extractSubject(sentence, row.agent_id);
    // NOTE(review): splitSentences already drops fragments longer than
    // MAX_SENTENCE_CHARS, so this truncation branch looks unreachable.
    const content = sentence.length > MAX_SENTENCE_CHARS ? `${sentence.slice(0, MAX_SENTENCE_CHARS - 1)}\u2026` : sentence;
    cards.push({
      // Content-addressed id: re-extracting the same memory yields identical
      // ids, so INSERT OR IGNORE dedupes on re-runs.
      id: stableId(row.id, type, content),
      memory_id: row.id,
      agent_id: row.agent_id,
      session_id: row.session_id,
      project_name: row.project_name ?? null,
      timestamp: row.timestamp,
      card_type: type,
      subject,
      predicate: predicateFor(type),
      object: content,
      content,
      source_ref: row.id,
      // Heuristic confidence: untyped "fact" cards score slightly lower.
      confidence: type === "fact" ? 0.55 : 0.65
    });
    if (cards.length >= MAX_CARDS_PER_MEMORY) break;
  }
  return cards;
}
3593
/**
 * Extract cards from every row and persist them in a single batched write.
 * INSERT OR IGNORE plus content-addressed ids make re-runs idempotent.
 * Returns the number of cards extracted (not necessarily newly inserted).
 */
async function insertMemoryCardsForBatch(rows) {
  const cards = rows.flatMap(extractMemoryCards);
  if (cards.length === 0) return 0;
  const now = (/* @__PURE__ */ new Date()).toISOString();
  const client = getClient();
  const stmts = cards.map((card) => ({
    sql: `INSERT OR IGNORE INTO memory_cards
          (id, memory_id, agent_id, session_id, project_name, timestamp, card_type,
           subject, predicate, object, content, source_ref, confidence, active, created_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1, ?)`,
    args: [
      card.id,
      card.memory_id,
      card.agent_id,
      card.session_id,
      card.project_name,
      card.timestamp,
      card.card_type,
      card.subject,
      card.predicate,
      card.object,
      card.content,
      card.source_ref,
      card.confidence,
      now
    ]
  }));
  // Single transaction-style batch in write mode.
  await client.batch(stmts, "write");
  return cards.length;
}
3623
/**
 * Build an FTS prefix-match expression from free text. Tokens shorter than 3
 * chars (before or after stripping non-alphanumerics) are dropped, at most 12
 * terms are kept, and each becomes a `term*` prefix query. Three or more terms
 * are AND-ed for precision; one or two are OR-ed for recall. Returns null when
 * no usable term remains.
 */
function buildMatchExpr(queryText) {
  const terms = [];
  for (const raw of queryText.toLowerCase().split(/\s+/)) {
    if (raw.length < 3) continue;
    const term = raw.replace(/[^a-z0-9_]/g, "");
    if (term.length >= 3) terms.push(term);
    if (terms.length === 12) break;
  }
  if (terms.length === 0) return null;
  const joiner = terms.length >= 3 ? " AND " : " OR ";
  return terms.map((term) => `${term}*`).join(joiner);
}
3628
/**
 * Full-text search over an agent's memory cards, ranked by FTS relevance.
 * Optional filters: options.projectName, options.since (ISO timestamp),
 * options.limit (default 10). Returns [] when the query yields no usable
 * match expression. Results are shaped like memory rows — presumably so they
 * can merge with regular memory search results; verify against callers.
 */
async function searchMemoryCards(queryText, agentId, options) {
  const limit = options?.limit ?? 10;
  const matchExpr = buildMatchExpr(queryText);
  if (!matchExpr) return [];
  let sql = `SELECT c.id, c.memory_id, c.agent_id, c.session_id, c.project_name,
                    c.timestamp, c.card_type, c.content, c.source_ref, c.confidence
             FROM memory_cards c
             JOIN memory_cards_fts fts ON c.rowid = fts.rowid
             WHERE memory_cards_fts MATCH ?
               AND c.agent_id = ?
               AND COALESCE(c.active, 1) = 1`;
  const args = [matchExpr, agentId];
  if (options?.projectName) {
    sql += ` AND c.project_name = ?`;
    args.push(options.projectName);
  }
  if (options?.since) {
    sql += ` AND c.timestamp >= ?`;
    args.push(options.since);
  }
  // FTS5 `rank`: best matches first.
  sql += ` ORDER BY rank LIMIT ?`;
  args.push(limit);
  const result = await getClient().execute({ sql, args });
  return result.rows.map((row) => ({
    // Prefixed id distinguishes card hits from raw memory hits.
    id: `card:${String(row.id)}`,
    agent_id: String(row.agent_id),
    agent_role: "memory_card",
    session_id: String(row.session_id),
    timestamp: String(row.timestamp),
    tool_name: `memory_card:${String(row.card_type)}`,
    project_name: row.project_name == null ? "" : String(row.project_name),
    has_error: false,
    raw_text: `[${String(row.card_type)}] ${String(row.content)}
Source memory: ${String(row.source_ref ?? row.memory_id)}`,
    vector: [],
    // Fixed importance/status for synthesized card results.
    importance: 6,
    status: "active",
    confidence: Number(row.confidence ?? 0.6),
    last_accessed: String(row.timestamp)
  }));
}
3669
// Extractor limits: at most 6 cards per memory row, and sentences longer than
// 360 chars are excluded from card extraction.
var MAX_CARDS_PER_MEMORY, MAX_SENTENCE_CHARS;
var init_memory_cards = __esm({
  "src/lib/memory-cards.ts"() {
    "use strict";
    init_database();
    MAX_CARDS_PER_MEMORY = 6;
    MAX_SENTENCE_CHARS = 360;
  }
});
3678
+
3679
+ // src/lib/agentic-ontology.ts
3680
+ var agentic_ontology_exports = {};
3681
+ __export(agentic_ontology_exports, {
3682
+ clean: () => clean,
3683
+ extractGoalCandidates: () => extractGoalCandidates,
3684
+ inferIntention: () => inferIntention,
3685
+ inferOntologyEventType: () => inferOntologyEventType,
3686
+ inferOutcome: () => inferOutcome,
3687
+ inferSemanticLabel: () => inferSemanticLabel,
3688
+ insertOntologyForBatch: () => insertOntologyForBatch,
3689
+ insertOntologyForMemory: () => insertOntologyForMemory,
3690
+ ontologyPayload: () => ontologyPayload,
3691
+ stableId: () => stableId2
3692
+ });
3693
+ import { createHash as createHash3 } from "crypto";
3694
/**
 * Deterministic 32-hex-char id from arbitrary parts. Null/undefined parts
 * normalize to "" and parts are "::"-joined before hashing, so the same
 * logical key always produces the same id.
 */
function stableId2(...parts) {
  const key = parts.map((part) => String(part ?? "")).join("::");
  return createHash3("sha256").update(key).digest("hex").slice(0, 32);
}
3697
/**
 * Sanitize free text for labeling: remove NUL bytes, blank out fenced code
 * blocks, collapse whitespace, trim, and cap at `max` chars (default 240).
 */
function clean(text, max = 240) {
  const noNul = text.replace(/\u0000/g, "");
  const noFences = noNul.replace(/```[\s\S]*?```/g, " ");
  return noFences.replace(/\s+/g, " ").trim().slice(0, max);
}
3700
/**
 * Classify a memory row into an ontology event type. Rules are ordered:
 * explicit errors dominate, then textual signals, then the tool shape, ending
 * in the generic "memory_observation".
 */
function inferOntologyEventType(row) {
  const lower = row.raw_text.toLowerCase();
  if (row.has_error) return "error";
  if (/\b(done|complete|completed|fixed|resolved|shipped|deployed|pushed|published)\b/.test(lower)) {
    return "milestone";
  }
  if (/\b(blocked|failed|error|bug|regression|broken)\b/.test(lower)) return "problem";
  if (/\b(decided|decision|adr|we chose|approved|rejected)\b/.test(lower)) return "decision";
  if (/\b(goal|need to|we need|want to|trying to|objective)\b/.test(lower)) return "goal_signal";
  const fileTools = ["Bash", "Read", "Edit", "Write", "Grep", "Glob"];
  if (fileTools.includes(row.tool_name)) return "tool_action";
  if (row.tool_name.startsWith("memory_card")) return "memory_card";
  return "memory_observation";
}
3711
/**
 * Infer the actor's intention for a memory row. Precedence: an explicit
 * row.intent; then goal phrasing mined from the text; then a generic
 * "<tool> during <project>" description for known tools; else null.
 */
function inferIntention(row) {
  if (row.intent) return clean(row.intent, 220);
  const body = clean(row.raw_text, 1e3);
  const goalPatterns = [
    /(?:we need to|need to|let'?s|i want to|we should|goal is to|objective is to|trying to)\s+([^.!?\n]{8,220})/i,
    /(?:so that|in order to)\s+([^.!?\n]{8,220})/i,
    /(?:task|plan):\s*([^.!?\n]{8,220})/i
  ];
  for (const pattern of goalPatterns) {
    const hit = body.match(pattern);
    if (hit?.[1]) return clean(hit[1], 220);
  }
  const knownTools = ["Bash", "Read", "Edit", "Write", "Grep", "Glob"];
  if (knownTools.includes(row.tool_name)) {
    return `${row.tool_name} during ${row.project_name}`;
  }
  return null;
}
3728
/**
 * Infer the outcome of a memory row. Precedence: an explicit row.outcome;
 * then the error flag; then success/failure/risk vocabulary in the text;
 * else null (no discernible outcome).
 */
function inferOutcome(row) {
  if (row.outcome) return clean(row.outcome, 220);
  if (row.has_error) return "error";
  const lower = row.raw_text.toLowerCase();
  const signals = [
    [/\b(done|complete|completed|fixed|resolved|shipped|deployed|pushed|published|passed)\b/, "success_signal"],
    [/\b(blocked|failed|error|regression|broken|not working|could not)\b/, "failure_signal"],
    [/\b(warning|risk|concern|caveat)\b/, "risk_signal"]
  ];
  for (const [pattern, label] of signals) {
    if (pattern.test(lower)) return label;
  }
  return null;
}
3737
/**
 * Mine up to 3 goal statements from a memory row's text using goal-phrase
 * patterns ("we need to …", "so that …", …). Candidates are cleaned,
 * length-bounded (>= 12 chars after cleaning) and case-insensitively deduped.
 */
function extractGoalCandidates(row) {
  const text = clean(row.raw_text, 1600);
  const patterns = [
    /(?:we need to|need to|i want to|we should|goal is to|objective is to|trying to|let'?s)\s+([^.!?\n]{12,220})/gi,
    /(?:success means|success criteria|so that)\s+([^.!?\n]{12,220})/gi
  ];
  const out = [];
  for (const pattern of patterns) {
    for (const m of text.matchAll(pattern)) {
      const candidate = clean(m[1] ?? "", 220);
      if (candidate.length >= 12 && !out.some((x) => x.toLowerCase() === candidate.toLowerCase())) out.push(candidate);
      // Hard cap: three goals per memory row.
      if (out.length >= 3) return out;
    }
  }
  return out;
}
3753
/**
 * Clean each value (220-char cap), drop empties, then dedupe
 * case-insensitively preserving first-seen order, up to `max` entries.
 */
function uniq(values, max = 6) {
  const out = [];
  for (const value of values.map((v) => clean(v, 220)).filter(Boolean)) {
    if (!out.some((x) => x.toLowerCase() === value.toLowerCase())) out.push(value);
    if (out.length >= max) break;
  }
  return out;
}
3761
/**
 * Collect capture group 1 (or the whole match) from each pattern over the
 * text, stopping once `max` raw hits are gathered, then dedupe via uniq().
 */
function extractMatches(text, patterns, max = 5) {
  const out = [];
  for (const pattern of patterns) {
    for (const match of text.matchAll(pattern)) {
      const value = match[1] ?? match[0];
      if (value) out.push(value);
      // NOTE(review): the cap applies before dedup, so the result can contain
      // fewer than `max` items even when more distinct matches exist later.
      if (out.length >= max) return uniq(out, max);
    }
  }
  return uniq(out, max);
}
3772
/**
 * Deterministic (no-LLM) semantic labeling of a memory row: classifies the
 * event, mines goals/milestones/problems/decisions/actors/temporal anchors/
 * next actions from the text, derives an overall impact, and scores
 * confidence by how many distinct signals were found.
 */
function inferSemanticLabel(row) {
  const text = clean(row.raw_text, 2400);
  const eventType = inferOntologyEventType(row);
  const intention = inferIntention(row);
  const outcome = inferOutcome(row);
  const goals = extractGoalCandidates(row);
  const milestones = extractMatches(text, [
    /\b(?:completed|finished|fixed|resolved|shipped|deployed|published|pushed|passed)\b([^.!?\n]{0,180})/gi,
    /(?:milestone|done):\s*([^.!?\n]{8,220})/gi
  ]);
  const problems = extractMatches(text, [
    /\b(?:blocked by|failed because|bug|regression|broken|not working|error)\b([^.!?\n]{0,180})/gi,
    /(?:problem|issue|risk):\s*([^.!?\n]{8,220})/gi
  ]);
  const decisions = extractMatches(text, [
    /(?:decided|decision|adr|we chose|approved|rejected)\s+([^.!?\n]{8,220})/gi
  ]);
  // FIX: the timestamp tail class previously read [0-9:.+-Z], where the
  // unescaped `+-Z` is a character RANGE (ASCII 0x2B-0x5A) that also matches
  // uppercase letters and punctuation, over-capturing text after a date.
  // Escaping the dash restricts it to ISO-8601 time characters.
  const temporalAnchors = extractMatches(text, [
    /\b(\d{4}-\d{2}-\d{2}(?:[T ][0-9:.+\-Z]+)?)\b/g,
    /\b(today|yesterday|tomorrow|this week|next week|last week|morning|afternoon|tonight)\b/gi
  ], 8);
  const nextActions = extractMatches(text, [
    /(?:next|todo|follow[- ]?up|remaining|need to)\s*:?\s*([^.!?\n]{8,220})/gi
  ]);
  const actors = uniq([
    row.agent_id,
    ...extractMatches(text, [/\b(?:agent|employee|owner|assignee)[:= ]+([a-zA-Z][a-zA-Z0-9_-]{1,40})/gi], 5)
  ], 6);
  // Without explicit milestone/problem phrases, fall back to the outcome
  // signal, using a trimmed excerpt of the text as evidence.
  const successSignals = milestones.length ? milestones : outcome === "success_signal" ? [clean(text, 180)] : [];
  const failureSignals = problems.length ? problems : outcome === "failure_signal" || row.has_error ? [clean(text, 180)] : [];
  const impact = successSignals.length && failureSignals.length ? "mixed" : failureSignals.length ? "negative" : successSignals.length ? "positive" : "neutral";
  const signalCount = goals.length + milestones.length + problems.length + decisions.length + nextActions.length;
  return {
    labeler: "deterministic",
    schemaVersion: 1,
    eventType,
    intention,
    outcome,
    impact,
    // Base 0.45, +0.08 per extracted signal, +0.1 each for having an
    // intention/outcome, capped at 0.95.
    confidence: Math.min(0.95, 0.45 + signalCount * 0.08 + (intention ? 0.1 : 0) + (outcome ? 0.1 : 0)),
    goals,
    milestones,
    problems,
    decisions,
    actors,
    temporalAnchors,
    successSignals,
    failureSignals,
    nextActions,
    summary: clean(text, 280)
  };
}
3824
/**
 * Build the JSON payload stored on an agent_events row: provenance fields
 * plus the full deterministic semantic label.
 */
function ontologyPayload(row) {
  const semantic = inferSemanticLabel(row);
  return {
    tool_name: row.tool_name,
    memory_version: row.version ?? null,
    domain: row.domain ?? null,
    // Trajectories are stored as JSON text; malformed JSON degrades to a
    // truncated raw string via safeJson.
    trajectory: row.trajectory ? safeJson(row.trajectory) : null,
    semantic
  };
}
3834
/**
 * Best-effort JSON parse. Malformed input degrades to the raw string,
 * truncated to 1000 chars so oversized blobs cannot bloat stored payloads.
 */
function safeJson(value) {
  let parsed;
  try {
    parsed = JSON.parse(value);
  } catch {
    parsed = value.slice(0, 1e3);
  }
  return parsed;
}
3841
/**
 * Return the caller-supplied DB client when given; otherwise lazily
 * initialize the bundled database module (esbuild __esm idiom) and use the
 * shared client.
 */
async function resolveClient(client) {
  if (client) return client;
  const { getClient: getClient2 } = await Promise.resolve().then(() => (init_database(), database_exports));
  return getClient2();
}
3846
/**
 * Project a single memory row into the agentic ontology tables:
 *  1. upsert its session (bumping last_event_at / event_count),
 *  2. insert an agent_events row (id is content-addressed, OR IGNORE),
 *  3. upsert the deterministic semantic label,
 *  4. upsert any mined goals and link them to the memory and the event.
 * Statement order matters: sessions/events exist before labels/links
 * reference them. All ids are stable, so re-processing is idempotent.
 */
async function insertOntologyForMemory(row, client) {
  const db = await resolveClient(client);
  const occurredAt = row.timestamp;
  // Prefer the memory version as the ordering index; fall back to epoch seconds.
  const sequence = Number(row.version ?? 0) || Math.floor(new Date(occurredAt).getTime() / 1e3);
  const eventType = inferOntologyEventType(row);
  const intention = inferIntention(row);
  const outcome = inferOutcome(row);
  const eventId = stableId2("event", row.id);
  const now = (/* @__PURE__ */ new Date()).toISOString();
  await db.execute({
    sql: `INSERT INTO agent_sessions (id, agent_id, project_name, started_at, last_event_at, event_count, properties)
          VALUES (?, ?, ?, ?, ?, 1, ?)
          ON CONFLICT(id) DO UPDATE SET last_event_at = MAX(last_event_at, excluded.last_event_at),
          event_count = event_count + 1`,
    args: [row.session_id, row.agent_id, row.project_name, occurredAt, occurredAt, JSON.stringify({ agent_role: row.agent_role })]
  });
  await db.execute({
    sql: `INSERT OR IGNORE INTO agent_events
          (id, event_type, occurred_at, sequence_index, actor_agent_id, agent_role, project_name,
           session_id, task_id, goal_id, parent_event_id, intention, outcome, evidence_memory_id,
           impact, payload, created_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, NULL, NULL, ?, ?, ?, ?, ?, ?)`,
    args: [
      eventId,
      eventType,
      occurredAt,
      sequence,
      row.agent_id,
      row.agent_role,
      row.project_name,
      row.session_id,
      row.task_id ?? null,
      intention,
      outcome,
      row.id,
      // Impact here is coarser than the semantic label's (no "mixed").
      row.has_error ? "negative" : outcome === "success_signal" ? "positive" : "neutral",
      JSON.stringify(ontologyPayload(row)),
      now
    ]
  });
  const semantic = inferSemanticLabel(row);
  await db.execute({
    sql: `INSERT INTO agent_semantic_labels
          (id, source_memory_id, event_id, labeler, schema_version, confidence, labels, created_at, updated_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
          ON CONFLICT(id) DO UPDATE SET confidence = excluded.confidence,
          labels = excluded.labels, updated_at = excluded.updated_at`,
    args: [
      // One label row per (memory, labeler, schema) — re-labeling updates in place.
      stableId2("semantic", row.id, semantic.labeler, semantic.schemaVersion),
      row.id,
      eventId,
      semantic.labeler,
      semantic.schemaVersion,
      semantic.confidence,
      JSON.stringify(semantic),
      now,
      now
    ]
  });
  for (const statement of extractGoalCandidates(row)) {
    // Goal identity is (project, lowercased statement), deduping restatements.
    const goalId = stableId2("goal", row.project_name, statement.toLowerCase());
    await db.execute({
      sql: `INSERT INTO agent_goals
            (id, statement, owner_agent_id, project_name, status, priority, success_criteria,
             parent_goal_id, due_at, achieved_at, supersedes_id, created_at, updated_at, source_memory_id)
            VALUES (?, ?, ?, ?, 'open', 5, NULL, NULL, NULL, NULL, NULL, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET updated_at = excluded.updated_at`,
      args: [goalId, statement, row.agent_id, row.project_name, now, now, row.id]
    });
    await db.execute({
      sql: `INSERT OR IGNORE INTO agent_goal_links
            (id, goal_id, link_type, target_id, target_type, created_at)
            VALUES (?, ?, 'evidence', ?, 'memory', ?)`,
      args: [stableId2("goal_link", goalId, row.id, "memory"), goalId, row.id, now]
    });
    await db.execute({
      sql: `INSERT OR IGNORE INTO agent_goal_links
            (id, goal_id, link_type, target_id, target_type, created_at)
            VALUES (?, ?, 'event', ?, 'event', ?)`,
      args: [stableId2("goal_link", goalId, eventId, "event"), goalId, eventId, now]
    });
  }
}
3929
/**
 * Best-effort batch projection: process rows sequentially and count the ones
 * that succeeded. A failing row is deliberately swallowed so one bad memory
 * cannot abort the rest of a backfill.
 */
async function insertOntologyForBatch(rows, client) {
  const db = await resolveClient(client);
  let count = 0;
  for (const row of rows) {
    try {
      await insertOntologyForMemory(row, db);
      count++;
    } catch {
      // Intentionally ignored: skip the bad row, continue the batch.
    }
  }
  return count;
}
3941
// Lazy module initializer (esbuild __esm wrapper). The module has no
// top-level state; the database module is resolved on demand in resolveClient.
var init_agentic_ontology = __esm({
  "src/lib/agentic-ontology.ts"() {
    "use strict";
  }
});
3946
+
3947
+ // src/lib/store.ts
3948
+ init_memory();
3949
+ init_database();
3950
+
3951
+ // src/lib/keychain.ts
3952
+ import { readFile as readFile3, writeFile as writeFile3, unlink, mkdir as mkdir3, chmod as chmod2 } from "fs/promises";
3953
+ import { existsSync as existsSync6, statSync as statSync2 } from "fs";
3954
+ import { execSync as execSync2 } from "child_process";
3955
+ import path6 from "path";
3956
+ import os5 from "os";
3957
+ var SERVICE = "exe-os";
3958
+ var LEGACY_SERVICE = "exe-mem";
3959
+ var ACCOUNT = "master-key";
3960
/**
 * Resolve the exe-os key directory. Explicit overrides win — EXE_OS_DIR
 * first, then the legacy EXE_MEM_DIR — falling back to ~/.exe-os.
 */
function getKeyDir() {
  const override = process.env.EXE_OS_DIR ?? process.env.EXE_MEM_DIR;
  return override ?? path6.join(os5.homedir(), ".exe-os");
}
3963
/**
 * Absolute path of the master key file inside the key directory.
 */
function getKeyPath() {
  const dir = getKeyDir();
  return path6.join(dir, "master.key");
}
3966
/**
 * Opt-out switch for OS keychain integration: setting
 * EXE_OS_DISABLE_NATIVE_KEYCHAIN=1 forces file-based key storage.
 */
function nativeKeychainAllowed() {
  const disabled = process.env.EXE_OS_DISABLE_NATIVE_KEYCHAIN === "1";
  return !disabled;
}
3969
// Memoized result of the secret-tool availability probe (null = not yet probed).
var linuxSecretAvailability = null;
/**
 * Whether secret-tool is usable: Linux only, native keychain allowed, the
 * binary exists, AND a real lookup succeeds (headless servers often have the
 * binary but no usable D-Bus secret service). Result is cached per process.
 */
function linuxSecretAvailable() {
  if (!nativeKeychainAllowed()) return false;
  if (process.platform !== "linux") return false;
  if (linuxSecretAvailability !== null) return linuxSecretAvailability;
  try {
    execSync2("command -v secret-tool >/dev/null 2>&1", { timeout: 1e3 });
  } catch {
    linuxSecretAvailability = false;
    return false;
  }
  try {
    // Exercise the D-Bus session with a harmless search before trusting it.
    execSync2("secret-tool search --all exe-os probe >/dev/null 2>&1", { timeout: 1e3 });
    linuxSecretAvailability = true;
  } catch {
    linuxSecretAvailability = false;
  }
  return linuxSecretAvailability;
}
3988
/**
 * Linux-only trust check for a plain key file: the file must be a regular
 * file with NO group/other permission bits, and the caller must either be
 * root or the file must sit inside the explicitly configured EXE_OS_DIR.
 * Any stat failure (missing file, etc.) means "not trusted".
 */
function isRootOnlyTrustedServerKeyFile(keyPath) {
  if (process.platform !== "linux") return false;
  try {
    const uid = typeof os5.userInfo().uid === "number" ? os5.userInfo().uid : -1;
    const st = statSync2(keyPath);
    // 63 === 0o077: any group/other read/write/execute bit disqualifies it.
    if (!st.isFile() || (st.mode & 63) !== 0) return false;
    if (uid === 0) return true;
    const exeOsDir = process.env.EXE_OS_DIR;
    // Non-root is trusted only when the key lives under the explicit dir.
    return Boolean(exeOsDir && path6.resolve(keyPath).startsWith(path6.resolve(exeOsDir) + path6.sep));
  } catch {
    return false;
  }
}
4001
/**
 * Read the master key from the macOS keychain via `security`. Returns null
 * on any failure: keychain disabled, not macOS, or entry absent.
 */
function macKeychainGet(service = SERVICE) {
  if (!nativeKeychainAllowed()) return null;
  if (process.platform !== "darwin") return null;
  try {
    // -w prints only the password; stderr is discarded for the not-found case.
    return execSync2(
      `security find-generic-password -s "${service}" -a "${ACCOUNT}" -w 2>/dev/null`,
      { encoding: "utf-8", timeout: 5e3 }
    ).trim();
  } catch {
    return null;
  }
}
4013
/**
 * Store the master key in the macOS keychain (delete-then-add, because
 * `security add-generic-password` fails on duplicates). Returns false when
 * the keychain is disabled, not on macOS, or the command fails.
 *
 * FIX: the secret was previously interpolated into a double-quoted shell
 * string (`-w "${value}"`), allowing command injection via `"`/`$`/backtick
 * characters in the value. All dynamic arguments are now single-quote
 * escaped before reaching the shell.
 */
function macKeychainSet(value, service = SERVICE) {
  if (!nativeKeychainAllowed()) return false;
  if (process.platform !== "darwin") return false;
  // POSIX single-quote escaping: close quote, emit literal ', reopen quote.
  const q = (s) => `'${String(s).replace(/'/g, `'\\''`)}'`;
  try {
    try {
      execSync2(
        `security delete-generic-password -s ${q(service)} -a ${q(ACCOUNT)} 2>/dev/null`,
        { timeout: 5e3 }
      );
    } catch {
      // No existing entry to delete — fine.
    }
    execSync2(
      `security add-generic-password -s ${q(service)} -a ${q(ACCOUNT)} -w ${q(value)}`,
      { timeout: 5e3 }
    );
    return true;
  } catch {
    return false;
  }
}
4033
/**
 * Remove the master key entry from the macOS keychain. Returns false when
 * the keychain is disabled, not on macOS, or the delete command fails.
 */
function macKeychainDelete(service = SERVICE) {
  if (!nativeKeychainAllowed()) return false;
  if (process.platform !== "darwin") return false;
  try {
    execSync2(
      `security delete-generic-password -s "${service}" -a "${ACCOUNT}" 2>/dev/null`,
      { timeout: 5e3 }
    );
    return true;
  } catch {
    return false;
  }
}
4046
/**
 * Read the master key from the Linux secret service via secret-tool.
 * Returns null when secret-tool is unavailable or the lookup fails.
 */
function linuxSecretGet(service = SERVICE) {
  if (!linuxSecretAvailable()) return null;
  try {
    return execSync2(
      `secret-tool lookup service "${service}" account "${ACCOUNT}" 2>/dev/null`,
      { encoding: "utf-8", timeout: 5e3 }
    ).trim();
  } catch {
    return null;
  }
}
4057
function linuxSecretSet(value, service = SERVICE) {
  // Store `value` via libsecret's `secret-tool` (GNOME keyring et al.).
  // Returns true on success, false when unavailable or the command fails.
  if (!linuxSecretAvailable()) return false;
  try {
    // NOTE(review): `value` is interpolated into a shell pipeline unescaped.
    // Callers appear to pass base64 key material (shell-safe charset), but
    // confirm no path can feed arbitrary strings here. `echo -n` behavior is
    // also shell-dependent; verify against the default /bin/sh on targets.
    execSync2(
      `echo -n "${value}" | secret-tool store --label="exe-os master key" service "${service}" account "${ACCOUNT}" 2>/dev/null`,
      { timeout: 5e3 }
    );
    return true;
  } catch {
    return false;
  }
}
4069
function linuxSecretDelete(service = SERVICE) {
  // Clear the secret via `secret-tool`. Gate on linuxSecretAvailable() for
  // consistency with linuxSecretGet/linuxSecretSet: it already covers the
  // nativeKeychainAllowed() + linux-platform checks and additionally verifies
  // that the `secret-tool` binary exists, so we no longer shell out (and
  // swallow an inevitable failure) when the tool is known to be missing.
  if (!linuxSecretAvailable()) return false;
  try {
    execSync2(
      `secret-tool clear service "${service}" account "${ACCOUNT}" 2>/dev/null`,
      { timeout: 5e3 }
    );
    return true;
  } catch {
    return false;
  }
}
4082
async function tryKeytar() {
  // Optionally load the `keytar` native module; null when native keychain use
  // is disabled or the module is not installed.
  if (!nativeKeychainAllowed()) return null;
  try {
    const mod = await import("keytar");
    return mod;
  } catch {
    return null;
  }
}
4090
// Marker prefixing machine-bound encrypted key files: "enc:<iv>:<tag>:<ciphertext>".
var ENCRYPTED_PREFIX = "enc:";
4091
function deriveMachineKey() {
  // Derive a stable 32-byte key (sha256 digest) from machine identity so a
  // key file encrypted here can only be decrypted on the same machine/user.
  // Returns null if any identity source is unavailable.
  try {
    const crypto2 = __require("crypto");
    const parts = [
      os5.hostname(),
      os5.userInfo().username,
      os5.arch(),
      os5.platform()
    ];
    // /etc/machine-id is stable across reboots and strengthens the binding on Linux.
    parts.push(process.platform === "linux" ? readMachineId() : "");
    return crypto2.createHash("sha256").update(parts.join("|")).digest();
  } catch {
    return null;
  }
}
4107
function readMachineId() {
  // Best-effort read of the systemd machine id; empty string when unreadable
  // (non-systemd distro, container without the file, permission error).
  try {
    const fs = __require("fs");
    return fs.readFileSync("/etc/machine-id", "utf-8").trim();
  } catch {
    return "";
  }
}
4115
function encryptWithMachineKey(plaintext, machineKey) {
  // AES-256-GCM encrypt `plaintext` with the derived machine key.
  // Output format: "enc:<iv b64>:<auth tag b64>:<ciphertext b64>".
  const crypto2 = __require("crypto");
  const iv = crypto2.randomBytes(12);
  const cipher = crypto2.createCipheriv("aes-256-gcm", machineKey, iv);
  const body = cipher.update(plaintext, "utf-8", "base64") + cipher.final("base64");
  const tag = cipher.getAuthTag().toString("base64");
  return [ENCRYPTED_PREFIX + iv.toString("base64"), tag, body].join(":");
}
4124
function decryptWithMachineKey(encrypted, machineKey) {
  // Inverse of encryptWithMachineKey. Returns the plaintext, or null on any
  // malformed input or GCM authentication failure.
  if (!encrypted.startsWith(ENCRYPTED_PREFIX)) return null;
  try {
    const crypto2 = __require("crypto");
    const segments = encrypted.slice(ENCRYPTED_PREFIX.length).split(":");
    if (segments.length !== 3) return null;
    const iv = Buffer.from(segments[0], "base64");
    const authTag = Buffer.from(segments[1], "base64");
    const decipher = crypto2.createDecipheriv("aes-256-gcm", machineKey, iv);
    decipher.setAuthTag(authTag);
    // final() throws when the auth tag does not verify -> caught below.
    return decipher.update(segments[2], "base64", "utf-8") + decipher.final("utf-8");
  } catch {
    return null;
  }
}
4142
async function writeMachineBoundFileFallback(b64) {
  // Persist the base64 master key to the key file with owner-only permissions.
  // Prefers machine-bound encryption; falls back to plaintext only when no
  // machine key can be derived. Returns "encrypted" or "plaintext".
  await mkdir3(getKeyDir(), { recursive: true });
  const keyPath = getKeyPath();
  const machineKey = deriveMachineKey();
  const payload = machineKey ? encryptWithMachineKey(b64, machineKey) : b64;
  await writeFile3(keyPath, payload + "\n", "utf-8");
  await chmod2(keyPath, 384); // 384 === 0o600: owner read/write only
  return machineKey ? "encrypted" : "plaintext";
}
4157
async function getMasterKey() {
  // Resolve the master encryption key, trying in order:
  //   1) native keychain (macOS Keychain / linux secret-tool), including a
  //      one-time migration from the legacy "exe-mem" service name;
  //   2) the optional keytar module (migrated into the native keychain when
  //      possible);
  //   3) the on-disk key file (machine-bound encrypted, or legacy plaintext).
  // Returns the raw key Buffer, or null when no key can be recovered.
  let nativeValue = macKeychainGet() ?? linuxSecretGet();
  if (!nativeValue) {
    // Legacy service-name migration: copy the entry to the new service, then
    // delete the old one only if the copy succeeded.
    const legacyValue = macKeychainGet(LEGACY_SERVICE) ?? linuxSecretGet(LEGACY_SERVICE);
    if (legacyValue) {
      const migrated = macKeychainSet(legacyValue) || linuxSecretSet(legacyValue);
      if (migrated) {
        macKeychainDelete(LEGACY_SERVICE);
        linuxSecretDelete(LEGACY_SERVICE);
        process.stderr.write("[keychain] Migrated keychain service from exe-mem to exe-os.\n");
      }
      nativeValue = legacyValue;
    }
  }
  if (nativeValue) {
    // Keychain entries store the key base64-encoded.
    return Buffer.from(nativeValue, "base64");
  }
  const keytar = await tryKeytar();
  if (keytar) {
    try {
      const keytarValue = await keytar.getPassword(SERVICE, ACCOUNT);
      const legacyKeytarValue = keytarValue ?? await keytar.getPassword(LEGACY_SERVICE, ACCOUNT);
      if (legacyKeytarValue) {
        // Prefer the native keychain going forward; keytar is only a bridge.
        const migrated = macKeychainSet(legacyKeytarValue) || linuxSecretSet(legacyKeytarValue);
        if (migrated) {
          process.stderr.write("[keychain] Migrated key from keytar to native keychain.\n");
          try {
            await keytar.deletePassword(LEGACY_SERVICE, ACCOUNT);
          } catch {
          }
        }
        return Buffer.from(legacyKeytarValue, "base64");
      }
    } catch {
    }
  }
  // Final fallback: the on-disk key file.
  const keyPath = getKeyPath();
  if (!existsSync6(keyPath)) {
    process.stderr.write(
      `[keychain] Key not found at ${keyPath} (HOME=${os5.homedir()}, EXE_OS_DIR=${process.env.EXE_OS_DIR ?? "unset"})
`
    );
    return null;
  }
  try {
    const content = (await readFile3(keyPath, "utf-8")).trim();
    let b64Value;
    if (content.startsWith(ENCRYPTED_PREFIX)) {
      // Machine-bound encrypted file: decrypt with the derived machine key.
      const machineKey = deriveMachineKey();
      if (!machineKey) {
        process.stderr.write("[keychain] Cannot derive machine key to decrypt stored key.\n");
        return null;
      }
      const decrypted = decryptWithMachineKey(content, machineKey);
      if (!decrypted) {
        process.stderr.write(
          "[keychain] Key decryption failed \u2014 machine may have changed.\n Use your 24-word recovery phrase during setup: exe-os setup\n"
        );
        return null;
      }
      b64Value = decrypted;
    } else {
      // Legacy plaintext file: its contents are the base64 key itself.
      b64Value = content;
    }
    const key = Buffer.from(b64Value, "base64");
    // Root-owned, 0600, trusted-location plaintext files are left in place
    // (headless-server scenario; see isRootOnlyTrustedServerKeyFile).
    if (!content.startsWith(ENCRYPTED_PREFIX) && isRootOnlyTrustedServerKeyFile(keyPath)) {
      return key;
    }
    // Otherwise try to move the key into the native keychain and remove the file.
    const migrated = macKeychainSet(b64Value) || linuxSecretSet(b64Value);
    if (migrated) {
      process.stderr.write("[keychain] Migrated key from file to native keychain.\n");
      try {
        await unlink(keyPath);
        process.stderr.write("[keychain] Removed legacy master.key file after native keychain migration.\n");
      } catch {
      }
    } else if (!content.startsWith(ENCRYPTED_PREFIX)) {
      // No keychain available: at least upgrade plaintext to the encrypted format.
      const fallback = await writeMachineBoundFileFallback(b64Value);
      if (fallback === "encrypted") {
        process.stderr.write("[keychain] Upgraded legacy plaintext master.key to machine-bound encrypted fallback.\n");
      } else {
        process.stderr.write(
          "[keychain] WARNING: Could not encrypt legacy master.key \u2014 plaintext fallback remains.\n"
        );
      }
    }
    return key;
  } catch (err) {
    process.stderr.write(
      `[keychain] Key read failed at ${keyPath}: ${err instanceof Error ? err.message : String(err)}
`
    );
    return null;
  }
}
4252
+
4253
// src/lib/store.ts
init_config(); // bundler lazy-init shim: run the config module's setup before store code

// src/lib/state-bus.ts
4257
var StateBus = class {
  // event type -> Set of handlers subscribed to that type
  handlers = /* @__PURE__ */ new Map();
  // handlers that receive every event regardless of type
  globalHandlers = /* @__PURE__ */ new Set();
  /** Emit an event to all subscribers */
  emit(event) {
    // Handler exceptions are swallowed so one faulty subscriber cannot
    // prevent delivery to the rest.
    const deliver = (handler) => {
      try {
        handler(event);
      } catch {
      }
    };
    const typed = this.handlers.get(event.type);
    if (typed) {
      for (const handler of typed) deliver(handler);
    }
    for (const handler of this.globalHandlers) deliver(handler);
  }
  /** Subscribe to a specific event type */
  on(type, handler) {
    let bucket = this.handlers.get(type);
    if (bucket === undefined) {
      bucket = /* @__PURE__ */ new Set();
      this.handlers.set(type, bucket);
    }
    bucket.add(handler);
  }
  /** Subscribe to ALL events */
  onAny(handler) {
    this.globalHandlers.add(handler);
  }
  /** Unsubscribe from a specific event type */
  off(type, handler) {
    const bucket = this.handlers.get(type);
    if (bucket) bucket.delete(handler);
  }
  /** Unsubscribe from ALL events */
  offAny(handler) {
    this.globalHandlers.delete(handler);
  }
  /** Remove all listeners */
  clear() {
    this.handlers.clear();
    this.globalHandlers.clear();
  }
};
var orgBus = new StateBus();
4304
+
4305
+ // src/lib/memory-write-governor.ts
4306
+ import { createHash } from "crypto";
4307
// Memory types whose supersession chains get extra curation in
// runPostWriteMemoryHygiene (old record archived, importance/lineage merged).
var HIGH_VALUE_SUPERSESSION_TYPES = /* @__PURE__ */ new Set([
  "decision",
  "adr",
  "behavior",
  "procedure"
]);
4313
async function runPostWriteMemoryHygiene(memoryId) {
  // Post-write cleanup for a single memory:
  //   1) soft-delete active duplicates (same content_hash, agent, project,
  //      and memory_type);
  //   2) for high-value types with a supersedes_id, archive the superseded
  //      memory and carry its importance/lineage onto the new one.
  // Best-effort: any failure is logged to stderr and swallowed.
  try {
    const { getClient: getClient2 } = await Promise.resolve().then(() => (init_database(), database_exports));
    const client = getClient2();
    const current = await client.execute({
      sql: `SELECT id, agent_id, project_name, memory_type, content_hash, supersedes_id,
                   importance, timestamp
            FROM memories
            WHERE id = ?
            LIMIT 1`,
      args: [memoryId]
    });
    const row = current.rows[0];
    // Memory may have been deleted between scheduling and execution.
    if (!row) return;
    const memoryType = String(row.memory_type ?? "raw");
    const contentHash = row.content_hash ? String(row.content_hash) : null;
    const agentId = String(row.agent_id);
    const projectName = String(row.project_name);
    if (contentHash) {
      // Soft-delete older active copies with identical content within the
      // same agent/project/type scope; keep only the freshly written row.
      await client.execute({
        sql: `UPDATE memories
              SET status = 'deleted',
                  outcome = COALESCE(outcome, 'superseded')
              WHERE id != ?
                AND content_hash = ?
                AND agent_id = ?
                AND project_name = ?
                AND COALESCE(memory_type, 'raw') = ?
                AND COALESCE(status, 'active') = 'active'`,
        args: [memoryId, contentHash, agentId, projectName, memoryType]
      });
    }
    const supersedesId = row.supersedes_id ? String(row.supersedes_id) : null;
    if (supersedesId && HIGH_VALUE_SUPERSESSION_TYPES.has(memoryType)) {
      // Read the old record's importance so the new record never ends up
      // less important than what it replaces.
      const old = await client.execute({
        sql: `SELECT importance FROM memories WHERE id = ? LIMIT 1`,
        args: [supersedesId]
      });
      const oldImportance = Number(old.rows[0]?.importance ?? 0);
      const newImportance = Number(row.importance ?? 0);
      // Two statements, applied atomically as a write batch:
      //   archive the old record, then lift importance/parent on the new one.
      await client.batch([
        {
          sql: `UPDATE memories
                SET status = 'archived',
                    outcome = COALESCE(outcome, 'superseded')
                WHERE id = ?`,
          args: [supersedesId]
        },
        {
          sql: `UPDATE memories
                SET importance = MAX(COALESCE(importance, 5), ?),
                    parent_memory_id = COALESCE(parent_memory_id, ?)
                WHERE id = ?`,
          args: [Math.max(oldImportance, newImportance), supersedesId, memoryId]
        }
      ], "write");
    }
  } catch (err) {
    process.stderr.write(
      `[memory-governor] post-write hygiene failed for ${memoryId}: ${err instanceof Error ? err.message : String(err)}
`
    );
  }
}
4377
function schedulePostWriteMemoryHygiene(memoryIds) {
  // Defer per-memory hygiene to the next tick so the write path returns fast.
  // Opt-out via EXE_SKIP_MEMORY_HYGIENE=1; no-op for an empty id list.
  if (process.env.EXE_SKIP_MEMORY_HYGIENE === "1" || memoryIds.length === 0) return;
  const run = () => {
    // Fire-and-forget: each hygiene pass handles its own errors.
    void Promise.all(memoryIds.map((id) => runPostWriteMemoryHygiene(id)));
  };
  if (typeof setImmediate === "function") {
    setImmediate(run);
  } else {
    setTimeout(run, 0);
  }
}
4386
+
4387
// src/lib/store.ts
var INIT_MAX_RETRIES = 3; // retries allowed for SQLITE_BUSY during store init
var INIT_RETRY_DELAY_MS = 1e3; // base backoff, scaled linearly per attempt
4390
function isBusyError2(err) {
  // True when `err` looks like a SQLite lock-contention failure.
  if (!(err instanceof Error)) return false;
  const text = err.message.toLowerCase();
  return text.includes("sqlite_busy") || text.includes("database is locked");
}
4397
async function retryOnBusy2(fn, label) {
  // Run `fn`, retrying up to INIT_MAX_RETRIES times on SQLITE_BUSY with a
  // linearly increasing delay; any other error is rethrown immediately.
  let attempt = 0;
  for (;;) {
    try {
      return await fn();
    } catch (err) {
      if (!isBusyError2(err) || attempt === INIT_MAX_RETRIES) throw err;
      attempt += 1;
      process.stderr.write(
        `[store] SQLITE_BUSY during ${label}, retry ${attempt}/${INIT_MAX_RETRIES}\n`
      );
      await new Promise((resolve) => setTimeout(resolve, INIT_RETRY_DELAY_MS * attempt));
    }
  }
}
4412
var _pendingRecords = []; // memory rows buffered until the next flush
var _batchSize = 20; // flush threshold (record count)
var _flushIntervalMs = 1e4; // periodic flush interval (ms)
var _flushTimer = null; // interval handle; null when no timer is running
var _flushing = false; // re-entrancy guard for flushBatch
var _nextVersion = 1; // next memories.version value to assign
4418
async function initStore(options) {
  // Initialize the memory store: reset batching state, open the encrypted
  // Turso/SQLite database, ensure the schema, and (unless options.lightweight)
  // also set up sharding, the version counter, and global procedures.
  // Throws when no master encryption key can be resolved.
  if (_flushTimer !== null) {
    clearInterval(_flushTimer);
    _flushTimer = null;
  }
  // Drop any state left over from a previous init in the same process.
  _pendingRecords = [];
  _flushing = false;
  _batchSize = options?.batchSize ?? 20;
  _flushIntervalMs = options?.flushIntervalMs ?? 1e4;
  let dbPath = options?.dbPath;
  if (!dbPath) {
    const config = await loadConfig();
    dbPath = config.dbPath;
  }
  let masterKey = options?.masterKey ?? null;
  if (!masterKey) {
    masterKey = await getMasterKey();
    if (!masterKey) {
      throw new Error(
        "No encryption key found. Run /exe-setup to generate one."
      );
    }
  }
  const hexKey = masterKey.toString("hex");
  await initTurso({
    dbPath,
    encryptionKey: hexKey
  });
  // Schema creation can race with other processes holding the DB lock.
  await retryOnBusy2(() => ensureSchema(), "ensureSchema");
  // Daemon client is best-effort; the store works without it.
  try {
    const { initDaemonClient: initDaemonClient2 } = await Promise.resolve().then(() => (init_database(), database_exports));
    await initDaemonClient2();
  } catch {
  }
  if (!options?.lightweight) {
    // Shard manager is best-effort as well.
    try {
      const { initShardManager: initShardManager2 } = await Promise.resolve().then(() => (init_shard_manager(), shard_manager_exports));
      initShardManager2(hexKey);
    } catch {
    }
    const client = getClient();
    // Seed the monotonically increasing version counter from existing rows.
    const vResult = await retryOnBusy2(
      () => client.execute("SELECT MAX(version) as max_v FROM memories"),
      "version-query"
    );
    _nextVersion = (Number(vResult.rows[0]?.max_v) || 0) + 1;
    try {
      const { loadGlobalProcedures: loadGlobalProcedures2 } = await Promise.resolve().then(() => (init_global_procedures(), global_procedures_exports));
      await loadGlobalProcedures2();
    } catch {
    }
  }
}
4471
async function flushBatch() {
  // Flush all buffered memory rows to the main DB and, when sharding is
  // enabled, mirror them into per-project shard DBs. Returns the number of
  // rows written, or 0 when a flush is already running / nothing is pending.
  if (_flushing || _pendingRecords.length === 0) return 0;
  _flushing = true;
  try {
    const batch = _pendingRecords.slice(0);
    const client = getClient();
    // Assign contiguous version numbers starting after the current DB max.
    const vResult = await client.execute("SELECT MAX(version) as max_v FROM memories");
    let baseVersion = (Number(vResult.rows[0]?.max_v) || 0) + 1;
    for (const row of batch) {
      row.version = baseVersion++;
    }
    _nextVersion = baseVersion;
    // Build one INSERT statement per row. The vector column goes through
    // vector32(?) when an embedding is present, NULL otherwise; all optional
    // columns get explicit defaults so both SQL shapes share one column list.
    const buildStmt = (row) => {
      const hasVector = row.vector !== null;
      const taskId = row.task_id ?? null;
      const importance = row.importance ?? 5;
      const status = row.status ?? "active";
      const confidence = row.confidence ?? 0.7;
      const lastAccessed = row.last_accessed ?? row.timestamp;
      const workspaceId = row.workspace_id ?? null;
      const documentId = row.document_id ?? null;
      const userId = row.user_id ?? null;
      const charOffset = row.char_offset ?? null;
      const pageNumber = row.page_number ?? null;
      const sourcePath = row.source_path ?? null;
      const sourceType = row.source_type ?? null;
      const tier = row.tier ?? 3;
      const supersedesId = row.supersedes_id ?? null;
      const draft = row.draft ? 1 : 0;
      const memoryType = row.memory_type ?? "raw";
      const trajectory = row.trajectory ?? null;
      const contentHash = row.content_hash ?? null;
      const intent = row.intent ?? null;
      const outcome = row.outcome ?? null;
      const domain = row.domain ?? null;
      const referencedEntities = row.referenced_entities ?? null;
      const retrievalCount = row.retrieval_count ?? 0;
      const chainPosition = row.chain_position ?? null;
      const reviewStatus = row.review_status ?? null;
      const contextWindowPct = row.context_window_pct ?? null;
      const filePaths = row.file_paths ?? null;
      const commitHash = row.commit_hash ?? null;
      const durationMs = row.duration_ms ?? null;
      const tokenCost = row.token_cost ?? null;
      const audience = row.audience ?? null;
      const languageType = row.language_type ?? null;
      const parentMemoryId = row.parent_memory_id ?? null;
      const cols = `id, agent_id, agent_role, session_id, timestamp,
        tool_name, project_name,
        has_error, raw_text, vector, version, task_id, importance, status,
        confidence, last_accessed,
        workspace_id, document_id, user_id, char_offset, page_number,
        source_path, source_type, tier, supersedes_id, draft, memory_type, trajectory, content_hash,
        intent, outcome, domain, referenced_entities, retrieval_count,
        chain_position, review_status, context_window_pct, file_paths, commit_hash,
        duration_ms, token_cost, audience, language_type, parent_memory_id`;
      const metaArgs = [
        intent,
        outcome,
        domain,
        referencedEntities,
        retrievalCount,
        chainPosition,
        reviewStatus,
        contextWindowPct,
        filePaths,
        commitHash,
        durationMs,
        tokenCost,
        audience,
        languageType,
        parentMemoryId
      ];
      const baseArgs = [
        row.id,
        row.agent_id,
        row.agent_role,
        row.session_id,
        row.timestamp,
        row.tool_name,
        row.project_name,
        row.has_error,
        row.raw_text
      ];
      const sharedArgs = [
        row.version,
        taskId,
        importance,
        status,
        confidence,
        lastAccessed,
        workspaceId,
        documentId,
        userId,
        charOffset,
        pageNumber,
        sourcePath,
        sourceType,
        tier,
        supersedesId,
        draft,
        memoryType,
        trajectory,
        contentHash
      ];
      // Arg order must mirror the placeholder order:
      // baseArgs, [vector], sharedArgs, metaArgs.
      return {
        sql: hasVector ? `INSERT OR IGNORE INTO memories (${cols})
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, vector32(?), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` : `INSERT OR IGNORE INTO memories (${cols})
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
        args: hasVector ? [...baseArgs, vectorToBlob(row.vector), ...sharedArgs, ...metaArgs] : [...baseArgs, ...sharedArgs, ...metaArgs]
      };
    };
    // Primary write: all rows into the main DB in one batch.
    const globalClient = getClient();
    const globalStmts = batch.map(buildStmt);
    await globalClient.batch(globalStmts, "write");
    // Secondary projections (memory cards, ontology) are best-effort.
    try {
      const { insertMemoryCardsForBatch: insertMemoryCardsForBatch2 } = await Promise.resolve().then(() => (init_memory_cards(), memory_cards_exports));
      await insertMemoryCardsForBatch2(batch);
    } catch {
    }
    try {
      const { insertOntologyForBatch: insertOntologyForBatch2 } = await Promise.resolve().then(() => (init_agentic_ontology(), agentic_ontology_exports));
      await insertOntologyForBatch2(batch);
    } catch {
    }
    schedulePostWriteMemoryHygiene(batch.map((row) => row.id));
    // Only remove what we actually wrote; new rows may have been appended to
    // _pendingRecords while the awaits above were in flight.
    _pendingRecords.splice(0, batch.length);
    // Shard mirroring: grouped by project; failures are logged, never fatal.
    try {
      const { isShardingEnabled: isShardingEnabled2, getReadyShardClient: getReadyShardClient2 } = await Promise.resolve().then(() => (init_shard_manager(), shard_manager_exports));
      if (isShardingEnabled2()) {
        const byProject = /* @__PURE__ */ new Map();
        let skippedUnknown = 0;
        for (const row of batch) {
          const proj = row.project_name?.trim();
          if (!proj) {
            skippedUnknown++;
            continue;
          }
          if (!byProject.has(proj)) byProject.set(proj, []);
          byProject.get(proj).push(row);
        }
        if (skippedUnknown > 0) {
          process.stderr.write(
            `[store] Shard skip: ${skippedUnknown} record(s) with empty project_name (kept in main DB only)
`
          );
        }
        for (const [project, rows] of byProject) {
          try {
            const shardClient = await getReadyShardClient2(project);
            const shardStmts = rows.map(buildStmt);
            await shardClient.batch(shardStmts, "write");
          } catch (err) {
            const fullError = err instanceof Error ? `${err.name}: ${err.message}${err.stack ? `
${err.stack.split("\n").slice(1, 3).join("\n")}` : ""}` : String(err);
            process.stderr.write(
              `[store] Shard write failed for ${project} (${rows.length} records): ${fullError}
`
            );
          }
        }
      }
    } catch {
    }
    return batch.length;
  } finally {
    _flushing = false;
  }
}
4640
async function disposeStore() {
  // Shut the store down: stop the periodic flush, drain any buffered records,
  // close the database, and reset module state for a possible re-init.
  if (_flushTimer !== null) {
    clearInterval(_flushTimer);
    _flushTimer = null;
  }
  if (_pendingRecords.length > 0) {
    await flushBatch();
  }
  await disposeTurso();
  _pendingRecords = [];
  _nextVersion = 1;
}
4652
function vectorToBlob(vector) {
  // Serialize an embedding as a JSON number array (fed to SQL vector32(?)),
  // coercing plain arrays through Float32Array for consistent precision.
  const floats = vector instanceof Float32Array ? vector : Float32Array.from(vector);
  return JSON.stringify([...floats]);
}
4656
+
4657
// src/bin/agentic-ontology-backfill.ts
init_database();
init_agentic_ontology();
var BATCH_SIZE = 500; // memories fetched per backfill iteration
4661
async function main() {
  // Backfill pass: project every memory that has no agent_events evidence row
  // into the agentic ontology, batch by batch, in stable version/timestamp order.
  await initStore({ lightweight: true });
  const client = getClient();
  let offset = 0;
  let total = 0;
  while (true) {
    const result = await client.execute({
      sql: `SELECT id, agent_id, agent_role, session_id, timestamp, tool_name, project_name,
                   has_error, raw_text, version, task_id, intent, outcome, domain, trajectory
            FROM memories
            WHERE id NOT IN (SELECT evidence_memory_id FROM agent_events WHERE evidence_memory_id IS NOT NULL)
            ORDER BY version ASC, timestamp ASC
            LIMIT ? OFFSET ?`,
      args: [BATCH_SIZE, offset]
    });
    if (result.rows.length === 0) break;
    // Normalize driver row values into the plain shape insertOntologyForBatch expects.
    const rows = result.rows.map((row) => ({
      id: String(row.id),
      agent_id: String(row.agent_id),
      agent_role: String(row.agent_role),
      session_id: String(row.session_id),
      timestamp: String(row.timestamp),
      tool_name: String(row.tool_name),
      project_name: String(row.project_name),
      has_error: Number(row.has_error ?? 0),
      raw_text: String(row.raw_text),
      version: Number(row.version ?? 0),
      task_id: row.task_id == null ? null : String(row.task_id),
      intent: row.intent == null ? null : String(row.intent),
      outcome: row.outcome == null ? null : String(row.outcome),
      domain: row.domain == null ? null : String(row.domain),
      trajectory: row.trajectory == null ? null : String(row.trajectory)
    }));
    const inserted = await insertOntologyForBatch(rows, client);
    total += inserted;
    process.stderr.write(`[agentic-ontology-backfill] +${inserted} memories projected (${total} total)\n`);
    // Memories that gained evidence rows drop out of the NOT IN filter, so the
    // scan normally restarts at offset 0. FIX: if a batch produced no events,
    // unconditionally resetting to 0 (as before) re-fetched the same rows
    // forever; advance past them instead so the loop always makes progress.
    offset = inserted > 0 ? 0 : offset + result.rows.length;
  }
  process.stderr.write(`[agentic-ontology-backfill] Complete: ${total} memories projected.\n`);
  await disposeStore();
}
4704
// Entry point: surface fatal errors on stderr and exit non-zero.
main().catch((err) => {
  const reason = err instanceof Error ? err.message : String(err);
  process.stderr.write(`[agentic-ontology-backfill] FATAL: ${reason}\n`);
  process.exit(1);
});