@askexenow/exe-os 0.9.37 → 0.9.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/deploy/stack-manifests/v0.9.json +55 -0
- package/dist/bin/backfill-conversations.js +36 -9
- package/dist/bin/backfill-responses.js +36 -9
- package/dist/bin/backfill-vectors.js +36 -9
- package/dist/bin/cleanup-stale-review-tasks.js +37 -10
- package/dist/bin/cli.js +624 -204
- package/dist/bin/exe-agent.js +13 -5
- package/dist/bin/exe-assign.js +36 -9
- package/dist/bin/exe-boot.js +50 -20
- package/dist/bin/exe-call.js +134 -342
- package/dist/bin/exe-dispatch.js +36 -9
- package/dist/bin/exe-doctor.js +39 -12
- package/dist/bin/exe-export-behaviors.js +38 -11
- package/dist/bin/exe-forget.js +36 -9
- package/dist/bin/exe-gateway.js +64 -15
- package/dist/bin/exe-heartbeat.js +37 -10
- package/dist/bin/exe-kill.js +36 -9
- package/dist/bin/exe-launch-agent.js +287 -1081
- package/dist/bin/exe-new-employee.js +100 -14
- package/dist/bin/exe-pending-messages.js +36 -9
- package/dist/bin/exe-pending-notifications.js +36 -9
- package/dist/bin/exe-pending-reviews.js +36 -9
- package/dist/bin/exe-rename.js +1780 -204
- package/dist/bin/exe-review.js +36 -9
- package/dist/bin/exe-search.js +38 -11
- package/dist/bin/exe-session-cleanup.js +38 -11
- package/dist/bin/exe-start-codex.js +38 -11
- package/dist/bin/exe-start-opencode.js +38 -11
- package/dist/bin/exe-status.js +37 -10
- package/dist/bin/exe-team.js +36 -9
- package/dist/bin/git-sweep.js +36 -9
- package/dist/bin/graph-backfill.js +36 -9
- package/dist/bin/graph-export.js +36 -9
- package/dist/bin/install.js +70 -3
- package/dist/bin/intercom-check.js +38 -11
- package/dist/bin/scan-tasks.js +36 -9
- package/dist/bin/setup.js +20 -19
- package/dist/bin/shard-migrate.js +36 -9
- package/dist/bin/stack-update.js +308 -0
- package/dist/gateway/index.js +62 -13
- package/dist/hooks/bug-report-worker.js +40 -12
- package/dist/hooks/codex-stop-task-finalizer.js +38 -11
- package/dist/hooks/commit-complete.js +36 -9
- package/dist/hooks/error-recall.js +38 -11
- package/dist/hooks/ingest.js +38 -10
- package/dist/hooks/instructions-loaded.js +44 -12
- package/dist/hooks/notification.js +36 -9
- package/dist/hooks/post-compact.js +36 -9
- package/dist/hooks/post-tool-combined.js +39 -12
- package/dist/hooks/pre-compact.js +37 -10
- package/dist/hooks/pre-tool-use.js +38 -10
- package/dist/hooks/prompt-submit.js +43 -15
- package/dist/hooks/session-end.js +37 -10
- package/dist/hooks/session-start.js +49 -16
- package/dist/hooks/stop.js +37 -10
- package/dist/hooks/subagent-stop.js +36 -9
- package/dist/hooks/summary-worker.js +45 -18
- package/dist/index.js +60 -11
- package/dist/lib/consolidation.js +2 -1
- package/dist/lib/employee-templates.js +4 -3
- package/dist/lib/employees.js +2 -1
- package/dist/lib/exe-daemon.js +11229 -10537
- package/dist/lib/hybrid-search.js +38 -11
- package/dist/lib/identity.js +8 -3
- package/dist/lib/schedules.js +36 -9
- package/dist/lib/store.js +36 -9
- package/dist/mcp/server.js +6873 -6249
- package/dist/mcp/tools/create-task.js +10 -4
- package/dist/runtime/index.js +36 -9
- package/dist/tui/App.js +42 -13
- package/package.json +4 -1
- package/stack.release.json +31 -0
- package/stack.release.schema.json +31 -0
package/dist/bin/exe-rename.js
CHANGED
|
@@ -18,6 +18,13 @@ var __export = (target, all) => {
|
|
|
18
18
|
// src/lib/secure-files.ts
|
|
19
19
|
import { chmodSync, existsSync, mkdirSync } from "fs";
|
|
20
20
|
import { chmod, mkdir } from "fs/promises";
|
|
21
|
+
async function ensurePrivateDir(dirPath) {
|
|
22
|
+
await mkdir(dirPath, { recursive: true, mode: PRIVATE_DIR_MODE });
|
|
23
|
+
try {
|
|
24
|
+
await chmod(dirPath, PRIVATE_DIR_MODE);
|
|
25
|
+
} catch {
|
|
26
|
+
}
|
|
27
|
+
}
|
|
21
28
|
function ensurePrivateDirSync(dirPath) {
|
|
22
29
|
mkdirSync(dirPath, { recursive: true, mode: PRIVATE_DIR_MODE });
|
|
23
30
|
try {
|
|
@@ -25,6 +32,12 @@ function ensurePrivateDirSync(dirPath) {
|
|
|
25
32
|
} catch {
|
|
26
33
|
}
|
|
27
34
|
}
|
|
35
|
+
async function enforcePrivateFile(filePath) {
|
|
36
|
+
try {
|
|
37
|
+
await chmod(filePath, PRIVATE_FILE_MODE);
|
|
38
|
+
} catch {
|
|
39
|
+
}
|
|
40
|
+
}
|
|
28
41
|
function enforcePrivateFileSync(filePath) {
|
|
29
42
|
try {
|
|
30
43
|
if (existsSync(filePath)) chmodSync(filePath, PRIVATE_FILE_MODE);
|
|
@@ -61,7 +74,90 @@ function resolveDataDir() {
|
|
|
61
74
|
}
|
|
62
75
|
return newDir;
|
|
63
76
|
}
|
|
64
|
-
|
|
77
|
+
function migrateLegacyConfig(raw) {
|
|
78
|
+
if ("r2" in raw) {
|
|
79
|
+
process.stderr.write(
|
|
80
|
+
"[exe-os] Warning: config.json contains deprecated 'r2' field from v1.0. R2 sync has been replaced in v1.1. The 'r2' field will be ignored.\n"
|
|
81
|
+
);
|
|
82
|
+
delete raw.r2;
|
|
83
|
+
}
|
|
84
|
+
if ("syncIntervalMs" in raw) {
|
|
85
|
+
delete raw.syncIntervalMs;
|
|
86
|
+
}
|
|
87
|
+
return raw;
|
|
88
|
+
}
|
|
89
|
+
function migrateConfig(raw) {
|
|
90
|
+
const fromVersion = typeof raw.config_version === "number" ? raw.config_version : 0;
|
|
91
|
+
let currentVersion = fromVersion;
|
|
92
|
+
let migrated = false;
|
|
93
|
+
if (currentVersion > CURRENT_CONFIG_VERSION) {
|
|
94
|
+
return { config: raw, migrated: false, fromVersion };
|
|
95
|
+
}
|
|
96
|
+
for (const migration of CONFIG_MIGRATIONS) {
|
|
97
|
+
if (currentVersion === migration.from && migration.to <= CURRENT_CONFIG_VERSION) {
|
|
98
|
+
raw = migration.migrate(raw);
|
|
99
|
+
currentVersion = migration.to;
|
|
100
|
+
migrated = true;
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
return { config: raw, migrated, fromVersion };
|
|
104
|
+
}
|
|
105
|
+
function normalizeScalingRoadmap(raw) {
|
|
106
|
+
const defaultAuto = DEFAULT_CONFIG.scalingRoadmap.rerankerAutoTrigger;
|
|
107
|
+
const userRoadmap = raw.scalingRoadmap ?? {};
|
|
108
|
+
const userAuto = userRoadmap.rerankerAutoTrigger ?? {};
|
|
109
|
+
if (userAuto.enabled === void 0 && raw.rerankerEnabled !== void 0) {
|
|
110
|
+
userAuto.enabled = raw.rerankerEnabled;
|
|
111
|
+
}
|
|
112
|
+
raw.scalingRoadmap = {
|
|
113
|
+
...userRoadmap,
|
|
114
|
+
rerankerAutoTrigger: { ...defaultAuto, ...userAuto }
|
|
115
|
+
};
|
|
116
|
+
}
|
|
117
|
+
function normalizeSessionLifecycle(raw) {
|
|
118
|
+
const defaultSL = DEFAULT_CONFIG.sessionLifecycle;
|
|
119
|
+
const userSL = raw.sessionLifecycle ?? {};
|
|
120
|
+
raw.sessionLifecycle = { ...defaultSL, ...userSL };
|
|
121
|
+
}
|
|
122
|
+
function normalizeAutoUpdate(raw) {
|
|
123
|
+
const defaultAU = DEFAULT_CONFIG.autoUpdate;
|
|
124
|
+
const userAU = raw.autoUpdate ?? {};
|
|
125
|
+
raw.autoUpdate = { ...defaultAU, ...userAU };
|
|
126
|
+
}
|
|
127
|
+
async function loadConfig() {
|
|
128
|
+
const dir = process.env.EXE_OS_DIR ?? process.env.EXE_MEM_DIR ?? EXE_AI_DIR;
|
|
129
|
+
await ensurePrivateDir(dir);
|
|
130
|
+
const configPath = path.join(dir, "config.json");
|
|
131
|
+
if (!existsSync2(configPath)) {
|
|
132
|
+
return { ...DEFAULT_CONFIG, dbPath: path.join(dir, "memories.db") };
|
|
133
|
+
}
|
|
134
|
+
const raw = await readFile(configPath, "utf-8");
|
|
135
|
+
try {
|
|
136
|
+
let parsed = JSON.parse(raw);
|
|
137
|
+
parsed = migrateLegacyConfig(parsed);
|
|
138
|
+
const { config: migratedCfg, migrated, fromVersion } = migrateConfig(parsed);
|
|
139
|
+
if (migrated) {
|
|
140
|
+
process.stderr.write(`[exe-os] Config migrated from v${fromVersion} to v${migratedCfg.config_version}
|
|
141
|
+
`);
|
|
142
|
+
try {
|
|
143
|
+
await writeFile(configPath, JSON.stringify(migratedCfg, null, 2) + "\n");
|
|
144
|
+
await enforcePrivateFile(configPath);
|
|
145
|
+
} catch {
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
normalizeScalingRoadmap(migratedCfg);
|
|
149
|
+
normalizeSessionLifecycle(migratedCfg);
|
|
150
|
+
normalizeAutoUpdate(migratedCfg);
|
|
151
|
+
const config = { ...DEFAULT_CONFIG, dbPath: path.join(dir, "memories.db"), ...migratedCfg };
|
|
152
|
+
if (config.dbPath.startsWith("~")) {
|
|
153
|
+
config.dbPath = config.dbPath.replace(/^~/, os.homedir());
|
|
154
|
+
}
|
|
155
|
+
return config;
|
|
156
|
+
} catch {
|
|
157
|
+
return { ...DEFAULT_CONFIG, dbPath: path.join(dir, "memories.db") };
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
var EXE_AI_DIR, DB_PATH, MODELS_DIR, CONFIG_PATH, LEGACY_LANCE_PATH, CURRENT_CONFIG_VERSION, DEFAULT_CONFIG, CONFIG_MIGRATIONS;
|
|
65
161
|
var init_config = __esm({
|
|
66
162
|
"src/lib/config.ts"() {
|
|
67
163
|
"use strict";
|
|
@@ -129,6 +225,16 @@ var init_config = __esm({
|
|
|
129
225
|
checkIntervalMs: 24 * 60 * 60 * 1e3
|
|
130
226
|
}
|
|
131
227
|
};
|
|
228
|
+
CONFIG_MIGRATIONS = [
|
|
229
|
+
{
|
|
230
|
+
from: 0,
|
|
231
|
+
to: 1,
|
|
232
|
+
migrate: (cfg) => {
|
|
233
|
+
cfg.config_version = 1;
|
|
234
|
+
return cfg;
|
|
235
|
+
}
|
|
236
|
+
}
|
|
237
|
+
];
|
|
132
238
|
}
|
|
133
239
|
});
|
|
134
240
|
|
|
@@ -993,8 +1099,8 @@ function findPackageRoot() {
|
|
|
993
1099
|
function getAvailableMemoryGB() {
|
|
994
1100
|
if (process.platform === "darwin") {
|
|
995
1101
|
try {
|
|
996
|
-
const { execSync:
|
|
997
|
-
const vmstat =
|
|
1102
|
+
const { execSync: execSync4 } = __require("child_process");
|
|
1103
|
+
const vmstat = execSync4("vm_stat", { encoding: "utf8" });
|
|
998
1104
|
const pageSize = 16384;
|
|
999
1105
|
const pageSizeMatch = vmstat.match(/page size of (\d+) bytes/);
|
|
1000
1106
|
const actualPageSize = pageSizeMatch ? parseInt(pageSizeMatch[1], 10) : pageSize;
|
|
@@ -2550,194 +2656,1661 @@ var init_database = __esm({
|
|
|
2550
2656
|
}
|
|
2551
2657
|
});
|
|
2552
2658
|
|
|
2553
|
-
// src/
|
|
2554
|
-
|
|
2555
|
-
|
|
2556
|
-
|
|
2557
|
-
|
|
2558
|
-
|
|
2659
|
+
// src/lib/platform-procedures.ts
|
|
2660
|
+
var PLATFORM_PROCEDURES, PLATFORM_PROCEDURE_TITLES;
|
|
2661
|
+
var init_platform_procedures = __esm({
|
|
2662
|
+
"src/lib/platform-procedures.ts"() {
|
|
2663
|
+
"use strict";
|
|
2664
|
+
PLATFORM_PROCEDURES = [
|
|
2665
|
+
// --- Foundation: what is exe-os ---
|
|
2666
|
+
{
|
|
2667
|
+
title: "What is exe-os \u2014 the operating model every agent must understand",
|
|
2668
|
+
domain: "architecture",
|
|
2669
|
+
priority: "p0",
|
|
2670
|
+
content: "Exe OS is an AI employee operating system. A founder runs 5-10 AI agents as a real org: COO, CTO, CMO, engineers, and content production specialists. Each agent has identity, expertise, and experience layers \u2014 persistent memory that makes them better over time. All data is local-first, E2EE, owned by the user. The MCP server is the ONLY data interface \u2014 never access the DB directly."
|
|
2671
|
+
},
|
|
2672
|
+
{
|
|
2673
|
+
title: "Mode 1 \u2014 how exe-os runs inside Claude Code",
|
|
2674
|
+
domain: "architecture",
|
|
2675
|
+
priority: "p0",
|
|
2676
|
+
content: "Mode 1: exe-os runs AS hooks + MCP + skills inside Claude Code, Codex, or OpenCode. The founder picks their default tool at setup. The COO manages employees in tmux sessions. Each coordinator session is a separate window/project. Employees run in their own tmux panes via create_task auto-spawn. The founder talks to the COO; the COO orchestrates the team. The tool is the shell, exe-os is the brain."
|
|
2677
|
+
},
|
|
2678
|
+
{
|
|
2679
|
+
title: "Sessions explained \u2014 coordinator session names and projects",
|
|
2680
|
+
domain: "architecture",
|
|
2681
|
+
priority: "p0",
|
|
2682
|
+
content: "Each coordinator session is an isolated project session. One might be exe-os development, another might be exe-wiki. Each session spawns its own employees using {employee}-{coordinatorSession}. Sessions share the same memory DB but tasks are scoped to the session that created them. A founder can run multiple projects simultaneously. Sessions never interfere with each other."
|
|
2683
|
+
},
|
|
2684
|
+
{
|
|
2685
|
+
title: "Runtime settings \u2014 COO can view and change tools per agent",
|
|
2686
|
+
domain: "workflow",
|
|
2687
|
+
priority: "p1",
|
|
2688
|
+
content: "exe-os supports three tools: Claude Code (Anthropic), Codex (OpenAI), and OpenCode (open source, 75+ providers). Each agent can use a different tool and model. COO uses set_agent_config MCP tool to view or change settings. Call with no args to show all agents. Call with agent_id + runtime + model to change. Users can also run `exe-os settings` from terminal for interactive arrow-key selection."
|
|
2689
|
+
},
|
|
2690
|
+
// --- Hierarchy and dispatch ---
|
|
2691
|
+
{
|
|
2692
|
+
title: "Chain of command \u2014 who talks to whom",
|
|
2693
|
+
domain: "workflow",
|
|
2694
|
+
priority: "p0",
|
|
2695
|
+
content: "Founder -> coordinator (the executive agent, internally routed as 'COO') -> CTO/CMO. CTO -> engineers. CMO -> content production. Never skip levels: the coordinator does not bypass managers for specialist work. Specialists report to their manager. If you need cross-team info, use ask_team_memory \u2014 don't read other agents' task folders. Each level owns dispatch downward and review upward."
|
|
2696
|
+
},
|
|
2697
|
+
{
|
|
2698
|
+
title: "Single dispatch path \u2014 create_task only",
|
|
2699
|
+
domain: "workflow",
|
|
2700
|
+
priority: "p0",
|
|
2701
|
+
content: "create_task is the ONLY way to dispatch work to another agent. No direct ensureEmployee calls, no manual tmux spawns, no send_message for actionable work. create_task \u2192 system auto-spawns \u2192 session correctly named. ONE PATH. No backdoors. No exceptions."
|
|
2702
|
+
},
|
|
2703
|
+
// --- Session isolation ---
|
|
2704
|
+
{
|
|
2705
|
+
title: "Session scoping \u2014 stay in your coordinator boundary",
|
|
2706
|
+
domain: "security",
|
|
2707
|
+
priority: "p0",
|
|
2708
|
+
content: "Session scoping is mandatory. Managers dispatch to workers within their own coordinator session ONLY. Employee sessions use {employee}-{coordinatorSession}. Cross-session dispatch is blocked by the system. Verify session names before dispatch. Tasks are scoped to the creating coordinator session."
|
|
2709
|
+
},
|
|
2710
|
+
{
|
|
2711
|
+
title: "Session isolation \u2014 never touch another session's work",
|
|
2712
|
+
domain: "workflow",
|
|
2713
|
+
priority: "p0",
|
|
2714
|
+
content: "Sessions are isolated. A coordinator session owns ONLY tasks it dispatched. (1) Never close/update/cancel tasks from another coordinator session. (2) Never review work from a different session \u2014 report that it belongs to another session and skip. (3) Ignore other sessions' items in list_tasks results. (4) Employees inherit session: employee sessions work ONLY on their parent coordinator session's tasks. Cross-session work is a system violation."
|
|
2715
|
+
},
|
|
2716
|
+
// --- Engineering: session scoping in code ---
|
|
2717
|
+
{
|
|
2718
|
+
title: "Three-dimensional scoping \u2014 session, project, role \u2014 enforced in every query",
|
|
2719
|
+
domain: "architecture",
|
|
2720
|
+
priority: "p0",
|
|
2721
|
+
content: "Every DB query, notification, review count, and task operation MUST be scoped on 3 dimensions: (1) Session \u2014 filter by session_scope matching the current coordinator session. (2) Project \u2014 filter by project_name. (3) Role \u2014 agents only see data at their hierarchy level. When writing ANY function that touches tasks, reviews, messages, or notifications: always accept a sessionScope parameter and pass it to the SQL WHERE clause. Unscoped queries are bugs. Test by running 2+ coordinator sessions simultaneously."
|
|
2722
|
+
},
|
|
2723
|
+
// --- Hard constraints ---
|
|
2724
|
+
{
|
|
2725
|
+
title: "What you CANNOT do in exe-os \u2014 hard constraints",
|
|
2726
|
+
domain: "security",
|
|
2727
|
+
priority: "p0",
|
|
2728
|
+
content: "NEVER: (1) Access the database directly \u2014 it's SQLCipher encrypted, always fails. Use MCP tools only. (2) Manually spawn tmux sessions \u2014 create_task handles it. (3) Run git checkout main \u2014 agents work in worktrees. (4) Modify another agent's in-progress task. (5) Push to remote \u2014 the COO reviews and pushes. (6) Skip update_task(done) \u2014 it's the ONLY way your work gets reviewed. (7) Run git init."
|
|
2729
|
+
},
|
|
2730
|
+
// --- Operations ---
|
|
2731
|
+
{
|
|
2732
|
+
title: "Managers must supervise deployed workers",
|
|
2733
|
+
domain: "workflow",
|
|
2734
|
+
priority: "p0",
|
|
2735
|
+
content: `Every manager (COO/CTO/CMO) who dispatches work to a worker MUST actively monitor them. Check tmux capture-pane every 10 minutes. Verify they're working, not stuck. If idle at prompt with in_progress task \u2192 send intercom. If stuck \u2192 unblock or escalate. "Standing by" without checking is negligence.`
|
|
2736
|
+
},
|
|
2737
|
+
{
|
|
2738
|
+
title: "COO boot health check \u2014 memory, cloud sync, daemon on every launch",
|
|
2739
|
+
domain: "workflow",
|
|
2740
|
+
priority: "p0",
|
|
2741
|
+
content: "On every /exe boot, COO MUST check system health BEFORE other work: (1) daemon \u2014 is exed PID alive, (2) cloud sync \u2014 grep workers.log for recent cloud-sync errors, (3) memory count \u2014 total in DB, (4) sync delta \u2014 local vs cloud storage_bytes. Report as 4-line status table. If ANY check fails, surface to founder immediately. Do not proceed to tasks until health confirmed."
|
|
2742
|
+
},
|
|
2743
|
+
{
|
|
2744
|
+
title: "exe-build-adv mandatory for 3+ files",
|
|
2745
|
+
domain: "workflow",
|
|
2746
|
+
priority: "p0",
|
|
2747
|
+
content: "exe-build-adv is MANDATORY for ALL work touching 3+ files. Run /exe-build-adv --auto BEFORE implementation. Pipeline: Spec \u2192 AC \u2192 Tests \u2192 Evaluate \u2192 Fix. No multi-file feature ships without pipeline artifacts. No exceptions \u2014 managers reject work without them."
|
|
2748
|
+
},
|
|
2749
|
+
{
|
|
2750
|
+
title: "Desktop and TUI are the same product",
|
|
2751
|
+
domain: "architecture",
|
|
2752
|
+
priority: "p0",
|
|
2753
|
+
content: "Desktop and TUI are the SAME product in different renderers. Same data contracts, same interactions, same acceptance criteria. Desktop tab specs in ARCHITECTURE.md ARE the TUI specs. When building TUI, cross-reference Desktop spec. Different tab names, identical behavior. Never treat them as separate products."
|
|
2754
|
+
},
|
|
2755
|
+
// --- Orchestration golden path ---
|
|
2756
|
+
{
|
|
2757
|
+
title: "Task lifecycle \u2014 the golden path every agent follows",
|
|
2758
|
+
domain: "workflow",
|
|
2759
|
+
priority: "p0",
|
|
2760
|
+
content: "create_task is dispatch + delivery. Task lifecycle: open \u2192 in_progress (you start) \u2192 done (update_task when finished) \u2192 needs_review (reviewer nudged) \u2192 closed (COO only via close_task). DB is the reliable delivery \u2014 intercom is just a speedup nudge. If you finish a task, self-chain: check for next task immediately (step 7). Never wait for a nudge. Never say 'standing by.'"
|
|
2761
|
+
},
|
|
2762
|
+
{
|
|
2763
|
+
title: "Intercom is a speedup, not delivery \u2014 DB is the source of truth",
|
|
2764
|
+
domain: "architecture",
|
|
2765
|
+
priority: "p0",
|
|
2766
|
+
content: "Tasks live in the DB. Intercom (tmux send-keys) is fire-and-forget \u2014 it may fail, get garbled, or arrive mid-work. Never rely on intercom for task delivery. The UserPromptSubmit hook checks the DB for new tasks on every prompt. Your operating procedures step 7 says check for next work. The daemon nudges idle agents as a speedup. If you have no tasks, you found them all."
|
|
2767
|
+
},
|
|
2768
|
+
// --- MCP is the ONLY data interface ---
|
|
2769
|
+
{
|
|
2770
|
+
title: "MCP disconnect \u2014 ask the user, never work around it",
|
|
2771
|
+
domain: "workflow",
|
|
2772
|
+
priority: "p0",
|
|
2773
|
+
content: "If MCP tools are unavailable, disconnected, or returning connection errors: STOP. Tell the user clearly: 'MCP server is disconnected. Please run /mcp to reconnect.' Do NOT attempt workarounds \u2014 no raw Node imports, no direct DB access, no CLI hacks, no daemon socket calls. MCP is the ONLY data interface. Working around it wastes time, hits bundling issues, and bypasses the contract boundary. Ask once, wait, proceed when reconnected."
|
|
2774
|
+
},
|
|
2775
|
+
// --- MCP Tool Catalog (Layer 0 — every agent knows what tools exist) ---
|
|
2776
|
+
{
|
|
2777
|
+
title: "MCP tools \u2014 memory and search",
|
|
2778
|
+
domain: "tool-use",
|
|
2779
|
+
priority: "p1",
|
|
2780
|
+
content: "recall_my_memory: search your own memories (semantic + FTS). ask_team_memory: search a colleague's memories by agent name. store_memory: persist a memory (decisions, summaries, context). commit_memory: high-importance memory that survives consolidation. search_everything: unified search across memories, tasks, entities, conversations. get_session_context: temporal window of memories around a timestamp. consolidate_memories: merge duplicate/related memories into insights. get_memory_cardinality: count memories per agent (health check)."
|
|
2781
|
+
},
|
|
2782
|
+
{
|
|
2783
|
+
title: "MCP tools \u2014 task orchestration",
|
|
2784
|
+
domain: "tool-use",
|
|
2785
|
+
priority: "p1",
|
|
2786
|
+
content: "create_task: dispatch work to an employee (auto-spawns session). The ONLY dispatch path. list_tasks: query tasks by status, assignee, project. get_task: fetch full task details by ID. update_task: change status (in_progress, done, blocked, cancelled) + add result summary. close_task: finalize a reviewed task (COO only). checkpoint_task: save progress state for crash recovery. resume_employee: re-spawn an employee session for an existing task."
|
|
2787
|
+
},
|
|
2788
|
+
{
|
|
2789
|
+
title: "MCP tools \u2014 knowledge graph (GraphRAG)",
|
|
2790
|
+
domain: "tool-use",
|
|
2791
|
+
priority: "p1",
|
|
2792
|
+
content: "query_relationships: find connections between entities in the knowledge graph. get_entity_neighbors: explore an entity's direct connections. get_hot_entities: find most-referenced entities (trending topics). get_graph_stats: graph health \u2014 entity/relationship counts, density. export_graph: export graph data for visualization. merge_entities: deduplicate entities (alias resolution). find_similar_trajectories: match tool-call patterns to past task solutions."
|
|
2793
|
+
},
|
|
2794
|
+
{
|
|
2795
|
+
title: "MCP tools \u2014 identity, behavior, and decisions",
|
|
2796
|
+
domain: "tool-use",
|
|
2797
|
+
priority: "p1",
|
|
2798
|
+
content: "get_identity: read an agent's exe.md (Layer 1 identity). update_identity: write an agent's exe.md. Identity > behavior \u2014 use for permanent rules. store_behavior: record a correction or pattern for an agent (Layer 2 expertise). list_behaviors: view an agent's active behaviors. deactivate_behavior: soft-delete a stale or conflicting behavior. store_decision: record an ADR (architectural decision record). get_decision: retrieve a past decision by query."
|
|
2799
|
+
},
|
|
2800
|
+
{
|
|
2801
|
+
title: "MCP tools \u2014 communication and messaging",
|
|
2802
|
+
domain: "tool-use",
|
|
2803
|
+
priority: "p1",
|
|
2804
|
+
content: "send_message: send supplementary context to another agent (NOT for actionable work \u2014 use create_task). acknowledge_messages: mark messages as read. send_whatsapp: send WhatsApp message via gateway (customer-facing alerts). query_conversations: search ingested conversations across all channels (WhatsApp, email, etc.)."
|
|
2805
|
+
},
|
|
2806
|
+
{
|
|
2807
|
+
title: "MCP tools \u2014 wiki, documents, and content",
|
|
2808
|
+
domain: "tool-use",
|
|
2809
|
+
priority: "p1",
|
|
2810
|
+
content: "wiki: read/list wiki pages only. Direct wiki write tools are removed; wiki updates flow through raw-data ingestion/projection into the curated wiki store. Legacy aliases: list_wiki_pages/get_wiki_page. crm: read/list/get CRM records from exe-db. raw_data: read capped raw landing-pad events from exe-db with payload opt-in. ingest_document: import a file (PDF, MD, etc.) into memory as chunks. list_documents: browse ingested documents by workspace. purge_document: remove a document and its memory chunks. set_document_importance: adjust chunk importance scores. rerank_documents: re-score document relevance for a query."
|
|
2811
|
+
},
|
|
2812
|
+
{
|
|
2813
|
+
title: "MCP tools \u2014 system, operations, and admin",
|
|
2814
|
+
domain: "tool-use",
|
|
2815
|
+
priority: "p1",
|
|
2816
|
+
content: "get_agent_spend: token usage per agent/session (cost tracking). list_agent_sessions: view agent session history. get_session_kills: audit log of killed sessions. get_daemon_health: check exed daemon status. get_auto_wake_status: daemon auto-wake configuration. get_worker_gate: check worker deployment gates. run_memory_audit: health check \u2014 duplicates, null vectors, orphaned rows. run_consolidation: trigger sleep-time memory consolidation. cloud_sync: force a cloud sync cycle. backup_vps: trigger VPS backup."
|
|
2817
|
+
},
|
|
2818
|
+
{
|
|
2819
|
+
title: "MCP tools \u2014 config, licensing, and team",
|
|
2820
|
+
domain: "tool-use",
|
|
2821
|
+
priority: "p1",
|
|
2822
|
+
content: "set_agent_config: view/change per-agent runtime and model settings. list_employees: view the employee roster. add_person: add a person to the CRM contacts roster. list_people: browse CRM contacts. get_person: fetch contact details. get_license_status: check license validity. create_license: generate a new license key (admin). list_licenses: view all issued licenses. activate_license: activate a license on a device."
|
|
2823
|
+
},
|
|
2824
|
+
{
|
|
2825
|
+
title: "MCP tools \u2014 advanced (triggers, skills, orchestration)",
|
|
2826
|
+
domain: "tool-use",
|
|
2827
|
+
priority: "p1",
|
|
2828
|
+
content: "create_trigger: set up a scheduled recurring agent job (cron). list_triggers: view active triggers. load_skill: load a slash-command skill dynamically. apply_starter_pack: import a pre-built behavior + identity pack for a role. export_orchestration: export full org state (tasks, behaviors, identities) as portable JSON. import_orchestration: import org state into a new instance. deploy_client: deploy a customer client instance. query_company_brain: unified RAG query across all company knowledge. create_reminder: set a text reminder (shown in boot brief). list_reminders: view pending reminders. complete_reminder: mark a reminder done. global_procedure: manage customer-owned company procedures (Layer 0; actions: store, list, deactivate). Legacy aliases: store_global_procedure, list_global_procedures, deactivate_global_procedure."
|
|
2829
|
+
}
|
|
2830
|
+
];
|
|
2831
|
+
PLATFORM_PROCEDURE_TITLES = new Set(
|
|
2832
|
+
PLATFORM_PROCEDURES.map((p) => p.title)
|
|
2833
|
+
);
|
|
2834
|
+
}
|
|
2835
|
+
});
|
|
2559
2836
|
|
|
2560
2837
|
// src/lib/global-procedures.ts
|
|
2561
|
-
|
|
2838
|
+
var global_procedures_exports = {};
|
|
2839
|
+
__export(global_procedures_exports, {
|
|
2840
|
+
deactivateGlobalProcedure: () => deactivateGlobalProcedure,
|
|
2841
|
+
getGlobalProceduresBlock: () => getGlobalProceduresBlock,
|
|
2842
|
+
loadGlobalProcedures: () => loadGlobalProcedures,
|
|
2843
|
+
storeGlobalProcedure: () => storeGlobalProcedure
|
|
2844
|
+
});
|
|
2562
2845
|
import { randomUUID as randomUUID2 } from "crypto";
|
|
2846
|
+
async function loadGlobalProcedures() {
|
|
2847
|
+
const client = getClient();
|
|
2848
|
+
const result = await client.execute({
|
|
2849
|
+
sql: "SELECT * FROM global_procedures WHERE active = 1 ORDER BY priority ASC, created_at ASC",
|
|
2850
|
+
args: []
|
|
2851
|
+
});
|
|
2852
|
+
const allRows = result.rows;
|
|
2853
|
+
const customerOnly = allRows.filter((p) => !PLATFORM_PROCEDURE_TITLES.has(p.title));
|
|
2854
|
+
if (customerOnly.length > 0) {
|
|
2855
|
+
_customerCache = customerOnly.map((p) => `### ${p.title}
|
|
2856
|
+
${p.content}`).join("\n\n");
|
|
2857
|
+
} else {
|
|
2858
|
+
_customerCache = "";
|
|
2859
|
+
}
|
|
2860
|
+
_cacheLoaded = true;
|
|
2861
|
+
return customerOnly;
|
|
2862
|
+
}
|
|
2863
|
+
function getGlobalProceduresBlock() {
|
|
2864
|
+
const sections = [];
|
|
2865
|
+
if (_platformCache) sections.push(_platformCache);
|
|
2866
|
+
if (_cacheLoaded && _customerCache) sections.push(_customerCache);
|
|
2867
|
+
if (sections.length === 0) return "";
|
|
2868
|
+
return `## Organization-Wide Procedures (MANDATORY \u2014 supersedes all other rules)
|
|
2563
2869
|
|
|
2564
|
-
|
|
2565
|
-
|
|
2566
|
-
|
|
2567
|
-
|
|
2568
|
-
|
|
2569
|
-
|
|
2570
|
-
|
|
2571
|
-
|
|
2572
|
-
|
|
2573
|
-
|
|
2574
|
-
|
|
2575
|
-
|
|
2576
|
-
|
|
2577
|
-
|
|
2578
|
-
|
|
2579
|
-
|
|
2580
|
-
|
|
2581
|
-
|
|
2582
|
-
|
|
2583
|
-
|
|
2584
|
-
|
|
2585
|
-
|
|
2586
|
-
|
|
2587
|
-
|
|
2588
|
-
|
|
2589
|
-
|
|
2590
|
-
|
|
2591
|
-
|
|
2592
|
-
|
|
2593
|
-
|
|
2594
|
-
|
|
2595
|
-
|
|
2596
|
-
|
|
2597
|
-
|
|
2598
|
-
{
|
|
2599
|
-
title: "Single dispatch path \u2014 create_task only",
|
|
2600
|
-
domain: "workflow",
|
|
2601
|
-
priority: "p0",
|
|
2602
|
-
content: "create_task is the ONLY way to dispatch work to another agent. No direct ensureEmployee calls, no manual tmux spawns, no send_message for actionable work. create_task \u2192 system auto-spawns \u2192 session correctly named. ONE PATH. No backdoors. No exceptions."
|
|
2603
|
-
},
|
|
2604
|
-
// --- Session isolation ---
|
|
2605
|
-
{
|
|
2606
|
-
title: "Session scoping \u2014 stay in your coordinator boundary",
|
|
2607
|
-
domain: "security",
|
|
2608
|
-
priority: "p0",
|
|
2609
|
-
content: "Session scoping is mandatory. Managers dispatch to workers within their own coordinator session ONLY. Employee sessions use {employee}-{coordinatorSession}. Cross-session dispatch is blocked by the system. Verify session names before dispatch. Tasks are scoped to the creating coordinator session."
|
|
2610
|
-
},
|
|
2611
|
-
{
|
|
2612
|
-
title: "Session isolation \u2014 never touch another session's work",
|
|
2613
|
-
domain: "workflow",
|
|
2614
|
-
priority: "p0",
|
|
2615
|
-
content: "Sessions are isolated. A coordinator session owns ONLY tasks it dispatched. (1) Never close/update/cancel tasks from another coordinator session. (2) Never review work from a different session \u2014 report that it belongs to another session and skip. (3) Ignore other sessions' items in list_tasks results. (4) Employees inherit session: employee sessions work ONLY on their parent coordinator session's tasks. Cross-session work is a system violation."
|
|
2616
|
-
},
|
|
2617
|
-
// --- Engineering: session scoping in code ---
|
|
2618
|
-
{
|
|
2619
|
-
title: "Three-dimensional scoping \u2014 session, project, role \u2014 enforced in every query",
|
|
2620
|
-
domain: "architecture",
|
|
2621
|
-
priority: "p0",
|
|
2622
|
-
content: "Every DB query, notification, review count, and task operation MUST be scoped on 3 dimensions: (1) Session \u2014 filter by session_scope matching the current coordinator session. (2) Project \u2014 filter by project_name. (3) Role \u2014 agents only see data at their hierarchy level. When writing ANY function that touches tasks, reviews, messages, or notifications: always accept a sessionScope parameter and pass it to the SQL WHERE clause. Unscoped queries are bugs. Test by running 2+ coordinator sessions simultaneously."
|
|
2623
|
-
},
|
|
2624
|
-
// --- Hard constraints ---
|
|
2625
|
-
{
|
|
2626
|
-
title: "What you CANNOT do in exe-os \u2014 hard constraints",
|
|
2627
|
-
domain: "security",
|
|
2628
|
-
priority: "p0",
|
|
2629
|
-
content: "NEVER: (1) Access the database directly \u2014 it's SQLCipher encrypted, always fails. Use MCP tools only. (2) Manually spawn tmux sessions \u2014 create_task handles it. (3) Run git checkout main \u2014 agents work in worktrees. (4) Modify another agent's in-progress task. (5) Push to remote \u2014 the COO reviews and pushes. (6) Skip update_task(done) \u2014 it's the ONLY way your work gets reviewed. (7) Run git init."
|
|
2630
|
-
},
|
|
2631
|
-
// --- Operations ---
|
|
2632
|
-
{
|
|
2633
|
-
title: "Managers must supervise deployed workers",
|
|
2634
|
-
domain: "workflow",
|
|
2635
|
-
priority: "p0",
|
|
2636
|
-
content: `Every manager (COO/CTO/CMO) who dispatches work to a worker MUST actively monitor them. Check tmux capture-pane every 10 minutes. Verify they're working, not stuck. If idle at prompt with in_progress task \u2192 send intercom. If stuck \u2192 unblock or escalate. "Standing by" without checking is negligence.`
|
|
2637
|
-
},
|
|
2638
|
-
{
|
|
2639
|
-
title: "COO boot health check \u2014 memory, cloud sync, daemon on every launch",
|
|
2640
|
-
domain: "workflow",
|
|
2641
|
-
priority: "p0",
|
|
2642
|
-
content: "On every /exe boot, COO MUST check system health BEFORE other work: (1) daemon \u2014 is exed PID alive, (2) cloud sync \u2014 grep workers.log for recent cloud-sync errors, (3) memory count \u2014 total in DB, (4) sync delta \u2014 local vs cloud storage_bytes. Report as 4-line status table. If ANY check fails, surface to founder immediately. Do not proceed to tasks until health confirmed."
|
|
2643
|
-
},
|
|
2644
|
-
{
|
|
2645
|
-
title: "exe-build-adv mandatory for 3+ files",
|
|
2646
|
-
domain: "workflow",
|
|
2647
|
-
priority: "p0",
|
|
2648
|
-
content: "exe-build-adv is MANDATORY for ALL work touching 3+ files. Run /exe-build-adv --auto BEFORE implementation. Pipeline: Spec \u2192 AC \u2192 Tests \u2192 Evaluate \u2192 Fix. No multi-file feature ships without pipeline artifacts. No exceptions \u2014 managers reject work without them."
|
|
2649
|
-
},
|
|
2650
|
-
{
|
|
2651
|
-
title: "Desktop and TUI are the same product",
|
|
2652
|
-
domain: "architecture",
|
|
2653
|
-
priority: "p0",
|
|
2654
|
-
content: "Desktop and TUI are the SAME product in different renderers. Same data contracts, same interactions, same acceptance criteria. Desktop tab specs in ARCHITECTURE.md ARE the TUI specs. When building TUI, cross-reference Desktop spec. Different tab names, identical behavior. Never treat them as separate products."
|
|
2655
|
-
},
|
|
2656
|
-
// --- Orchestration golden path ---
|
|
2657
|
-
{
|
|
2658
|
-
title: "Task lifecycle \u2014 the golden path every agent follows",
|
|
2659
|
-
domain: "workflow",
|
|
2660
|
-
priority: "p0",
|
|
2661
|
-
content: "create_task is dispatch + delivery. Task lifecycle: open \u2192 in_progress (you start) \u2192 done (update_task when finished) \u2192 needs_review (reviewer nudged) \u2192 closed (COO only via close_task). DB is the reliable delivery \u2014 intercom is just a speedup nudge. If you finish a task, self-chain: check for next task immediately (step 7). Never wait for a nudge. Never say 'standing by.'"
|
|
2662
|
-
},
|
|
2663
|
-
{
|
|
2664
|
-
title: "Intercom is a speedup, not delivery \u2014 DB is the source of truth",
|
|
2665
|
-
domain: "architecture",
|
|
2666
|
-
priority: "p0",
|
|
2667
|
-
content: "Tasks live in the DB. Intercom (tmux send-keys) is fire-and-forget \u2014 it may fail, get garbled, or arrive mid-work. Never rely on intercom for task delivery. The UserPromptSubmit hook checks the DB for new tasks on every prompt. Your operating procedures step 7 says check for next work. The daemon nudges idle agents as a speedup. If you have no tasks, you found them all."
|
|
2668
|
-
},
|
|
2669
|
-
// --- MCP is the ONLY data interface ---
|
|
2670
|
-
{
|
|
2671
|
-
title: "MCP disconnect \u2014 ask the user, never work around it",
|
|
2672
|
-
domain: "workflow",
|
|
2673
|
-
priority: "p0",
|
|
2674
|
-
content: "If MCP tools are unavailable, disconnected, or returning connection errors: STOP. Tell the user clearly: 'MCP server is disconnected. Please run /mcp to reconnect.' Do NOT attempt workarounds \u2014 no raw Node imports, no direct DB access, no CLI hacks, no daemon socket calls. MCP is the ONLY data interface. Working around it wastes time, hits bundling issues, and bypasses the contract boundary. Ask once, wait, proceed when reconnected."
|
|
2675
|
-
},
|
|
2676
|
-
// --- MCP Tool Catalog (Layer 0 — every agent knows what tools exist) ---
|
|
2677
|
-
{
|
|
2678
|
-
title: "MCP tools \u2014 memory and search",
|
|
2679
|
-
domain: "tool-use",
|
|
2680
|
-
priority: "p1",
|
|
2681
|
-
content: "recall_my_memory: search your own memories (semantic + FTS). ask_team_memory: search a colleague's memories by agent name. store_memory: persist a memory (decisions, summaries, context). commit_memory: high-importance memory that survives consolidation. search_everything: unified search across memories, tasks, entities, conversations. get_session_context: temporal window of memories around a timestamp. consolidate_memories: merge duplicate/related memories into insights. get_memory_cardinality: count memories per agent (health check)."
|
|
2682
|
-
},
|
|
2683
|
-
{
|
|
2684
|
-
title: "MCP tools \u2014 task orchestration",
|
|
2685
|
-
domain: "tool-use",
|
|
2686
|
-
priority: "p1",
|
|
2687
|
-
content: "create_task: dispatch work to an employee (auto-spawns session). The ONLY dispatch path. list_tasks: query tasks by status, assignee, project. get_task: fetch full task details by ID. update_task: change status (in_progress, done, blocked, cancelled) + add result summary. close_task: finalize a reviewed task (COO only). checkpoint_task: save progress state for crash recovery. resume_employee: re-spawn an employee session for an existing task."
|
|
2688
|
-
},
|
|
2689
|
-
{
|
|
2690
|
-
title: "MCP tools \u2014 knowledge graph (GraphRAG)",
|
|
2691
|
-
domain: "tool-use",
|
|
2692
|
-
priority: "p1",
|
|
2693
|
-
content: "query_relationships: find connections between entities in the knowledge graph. get_entity_neighbors: explore an entity's direct connections. get_hot_entities: find most-referenced entities (trending topics). get_graph_stats: graph health \u2014 entity/relationship counts, density. export_graph: export graph data for visualization. merge_entities: deduplicate entities (alias resolution). find_similar_trajectories: match tool-call patterns to past task solutions."
|
|
2694
|
-
},
|
|
2695
|
-
{
|
|
2696
|
-
title: "MCP tools \u2014 identity, behavior, and decisions",
|
|
2697
|
-
domain: "tool-use",
|
|
2698
|
-
priority: "p1",
|
|
2699
|
-
content: "get_identity: read an agent's exe.md (Layer 1 identity). update_identity: write an agent's exe.md. Identity > behavior \u2014 use for permanent rules. store_behavior: record a correction or pattern for an agent (Layer 2 expertise). list_behaviors: view an agent's active behaviors. deactivate_behavior: soft-delete a stale or conflicting behavior. store_decision: record an ADR (architectural decision record). get_decision: retrieve a past decision by query."
|
|
2700
|
-
},
|
|
2701
|
-
{
|
|
2702
|
-
title: "MCP tools \u2014 communication and messaging",
|
|
2703
|
-
domain: "tool-use",
|
|
2704
|
-
priority: "p1",
|
|
2705
|
-
content: "send_message: send supplementary context to another agent (NOT for actionable work \u2014 use create_task). acknowledge_messages: mark messages as read. send_whatsapp: send WhatsApp message via gateway (customer-facing alerts). query_conversations: search ingested conversations across all channels (WhatsApp, email, etc.)."
|
|
2706
|
-
},
|
|
2707
|
-
{
|
|
2708
|
-
title: "MCP tools \u2014 wiki, documents, and content",
|
|
2709
|
-
domain: "tool-use",
|
|
2710
|
-
priority: "p1",
|
|
2711
|
-
content: "create_wiki_page: create a wiki page in exe-wiki. list_wiki_pages: browse wiki pages. get_wiki_page: read a wiki page. update_wiki_page: edit a wiki page. ingest_document: import a file (PDF, MD, etc.) into memory as chunks. list_documents: browse ingested documents by workspace. purge_document: remove a document and its memory chunks. set_document_importance: adjust chunk importance scores. rerank_documents: re-score document relevance for a query."
|
|
2712
|
-
},
|
|
2713
|
-
{
|
|
2714
|
-
title: "MCP tools \u2014 system, operations, and admin",
|
|
2715
|
-
domain: "tool-use",
|
|
2716
|
-
priority: "p1",
|
|
2717
|
-
content: "get_agent_spend: token usage per agent/session (cost tracking). list_agent_sessions: view agent session history. get_session_kills: audit log of killed sessions. get_daemon_health: check exed daemon status. get_auto_wake_status: daemon auto-wake configuration. get_worker_gate: check worker deployment gates. run_memory_audit: health check \u2014 duplicates, null vectors, orphaned rows. run_consolidation: trigger sleep-time memory consolidation. cloud_sync: force a cloud sync cycle. backup_vps: trigger VPS backup."
|
|
2718
|
-
},
|
|
2719
|
-
{
|
|
2720
|
-
title: "MCP tools \u2014 config, licensing, and team",
|
|
2721
|
-
domain: "tool-use",
|
|
2722
|
-
priority: "p1",
|
|
2723
|
-
content: "set_agent_config: view/change per-agent runtime and model settings. list_employees: view the employee roster. add_person: add a person to the CRM contacts roster. list_people: browse CRM contacts. get_person: fetch contact details. get_license_status: check license validity. create_license: generate a new license key (admin). list_licenses: view all issued licenses. activate_license: activate a license on a device."
|
|
2724
|
-
},
|
|
2725
|
-
{
|
|
2726
|
-
title: "MCP tools \u2014 advanced (triggers, skills, orchestration)",
|
|
2727
|
-
domain: "tool-use",
|
|
2728
|
-
priority: "p1",
|
|
2729
|
-
content: "create_trigger: set up a scheduled recurring agent job (cron). list_triggers: view active triggers. load_skill: load a slash-command skill dynamically. apply_starter_pack: import a pre-built behavior + identity pack for a role. export_orchestration: export full org state (tasks, behaviors, identities) as portable JSON. import_orchestration: import org state into a new instance. deploy_client: deploy a customer client instance. query_company_brain: unified RAG query across all company knowledge. create_reminder: set a text reminder (shown in boot brief). list_reminders: view pending reminders. complete_reminder: mark a reminder done. global_procedure: manage Layer 0 procedures (actions: store, list, deactivate). Legacy aliases: store_global_procedure, list_global_procedures, deactivate_global_procedure."
|
|
2730
|
-
}
|
|
2731
|
-
];
|
|
2732
|
-
var PLATFORM_PROCEDURE_TITLES = new Set(
|
|
2733
|
-
PLATFORM_PROCEDURES.map((p) => p.title)
|
|
2734
|
-
);
|
|
2735
|
-
|
|
2736
|
-
// src/lib/global-procedures.ts
|
|
2737
|
-
var _platformCache = PLATFORM_PROCEDURES.map((p) => `### ${p.title}
|
|
2870
|
+
${sections.join("\n\n")}
|
|
2871
|
+
`;
|
|
2872
|
+
}
|
|
2873
|
+
async function storeGlobalProcedure(input) {
|
|
2874
|
+
const id = randomUUID2();
|
|
2875
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
2876
|
+
const client = getClient();
|
|
2877
|
+
await client.execute({
|
|
2878
|
+
sql: `INSERT INTO global_procedures (id, title, content, priority, domain, active, created_at, updated_at)
|
|
2879
|
+
VALUES (?, ?, ?, ?, ?, 1, ?, ?)`,
|
|
2880
|
+
args: [id, input.title, input.content, input.priority ?? "p0", input.domain ?? null, now, now]
|
|
2881
|
+
});
|
|
2882
|
+
await loadGlobalProcedures();
|
|
2883
|
+
return id;
|
|
2884
|
+
}
|
|
2885
|
+
async function deactivateGlobalProcedure(id) {
|
|
2886
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
2887
|
+
const client = getClient();
|
|
2888
|
+
const result = await client.execute({
|
|
2889
|
+
sql: "UPDATE global_procedures SET active = 0, updated_at = ? WHERE id = ?",
|
|
2890
|
+
args: [now, id]
|
|
2891
|
+
});
|
|
2892
|
+
await loadGlobalProcedures();
|
|
2893
|
+
return result.rowsAffected > 0;
|
|
2894
|
+
}
|
|
2895
|
+
var _customerCache, _cacheLoaded, _platformCache;
|
|
2896
|
+
var init_global_procedures = __esm({
|
|
2897
|
+
"src/lib/global-procedures.ts"() {
|
|
2898
|
+
"use strict";
|
|
2899
|
+
init_database();
|
|
2900
|
+
init_platform_procedures();
|
|
2901
|
+
_customerCache = "";
|
|
2902
|
+
_cacheLoaded = false;
|
|
2903
|
+
_platformCache = PLATFORM_PROCEDURES.map((p) => `### ${p.title}
|
|
2738
2904
|
${p.content}`).join("\n\n");
|
|
2905
|
+
}
|
|
2906
|
+
});
|
|
2907
|
+
|
|
2908
|
+
// src/lib/keychain.ts
|
|
2909
|
+
import { readFile as readFile3, writeFile as writeFile3, unlink, mkdir as mkdir3, chmod as chmod2 } from "fs/promises";
|
|
2910
|
+
import { existsSync as existsSync6 } from "fs";
|
|
2911
|
+
import { execSync as execSync2 } from "child_process";
|
|
2912
|
+
import path6 from "path";
|
|
2913
|
+
import os5 from "os";
|
|
2914
|
+
function getKeyDir() {
|
|
2915
|
+
return process.env.EXE_OS_DIR ?? process.env.EXE_MEM_DIR ?? path6.join(os5.homedir(), ".exe-os");
|
|
2916
|
+
}
|
|
2917
|
+
function getKeyPath() {
|
|
2918
|
+
return path6.join(getKeyDir(), "master.key");
|
|
2919
|
+
}
|
|
2920
|
+
function macKeychainGet() {
|
|
2921
|
+
if (process.platform !== "darwin") return null;
|
|
2922
|
+
try {
|
|
2923
|
+
return execSync2(
|
|
2924
|
+
`security find-generic-password -s "${SERVICE}" -a "${ACCOUNT}" -w 2>/dev/null`,
|
|
2925
|
+
{ encoding: "utf-8", timeout: 5e3 }
|
|
2926
|
+
).trim();
|
|
2927
|
+
} catch {
|
|
2928
|
+
return null;
|
|
2929
|
+
}
|
|
2930
|
+
}
|
|
2931
|
+
function macKeychainSet(value) {
|
|
2932
|
+
if (process.platform !== "darwin") return false;
|
|
2933
|
+
try {
|
|
2934
|
+
try {
|
|
2935
|
+
execSync2(
|
|
2936
|
+
`security delete-generic-password -s "${SERVICE}" -a "${ACCOUNT}" 2>/dev/null`,
|
|
2937
|
+
{ timeout: 5e3 }
|
|
2938
|
+
);
|
|
2939
|
+
} catch {
|
|
2940
|
+
}
|
|
2941
|
+
execSync2(
|
|
2942
|
+
`security add-generic-password -s "${SERVICE}" -a "${ACCOUNT}" -w "${value}"`,
|
|
2943
|
+
{ timeout: 5e3 }
|
|
2944
|
+
);
|
|
2945
|
+
return true;
|
|
2946
|
+
} catch {
|
|
2947
|
+
return false;
|
|
2948
|
+
}
|
|
2949
|
+
}
|
|
2950
|
+
function linuxSecretGet() {
|
|
2951
|
+
if (process.platform !== "linux") return null;
|
|
2952
|
+
try {
|
|
2953
|
+
return execSync2(
|
|
2954
|
+
`secret-tool lookup service "${SERVICE}" account "${ACCOUNT}" 2>/dev/null`,
|
|
2955
|
+
{ encoding: "utf-8", timeout: 5e3 }
|
|
2956
|
+
).trim();
|
|
2957
|
+
} catch {
|
|
2958
|
+
return null;
|
|
2959
|
+
}
|
|
2960
|
+
}
|
|
2961
|
+
function linuxSecretSet(value) {
|
|
2962
|
+
if (process.platform !== "linux") return false;
|
|
2963
|
+
try {
|
|
2964
|
+
execSync2(
|
|
2965
|
+
`echo -n "${value}" | secret-tool store --label="exe-os master key" service "${SERVICE}" account "${ACCOUNT}"`,
|
|
2966
|
+
{ timeout: 5e3 }
|
|
2967
|
+
);
|
|
2968
|
+
return true;
|
|
2969
|
+
} catch {
|
|
2970
|
+
return false;
|
|
2971
|
+
}
|
|
2972
|
+
}
|
|
2973
|
+
async function tryKeytar() {
|
|
2974
|
+
try {
|
|
2975
|
+
return await import("keytar");
|
|
2976
|
+
} catch {
|
|
2977
|
+
return null;
|
|
2978
|
+
}
|
|
2979
|
+
}
|
|
2980
|
+
function deriveMachineKey() {
|
|
2981
|
+
try {
|
|
2982
|
+
const crypto2 = __require("crypto");
|
|
2983
|
+
const material = [
|
|
2984
|
+
os5.hostname(),
|
|
2985
|
+
os5.userInfo().username,
|
|
2986
|
+
os5.arch(),
|
|
2987
|
+
os5.platform(),
|
|
2988
|
+
// Machine ID on Linux (stable across reboots)
|
|
2989
|
+
process.platform === "linux" ? readMachineId() : ""
|
|
2990
|
+
].join("|");
|
|
2991
|
+
return crypto2.createHash("sha256").update(material).digest();
|
|
2992
|
+
} catch {
|
|
2993
|
+
return null;
|
|
2994
|
+
}
|
|
2995
|
+
}
|
|
2996
|
+
function readMachineId() {
|
|
2997
|
+
try {
|
|
2998
|
+
const { readFileSync: readFileSync6 } = __require("fs");
|
|
2999
|
+
return readFileSync6("/etc/machine-id", "utf-8").trim();
|
|
3000
|
+
} catch {
|
|
3001
|
+
return "";
|
|
3002
|
+
}
|
|
3003
|
+
}
|
|
3004
|
+
function decryptWithMachineKey(encrypted, machineKey) {
|
|
3005
|
+
if (!encrypted.startsWith(ENCRYPTED_PREFIX)) return null;
|
|
3006
|
+
try {
|
|
3007
|
+
const crypto2 = __require("crypto");
|
|
3008
|
+
const parts = encrypted.slice(ENCRYPTED_PREFIX.length).split(":");
|
|
3009
|
+
if (parts.length !== 3) return null;
|
|
3010
|
+
const [ivB64, tagB64, cipherB64] = parts;
|
|
3011
|
+
const iv = Buffer.from(ivB64, "base64");
|
|
3012
|
+
const authTag = Buffer.from(tagB64, "base64");
|
|
3013
|
+
const decipher = crypto2.createDecipheriv("aes-256-gcm", machineKey, iv);
|
|
3014
|
+
decipher.setAuthTag(authTag);
|
|
3015
|
+
let decrypted = decipher.update(cipherB64, "base64", "utf-8");
|
|
3016
|
+
decrypted += decipher.final("utf-8");
|
|
3017
|
+
return decrypted;
|
|
3018
|
+
} catch {
|
|
3019
|
+
return null;
|
|
3020
|
+
}
|
|
3021
|
+
}
|
|
3022
|
+
async function getMasterKey() {
|
|
3023
|
+
const nativeValue = macKeychainGet() ?? linuxSecretGet();
|
|
3024
|
+
if (nativeValue) {
|
|
3025
|
+
return Buffer.from(nativeValue, "base64");
|
|
3026
|
+
}
|
|
3027
|
+
const keytar = await tryKeytar();
|
|
3028
|
+
if (keytar) {
|
|
3029
|
+
try {
|
|
3030
|
+
const keytarValue = await keytar.getPassword(SERVICE, ACCOUNT);
|
|
3031
|
+
if (keytarValue) {
|
|
3032
|
+
const migrated = macKeychainSet(keytarValue) || linuxSecretSet(keytarValue);
|
|
3033
|
+
if (migrated) {
|
|
3034
|
+
process.stderr.write("[keychain] Migrated key from keytar to native keychain.\n");
|
|
3035
|
+
}
|
|
3036
|
+
return Buffer.from(keytarValue, "base64");
|
|
3037
|
+
}
|
|
3038
|
+
} catch {
|
|
3039
|
+
}
|
|
3040
|
+
}
|
|
3041
|
+
const keyPath = getKeyPath();
|
|
3042
|
+
if (!existsSync6(keyPath)) {
|
|
3043
|
+
process.stderr.write(
|
|
3044
|
+
`[keychain] Key not found at ${keyPath} (HOME=${os5.homedir()}, EXE_OS_DIR=${process.env.EXE_OS_DIR ?? "unset"})
|
|
3045
|
+
`
|
|
3046
|
+
);
|
|
3047
|
+
return null;
|
|
3048
|
+
}
|
|
3049
|
+
try {
|
|
3050
|
+
const content = (await readFile3(keyPath, "utf-8")).trim();
|
|
3051
|
+
let b64Value;
|
|
3052
|
+
if (content.startsWith(ENCRYPTED_PREFIX)) {
|
|
3053
|
+
const machineKey = deriveMachineKey();
|
|
3054
|
+
if (!machineKey) {
|
|
3055
|
+
process.stderr.write("[keychain] Cannot derive machine key to decrypt stored key.\n");
|
|
3056
|
+
return null;
|
|
3057
|
+
}
|
|
3058
|
+
const decrypted = decryptWithMachineKey(content, machineKey);
|
|
3059
|
+
if (!decrypted) {
|
|
3060
|
+
process.stderr.write(
|
|
3061
|
+
"[keychain] Key decryption failed \u2014 machine may have changed.\n Use your 24-word recovery phrase: exe-os link import\n"
|
|
3062
|
+
);
|
|
3063
|
+
return null;
|
|
3064
|
+
}
|
|
3065
|
+
b64Value = decrypted;
|
|
3066
|
+
} else {
|
|
3067
|
+
b64Value = content;
|
|
3068
|
+
}
|
|
3069
|
+
const key = Buffer.from(b64Value, "base64");
|
|
3070
|
+
const migrated = macKeychainSet(b64Value) || linuxSecretSet(b64Value);
|
|
3071
|
+
if (migrated) {
|
|
3072
|
+
process.stderr.write("[keychain] Migrated key from file to native keychain.\n");
|
|
3073
|
+
}
|
|
3074
|
+
return key;
|
|
3075
|
+
} catch (err) {
|
|
3076
|
+
process.stderr.write(
|
|
3077
|
+
`[keychain] Key read failed at ${keyPath}: ${err instanceof Error ? err.message : String(err)}
|
|
3078
|
+
`
|
|
3079
|
+
);
|
|
3080
|
+
return null;
|
|
3081
|
+
}
|
|
3082
|
+
}
|
|
3083
|
+
var SERVICE, ACCOUNT, ENCRYPTED_PREFIX;
|
|
3084
|
+
var init_keychain = __esm({
|
|
3085
|
+
"src/lib/keychain.ts"() {
|
|
3086
|
+
"use strict";
|
|
3087
|
+
SERVICE = "exe-mem";
|
|
3088
|
+
ACCOUNT = "master-key";
|
|
3089
|
+
ENCRYPTED_PREFIX = "enc:";
|
|
3090
|
+
}
|
|
3091
|
+
});
|
|
3092
|
+
|
|
3093
|
+
// src/lib/state-bus.ts
|
|
3094
|
+
var StateBus, orgBus;
|
|
3095
|
+
var init_state_bus = __esm({
|
|
3096
|
+
"src/lib/state-bus.ts"() {
|
|
3097
|
+
"use strict";
|
|
3098
|
+
StateBus = class {
|
|
3099
|
+
handlers = /* @__PURE__ */ new Map();
|
|
3100
|
+
globalHandlers = /* @__PURE__ */ new Set();
|
|
3101
|
+
/** Emit an event to all subscribers */
|
|
3102
|
+
emit(event) {
|
|
3103
|
+
const typeHandlers = this.handlers.get(event.type);
|
|
3104
|
+
if (typeHandlers) {
|
|
3105
|
+
for (const handler of typeHandlers) {
|
|
3106
|
+
try {
|
|
3107
|
+
handler(event);
|
|
3108
|
+
} catch {
|
|
3109
|
+
}
|
|
3110
|
+
}
|
|
3111
|
+
}
|
|
3112
|
+
for (const handler of this.globalHandlers) {
|
|
3113
|
+
try {
|
|
3114
|
+
handler(event);
|
|
3115
|
+
} catch {
|
|
3116
|
+
}
|
|
3117
|
+
}
|
|
3118
|
+
}
|
|
3119
|
+
/** Subscribe to a specific event type */
|
|
3120
|
+
on(type, handler) {
|
|
3121
|
+
if (!this.handlers.has(type)) {
|
|
3122
|
+
this.handlers.set(type, /* @__PURE__ */ new Set());
|
|
3123
|
+
}
|
|
3124
|
+
this.handlers.get(type).add(handler);
|
|
3125
|
+
}
|
|
3126
|
+
/** Subscribe to ALL events */
|
|
3127
|
+
onAny(handler) {
|
|
3128
|
+
this.globalHandlers.add(handler);
|
|
3129
|
+
}
|
|
3130
|
+
/** Unsubscribe from a specific event type */
|
|
3131
|
+
off(type, handler) {
|
|
3132
|
+
this.handlers.get(type)?.delete(handler);
|
|
3133
|
+
}
|
|
3134
|
+
/** Unsubscribe from ALL events */
|
|
3135
|
+
offAny(handler) {
|
|
3136
|
+
this.globalHandlers.delete(handler);
|
|
3137
|
+
}
|
|
3138
|
+
/** Remove all listeners */
|
|
3139
|
+
clear() {
|
|
3140
|
+
this.handlers.clear();
|
|
3141
|
+
this.globalHandlers.clear();
|
|
3142
|
+
}
|
|
3143
|
+
};
|
|
3144
|
+
orgBus = new StateBus();
|
|
3145
|
+
}
|
|
3146
|
+
});
|
|
3147
|
+
|
|
3148
|
+
// src/lib/memory-write-governor.ts
|
|
3149
|
+
import { createHash } from "crypto";
|
|
3150
|
+
function normalizeMemoryText(text) {
|
|
3151
|
+
return text.replace(/\r\n/g, "\n").replace(/[ \t]+$/gm, "").replace(/\n{4,}/g, "\n\n\n").trim();
|
|
3152
|
+
}
|
|
3153
|
+
function classifyMemoryType(input) {
|
|
3154
|
+
if (input.memory_type && input.memory_type.trim()) return input.memory_type.trim();
|
|
3155
|
+
const tool = input.tool_name.toLowerCase();
|
|
3156
|
+
const text = input.raw_text.toLowerCase();
|
|
3157
|
+
if (tool.includes("store_decision") || tool.includes("decision")) return "decision";
|
|
3158
|
+
if (tool.includes("commit") || text.includes("adr-") || text.includes("architectural decision")) return "adr";
|
|
3159
|
+
if (tool.includes("store_behavior") || tool.includes("behavior")) return "behavior";
|
|
3160
|
+
if (tool.includes("global_procedure") || text.includes("organization-wide procedures")) return "procedure";
|
|
3161
|
+
if (tool.includes("checkpoint") || text.startsWith("context checkpoint")) return "checkpoint";
|
|
3162
|
+
if (tool.includes("sessionsummary") || tool.includes("session-summary")) return "summary";
|
|
3163
|
+
if (tool.includes("sessionend") || text.startsWith("session ended")) return "summary";
|
|
3164
|
+
if (tool.includes("send_whatsapp") || tool.includes("conversation")) return "conversation";
|
|
3165
|
+
if (tool === "store_memory" || tool === "manual") return "observation";
|
|
3166
|
+
return "raw";
|
|
3167
|
+
}
|
|
3168
|
+
function shouldDropMemory(text) {
|
|
3169
|
+
const normalized = normalizeMemoryText(text);
|
|
3170
|
+
if (normalized.length < 10) return { drop: true, reason: "too_short" };
|
|
3171
|
+
if (NOISE_DROP_PATTERNS.some((pattern) => pattern.test(normalized))) {
|
|
3172
|
+
return { drop: true, reason: "known_boilerplate_noise" };
|
|
3173
|
+
}
|
|
3174
|
+
return { drop: false };
|
|
3175
|
+
}
|
|
3176
|
+
function shouldSkipEmbedding(input) {
|
|
3177
|
+
const type = classifyMemoryType(input);
|
|
3178
|
+
if (HIGH_VALUE_SUPERSESSION_TYPES.has(type)) return false;
|
|
3179
|
+
if (type === "raw" && input.raw_text.length > 2e4) return true;
|
|
3180
|
+
if (SKIP_EMBED_PATTERNS.some((pattern) => pattern.test(input.raw_text))) return true;
|
|
3181
|
+
return false;
|
|
3182
|
+
}
|
|
3183
|
+
function hashMemoryContent(text) {
|
|
3184
|
+
return createHash("sha256").update(normalizeMemoryText(text)).digest("hex");
|
|
3185
|
+
}
|
|
3186
|
+
function scopedDedupArgs(input) {
|
|
3187
|
+
return [input.contentHash, input.agentId, input.projectName, input.memoryType];
|
|
3188
|
+
}
|
|
3189
|
+
function governMemoryRecord(record) {
|
|
3190
|
+
const normalized = normalizeMemoryText(record.raw_text);
|
|
3191
|
+
const memoryType = classifyMemoryType({
|
|
3192
|
+
raw_text: normalized,
|
|
3193
|
+
agent_id: record.agent_id,
|
|
3194
|
+
project_name: record.project_name,
|
|
3195
|
+
tool_name: record.tool_name,
|
|
3196
|
+
memory_type: record.memory_type
|
|
3197
|
+
});
|
|
3198
|
+
const drop = shouldDropMemory(normalized);
|
|
3199
|
+
const skipEmbedding = shouldSkipEmbedding({
|
|
3200
|
+
raw_text: normalized,
|
|
3201
|
+
agent_id: record.agent_id,
|
|
3202
|
+
project_name: record.project_name,
|
|
3203
|
+
tool_name: record.tool_name,
|
|
3204
|
+
memory_type: memoryType
|
|
3205
|
+
});
|
|
3206
|
+
return {
|
|
3207
|
+
record: {
|
|
3208
|
+
...record,
|
|
3209
|
+
raw_text: normalized,
|
|
3210
|
+
memory_type: memoryType,
|
|
3211
|
+
vector: skipEmbedding ? null : record.vector
|
|
3212
|
+
},
|
|
3213
|
+
contentHash: hashMemoryContent(normalized),
|
|
3214
|
+
shouldDrop: drop.drop,
|
|
3215
|
+
dropReason: drop.reason,
|
|
3216
|
+
skipEmbedding,
|
|
3217
|
+
hygiene: {
|
|
3218
|
+
dedup: true,
|
|
3219
|
+
supersession: HIGH_VALUE_SUPERSESSION_TYPES.has(memoryType)
|
|
3220
|
+
}
|
|
3221
|
+
};
|
|
3222
|
+
}
|
|
3223
|
+
async function findScopedDuplicate(input) {
|
|
3224
|
+
const { getClient: getClient2 } = await Promise.resolve().then(() => (init_database(), database_exports));
|
|
3225
|
+
const client = getClient2();
|
|
3226
|
+
const args = scopedDedupArgs(input);
|
|
3227
|
+
let sql = `SELECT id FROM memories
|
|
3228
|
+
WHERE content_hash = ?
|
|
3229
|
+
AND agent_id = ?
|
|
3230
|
+
AND project_name = ?
|
|
3231
|
+
AND COALESCE(memory_type, 'raw') = ?
|
|
3232
|
+
AND COALESCE(status, 'active') != 'deleted'`;
|
|
3233
|
+
if (input.excludeId) {
|
|
3234
|
+
sql += " AND id != ?";
|
|
3235
|
+
args.push(input.excludeId);
|
|
3236
|
+
}
|
|
3237
|
+
sql += " ORDER BY timestamp DESC LIMIT 1";
|
|
3238
|
+
const result = await client.execute({ sql, args });
|
|
3239
|
+
return result.rows[0]?.id ? String(result.rows[0].id) : null;
|
|
3240
|
+
}
|
|
3241
|
+
async function runPostWriteMemoryHygiene(memoryId) {
|
|
3242
|
+
try {
|
|
3243
|
+
const { getClient: getClient2 } = await Promise.resolve().then(() => (init_database(), database_exports));
|
|
3244
|
+
const client = getClient2();
|
|
3245
|
+
const current = await client.execute({
|
|
3246
|
+
sql: `SELECT id, agent_id, project_name, memory_type, content_hash, supersedes_id,
|
|
3247
|
+
importance, timestamp
|
|
3248
|
+
FROM memories
|
|
3249
|
+
WHERE id = ?
|
|
3250
|
+
LIMIT 1`,
|
|
3251
|
+
args: [memoryId]
|
|
3252
|
+
});
|
|
3253
|
+
const row = current.rows[0];
|
|
3254
|
+
if (!row) return;
|
|
3255
|
+
const memoryType = String(row.memory_type ?? "raw");
|
|
3256
|
+
const contentHash = row.content_hash ? String(row.content_hash) : null;
|
|
3257
|
+
const agentId = String(row.agent_id);
|
|
3258
|
+
const projectName = String(row.project_name);
|
|
3259
|
+
if (contentHash) {
|
|
3260
|
+
await client.execute({
|
|
3261
|
+
sql: `UPDATE memories
|
|
3262
|
+
SET status = 'deleted',
|
|
3263
|
+
outcome = COALESCE(outcome, 'superseded')
|
|
3264
|
+
WHERE id != ?
|
|
3265
|
+
AND content_hash = ?
|
|
3266
|
+
AND agent_id = ?
|
|
3267
|
+
AND project_name = ?
|
|
3268
|
+
AND COALESCE(memory_type, 'raw') = ?
|
|
3269
|
+
AND COALESCE(status, 'active') = 'active'`,
|
|
3270
|
+
args: [memoryId, contentHash, agentId, projectName, memoryType]
|
|
3271
|
+
});
|
|
3272
|
+
}
|
|
3273
|
+
const supersedesId = row.supersedes_id ? String(row.supersedes_id) : null;
|
|
3274
|
+
if (supersedesId && HIGH_VALUE_SUPERSESSION_TYPES.has(memoryType)) {
|
|
3275
|
+
const old = await client.execute({
|
|
3276
|
+
sql: `SELECT importance FROM memories WHERE id = ? LIMIT 1`,
|
|
3277
|
+
args: [supersedesId]
|
|
3278
|
+
});
|
|
3279
|
+
const oldImportance = Number(old.rows[0]?.importance ?? 0);
|
|
3280
|
+
const newImportance = Number(row.importance ?? 0);
|
|
3281
|
+
await client.batch([
|
|
3282
|
+
{
|
|
3283
|
+
sql: `UPDATE memories
|
|
3284
|
+
SET status = 'archived',
|
|
3285
|
+
outcome = COALESCE(outcome, 'superseded')
|
|
3286
|
+
WHERE id = ?`,
|
|
3287
|
+
args: [supersedesId]
|
|
3288
|
+
},
|
|
3289
|
+
{
|
|
3290
|
+
sql: `UPDATE memories
|
|
3291
|
+
SET importance = MAX(COALESCE(importance, 5), ?),
|
|
3292
|
+
parent_memory_id = COALESCE(parent_memory_id, ?)
|
|
3293
|
+
WHERE id = ?`,
|
|
3294
|
+
args: [Math.max(oldImportance, newImportance), supersedesId, memoryId]
|
|
3295
|
+
}
|
|
3296
|
+
], "write");
|
|
3297
|
+
}
|
|
3298
|
+
} catch (err) {
|
|
3299
|
+
process.stderr.write(
|
|
3300
|
+
`[memory-governor] post-write hygiene failed for ${memoryId}: ${err instanceof Error ? err.message : String(err)}
|
|
3301
|
+
`
|
|
3302
|
+
);
|
|
3303
|
+
}
|
|
3304
|
+
}
|
|
3305
|
+
function schedulePostWriteMemoryHygiene(memoryIds) {
|
|
3306
|
+
if (process.env.EXE_SKIP_MEMORY_HYGIENE === "1") return;
|
|
3307
|
+
if (memoryIds.length === 0) return;
|
|
3308
|
+
const run = () => {
|
|
3309
|
+
void Promise.all(memoryIds.map((id) => runPostWriteMemoryHygiene(id)));
|
|
3310
|
+
};
|
|
3311
|
+
if (typeof setImmediate === "function") setImmediate(run);
|
|
3312
|
+
else setTimeout(run, 0);
|
|
3313
|
+
}
|
|
3314
|
+
var HIGH_VALUE_SUPERSESSION_TYPES, NOISE_DROP_PATTERNS, SKIP_EMBED_PATTERNS;
|
|
3315
|
+
var init_memory_write_governor = __esm({
|
|
3316
|
+
"src/lib/memory-write-governor.ts"() {
|
|
3317
|
+
"use strict";
|
|
3318
|
+
HIGH_VALUE_SUPERSESSION_TYPES = /* @__PURE__ */ new Set([
|
|
3319
|
+
"decision",
|
|
3320
|
+
"adr",
|
|
3321
|
+
"behavior",
|
|
3322
|
+
"procedure"
|
|
3323
|
+
]);
|
|
3324
|
+
NOISE_DROP_PATTERNS = [
|
|
3325
|
+
/^\s*\[📋\s+\d+\s+reviews?\s+pending\b/im,
|
|
3326
|
+
/^\s*<system-reminder>[\s\S]*?<\/system-reminder>\s*$/im,
|
|
3327
|
+
/^\s*The UserPromptSubmit hook checks the DB for new tasks/im,
|
|
3328
|
+
/^\s*Intercom is a speedup, not delivery/im,
|
|
3329
|
+
/^\s*Context bar reads as USAGE not remaining/im
|
|
3330
|
+
];
|
|
3331
|
+
SKIP_EMBED_PATTERNS = [
|
|
3332
|
+
/tmux capture-pane\b/i,
|
|
3333
|
+
/docker ps\b/i,
|
|
3334
|
+
/docker images\b/i,
|
|
3335
|
+
/git status\b/i,
|
|
3336
|
+
/grep .*node_modules/i,
|
|
3337
|
+
/npm (install|ci)\b[\s\S]*(added \d+ packages|audited \d+ packages)/i
|
|
3338
|
+
];
|
|
3339
|
+
}
|
|
3340
|
+
});
|
|
3341
|
+
|
|
3342
|
+
// src/lib/shard-manager.ts
|
|
3343
|
+
var shard_manager_exports = {};
|
|
3344
|
+
__export(shard_manager_exports, {
|
|
3345
|
+
disposeShards: () => disposeShards,
|
|
3346
|
+
ensureShardSchema: () => ensureShardSchema,
|
|
3347
|
+
getOpenShardCount: () => getOpenShardCount,
|
|
3348
|
+
getReadyShardClient: () => getReadyShardClient,
|
|
3349
|
+
getShardClient: () => getShardClient,
|
|
3350
|
+
getShardsDir: () => getShardsDir,
|
|
3351
|
+
initShardManager: () => initShardManager,
|
|
3352
|
+
isShardingEnabled: () => isShardingEnabled,
|
|
3353
|
+
listShards: () => listShards,
|
|
3354
|
+
shardExists: () => shardExists
|
|
3355
|
+
});
|
|
3356
|
+
import path7 from "path";
|
|
3357
|
+
import { existsSync as existsSync7, mkdirSync as mkdirSync2, readdirSync, renameSync as renameSync3, statSync as statSync2 } from "fs";
|
|
3358
|
+
import { createClient as createClient2 } from "@libsql/client";
|
|
3359
|
+
function initShardManager(encryptionKey) {
|
|
3360
|
+
_encryptionKey = encryptionKey;
|
|
3361
|
+
if (!existsSync7(SHARDS_DIR)) {
|
|
3362
|
+
mkdirSync2(SHARDS_DIR, { recursive: true });
|
|
3363
|
+
}
|
|
3364
|
+
_shardingEnabled = true;
|
|
3365
|
+
if (_evictionTimer) clearInterval(_evictionTimer);
|
|
3366
|
+
_evictionTimer = setInterval(evictIdleShards, EVICTION_INTERVAL_MS);
|
|
3367
|
+
_evictionTimer.unref();
|
|
3368
|
+
}
|
|
3369
|
+
function isShardingEnabled() {
|
|
3370
|
+
return _shardingEnabled;
|
|
3371
|
+
}
|
|
3372
|
+
function getShardsDir() {
|
|
3373
|
+
return SHARDS_DIR;
|
|
3374
|
+
}
|
|
3375
|
+
/**
 * Returns a libsql client for the given project's shard, opening it lazily
 * and caching it. Refreshes LRU recency on cache hits and evicts the
 * least-recently-used shard(s) before opening a new one so the number of
 * open handles stays under MAX_OPEN_SHARDS.
 * Throws if initShardManager() has not been called, or if the project name
 * sanitizes to an unusable shard name.
 */
function getShardClient(projectName) {
  if (!_encryptionKey) {
    throw new Error("Shard manager not initialized. Call initShardManager() first.");
  }
  const safeName = safeShardName(projectName);
  if (!safeName || safeName === "unknown") {
    throw new Error(`Invalid project name for shard: "${projectName}" (resolved to "${safeName}")`);
  }
  const hit = _shards.get(safeName);
  if (hit) {
    // Cache hit: bump recency for LRU bookkeeping.
    _shardLastAccess.set(safeName, Date.now());
    return hit;
  }
  // Make room first (assumes _shards/_shardLastAccess stay in sync,
  // otherwise evictLRU() could no-op — TODO confirm invariant).
  while (_shards.size >= MAX_OPEN_SHARDS) {
    evictLRU();
  }
  const shardFile = path7.join(SHARDS_DIR, `${safeName}.db`);
  const opened = createClient2({
    url: `file:${shardFile}`,
    encryptionKey: _encryptionKey
  });
  _shards.set(safeName, opened);
  _shardLastAccess.set(safeName, Date.now());
  return opened;
}
|
|
3400
|
+
/**
 * Whether a shard database file already exists on disk for this project.
 */
function shardExists(projectName) {
  const shardFile = path7.join(SHARDS_DIR, `${safeShardName(projectName)}.db`);
  return existsSync7(shardFile);
}
|
|
3404
|
+
/**
 * Sanitizes a project name into a filesystem-safe shard name: only
 * [A-Za-z0-9_-] survive; every other character becomes an underscore.
 * Note: case is preserved, so names differing only by case map to
 * different shards.
 */
function safeShardName(projectName) {
  const disallowed = /[^a-zA-Z0-9_-]/g;
  return projectName.replace(disallowed, "_");
}
|
|
3407
|
+
/**
 * Lists the shard names (project-safe names) that exist on disk.
 * Returns [] when the shards directory has not been created yet.
 *
 * Fix: the original used `f.replace(".db", "")`, which removes the FIRST
 * occurrence of ".db" rather than the filename extension, corrupting any
 * name that contains ".db" internally. Strip the trailing ".db" instead.
 */
function listShards() {
  if (!existsSync7(SHARDS_DIR)) return [];
  return readdirSync(SHARDS_DIR).filter((f) => f.endsWith(".db")).map((f) => f.slice(0, -3));
}
|
|
3411
|
+
/**
 * Creates or upgrades one shard database's schema so it mirrors the main DB
 * (the ALTER lists must match database.ts): the `memories` table with its
 * FTS5 full-text mirror and sync triggers, plus the knowledge-graph tables
 * (entities / relationships / hyperedges).
 * Idempotent: all DDL uses IF NOT EXISTS and every ALTER TABLE failure
 * ("duplicate column") is deliberately swallowed.
 */
async function ensureShardSchema(client) {
  // WAL + generous busy timeout: shard files may be hit concurrently.
  await client.execute("PRAGMA journal_mode = WAL");
  await client.execute("PRAGMA busy_timeout = 30000");
  try {
    // Vector-search tuning; older libsql builds reject this PRAGMA.
    await client.execute("PRAGMA libsql_vector_search_ef = 128");
  } catch {
  }
  await client.executeMultiple(`
    CREATE TABLE IF NOT EXISTS memories (
      id TEXT PRIMARY KEY,
      agent_id TEXT NOT NULL,
      agent_role TEXT NOT NULL,
      session_id TEXT NOT NULL,
      timestamp TEXT NOT NULL,
      tool_name TEXT NOT NULL,
      project_name TEXT NOT NULL,
      has_error INTEGER NOT NULL DEFAULT 0,
      raw_text TEXT NOT NULL,
      vector F32_BLOB(1024),
      version INTEGER NOT NULL DEFAULT 0
    );

    CREATE INDEX IF NOT EXISTS idx_memories_agent ON memories(agent_id);
    CREATE INDEX IF NOT EXISTS idx_memories_timestamp ON memories(timestamp);
    CREATE INDEX IF NOT EXISTS idx_memories_agent_project ON memories(agent_id, project_name);
  `);
  // External-content FTS5 index kept in sync with `memories` via triggers
  // (insert/delete/update use the FTS5 'delete' command protocol).
  await client.executeMultiple(`
    CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts USING fts5(
      raw_text,
      content='memories',
      content_rowid='rowid'
    );

    CREATE TRIGGER IF NOT EXISTS memories_fts_ai AFTER INSERT ON memories BEGIN
      INSERT INTO memories_fts(rowid, raw_text) VALUES (new.rowid, new.raw_text);
    END;

    CREATE TRIGGER IF NOT EXISTS memories_fts_ad AFTER DELETE ON memories BEGIN
      INSERT INTO memories_fts(memories_fts, rowid, raw_text) VALUES('delete', old.rowid, old.raw_text);
    END;

    CREATE TRIGGER IF NOT EXISTS memories_fts_au AFTER UPDATE ON memories BEGIN
      INSERT INTO memories_fts(memories_fts, rowid, raw_text) VALUES('delete', old.rowid, old.raw_text);
      INSERT INTO memories_fts(rowid, raw_text) VALUES (new.rowid, new.raw_text);
    END;
  `);
  // Additive column migrations; each fails (and is ignored) when the column
  // already exists, so this is safe to run on every open.
  for (const col of [
    "ALTER TABLE memories ADD COLUMN task_id TEXT",
    "ALTER TABLE memories ADD COLUMN consolidated INTEGER NOT NULL DEFAULT 0",
    "ALTER TABLE memories ADD COLUMN author_device_id TEXT",
    "ALTER TABLE memories ADD COLUMN scope TEXT NOT NULL DEFAULT 'business'",
    "ALTER TABLE memories ADD COLUMN importance INTEGER DEFAULT 5",
    "ALTER TABLE memories ADD COLUMN status TEXT DEFAULT 'active'",
    "ALTER TABLE memories ADD COLUMN wiki_synced INTEGER DEFAULT 0",
    "ALTER TABLE memories ADD COLUMN graph_extracted INTEGER DEFAULT 0",
    "ALTER TABLE memories ADD COLUMN content_hash TEXT",
    "ALTER TABLE memories ADD COLUMN graph_extracted_hash TEXT",
    "ALTER TABLE memories ADD COLUMN confidence REAL DEFAULT 0.7",
    "ALTER TABLE memories ADD COLUMN last_accessed TEXT",
    // Wiki linkage columns (must match database.ts)
    "ALTER TABLE memories ADD COLUMN workspace_id TEXT",
    "ALTER TABLE memories ADD COLUMN document_id TEXT",
    "ALTER TABLE memories ADD COLUMN user_id TEXT",
    "ALTER TABLE memories ADD COLUMN char_offset INTEGER",
    "ALTER TABLE memories ADD COLUMN page_number INTEGER",
    // Source provenance columns (must match database.ts)
    "ALTER TABLE memories ADD COLUMN source_path TEXT",
    "ALTER TABLE memories ADD COLUMN source_type TEXT DEFAULT 'text'",
    "ALTER TABLE memories ADD COLUMN tier INTEGER DEFAULT 3",
    "ALTER TABLE memories ADD COLUMN supersedes_id TEXT",
    // MS-11: draft staging, MS-6a: memory_type, MS-7: trajectory
    "ALTER TABLE memories ADD COLUMN draft INTEGER DEFAULT 0",
    "ALTER TABLE memories ADD COLUMN memory_type TEXT DEFAULT 'raw'",
    "ALTER TABLE memories ADD COLUMN trajectory TEXT",
    // Metadata enrichment columns (must match database.ts)
    "ALTER TABLE memories ADD COLUMN intent TEXT",
    "ALTER TABLE memories ADD COLUMN outcome TEXT",
    "ALTER TABLE memories ADD COLUMN domain TEXT",
    "ALTER TABLE memories ADD COLUMN referenced_entities TEXT",
    "ALTER TABLE memories ADD COLUMN retrieval_count INTEGER DEFAULT 0",
    "ALTER TABLE memories ADD COLUMN chain_position TEXT",
    "ALTER TABLE memories ADD COLUMN review_status TEXT",
    "ALTER TABLE memories ADD COLUMN context_window_pct INTEGER",
    "ALTER TABLE memories ADD COLUMN file_paths TEXT",
    "ALTER TABLE memories ADD COLUMN commit_hash TEXT",
    "ALTER TABLE memories ADD COLUMN duration_ms INTEGER",
    "ALTER TABLE memories ADD COLUMN token_cost REAL",
    "ALTER TABLE memories ADD COLUMN audience TEXT",
    "ALTER TABLE memories ADD COLUMN language_type TEXT",
    "ALTER TABLE memories ADD COLUMN parent_memory_id TEXT",
    "ALTER TABLE memories ADD COLUMN deleted_at TEXT"
  ]) {
    try {
      await client.execute(col);
    } catch {
    }
  }
  // Indexes on migrated columns; failures ignored for the same reason.
  for (const idx of [
    "CREATE INDEX IF NOT EXISTS idx_memories_tier ON memories(tier)",
    "CREATE INDEX IF NOT EXISTS idx_memories_supersedes ON memories(supersedes_id) WHERE supersedes_id IS NOT NULL",
    "CREATE INDEX IF NOT EXISTS idx_memories_scoped_content_hash ON memories(content_hash, agent_id, project_name, memory_type) WHERE content_hash IS NOT NULL"
  ]) {
    try {
      await client.execute(idx);
    } catch {
    }
  }
  try {
    await client.execute("CREATE INDEX IF NOT EXISTS idx_memories_status ON memories(status)");
  } catch {
  }
  for (const idx of [
    "CREATE INDEX IF NOT EXISTS idx_memories_workspace ON memories(workspace_id)",
    "CREATE INDEX IF NOT EXISTS idx_memories_document ON memories(document_id)",
    "CREATE INDEX IF NOT EXISTS idx_memories_user ON memories(user_id)"
  ]) {
    try {
      await client.execute(idx);
    } catch {
    }
  }
  // Knowledge-graph tables: entities, pairwise relationships, their links
  // back to memories, and n-ary hyperedges.
  await client.executeMultiple(`
    CREATE TABLE IF NOT EXISTS entities (
      id TEXT PRIMARY KEY,
      name TEXT NOT NULL,
      type TEXT NOT NULL,
      first_seen TEXT NOT NULL,
      last_seen TEXT NOT NULL,
      properties TEXT DEFAULT '{}',
      UNIQUE(name, type)
    );

    CREATE TABLE IF NOT EXISTS relationships (
      id TEXT PRIMARY KEY,
      source_entity_id TEXT NOT NULL,
      target_entity_id TEXT NOT NULL,
      type TEXT NOT NULL,
      weight REAL DEFAULT 1.0,
      timestamp TEXT NOT NULL,
      properties TEXT DEFAULT '{}',
      UNIQUE(source_entity_id, target_entity_id, type)
    );

    CREATE TABLE IF NOT EXISTS entity_memories (
      entity_id TEXT NOT NULL,
      memory_id TEXT NOT NULL,
      PRIMARY KEY (entity_id, memory_id)
    );

    CREATE TABLE IF NOT EXISTS relationship_memories (
      relationship_id TEXT NOT NULL,
      memory_id TEXT NOT NULL,
      PRIMARY KEY (relationship_id, memory_id)
    );

    CREATE INDEX IF NOT EXISTS idx_entities_name ON entities(name);
    CREATE INDEX IF NOT EXISTS idx_entities_type ON entities(type);
    CREATE INDEX IF NOT EXISTS idx_relationships_source ON relationships(source_entity_id);
    CREATE INDEX IF NOT EXISTS idx_relationships_target ON relationships(target_entity_id);
    CREATE INDEX IF NOT EXISTS idx_relationships_type ON relationships(type);

    CREATE TABLE IF NOT EXISTS hyperedges (
      id TEXT PRIMARY KEY,
      label TEXT NOT NULL,
      relation TEXT NOT NULL,
      confidence REAL DEFAULT 1.0,
      timestamp TEXT NOT NULL
    );

    CREATE TABLE IF NOT EXISTS hyperedge_nodes (
      hyperedge_id TEXT NOT NULL,
      entity_id TEXT NOT NULL,
      PRIMARY KEY (hyperedge_id, entity_id)
    );
  `);
  // Additive migrations for relationships; failures ignored as above.
  for (const col of [
    "ALTER TABLE relationships ADD COLUMN confidence REAL DEFAULT 1.0",
    "ALTER TABLE relationships ADD COLUMN confidence_label TEXT DEFAULT 'extracted'"
  ]) {
    try {
      await client.execute(col);
    } catch {
    }
  }
}
|
|
3596
|
+
/**
 * Returns a shard client whose schema is guaranteed to be in place.
 * If the shard file turns out to be unreadable (wrong key / corruption:
 * SQLITE_NOTADB or "file is not a database"), the broken file is archived
 * with a timestamped `.broken-` suffix and a fresh shard is created in its
 * place. Any other schema error is rethrown untouched.
 */
async function getReadyShardClient(projectName) {
  const safeName = safeShardName(projectName);
  let client = getShardClient(projectName);
  try {
    await ensureShardSchema(client);
    return client;
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
    // Only the "not a database" family triggers the archive-and-recreate path.
    if (!/SQLITE_NOTADB|file is not a database/i.test(message)) throw err;
    // Drop the dead handle from the cache before touching the file.
    client.close();
    _shards.delete(safeName);
    _shardLastAccess.delete(safeName);
    const dbPath = path7.join(SHARDS_DIR, `${safeName}.db`);
    if (existsSync7(dbPath)) {
      const stat = statSync2(dbPath);
      // Colon/dot-free timestamp keeps the archive name filesystem-safe.
      const stamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-");
      const archivedPath = path7.join(SHARDS_DIR, `${safeName}.db.broken-${stamp}`);
      renameSync3(dbPath, archivedPath);
      process.stderr.write(
        `[shard-manager] Archived unreadable shard ${safeName}: ${archivedPath} (${stat.size} bytes, mtime ${stat.mtime.toISOString()})
`
      );
    }
    // Reopen: getShardClient now creates a brand-new empty shard file.
    client = getShardClient(projectName);
    await ensureShardSchema(client);
    return client;
  }
}
|
|
3624
|
+
/**
 * Closes and removes the least-recently-used shard client from the cache.
 * No-op when the access map is empty.
 */
function evictLRU() {
  let victim = null;
  let victimTime = Infinity;
  for (const [name, lastAccess] of _shardLastAccess) {
    if (lastAccess < victimTime) {
      victim = name;
      victimTime = lastAccess;
    }
  }
  if (!victim) return;
  const client = _shards.get(victim);
  if (client) {
    client.close();
  }
  _shards.delete(victim);
  _shardLastAccess.delete(victim);
}
|
|
3642
|
+
/**
 * Periodic sweep: closes and drops every shard that has not been touched
 * for longer than SHARD_IDLE_MS. Names are collected first so the access
 * map is not mutated while being iterated.
 */
function evictIdleShards() {
  const cutoff = Date.now() - SHARD_IDLE_MS;
  const idleNames = [..._shardLastAccess].filter(([, lastAccess]) => lastAccess < cutoff).map(([name]) => name);
  for (const name of idleNames) {
    const client = _shards.get(name);
    if (client) {
      client.close();
    }
    _shards.delete(name);
    _shardLastAccess.delete(name);
  }
}
|
|
3659
|
+
// Number of shard DB clients currently held open in the cache.
function getOpenShardCount() {
  return _shards.size;
}
|
|
3662
|
+
/**
 * Full teardown of the shard manager: stops the eviction sweep, closes
 * every open shard client, empties both caches, and clears the key so a
 * fresh initShardManager() is required before the next use.
 */
function disposeShards() {
  if (_evictionTimer) {
    clearInterval(_evictionTimer);
    _evictionTimer = null;
  }
  for (const client of _shards.values()) {
    client.close();
  }
  _shards.clear();
  _shardLastAccess.clear();
  _shardingEnabled = false;
  _encryptionKey = null;
}
|
|
3675
|
+
// Module-level state for the shard manager (populated lazily by the __esm
// initializer below, which runs once on first import of the module).
var SHARDS_DIR, SHARD_IDLE_MS, MAX_OPEN_SHARDS, EVICTION_INTERVAL_MS, _shards, _shardLastAccess, _evictionTimer, _encryptionKey, _shardingEnabled;
var init_shard_manager = __esm({
  "src/lib/shard-manager.ts"() {
    "use strict";
    init_config();
    SHARDS_DIR = path7.join(EXE_AI_DIR, "shards");
    // Shards idle > 5 min are closed by the sweep that runs every 60 s.
    SHARD_IDLE_MS = 5 * 60 * 1e3;
    MAX_OPEN_SHARDS = 10;
    EVICTION_INTERVAL_MS = 60 * 1e3;
    _shards = /* @__PURE__ */ new Map();
    _shardLastAccess = /* @__PURE__ */ new Map();
    _evictionTimer = null;
    _encryptionKey = null;
    _shardingEnabled = false;
  }
});
|
|
3691
|
+
|
|
3692
|
+
// src/lib/store.ts
// Lazy-module export map (esbuild __esm pattern) for the memory store API.
var store_exports = {};
__export(store_exports, {
  attachDocumentMetadata: () => attachDocumentMetadata,
  buildRawVisibilityFilter: () => buildRawVisibilityFilter,
  buildWikiScopeFilter: () => buildWikiScopeFilter,
  classifyTier: () => classifyTier,
  disposeStore: () => disposeStore,
  flushBatch: () => flushBatch,
  flushTier3: () => flushTier3,
  getMemoryCardinality: () => getMemoryCardinality,
  initStore: () => initStore,
  reserveVersions: () => reserveVersions,
  searchMemories: () => searchMemories,
  updateMemoryStatus: () => updateMemoryStatus,
  vectorToBlob: () => vectorToBlob,
  writeMemory: () => writeMemory
});
|
|
3710
|
+
/**
 * Detects SQLite lock-contention errors by message text
 * ("SQLITE_BUSY" or "database is locked", case-insensitive).
 * Non-Error values are never treated as busy.
 */
function isBusyError2(err) {
  if (!(err instanceof Error)) return false;
  const text = err.message.toLowerCase();
  return text.includes("sqlite_busy") || text.includes("database is locked");
}
|
|
3717
|
+
/**
 * Runs `fn`, retrying up to INIT_MAX_RETRIES times when it fails with a
 * SQLite busy/locked error, with linearly growing back-off
 * (INIT_RETRY_DELAY_MS * attempt). Non-busy errors, and a busy error on the
 * final attempt, are rethrown. `label` is only used in the stderr notice.
 */
async function retryOnBusy2(fn, label) {
  let attempt = 0;
  while (attempt <= INIT_MAX_RETRIES) {
    try {
      return await fn();
    } catch (err) {
      const outOfRetries = attempt === INIT_MAX_RETRIES;
      if (!isBusyError2(err) || outOfRetries) throw err;
      process.stderr.write(
        `[store] SQLITE_BUSY during ${label}, retry ${attempt + 1}/${INIT_MAX_RETRIES}
`
      );
      await new Promise((resolve) => setTimeout(resolve, INIT_RETRY_DELAY_MS * (attempt + 1)));
      attempt++;
    }
  }
  throw new Error("unreachable");
}
|
|
3732
|
+
/**
 * Initializes the memory store: resets the write buffer and flush timer,
 * resolves the DB path and master encryption key, opens the main libsql DB,
 * ensures its schema, then (unless `lightweight`) best-effort boots the
 * daemon client, the shard manager, the version counter, and global
 * procedures. Throws if no master key can be resolved.
 *
 * @param options optional: dbPath, masterKey (Buffer), batchSize,
 *   flushIntervalMs, lightweight (skip shard/version/procedure setup)
 */
async function initStore(options) {
  // Re-init is allowed: discard any pending batch state from a prior init.
  if (_flushTimer !== null) {
    clearInterval(_flushTimer);
    _flushTimer = null;
  }
  _pendingRecords = [];
  _flushing = false;
  _batchSize = options?.batchSize ?? 20;
  _flushIntervalMs = options?.flushIntervalMs ?? 1e4;
  let dbPath = options?.dbPath;
  if (!dbPath) {
    const config = await loadConfig();
    dbPath = config.dbPath;
  }
  let masterKey = options?.masterKey ?? null;
  if (!masterKey) {
    masterKey = await getMasterKey();
    if (!masterKey) {
      throw new Error(
        "No encryption key found. Run /exe-setup to generate one."
      );
    }
  }
  const hexKey = masterKey.toString("hex");
  await initTurso({
    dbPath,
    encryptionKey: hexKey
  });
  // Schema creation can race other processes opening the same file.
  await retryOnBusy2(() => ensureSchema(), "ensureSchema");
  try {
    // Best-effort: daemon client is optional; failures are swallowed.
    const { initDaemonClient: initDaemonClient2 } = await Promise.resolve().then(() => (init_database(), database_exports));
    await initDaemonClient2();
  } catch {
  }
  if (!options?.lightweight) {
    try {
      // Best-effort: per-project sharding shares the same hex key.
      const { initShardManager: initShardManager2 } = await Promise.resolve().then(() => (init_shard_manager(), shard_manager_exports));
      initShardManager2(hexKey);
    } catch {
    }
    const client = getClient();
    // Seed the in-memory version counter from the highest stored version.
    const vResult = await retryOnBusy2(
      () => client.execute("SELECT MAX(version) as max_v FROM memories"),
      "version-query"
    );
    _nextVersion = (Number(vResult.rows[0]?.max_v) || 0) + 1;
    try {
      // Best-effort: preload globally shared procedures.
      const { loadGlobalProcedures: loadGlobalProcedures2 } = await Promise.resolve().then(() => (init_global_procedures(), global_procedures_exports));
      await loadGlobalProcedures2();
    } catch {
    }
  }
}
|
|
3785
|
+
/**
 * Maps a memory record to a storage tier:
 *   1 — explicit long-term commits with importance >= 8,
 *   2 — manual/store_memory records with importance >= 5,
 *   3 — everything else (raw tool traffic).
 */
function classifyTier(record) {
  const importance = record.importance ?? 0;
  if (record.tool_name === "commit_to_long_term_memory" && importance >= 8) {
    return 1;
  }
  const tool = record.tool_name ?? "";
  const isManualStore = tool === "store_memory" || tool === "manual";
  return isManualStore && importance >= 5 ? 2 : 3;
}
|
|
3790
|
+
/**
 * For file-oriented tools (Read/Write/Edit), extracts the first absolute
 * path with an extension from the first line of the raw text and returns
 * it as a JSON-encoded single-element array; otherwise null.
 */
function inferFilePaths(record) {
  const fileTools = new Set(["Read", "Write", "Edit"]);
  if (!fileTools.has(record.tool_name)) return null;
  const [firstLine = ""] = record.raw_text.split("\n");
  const found = /(\/[\w./-]+\.\w+)/.exec(firstLine);
  return found === null ? null : JSON.stringify([found[1]]);
}
|
|
3796
|
+
/**
 * For Bash tool output, returns the first token that looks like a git
 * commit hash (7–40 lowercase hex chars); otherwise null.
 * NOTE(review): long runs of decimal digits (e.g. "12345678") also match,
 * since digits are valid hex — confirm that false positive is acceptable.
 */
function inferCommitHash(record) {
  if (record.tool_name !== "Bash") {
    return null;
  }
  const found = /\b([a-f0-9]{7,40})\b/.exec(record.raw_text);
  return found?.[1] ?? null;
}
|
|
3801
|
+
/**
 * Heuristically classifies raw_text as "json" | "sql" | "code" | "prose" |
 * "mixed", or null for missing/very short text (< 10 chars). Checks run in
 * priority order: JSON-looking prefix, then SQL keywords, then code
 * keywords, then markdown-ish prefix, else "mixed".
 */
function inferLanguageType(record) {
  const text = record.raw_text;
  if (!text || text.length < 10) {
    return null;
  }
  const firstChar = text.trimStart().charAt(0);
  if (firstChar === "{" || firstChar === "[") {
    return "json";
  }
  if (/\b(SELECT|INSERT|UPDATE|DELETE|CREATE TABLE|ALTER TABLE)\b/i.test(text)) {
    return "sql";
  }
  if (/\b(function |const |import |export |class |def |async |=>)\b/.test(text)) {
    return "code";
  }
  return firstChar === "#" || firstChar === "*" ? "prose" : "mixed";
}
|
|
3811
|
+
/**
 * Derives a coarse business domain from the project name:
 * "marketing" for marketing/content projects, "customer" for crm/customer
 * projects, otherwise null.
 */
function inferDomain(record) {
  const projectName = (record.project_name ?? "").toLowerCase();
  const domainKeywords = [
    ["marketing", ["marketing", "content"]],
    ["customer", ["crm", "customer"]]
  ];
  for (const [domain, keywords] of domainKeywords) {
    if (keywords.some((kw) => projectName.includes(kw))) {
      return domain;
    }
  }
  return null;
}
|
|
3817
|
+
/**
 * Queues one memory record for batched insertion (the actual DB write
 * happens in flushBatch). Applies governance, in-buffer and in-DB
 * deduplication (scoped by content hash + agent + project + memory type),
 * backfills inferred metadata, bounds the pending buffer at 1000 records,
 * lazily starts the flush timer, and flushes eagerly once a full batch has
 * accumulated.
 *
 * @throws when a non-null vector does not have exactly EMBEDDING_DIM entries
 */
async function writeMemory(record) {
  if (record.vector !== null && record.vector.length !== EMBEDDING_DIM) {
    throw new Error(
      `Expected ${EMBEDDING_DIM}-dim vector, got ${record.vector.length}`
    );
  }
  // Governance may rewrite the record or drop it entirely.
  const governed = governMemoryRecord(record);
  if (governed.shouldDrop) return;
  record = governed.record;
  const contentHash = governed.contentHash;
  const memoryType = record.memory_type ?? "raw";
  // Dedup against records already waiting in the buffer.
  if (_pendingRecords.some(
    (r) => r.content_hash === contentHash && r.agent_id === record.agent_id && r.project_name === record.project_name && (r.memory_type ?? "raw") === memoryType
  )) {
    return;
  }
  try {
    // Best-effort dedup against rows already persisted; on lookup failure
    // we fall through and rely on INSERT OR IGNORE at flush time.
    const existing = await findScopedDuplicate({
      contentHash,
      agentId: record.agent_id,
      projectName: record.project_name,
      memoryType
    });
    if (existing) return;
  } catch {
  }
  // Normalize into the DB row shape; column set must match flushBatch/cols.
  const dbRow = {
    id: record.id,
    agent_id: record.agent_id,
    agent_role: record.agent_role,
    session_id: record.session_id,
    timestamp: record.timestamp,
    tool_name: record.tool_name,
    project_name: record.project_name,
    has_error: record.has_error ? 1 : 0,
    raw_text: record.raw_text,
    vector: record.vector,
    version: 0,
    // Placeholder — assigned atomically at flush time
    task_id: record.task_id ?? null,
    importance: record.importance ?? 5,
    status: record.status ?? "active",
    confidence: record.confidence ?? 0.7,
    last_accessed: record.last_accessed ?? record.timestamp,
    workspace_id: record.workspace_id ?? null,
    document_id: record.document_id ?? null,
    user_id: record.user_id ?? null,
    char_offset: record.char_offset ?? null,
    page_number: record.page_number ?? null,
    source_path: record.source_path ?? null,
    source_type: record.source_type ?? null,
    tier: record.tier ?? classifyTier(record),
    supersedes_id: record.supersedes_id ?? null,
    draft: record.draft ? 1 : 0,
    memory_type: memoryType,
    trajectory: record.trajectory ? JSON.stringify(record.trajectory) : null,
    content_hash: contentHash,
    intent: record.intent ?? null,
    outcome: record.outcome ?? null,
    domain: record.domain ?? inferDomain(record),
    referenced_entities: record.referenced_entities ?? null,
    retrieval_count: record.retrieval_count ?? 0,
    chain_position: record.chain_position ?? null,
    review_status: record.review_status ?? null,
    context_window_pct: record.context_window_pct ?? null,
    file_paths: record.file_paths ?? inferFilePaths(record),
    commit_hash: record.commit_hash ?? inferCommitHash(record),
    duration_ms: record.duration_ms ?? null,
    token_cost: record.token_cost ?? null,
    audience: record.audience ?? null,
    language_type: record.language_type ?? inferLanguageType(record),
    parent_memory_id: record.parent_memory_id ?? null
  };
  _pendingRecords.push(dbRow);
  // Notify listeners immediately (before the row is actually persisted).
  orgBus.emit({
    type: "memory_stored",
    agentId: record.agent_id,
    project: record.project_name,
    timestamp: record.timestamp
  });
  // Backpressure: cap the buffer, dropping the OLDEST queued records.
  const MAX_PENDING = 1e3;
  if (_pendingRecords.length > MAX_PENDING) {
    const dropped = _pendingRecords.length - MAX_PENDING;
    _pendingRecords = _pendingRecords.slice(-MAX_PENDING);
    console.warn(`[store] Dropped ${dropped} oldest pending records (overflow)`);
  }
  // Lazily start the periodic flush; unref (Node only — in other runtimes
  // setInterval may return a number, hence the "unref" feature check).
  if (_flushTimer === null) {
    _flushTimer = setInterval(() => {
      void flushBatch();
    }, _flushIntervalMs);
    if (_flushTimer && typeof _flushTimer === "object" && "unref" in _flushTimer) {
      _flushTimer.unref();
    }
  }
  // Eager flush once a full batch is queued.
  if (_pendingRecords.length >= _batchSize) {
    await flushBatch();
  }
}
|
|
3915
|
+
/**
 * Persists all queued records in one batch: assigns monotonically increasing
 * versions (seeded from MAX(version) in the main DB), writes every row to the
 * main DB with INSERT OR IGNORE, schedules post-write hygiene, then
 * best-effort replicates the rows to their per-project shards. Returns the
 * number of records flushed (0 if a flush is already running or the buffer
 * is empty). The `_flushing` flag prevents re-entrant flushes.
 */
async function flushBatch() {
  if (_flushing || _pendingRecords.length === 0) return 0;
  _flushing = true;
  try {
    const batch = _pendingRecords.slice(0);
    const client = getClient();
    // Version assignment: read the current max and hand out sequential
    // values. NOTE(review): read-then-write is not atomic across
    // processes — confirm single-writer assumption.
    const vResult = await client.execute("SELECT MAX(version) as max_v FROM memories");
    let baseVersion = (Number(vResult.rows[0]?.max_v) || 0) + 1;
    for (const row of batch) {
      row.version = baseVersion++;
    }
    _nextVersion = baseVersion;
    // Builds one positional INSERT; arg order must match `cols` exactly.
    const buildStmt = (row) => {
      const hasVector = row.vector !== null;
      const taskId = row.task_id ?? null;
      const importance = row.importance ?? 5;
      const status = row.status ?? "active";
      const confidence = row.confidence ?? 0.7;
      const lastAccessed = row.last_accessed ?? row.timestamp;
      const workspaceId = row.workspace_id ?? null;
      const documentId = row.document_id ?? null;
      const userId = row.user_id ?? null;
      const charOffset = row.char_offset ?? null;
      const pageNumber = row.page_number ?? null;
      const sourcePath = row.source_path ?? null;
      const sourceType = row.source_type ?? null;
      const tier = row.tier ?? 3;
      const supersedesId = row.supersedes_id ?? null;
      const draft = row.draft ? 1 : 0;
      const memoryType = row.memory_type ?? "raw";
      const trajectory = row.trajectory ?? null;
      const contentHash = row.content_hash ?? null;
      const intent = row.intent ?? null;
      const outcome = row.outcome ?? null;
      const domain = row.domain ?? null;
      const referencedEntities = row.referenced_entities ?? null;
      const retrievalCount = row.retrieval_count ?? 0;
      const chainPosition = row.chain_position ?? null;
      const reviewStatus = row.review_status ?? null;
      const contextWindowPct = row.context_window_pct ?? null;
      const filePaths = row.file_paths ?? null;
      const commitHash = row.commit_hash ?? null;
      const durationMs = row.duration_ms ?? null;
      const tokenCost = row.token_cost ?? null;
      const audience = row.audience ?? null;
      const languageType = row.language_type ?? null;
      const parentMemoryId = row.parent_memory_id ?? null;
      const cols = `id, agent_id, agent_role, session_id, timestamp,
        tool_name, project_name,
        has_error, raw_text, vector, version, task_id, importance, status,
        confidence, last_accessed,
        workspace_id, document_id, user_id, char_offset, page_number,
        source_path, source_type, tier, supersedes_id, draft, memory_type, trajectory, content_hash,
        intent, outcome, domain, referenced_entities, retrieval_count,
        chain_position, review_status, context_window_pct, file_paths, commit_hash,
        duration_ms, token_cost, audience, language_type, parent_memory_id`;
      const metaArgs = [
        intent,
        outcome,
        domain,
        referencedEntities,
        retrievalCount,
        chainPosition,
        reviewStatus,
        contextWindowPct,
        filePaths,
        commitHash,
        durationMs,
        tokenCost,
        audience,
        languageType,
        parentMemoryId
      ];
      const baseArgs = [
        row.id,
        row.agent_id,
        row.agent_role,
        row.session_id,
        row.timestamp,
        row.tool_name,
        row.project_name,
        row.has_error,
        row.raw_text
      ];
      const sharedArgs = [
        row.version,
        taskId,
        importance,
        status,
        confidence,
        lastAccessed,
        workspaceId,
        documentId,
        userId,
        charOffset,
        pageNumber,
        sourcePath,
        sourceType,
        tier,
        supersedesId,
        draft,
        memoryType,
        trajectory,
        contentHash
      ];
      // Vector rows pass the blob through vector32(); vector-less rows bind
      // NULL in the vector slot and drop that placeholder from args.
      return {
        sql: hasVector ? `INSERT OR IGNORE INTO memories (${cols})
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, vector32(?), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` : `INSERT OR IGNORE INTO memories (${cols})
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
        args: hasVector ? [...baseArgs, vectorToBlob(row.vector), ...sharedArgs, ...metaArgs] : [...baseArgs, ...sharedArgs, ...metaArgs]
      };
    };
    // Primary write: all rows go to the main DB in a single transaction.
    const globalClient = getClient();
    const globalStmts = batch.map(buildStmt);
    await globalClient.batch(globalStmts, "write");
    schedulePostWriteMemoryHygiene(batch.map((row) => row.id));
    _pendingRecords.splice(0, batch.length);
    try {
      // Secondary write: best-effort fan-out to per-project shards. Any
      // shard failure is logged but never fails the flush — the main DB
      // already holds the data.
      const { isShardingEnabled: isShardingEnabled2, getReadyShardClient: getReadyShardClient2 } = await Promise.resolve().then(() => (init_shard_manager(), shard_manager_exports));
      if (isShardingEnabled2()) {
        const byProject = /* @__PURE__ */ new Map();
        let skippedUnknown = 0;
        for (const row of batch) {
          const proj = row.project_name?.trim();
          if (!proj) {
            skippedUnknown++;
            continue;
          }
          if (!byProject.has(proj)) byProject.set(proj, []);
          byProject.get(proj).push(row);
        }
        if (skippedUnknown > 0) {
          process.stderr.write(
            `[store] Shard skip: ${skippedUnknown} record(s) with empty project_name (kept in main DB only)
`
          );
        }
        for (const [project, rows] of byProject) {
          try {
            const shardClient = await getReadyShardClient2(project);
            const shardStmts = rows.map(buildStmt);
            await shardClient.batch(shardStmts, "write");
          } catch (err) {
            const fullError = err instanceof Error ? `${err.name}: ${err.message}${err.stack ? `
${err.stack.split("\n").slice(1, 3).join("\n")}` : ""}` : String(err);
            process.stderr.write(
              `[store] Shard write failed for ${project} (${rows.length} records): ${fullError}
`
            );
          }
        }
      }
    } catch {
    }
    return batch.length;
  } finally {
    _flushing = false;
  }
}
|
|
4074
|
+
/**
 * Builds a SQL filter fragment (leading " AND ...") plus positional args
 * for workspace/user scoping. Missing or null userId restricts results to
 * shared (user-less) rows; a concrete userId also admits shared rows.
 *
 * Fix: the original had two separate branches for `userId === undefined`
 * and `userId === null` that emitted the identical clause — merged via a
 * single loose `== null` check (behavior unchanged).
 */
function buildWikiScopeFilter(options, columnPrefix) {
  const args = [];
  let clause = "";
  // NOTE(review): workspaceId === null still emits "workspace_id = ?" with
  // a NULL arg, which never matches in SQL — confirm callers never pass null.
  if (options?.workspaceId !== void 0) {
    clause += ` AND ${columnPrefix}workspace_id = ?`;
    args.push(options.workspaceId);
  }
  if (options?.userId == null) {
    clause += ` AND ${columnPrefix}user_id IS NULL`;
  } else {
    clause += ` AND (${columnPrefix}user_id = ? OR ${columnPrefix}user_id IS NULL)`;
    args.push(options.userId);
  }
  return { clause, args };
}
|
|
4091
|
+
/**
 * Optional filter hiding raw (unprocessed) memories from a query.
 * Activates only when includeRaw is explicitly false; rows whose
 * memory_type is NULL are treated as 'raw' via COALESCE and excluded too.
 *
 * @param {{ includeRaw?: boolean } | undefined} options
 * @param {string} columnPrefix - table alias prefix, e.g. "" or "m.".
 * @returns {{ clause: string, args: unknown[] }}
 */
function buildRawVisibilityFilter(options, columnPrefix) {
  const hideRaw = options?.includeRaw === false;
  const clause = hideRaw
    ? ` AND COALESCE(${columnPrefix}memory_type, 'raw') != 'raw'`
    : "";
  return { clause, args: [] };
}
|
|
4100
|
+
/**
 * Vector-similarity search over the `memories` table for one agent.
 * Rows are ordered by cosine distance between the stored vector and
 * queryVector, then constrained by the optional filters in `options`.
 * Returns plain record objects with defaults filled for nullable columns.
 */
async function searchMemories(queryVector, agentId, options) {
  let client;
  try {
    // Lazy-load the shard manager; use the project's shard DB only when
    // sharding is enabled AND the shard already exists (never created here).
    const { isShardingEnabled: isShardingEnabled2, shardExists: shardExists2, getReadyShardClient: getReadyShardClient2 } = await Promise.resolve().then(() => (init_shard_manager(), shard_manager_exports));
    if (isShardingEnabled2() && options?.projectName && shardExists2(options.projectName)) {
      client = await getReadyShardClient2(options.projectName);
    } else {
      client = getClient();
    }
  } catch {
    // Best-effort routing: any shard-manager failure falls back to the main DB.
    client = getClient();
  }
  const limit = options?.limit ?? 10;
  // Default scope: active, non-draft rows only (NULL status counts as active).
  const statusFilter = options?.includeArchived ? "" : `
    AND COALESCE(status, 'active') = 'active'`;
  const draftFilter = options?.includeDrafts ? "" : `
    AND (draft = 0 OR draft IS NULL)`;
  // Rows without a vector cannot be ranked; low-confidence rows (< 0.3,
  // NULL defaulting to 0.7) are excluded up front.
  let sql = `SELECT id, agent_id, agent_role, session_id, timestamp,
           tool_name, project_name,
           has_error, raw_text, vector, importance, status,
           confidence, last_accessed,
           workspace_id, document_id, user_id,
           char_offset, page_number,
           source_path, source_type
    FROM memories
    WHERE agent_id = ?
      AND vector IS NOT NULL${statusFilter}${draftFilter}
      AND COALESCE(confidence, 0.7) >= 0.3`;
  const args = [agentId];
  // Workspace/user scoping and raw-row visibility reuse the shared helpers;
  // each returns an " AND ..." clause plus its positional args.
  const scope = buildWikiScopeFilter(options, "");
  sql += scope.clause;
  args.push(...scope.args);
  const rawVisibility = buildRawVisibilityFilter(options, "");
  sql += rawVisibility.clause;
  args.push(...rawVisibility.args);
  if (options?.projectName) {
    sql += ` AND project_name = ?`;
    args.push(options.projectName);
  }
  if (options?.toolName) {
    sql += ` AND tool_name = ?`;
    args.push(options.toolName);
  }
  if (options?.hasError !== void 0) {
    // has_error is stored as 0/1 in the DB.
    sql += ` AND has_error = ?`;
    args.push(options.hasError ? 1 : 0);
  }
  if (options?.since) {
    sql += ` AND timestamp >= ?`;
    args.push(options.since);
  }
  // memoryTypes (plural, deduped) takes precedence over the single memoryType.
  if (options?.memoryTypes && options.memoryTypes.length > 0) {
    const uniqueTypes = [...new Set(options.memoryTypes)];
    sql += ` AND memory_type IN (${uniqueTypes.map(() => "?").join(",")})`;
    args.push(...uniqueTypes);
  } else if (options?.memoryType) {
    sql += ` AND memory_type = ?`;
    args.push(options.memoryType);
  }
  // Nearest-first by cosine distance; vectorToBlob serializes the query
  // embedding as a JSON float array for vector32().
  sql += ` ORDER BY vector_distance_cos(vector, vector32(?))`;
  args.push(vectorToBlob(queryVector));
  sql += ` LIMIT ?`;
  args.push(limit);
  const result = await client.execute({ sql, args });
  // Normalize DB rows: coerce has_error to boolean, vector to a plain array,
  // and apply defaults (importance 5, status 'active', confidence 0.7,
  // last_accessed falling back to timestamp, NULL for optional metadata).
  return result.rows.map((row) => ({
    id: row.id,
    agent_id: row.agent_id,
    agent_role: row.agent_role,
    session_id: row.session_id,
    timestamp: row.timestamp,
    tool_name: row.tool_name,
    project_name: row.project_name,
    has_error: row.has_error === 1,
    raw_text: row.raw_text,
    vector: row.vector == null ? [] : Array.isArray(row.vector) ? row.vector : Array.from(row.vector),
    importance: row.importance ?? 5,
    status: row.status ?? "active",
    confidence: row.confidence ?? 0.7,
    last_accessed: row.last_accessed ?? row.timestamp,
    workspace_id: row.workspace_id ?? null,
    document_id: row.document_id ?? null,
    user_id: row.user_id ?? null,
    char_offset: row.char_offset ?? null,
    page_number: row.page_number ?? null,
    source_path: row.source_path ?? null,
    source_type: row.source_type ?? null
  }));
}
|
|
4188
|
+
/**
 * Best-effort enrichment: looks up the `documents` rows referenced by each
 * record's document_id and attaches them as record.document_metadata
 * (null when the referenced document is missing). Mutates and returns
 * `records`; lookup failures leave the records untouched.
 */
async function attachDocumentMetadata(records) {
  const wanted = new Set();
  for (const r of records) {
    if (typeof r.document_id === "string" && r.document_id.length > 0) {
      wanted.add(r.document_id);
    }
  }
  const docIds = [...wanted];
  if (docIds.length === 0) return records;
  try {
    const client = getClient();
    const placeholders = docIds.map(() => "?").join(",");
    const result = await client.execute({
      sql: `SELECT id, filename, mime, source_type, uploaded_at
             FROM documents
             WHERE id IN (${placeholders})`,
      args: docIds
    });
    const metaById = /* @__PURE__ */ new Map();
    for (const row of result.rows) {
      const id = row.id;
      metaById.set(id, {
        document_id: id,
        filename: row.filename,
        mime: row.mime ?? null,
        source_type: row.source_type ?? null,
        uploaded_at: row.uploaded_at
      });
    }
    for (const record of records) {
      if (!record.document_id) continue;
      record.document_metadata = metaById.get(record.document_id) ?? null;
    }
  } catch {
    // Deliberately swallowed: metadata is an optional decoration.
  }
  return records;
}
|
|
4223
|
+
/**
 * Archives stale tier-3 memories for an agent: rows still 'active' whose
 * timestamp is older than maxAgeHours (default 72h). When dryRun is set,
 * only counts the rows that would be archived, without writing.
 *
 * @returns {Promise<{ archived: number }>}
 */
async function flushTier3(agentId, options) {
  const client = getClient();
  const ageHours = options?.maxAgeHours ?? 72;
  // 36e5 ms per hour.
  const cutoffIso = new Date(Date.now() - ageHours * 36e5).toISOString();
  const filterArgs = [agentId, cutoffIso];
  if (options?.dryRun) {
    const counted = await client.execute({
      sql: `SELECT COUNT(*) as cnt FROM memories
             WHERE agent_id = ? AND tier = 3 AND status = 'active' AND timestamp < ?`,
      args: filterArgs
    });
    return { archived: Number(counted.rows[0]?.cnt ?? 0) };
  }
  const updated = await client.execute({
    sql: `UPDATE memories SET status = 'archived'
           WHERE agent_id = ? AND tier = 3 AND status = 'active' AND timestamp < ?`,
    args: filterArgs
  });
  return { archived: updated.rowsAffected };
}
|
|
4242
|
+
/**
 * Shuts the store down: stops the periodic flush timer, drains any queued
 * records through flushBatch(), closes the Turso connection, and resets the
 * module-level queue and version counter.
 */
async function disposeStore() {
  if (_flushTimer !== null) {
    clearInterval(_flushTimer);
    _flushTimer = null;
  }
  // Drain outstanding writes before tearing down the connection.
  if (_pendingRecords.length > 0) await flushBatch();
  await disposeTurso();
  _pendingRecords = [];
  _nextVersion = 1;
}
|
|
4254
|
+
/**
 * Serializes an embedding as a JSON number array. Values are first coerced
 * through Float32Array so the stored precision matches what vector32()
 * produces on the database side.
 *
 * @param {Float32Array | number[]} vector
 * @returns {string} JSON array text, e.g. "[1,2,3]"
 */
function vectorToBlob(vector) {
  const floats = vector instanceof Float32Array ? vector : new Float32Array(vector);
  return JSON.stringify([...floats]);
}
|
|
4258
|
+
/**
 * Sets the status column for a single memory row (e.g. 'active', 'archived').
 *
 * @param {string} id - memory primary key
 * @param {string} status - new status value
 */
async function updateMemoryStatus(id, status) {
  await getClient().execute({
    sql: `UPDATE memories SET status = ? WHERE id = ?`,
    args: [status, id]
  });
}
|
|
4265
|
+
/**
 * Reserves `count` consecutive version numbers from the in-memory counter.
 * Single-threaded JS makes the increment effectively atomic.
 *
 * @param {number} count - how many versions to reserve (non-positive → none)
 * @returns {number[]} the reserved versions in ascending order
 */
function reserveVersions(count) {
  const versions = [];
  let remaining = count;
  while (remaining > 0) {
    versions.push(_nextVersion);
    _nextVersion += 1;
    remaining -= 1;
  }
  return versions;
}
|
|
4272
|
+
/**
 * Counts this agent's active memories (rows with NULL status count as
 * 'active'). Returns 0 on any DB error or non-numeric count — callers
 * treat the value as a best-effort hint.
 *
 * @param {string} agentId
 * @returns {Promise<number>}
 */
async function getMemoryCardinality(agentId) {
  try {
    const res = await getClient().execute({
      sql: `SELECT COUNT(*) as cnt FROM memories WHERE agent_id = ? AND COALESCE(status, 'active') = 'active'`,
      args: [agentId]
    });
    return Number(res.rows[0]?.cnt) || 0;
  } catch {
    return 0;
  }
}
|
|
4284
|
+
// Module-level state for src/lib/store.ts, hoisted by the bundler.
var INIT_MAX_RETRIES, INIT_RETRY_DELAY_MS, _pendingRecords, _batchSize, _flushIntervalMs, _flushTimer, _flushing, _nextVersion;
// Lazy initializer produced by the bundler's __esm wrapper: the body runs on
// the first init_store() call (pulling in dependency modules first), then
// subsequent calls are no-ops.
var init_store = __esm({
  "src/lib/store.ts"() {
    "use strict";
    init_memory();
    init_database();
    init_keychain();
    init_config();
    init_state_bus();
    init_memory_write_governor();
    INIT_MAX_RETRIES = 3; // presumably caps store-init retry attempts — consumer is outside this chunk
    INIT_RETRY_DELAY_MS = 1e3; // presumably ms between init retries — verify against initStore
    _pendingRecords = []; // write queue; drained/spliced by the batch flusher
    _batchSize = 20; // records per batched write (NOTE(review): usage not visible here)
    _flushIntervalMs = 1e4; // presumably the periodic-flush interval (10s) for _flushTimer
    _flushTimer = null; // interval handle; cleared in disposeStore()
    _flushing = false; // re-entrancy guard; reset in the flusher's finally block
    _nextVersion = 1; // counter consumed by reserveVersions()
  }
});
|
|
4304
|
+
|
|
4305
|
+
// src/bin/exe-rename.ts
|
|
4306
|
+
init_employees();
|
|
4307
|
+
import { readFileSync as readFileSync5, writeFileSync as writeFileSync3, renameSync as renameSync4, unlinkSync as unlinkSync3, existsSync as existsSync8 } from "fs";
|
|
4308
|
+
import { execSync as execSync3 } from "child_process";
|
|
4309
|
+
import path8 from "path";
|
|
4310
|
+
import { homedir } from "os";
|
|
2739
4311
|
|
|
2740
4312
|
// src/lib/employee-templates.ts
|
|
4313
|
+
init_global_procedures();
|
|
2741
4314
|
function personalizePrompt(prompt, templateName, actualName) {
|
|
2742
4315
|
if (templateName === actualName) return prompt;
|
|
2743
4316
|
const escaped = templateName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
|
@@ -2761,9 +4334,9 @@ function isMainModule(importMetaUrl) {
|
|
|
2761
4334
|
|
|
2762
4335
|
// src/bin/exe-rename.ts
|
|
2763
4336
|
async function renameEmployee(oldName, newName, opts = {}) {
|
|
2764
|
-
const rosterPath = opts.rosterPath ??
|
|
2765
|
-
const identityDir = opts.identityDir ??
|
|
2766
|
-
const agentsDir = opts.agentsDir ??
|
|
4337
|
+
const rosterPath = opts.rosterPath ?? path8.join(homedir(), ".exe-os", "exe-employees.json");
|
|
4338
|
+
const identityDir = opts.identityDir ?? path8.join(homedir(), ".exe-os", "identity");
|
|
4339
|
+
const agentsDir = opts.agentsDir ?? path8.join(homedir(), ".claude", "agents");
|
|
2767
4340
|
const validation = validateEmployeeName(newName);
|
|
2768
4341
|
if (!validation.valid) {
|
|
2769
4342
|
return { success: false, error: validation.error };
|
|
@@ -2785,7 +4358,8 @@ async function renameEmployee(oldName, newName, opts = {}) {
|
|
|
2785
4358
|
const originalName = employee.name;
|
|
2786
4359
|
const originalPrompt = employee.systemPrompt;
|
|
2787
4360
|
employee.name = newName;
|
|
2788
|
-
employee.systemPrompt = personalizePrompt(originalPrompt, rosterOldName, newName);
|
|
4361
|
+
if (originalPrompt) employee.systemPrompt = personalizePrompt(originalPrompt, rosterOldName, newName);
|
|
4362
|
+
else delete employee.systemPrompt;
|
|
2789
4363
|
await saveEmployees(employees, rosterPath);
|
|
2790
4364
|
rollbackStack.push({
|
|
2791
4365
|
description: "restore roster",
|
|
@@ -2795,36 +4369,36 @@ async function renameEmployee(oldName, newName, opts = {}) {
|
|
|
2795
4369
|
writeFileSync3(rosterPath, JSON.stringify(employees, null, 2) + "\n", "utf-8");
|
|
2796
4370
|
}
|
|
2797
4371
|
});
|
|
2798
|
-
const oldIdentityPath =
|
|
2799
|
-
const newIdentityPath =
|
|
2800
|
-
if (
|
|
4372
|
+
const oldIdentityPath = path8.join(identityDir, `${rosterOldName}.md`);
|
|
4373
|
+
const newIdentityPath = path8.join(identityDir, `${newName}.md`);
|
|
4374
|
+
if (existsSync8(oldIdentityPath)) {
|
|
2801
4375
|
const content = readFileSync5(oldIdentityPath, "utf-8");
|
|
2802
4376
|
const updatedContent = content.replace(
|
|
2803
4377
|
/^(agent_id:\s*)\S+/m,
|
|
2804
4378
|
`$1${newName}`
|
|
2805
4379
|
);
|
|
2806
|
-
|
|
4380
|
+
renameSync4(oldIdentityPath, newIdentityPath);
|
|
2807
4381
|
writeFileSync3(newIdentityPath, updatedContent, "utf-8");
|
|
2808
4382
|
rollbackStack.push({
|
|
2809
4383
|
description: "restore identity file",
|
|
2810
4384
|
undo: () => {
|
|
2811
|
-
if (
|
|
4385
|
+
if (existsSync8(newIdentityPath)) {
|
|
2812
4386
|
writeFileSync3(newIdentityPath, content, "utf-8");
|
|
2813
|
-
|
|
4387
|
+
renameSync4(newIdentityPath, oldIdentityPath);
|
|
2814
4388
|
}
|
|
2815
4389
|
}
|
|
2816
4390
|
});
|
|
2817
4391
|
}
|
|
2818
|
-
const oldAgentPath =
|
|
2819
|
-
const newAgentPath =
|
|
2820
|
-
if (
|
|
4392
|
+
const oldAgentPath = path8.join(agentsDir, `${rosterOldName}.md`);
|
|
4393
|
+
const newAgentPath = path8.join(agentsDir, `${newName}.md`);
|
|
4394
|
+
if (existsSync8(oldAgentPath)) {
|
|
2821
4395
|
const agentContent = readFileSync5(oldAgentPath, "utf-8");
|
|
2822
|
-
|
|
4396
|
+
renameSync4(oldAgentPath, newAgentPath);
|
|
2823
4397
|
rollbackStack.push({
|
|
2824
4398
|
description: "restore agent file",
|
|
2825
4399
|
undo: () => {
|
|
2826
|
-
if (
|
|
2827
|
-
|
|
4400
|
+
if (existsSync8(newAgentPath)) {
|
|
4401
|
+
renameSync4(newAgentPath, oldAgentPath);
|
|
2828
4402
|
writeFileSync3(oldAgentPath, agentContent, "utf-8");
|
|
2829
4403
|
}
|
|
2830
4404
|
}
|
|
@@ -2894,7 +4468,7 @@ async function renameEmployee(oldName, newName, opts = {}) {
|
|
|
2894
4468
|
}
|
|
2895
4469
|
function findExeBin2() {
|
|
2896
4470
|
try {
|
|
2897
|
-
return
|
|
4471
|
+
return execSync3(process.platform === "win32" ? "where exe-os" : "which exe-os", { encoding: "utf8" }).trim();
|
|
2898
4472
|
} catch {
|
|
2899
4473
|
return null;
|
|
2900
4474
|
}
|
|
@@ -2903,10 +4477,10 @@ function removeOldSymlinks(name) {
|
|
|
2903
4477
|
try {
|
|
2904
4478
|
const exeBinPath = findExeBin2();
|
|
2905
4479
|
if (!exeBinPath) return;
|
|
2906
|
-
const binDir =
|
|
4480
|
+
const binDir = path8.dirname(exeBinPath);
|
|
2907
4481
|
for (const suffix of ["", "-opencode"]) {
|
|
2908
|
-
const linkPath =
|
|
2909
|
-
if (
|
|
4482
|
+
const linkPath = path8.join(binDir, `${name}${suffix}`);
|
|
4483
|
+
if (existsSync8(linkPath)) {
|
|
2910
4484
|
try {
|
|
2911
4485
|
unlinkSync3(linkPath);
|
|
2912
4486
|
} catch {
|
|
@@ -2923,6 +4497,8 @@ async function main() {
|
|
|
2923
4497
|
process.exit(1);
|
|
2924
4498
|
}
|
|
2925
4499
|
const [oldName, newName] = args;
|
|
4500
|
+
const { initStore: initStore2 } = await Promise.resolve().then(() => (init_store(), store_exports));
|
|
4501
|
+
await initStore2({ lightweight: true });
|
|
2926
4502
|
const result = await renameEmployee(oldName, newName);
|
|
2927
4503
|
if (!result.success) {
|
|
2928
4504
|
console.error(`Error: ${result.error}`);
|