@askexenow/exe-os 0.9.67 → 0.9.69
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bin/age-ontology-load.js +263 -0
- package/dist/bin/agentic-ontology-backfill.js +178 -17
- package/dist/bin/agentic-reflection-backfill.js +165 -10
- package/dist/bin/agentic-semantic-label.js +173 -12
- package/dist/bin/backfill-vectors.js +176 -13
- package/dist/bin/cc-doctor.js +293 -30
- package/dist/bin/cli.js +1175 -855
- package/dist/bin/exe-healthcheck.js +293 -30
- package/dist/bin/graph-backfill.js +176 -17
- package/dist/bin/postgres-agentic-reflection-backfill.js +270 -0
- package/dist/bin/postgres-agentic-semantic-backfill.js +271 -1
- package/dist/lib/exe-daemon.js +1714 -985
- package/dist/mcp/server.js +728 -530
- package/package.json +1 -1
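
The recurring addition across these bundles is a new src/lib/background-jobs.ts module, inlined by the bundler into each bin (the same block appears in both the age-ontology-load and agentic-ontology-backfill hunks below). Its locking technique is a TTL-stamped PID lock file: the lock is created with the exclusive "wx" flag, the holder is probed with process.kill(pid, 0), and a lock whose holder is dead or whose timestamp exceeds the TTL is taken over. A minimal standalone sketch of just that mechanism, assuming nothing beyond what the hunks show (this is not an exported API of the package):

// Distilled sketch of the lock technique added in 0.9.69, not the package's API.
import { existsSync, readFileSync, writeFileSync, unlinkSync } from "fs";

const TTL_MS = 6 * 60 * 60 * 1e3; // same default as DEFAULT_LOCK_TTL_MS in the diff

function pidAlive(pid) {
  if (!pid || pid <= 0) return false;
  try {
    process.kill(pid, 0); // signal 0: existence check only, sends no signal
    return true;
  } catch {
    return false;
  }
}

function tryAcquire(file, ttlMs = TTL_MS) {
  if (existsSync(file)) {
    try {
      const lock = JSON.parse(readFileSync(file, "utf8"));
      const age = Date.now() - Date.parse(lock.updatedAt ?? "");
      // Honor the lock only while its holder is alive and it is fresher than the TTL.
      if (lock.pid && pidAlive(lock.pid) && Number.isFinite(age) && age < ttlMs) return false;
    } catch {}
    try { unlinkSync(file); } catch {} // stale or unreadable: take it over
  }
  try {
    // "wx" = exclusive create: fails if another process re-created the file first.
    writeFileSync(file, JSON.stringify({ pid: process.pid, updatedAt: new Date().toISOString() }), { flag: "wx" });
    return true;
  } catch {
    return false;
  }
}

console.log(tryAcquire("/tmp/example.lock") ? "acquired" : "held elsewhere");
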
@@ -2,6 +2,255 @@
 
 // src/bin/age-ontology-load.ts
 import { Client } from "pg";
+
+// src/lib/background-jobs.ts
+import { existsSync as existsSync3, mkdirSync as mkdirSync2, readFileSync as readFileSync2, writeFileSync, unlinkSync } from "fs";
+import { execFileSync } from "child_process";
+import os2 from "os";
+import path2 from "path";
+
+// src/lib/config.ts
+import { readFile, writeFile } from "fs/promises";
+import { readFileSync, existsSync as existsSync2, renameSync } from "fs";
+import path from "path";
+import os from "os";
+
+// src/lib/secure-files.ts
+import { chmodSync, existsSync, mkdirSync } from "fs";
+import { chmod, mkdir } from "fs/promises";
+
+// src/lib/config.ts
+function resolveDataDir() {
+  if (process.env.EXE_OS_DIR) return process.env.EXE_OS_DIR;
+  if (process.env.EXE_MEM_DIR) return process.env.EXE_MEM_DIR;
+  const newDir = path.join(os.homedir(), ".exe-os");
+  const legacyDir = path.join(os.homedir(), ".exe-mem");
+  if (!existsSync2(newDir) && existsSync2(legacyDir)) {
+    try {
+      renameSync(legacyDir, newDir);
+      process.stderr.write(`[exe-os] Migrated data directory: ~/.exe-mem \u2192 ~/.exe-os
+`);
+    } catch {
+      return legacyDir;
+    }
+  }
+  return newDir;
+}
+var EXE_AI_DIR = resolveDataDir();
+var DB_PATH = path.join(EXE_AI_DIR, "memories.db");
+var MODELS_DIR = path.join(EXE_AI_DIR, "models");
+var CONFIG_PATH = path.join(EXE_AI_DIR, "config.json");
+var LEGACY_LANCE_PATH = path.join(EXE_AI_DIR, "local.lance");
+var CURRENT_CONFIG_VERSION = 1;
+var DEFAULT_CONFIG = {
+  config_version: CURRENT_CONFIG_VERSION,
+  dbPath: DB_PATH,
+  modelFile: "jina-embeddings-v5-small-q4_k_m.gguf",
+  embeddingDim: 1024,
+  batchSize: 20,
+  flushIntervalMs: 1e4,
+  autoIngestion: true,
+  autoRetrieval: true,
+  searchMode: "hybrid",
+  hookSearchMode: "hybrid",
+  fileGrepEnabled: true,
+  splashEffect: true,
+  consolidationEnabled: true,
+  consolidationIntervalMs: 6 * 60 * 60 * 1e3,
+  consolidationModel: "claude-haiku-4-5-20251001",
+  consolidationMaxCallsPerRun: 20,
+  selfQueryRouter: true,
+  selfQueryModel: "claude-haiku-4-5-20251001",
+  rerankerEnabled: true,
+  scalingRoadmap: {
+    rerankerAutoTrigger: {
+      enabled: true,
+      broadQueryMinCardinality: 5e4,
+      fetchTopK: 200,
+      returnTopK: 20
+    }
+  },
+  graphRagEnabled: true,
+  wikiEnabled: false,
+  wikiUrl: "",
+  wikiApiKey: "",
+  wikiSyncIntervalMs: 30 * 60 * 1e3,
+  wikiWorkspaceMapping: {},
+  wikiAutoUpdate: true,
+  wikiAutoUpdateThreshold: 0.5,
+  wikiAutoUpdateCreateNew: true,
+  skillLearning: true,
+  skillThreshold: 3,
+  skillModel: "claude-haiku-4-5-20251001",
+  exeHeartbeat: {
+    enabled: true,
+    intervalSeconds: 60,
+    staleInProgressThresholdHours: 2
+  },
+  sessionLifecycle: {
+    idleKillEnabled: true,
+    idleKillTicksRequired: 3,
+    idleKillIntercomAckWindowMs: 1e4,
+    maxAutoInstances: 10
+  },
+  autoUpdate: {
+    checkOnBoot: true,
+    autoInstall: false,
+    checkIntervalMs: 24 * 60 * 60 * 1e3
+  },
+  orchestration: {
+    phase: "phase_1_coo",
+    phaseSetBy: "default"
+  }
+};
+
+// src/lib/background-jobs.ts
+var JOB_DIR = path2.join(EXE_AI_DIR, "jobs");
+var JOBS_FILE = path2.join(JOB_DIR, "jobs.json");
+var LOCK_DIR = path2.join(JOB_DIR, "locks");
+var DEFAULT_LOCK_TTL_MS = 6 * 60 * 60 * 1e3;
+var MAX_HISTORY = 200;
+function ensureDirs() {
+  mkdirSync2(LOCK_DIR, { recursive: true });
+}
+function now() {
+  return (/* @__PURE__ */ new Date()).toISOString();
+}
+function isAlive(pid) {
+  if (!pid || pid <= 0) return false;
+  try {
+    process.kill(pid, 0);
+    return true;
+  } catch {
+    return false;
+  }
+}
+function readJobsRaw() {
+  ensureDirs();
+  if (!existsSync3(JOBS_FILE)) return [];
+  try {
+    const parsed = JSON.parse(readFileSync2(JOBS_FILE, "utf8"));
+    return Array.isArray(parsed) ? parsed : [];
+  } catch {
+    return [];
+  }
+}
+function writeJobsRaw(jobs) {
+  ensureDirs();
+  const running = jobs.filter((j) => j.status === "running");
+  const rest = jobs.filter((j) => j.status !== "running").slice(-MAX_HISTORY);
+  writeFileSync(JOBS_FILE, JSON.stringify([...rest, ...running], null, 2) + "\n");
+}
+function lockPath(type) {
+  return path2.join(LOCK_DIR, `${type.replace(/[^a-zA-Z0-9_.-]/g, "_")}.lock`);
+}
+function acquireJobLock(type, ttlMs = DEFAULT_LOCK_TTL_MS) {
+  ensureDirs();
+  const file = lockPath(type);
+  if (existsSync3(file)) {
+    try {
+      const lock = JSON.parse(readFileSync2(file, "utf8"));
+      const age = Date.now() - Date.parse(lock.updatedAt ?? "");
+      if (lock.pid && isAlive(lock.pid) && Number.isFinite(age) && age < ttlMs) return false;
+    } catch {
+    }
+    try {
+      unlinkSync(file);
+    } catch {
+    }
+  }
+  try {
+    writeFileSync(file, JSON.stringify({ pid: process.pid, updatedAt: now() }, null, 2) + "\n", { flag: "wx" });
+    return true;
+  } catch {
+    return false;
+  }
+}
+function releaseJobLock(type) {
+  const file = lockPath(type);
+  try {
+    if (!existsSync3(file)) return;
+    const lock = JSON.parse(readFileSync2(file, "utf8"));
+    if (lock.pid === process.pid || !lock.pid || !isAlive(lock.pid)) unlinkSync(file);
+  } catch {
+    try {
+      unlinkSync(file);
+    } catch {
+    }
+  }
+}
+function startManagedJob(options) {
+  const lowPriority = options.lowPriority ?? true;
+  if (!acquireJobLock(options.type, options.lockTtlMs)) return null;
+  if (lowPriority) {
+    try {
+      os2.setPriority(process.pid, 10);
+    } catch {
+    }
+  }
+  const id = `${options.type}-${Date.now()}-${process.pid}`.replace(/[^a-zA-Z0-9_.-]/g, "_");
+  const record = {
+    id,
+    type: options.type,
+    name: options.name,
+    pid: process.pid,
+    command: options.command ?? process.argv.join(" "),
+    cwd: process.cwd(),
+    status: "running",
+    startedAt: now(),
+    updatedAt: now(),
+    lastHeartbeatAt: now(),
+    cancelCommand: `exe-os jobs cancel ${id}`,
+    lowPriority
+  };
+  const upsert = (patch) => {
+    const jobs = readJobsRaw().filter((j) => j.id !== id);
+    Object.assign(record, patch, { updatedAt: now() });
+    writeJobsRaw([...jobs, record]);
+    const file = lockPath(options.type);
+    try {
+      writeFileSync(file, JSON.stringify({ pid: process.pid, jobId: id, updatedAt: record.updatedAt }, null, 2) + "\n");
+    } catch {
+    }
+  };
+  upsert({});
+  const timer = setInterval(() => upsert({ lastHeartbeatAt: now() }), 3e4);
+  timer.unref?.();
+  const cleanup = (status, error) => {
+    clearInterval(timer);
+    upsert({ status, error, lastHeartbeatAt: now() });
+    releaseJobLock(options.type);
+  };
+  process.once("SIGTERM", () => {
+    cleanup("cancelled");
+    process.exit(0);
+  });
+  process.once("SIGINT", () => {
+    cleanup("cancelled");
+    process.exit(130);
+  });
+  process.once("exit", () => releaseJobLock(options.type));
+  return {
+    id,
+    update(progress) {
+      upsert({ progressCurrent: progress.current, progressTotal: progress.total, progressLabel: progress.label, lastHeartbeatAt: now() });
+    },
+    complete() {
+      cleanup("completed");
+    },
+    fail(err) {
+      cleanup("failed", err instanceof Error ? err.message : String(err));
+    },
+    cancel() {
+      cleanup("cancelled");
+    }
+  };
+}
+async function politeBatchPause(ms = 250) {
+  await new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// src/bin/age-ontology-load.ts
 function q(value) {
   return `'${String(value ?? "").replace(/\u0000/g, "").slice(0, 500).replace(/\\/g, "\\\\").replace(/'/g, "\\'")}'`;
 }
@@ -9,6 +258,11 @@ function sqlString(value) {
   return `'${value.replace(/'/g, "''")}'`;
 }
 async function main() {
+  const job = startManagedJob({ type: "age-ontology-load", name: "Apache AGE ontology loader", lowPriority: true });
+  if (!job) {
+    process.stderr.write("[age-ontology-load] Another AGE ontology load is already running.\n");
+    return;
+  }
   const sourceUrl = process.env.DATABASE_URL || process.env.EXED_DATABASE_URL;
   const ageUrl = process.env.AGE_DATABASE_URL;
   if (!sourceUrl) throw new Error("DATABASE_URL or EXED_DATABASE_URL is required for canonical source");
@@ -39,6 +293,10 @@ async function main() {
       const cypher = `CREATE (:Entity {id: ${q(entity.id)}, name: ${q(entity.name)}, type: ${q(entity.type)}})`;
       await age.query(`SELECT * FROM cypher(${sqlString(graph)}, $$ ${cypher} $$) AS (v agtype)`);
       nodeCount++;
+      if (nodeCount % 250 === 0) {
+        job.update({ current: nodeCount, total: limit, label: `Loaded ${nodeCount} AGE nodes` });
+        await politeBatchPause(250);
+      }
     }
     const relationships = await source.query(`SELECT source_entity_id, target_entity_id, type, confidence FROM memory.relationships ORDER BY id LIMIT $1`, [limit * 2]);
     let edgeCount = 0;
@@ -46,9 +304,14 @@ async function main() {
       const cypher = `MATCH (a:Entity {id: ${q(rel.source_entity_id)}}), (b:Entity {id: ${q(rel.target_entity_id)}}) CREATE (a)-[:RELATED {type: ${q(rel.type)}}]->(b)`;
       await age.query(`SELECT * FROM cypher(${sqlString(graph)}, $$ ${cypher} $$) AS (e agtype)`);
       edgeCount++;
+      if (edgeCount % 500 === 0) {
+        job.update({ current: nodeCount + edgeCount, total: limit * 3, label: `Loaded ${nodeCount} nodes, ${edgeCount} edges` });
+        await politeBatchPause(250);
+      }
     }
     process.stderr.write(`[age-ontology-load] Loaded ${nodeCount} nodes and ${edgeCount} edges into ${graph}.
 `);
+    job.complete();
   } finally {
     await age.end();
     await source.end();
@@ -3202,10 +3202,10 @@ function evictLRU() {
   }
 }
 function evictIdleShards() {
-  const now = Date.now();
+  const now2 = Date.now();
   const toEvict = [];
   for (const [name, lastAccess] of _shardLastAccess) {
-    if (now - lastAccess > SHARD_IDLE_MS) {
+    if (now2 - lastAccess > SHARD_IDLE_MS) {
       toEvict.push(name);
     }
   }
@@ -3485,22 +3485,22 @@ ${sections.join("\n\n")}
 }
 async function storeGlobalProcedure(input) {
   const id = randomUUID2();
-  const now = (/* @__PURE__ */ new Date()).toISOString();
+  const now2 = (/* @__PURE__ */ new Date()).toISOString();
   const client = getClient();
   await client.execute({
     sql: `INSERT INTO company_procedures (id, title, content, priority, domain, active, created_at, updated_at)
           VALUES (?, ?, ?, ?, ?, 1, ?, ?)`,
-    args: [id, input.title, input.content, input.priority ?? "p0", input.domain ?? null, now, now]
+    args: [id, input.title, input.content, input.priority ?? "p0", input.domain ?? null, now2, now2]
   });
   await loadGlobalProcedures();
   return id;
 }
 async function deactivateGlobalProcedure(id) {
-  const now = (/* @__PURE__ */ new Date()).toISOString();
+  const now2 = (/* @__PURE__ */ new Date()).toISOString();
   const client = getClient();
   const result = await client.execute({
     sql: "UPDATE company_procedures SET active = 0, updated_at = ? WHERE id = ?",
-    args: [now, id]
+    args: [now2, id]
   });
   await loadGlobalProcedures();
   return result.rowsAffected > 0;
@@ -3593,7 +3593,7 @@ function extractMemoryCards(row) {
 async function insertMemoryCardsForBatch(rows) {
   const cards = rows.flatMap(extractMemoryCards);
   if (cards.length === 0) return 0;
-  const now = (/* @__PURE__ */ new Date()).toISOString();
+  const now2 = (/* @__PURE__ */ new Date()).toISOString();
   const client = getClient();
   const stmts = cards.map((card) => ({
     sql: `INSERT OR IGNORE INTO memory_cards
@@ -3614,7 +3614,7 @@ async function insertMemoryCardsForBatch(rows) {
       card.content,
       card.source_ref,
       card.confidence,
-      now
+      now2
     ]
   }));
   await client.batch(stmts, "write");
@@ -3851,7 +3851,7 @@ async function insertOntologyForMemory(row, client) {
   const intention = inferIntention(row);
   const outcome = inferOutcome(row);
   const eventId = stableId2("event", row.id);
-  const now = (/* @__PURE__ */ new Date()).toISOString();
+  const now2 = (/* @__PURE__ */ new Date()).toISOString();
   await db.execute({
     sql: `INSERT INTO agent_sessions (id, agent_id, project_name, started_at, last_event_at, event_count, properties)
           VALUES (?, ?, ?, ?, ?, 1, ?)
@@ -3880,7 +3880,7 @@ async function insertOntologyForMemory(row, client) {
       row.id,
       row.has_error ? "negative" : outcome === "success_signal" ? "positive" : "neutral",
       JSON.stringify(ontologyPayload(row)),
-      now
+      now2
     ]
   });
   const semantic = inferSemanticLabel(row);
@@ -3898,8 +3898,8 @@ async function insertOntologyForMemory(row, client) {
       semantic.schemaVersion,
       semantic.confidence,
       JSON.stringify(semantic),
-      now,
-      now
+      now2,
+      now2
     ]
   });
   for (const statement of extractGoalCandidates(row)) {
@@ -3910,19 +3910,19 @@ async function insertOntologyForMemory(row, client) {
            parent_goal_id, due_at, achieved_at, supersedes_id, created_at, updated_at, source_memory_id)
            VALUES (?, ?, ?, ?, 'open', 5, NULL, NULL, NULL, NULL, NULL, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET updated_at = excluded.updated_at`,
-      args: [goalId, statement, row.agent_id, row.project_name, now, now, row.id]
+      args: [goalId, statement, row.agent_id, row.project_name, now2, now2, row.id]
     });
     await db.execute({
       sql: `INSERT OR IGNORE INTO agent_goal_links
            (id, goal_id, link_type, target_id, target_type, created_at)
            VALUES (?, ?, 'evidence', ?, 'memory', ?)`,
-      args: [stableId2("goal_link", goalId, row.id, "memory"), goalId, row.id, now]
+      args: [stableId2("goal_link", goalId, row.id, "memory"), goalId, row.id, now2]
     });
     await db.execute({
       sql: `INSERT OR IGNORE INTO agent_goal_links
            (id, goal_id, link_type, target_id, target_type, created_at)
            VALUES (?, ?, 'event', ?, 'event', ?)`,
-      args: [stableId2("goal_link", goalId, eventId, "event"), goalId, eventId, now]
+      args: [stableId2("goal_link", goalId, eventId, "event"), goalId, eventId, now2]
     });
   }
 }
@@ -4106,8 +4106,8 @@ function deriveMachineKey() {
 }
 function readMachineId() {
   try {
-    const { readFileSync: readFileSync5 } = __require("fs");
-    return readFileSync5("/etc/machine-id", "utf-8").trim();
+    const { readFileSync: readFileSync6 } = __require("fs");
+    return readFileSync6("/etc/machine-id", "utf-8").trim();
   } catch {
     return "";
   }
@@ -4657,8 +4657,166 @@ function vectorToBlob(vector) {
 // src/bin/agentic-ontology-backfill.ts
 init_database();
 init_agentic_ontology();
+
+// src/lib/background-jobs.ts
+init_config();
+import { existsSync as existsSync8, mkdirSync as mkdirSync3, readFileSync as readFileSync5, writeFileSync as writeFileSync3, unlinkSync as unlinkSync3 } from "fs";
+import { execFileSync } from "child_process";
+import os6 from "os";
+import path8 from "path";
+var JOB_DIR = path8.join(EXE_AI_DIR, "jobs");
+var JOBS_FILE = path8.join(JOB_DIR, "jobs.json");
+var LOCK_DIR = path8.join(JOB_DIR, "locks");
+var DEFAULT_LOCK_TTL_MS = 6 * 60 * 60 * 1e3;
+var MAX_HISTORY = 200;
+function ensureDirs() {
+  mkdirSync3(LOCK_DIR, { recursive: true });
+}
+function now() {
+  return (/* @__PURE__ */ new Date()).toISOString();
+}
+function isAlive(pid) {
+  if (!pid || pid <= 0) return false;
+  try {
+    process.kill(pid, 0);
+    return true;
+  } catch {
+    return false;
+  }
+}
+function readJobsRaw() {
+  ensureDirs();
+  if (!existsSync8(JOBS_FILE)) return [];
+  try {
+    const parsed = JSON.parse(readFileSync5(JOBS_FILE, "utf8"));
+    return Array.isArray(parsed) ? parsed : [];
+  } catch {
+    return [];
+  }
+}
+function writeJobsRaw(jobs) {
+  ensureDirs();
+  const running = jobs.filter((j) => j.status === "running");
+  const rest = jobs.filter((j) => j.status !== "running").slice(-MAX_HISTORY);
+  writeFileSync3(JOBS_FILE, JSON.stringify([...rest, ...running], null, 2) + "\n");
+}
+function lockPath(type) {
+  return path8.join(LOCK_DIR, `${type.replace(/[^a-zA-Z0-9_.-]/g, "_")}.lock`);
+}
+function acquireJobLock(type, ttlMs = DEFAULT_LOCK_TTL_MS) {
+  ensureDirs();
+  const file = lockPath(type);
+  if (existsSync8(file)) {
+    try {
+      const lock = JSON.parse(readFileSync5(file, "utf8"));
+      const age = Date.now() - Date.parse(lock.updatedAt ?? "");
+      if (lock.pid && isAlive(lock.pid) && Number.isFinite(age) && age < ttlMs) return false;
+    } catch {
+    }
+    try {
+      unlinkSync3(file);
+    } catch {
+    }
+  }
+  try {
+    writeFileSync3(file, JSON.stringify({ pid: process.pid, updatedAt: now() }, null, 2) + "\n", { flag: "wx" });
+    return true;
+  } catch {
+    return false;
+  }
+}
+function releaseJobLock(type) {
+  const file = lockPath(type);
+  try {
+    if (!existsSync8(file)) return;
+    const lock = JSON.parse(readFileSync5(file, "utf8"));
+    if (lock.pid === process.pid || !lock.pid || !isAlive(lock.pid)) unlinkSync3(file);
+  } catch {
+    try {
+      unlinkSync3(file);
+    } catch {
+    }
+  }
+}
+function startManagedJob(options) {
+  const lowPriority = options.lowPriority ?? true;
+  if (!acquireJobLock(options.type, options.lockTtlMs)) return null;
+  if (lowPriority) {
+    try {
+      os6.setPriority(process.pid, 10);
+    } catch {
+    }
+  }
+  const id = `${options.type}-${Date.now()}-${process.pid}`.replace(/[^a-zA-Z0-9_.-]/g, "_");
+  const record = {
+    id,
+    type: options.type,
+    name: options.name,
+    pid: process.pid,
+    command: options.command ?? process.argv.join(" "),
+    cwd: process.cwd(),
+    status: "running",
+    startedAt: now(),
+    updatedAt: now(),
+    lastHeartbeatAt: now(),
+    cancelCommand: `exe-os jobs cancel ${id}`,
+    lowPriority
+  };
+  const upsert = (patch) => {
+    const jobs = readJobsRaw().filter((j) => j.id !== id);
+    Object.assign(record, patch, { updatedAt: now() });
+    writeJobsRaw([...jobs, record]);
+    const file = lockPath(options.type);
+    try {
+      writeFileSync3(file, JSON.stringify({ pid: process.pid, jobId: id, updatedAt: record.updatedAt }, null, 2) + "\n");
+    } catch {
+    }
+  };
+  upsert({});
+  const timer = setInterval(() => upsert({ lastHeartbeatAt: now() }), 3e4);
+  timer.unref?.();
+  const cleanup = (status, error) => {
+    clearInterval(timer);
+    upsert({ status, error, lastHeartbeatAt: now() });
+    releaseJobLock(options.type);
+  };
+  process.once("SIGTERM", () => {
+    cleanup("cancelled");
+    process.exit(0);
+  });
+  process.once("SIGINT", () => {
+    cleanup("cancelled");
+    process.exit(130);
+  });
+  process.once("exit", () => releaseJobLock(options.type));
+  return {
+    id,
+    update(progress) {
+      upsert({ progressCurrent: progress.current, progressTotal: progress.total, progressLabel: progress.label, lastHeartbeatAt: now() });
+    },
+    complete() {
+      cleanup("completed");
+    },
+    fail(err) {
+      cleanup("failed", err instanceof Error ? err.message : String(err));
+    },
+    cancel() {
+      cleanup("cancelled");
+    }
+  };
+}
+async function politeBatchPause(ms = 250) {
+  await new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// src/bin/agentic-ontology-backfill.ts
 var BATCH_SIZE = 500;
 async function main() {
+  const job = startManagedJob({ type: "agentic-ontology-backfill", name: "Agentic ontology backfill", lowPriority: true });
+  if (!job) {
+    process.stderr.write("[agentic-ontology-backfill] Another ontology backfill is already running.\n");
+    return;
+  }
   await initStore({ lightweight: true });
   const client = getClient();
   let offset = 0;
@@ -4695,10 +4853,13 @@ async function main() {
     total += inserted;
     process.stderr.write(`[agentic-ontology-backfill] +${inserted} memories projected (${total} total)
 `);
+    job.update({ current: total, label: `Projected ${total} memories` });
+    await politeBatchPause(250);
     offset = 0;
   }
   process.stderr.write(`[agentic-ontology-backfill] Complete: ${total} memories projected.
 `);
+  job.complete();
   await disposeStore();
 }
 main().catch((err) => {
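
Each managed job also heartbeats its record into jobs.json every 30 seconds (the setInterval(..., 3e4) in the hunks above), so progress is observable from outside the process; each record even stores its own cancel command (exe-os jobs cancel <id>). A hypothetical one-off inspector script, assuming only the default ~/.exe-os data directory and the record fields written by upsert() above:

// Hypothetical inspector, not part of the package: reads the jobs.json written by writeJobsRaw().
import { existsSync, readFileSync } from "fs";
import os from "os";
import path from "path";

// resolveDataDir() in the diff defaults to ~/.exe-os (EXE_OS_DIR / EXE_MEM_DIR override it).
const dataDir = process.env.EXE_OS_DIR ?? path.join(os.homedir(), ".exe-os");
const jobsFile = path.join(dataDir, "jobs", "jobs.json");
const jobs = existsSync(jobsFile) ? JSON.parse(readFileSync(jobsFile, "utf8")) : [];
for (const job of jobs) {
  if (job.status !== "running") continue;
  const pct = job.progressTotal ? ` ${Math.round(100 * job.progressCurrent / job.progressTotal)}%` : "";
  console.log(`${job.id} (pid ${job.pid}): ${job.progressLabel ?? job.name}${pct}`);
}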