signetai 0.84.1 → 0.84.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/daemon.js +32 -3
- package/package.json +1 -1
package/dist/daemon.js
CHANGED
|
@@ -30668,8 +30668,15 @@ async function reembedMissingMemoriesBatch(accessor2, embeddingFn, embeddingCfg,
|
|
|
30668
30668
|
const written = accessor2.withWriteTx((db) => {
|
|
30669
30669
|
const now2 = new Date().toISOString();
|
|
30670
30670
|
let count2 = 0;
|
|
30671
|
+
const writeHash = db.prepare("UPDATE memories SET content_hash = ? WHERE id = ? AND content_hash IS NULL");
|
|
30672
|
+
const checkHash = db.prepare(`SELECT id FROM memories WHERE content_hash = ? AND is_deleted = 0 AND id <> ? LIMIT 1`);
|
|
30671
30673
|
for (const { memory, vector } of results) {
|
|
30672
30674
|
const contentHash = typeof memory.contentHash === "string" && memory.contentHash.trim().length > 0 ? memory.contentHash : normalizeAndHashContent(memory.content).contentHash;
|
|
30675
|
+
if (!memory.contentHash) {
|
|
30676
|
+
const collision = checkHash.get(contentHash, memory.id);
|
|
30677
|
+
if (!collision)
|
|
30678
|
+
writeHash.run(contentHash, memory.id);
|
|
30679
|
+
}
|
|
30673
30680
|
const embId = crypto.randomUUID();
|
|
30674
30681
|
const blob = vectorToBlob(vector);
|
|
30675
30682
|
syncVecDeleteBySourceExceptHash(db, "memory", memory.id, contentHash);
|
|
@@ -41603,9 +41610,9 @@ function insertSummaryFacts(accessor2, job, facts) {
|
|
|
41603
41610
|
return accessor2.withWriteTx((db) => {
|
|
41604
41611
|
let count2 = 0;
|
|
41605
41612
|
const stmt = db.prepare(`INSERT INTO memories
|
|
41606
|
-
(id, content, type, importance, source_id, source_type, who, tags,
|
|
41613
|
+
(id, content, content_hash, type, importance, source_id, source_type, who, tags,
|
|
41607
41614
|
project, agent_id, created_at, updated_at, updated_by)
|
|
41608
|
-
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`);
|
|
41615
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`);
|
|
41609
41616
|
for (const item of facts) {
|
|
41610
41617
|
if (!item.content || typeof item.content !== "string")
|
|
41611
41618
|
continue;
|
|
@@ -41614,7 +41621,8 @@ function insertSummaryFacts(accessor2, job, facts) {
|
|
|
41614
41621
|
continue;
|
|
41615
41622
|
const id = crypto.randomUUID();
|
|
41616
41623
|
const type = item.type || inferType(item.content);
|
|
41617
|
-
|
|
41624
|
+
const { contentHash } = normalizeAndHashContent(item.content);
|
|
41625
|
+
stmt.run(id, item.content, contentHash, type, importance, job.session_key || null, "session_end", job.harness, item.tags || null, job.project || null, job.agent_id, now4, now4, SUMMARY_WORKER_UPDATED_BY);
|
|
41618
41626
|
count2++;
|
|
41619
41627
|
}
|
|
41620
41628
|
return count2;
|
|
@@ -41965,6 +41973,7 @@ function enqueueSummaryJob(accessor2, params) {
|
|
|
41965
41973
|
}
|
|
41966
41974
|
var RECOVER_BATCH = 100, RECOVER_LIMIT_MAX = 1000, SUMMARY_WORKER_UPDATED_BY = "summary-worker", COMMAND_STAGE_RUNNING_RESULT = "command-stage-running", COMMAND_STAGE_COMPLETED_RESULT = "command-stage-complete", AGENTS_DIR4, MEMORY_DIR, POLL_INTERVAL_MS = 5000, CHUNK_TARGET_CHARS = 20000;
|
|
41967
41975
|
var init_summary_worker = __esm(() => {
|
|
41976
|
+
init_content_normalization();
|
|
41968
41977
|
init_db_helpers();
|
|
41969
41978
|
init_hooks();
|
|
41970
41979
|
init_logger();
|
|
@@ -43639,6 +43648,17 @@ function enqueueStructuralJob(db, memoryId, jobType, payload) {
|
|
|
43639
43648
|
created_at, updated_at)
|
|
43640
43649
|
VALUES (?, ?, ?, 'pending', ?, 0, 3, ?, ?)`).run(id, memoryId, jobType, payload, now4, now4);
|
|
43641
43650
|
}
|
|
43651
|
+
// Startup recovery: within a single write transaction, mark any pending
// memory_jobs rows that have already exhausted their retry budget as 'dead'.
// If the memory_jobs table does not exist yet (fresh install / pre-migration
// database), this is a no-op and reports zero updates.
// Returns { updated: <number of rows transitioned to 'dead'> }.
function recoverMemoryJobs(accessor2) {
  return accessor2.withWriteTx((db) => {
    // Guard: skip entirely when the jobs table has not been created.
    const jobsTable = db
      .prepare("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'memory_jobs'")
      .get();
    if (!jobsTable) {
      return { updated: 0 };
    }
    // Flip exhausted pending jobs to 'dead' so the worker stops retrying them.
    const runResult = db.prepare(`UPDATE memory_jobs
      SET status = 'dead'
      WHERE status = 'pending' AND attempts >= max_attempts`).run();
    return { updated: countChanges(runResult) };
  });
}
|
|
43642
43662
|
function startWorker(accessor2, provider3, pipelineCfg, decisionCfg, analytics, telemetry, runtimeDeps) {
|
|
43643
43663
|
const runtime = {
|
|
43644
43664
|
now: runtimeDeps?.now ?? (() => Date.now()),
|
|
@@ -44184,6 +44204,15 @@ function startWorker(accessor2, provider3, pipelineCfg, decisionCfg, analytics,
|
|
|
44184
44204
|
clearTimeout(pollTimer);
|
|
44185
44205
|
scheduleTick();
|
|
44186
44206
|
}, WATCHDOG_INTERVAL);
|
|
44207
|
+
try {
|
|
44208
|
+
const { updated } = recoverMemoryJobs(accessor2);
|
|
44209
|
+
if (updated > 0)
|
|
44210
|
+
logger.info("pipeline", `Startup recovery: marked ${updated} exhausted pending job(s) as dead`);
|
|
44211
|
+
} catch (e) {
|
|
44212
|
+
logger.warn("pipeline", "Startup recovery failed (non-fatal)", {
|
|
44213
|
+
error: e instanceof Error ? e.message : String(e)
|
|
44214
|
+
});
|
|
44215
|
+
}
|
|
44187
44216
|
scheduleTick();
|
|
44188
44217
|
logger.info("pipeline", "Worker started", {
|
|
44189
44218
|
pollMs: pipelineCfg.worker.pollMs,
|