engrm 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mcp.json +9 -0
- package/AUTH-DESIGN.md +436 -0
- package/BRIEF.md +197 -0
- package/CLAUDE.md +44 -0
- package/COMPETITIVE.md +174 -0
- package/CONTEXT-OPTIMIZATION.md +305 -0
- package/INFRASTRUCTURE.md +252 -0
- package/LICENSE +105 -0
- package/MARKET.md +230 -0
- package/PLAN.md +278 -0
- package/README.md +121 -0
- package/SENTINEL.md +293 -0
- package/SERVER-API-PLAN.md +553 -0
- package/SPEC.md +843 -0
- package/SWOT.md +148 -0
- package/SYNC-ARCHITECTURE.md +294 -0
- package/VIBE-CODER-STRATEGY.md +250 -0
- package/bun.lock +375 -0
- package/hooks/post-tool-use.ts +144 -0
- package/hooks/session-start.ts +64 -0
- package/hooks/stop.ts +131 -0
- package/mem-page.html +1305 -0
- package/package.json +30 -0
- package/src/capture/dedup.test.ts +103 -0
- package/src/capture/dedup.ts +76 -0
- package/src/capture/extractor.test.ts +245 -0
- package/src/capture/extractor.ts +330 -0
- package/src/capture/quality.test.ts +168 -0
- package/src/capture/quality.ts +104 -0
- package/src/capture/retrospective.test.ts +115 -0
- package/src/capture/retrospective.ts +121 -0
- package/src/capture/scanner.test.ts +131 -0
- package/src/capture/scanner.ts +100 -0
- package/src/capture/scrubber.test.ts +144 -0
- package/src/capture/scrubber.ts +181 -0
- package/src/cli.ts +517 -0
- package/src/config.ts +238 -0
- package/src/context/inject.test.ts +940 -0
- package/src/context/inject.ts +382 -0
- package/src/embeddings/backfill.ts +50 -0
- package/src/embeddings/embedder.test.ts +76 -0
- package/src/embeddings/embedder.ts +139 -0
- package/src/lifecycle/aging.test.ts +103 -0
- package/src/lifecycle/aging.ts +36 -0
- package/src/lifecycle/compaction.test.ts +264 -0
- package/src/lifecycle/compaction.ts +190 -0
- package/src/lifecycle/purge.test.ts +100 -0
- package/src/lifecycle/purge.ts +37 -0
- package/src/lifecycle/scheduler.test.ts +120 -0
- package/src/lifecycle/scheduler.ts +101 -0
- package/src/provisioning/browser-auth.ts +172 -0
- package/src/provisioning/provision.test.ts +198 -0
- package/src/provisioning/provision.ts +94 -0
- package/src/register.test.ts +167 -0
- package/src/register.ts +178 -0
- package/src/server.ts +436 -0
- package/src/storage/migrations.test.ts +244 -0
- package/src/storage/migrations.ts +261 -0
- package/src/storage/outbox.test.ts +229 -0
- package/src/storage/outbox.ts +131 -0
- package/src/storage/projects.test.ts +137 -0
- package/src/storage/projects.ts +184 -0
- package/src/storage/sqlite.test.ts +798 -0
- package/src/storage/sqlite.ts +934 -0
- package/src/storage/vec.test.ts +198 -0
- package/src/sync/auth.test.ts +76 -0
- package/src/sync/auth.ts +68 -0
- package/src/sync/client.ts +183 -0
- package/src/sync/engine.test.ts +94 -0
- package/src/sync/engine.ts +127 -0
- package/src/sync/pull.test.ts +279 -0
- package/src/sync/pull.ts +170 -0
- package/src/sync/push.test.ts +117 -0
- package/src/sync/push.ts +230 -0
- package/src/tools/get.ts +34 -0
- package/src/tools/pin.ts +47 -0
- package/src/tools/save.test.ts +301 -0
- package/src/tools/save.ts +231 -0
- package/src/tools/search.test.ts +69 -0
- package/src/tools/search.ts +181 -0
- package/src/tools/timeline.ts +64 -0
- package/tsconfig.json +22 -0
|
@@ -0,0 +1,261 @@
|
|
|
1
|
+
import { Database } from "bun:sqlite";
|
|
2
|
+
|
|
3
|
+
/**
 * Schema version tracking and migrations for engrm.
 *
 * All schema changes go through numbered migrations. The current version
 * is stored in SQLite's `user_version` pragma. Migrations run sequentially
 * on startup if the DB is behind.
 */
|
|
10
|
+
|
|
11
|
+
/** A single numbered schema migration applied at startup. */
interface Migration {
  // Monotonically increasing version this migration brings the DB up to.
  version: number;
  // Human-readable summary; included in migration-failure error messages.
  description: string;
  // SQL statements; executed together inside one transaction.
  sql: string;
  /** If provided, migration is skipped when this returns false. */
  condition?: (db: Database) => boolean;
}
|
|
18
|
+
|
|
19
|
+
// Ordered list of all schema migrations. Append-only: never edit a shipped
// migration; add a new version instead.
const MIGRATIONS: Migration[] = [
  // v1 — full initial schema.
  {
    version: 1,
    description: "Initial schema: projects, observations, sessions, sync, FTS5",
    sql: `
      -- Projects (canonical identity across machines)
      CREATE TABLE projects (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        canonical_id TEXT UNIQUE NOT NULL,
        name TEXT NOT NULL,
        local_path TEXT,
        remote_url TEXT,
        first_seen_epoch INTEGER NOT NULL,
        last_active_epoch INTEGER NOT NULL
      );

      -- Core observations table
      CREATE TABLE observations (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        session_id TEXT,
        project_id INTEGER NOT NULL REFERENCES projects(id),
        type TEXT NOT NULL CHECK (type IN (
          'bugfix', 'discovery', 'decision', 'pattern',
          'change', 'feature', 'refactor', 'digest'
        )),
        title TEXT NOT NULL,
        narrative TEXT,
        facts TEXT,
        concepts TEXT,
        files_read TEXT,
        files_modified TEXT,
        quality REAL DEFAULT 0.5 CHECK (quality BETWEEN 0.0 AND 1.0),
        lifecycle TEXT DEFAULT 'active' CHECK (lifecycle IN (
          'active', 'aging', 'archived', 'purged', 'pinned'
        )),
        sensitivity TEXT DEFAULT 'shared' CHECK (sensitivity IN (
          'shared', 'personal', 'secret'
        )),
        user_id TEXT NOT NULL,
        device_id TEXT NOT NULL,
        agent TEXT DEFAULT 'claude-code',
        created_at TEXT NOT NULL,
        created_at_epoch INTEGER NOT NULL,
        archived_at_epoch INTEGER,
        compacted_into INTEGER REFERENCES observations(id) ON DELETE SET NULL
      );

      -- Session tracking
      CREATE TABLE sessions (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        session_id TEXT UNIQUE NOT NULL,
        project_id INTEGER REFERENCES projects(id),
        user_id TEXT NOT NULL,
        device_id TEXT NOT NULL,
        agent TEXT DEFAULT 'claude-code',
        status TEXT DEFAULT 'active' CHECK (status IN ('active', 'completed')),
        observation_count INTEGER DEFAULT 0,
        started_at_epoch INTEGER,
        completed_at_epoch INTEGER
      );

      -- Session summaries (generated on Stop hook)
      CREATE TABLE session_summaries (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        session_id TEXT UNIQUE NOT NULL,
        project_id INTEGER REFERENCES projects(id),
        user_id TEXT NOT NULL,
        request TEXT,
        investigated TEXT,
        learned TEXT,
        completed TEXT,
        next_steps TEXT,
        created_at_epoch INTEGER
      );

      -- Sync outbox (offline-first queue)
      CREATE TABLE sync_outbox (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        record_type TEXT NOT NULL CHECK (record_type IN ('observation', 'summary')),
        record_id INTEGER NOT NULL,
        status TEXT DEFAULT 'pending' CHECK (status IN (
          'pending', 'syncing', 'synced', 'failed'
        )),
        retry_count INTEGER DEFAULT 0,
        max_retries INTEGER DEFAULT 10,
        last_error TEXT,
        created_at_epoch INTEGER NOT NULL,
        synced_at_epoch INTEGER,
        next_retry_epoch INTEGER
      );

      -- Sync high-water mark and lifecycle job tracking
      CREATE TABLE sync_state (
        key TEXT PRIMARY KEY,
        value TEXT NOT NULL
      );

      -- FTS5 for local offline search (external content mode)
      CREATE VIRTUAL TABLE observations_fts USING fts5(
        title, narrative, facts, concepts,
        content=observations,
        content_rowid=id
      );

      -- Indexes: observations
      CREATE INDEX idx_observations_project ON observations(project_id);
      CREATE INDEX idx_observations_project_lifecycle ON observations(project_id, lifecycle);
      CREATE INDEX idx_observations_type ON observations(type);
      CREATE INDEX idx_observations_created ON observations(created_at_epoch);
      CREATE INDEX idx_observations_session ON observations(session_id);
      CREATE INDEX idx_observations_lifecycle ON observations(lifecycle);
      CREATE INDEX idx_observations_quality ON observations(quality);
      CREATE INDEX idx_observations_user ON observations(user_id);

      -- Indexes: sessions
      CREATE INDEX idx_sessions_project ON sessions(project_id);

      -- Indexes: sync outbox
      CREATE INDEX idx_outbox_status ON sync_outbox(status, next_retry_epoch);
      CREATE INDEX idx_outbox_record ON sync_outbox(record_type, record_id);
    `,
  },
  // v2 — link an observation to the newer one that superseded it.
  {
    version: 2,
    description: "Add superseded_by for knowledge supersession",
    sql: `
      ALTER TABLE observations ADD COLUMN superseded_by INTEGER REFERENCES observations(id) ON DELETE SET NULL;
      CREATE INDEX idx_observations_superseded ON observations(superseded_by);
    `,
  },
  // v3 — remote id so pulled records are not duplicated; partial unique
  // index allows many local-only rows with NULL remote_source_id.
  {
    version: 3,
    description: "Add remote_source_id for pull deduplication",
    sql: `
      ALTER TABLE observations ADD COLUMN remote_source_id TEXT;
      CREATE UNIQUE INDEX idx_observations_remote_source ON observations(remote_source_id) WHERE remote_source_id IS NOT NULL;
    `,
  },
  // v4 — vector table for semantic search; only applies when the
  // sqlite-vec extension is actually loaded on this connection.
  {
    version: 4,
    description: "Add sqlite-vec for local semantic search",
    sql: `
      CREATE VIRTUAL TABLE vec_observations USING vec0(
        observation_id INTEGER PRIMARY KEY,
        embedding float[384]
      );
    `,
    condition: (db) => isVecExtensionLoaded(db),
  },
  // v5 — per-session activity counters plus a table for security scanner hits.
  {
    version: 5,
    description: "Session metrics and security findings",
    sql: `
      ALTER TABLE sessions ADD COLUMN files_touched_count INTEGER DEFAULT 0;
      ALTER TABLE sessions ADD COLUMN searches_performed INTEGER DEFAULT 0;
      ALTER TABLE sessions ADD COLUMN tool_calls_count INTEGER DEFAULT 0;

      CREATE TABLE security_findings (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        session_id TEXT,
        project_id INTEGER NOT NULL REFERENCES projects(id),
        finding_type TEXT NOT NULL,
        severity TEXT NOT NULL DEFAULT 'medium' CHECK (severity IN ('critical', 'high', 'medium', 'low')),
        pattern_name TEXT NOT NULL,
        file_path TEXT,
        snippet TEXT,
        tool_name TEXT,
        user_id TEXT NOT NULL,
        device_id TEXT NOT NULL,
        created_at_epoch INTEGER NOT NULL
      );

      CREATE INDEX idx_security_findings_session ON security_findings(session_id);
      CREATE INDEX idx_security_findings_project ON security_findings(project_id, created_at_epoch);
      CREATE INDEX idx_security_findings_severity ON security_findings(severity);
    `,
  },
];
|
|
197
|
+
|
|
198
|
+
/**
|
|
199
|
+
* Check if the sqlite-vec extension is loaded in this database.
|
|
200
|
+
*/
|
|
201
|
+
function isVecExtensionLoaded(db: Database): boolean {
|
|
202
|
+
try {
|
|
203
|
+
db.exec("SELECT vec_version()");
|
|
204
|
+
return true;
|
|
205
|
+
} catch {
|
|
206
|
+
return false;
|
|
207
|
+
}
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
/**
 * Run all pending migrations on the given database.
 * Uses SQLite's user_version pragma to track schema version.
 *
 * Each migration runs in its own transaction; on failure the transaction is
 * rolled back and an Error naming the failed migration is thrown, leaving
 * user_version at the last successfully applied migration.
 */
export function runMigrations(db: Database): void {
  const currentVersion = db.query("PRAGMA user_version").get() as {
    user_version: number;
  };
  let version = currentVersion.user_version;

  for (const migration of MIGRATIONS) {
    if (migration.version <= version) continue;

    // Skip conditional migrations when condition is not met
    // NOTE(review): if a conditional migration is skipped here but a LATER
    // migration then runs, user_version advances past the skipped version,
    // so the skipped migration will never be retried even if its condition
    // becomes true on a later startup — confirm this is intended.
    if (migration.condition && !migration.condition(db)) {
      continue;
    }

    db.exec("BEGIN TRANSACTION");
    try {
      db.exec(migration.sql);
      // Bump the version inside the same transaction as the DDL so a crash
      // cannot leave the schema applied but unrecorded (or vice versa).
      db.exec(`PRAGMA user_version = ${migration.version}`);
      db.exec("COMMIT");
      version = migration.version;
    } catch (error) {
      db.exec("ROLLBACK");
      throw new Error(
        `Migration ${migration.version} (${migration.description}) failed: ${
          error instanceof Error ? error.message : String(error)
        }`
      );
    }
  }
}
|
|
244
|
+
|
|
245
|
+
/**
|
|
246
|
+
* Get the current schema version.
|
|
247
|
+
*/
|
|
248
|
+
export function getSchemaVersion(db: Database): number {
|
|
249
|
+
const result = db.query("PRAGMA user_version").get() as {
|
|
250
|
+
user_version: number;
|
|
251
|
+
};
|
|
252
|
+
return result.user_version;
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
/**
|
|
256
|
+
* Expected schema version after all unconditional migrations have run.
|
|
257
|
+
* Conditional migrations (e.g., sqlite-vec) may bump this higher at runtime.
|
|
258
|
+
*/
|
|
259
|
+
export const LATEST_SCHEMA_VERSION = MIGRATIONS
|
|
260
|
+
.filter((m) => !m.condition)
|
|
261
|
+
.reduce((max, m) => Math.max(max, m.version), 0);
|
|
@@ -0,0 +1,229 @@
|
|
|
1
|
+
import { describe, expect, test, beforeEach, afterEach } from "bun:test";
|
|
2
|
+
import { mkdtempSync, rmSync } from "node:fs";
|
|
3
|
+
import { join } from "node:path";
|
|
4
|
+
import { tmpdir } from "node:os";
|
|
5
|
+
import { MemDatabase } from "./sqlite.js";
|
|
6
|
+
import {
|
|
7
|
+
getPendingEntries,
|
|
8
|
+
markSyncing,
|
|
9
|
+
markSynced,
|
|
10
|
+
markFailed,
|
|
11
|
+
purgeSynced,
|
|
12
|
+
getOutboxStats,
|
|
13
|
+
} from "./outbox.js";
|
|
14
|
+
|
|
15
|
+
// Shared fixtures: each test gets a fresh on-disk database in its own temp
// directory, plus the id of a project that every observation attaches to.
let db: MemDatabase;
let tmpDir: string;
let projectId: number;

beforeEach(() => {
  tmpDir = mkdtempSync(join(tmpdir(), "candengo-mem-outbox-test-"));
  db = new MemDatabase(join(tmpDir, "test.db"));
  const project = db.upsertProject({
    canonical_id: "github.com/org/repo",
    name: "repo",
  });
  projectId = project.id;
});

afterEach(() => {
  // Close the DB before deleting the temp directory that holds its file.
  db.close();
  rmSync(tmpDir, { recursive: true, force: true });
});
|
|
33
|
+
|
|
34
|
+
/**
 * Insert a minimal observation and queue it in the sync outbox.
 * Returns the id of the newly created sync_outbox row.
 */
function createObsAndOutboxEntry(): number {
  const obs = db.insertObservation({
    project_id: projectId,
    type: "bugfix",
    title: "Fix something",
    quality: 0.5,
    user_id: "david",
    device_id: "laptop-abc",
  });
  // insertObservation does NOT add to outbox — that's the save tool's job.
  // We add manually here for outbox-specific tests.
  db.addToOutbox("observation", obs.id);

  // Fetch the outbox row we just queued (newest entry for this record id).
  const entry = db.db
    .query<{ id: number }, [number]>(
      "SELECT id FROM sync_outbox WHERE record_id = ? ORDER BY id DESC LIMIT 1"
    )
    .get(obs.id);
  return entry!.id;
}
|
|
54
|
+
|
|
55
|
+
describe("getPendingEntries", () => {
|
|
56
|
+
test("returns pending entries", () => {
|
|
57
|
+
createObsAndOutboxEntry();
|
|
58
|
+
const entries = getPendingEntries(db);
|
|
59
|
+
expect(entries.length).toBe(1);
|
|
60
|
+
expect(entries[0]!.status).toBe("pending");
|
|
61
|
+
});
|
|
62
|
+
|
|
63
|
+
test("returns empty when no pending entries", () => {
|
|
64
|
+
expect(getPendingEntries(db)).toEqual([]);
|
|
65
|
+
});
|
|
66
|
+
|
|
67
|
+
test("respects limit", () => {
|
|
68
|
+
createObsAndOutboxEntry();
|
|
69
|
+
createObsAndOutboxEntry();
|
|
70
|
+
createObsAndOutboxEntry();
|
|
71
|
+
const entries = getPendingEntries(db, 2);
|
|
72
|
+
expect(entries.length).toBe(2);
|
|
73
|
+
});
|
|
74
|
+
|
|
75
|
+
test("includes failed entries past retry time", () => {
|
|
76
|
+
const entryId = createObsAndOutboxEntry();
|
|
77
|
+
markFailed(db, entryId, "network error");
|
|
78
|
+
|
|
79
|
+
// Set next_retry_epoch to the past
|
|
80
|
+
db.db
|
|
81
|
+
.query("UPDATE sync_outbox SET next_retry_epoch = 0 WHERE id = ?")
|
|
82
|
+
.run(entryId);
|
|
83
|
+
|
|
84
|
+
const entries = getPendingEntries(db);
|
|
85
|
+
expect(entries.length).toBe(1);
|
|
86
|
+
});
|
|
87
|
+
|
|
88
|
+
test("excludes failed entries before retry time", () => {
|
|
89
|
+
const entryId = createObsAndOutboxEntry();
|
|
90
|
+
markFailed(db, entryId, "network error");
|
|
91
|
+
// next_retry_epoch is in the future by default
|
|
92
|
+
|
|
93
|
+
const entries = getPendingEntries(db);
|
|
94
|
+
// Should only include failed entries whose retry time has passed
|
|
95
|
+
// Since we just marked it failed, next_retry is in the future
|
|
96
|
+
const failedEntries = entries.filter((e) => e.status === "failed");
|
|
97
|
+
expect(failedEntries.length).toBe(0);
|
|
98
|
+
});
|
|
99
|
+
});
|
|
100
|
+
|
|
101
|
+
describe("markSyncing", () => {
|
|
102
|
+
test("sets status to syncing", () => {
|
|
103
|
+
const entryId = createObsAndOutboxEntry();
|
|
104
|
+
markSyncing(db, entryId);
|
|
105
|
+
|
|
106
|
+
const entry = db.db
|
|
107
|
+
.query<{ status: string }, [number]>(
|
|
108
|
+
"SELECT status FROM sync_outbox WHERE id = ?"
|
|
109
|
+
)
|
|
110
|
+
.get(entryId);
|
|
111
|
+
expect(entry!.status).toBe("syncing");
|
|
112
|
+
});
|
|
113
|
+
});
|
|
114
|
+
|
|
115
|
+
describe("markSynced", () => {
|
|
116
|
+
test("sets status to synced with timestamp", () => {
|
|
117
|
+
const entryId = createObsAndOutboxEntry();
|
|
118
|
+
markSynced(db, entryId);
|
|
119
|
+
|
|
120
|
+
const entry = db.db
|
|
121
|
+
.query<{ status: string; synced_at_epoch: number | null }, [number]>(
|
|
122
|
+
"SELECT status, synced_at_epoch FROM sync_outbox WHERE id = ?"
|
|
123
|
+
)
|
|
124
|
+
.get(entryId);
|
|
125
|
+
expect(entry!.status).toBe("synced");
|
|
126
|
+
expect(entry!.synced_at_epoch).not.toBeNull();
|
|
127
|
+
});
|
|
128
|
+
});
|
|
129
|
+
|
|
130
|
+
describe("markFailed", () => {
|
|
131
|
+
test("increments retry count atomically", () => {
|
|
132
|
+
const entryId = createObsAndOutboxEntry();
|
|
133
|
+
|
|
134
|
+
markFailed(db, entryId, "error 1");
|
|
135
|
+
let entry = db.db
|
|
136
|
+
.query<{ retry_count: number }, [number]>(
|
|
137
|
+
"SELECT retry_count FROM sync_outbox WHERE id = ?"
|
|
138
|
+
)
|
|
139
|
+
.get(entryId);
|
|
140
|
+
expect(entry!.retry_count).toBe(1);
|
|
141
|
+
|
|
142
|
+
markFailed(db, entryId, "error 2");
|
|
143
|
+
entry = db.db
|
|
144
|
+
.query<{ retry_count: number }, [number]>(
|
|
145
|
+
"SELECT retry_count FROM sync_outbox WHERE id = ?"
|
|
146
|
+
)
|
|
147
|
+
.get(entryId);
|
|
148
|
+
expect(entry!.retry_count).toBe(2);
|
|
149
|
+
});
|
|
150
|
+
|
|
151
|
+
test("sets last_error", () => {
|
|
152
|
+
const entryId = createObsAndOutboxEntry();
|
|
153
|
+
markFailed(db, entryId, "connection refused");
|
|
154
|
+
|
|
155
|
+
const entry = db.db
|
|
156
|
+
.query<{ last_error: string | null }, [number]>(
|
|
157
|
+
"SELECT last_error FROM sync_outbox WHERE id = ?"
|
|
158
|
+
)
|
|
159
|
+
.get(entryId);
|
|
160
|
+
expect(entry!.last_error).toBe("connection refused");
|
|
161
|
+
});
|
|
162
|
+
|
|
163
|
+
test("sets next_retry_epoch in the future", () => {
|
|
164
|
+
const entryId = createObsAndOutboxEntry();
|
|
165
|
+
const before = Math.floor(Date.now() / 1000);
|
|
166
|
+
markFailed(db, entryId, "error");
|
|
167
|
+
|
|
168
|
+
const entry = db.db
|
|
169
|
+
.query<{ next_retry_epoch: number | null }, [number]>(
|
|
170
|
+
"SELECT next_retry_epoch FROM sync_outbox WHERE id = ?"
|
|
171
|
+
)
|
|
172
|
+
.get(entryId);
|
|
173
|
+
expect(entry!.next_retry_epoch).not.toBeNull();
|
|
174
|
+
expect(entry!.next_retry_epoch!).toBeGreaterThan(before);
|
|
175
|
+
});
|
|
176
|
+
});
|
|
177
|
+
|
|
178
|
+
describe("purgeSynced", () => {
|
|
179
|
+
test("deletes old synced entries", () => {
|
|
180
|
+
const entryId = createObsAndOutboxEntry();
|
|
181
|
+
markSynced(db, entryId);
|
|
182
|
+
|
|
183
|
+
// Set synced_at to far in the past
|
|
184
|
+
db.db
|
|
185
|
+
.query("UPDATE sync_outbox SET synced_at_epoch = 1000 WHERE id = ?")
|
|
186
|
+
.run(entryId);
|
|
187
|
+
|
|
188
|
+
const deleted = purgeSynced(db, 2000);
|
|
189
|
+
expect(deleted).toBe(1);
|
|
190
|
+
});
|
|
191
|
+
|
|
192
|
+
test("does not delete recent synced entries", () => {
|
|
193
|
+
const entryId = createObsAndOutboxEntry();
|
|
194
|
+
markSynced(db, entryId);
|
|
195
|
+
|
|
196
|
+
const deleted = purgeSynced(db, 1000); // cutoff before sync time
|
|
197
|
+
expect(deleted).toBe(0);
|
|
198
|
+
});
|
|
199
|
+
|
|
200
|
+
test("does not delete pending entries", () => {
|
|
201
|
+
createObsAndOutboxEntry();
|
|
202
|
+
const deleted = purgeSynced(db, Math.floor(Date.now() / 1000) + 9999);
|
|
203
|
+
expect(deleted).toBe(0);
|
|
204
|
+
});
|
|
205
|
+
});
|
|
206
|
+
|
|
207
|
+
describe("getOutboxStats", () => {
|
|
208
|
+
test("returns zeroes when empty", () => {
|
|
209
|
+
const stats = getOutboxStats(db);
|
|
210
|
+
expect(stats["pending"]).toBe(0);
|
|
211
|
+
expect(stats["syncing"]).toBe(0);
|
|
212
|
+
expect(stats["synced"]).toBe(0);
|
|
213
|
+
expect(stats["failed"]).toBe(0);
|
|
214
|
+
});
|
|
215
|
+
|
|
216
|
+
test("counts by status", () => {
|
|
217
|
+
const id1 = createObsAndOutboxEntry();
|
|
218
|
+
const id2 = createObsAndOutboxEntry();
|
|
219
|
+
createObsAndOutboxEntry(); // stays pending
|
|
220
|
+
|
|
221
|
+
markSynced(db, id1);
|
|
222
|
+
markFailed(db, id2, "err");
|
|
223
|
+
|
|
224
|
+
const stats = getOutboxStats(db);
|
|
225
|
+
expect(stats["pending"]).toBe(1);
|
|
226
|
+
expect(stats["synced"]).toBe(1);
|
|
227
|
+
expect(stats["failed"]).toBe(1);
|
|
228
|
+
});
|
|
229
|
+
});
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Sync outbox queue management.
|
|
3
|
+
*
|
|
4
|
+
* The outbox queues observations and summaries for push to Candengo Vector.
|
|
5
|
+
* Supports exponential backoff on retries and batch processing.
|
|
6
|
+
*
|
|
7
|
+
* Flow: pending → syncing → synced | failed (with retry)
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import type { MemDatabase } from "./sqlite.js";
|
|
11
|
+
|
|
12
|
+
/** A row of the sync_outbox queue table. */
export interface OutboxEntry {
  id: number;
  // Which table record_id points at.
  record_type: "observation" | "summary";
  record_id: number;
  // 'pending' | 'syncing' | 'synced' | 'failed' (enforced by a DB CHECK).
  status: string;
  // Number of failed sync attempts so far.
  retry_count: number;
  // Entries stop being offered for retry once retry_count reaches this.
  max_retries: number;
  last_error: string | null;
  created_at_epoch: number;
  // Set when status transitions to 'synced'.
  synced_at_epoch: number | null;
  // Earliest epoch at which a 'failed' entry may be retried.
  next_retry_epoch: number | null;
}
|
|
24
|
+
|
|
25
|
+
/**
|
|
26
|
+
* Get pending outbox entries that are ready to sync.
|
|
27
|
+
* Returns entries that are 'pending' or 'failed' with next_retry_epoch in the past.
|
|
28
|
+
*/
|
|
29
|
+
export function getPendingEntries(
|
|
30
|
+
db: MemDatabase,
|
|
31
|
+
limit: number = 50
|
|
32
|
+
): OutboxEntry[] {
|
|
33
|
+
const now = Math.floor(Date.now() / 1000);
|
|
34
|
+
return db.db
|
|
35
|
+
.query<OutboxEntry, [number, number]>(
|
|
36
|
+
`SELECT * FROM sync_outbox
|
|
37
|
+
WHERE (status = 'pending')
|
|
38
|
+
OR (status = 'failed' AND retry_count < max_retries AND (next_retry_epoch IS NULL OR next_retry_epoch <= ?))
|
|
39
|
+
ORDER BY created_at_epoch ASC
|
|
40
|
+
LIMIT ?`
|
|
41
|
+
)
|
|
42
|
+
.all(now, limit);
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/**
|
|
46
|
+
* Mark an entry as syncing (in-progress).
|
|
47
|
+
*/
|
|
48
|
+
export function markSyncing(db: MemDatabase, entryId: number): void {
|
|
49
|
+
db.db
|
|
50
|
+
.query("UPDATE sync_outbox SET status = 'syncing' WHERE id = ?")
|
|
51
|
+
.run(entryId);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* Mark an entry as successfully synced.
|
|
56
|
+
*/
|
|
57
|
+
export function markSynced(db: MemDatabase, entryId: number): void {
|
|
58
|
+
const now = Math.floor(Date.now() / 1000);
|
|
59
|
+
db.db
|
|
60
|
+
.query(
|
|
61
|
+
"UPDATE sync_outbox SET status = 'synced', synced_at_epoch = ? WHERE id = ?"
|
|
62
|
+
)
|
|
63
|
+
.run(now, entryId);
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
/**
|
|
67
|
+
* Mark an entry as failed with exponential backoff for retry.
|
|
68
|
+
* Backoff: 30s, 60s, 120s, 240s, ... capped at 1 hour.
|
|
69
|
+
*/
|
|
70
|
+
export function markFailed(
|
|
71
|
+
db: MemDatabase,
|
|
72
|
+
entryId: number,
|
|
73
|
+
error: string
|
|
74
|
+
): void {
|
|
75
|
+
const now = Math.floor(Date.now() / 1000);
|
|
76
|
+
|
|
77
|
+
// Atomic increment — avoids TOCTOU race on retry_count.
|
|
78
|
+
// Backoff: 30 * 2^(retry_count) seconds, capped at 3600.
|
|
79
|
+
db.db
|
|
80
|
+
.query(
|
|
81
|
+
`UPDATE sync_outbox SET
|
|
82
|
+
status = 'failed',
|
|
83
|
+
retry_count = retry_count + 1,
|
|
84
|
+
last_error = ?,
|
|
85
|
+
next_retry_epoch = ? + MIN(30 * (1 << retry_count), 3600)
|
|
86
|
+
WHERE id = ?`
|
|
87
|
+
)
|
|
88
|
+
.run(error, now, entryId);
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
/**
|
|
92
|
+
* Clean up old synced entries (older than the given epoch).
|
|
93
|
+
* Prevents the outbox table from growing unbounded.
|
|
94
|
+
*/
|
|
95
|
+
export function purgeSynced(
|
|
96
|
+
db: MemDatabase,
|
|
97
|
+
olderThanEpoch: number
|
|
98
|
+
): number {
|
|
99
|
+
const result = db.db
|
|
100
|
+
.query(
|
|
101
|
+
"DELETE FROM sync_outbox WHERE status = 'synced' AND synced_at_epoch < ?"
|
|
102
|
+
)
|
|
103
|
+
.run(olderThanEpoch);
|
|
104
|
+
return result.changes;
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
/**
|
|
108
|
+
* Get outbox stats for diagnostics.
|
|
109
|
+
*/
|
|
110
|
+
export function getOutboxStats(
|
|
111
|
+
db: MemDatabase
|
|
112
|
+
): Record<string, number> {
|
|
113
|
+
const rows = db.db
|
|
114
|
+
.query<{ status: string; count: number }, []>(
|
|
115
|
+
"SELECT status, COUNT(*) as count FROM sync_outbox GROUP BY status"
|
|
116
|
+
)
|
|
117
|
+
.all();
|
|
118
|
+
|
|
119
|
+
const stats: Record<string, number> = {
|
|
120
|
+
pending: 0,
|
|
121
|
+
syncing: 0,
|
|
122
|
+
synced: 0,
|
|
123
|
+
failed: 0,
|
|
124
|
+
};
|
|
125
|
+
|
|
126
|
+
for (const row of rows) {
|
|
127
|
+
stats[row.status] = row.count;
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
return stats;
|
|
131
|
+
}
|