agenr 0.6.15 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +29 -0
- package/dist/chunk-TW7FHRVW.js +586 -0
- package/dist/cli-main.js +647 -833
- package/dist/openclaw-plugin/index.d.ts +13 -1
- package/dist/openclaw-plugin/index.js +151 -45
- package/package.json +8 -13
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,34 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
+
## [0.7.1] - 2026-02-20
|
|
4
|
+
|
|
5
|
+
### Added
|
|
6
|
+
- feat(init): new `agenr init` command to auto-wire project instructions, MCP config, and `.agenr/config.json` with project slug/platform/projectDir
|
|
7
|
+
- feat(init): `--depends-on` support for dependency-aware project recall scope in `.agenr/config.json`
|
|
8
|
+
|
|
9
|
+
### Changed
|
|
10
|
+
- feat(mcp): default `agenr_recall` scope now reads `AGENR_PROJECT_DIR` + `.agenr/config.json` per call and auto-includes direct dependencies when `project` is omitted
|
|
11
|
+
- feat(mcp): `project="*"` now bypasses configured project scope, while explicit `project` values stay strict (no dependency expansion)
|
|
12
|
+
- feat(mcp): default `agenr_store` project now comes from configured project scope when caller omits `project`
|
|
13
|
+
- docs: corrected setup guidance in `docs/guides/scenarios.md` and aligned MCP examples in `docs/MCP.md` with current init output
|
|
14
|
+
|
|
15
|
+
### Removed
|
|
16
|
+
- perf(mcp): removed public `since_seq` parameter/handler from `agenr_recall`
|
|
17
|
+
- perf(plugin): removed redundant OpenClaw `writeAgenrMd` write path (session-start context is still injected via `prependContext`)
|
|
18
|
+
- perf(signals): removed extra `agenr_recall` footer from signal notifications for lower token overhead
|
|
19
|
+
|
|
20
|
+
## [0.7.0] - 2026-02-19
|
|
21
|
+
|
|
22
|
+
### Added
|
|
23
|
+
- feat(signals): mid-session signal delivery via `before_prompt_build` hook - notifies agents of new high-importance entries (imp >= 7) with compact 50-100 token notifications
|
|
24
|
+
- feat(signals): `signal_watermarks` table for per-consumer rowid-based watermark tracking
|
|
25
|
+
- feat(mcp): `since_seq` parameter on `agenr_recall` for watermark-based incremental recall without embedding cost
|
|
26
|
+
- feat(plugin): `signalsEnabled`, `signalMinImportance`, `signalMaxPerSignal`, and `dbPath` plugin config options
|
|
27
|
+
|
|
28
|
+
### Changed
|
|
29
|
+
- refactor(plugin): plugin now opens a direct DB connection for sub-ms signal queries (vs CLI spawn)
|
|
30
|
+
- refactor(plugin/types): expanded `PluginApi` and `AgenrPluginConfig` types for signal support
|
|
31
|
+
|
|
3
32
|
## [0.6.15] - 2026-02-19
|
|
4
33
|
|
|
5
34
|
### Changed
|
|
@@ -0,0 +1,586 @@
|
|
|
1
|
+
// src/version.ts
import { createRequire } from "module";
var require2 = createRequire(import.meta.url);
// Resolve the application version. Preference order: the bundled
// package.json, then the npm-provided environment variable, and finally a
// "0.0.0" placeholder so callers always receive a non-empty string.
var APP_VERSION = (() => {
  const candidates = [];
  try {
    candidates.push(require2("../package.json").version);
  } catch {
    // package.json may be missing in some install layouts; fall through.
  }
  candidates.push(process.env.npm_package_version);
  for (const candidate of candidates) {
    if (typeof candidate === "string" && candidate.trim().length > 0) {
      return candidate.trim();
    }
  }
  return "0.0.0";
})();
|
|
18
|
+
|
|
19
|
+
// src/db/client.ts
|
|
20
|
+
import { createClient } from "@libsql/client";
|
|
21
|
+
import fs from "fs/promises";
|
|
22
|
+
import os from "os";
|
|
23
|
+
import path from "path";
|
|
24
|
+
|
|
25
|
+
// src/db/schema.ts

// Vector ANN index over entry embeddings (cosine metric, float8-compressed
// neighbor lists, at most 50 neighbors per node).
var CREATE_IDX_ENTRIES_EMBEDDING_SQL = `
CREATE INDEX IF NOT EXISTS idx_entries_embedding ON entries (
libsql_vector_idx(embedding, 'metric=cosine', 'compress_neighbors=float8', 'max_neighbors=50')
)
`;
// External-content FTS5 table mirroring entries(content, subject); kept in
// sync by the entries_ai/entries_ad/entries_au triggers below.
var CREATE_ENTRIES_FTS_TABLE_SQL = `
CREATE VIRTUAL TABLE IF NOT EXISTS entries_fts USING fts5(
content, subject, content=entries, content_rowid=rowid
)
`;
// AFTER INSERT: index the new row.
var CREATE_ENTRIES_FTS_TRIGGER_AI_SQL = `
CREATE TRIGGER IF NOT EXISTS entries_ai AFTER INSERT ON entries BEGIN
INSERT INTO entries_fts(rowid, content, subject) VALUES (new.rowid, new.content, new.subject);
END
`;
// AFTER DELETE: issue the FTS5 'delete' command for the removed row.
var CREATE_ENTRIES_FTS_TRIGGER_AD_SQL = `
CREATE TRIGGER IF NOT EXISTS entries_ad AFTER DELETE ON entries BEGIN
INSERT INTO entries_fts(entries_fts, rowid, content, subject) VALUES ('delete', old.rowid, old.content, old.subject);
END
`;
// AFTER UPDATE: delete the stale index row, then re-add the new values.
var CREATE_ENTRIES_FTS_TRIGGER_AU_SQL = `
CREATE TRIGGER IF NOT EXISTS entries_au AFTER UPDATE ON entries BEGIN
INSERT INTO entries_fts(entries_fts, rowid, content, subject) VALUES ('delete', old.rowid, old.content, old.subject);
INSERT INTO entries_fts(rowid, content, subject) VALUES (new.rowid, new.content, new.subject);
END
`;
// FTS5 maintenance command: rebuild the whole index from the entries table.
var REBUILD_ENTRIES_FTS_SQL = "INSERT INTO entries_fts(entries_fts) VALUES ('rebuild')";
// _meta sentinel key recording that the one-time confidence -> importance
// backfill (see initSchema) has already run.
var LEGACY_IMPORTANCE_BACKFILL_META_KEY = "legacy_importance_backfill_from_confidence_v1";
// Tables and triggers, executed in order by initSchema. All statements are
// idempotent (IF NOT EXISTS), so re-running them on an existing DB is safe.
var CREATE_TABLE_AND_TRIGGER_STATEMENTS = [
  `
CREATE TABLE IF NOT EXISTS _meta (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at TEXT NOT NULL
)
`,
  `
CREATE TABLE IF NOT EXISTS entries (
id TEXT PRIMARY KEY,
type TEXT NOT NULL,
subject TEXT NOT NULL,
canonical_key TEXT,
content TEXT NOT NULL,
importance INTEGER NOT NULL,
expiry TEXT NOT NULL,
scope TEXT DEFAULT 'private',
platform TEXT DEFAULT NULL,
project TEXT DEFAULT NULL,
source_file TEXT,
source_context TEXT,
embedding F32_BLOB(1024),
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
last_recalled_at TEXT,
recall_count INTEGER DEFAULT 0,
confirmations INTEGER DEFAULT 0,
contradictions INTEGER DEFAULT 0,
superseded_by TEXT,
content_hash TEXT,
merged_from INTEGER DEFAULT 0,
consolidated_at TEXT,
retired INTEGER NOT NULL DEFAULT 0,
retired_at TEXT,
retired_reason TEXT,
suppressed_contexts TEXT,
FOREIGN KEY (superseded_by) REFERENCES entries(id)
)
`,
  `
CREATE TABLE IF NOT EXISTS tags (
entry_id TEXT NOT NULL,
tag TEXT NOT NULL,
PRIMARY KEY (entry_id, tag),
FOREIGN KEY (entry_id) REFERENCES entries(id) ON DELETE CASCADE
)
`,
  `
CREATE TABLE IF NOT EXISTS relations (
id TEXT PRIMARY KEY,
source_id TEXT NOT NULL,
target_id TEXT NOT NULL,
relation_type TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (source_id) REFERENCES entries(id) ON DELETE CASCADE,
FOREIGN KEY (target_id) REFERENCES entries(id) ON DELETE CASCADE
)
`,
  `
CREATE TABLE IF NOT EXISTS ingest_log (
id TEXT PRIMARY KEY,
file_path TEXT NOT NULL,
ingested_at TEXT NOT NULL,
entries_added INTEGER NOT NULL,
entries_updated INTEGER NOT NULL,
entries_skipped INTEGER NOT NULL,
duration_ms INTEGER NOT NULL,
content_hash TEXT,
entries_superseded INTEGER NOT NULL DEFAULT 0,
dedup_llm_calls INTEGER NOT NULL DEFAULT 0
)
`,
  `
CREATE TABLE IF NOT EXISTS entry_sources (
merged_entry_id TEXT NOT NULL REFERENCES entries(id),
source_entry_id TEXT NOT NULL REFERENCES entries(id),
original_confirmations INTEGER NOT NULL DEFAULT 0,
original_recall_count INTEGER NOT NULL DEFAULT 0,
original_created_at TEXT,
PRIMARY KEY (merged_entry_id, source_entry_id)
)
`,
  `
CREATE TABLE IF NOT EXISTS signal_watermarks (
consumer_id TEXT PRIMARY KEY,
last_received_seq INTEGER NOT NULL DEFAULT 0,
updated_at TEXT NOT NULL
)
`,
  CREATE_ENTRIES_FTS_TABLE_SQL,
  CREATE_ENTRIES_FTS_TRIGGER_AI_SQL,
  CREATE_ENTRIES_FTS_TRIGGER_AD_SQL,
  CREATE_ENTRIES_FTS_TRIGGER_AU_SQL
];
// Secondary indexes, executed after tables/triggers exist (all idempotent).
var CREATE_INDEX_STATEMENTS = [
  CREATE_IDX_ENTRIES_EMBEDDING_SQL,
  "CREATE INDEX IF NOT EXISTS idx_entries_type ON entries(type)",
  "CREATE INDEX IF NOT EXISTS idx_entries_type_canonical_key ON entries(type, canonical_key)",
  "CREATE INDEX IF NOT EXISTS idx_entries_expiry ON entries(expiry)",
  "CREATE INDEX IF NOT EXISTS idx_entries_scope ON entries(scope)",
  "CREATE INDEX IF NOT EXISTS idx_entries_platform ON entries(platform)",
  "CREATE INDEX IF NOT EXISTS idx_entries_project ON entries(project)",
  "CREATE INDEX IF NOT EXISTS idx_entries_created ON entries(created_at)",
  "CREATE INDEX IF NOT EXISTS idx_entries_superseded ON entries(superseded_by)",
  "CREATE INDEX IF NOT EXISTS idx_entries_content_hash ON entries(content_hash)",
  "CREATE INDEX IF NOT EXISTS idx_tags_tag ON tags(tag)",
  "CREATE INDEX IF NOT EXISTS idx_relations_source ON relations(source_id)",
  "CREATE INDEX IF NOT EXISTS idx_relations_target ON relations(target_id)",
  "CREATE UNIQUE INDEX IF NOT EXISTS idx_ingest_log_file_hash ON ingest_log(file_path, content_hash)"
];
// Additive migrations for databases created by older versions. initSchema
// applies each one only when the column is absent from PRAGMA table_info;
// items flagged isIndex are looked up in sqlite_master by name instead (the
// `column` field then holds the index name).
var COLUMN_MIGRATIONS = [
  {
    table: "entries",
    column: "importance",
    sql: "ALTER TABLE entries ADD COLUMN importance INTEGER NOT NULL DEFAULT 5"
  },
  {
    table: "entries",
    column: "canonical_key",
    sql: "ALTER TABLE entries ADD COLUMN canonical_key TEXT"
  },
  {
    table: "entries",
    column: "scope",
    sql: "ALTER TABLE entries ADD COLUMN scope TEXT DEFAULT 'private'"
  },
  {
    table: "entries",
    column: "content_hash",
    sql: "ALTER TABLE entries ADD COLUMN content_hash TEXT"
  },
  {
    table: "entries",
    column: "merged_from",
    sql: "ALTER TABLE entries ADD COLUMN merged_from INTEGER DEFAULT 0"
  },
  {
    table: "entries",
    column: "consolidated_at",
    sql: "ALTER TABLE entries ADD COLUMN consolidated_at TEXT"
  },
  {
    table: "entries",
    column: "platform",
    sql: "ALTER TABLE entries ADD COLUMN platform TEXT DEFAULT NULL"
  },
  {
    table: "entries",
    column: "project",
    sql: "ALTER TABLE entries ADD COLUMN project TEXT DEFAULT NULL"
  },
  {
    table: "entries",
    column: "retired",
    sql: "ALTER TABLE entries ADD COLUMN retired INTEGER NOT NULL DEFAULT 0"
  },
  {
    table: "entries",
    column: "idx_entries_retired",
    sql: "CREATE INDEX IF NOT EXISTS idx_entries_retired ON entries (retired) WHERE retired = 0",
    isIndex: true
  },
  {
    table: "entries",
    column: "retired_at",
    sql: "ALTER TABLE entries ADD COLUMN retired_at TEXT"
  },
  {
    table: "entries",
    column: "retired_reason",
    sql: "ALTER TABLE entries ADD COLUMN retired_reason TEXT"
  },
  {
    table: "entries",
    column: "suppressed_contexts",
    sql: "ALTER TABLE entries ADD COLUMN suppressed_contexts TEXT"
  },
  {
    table: "ingest_log",
    column: "content_hash",
    sql: "ALTER TABLE ingest_log ADD COLUMN content_hash TEXT"
  },
  {
    table: "ingest_log",
    column: "entries_superseded",
    sql: "ALTER TABLE ingest_log ADD COLUMN entries_superseded INTEGER NOT NULL DEFAULT 0"
  },
  {
    table: "ingest_log",
    column: "dedup_llm_calls",
    sql: "ALTER TABLE ingest_log ADD COLUMN dedup_llm_calls INTEGER NOT NULL DEFAULT 0"
  },
  {
    table: "entry_sources",
    column: "original_created_at",
    sql: "ALTER TABLE entry_sources ADD COLUMN original_created_at TEXT"
  },
  {
    table: "entries",
    column: "recall_intervals",
    sql: "ALTER TABLE entries ADD COLUMN recall_intervals TEXT DEFAULT NULL"
  }
];
|
|
258
|
+
// Create or upgrade the database schema. Idempotent: safe to run on every
// open. Order matters: tables/triggers, legacy detection, FTS repair, column
// migrations, the one-time confidence->importance backfill, then indexes and
// _meta bookkeeping.
async function initSchema(client) {
  for (const statement of CREATE_TABLE_AND_TRIGGER_STATEMENTS) {
    await client.execute(statement);
  }
  // Peek ahead: if the legacy `confidence` column exists and the backfill has
  // not been recorded in _meta, the backfill below will run (and does its own
  // FTS rebuild), so the generic FTS repair in the next step is skipped.
  let willRunLegacyBackfill = false;
  try {
    const earlyEntriesInfo = await client.execute("PRAGMA table_info(entries)");
    const earlyColumns = new Set(earlyEntriesInfo.rows.map((row) => String(row.name)));
    if (earlyColumns.has("confidence")) {
      const sentinel = await client.execute({
        sql: "SELECT 1 AS found FROM _meta WHERE key = ? LIMIT 1",
        args: [LEGACY_IMPORTANCE_BACKFILL_META_KEY]
      });
      const alreadyBackfilled = sentinel.rows.length > 0;
      willRunLegacyBackfill = !alreadyBackfilled;
    }
  } catch {
    // Best-effort probe; errors are treated as "no legacy backfill needed".
  }
  // Repair: entries exist but the FTS mirror is empty (e.g. FTS added after
  // the data) -> rebuild the index from the entries table.
  try {
    const entriesCountResult = await client.execute("SELECT COUNT(*) AS count FROM entries");
    const entriesCount = Number(entriesCountResult.rows[0]?.count ?? 0);
    const ftsCountResult = await client.execute("SELECT COUNT(*) AS count FROM entries_fts");
    const ftsCount = Number(ftsCountResult.rows[0]?.count ?? 0);
    if (entriesCount > 0 && ftsCount === 0 && !willRunLegacyBackfill) {
      await client.execute(REBUILD_ENTRIES_FTS_SQL);
    }
  } catch {
    // Best-effort repair; a failure here never blocks schema init.
  }
  // Apply additive migrations: index entries are checked in sqlite_master,
  // column entries in PRAGMA table_info, and each is applied only if absent.
  for (const migration of COLUMN_MIGRATIONS) {
    if (migration.isIndex) {
      const existingIndex = await client.execute({
        sql: "SELECT name FROM sqlite_master WHERE type='index' AND name=?",
        args: [migration.column]
      });
      if (existingIndex.rows.length > 0) {
        continue;
      }
      await client.execute(migration.sql);
      continue;
    }
    const info = await client.execute(`PRAGMA table_info(${migration.table})`);
    const hasColumn = info.rows.some((row) => String(row.name) === migration.column);
    if (!hasColumn) {
      await client.execute(migration.sql);
    }
  }
  // One-time legacy backfill: map the old `confidence` values onto the new
  // `importance` column, guarded by a _meta sentinel so it runs exactly once.
  const entriesInfo = await client.execute("PRAGMA table_info(entries)");
  const entryColumns = new Set(entriesInfo.rows.map((row) => String(row.name)));
  const backfillSentinel = await client.execute({
    sql: "SELECT 1 AS found FROM _meta WHERE key = ? LIMIT 1",
    args: [LEGACY_IMPORTANCE_BACKFILL_META_KEY]
  });
  const legacyBackfillDone = backfillSentinel.rows.length > 0;
  if (entryColumns.has("confidence") && entryColumns.has("importance") && !legacyBackfillDone) {
    // Drop the FTS triggers/table first so the bulk UPDATE below does not
    // churn the FTS index row by row; it is rebuilt wholesale afterwards.
    try {
      await client.execute("DROP TRIGGER IF EXISTS entries_ai");
      await client.execute("DROP TRIGGER IF EXISTS entries_ad");
      await client.execute("DROP TRIGGER IF EXISTS entries_au");
      await client.execute("DROP TABLE IF EXISTS entries_fts");
    } catch {
    }
    // Map textual confidence ('low'/'medium'/'high') to 3/6/8; numeric
    // strings in 1..10 carry over as-is; anything else stays at the default 5.
    // Only rows still at the default importance (5) are touched.
    await client.execute(`
UPDATE entries
SET importance = CASE lower(trim(confidence))
WHEN 'low' THEN 3
WHEN 'medium' THEN 6
WHEN 'high' THEN 8
ELSE
CASE
WHEN CAST(confidence AS INTEGER) BETWEEN 1 AND 10 THEN CAST(confidence AS INTEGER)
ELSE 5
END
END
WHERE importance = 5
`);
    // Recreate the FTS table/triggers and rebuild the index from entries.
    try {
      await client.execute(CREATE_ENTRIES_FTS_TABLE_SQL);
      await client.execute(CREATE_ENTRIES_FTS_TRIGGER_AI_SQL);
      await client.execute(CREATE_ENTRIES_FTS_TRIGGER_AD_SQL);
      await client.execute(CREATE_ENTRIES_FTS_TRIGGER_AU_SQL);
      await client.execute(REBUILD_ENTRIES_FTS_SQL);
    } catch {
    }
    // Record the sentinel so the backfill never runs again.
    try {
      await client.execute({
        sql: `
INSERT INTO _meta (key, value, updated_at)
VALUES (?, datetime('now'), datetime('now'))
ON CONFLICT(key) DO NOTHING
`,
        args: [LEGACY_IMPORTANCE_BACKFILL_META_KEY]
      });
    } catch {
    }
  }
  for (const statement of CREATE_INDEX_STATEMENTS) {
    await client.execute(statement);
  }
  // Record the DB creation time once (DO NOTHING on conflict)...
  await client.execute({
    sql: `
INSERT INTO _meta (key, value, updated_at)
VALUES ('db_created_at', datetime('now'), datetime('now'))
ON CONFLICT(key) DO NOTHING
`,
    args: []
  });
  // ...and always bump schema_version to the current app version (upsert).
  await client.execute({
    sql: `
INSERT INTO _meta (key, value, updated_at)
VALUES ('schema_version', ?, datetime('now'))
ON CONFLICT(key) DO UPDATE SET value = excluded.value, updated_at = excluded.updated_at
`,
    args: [APP_VERSION]
  });
}
|
|
373
|
+
// Drop every user-created trigger, index, and table, then recreate the schema
// via initSchema. Foreign keys are disabled for the duration so objects can
// be dropped in any order; the caller's previous setting is restored after.
async function resetDb(db) {
  // Capture the current foreign_keys setting. The pragma's column name varies
  // by driver, so fall back to the row's first column; default to ON (1).
  const foreignKeysResult = await db.execute("PRAGMA foreign_keys");
  const foreignKeysRow = foreignKeysResult.rows[0];
  const previousForeignKeys = Number(foreignKeysRow?.foreign_keys ?? (foreignKeysRow ? Object.values(foreignKeysRow)[0] : 1)) === 0 ? 0 : 1;
  await db.execute("PRAGMA foreign_keys=OFF");
  try {
    // Enumerate user objects (sqlite_* internals excluded), ordered so
    // triggers drop before indexes, and indexes before tables.
    const schemaObjects = await db.execute(`
SELECT type, name
FROM sqlite_master
WHERE name NOT LIKE 'sqlite_%'
ORDER BY
CASE type
WHEN 'trigger' THEN 1
WHEN 'index' THEN 2
WHEN 'table' THEN 3
ELSE 4
END,
name
`);
    for (const row of schemaObjects.rows) {
      const type = String(row.type ?? "");
      const name = String(row.name ?? "");
      if (!type || !name) {
        continue;
      }
      // Double quoted-identifier escaping for names containing `"`.
      const safeName = name.replace(/"/g, '""');
      if (type === "trigger") {
        await db.execute(`DROP TRIGGER IF EXISTS "${safeName}"`);
        continue;
      }
      if (type === "index") {
        await db.execute(`DROP INDEX IF EXISTS "${safeName}"`);
        continue;
      }
      if (type === "table") {
        await db.execute(`DROP TABLE IF EXISTS "${safeName}"`);
      }
    }
    await initSchema(db);
  } finally {
    // Restore whatever foreign_keys setting the connection had before.
    await db.execute(`PRAGMA foreign_keys=${previousForeignKeys === 0 ? "OFF" : "ON"}`);
  }
}
|
|
416
|
+
|
|
417
|
+
// src/db/client.ts
// Default on-disk location of the knowledge database.
var DEFAULT_DB_PATH = path.join(os.homedir(), ".agenr", "knowledge.db");
// Maps each file-backed client to the promise of its WAL/busy_timeout setup
// PRAGMAs, so initDb can await completion before running schema work.
var walInitByClient = /* @__PURE__ */ new WeakMap();
// Ensures the vector-index corruption warning prints at most once per process.
var didWarnVectorIndexCorruption = false;
// WAL checkpoint retry policy: up to 5 attempts, backing off 50ms * attempt.
var WAL_CHECKPOINT_MAX_ATTEMPTS = 5;
var WAL_CHECKPOINT_RETRY_MS = 50;
|
|
423
|
+
// Expand a leading "~" to the current user's home directory; any other path
// is returned untouched.
function resolveUserPath(inputPath) {
  return inputPath.startsWith("~")
    ? path.join(os.homedir(), inputPath.slice(1))
    : inputPath;
}
// Like resolveUserPath, but the ":memory:" sentinel passes through as-is.
function resolveDbPath(dbPath) {
  return dbPath === ":memory:" ? dbPath : resolveUserPath(dbPath);
}
// Strip an optional "file:" URL prefix, expand "~", and resolve to an
// absolute filesystem path suitable for fs operations on the DB file.
function normalizeBackupSourcePath(dbPath) {
  const withoutScheme = dbPath.startsWith("file:") ? dbPath.slice(5) : dbPath;
  return path.resolve(resolveDbPath(withoutScheme));
}
|
|
439
|
+
// True when `error` is an errno-style error carrying the given code.
function isErrnoCode(error, code) {
  return error?.code === code;
}
// Copy a WAL/SHM sidecar next to a backup. A missing source file is not an
// error (the sidecar simply does not exist); any other failure is rethrown.
async function copySidecarIfPresent(sourcePath, targetPath) {
  try {
    await fs.copyFile(sourcePath, targetPath);
  } catch (error) {
    if (!isErrnoCode(error, "ENOENT")) {
      throw error;
    }
  }
}
|
|
452
|
+
// Derive a timestamped backup filename alongside the database file, e.g.
// knowledge.db.backup-pre-reset-2026-02-20T12-00-00-000Z (colons and dots in
// the ISO timestamp are replaced so the name is filesystem-safe).
function buildBackupPath(dbPath) {
  const source = normalizeBackupSourcePath(dbPath);
  const stamp = new Date().toISOString().replace(/[:.]/g, "-").replace(/Z$/, "");
  const backupName = `${path.basename(source)}.backup-pre-reset-${stamp}Z`;
  return path.join(path.dirname(source), backupName);
}
|
|
460
|
+
// Open a libsql client for dbPath (default: ~/.agenr/knowledge.db).
// - ":memory:" opens an in-memory DB with no WAL setup.
// - "file:" URLs are passed through verbatim; plain paths get "~" expansion
//   and a "file:" prefix.
// File-backed clients are switched to WAL mode with a 3s busy timeout; the
// in-flight PRAGMA promise is stashed in walInitByClient so initDb can await
// it before doing schema work. (Fix: the two file-backed branches previously
// duplicated the entire WAL-init sequence verbatim; they now share one path.)
function getDb(dbPath) {
  const rawPath = dbPath?.trim() ? dbPath.trim() : DEFAULT_DB_PATH;
  if (rawPath === ":memory:") {
    return createClient({ url: ":memory:" });
  }
  const url = rawPath.startsWith("file:") ? rawPath : `file:${resolveDbPath(rawPath)}`;
  const client = createClient({ url });
  walInitByClient.set(
    client,
    client.execute("PRAGMA journal_mode=WAL").then(() => client.execute("PRAGMA busy_timeout=3000")).then(() => void 0)
  );
  return client;
}
|
|
481
|
+
// Finish initializing a client from getDb: await any pending WAL setup,
// create/upgrade the schema, then probe the vector index with a minimal
// vector_top_k query. A failing probe prints a one-time warning to stderr
// instead of failing startup.
async function initDb(client) {
  const walInit = walInitByClient.get(client);
  if (walInit) {
    await walInit;
    await client.execute("PRAGMA wal_autocheckpoint=1000");
  }
  await initSchema(client);
  try {
    // Only probe when at least one embedding exists; otherwise there is
    // nothing to query the index with.
    const hasEntries = await client.execute(
      "SELECT 1 FROM entries WHERE embedding IS NOT NULL LIMIT 1"
    );
    if (hasEntries.rows.length > 0) {
      await client.execute(`
SELECT count(*) FROM vector_top_k(
'idx_entries_embedding',
(SELECT embedding FROM entries WHERE embedding IS NOT NULL LIMIT 1),
1
)
`);
    }
  } catch {
    // Probe failed: warn once per process and continue.
    if (!didWarnVectorIndexCorruption) {
      didWarnVectorIndexCorruption = true;
      process.stderr.write(
        "\n\u26A0\uFE0F Vector index may be corrupted. Run `agenr db rebuild-index` to fix.\n\n"
      );
    }
  }
}
|
|
510
|
+
// Best-effort conversion of a SQLite cell value to a finite number.
// Accepts finite numbers, bigints, and non-blank numeric strings; everything
// else (including NaN/Infinity and blank strings) yields null.
function toFiniteNumber(value) {
  switch (typeof value) {
    case "number":
      return Number.isFinite(value) ? value : null;
    case "bigint":
      return Number(value);
    case "string": {
      if (value.trim().length === 0) {
        return null;
      }
      const parsed = Number(value);
      return Number.isFinite(parsed) ? parsed : null;
    }
    default:
      return null;
  }
}
// Pull the "busy" column out of a PRAGMA wal_checkpoint result row, falling
// back to the row's first column when the driver does not name columns.
// Returns null for non-object rows or non-numeric values.
function extractBusyFromCheckpointRow(row) {
  if (row === null || typeof row !== "object") {
    return null;
  }
  const busyValue = row.busy !== undefined && row.busy !== null ? row.busy : Object.values(row)[0];
  return toFiniteNumber(busyValue);
}
|
|
531
|
+
// Run PRAGMA wal_checkpoint(TRUNCATE) until it reports busy=0, retrying with
// linearly growing backoff. Throws when the result cannot be validated or
// when active readers keep the checkpoint blocked past the attempt budget.
async function validatedWalCheckpoint(client) {
  let attempt = 0;
  while (attempt < WAL_CHECKPOINT_MAX_ATTEMPTS) {
    attempt += 1;
    const result = await client.execute("PRAGMA wal_checkpoint(TRUNCATE)");
    const busy = extractBusyFromCheckpointRow(result.rows[0]);
    if (busy === null) {
      throw new Error("WAL checkpoint returned an unexpected result and could not be validated.");
    }
    if (busy === 0) {
      return;
    }
    if (attempt >= WAL_CHECKPOINT_MAX_ATTEMPTS) {
      throw new Error(
        `WAL checkpoint did not finish (busy=${busy}). Active readers are blocking backup safety.`
      );
    }
    await new Promise((resolve) => setTimeout(resolve, WAL_CHECKPOINT_RETRY_MS * attempt));
  }
}
// Public alias: checkpoint with validation and retries.
async function walCheckpoint(client) {
  await validatedWalCheckpoint(client);
}
|
|
553
|
+
// Checkpoint the WAL, then copy the database file (and any -wal/-shm sidecar
// files) to a timestamped backup path next to it. Returns the backup path.
// Throws for in-memory databases, which have no file to copy.
async function backupDb(dbPath) {
  if (dbPath === ":memory:") {
    throw new Error("Cannot back up in-memory databases.");
  }
  // Flush pending WAL frames into the main file before copying, so the
  // backup is complete even without its sidecars.
  const checkpointClient = getDb(dbPath);
  try {
    await walCheckpoint(checkpointClient);
  } finally {
    closeDb(checkpointClient);
  }
  const sourcePath = normalizeBackupSourcePath(dbPath);
  const backupPath = buildBackupPath(dbPath);
  await fs.copyFile(sourcePath, backupPath);
  for (const suffix of ["-wal", "-shm"]) {
    await copySidecarIfPresent(`${sourcePath}${suffix}`, `${backupPath}${suffix}`);
  }
  return backupPath;
}
|
|
570
|
+
// Close a libsql client opened by getDb; thin wrapper kept so callers have a
// symmetric open/close API.
function closeDb(client) {
  client.close();
}
|
|
573
|
+
|
|
574
|
+
// Public surface of this chunk, re-exported by the dist entrypoints.
export {
  APP_VERSION,
  CREATE_IDX_ENTRIES_EMBEDDING_SQL,
  initSchema,
  resetDb,
  DEFAULT_DB_PATH,
  buildBackupPath,
  getDb,
  initDb,
  walCheckpoint,
  backupDb,
  closeDb
};
|