engramx 1.0.1 → 2.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. package/CHANGELOG.md +150 -0
  2. package/README.md +67 -7
  3. package/dist/{aider-context-TNGSXMVY.js → aider-context-BC5R2ZTA.js} +1 -1
  4. package/dist/cache-AK6CF3BC.js +10 -0
  5. package/dist/chunk-22INHMKB.js +31 -0
  6. package/dist/chunk-533LR4I7.js +220 -0
  7. package/dist/{chunk-QOG4K427.js → chunk-C6GBUOAL.js} +1 -1
  8. package/dist/chunk-CIQQ5Y3S.js +338 -0
  9. package/dist/chunk-KL6NSPVA.js +59 -0
  10. package/dist/{chunk-SBHGK5WA.js → chunk-PEH54LYC.js} +85 -3
  11. package/dist/{chunk-6SFMVYUN.js → chunk-SJT7VS2G.js} +127 -23
  12. package/dist/cli.js +383 -258
  13. package/dist/{core-77MHT3QV.js → core-6IY5L6II.js} +2 -2
  14. package/dist/{cursor-mdc-HWVUZUZH.js → cursor-mdc-GJ7E5LDD.js} +1 -1
  15. package/dist/{exporter-A3VSLS4U.js → exporter-GWU2GF23.js} +1 -1
  16. package/dist/grammars/tree-sitter-go.wasm +0 -0
  17. package/dist/grammars/tree-sitter-javascript.wasm +0 -0
  18. package/dist/grammars/tree-sitter-python.wasm +0 -0
  19. package/dist/grammars/tree-sitter-rust.wasm +0 -0
  20. package/dist/grammars/tree-sitter-tsx.wasm +0 -0
  21. package/dist/grammars/tree-sitter-typescript.wasm +0 -0
  22. package/dist/{importer-LU2YFZDY.js → importer-V62NGZRK.js} +1 -1
  23. package/dist/index.js +3 -3
  24. package/dist/{migrate-5ZJWF2HD.js → migrate-UKCO6BUU.js} +3 -1
  25. package/dist/plugin-loader-STTGYIL5.js +106 -0
  26. package/dist/serve.js +2 -2
  27. package/dist/server-6AOI7NQP.js +1370 -0
  28. package/dist/{tuner-2LVIEE5V.js → tuner-KFNNGKG3.js} +4 -2
  29. package/dist/windsurf-rules-C7SVDHBL.js +59 -0
  30. package/package.json +4 -3
  31. package/dist/chunk-CEAANHHX.js +0 -88
  32. package/dist/server-I3C74ZLB.js +0 -193
@@ -0,0 +1,338 @@
1
+ // src/intelligence/cache.ts
2
+ import { statSync } from "fs";
3
+ import { join } from "path";
4
// Minimal LRU cache built on Map's insertion-order guarantee: the first
// key yielded by map.keys() is always the least-recently-used entry,
// and a hit is refreshed by deleting and re-inserting it at the tail.
var LRUCache = class {
  map = /* @__PURE__ */ new Map();
  maxSize;
  constructor(maxSize) {
    this.maxSize = maxSize;
  }
  /** Return the cached value for `key` (undefined on miss), refreshing its recency on a hit. */
  get(key) {
    const hit = this.map.get(key);
    if (hit === void 0) return hit;
    // Re-insert so this key becomes the most recently used.
    this.map.delete(key);
    this.map.set(key, hit);
    return hit;
  }
  /** Insert or update `key`, evicting the least-recently-used entry when at capacity. */
  set(key, value) {
    // Drop any stale copy first so the size check counts only other entries.
    this.map.delete(key);
    if (this.map.size >= this.maxSize) {
      const { value: oldestKey } = this.map.keys().next();
      if (oldestKey !== void 0) this.map.delete(oldestKey);
    }
    this.map.set(key, value);
  }
  /** Remove `key` if present. */
  delete(key) {
    this.map.delete(key);
  }
  /** Empty the cache. */
  clear() {
    this.map.clear();
  }
  /** Current number of cached entries. */
  get size() {
    return this.map.size;
  }
};
36
/**
 * Two-tier cache for per-file "context packets" and structural query results.
 *
 * Tier 1 is an in-process LRU (fast, session-local); tier 2 is a pair of
 * SQLite tables (`query_cache`, `pattern_cache`) that survive restarts.
 * Query entries are invalidated by file mtime; pattern entries by a graph
 * version counter.
 *
 * NOTE(review): `store` appears to expose a sql.js-style API
 * (runSql / prepare / bind / step / getAsObject / free) — confirm against
 * the Store implementation. Cached `result` values are strings (TEXT column).
 */
var ContextCache = class {
  // In-memory hot layer over the persistent tables (capacities 100 / 50).
  queryLRU = new LRUCache(100);
  patternLRU = new LRUCache(50);
  // File paths pre-warmed by warmHotFiles(); reported via getStats() only.
  hotFiles = /* @__PURE__ */ new Set();
  // Session-local counters; persisted hit_count columns track lifetime hits.
  queryHits = 0;
  queryMisses = 0;
  patternHits = 0;
  patternMisses = 0;
  /**
   * Initialize cache tables in the store. Call once when the store opens.
   * Safe to call multiple times (uses IF NOT EXISTS).
   */
  // NOTE(review): this SQL duplicates migration 7 in src/db/migrate.ts —
  // keep the two definitions in sync.
  static ensureTables(store) {
    store.runSql(`
      CREATE TABLE IF NOT EXISTS query_cache (
        key TEXT PRIMARY KEY,
        result TEXT NOT NULL,
        file_path TEXT NOT NULL,
        file_mtime REAL NOT NULL,
        created_at INTEGER NOT NULL,
        hit_count INTEGER NOT NULL DEFAULT 0
      )
    `);
    store.runSql(`
      CREATE TABLE IF NOT EXISTS pattern_cache (
        pattern TEXT PRIMARY KEY,
        result TEXT NOT NULL,
        graph_version INTEGER NOT NULL,
        hit_count INTEGER NOT NULL DEFAULT 0
      )
    `);
    store.runSql(
      "CREATE INDEX IF NOT EXISTS idx_query_cache_file ON query_cache(file_path)"
    );
  }
  // ─── Query Cache (per-file context packets) ─────────────────────
  /**
   * Get a cached context packet for a file. Returns null on miss or if
   * the file has been modified since caching.
   *
   * @param store   open store handle
   * @param filePath project-relative path (the cache key)
   * @param absPath  absolute path, used for the mtime freshness check
   */
  getQuery(store, filePath, absPath) {
    const memResult = this.queryLRU.get(filePath);
    if (memResult !== void 0) {
      // Memory hit — but still validate against the file's current mtime
      // using the persisted row, so edits made outside this process are seen.
      try {
        const currentMtime = statSync(absPath).mtimeMs;
        const cached = this.getQueryEntry(store, filePath);
        if (cached && cached.fileMtime === currentMtime) {
          this.queryHits++;
          this.incrementQueryHitCount(store, filePath);
          return memResult;
        }
      } catch {
      }
      // Stale entry, missing DB row, or stat failure (e.g. file deleted):
      // evict the memory copy and fall through to the DB path.
      this.queryLRU.delete(filePath);
    }
    const entry = this.getQueryEntry(store, filePath);
    if (!entry) {
      this.queryMisses++;
      return null;
    }
    try {
      const currentMtime = statSync(absPath).mtimeMs;
      if (entry.fileMtime !== currentMtime) {
        // File changed since caching: purge every entry for it.
        this.invalidateFile(store, filePath);
        this.queryMisses++;
        return null;
      }
    } catch {
      // File unreadable/deleted — treat the same as a stale entry.
      this.invalidateFile(store, filePath);
      this.queryMisses++;
      return null;
    }
    // Fresh DB hit: promote back into the LRU for subsequent lookups.
    this.queryLRU.set(filePath, entry.result);
    this.queryHits++;
    this.incrementQueryHitCount(store, filePath);
    return entry.result;
  }
  /**
   * Store a resolved context packet for a file.
   * Silently skips caching when the file cannot be stat'ed (no mtime to
   * validate against later).
   */
  setQuery(store, filePath, absPath, result) {
    let mtime = 0;
    try {
      mtime = statSync(absPath).mtimeMs;
    } catch {
      return;
    }
    // `key` and `file_path` are both the project-relative path; the extra
    // column backs the idx_query_cache_file index used by invalidateFile().
    store.runSql(
      `INSERT OR REPLACE INTO query_cache (key, result, file_path, file_mtime, created_at, hit_count)
       VALUES (?, ?, ?, ?, ?, 0)`,
      [filePath, result, filePath, mtime, Date.now()]
    );
    this.queryLRU.set(filePath, result);
  }
  // ─── Pattern Cache (structural queries) ─────────────────────────
  /**
   * Get a cached answer for a structural query pattern. Entries are valid
   * only while `graphVersion` matches the version recorded at cache time.
   */
  getPattern(store, pattern, graphVersion) {
    const memResult = this.patternLRU.get(pattern);
    if (memResult !== void 0) {
      if (memResult.graphVersion === graphVersion) {
        this.patternHits++;
        // Best-effort persisted hit counter; a failure here must not
        // invalidate a perfectly good memory hit.
        try {
          store.runSql(
            "UPDATE pattern_cache SET hit_count = hit_count + 1 WHERE pattern = ?",
            [pattern]
          );
        } catch {
        }
        return memResult.result;
      }
      // Graph moved on: evict and fall through to the DB row.
      this.patternLRU.delete(pattern);
    }
    const stmt = store.prepare(
      "SELECT result, graph_version, hit_count FROM pattern_cache WHERE pattern = ?"
    );
    stmt.bind([pattern]);
    if (stmt.step()) {
      const row = stmt.getAsObject();
      stmt.free();
      const cachedVersion = row.graph_version;
      if (cachedVersion !== graphVersion) {
        // Persisted row is stale too — delete it and report a miss.
        store.runSql("DELETE FROM pattern_cache WHERE pattern = ?", [pattern]);
        this.patternMisses++;
        return null;
      }
      const result = row.result;
      // Promote into the LRU alongside the version it was validated for.
      this.patternLRU.set(pattern, { result, graphVersion: cachedVersion });
      this.patternHits++;
      store.runSql(
        "UPDATE pattern_cache SET hit_count = hit_count + 1 WHERE pattern = ?",
        [pattern]
      );
      return result;
    }
    stmt.free();
    this.patternMisses++;
    return null;
  }
  /**
   * Cache a structural query result tagged with the current graph version.
   */
  setPattern(store, pattern, result, graphVersion) {
    store.runSql(
      `INSERT OR REPLACE INTO pattern_cache (pattern, result, graph_version, hit_count)
       VALUES (?, ?, ?, 0)`,
      [pattern, result, graphVersion]
    );
    this.patternLRU.set(pattern, { result, graphVersion });
  }
  // ─── Hot File Cache ─────────────────────────────────────────────
  /**
   * Pre-warm hot files from access frequency data.
   * Call at SessionStart to eliminate first-hit latency.
   *
   * @param topN number of most-hit rows to consider (default 20)
   * @returns how many entries were actually loaded (stale rows are skipped,
   *          but NOT deleted here — getQuery handles invalidation lazily)
   */
  warmHotFiles(store, projectRoot, topN = 20) {
    const stmt = store.prepare(
      "SELECT file_path, result FROM query_cache ORDER BY hit_count DESC LIMIT ?"
    );
    stmt.bind([topN]);
    let count = 0;
    while (stmt.step()) {
      const row = stmt.getAsObject();
      const filePath = row.file_path;
      const result = row.result;
      try {
        const absPath = join(projectRoot, filePath);
        const currentMtime = statSync(absPath).mtimeMs;
        // Re-read the full row to compare the stored mtime before trusting it.
        const entry = this.getQueryEntry(store, filePath);
        if (entry && entry.fileMtime === currentMtime) {
          this.queryLRU.set(filePath, result);
          this.hotFiles.add(filePath);
          count++;
        }
      } catch {
        // Unreadable file — just skip it.
      }
    }
    stmt.free();
    return count;
  }
  // ─── Invalidation ───────────────────────────────────────────────
  /** Invalidate all cache entries for a specific file. */
  invalidateFile(store, filePath) {
    store.runSql("DELETE FROM query_cache WHERE file_path = ?", [filePath]);
    this.queryLRU.delete(filePath);
    this.hotFiles.delete(filePath);
  }
  /** Invalidate all pattern cache entries (on graph mutation). */
  invalidatePatterns(store) {
    store.runSql("DELETE FROM pattern_cache");
    this.patternLRU.clear();
  }
  /** Clear all caches completely, including session hit/miss counters. */
  clearAll(store) {
    store.runSql("DELETE FROM query_cache");
    store.runSql("DELETE FROM pattern_cache");
    this.queryLRU.clear();
    this.patternLRU.clear();
    this.hotFiles.clear();
    this.queryHits = 0;
    this.queryMisses = 0;
    this.patternHits = 0;
    this.patternMisses = 0;
  }
  // ─── Stats ──────────────────────────────────────────────────────
  /**
   * Snapshot cache statistics. Hit counts take the max of the session
   * counters and the persisted hit_count sums (persisted counts survive
   * restarts); misses are session-only, so hitRate is an approximation.
   */
  getStats(store) {
    let queryEntries = 0;
    let patternEntries = 0;
    let persistedQueryHits = 0;
    let persistedPatternHits = 0;
    try {
      const stmt1 = store.prepare(
        "SELECT COUNT(*) as cnt, COALESCE(SUM(hit_count), 0) as hits FROM query_cache"
      );
      if (stmt1.step()) {
        const row = stmt1.getAsObject();
        queryEntries = row.cnt;
        persistedQueryHits = row.hits;
      }
      stmt1.free();
    } catch {
    }
    try {
      const stmt2 = store.prepare(
        "SELECT COUNT(*) as cnt, COALESCE(SUM(hit_count), 0) as hits FROM pattern_cache"
      );
      if (stmt2.step()) {
        const row = stmt2.getAsObject();
        patternEntries = row.cnt;
        persistedPatternHits = row.hits;
      }
      stmt2.free();
    } catch {
    }
    const queryHits = Math.max(this.queryHits, persistedQueryHits);
    const patternHits = Math.max(this.patternHits, persistedPatternHits);
    const totalHits = queryHits + patternHits;
    const totalMisses = this.queryMisses + this.patternMisses;
    const total = totalHits + totalMisses;
    return {
      queryEntries,
      queryHits,
      queryMisses: this.queryMisses,
      patternEntries,
      patternHits,
      patternMisses: this.patternMisses,
      hotFileCount: this.hotFiles.size,
      totalHits,
      totalMisses,
      hitRate: total > 0 ? totalHits / total : 0
    };
  }
  // ─── Private helpers ────────────────────────────────────────────
  /**
   * Fetch the persisted query_cache row for `filePath`, mapped to camelCase,
   * or null when absent or on any store error (best-effort read).
   */
  getQueryEntry(store, filePath) {
    try {
      const stmt = store.prepare(
        "SELECT * FROM query_cache WHERE key = ?"
      );
      stmt.bind([filePath]);
      if (stmt.step()) {
        const row = stmt.getAsObject();
        stmt.free();
        return {
          key: row.key,
          result: row.result,
          filePath: row.file_path,
          fileMtime: row.file_mtime,
          createdAt: row.created_at,
          hitCount: row.hit_count
        };
      }
      stmt.free();
    } catch {
    }
    return null;
  }
  /** Best-effort bump of the persisted hit counter; failures are ignored. */
  incrementQueryHitCount(store, filePath) {
    try {
      store.runSql(
        "UPDATE query_cache SET hit_count = hit_count + 1 WHERE key = ?",
        [filePath]
      );
    } catch {
    }
  }
};
323
+ var _instance = null;
324
+ function getContextCache() {
325
+ if (!_instance) {
326
+ _instance = new ContextCache();
327
+ }
328
+ return _instance;
329
+ }
330
+ function _resetContextCache() {
331
+ _instance = null;
332
+ }
333
+
334
+ export {
335
+ ContextCache,
336
+ getContextCache,
337
+ _resetContextCache
338
+ };
@@ -0,0 +1,59 @@
1
+ // src/intelligence/hook-log.ts
2
+ import {
3
+ appendFileSync,
4
+ existsSync,
5
+ renameSync,
6
+ statSync,
7
+ readFileSync
8
+ } from "fs";
9
+ import { join } from "path";
10
+ var HOOK_LOG_MAX_BYTES = 10 * 1024 * 1024;
11
+ var LOG_FILENAME = "hook-log.jsonl";
12
+ var LOG_ROTATED_FILENAME = "hook-log.jsonl.1";
13
+ function logHookEvent(projectRoot, entry) {
14
+ if (!projectRoot) return;
15
+ try {
16
+ const logPath = join(projectRoot, ".engram", LOG_FILENAME);
17
+ rotateIfNeeded(projectRoot);
18
+ const line = JSON.stringify({
19
+ ts: (/* @__PURE__ */ new Date()).toISOString(),
20
+ ...entry
21
+ }) + "\n";
22
+ appendFileSync(logPath, line);
23
+ } catch {
24
+ }
25
+ }
26
+ function rotateIfNeeded(projectRoot) {
27
+ try {
28
+ const logPath = join(projectRoot, ".engram", LOG_FILENAME);
29
+ if (!existsSync(logPath)) return;
30
+ const size = statSync(logPath).size;
31
+ if (size < HOOK_LOG_MAX_BYTES) return;
32
+ const rotatedPath = join(projectRoot, ".engram", LOG_ROTATED_FILENAME);
33
+ renameSync(logPath, rotatedPath);
34
+ } catch {
35
+ }
36
+ }
37
+ function readHookLog(projectRoot) {
38
+ try {
39
+ const logPath = join(projectRoot, ".engram", LOG_FILENAME);
40
+ if (!existsSync(logPath)) return [];
41
+ const raw = readFileSync(logPath, "utf-8");
42
+ const entries = [];
43
+ for (const line of raw.split("\n")) {
44
+ if (!line.trim()) continue;
45
+ try {
46
+ entries.push(JSON.parse(line));
47
+ } catch {
48
+ }
49
+ }
50
+ return entries;
51
+ } catch {
52
+ return [];
53
+ }
54
+ }
55
+
56
+ export {
57
+ logHookEvent,
58
+ readHookLog
59
+ };
@@ -1,6 +1,19 @@
1
1
  // src/db/migrate.ts
2
2
  import { existsSync, copyFileSync } from "fs";
3
- var CURRENT_SCHEMA_VERSION = 6;
3
// Bumped to 7 for v2.x: adds the query_cache / pattern_cache tables.
var CURRENT_SCHEMA_VERSION = 7;
// Reverse migrations keyed by the version they undo: executing
// DOWN_MIGRATIONS[v] takes the schema from v back to v-1. `SELECT 1;`
// entries are deliberate no-op placeholders for versions whose feature
// changed no SQL schema.
var DOWN_MIGRATIONS = {
  7: `DROP TABLE IF EXISTS query_cache; DROP TABLE IF EXISTS pattern_cache;`,
  6: `DROP TABLE IF EXISTS engram_config;`,
  5: `DROP TABLE IF EXISTS provider_cache;`,
  4: `SELECT 1;`,
  // hook-log is JSONL, no SQL rollback
  3: `SELECT 1;`,
  // skills miner — no schema change
  2: `SELECT 1;`,
  // mistake memory — no schema change
  // 1 → 0 drops the entire schema. We require `engram init` for that.
  1: `DROP TABLE IF EXISTS stats; DROP TABLE IF EXISTS edges; DROP TABLE IF EXISTS nodes;`
};
4
17
  var MIGRATIONS = {
5
18
  // v0.1.0: Initial schema
6
19
  1: `
@@ -55,7 +68,24 @@ CREATE TABLE IF NOT EXISTS engram_config (
55
68
  key TEXT PRIMARY KEY,
56
69
  value TEXT NOT NULL,
57
70
  updated_at INTEGER NOT NULL
58
- );`
71
+ );`,
72
+ // v2.0.0: Memory cache tables — query_cache + pattern_cache
73
+ 7: `
74
+ CREATE TABLE IF NOT EXISTS query_cache (
75
+ key TEXT PRIMARY KEY,
76
+ result TEXT NOT NULL,
77
+ file_path TEXT NOT NULL,
78
+ file_mtime REAL NOT NULL,
79
+ created_at INTEGER NOT NULL,
80
+ hit_count INTEGER NOT NULL DEFAULT 0
81
+ );
82
+ CREATE TABLE IF NOT EXISTS pattern_cache (
83
+ pattern TEXT PRIMARY KEY,
84
+ result TEXT NOT NULL,
85
+ graph_version INTEGER NOT NULL,
86
+ hit_count INTEGER NOT NULL DEFAULT 0
87
+ );
88
+ CREATE INDEX IF NOT EXISTS idx_query_cache_file ON query_cache(file_path);`
59
89
  };
60
90
  function getSchemaVersion(db) {
61
91
  try {
@@ -96,9 +126,61 @@ function runMigrations(db, dbPath) {
96
126
  db.run(`INSERT INTO schema_version (version) VALUES (?)`, [CURRENT_SCHEMA_VERSION]);
97
127
  return { fromVersion, toVersion: CURRENT_SCHEMA_VERSION, migrationsRun, backedUp };
98
128
  }
129
/**
 * Roll the schema back from the current version to `targetVersion`,
 * applying DOWN_MIGRATIONS one step at a time (v, v-1, …, targetVersion+1).
 *
 * @param db            open database handle (exposes run/exec)
 * @param dbPath        on-disk path, used for the pre-rollback backup copy
 * @param targetVersion version to land on; 0 drops schema_version entirely
 * @returns {fromVersion, toVersion, migrationsReverted, backedUp}
 * @throws Error when targetVersion is outside 0..CURRENT_SCHEMA_VERSION or
 *         is ahead of the current version (that would be a forward migrate)
 */
function rollback(db, dbPath, targetVersion) {
  const fromVersion = getSchemaVersion(db);
  if (targetVersion < 0 || targetVersion > CURRENT_SCHEMA_VERSION) {
    throw new Error(
      `Invalid target version ${targetVersion}. Must be 0..${CURRENT_SCHEMA_VERSION}.`
    );
  }
  if (targetVersion > fromVersion) {
    throw new Error(
      `Cannot roll back to v${targetVersion}: current is v${fromVersion}. Use 'engram db migrate' to move forward.`
    );
  }
  // Already there — report a no-op rather than erroring.
  if (targetVersion === fromVersion) {
    return {
      fromVersion,
      toVersion: fromVersion,
      migrationsReverted: 0,
      backedUp: false
    };
  }
  // Best-effort safety copy before any destructive DDL runs.
  let backedUp = false;
  if (existsSync(dbPath)) {
    const backupPath = `${dbPath}.bak-v${fromVersion}`;
    try {
      copyFileSync(dbPath, backupPath);
      backedUp = true;
    } catch {
    }
  }
  // Alias retained from the bundled TS cast — same object as `db`.
  const dbExec = db;
  let migrationsReverted = 0;
  // Step downward; versions with no DOWN entry are skipped (and not counted).
  for (let v = fromVersion; v > targetVersion; v--) {
    const sql = DOWN_MIGRATIONS[v];
    if (sql) {
      dbExec.exec(sql);
      migrationsReverted++;
    }
  }
  if (targetVersion === 0) {
    // Full teardown: removing schema_version marks the DB as uninitialized.
    dbExec.exec(`DROP TABLE IF EXISTS schema_version`);
  } else {
    // Record the new version as the sole schema_version row.
    dbExec.exec(`DELETE FROM schema_version`);
    db.run(`INSERT INTO schema_version (version) VALUES (?)`, [targetVersion]);
  }
  return {
    fromVersion,
    toVersion: targetVersion,
    migrationsReverted,
    backedUp
  };
}
99
180
 
100
181
  export {
101
182
  CURRENT_SCHEMA_VERSION,
102
183
  getSchemaVersion,
103
- runMigrations
184
+ runMigrations,
185
+ rollback
104
186
  };