@polka-codes/cli-shared 0.10.1 → 0.10.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/config.d.ts +13 -0
- package/dist/config.js +202 -0
- package/dist/config.js.map +1 -0
- package/dist/config.parameters.test.d.ts +1 -0
- package/dist/config.parameters.test.js +240 -0
- package/dist/config.parameters.test.js.map +1 -0
- package/dist/config.rules.test.d.ts +1 -0
- package/dist/config.rules.test.js +92 -0
- package/dist/config.rules.test.js.map +1 -0
- package/dist/config.test.d.ts +1 -0
- package/dist/config.test.js +311 -0
- package/dist/config.test.js.map +1 -0
- package/dist/index.d.ts +6 -0
- package/dist/index.js +694 -70817
- package/dist/index.js.map +1 -0
- package/dist/memory-manager.d.ts +52 -0
- package/dist/memory-manager.js +76 -0
- package/dist/memory-manager.js.map +1 -0
- package/dist/project-scope.d.ts +10 -0
- package/dist/project-scope.js +67 -0
- package/dist/project-scope.js.map +1 -0
- package/dist/provider.d.ts +32 -0
- package/dist/provider.js +366 -0
- package/dist/provider.js.map +1 -0
- package/dist/provider.test.d.ts +1 -0
- package/dist/provider.test.js +21 -0
- package/dist/provider.test.js.map +1 -0
- package/dist/sqlite-memory-store.d.ts +112 -0
- package/dist/sqlite-memory-store.js +919 -0
- package/dist/sqlite-memory-store.js.map +1 -0
- package/dist/sqlite-memory-store.test.d.ts +1 -0
- package/dist/sqlite-memory-store.test.js +661 -0
- package/dist/sqlite-memory-store.test.js.map +1 -0
- package/dist/utils/__tests__/parameterSimplifier.test.d.ts +1 -0
- package/dist/utils/__tests__/parameterSimplifier.test.js +137 -0
- package/dist/utils/__tests__/parameterSimplifier.test.js.map +1 -0
- package/dist/utils/checkRipgrep.d.ts +5 -0
- package/dist/utils/checkRipgrep.js +22 -0
- package/dist/utils/checkRipgrep.js.map +1 -0
- package/dist/utils/eventHandler.d.ts +11 -0
- package/dist/utils/eventHandler.js +196 -0
- package/dist/utils/eventHandler.js.map +1 -0
- package/dist/utils/eventHandler.test.d.ts +1 -0
- package/dist/utils/eventHandler.test.js +31 -0
- package/dist/utils/eventHandler.test.js.map +1 -0
- package/dist/utils/index.d.ts +6 -0
- package/dist/utils/index.js +7 -0
- package/dist/utils/index.js.map +1 -0
- package/dist/utils/listFiles.d.ts +12 -0
- package/dist/utils/listFiles.js +136 -0
- package/dist/utils/listFiles.js.map +1 -0
- package/dist/utils/listFiles.test.d.ts +1 -0
- package/dist/utils/listFiles.test.js +64 -0
- package/dist/utils/listFiles.test.js.map +1 -0
- package/dist/utils/parameterSimplifier.d.ts +1 -0
- package/dist/utils/parameterSimplifier.js +65 -0
- package/dist/utils/parameterSimplifier.js.map +1 -0
- package/dist/utils/readMultiline.d.ts +1 -0
- package/dist/utils/readMultiline.js +19 -0
- package/dist/utils/readMultiline.js.map +1 -0
- package/dist/utils/search.constants.d.ts +7 -0
- package/dist/utils/search.constants.js +8 -0
- package/dist/utils/search.constants.js.map +1 -0
- package/dist/utils/searchFiles.d.ts +12 -0
- package/dist/utils/searchFiles.js +72 -0
- package/dist/utils/searchFiles.js.map +1 -0
- package/dist/utils/searchFiles.test.d.ts +1 -0
- package/dist/utils/searchFiles.test.js +140 -0
- package/dist/utils/searchFiles.test.js.map +1 -0
- package/package.json +2 -2
|
@@ -0,0 +1,919 @@
|
|
|
1
|
+
import { AsyncLocalStorage } from 'node:async_hooks';
|
|
2
|
+
import { randomUUID } from 'node:crypto';
|
|
3
|
+
import { existsSync } from 'node:fs';
|
|
4
|
+
import { mkdir, readdir, readFile, rename, unlink, writeFile } from 'node:fs/promises';
|
|
5
|
+
import { basename, dirname, resolve } from 'node:path';
|
|
6
|
+
import { fileURLToPath } from 'node:url';
|
|
7
|
+
import { DEFAULT_MEMORY_CONFIG, resolveHomePath } from '@polka-codes/core';
|
|
8
|
+
import initSqlJs from 'sql.js';
|
|
9
|
+
/**
 * Simple file lock for cross-process synchronization.
 * Uses a lockfile (created with the exclusive 'wx' flag) containing the
 * holder's PID and acquisition timestamp to prevent concurrent writes.
 * Stale, invalid, or corrupt lock files are broken by renaming them aside
 * to a timestamped sidecar file which is garbage-collected later.
 */
class FileLock {
    lockfilePath;
    static LOCK_TIMEOUT = 30000; // 30 seconds max lock time
    static CLEANUP_AGE = 600000; // 10 minutes - cleanup old lock files
    static lastCleanupTime = 0;
    static CLEANUP_THROTTLE = 60000; // Throttle cleanup to once per minute
    constructor(dbPath) {
        this.lockfilePath = `${dbPath}.lock`;
    }
    /**
     * Reset the cleanup throttle. For testing purposes only.
     */
    static resetCleanupThrottle() {
        FileLock.lastCleanupTime = 0;
    }
    /**
     * Clean up old lock files (.released.*, .stale.*, .invalid.*, .corrupt.*)
     * Only removes files older than maxAge (CLEANUP_AGE, 10 minutes by default).
     * Safe to call multiple times concurrently; throttled to at most once per
     * CLEANUP_THROTTLE to avoid performance impact.
     * @param dbPath - path of the database the lock files belong to
     * @param maxAge - minimum age in ms before a sidecar file is deleted
     * @param force - skip throttling and force cleanup (for testing)
     */
    static async cleanupOldLockFiles(dbPath, maxAge = FileLock.CLEANUP_AGE, force = false) {
        // Throttle cleanup to avoid performance impact on high-throughput scenarios
        const now = Date.now();
        if (!force && now - FileLock.lastCleanupTime < FileLock.CLEANUP_THROTTLE) {
            return;
        }
        FileLock.lastCleanupTime = now;
        try {
            const lockDir = dirname(dbPath);
            const dbBaseName = basename(dbPath);
            const files = await readdir(lockDir);
            // Fix: reuse the `now` captured above instead of re-declaring a
            // shadowed `const now = Date.now()` inside this block.
            for (const file of files) {
                // Match lock file patterns: {dbBaseName}.lock.released.*, etc.
                // Use strict prefix check to avoid matching db.sqlite-wal when looking for db.sqlite.lock.*
                if (!file.startsWith(`${dbBaseName}.lock.`)) {
                    continue;
                }
                const match = file.match(/\.lock\.(released|stale|invalid|corrupt)\.(\d+)$/);
                if (!match) {
                    continue;
                }
                const filePath = resolve(lockDir, file);
                const timestamp = Number.parseInt(match[2], 10);
                const age = now - timestamp;
                // Only remove files older than maxAge
                if (age > maxAge) {
                    try {
                        await unlink(filePath);
                    }
                    catch (error) {
                        // ENOENT just means another process deleted it first
                        const errorCode = error?.code;
                        if (errorCode !== 'ENOENT') {
                            console.warn(`[FileLock] Failed to delete old lock file ${file}: ${error instanceof Error ? error.message : String(error)}`);
                        }
                    }
                }
            }
        }
        catch (error) {
            // Silently ignore cleanup errors - this is a maintenance task, not critical
            console.debug(`[FileLock] Cleanup encountered an error: ${error instanceof Error ? error.message : String(error)}`);
        }
    }
    /**
     * Rename a bad lock file aside so acquisition can be retried.
     * @param suffix - sidecar suffix describing why the lock was broken
     * @returns true when the lock was broken (or already gone), false when
     *          the rename failed for an unexpected reason
     */
    async breakLock(suffix) {
        try {
            await rename(this.lockfilePath, `${this.lockfilePath}.${suffix}.${Date.now()}`);
            return true;
        }
        catch (error) {
            // Fix: another process may have broken or released the lock between
            // our read and this rename; treat ENOENT as success instead of
            // letting it escape acquire() and fail the caller spuriously.
            if (error?.code === 'ENOENT') {
                return true;
            }
            console.warn(`[FileLock] Failed to break lock: ${error instanceof Error ? error.message : String(error)}`);
            return false;
        }
    }
    /**
     * Try to acquire lock with retries
     * @param retries - number of attempts before giving up
     * @param delay - milliseconds to wait between attempts
     * @throws Error if lock cannot be acquired after retries
     */
    async acquire(retries = 10, delay = 100) {
        for (let i = 0; i < retries; i++) {
            try {
                // Try to create lockfile exclusively (fails if exists)
                const lockData = JSON.stringify({
                    pid: process.pid,
                    acquiredAt: Date.now(),
                });
                await writeFile(this.lockfilePath, lockData, {
                    flag: 'wx', // Exclusive create - fails if file exists
                    mode: 0o600,
                });
                return; // Lock acquired successfully
            }
            catch (error) {
                const errorCode = error?.code;
                if (errorCode !== 'EEXIST') {
                    // Other error (permissions, etc.)
                    throw error;
                }
                // Lock file exists, check if it's stale
                try {
                    const lockContent = await readFile(this.lockfilePath, 'utf-8');
                    const lockData = JSON.parse(lockContent);
                    // Validate lock data structure
                    if (!lockData || typeof lockData.acquiredAt !== 'number' || lockData.acquiredAt <= 0) {
                        console.warn(`[FileLock] Lock file has invalid acquiredAt, treating as stale`);
                        if (await this.breakLock('invalid')) {
                            continue; // Retry acquisition
                        }
                    }
                    else {
                        const lockAge = Date.now() - lockData.acquiredAt;
                        // If lock is older than timeout, assume stale and break it
                        if (lockAge > FileLock.LOCK_TIMEOUT) {
                            console.warn(`[FileLock] Breaking stale lock (age: ${lockAge}ms)`);
                            if (await this.breakLock('stale')) {
                                continue; // Retry acquisition
                            }
                        }
                    }
                }
                catch (readError) {
                    // If JSON parse fails, lock file is corrupted - treat as stale
                    if (readError instanceof SyntaxError) {
                        console.warn(`[FileLock] Lock file contains invalid JSON, treating as stale`);
                        if (await this.breakLock('corrupt')) {
                            continue; // Retry acquisition
                        }
                    }
                    // Other read errors - assume lock is valid
                }
                // Lock is held by another process, wait and retry
                if (i < retries - 1) {
                    await new Promise((resolve) => setTimeout(resolve, delay));
                }
                else {
                    throw new Error(`Cannot acquire lock after ${retries} retries (file: ${this.lockfilePath})`);
                }
            }
        }
    }
    /**
     * Release the lock by renaming the lockfile to a timestamped .released
     * sidecar (rename is atomic; the sidecar is garbage-collected later).
     * Never throws - a missing lockfile means another process cleaned it up.
     */
    async release() {
        try {
            await rename(this.lockfilePath, `${this.lockfilePath}.released.${Date.now()}`);
            // Trigger cleanup in the background after releasing lock
            // This is fire-and-forget - we don't await the result
            // Remove .lock suffix (5 chars) safely using slice
            const dbPath = this.lockfilePath.slice(0, -5);
            FileLock.cleanupOldLockFiles(dbPath).catch(() => {
                // Ignore errors
            });
        }
        catch (error) {
            // Fix: use optional chaining, consistent with the other error sites
            const errorCode = error?.code;
            if (errorCode !== 'ENOENT') {
                // Log but don't throw - lock might have been cleaned up by another process
                console.warn(`[FileLock] Error releasing lock: ${error instanceof Error ? error.message : String(error)}`);
            }
        }
    }
}
|
|
166
|
+
/**
 * Reentrant mutex for serializing async critical sections.
 * The same owner token may re-acquire without deadlocking; each successful
 * acquire() returns a release function that must be invoked exactly once.
 */
class ReentrantMutex {
    queue = [];
    locked = false;
    lockCount = 0;
    owner = null;
    /**
     * Acquire the mutex on behalf of `owner`.
     * @returns a release function bound to this owner
     */
    async acquire(owner) {
        // Reentrant fast path: the current holder just bumps its hold count.
        if (this.locked && this.owner === owner) {
            this.lockCount += 1;
            return () => this.release(owner);
        }
        // Park until the current holder fully releases; each loop iteration
        // pushes a wake-up callback that release() will invoke.
        while (this.locked) {
            await new Promise((wake) => this.queue.push(wake));
        }
        this.locked = true;
        this.owner = owner;
        this.lockCount = 1;
        return () => this.release(owner);
    }
    /**
     * Release one hold on the mutex; the lock is freed only when the
     * owner's hold count reaches zero.
     */
    release(owner) {
        if (this.owner !== owner) {
            return; // Only the current holder may release.
        }
        this.lockCount -= 1;
        if (this.lockCount !== 0) {
            return; // Still held reentrantly.
        }
        this.locked = false;
        this.owner = null;
        // Wake the next parked waiter, if any.
        this.queue.shift()?.();
    }
}
|
|
209
|
+
let SqlJs = null;
let SqlJsInitPromise = null;
/**
 * Initialize sql.js WASM module (singleton)
 *
 * The WASM file location is resolved by trying multiple paths in order:
 * 1. Same directory as this module (when CLI is installed: dist/sql-wasm.wasm)
 * 2. Parent directory's dist (when running from monorepo source)
 * 3. node_modules/sql.js/dist/sql-wasm.wasm (development mode)
 * 4. Absolute path from cwd (fallback)
 *
 * @returns the initialized sql.js module
 * @throws whatever initSqlJs rejects with; a failed attempt is not cached,
 *         so a later call can retry initialization
 */
async function getSqlJs() {
    if (SqlJs) {
        return SqlJs;
    }
    if (SqlJsInitPromise) {
        return SqlJsInitPromise;
    }
    // Build a list of candidate paths to try
    const moduleDir = dirname(fileURLToPath(import.meta.url));
    const candidates = [
        // Same directory as the bundled module (published CLI: dist/sql-wasm.wasm)
        resolve(moduleDir, 'sql-wasm.wasm'),
        // Development: CLI's dist directory from source
        resolve(moduleDir, '..', 'dist', 'sql-wasm.wasm'),
        // Development: sql.js in local node_modules
        resolve(moduleDir, '..', 'node_modules', 'sql.js', 'dist', 'sql-wasm.wasm'),
        // Workspace monorepo: node_modules at root
        resolve(moduleDir, '..', '..', '..', 'node_modules', 'sql.js', 'dist', 'sql-wasm.wasm'),
        // Absolute path from cwd (for when CLI is run from project root)
        resolve(process.cwd(), 'node_modules', 'sql.js', 'dist', 'sql-wasm.wasm'),
    ];
    SqlJsInitPromise = initSqlJs({
        locateFile: (file) => {
            // Try each candidate path
            for (const candidate of candidates) {
                if (existsSync(candidate)) {
                    return candidate;
                }
            }
            // Fall back to default behavior (node_modules resolution)
            return file;
        },
    });
    try {
        SqlJs = await SqlJsInitPromise;
    }
    catch (error) {
        // Fix: a rejected promise used to stay cached forever, so every later
        // call returned the same stale rejection. Clear it so init can retry.
        SqlJsInitPromise = null;
        throw error;
    }
    return SqlJs;
}
|
|
256
|
+
/**
 * SQLite Memory Store Implementation
 *
 * This is a concrete implementation of IMemoryStore using SQLite as the backend.
 * It can be used in CLI environments where local file-based storage is appropriate.
 *
 * Uses sql.js (WebAssembly port of SQLite) for consistent behavior across all runtimes.
 */
// AsyncLocalStorage for tracking transaction owner across reentrant calls
const transactionOwnerStorage = new AsyncLocalStorage();
export class SQLiteMemoryStore {
    db = null; // sql.js Database handle; null until initializeDatabase() runs
    dbPromise = null; // in-flight initialization promise (singleton guard)
    config; // memory configuration; config.path overrides the default db location
    currentScope; // scope string used for reads/writes — 'global' or 'project:...'
    inTransaction = false; // Track if we're in a transaction
    transactionMutex = new ReentrantMutex(); // Serialize transactions
    fileLock; // Cross-process file lock (initialized in getFileLock())
    /**
     * Reset the lock file cleanup throttle. For testing purposes only.
     */
    static resetCleanupThrottle() {
        FileLock.resetCleanupThrottle();
    }
    /**
     * Get the configured database path, or default if not set.
     * NOTE(review): uses `||`, so an empty-string path also falls back to the
     * default — presumably intentional; confirm against config validation.
     */
    getDbPath() {
        return this.config.path || DEFAULT_MEMORY_CONFIG.path;
    }
    /**
     * Get lockfile instance for database path.
     * Lazily constructed so the path is resolved only on first use.
     */
    getFileLock() {
        if (!this.fileLock) {
            const dbPath = this.resolvePath(this.getDbPath());
            this.fileLock = new FileLock(dbPath);
        }
        return this.fileLock;
    }
    // Whitelists for validation — presumably consumed by buildQuery() to keep
    // caller-supplied sort/filter options out of raw SQL; confirm at call sites.
    static SORT_COLUMNS = {
        created: 'created_at',
        updated: 'updated_at',
        accessed: 'last_accessed',
        name: 'name',
    };
    static ALLOWED_SORT_ORDERS = ['asc', 'desc'];
    static ALLOWED_PRIORITIES = ['low', 'medium', 'high', 'critical'];
    /**
     * @param config - memory configuration (at minimum an optional `path`)
     * @param scope - scope string this store operates in ('global' or 'project:...')
     */
    constructor(config, scope) {
        this.config = config;
        this.currentScope = scope;
    }
|
|
309
|
+
    /**
     * Initialize database connection and schema.
     *
     * Flow: ensure the parent directory exists -> fire-and-forget lock-file
     * cleanup -> read any existing db file under the cross-process lock ->
     * construct the sql.js Database -> pragmas, integrity check, schema.
     * On failure with an existing file, the file is renamed to a
     * `.corrupted.<ts>` backup and initialization is retried once.
     *
     * @returns the sql.js Database instance (also cached on this.db)
     * @throws read errors other than ENOENT, backup failures, or the original
     *         initialization error when no file exists to recover from
     */
    async initializeDatabase() {
        // Use promise singleton pattern to prevent race conditions
        if (this.dbPromise) {
            return this.dbPromise;
        }
        this.dbPromise = (async () => {
            if (this.db) {
                return this.db;
            }
            const dbPath = this.resolvePath(this.getDbPath());
            try {
                // Create directory if needed (0o700: owner-only access)
                const dir = dirname(dbPath);
                if (!existsSync(dir)) {
                    await mkdir(dir, { recursive: true, mode: 0o700 });
                }
                // Trigger cleanup of old lock files in the background
                // This is fire-and-forget - we don't await the result
                FileLock.cleanupOldLockFiles(dbPath).catch(() => {
                    // Ignore errors
                });
                // Load existing database data or create new one
                let dbData;
                if (existsSync(dbPath)) {
                    // Acquire lock before reading to prevent concurrent read/write issues
                    const lock = this.getFileLock();
                    await lock.acquire();
                    try {
                        try {
                            dbData = await readFile(dbPath);
                            // Validate SQLite header (first 16 bytes should be "SQLite format 3\0")
                            if (dbData.length >= 16) {
                                const header = String.fromCharCode(...dbData.subarray(0, 15));
                                if (header !== 'SQLite format 3') {
                                    // Wrong magic bytes: fall through with dbData
                                    // undefined so sql.js creates a fresh database
                                    console.warn('[SQLiteMemoryStore] Invalid SQLite database header, will recreate');
                                    dbData = undefined;
                                }
                            }
                        }
                        catch (error) {
                            // Only ignore ENOENT (file not found) errors - for all other errors, rethrow
                            // to prevent data loss from overwriting an existing unreadable database
                            const errorCode = error?.code;
                            if (errorCode === 'ENOENT') {
                                // File was deleted between existsSync and readFile - treat as new database
                                dbData = undefined;
                            }
                            else {
                                throw new Error(`Failed to read database file at ${dbPath}: ${error instanceof Error ? error.message : String(error)}`);
                            }
                        }
                    }
                    finally {
                        // Always release the cross-process lock, even on read failure
                        await lock.release();
                    }
                }
                // Initialize sql.js and create database
                // (dbData === undefined means a brand-new empty database)
                const SqlJs = await getSqlJs();
                const db = new SqlJs.Database(dbData);
                // Configure pragmas
                this.configurePragmas(db);
                // Check integrity and initialize schema
                this.checkIntegrity(db);
                this.initializeSchema(db);
                this.db = db;
                return db;
            }
            catch (error) {
                console.error('[SQLiteMemoryStore] Initialization failed:', error);
                // Recovery: backup corrupted database, then retry once with a
                // clean slate (the recursive call sees no existing file)
                if (existsSync(dbPath)) {
                    const backupPath = `${dbPath}.corrupted.${Date.now()}`;
                    console.warn(`[SQLiteMemoryStore] Backing up corrupted database to: ${backupPath}`);
                    try {
                        await rename(dbPath, backupPath);
                    }
                    catch (backupError) {
                        console.error('[SQLiteMemoryStore] Failed to backup corrupted database:', backupError);
                        this.dbPromise = null;
                        throw backupError;
                    }
                    // Clear promise and retry once
                    this.dbPromise = null;
                    return this.initializeDatabase();
                }
                // No file to recover from: clear the singleton so a later call
                // can retry, and surface the original error
                this.dbPromise = null;
                throw error;
            }
        })();
        return this.dbPromise;
    }
|
|
403
|
+
/**
|
|
404
|
+
* Persist database to disk using atomic write with file locking
|
|
405
|
+
*/
|
|
406
|
+
async saveDatabase() {
|
|
407
|
+
if (!this.db) {
|
|
408
|
+
return;
|
|
409
|
+
}
|
|
410
|
+
const lock = this.getFileLock();
|
|
411
|
+
await lock.acquire();
|
|
412
|
+
try {
|
|
413
|
+
const dbPath = this.resolvePath(this.getDbPath());
|
|
414
|
+
const tempPath = `${dbPath}.tmp`;
|
|
415
|
+
const data = this.db.export();
|
|
416
|
+
// Write to temporary file first, then atomically rename
|
|
417
|
+
// Use mode 0o600 to restrict file access to owner only (contains potentially sensitive data)
|
|
418
|
+
await writeFile(tempPath, data, { mode: 0o600 });
|
|
419
|
+
await rename(tempPath, dbPath);
|
|
420
|
+
}
|
|
421
|
+
finally {
|
|
422
|
+
await lock.release();
|
|
423
|
+
}
|
|
424
|
+
}
|
|
425
|
+
/**
|
|
426
|
+
* Configure database pragmas
|
|
427
|
+
*/
|
|
428
|
+
configurePragmas(db) {
|
|
429
|
+
db.run('PRAGMA synchronous = NORMAL');
|
|
430
|
+
db.run('PRAGMA busy_timeout = 5000');
|
|
431
|
+
db.run('PRAGMA foreign_keys = ON');
|
|
432
|
+
db.run('PRAGMA temp_store = MEMORY');
|
|
433
|
+
}
|
|
434
|
+
/**
|
|
435
|
+
* Check database integrity
|
|
436
|
+
*/
|
|
437
|
+
checkIntegrity(db) {
|
|
438
|
+
try {
|
|
439
|
+
// Ensure the database is accessible
|
|
440
|
+
const results = db.exec('SELECT 1');
|
|
441
|
+
if (results.length === 0) {
|
|
442
|
+
throw new Error('Database query returned no results');
|
|
443
|
+
}
|
|
444
|
+
}
|
|
445
|
+
catch (error) {
|
|
446
|
+
console.error('[SQLiteMemoryStore] Integrity check failed:', error);
|
|
447
|
+
throw new Error('Database is corrupted');
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
    /**
     * Initialize database schema.
     * Idempotent: uses CREATE TABLE/INDEX IF NOT EXISTS, so it is safe to run
     * on every startup. Entries are keyed by UUID `id`, with (name, scope)
     * unique so upserts can target ON CONFLICT(name, scope).
     */
    initializeSchema(db) {
        // Create memory entries table
        db.run(`
      CREATE TABLE IF NOT EXISTS memory_entries (
        id TEXT PRIMARY KEY,
        name TEXT NOT NULL CHECK(length(name) > 0),
        scope TEXT NOT NULL CHECK(scope IN ('global') OR scope LIKE 'project:%'),
        content TEXT NOT NULL CHECK(length(content) > 0),
        entry_type TEXT NOT NULL CHECK(length(entry_type) > 0),
        status TEXT CHECK(status IS NULL OR length(status) > 0),
        priority TEXT CHECK(priority IS NULL OR priority IN ('low', 'medium', 'high', 'critical')),
        tags TEXT CHECK(tags IS NULL OR length(tags) > 0),
        metadata TEXT CHECK(metadata IS NULL OR json_valid(metadata)),
        created_at INTEGER NOT NULL CHECK(created_at > 0),
        updated_at INTEGER NOT NULL CHECK(updated_at > 0),
        last_accessed INTEGER NOT NULL CHECK(last_accessed > 0),
        UNIQUE(name, scope)
      )
    `);
        // Create optimized indexes (scope+type lookups, recency ordering)
        db.run('CREATE INDEX IF NOT EXISTS idx_memory_entries_scope_type ON memory_entries(scope, entry_type)');
        db.run('CREATE INDEX IF NOT EXISTS idx_memory_entries_updated ON memory_entries(updated_at)');
    }
|
|
476
|
+
/**
|
|
477
|
+
* Get database instance
|
|
478
|
+
*/
|
|
479
|
+
async getDatabase() {
|
|
480
|
+
if (!this.db) {
|
|
481
|
+
this.db = await this.initializeDatabase();
|
|
482
|
+
}
|
|
483
|
+
return this.db;
|
|
484
|
+
}
|
|
485
|
+
    /**
     * Resolve home directory in path using shared utility, then make the
     * result absolute relative to the current working directory.
     */
    resolvePath(path) {
        const resolved = resolveHomePath(path);
        return resolve(resolved);
    }
    /**
     * Generate UUID v4 (used as the primary key for new memory entries).
     */
    generateUUID() {
        return randomUUID();
    }
    /**
     * Get current timestamp in milliseconds.
     * Small indirection so tests can stub the clock if needed.
     */
    now() {
        return Date.now();
    }
|
|
504
|
+
    /**
     * Execute transaction.
     * Uses Mutex to serialize concurrent transaction calls for safety.
     * Supports reentrancy (nested transactions from the same call chain):
     * a nested call reuses the outer BEGIN/COMMIT instead of starting its own.
     * After a successful COMMIT the database is persisted to disk.
     *
     * @param callback - async work to run inside the transaction
     * @returns whatever the callback returns
     * @throws the callback's error (after ROLLBACK), or the save error
     */
    async transaction(callback) {
        // Get or create owner symbol for this transaction context
        let owner = transactionOwnerStorage.getStore();
        if (!owner) {
            owner = Symbol('transaction');
        }
        // Always acquire the mutex to serialize concurrent calls
        // (reentrant: the same owner re-acquires without deadlocking)
        const release = await this.transactionMutex.acquire(owner);
        // Run callback in AsyncLocalStorage context to enable reentrancy
        return transactionOwnerStorage.run(owner, async () => {
            try {
                const db = await this.getDatabase();
                // sql.js is synchronous, so we use explicit transaction control.
                // shouldBegin is false for nested calls already inside a transaction.
                const shouldBegin = !this.inTransaction;
                try {
                    if (shouldBegin) {
                        db.run('BEGIN TRANSACTION');
                        this.inTransaction = true;
                    }
                    const result = await callback();
                    if (shouldBegin) {
                        db.run('COMMIT');
                        this.inTransaction = false;
                        // Save after successful transaction
                        try {
                            await this.saveDatabase();
                        }
                        catch (saveError) {
                            // If save fails after commit, close the database to prevent state divergence
                            // The in-memory db has committed data but disk is stale
                            // Close forces re-initialization which will load from disk on next operation
                            // NOTE(review): this.close() is defined elsewhere in this file — verify
                            // its `skipSave` parameter semantics match this call
                            console.error('[SQLiteMemoryStore] Failed to save database after commit, closing:', saveError);
                            await this.close(true); // Skip save since it just failed
                            throw saveError;
                        }
                    }
                    return result;
                }
                catch (error) {
                    // Only rollback if we're still in a transaction (saveDatabase could have failed after COMMIT)
                    if (this.inTransaction) {
                        try {
                            db.run('ROLLBACK');
                        }
                        catch (rollbackError) {
                            // Log but don't mask the original error
                            console.error('[SQLiteMemoryStore] ROLLBACK failed:', rollbackError);
                        }
                        this.inTransaction = false;
                    }
                    throw error;
                }
            }
            finally {
                // Always release the mutex lock
                release();
            }
        });
    }
|
|
568
|
+
/**
|
|
569
|
+
* Read memory by topic
|
|
570
|
+
* Note: Does NOT update last_accessed timestamp to avoid expensive disk writes on every read.
|
|
571
|
+
* The timestamp is updated when memory is modified through updateMemory operations.
|
|
572
|
+
*/
|
|
573
|
+
async readMemory(topic) {
|
|
574
|
+
const db = await this.getDatabase();
|
|
575
|
+
const scope = this.currentScope;
|
|
576
|
+
const stmt = db.prepare('SELECT content FROM memory_entries WHERE name = ? AND scope = ?');
|
|
577
|
+
stmt.bind([topic, scope]);
|
|
578
|
+
// Need to call step() to execute the query
|
|
579
|
+
if (stmt.step()) {
|
|
580
|
+
const row = stmt.getAsObject();
|
|
581
|
+
const content = row.content;
|
|
582
|
+
stmt.free();
|
|
583
|
+
return content;
|
|
584
|
+
}
|
|
585
|
+
stmt.free();
|
|
586
|
+
return undefined;
|
|
587
|
+
}
|
|
588
|
+
    /**
     * Internal update memory without transaction (used by batchUpdateMemory).
     * Applies one operation ('remove', 'append', or replace) against the
     * (topic, currentScope) entry, merging metadata with the existing row.
     *
     * @param db - sql.js Database handle (caller manages the transaction)
     * @param operation - 'remove' | 'append' | anything else = replace
     * @param topic - entry name
     * @param content - new content; required except for 'remove'
     * @param metadata - optional overrides (entry_type, status, priority, tags,
     *                   and explicit created_at/updated_at/last_accessed)
     * @throws Error when content is missing for a create/replace/append
     */
    async updateMemoryInternal(db, operation, topic, content, metadata) {
        const scope = this.currentScope;
        const now = this.now();
        // Use provided timestamps or default to now
        const createdAt = metadata?.created_at ?? now;
        const updatedAt = metadata?.updated_at ?? now;
        const lastAccessed = metadata?.last_accessed ?? now;
        if (operation === 'remove') {
            // Deleting a non-existent entry is a silent no-op
            const stmt = db.prepare('DELETE FROM memory_entries WHERE name = ? AND scope = ?');
            stmt.run([topic, scope]);
            stmt.free();
            return;
        }
        // Fetch the existing row (if any) so append/replace can merge fields
        const stmt = db.prepare('SELECT content, entry_type, status, priority, tags FROM memory_entries WHERE name = ? AND scope = ?');
        stmt.bind([topic, scope]);
        let existing;
        if (stmt.step()) {
            const row = stmt.getAsObject();
            existing = {
                content: row.content,
                entry_type: row.entry_type,
                status: row.status,
                priority: row.priority,
                tags: row.tags,
            };
            stmt.free();
        }
        else {
            existing = undefined;
            stmt.free();
        }
        let finalContent;
        let entry_type;
        let status;
        let priority;
        let tags;
        if (existing) {
            if (operation === 'append') {
                if (!content) {
                    throw new Error('Content is required for append operation.');
                }
                // Append joins old and new content with a newline
                finalContent = `${existing.content}\n${content}`;
            }
            else {
                if (!content) {
                    throw new Error('Content is required for replace operation.');
                }
                finalContent = content;
            }
            // Metadata overrides win; otherwise keep existing values
            // (|| means empty-string metadata also falls back to existing)
            entry_type = metadata?.entry_type || existing.entry_type;
            status = (metadata?.status || existing.status) ?? undefined;
            priority = (metadata?.priority || existing.priority) ?? undefined;
            tags = (metadata?.tags || existing.tags) ?? undefined;
        }
        else {
            if (!content) {
                throw new Error('Content is required for new memory entries.');
            }
            finalContent = content;
            // New entries default to type 'note'
            entry_type = metadata?.entry_type || 'note';
            status = metadata?.status;
            priority = metadata?.priority;
            tags = metadata?.tags;
        }
        // Upsert keyed on the UNIQUE(name, scope) constraint; a fresh UUID is
        // generated but only used when the row is actually inserted
        const upsertStmt = db.prepare(`
      INSERT INTO memory_entries (id, name, scope, content, entry_type, status, priority, tags, created_at, updated_at, last_accessed)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(name, scope) DO UPDATE SET
        content = excluded.content,
        entry_type = excluded.entry_type,
        status = excluded.status,
        priority = excluded.priority,
        tags = excluded.tags,
        updated_at = excluded.updated_at,
        last_accessed = excluded.last_accessed
    `);
        upsertStmt.run([
            this.generateUUID(),
            topic,
            scope,
            finalContent,
            entry_type,
            status ?? null,
            priority ?? null,
            tags ?? null,
            createdAt,
            updatedAt,
            lastAccessed,
        ]);
        upsertStmt.free();
    }
|
|
682
|
+
    /**
     * Update memory.
     * Public entry point: applies a single replace/append/remove operation,
     * wrapped in transaction() so the change is committed and persisted to
     * disk atomically.
     */
    async updateMemory(operation, topic, content, metadata) {
        return this.transaction(async () => {
            const db = await this.getDatabase();
            await this.updateMemoryInternal(db, operation, topic, content, metadata);
        });
    }
|
|
691
|
+
/**
|
|
692
|
+
* Query memory with filters
|
|
693
|
+
*/
|
|
694
|
+
async queryMemory(query = {}, options = {}) {
|
|
695
|
+
// For delete operations, wrap in transaction to ensure persistence
|
|
696
|
+
if (options.operation === 'delete') {
|
|
697
|
+
return this.transaction(async () => {
|
|
698
|
+
const db = await this.getDatabase();
|
|
699
|
+
const { sql, params } = this.buildQuery(query);
|
|
700
|
+
const deleteSql = `DELETE FROM memory_entries WHERE id IN (SELECT id FROM (${sql}))`;
|
|
701
|
+
const stmt = db.prepare(deleteSql);
|
|
702
|
+
stmt.bind(params);
|
|
703
|
+
stmt.step();
|
|
704
|
+
stmt.free();
|
|
705
|
+
return db.getRowsModified();
|
|
706
|
+
});
|
|
707
|
+
}
|
|
708
|
+
const db = await this.getDatabase();
|
|
709
|
+
const { sql, params } = this.buildQuery(query);
|
|
710
|
+
if (options.operation === 'count') {
|
|
711
|
+
const countSql = `SELECT COUNT(*) as count FROM (${sql})`;
|
|
712
|
+
const stmt = db.prepare(countSql);
|
|
713
|
+
stmt.bind(params);
|
|
714
|
+
let count = 0;
|
|
715
|
+
if (stmt.step()) {
|
|
716
|
+
const row = stmt.getAsObject();
|
|
717
|
+
count = row.count;
|
|
718
|
+
}
|
|
719
|
+
stmt.free();
|
|
720
|
+
return count;
|
|
721
|
+
}
|
|
722
|
+
const stmt = db.prepare(sql);
|
|
723
|
+
stmt.bind(params);
|
|
724
|
+
const entries = [];
|
|
725
|
+
while (stmt.step()) {
|
|
726
|
+
entries.push(stmt.getAsObject());
|
|
727
|
+
}
|
|
728
|
+
stmt.free();
|
|
729
|
+
return entries;
|
|
730
|
+
}
|
|
731
|
+
/**
|
|
732
|
+
* Build SQL query safely with parameterized statements
|
|
733
|
+
*/
|
|
734
|
+
buildQuery(query) {
|
|
735
|
+
const conditions = [];
|
|
736
|
+
const params = [];
|
|
737
|
+
let sql = 'SELECT * FROM memory_entries WHERE 1=1';
|
|
738
|
+
// Scope handling
|
|
739
|
+
const scope = query.scope === 'auto' ? this.currentScope : query.scope;
|
|
740
|
+
if (scope === 'global') {
|
|
741
|
+
conditions.push(`scope = ?`);
|
|
742
|
+
params.push('global');
|
|
743
|
+
}
|
|
744
|
+
else if (scope === 'project' || (!scope && this.currentScope !== 'global')) {
|
|
745
|
+
conditions.push(`scope = ?`);
|
|
746
|
+
params.push(this.currentScope);
|
|
747
|
+
}
|
|
748
|
+
// Name filter (exact match)
|
|
749
|
+
if (query.name) {
|
|
750
|
+
if (!query.name.trim()) {
|
|
751
|
+
throw new Error('Name cannot be empty');
|
|
752
|
+
}
|
|
753
|
+
conditions.push(`name = ?`);
|
|
754
|
+
params.push(query.name.trim());
|
|
755
|
+
}
|
|
756
|
+
// Type filter
|
|
757
|
+
if (query.type) {
|
|
758
|
+
if (!query.type.trim()) {
|
|
759
|
+
throw new Error('Type cannot be empty');
|
|
760
|
+
}
|
|
761
|
+
conditions.push(`entry_type = ?`);
|
|
762
|
+
params.push(query.type.trim());
|
|
763
|
+
}
|
|
764
|
+
// Status filter
|
|
765
|
+
if (query.status) {
|
|
766
|
+
conditions.push(`status = ?`);
|
|
767
|
+
params.push(query.status);
|
|
768
|
+
}
|
|
769
|
+
// Priority filter
|
|
770
|
+
if (query.priority) {
|
|
771
|
+
if (!SQLiteMemoryStore.ALLOWED_PRIORITIES.includes(query.priority)) {
|
|
772
|
+
throw new Error(`Invalid priority: ${query.priority}`);
|
|
773
|
+
}
|
|
774
|
+
conditions.push(`priority = ?`);
|
|
775
|
+
params.push(query.priority);
|
|
776
|
+
}
|
|
777
|
+
// Tags filter
|
|
778
|
+
if (query.tags) {
|
|
779
|
+
const tags = Array.isArray(query.tags) ? query.tags : [query.tags];
|
|
780
|
+
for (const tag of tags) {
|
|
781
|
+
const trimmed = tag.trim();
|
|
782
|
+
if (!trimmed) {
|
|
783
|
+
throw new Error('Tags cannot be empty');
|
|
784
|
+
}
|
|
785
|
+
// Escape special LIKE characters (\, _, %) for exact matching
|
|
786
|
+
const escaped = trimmed.replace(/[\\_%]/g, '\\$&');
|
|
787
|
+
// Use comma-wrapped matching for precise tag filtering with ESCAPE clause
|
|
788
|
+
// Matches: "tag", "tag,other", "other,tag", "other,tag,other"
|
|
789
|
+
conditions.push(`(tags = ? OR tags LIKE ? ESCAPE '\\' OR tags LIKE ? ESCAPE '\\' OR tags LIKE ? ESCAPE '\\')`);
|
|
790
|
+
params.push(trimmed, `${escaped},%`, `%,${escaped}`, `%,${escaped},%`);
|
|
791
|
+
}
|
|
792
|
+
}
|
|
793
|
+
// Search filter
|
|
794
|
+
if (query.search) {
|
|
795
|
+
const searchTerm = query.search.trim();
|
|
796
|
+
// For LIKE queries, we need to include the wildcards in the parameter value
|
|
797
|
+
// Also escape special LIKE characters for literal matching
|
|
798
|
+
conditions.push(`(content LIKE ? ESCAPE '\\' OR name LIKE ? ESCAPE '\\')`);
|
|
799
|
+
const searchPattern = `%${searchTerm.replace(/[\\_%]/g, '\\$&')}%`;
|
|
800
|
+
params.push(searchPattern, searchPattern);
|
|
801
|
+
}
|
|
802
|
+
// Date range filters
|
|
803
|
+
if (query.createdAfter) {
|
|
804
|
+
conditions.push(`created_at >= ?`);
|
|
805
|
+
params.push(query.createdAfter);
|
|
806
|
+
}
|
|
807
|
+
if (query.createdBefore) {
|
|
808
|
+
conditions.push(`created_at <= ?`);
|
|
809
|
+
params.push(query.createdBefore);
|
|
810
|
+
}
|
|
811
|
+
if (query.updatedAfter) {
|
|
812
|
+
conditions.push(`updated_at >= ?`);
|
|
813
|
+
params.push(query.updatedAfter);
|
|
814
|
+
}
|
|
815
|
+
if (query.updatedBefore) {
|
|
816
|
+
conditions.push(`updated_at <= ?`);
|
|
817
|
+
params.push(query.updatedBefore);
|
|
818
|
+
}
|
|
819
|
+
// Build WHERE clause
|
|
820
|
+
if (conditions.length > 0) {
|
|
821
|
+
sql += ` AND ${conditions.join(' AND ')}`;
|
|
822
|
+
}
|
|
823
|
+
// Sorting
|
|
824
|
+
if (query.sortBy) {
|
|
825
|
+
const column = SQLiteMemoryStore.SORT_COLUMNS[query.sortBy];
|
|
826
|
+
if (!column) {
|
|
827
|
+
throw new Error(`Invalid sortBy: ${query.sortBy}`);
|
|
828
|
+
}
|
|
829
|
+
const order = query.sortOrder || 'desc';
|
|
830
|
+
if (!SQLiteMemoryStore.ALLOWED_SORT_ORDERS.includes(order)) {
|
|
831
|
+
throw new Error(`Invalid sortOrder: ${order}`);
|
|
832
|
+
}
|
|
833
|
+
sql += ` ORDER BY ${column} ${order.toUpperCase()}`;
|
|
834
|
+
}
|
|
835
|
+
// Pagination
|
|
836
|
+
if (query.limit) {
|
|
837
|
+
const limit = Number(query.limit);
|
|
838
|
+
if (Number.isNaN(limit) || limit < 1 || limit > 10000) {
|
|
839
|
+
throw new Error('Limit must be between 1 and 10000');
|
|
840
|
+
}
|
|
841
|
+
sql += ` LIMIT ?`;
|
|
842
|
+
params.push(limit);
|
|
843
|
+
}
|
|
844
|
+
if (query.offset) {
|
|
845
|
+
const offset = Number(query.offset);
|
|
846
|
+
if (Number.isNaN(offset) || offset < 0) {
|
|
847
|
+
throw new Error('Offset must be >= 0');
|
|
848
|
+
}
|
|
849
|
+
sql += ` OFFSET ?`;
|
|
850
|
+
params.push(offset);
|
|
851
|
+
}
|
|
852
|
+
return { sql, params };
|
|
853
|
+
}
|
|
854
|
+
/**
|
|
855
|
+
* Batch update memory
|
|
856
|
+
*/
|
|
857
|
+
async batchUpdateMemory(operations) {
|
|
858
|
+
return this.transaction(async () => {
|
|
859
|
+
const db = await this.getDatabase();
|
|
860
|
+
for (const op of operations) {
|
|
861
|
+
await this.updateMemoryInternal(db, op.operation, op.name, op.content, op.metadata);
|
|
862
|
+
}
|
|
863
|
+
});
|
|
864
|
+
}
|
|
865
|
+
/**
|
|
866
|
+
* Close database connection
|
|
867
|
+
* @param skipSave - If true, skip saving before close (useful when save already failed)
|
|
868
|
+
*/
|
|
869
|
+
async close(skipSave = false) {
|
|
870
|
+
const db = this.db;
|
|
871
|
+
if (db) {
|
|
872
|
+
try {
|
|
873
|
+
if (!skipSave) {
|
|
874
|
+
await this.saveDatabase();
|
|
875
|
+
}
|
|
876
|
+
}
|
|
877
|
+
finally {
|
|
878
|
+
// Always close and nullify, even if save fails
|
|
879
|
+
// Only close if this.db is still the same instance (prevent double-close)
|
|
880
|
+
if (this.db === db) {
|
|
881
|
+
db.close();
|
|
882
|
+
this.db = null;
|
|
883
|
+
}
|
|
884
|
+
}
|
|
885
|
+
}
|
|
886
|
+
this.dbPromise = null;
|
|
887
|
+
}
|
|
888
|
+
/**
|
|
889
|
+
* Get database statistics
|
|
890
|
+
*/
|
|
891
|
+
async getStats() {
|
|
892
|
+
const db = await this.getDatabase();
|
|
893
|
+
const results = db.exec('SELECT COUNT(*) as count FROM memory_entries');
|
|
894
|
+
const totalEntries = results[0]?.values[0]?.[0] || 0;
|
|
895
|
+
const typeResults = db.exec('SELECT entry_type, COUNT(*) as count FROM memory_entries GROUP BY entry_type');
|
|
896
|
+
const entriesByType = {};
|
|
897
|
+
if (typeResults.length > 0) {
|
|
898
|
+
for (const row of typeResults[0].values) {
|
|
899
|
+
entriesByType[row[0]] = row[1];
|
|
900
|
+
}
|
|
901
|
+
}
|
|
902
|
+
// Get database file size
|
|
903
|
+
const dbPath = this.resolvePath(this.getDbPath());
|
|
904
|
+
let databaseSize = 0;
|
|
905
|
+
try {
|
|
906
|
+
const stats = await import('node:fs/promises').then((fs) => fs.stat(dbPath));
|
|
907
|
+
databaseSize = stats.size;
|
|
908
|
+
}
|
|
909
|
+
catch {
|
|
910
|
+
// File might not exist yet
|
|
911
|
+
}
|
|
912
|
+
return {
|
|
913
|
+
totalEntries,
|
|
914
|
+
entriesByType,
|
|
915
|
+
databaseSize,
|
|
916
|
+
};
|
|
917
|
+
}
|
|
918
|
+
}
|
|
919
|
+
//# sourceMappingURL=sqlite-memory-store.js.map
|