@exaudeus/workrail 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +153 -189
- package/dist/application/services/classification-engine.d.ts +33 -0
- package/dist/application/services/classification-engine.js +258 -0
- package/dist/application/services/compression-service.d.ts +20 -0
- package/dist/application/services/compression-service.js +312 -0
- package/dist/application/services/context-management-service.d.ts +38 -0
- package/dist/application/services/context-management-service.js +301 -0
- package/dist/application/services/context-persistence-service.d.ts +45 -0
- package/dist/application/services/context-persistence-service.js +273 -0
- package/dist/cli/migrate-workflow.js +3 -2
- package/dist/infrastructure/storage/context-storage.d.ts +150 -0
- package/dist/infrastructure/storage/context-storage.js +40 -0
- package/dist/infrastructure/storage/filesystem-blob-storage.d.ts +27 -0
- package/dist/infrastructure/storage/filesystem-blob-storage.js +363 -0
- package/dist/infrastructure/storage/hybrid-context-storage.d.ts +29 -0
- package/dist/infrastructure/storage/hybrid-context-storage.js +400 -0
- package/dist/infrastructure/storage/migrations/001_initial_schema.sql +38 -0
- package/dist/infrastructure/storage/migrations/002_context_concurrency_enhancements.sql +234 -0
- package/dist/infrastructure/storage/migrations/003_classification_overrides.sql +20 -0
- package/dist/infrastructure/storage/sqlite-metadata-storage.d.ts +35 -0
- package/dist/infrastructure/storage/sqlite-metadata-storage.js +410 -0
- package/dist/infrastructure/storage/sqlite-migrator.d.ts +46 -0
- package/dist/infrastructure/storage/sqlite-migrator.js +293 -0
- package/dist/types/context-types.d.ts +236 -0
- package/dist/types/context-types.js +10 -0
- package/dist/utils/storage-security.js +1 -1
- package/package.json +4 -1
- package/workflows/coding-task-workflow-with-loops.json +434 -0
- package/workflows/mr-review-workflow.json +75 -26
- package/workflows/systemic-bug-investigation-with-loops.json +423 -0
|
@@ -0,0 +1,400 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
36
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
37
|
+
};
|
|
38
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
39
|
+
exports.HybridContextStorage = void 0;
|
|
40
|
+
exports.createDefaultHybridContextStorage = createDefaultHybridContextStorage;
|
|
41
|
+
exports.createCustomHybridContextStorage = createCustomHybridContextStorage;
|
|
42
|
+
const crypto_1 = __importDefault(require("crypto"));
|
|
43
|
+
const error_handler_1 = require("../../core/error-handler");
|
|
44
|
+
/**
 * HybridContextStorage pairs a metadata store (SQL: sessions, checkpoint
 * metadata, session locks) with a blob store (serialized context payloads).
 * All public methods require initialize() to have completed first.
 */
class HybridContextStorage {
    /**
     * @param {object} metadataStorage - Metadata backend (checkpoint rows, sessions, locks).
     * @param {object} blobStorage - Blob backend holding the actual context payloads.
     * @param {object} config - Storage config; reads config.concurrency.operationTimeoutMs
     *   and config.quotas.{maxTotalSize,warningThreshold,cleanupThreshold}.
     */
    constructor(metadataStorage, blobStorage, config) {
        this.isInitialized = false;
        this.metadataStorage = metadataStorage;
        this.blobStorage = blobStorage;
        this.config = config;
    }
    /** Initializes both backends in parallel. Idempotent. */
    async initialize() {
        if (this.isInitialized) {
            return;
        }
        try {
            await Promise.all([
                this.metadataStorage.initialize(),
                this.blobStorage.initialize()
            ]);
            this.isInitialized = true;
            console.log('✅ Hybrid context storage initialized');
        }
        catch (error) {
            throw new Error(`Failed to initialize hybrid context storage: ${error}`, { cause: error });
        }
    }
    /**
     * Maps a checkpoint metadata row to the descriptor shape the blob storage
     * expects. Centralizes the literal previously duplicated in the load and
     * delete paths.
     */
    toBlobMetadata(metadata) {
        return {
            checkpointId: metadata.id,
            sessionId: metadata.sessionId,
            path: metadata.blobPath,
            sizeBytes: metadata.contextSizeBytes,
            hash: metadata.contextHash,
            encrypted: false
        };
    }
    /**
     * Persists checkpoint metadata, then its blob, then updates session
     * bookkeeping, all under a session-level 'save' lock.
     * FIX: operationId was previously declared but the lock was never
     * acquired, leaving the catch/finally lock handling dead and saves
     * unsynchronized with deleteCheckpoint/deleteSession (which do lock).
     * @param {object} metadata - Partial checkpoint metadata; id/createdAt/status defaulted here.
     * @param {object} blob - Serialized context payload; blob.originalSize is recorded on the session.
     */
    async saveCheckpoint(metadata, blob) {
        this.ensureInitialized();
        let operationId = null;
        try {
            operationId = await this.metadataStorage.acquireSessionLock(metadata.sessionId, 'save', this.config.concurrency.operationTimeoutMs);
            const checkpointId = metadata.id || crypto_1.default.randomUUID();
            const fullMetadata = {
                ...metadata,
                id: checkpointId,
                createdAt: metadata.createdAt || new Date().toISOString(),
                status: metadata.status || 'active'
            };
            await this.metadataStorage.saveCheckpointMetadata(fullMetadata);
            const blobMetadata = await this.blobStorage.saveBlob(metadata.sessionId, checkpointId, blob);
            await this.metadataStorage.upsertSessionInfo({
                id: metadata.sessionId,
                lastAccessedAt: new Date().toISOString(),
                totalSizeBytes: blob.originalSize,
                createdAt: new Date().toISOString()
            });
            console.log(`✅ Saved checkpoint: ${checkpointId} (${blobMetadata.sizeBytes} bytes)`);
        }
        catch (error) {
            if (operationId) {
                // Metadata may have been written before the blob write failed
                // (or vice versa); there is no transactional rollback across the
                // two backends, so the blob can be orphaned until cleanup runs.
                console.warn(`Warning: Checkpoint save failed, blob may be orphaned: ${metadata.id}`);
            }
            throw new Error(`Failed to save checkpoint: ${error}`, { cause: error });
        }
        finally {
            if (operationId) {
                try {
                    await this.metadataStorage.releaseSessionLock(operationId);
                }
                catch (lockError) {
                    console.warn('Failed to release session lock:', lockError);
                }
            }
        }
    }
    /**
     * Loads a checkpoint's metadata row and blob, bumping the session's
     * last-accessed time on success.
     * @returns {{metadata: object, blob: object}}
     * @throws {CheckpointNotFoundError} when the metadata row or blob is missing.
     *   FIX: previously this was re-wrapped in a generic Error, so callers
     *   could no longer catch the domain error type.
     */
    async loadCheckpoint(checkpointId) {
        this.ensureInitialized();
        try {
            const metadata = await this.metadataStorage.getCheckpointMetadata(checkpointId);
            if (!metadata) {
                throw new error_handler_1.CheckpointNotFoundError(checkpointId);
            }
            const blob = await this.blobStorage.loadBlob(this.toBlobMetadata(metadata));
            if (!blob) {
                throw new error_handler_1.CheckpointNotFoundError(checkpointId);
            }
            await this.updateSessionAccess(metadata.sessionId);
            console.log(`✅ Loaded checkpoint: ${checkpointId}`);
            return { metadata, blob };
        }
        catch (error) {
            if (error instanceof error_handler_1.CheckpointNotFoundError) {
                throw error; // preserve the domain error type for callers
            }
            throw new Error(`Failed to load checkpoint: ${error}`, { cause: error });
        }
    }
    /**
     * Lists checkpoint metadata for a session (paged). Only touches the
     * session's last-accessed time when at least one row is returned.
     */
    async listCheckpoints(sessionId, limit = 20, offset = 0) {
        this.ensureInitialized();
        try {
            const checkpoints = await this.metadataStorage.listCheckpointMetadata(sessionId, limit, offset);
            if (checkpoints.length > 0) {
                await this.updateSessionAccess(sessionId);
            }
            return checkpoints;
        }
        catch (error) {
            throw new Error(`Failed to list checkpoints: ${error}`, { cause: error });
        }
    }
    /**
     * Deletes a checkpoint's metadata row and blob under a session-level
     * 'delete' lock. Metadata is removed first so a partial failure leaves an
     * orphaned blob (reclaimable via cleanupOrphanedBlobs) rather than a
     * metadata row pointing at a missing blob.
     * @throws {CheckpointNotFoundError} when no metadata row exists.
     */
    async deleteCheckpoint(checkpointId) {
        this.ensureInitialized();
        let operationId = null;
        try {
            const metadata = await this.metadataStorage.getCheckpointMetadata(checkpointId);
            if (!metadata) {
                throw new error_handler_1.CheckpointNotFoundError(checkpointId);
            }
            operationId = await this.metadataStorage.acquireSessionLock(metadata.sessionId, 'delete', this.config.concurrency.operationTimeoutMs);
            await this.metadataStorage.deleteCheckpointMetadata(checkpointId);
            await this.blobStorage.deleteBlob(this.toBlobMetadata(metadata));
            console.log(`🗑️ Deleted checkpoint: ${checkpointId}`);
        }
        catch (error) {
            if (error instanceof error_handler_1.CheckpointNotFoundError) {
                throw error; // preserve the domain error type for callers
            }
            throw new Error(`Failed to delete checkpoint: ${error}`, { cause: error });
        }
        finally {
            if (operationId) {
                try {
                    await this.metadataStorage.releaseSessionLock(operationId);
                }
                catch (lockError) {
                    console.warn('Failed to release session lock:', lockError);
                }
            }
        }
    }
    /** Returns the session info row, or whatever the backend returns for a miss. */
    async getSession(sessionId) {
        this.ensureInitialized();
        try {
            return await this.metadataStorage.getSessionInfo(sessionId);
        }
        catch (error) {
            throw new Error(`Failed to get session: ${error}`, { cause: error });
        }
    }
    /** Inserts or updates a session info row. */
    async upsertSession(session) {
        this.ensureInitialized();
        try {
            await this.metadataStorage.upsertSessionInfo(session);
            console.log(`📋 Upserted session: ${session.id}`);
        }
        catch (error) {
            throw new Error(`Failed to upsert session: ${error}`, { cause: error });
        }
    }
    /**
     * Deletes a session and all of its checkpoints (up to 1000) under a
     * session-level 'delete' lock. Individual blob-delete failures are logged
     * and skipped so one bad blob cannot block session removal.
     */
    async deleteSession(sessionId) {
        this.ensureInitialized();
        let operationId = null;
        try {
            operationId = await this.metadataStorage.acquireSessionLock(sessionId, 'delete', this.config.concurrency.operationTimeoutMs);
            const checkpoints = await this.metadataStorage.listCheckpointMetadata(sessionId, 1000, 0);
            for (const checkpoint of checkpoints) {
                try {
                    await this.blobStorage.deleteBlob(this.toBlobMetadata(checkpoint));
                }
                catch (blobError) {
                    console.warn(`Warning: Failed to delete blob for checkpoint ${checkpoint.id}:`, blobError);
                }
            }
            await this.metadataStorage.deleteSessionInfo(sessionId);
            console.log(`🗑️ Deleted session: ${sessionId} (${checkpoints.length} checkpoints)`);
        }
        catch (error) {
            throw new Error(`Failed to delete session: ${error}`, { cause: error });
        }
        finally {
            if (operationId) {
                try {
                    await this.metadataStorage.releaseSessionLock(operationId);
                }
                catch (lockError) {
                    console.warn('Failed to release session lock:', lockError);
                }
            }
        }
    }
    /**
     * Aggregates counts/sizes from both backends.
     * oldestCheckpoint/newestCheckpoint are always undefined here — the
     * metadata stats currently exposed do not include timestamps.
     */
    async getStorageStats() {
        this.ensureInitialized();
        try {
            const [metadataStats, blobStats] = await Promise.all([
                this.metadataStorage.getMetadataStats(),
                this.blobStorage.getBlobStats()
            ]);
            const totalSizeBytes = blobStats.totalSizeBytes;
            const maxTotalSize = this.config.quotas.maxTotalSize;
            // Guard against a zero/unset quota to avoid division by zero.
            const storageUtilization = maxTotalSize > 0 ? totalSizeBytes / maxTotalSize : 0;
            return {
                totalSessions: metadataStats.totalSessions,
                totalCheckpoints: metadataStats.totalCheckpoints,
                totalSizeBytes: blobStats.totalSizeBytes,
                averageCheckpointSize: metadataStats.averageCheckpointSize,
                oldestCheckpoint: undefined,
                newestCheckpoint: undefined,
                storageUtilization
            };
        }
        catch (error) {
            throw new Error(`Failed to get storage stats: ${error}`, { cause: error });
        }
    }
    /** Returns the checkpoint metadata row only (no blob access). */
    async getCheckpoint(checkpointId) {
        this.ensureInitialized();
        return this.metadataStorage.getCheckpointMetadata(checkpointId);
    }
    /**
     * Runs blob integrity validation plus quota checks and folds everything
     * into a single report. Never throws: validation failures are reported
     * inside the returned result object.
     * @returns {{isValid: boolean, errors: object[], warnings: object[], repairSuggestions: string[]}}
     */
    async validateIntegrity() {
        this.ensureInitialized();
        const result = {
            isValid: true,
            errors: [],
            warnings: [],
            repairSuggestions: []
        };
        try {
            const blobValidation = await this.blobStorage.validateBlobIntegrity();
            if (!blobValidation.isValid) {
                result.isValid = false;
                if (blobValidation.corruptedFiles.length > 0) {
                    result.errors.push({
                        type: 'CORRUPTION',
                        description: `Found ${blobValidation.corruptedFiles.length} corrupted blob files`,
                        affectedItems: blobValidation.corruptedFiles,
                        severity: 'HIGH'
                    });
                }
                if (blobValidation.missingFiles.length > 0) {
                    result.errors.push({
                        type: 'MISSING_DATA',
                        description: `Found ${blobValidation.missingFiles.length} missing blob files`,
                        affectedItems: blobValidation.missingFiles,
                        severity: 'CRITICAL'
                    });
                }
                if (blobValidation.orphanedFiles.length > 0) {
                    result.warnings.push({
                        type: 'CLEANUP_NEEDED',
                        description: `Found ${blobValidation.orphanedFiles.length} orphaned blob files`,
                        suggestion: 'Run orphaned file cleanup to reclaim disk space'
                    });
                }
            }
            const stats = await this.getStorageStats();
            if (stats.storageUtilization > this.config.quotas.warningThreshold) {
                const severity = stats.storageUtilization > this.config.quotas.cleanupThreshold ? 'HIGH' : 'MEDIUM';
                result.warnings.push({
                    type: 'QUOTA_WARNING',
                    description: `Storage usage at ${(stats.storageUtilization * 100).toFixed(1)}% of quota`,
                    suggestion: severity === 'HIGH' ? 'Immediate cleanup recommended' : 'Consider cleanup soon'
                });
            }
            if (result.errors.length > 0) {
                result.repairSuggestions.push('Run storage repair to fix corrupted or missing data');
            }
            if (result.warnings.some(w => w.type === 'CLEANUP_NEEDED')) {
                result.repairSuggestions.push('Run orphaned file cleanup to reclaim disk space');
            }
            return result;
        }
        catch (error) {
            result.isValid = false;
            result.errors.push({
                type: 'CORRUPTION',
                description: `Storage validation failed: ${error}`,
                affectedItems: ['storage-system'],
                severity: 'CRITICAL'
            });
            return result;
        }
    }
    /** Closes both backends; best-effort (failures are logged, not thrown). */
    async close() {
        try {
            await Promise.all([
                this.metadataStorage.close(),
                this.blobStorage.close()
            ]);
            this.isInitialized = false;
            console.log('🔒 Hybrid context storage closed');
        }
        catch (error) {
            console.warn('Warning during hybrid storage close:', error);
        }
    }
    /**
     * Deletes blobs not referenced by any checkpoint metadata row.
     * NOTE(review): passes '' as sessionId to listCheckpointMetadata,
     * presumably meaning "all sessions" — confirm the metadata backend
     * treats an empty session id that way.
     * @returns {Promise<number>} number of blobs removed.
     */
    async cleanupOrphanedBlobs() {
        this.ensureInitialized();
        try {
            const allCheckpoints = await this.metadataStorage.listCheckpointMetadata('', 10000, 0);
            const referencedPaths = allCheckpoints.map(checkpoint => checkpoint.blobPath);
            const cleanedCount = await this.blobStorage.cleanupOrphanedBlobs(referencedPaths);
            if (cleanedCount > 0) {
                console.log(`🧹 Cleaned up ${cleanedCount} orphaned blobs`);
            }
            return cleanedCount;
        }
        catch (error) {
            throw new Error(`Failed to cleanup orphaned blobs: ${error}`, { cause: error });
        }
    }
    /**
     * Best-effort bump of a session's lastAccessedAt; failures are logged and
     * swallowed on purpose so reads never fail because of bookkeeping.
     */
    async updateSessionAccess(sessionId) {
        try {
            const session = await this.metadataStorage.getSessionInfo(sessionId);
            if (session) {
                const updatedSession = {
                    ...session,
                    lastAccessedAt: new Date().toISOString()
                };
                await this.metadataStorage.upsertSessionInfo(updatedSession);
            }
        }
        catch (error) {
            console.warn(`Warning: Failed to update session access time for ${sessionId}:`, error);
        }
    }
    /** @throws {Error} when initialize() has not completed. */
    ensureInitialized() {
        if (!this.isInitialized) {
            throw new Error('HybridContextStorage not initialized');
        }
    }
}
|
|
384
|
+
exports.HybridContextStorage = HybridContextStorage;
|
|
385
|
+
/**
 * Builds a ready-to-use HybridContextStorage wired to the default backends
 * (SQLite metadata store + filesystem blob store). The backend modules are
 * loaded lazily via dynamic require so this module can be loaded without
 * pulling them in.
 * @param {object} [config] - Partial configuration merged over the defaults.
 * @returns {Promise<HybridContextStorage>} an initialized storage instance.
 */
async function createDefaultHybridContextStorage(config) {
    const lazyLoad = (modulePath) => Promise.resolve().then(() => __importStar(require(modulePath)));
    const { createDefaultContextStorageConfig } = await lazyLoad('./context-storage');
    const { SqliteMetadataStorage } = await lazyLoad('./sqlite-metadata-storage');
    const { FileSystemBlobStorage } = await lazyLoad('./filesystem-blob-storage');
    // Caller overrides win over defaults; with no overrides, use defaults as-is.
    const baseConfig = createDefaultContextStorageConfig();
    const fullConfig = config ? { ...baseConfig, ...config } : baseConfig;
    const storage = new HybridContextStorage(
        new SqliteMetadataStorage(fullConfig),
        new FileSystemBlobStorage(fullConfig),
        fullConfig
    );
    await storage.initialize();
    return storage;
}
|
|
396
|
+
/**
 * Wraps caller-supplied metadata and blob backends in a HybridContextStorage
 * and initializes it before returning.
 * @param {object} metadataStorage - Metadata backend implementation.
 * @param {object} blobStorage - Blob backend implementation.
 * @param {object} config - Full storage configuration.
 * @returns {Promise<HybridContextStorage>} an initialized storage instance.
 */
async function createCustomHybridContextStorage(metadataStorage, blobStorage, config) {
    const storage = new HybridContextStorage(metadataStorage, blobStorage, config);
    await storage.initialize();
    return storage;
}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
-- Migration: Initial schema for context management
-- Version: 1
-- Description: Creates the initial tables for context persistence

-- Sessions table
-- One row per logical work session. total_size_bytes is maintained by the
-- application layer, not by database triggers.
CREATE TABLE IF NOT EXISTS sessions (
  id TEXT PRIMARY KEY,
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  last_accessed_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  workflow_id TEXT,
  tags TEXT, -- JSON array stored as text
  total_size_bytes INTEGER DEFAULT 0
);

-- Checkpoint metadata table
-- The context payload itself lives outside the database; blob_path points at
-- it, and context_hash/context_size_bytes support integrity checks.
-- NOTE: classification_info, created_by_operation and compression_ratio are
-- already defined here in the initial schema, so later migrations must not
-- re-add them with ALTER TABLE (SQLite has no ADD COLUMN IF NOT EXISTS).
CREATE TABLE IF NOT EXISTS checkpoint_metadata (
  id TEXT PRIMARY KEY,
  session_id TEXT NOT NULL,
  name TEXT,
  agent_id TEXT,
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  tags TEXT, -- JSON array stored as text
  context_size_bytes INTEGER NOT NULL,
  context_hash TEXT NOT NULL,
  blob_path TEXT NOT NULL,
  status TEXT DEFAULT 'active',
  classification_info TEXT, -- JSON object stored as text
  created_by_operation TEXT DEFAULT NULL,
  compression_ratio REAL DEFAULT 1.0,
  FOREIGN KEY (session_id) REFERENCES sessions(id) ON DELETE CASCADE
);

-- Indexes for performance
CREATE INDEX IF NOT EXISTS idx_checkpoint_session ON checkpoint_metadata(session_id);
CREATE INDEX IF NOT EXISTS idx_checkpoint_created ON checkpoint_metadata(created_at);
CREATE INDEX IF NOT EXISTS idx_checkpoint_status ON checkpoint_metadata(status);
CREATE INDEX IF NOT EXISTS idx_session_workflow ON sessions(workflow_id);
CREATE INDEX IF NOT EXISTS idx_session_accessed ON sessions(last_accessed_at);
|
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
-- Enhanced Concurrency Safety for Native Context Management
-- Version 2: Adds active operations tracking and lock management
-- Based on Devil's Advocate Review recommendations

-- Update schema version
-- NOTE(review): assumes the migration runner has already created the
-- schema_version table (it is not created in migration 001).
INSERT OR IGNORE INTO schema_version (version, description) VALUES (2, 'Enhanced concurrency safety with active operations tracking');

-- =============================================================================
-- ACTIVE OPERATIONS TRACKING
-- =============================================================================

-- Table to track active operations for concurrency safety
CREATE TABLE IF NOT EXISTS active_operations (
  id TEXT PRIMARY KEY, -- Operation UUID
  session_id TEXT NOT NULL, -- Session being operated on
  operation_type TEXT NOT NULL CHECK (operation_type IN ('save', 'load', 'delete', 'cleanup')), -- Type of operation
  started_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, -- When operation started
  heartbeat_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, -- Last heartbeat timestamp
  timeout_ms INTEGER NOT NULL DEFAULT 5000, -- Timeout in milliseconds
  metadata TEXT, -- JSON metadata about operation
  FOREIGN KEY (session_id) REFERENCES sessions(id) ON DELETE CASCADE
);

-- Indexes for efficient lock management
CREATE INDEX IF NOT EXISTS idx_active_operations_session ON active_operations(session_id);
CREATE INDEX IF NOT EXISTS idx_active_operations_heartbeat ON active_operations(heartbeat_at);
CREATE INDEX IF NOT EXISTS idx_active_operations_started ON active_operations(started_at);
CREATE INDEX IF NOT EXISTS idx_active_operations_type ON active_operations(operation_type);

-- =============================================================================
-- AUTOMATIC CLEANUP TRIGGERS
-- =============================================================================

-- Trigger to cleanup stale operations when new operations are inserted
CREATE TRIGGER IF NOT EXISTS cleanup_stale_operations_on_insert
AFTER INSERT ON active_operations
FOR EACH ROW
BEGIN
  -- Remove operations older than 10 minutes or with stale heartbeats (>5 minutes)
  DELETE FROM active_operations
  WHERE (
    heartbeat_at < datetime('now', '-5 minutes') OR
    started_at < datetime('now', '-10 minutes')
  ) AND id != NEW.id;
END;

-- Trigger to cleanup operations when sessions are deleted
CREATE TRIGGER IF NOT EXISTS cleanup_operations_on_session_delete
AFTER DELETE ON sessions
FOR EACH ROW
BEGIN
  DELETE FROM active_operations WHERE session_id = OLD.id;
END;

-- =============================================================================
-- SESSION LOCK MANAGEMENT
-- =============================================================================

-- Add session-level lock information to sessions table
-- (these columns are NOT in the 001 schema, so plain ADD COLUMN is safe on a
-- fresh database; the migrator is assumed to run each migration only once)
ALTER TABLE sessions ADD COLUMN locked_at DATETIME DEFAULT NULL;
ALTER TABLE sessions ADD COLUMN locked_by TEXT DEFAULT NULL; -- Operation ID holding the lock
ALTER TABLE sessions ADD COLUMN lock_timeout_at DATETIME DEFAULT NULL;

-- Index for efficient lock queries
CREATE INDEX IF NOT EXISTS idx_sessions_locked_at ON sessions(locked_at);
CREATE INDEX IF NOT EXISTS idx_sessions_lock_timeout ON sessions(lock_timeout_at);

-- Trigger to automatically release expired locks
CREATE TRIGGER IF NOT EXISTS release_expired_session_locks
AFTER INSERT ON active_operations
FOR EACH ROW
BEGIN
  -- Release locks that have timed out
  UPDATE sessions
  SET locked_at = NULL, locked_by = NULL, lock_timeout_at = NULL
  WHERE lock_timeout_at IS NOT NULL AND lock_timeout_at < CURRENT_TIMESTAMP;
END;

-- =============================================================================
-- ENHANCED CHECKPOINT METADATA
-- =============================================================================

-- FIX: created_by_operation, compression_ratio and classification_info are
-- already defined by migration 001's CREATE TABLE for checkpoint_metadata.
-- Re-adding them here with ALTER TABLE ADD COLUMN fails with
-- "duplicate column name" (SQLite has no ADD COLUMN IF NOT EXISTS) and aborts
-- the whole migration on a fresh database, so those ALTER statements were
-- removed.

-- Index for operation tracking
CREATE INDEX IF NOT EXISTS idx_checkpoints_created_by_operation ON checkpoint_metadata(created_by_operation);

-- =============================================================================
-- STORAGE QUOTA TRACKING
-- =============================================================================

-- Table for tracking storage quotas and usage
CREATE TABLE IF NOT EXISTS storage_quotas (
  id TEXT PRIMARY KEY DEFAULT 'global', -- Usually 'global' or session_id
  max_total_size INTEGER NOT NULL DEFAULT 10737418240, -- 10GB default
  max_checkpoints INTEGER NOT NULL DEFAULT 1000, -- 1000 checkpoints default
  warning_threshold REAL NOT NULL DEFAULT 0.8, -- Warn at 80%
  cleanup_threshold REAL NOT NULL DEFAULT 0.9, -- Auto-cleanup at 90%
  last_cleanup_at DATETIME DEFAULT NULL,
  created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
  updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Insert default global quota
INSERT OR IGNORE INTO storage_quotas (id, max_total_size, max_checkpoints)
VALUES ('global', 10737418240, 1000);

-- =============================================================================
-- PERFORMANCE MONITORING
-- =============================================================================

-- Table for tracking operation performance metrics
CREATE TABLE IF NOT EXISTS operation_metrics (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  operation_type TEXT NOT NULL, -- save, load, classify, compress
  duration_ms INTEGER NOT NULL, -- Operation duration
  context_size_bytes INTEGER DEFAULT NULL, -- Input size
  compressed_size_bytes INTEGER DEFAULT NULL, -- Output size (if applicable)
  compression_ratio REAL DEFAULT NULL, -- Compression ratio achieved
  session_id TEXT DEFAULT NULL, -- Associated session
  timestamp DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
  metadata TEXT DEFAULT NULL -- JSON with additional metrics
);

-- Indexes for performance analysis
CREATE INDEX IF NOT EXISTS idx_metrics_operation_type ON operation_metrics(operation_type);
CREATE INDEX IF NOT EXISTS idx_metrics_timestamp ON operation_metrics(timestamp);
CREATE INDEX IF NOT EXISTS idx_metrics_session ON operation_metrics(session_id);

-- Trigger to automatically cleanup old metrics (keep last 30 days)
CREATE TRIGGER IF NOT EXISTS cleanup_old_metrics
AFTER INSERT ON operation_metrics
FOR EACH ROW
WHEN (NEW.id % 100 = 0) -- Only run cleanup every 100 inserts for performance
BEGIN
  DELETE FROM operation_metrics
  WHERE timestamp < datetime('now', '-30 days');
END;

-- =============================================================================
-- ENHANCED VIEWS FOR MONITORING
-- =============================================================================

-- View for active sessions with lock status
CREATE VIEW IF NOT EXISTS active_sessions_with_locks AS
SELECT
  s.*,
  ao.operation_type as active_operation,
  ao.started_at as operation_started_at,
  ao.heartbeat_at as last_heartbeat,
  CASE
    WHEN s.locked_at IS NOT NULL AND s.lock_timeout_at > CURRENT_TIMESTAMP THEN 'locked'
    WHEN s.locked_at IS NOT NULL AND s.lock_timeout_at <= CURRENT_TIMESTAMP THEN 'expired_lock'
    WHEN ao.id IS NOT NULL THEN 'operation_active'
    ELSE 'available'
  END as lock_status
FROM sessions s
LEFT JOIN active_operations ao ON s.id = ao.session_id
WHERE s.last_accessed_at > datetime('now', '-7 days'); -- Only recent sessions

-- View for storage usage summary
CREATE VIEW IF NOT EXISTS storage_usage_summary AS
SELECT
  'global' as scope,
  COUNT(DISTINCT s.id) as total_sessions,
  COUNT(c.id) as total_checkpoints,
  COALESCE(SUM(s.total_size_bytes), 0) as total_size_bytes,
  COALESCE(AVG(c.context_size_bytes), 0) as avg_checkpoint_size,
  MAX(c.created_at) as latest_checkpoint,
  q.max_total_size,
  q.max_checkpoints,
  CAST(COALESCE(SUM(s.total_size_bytes), 0) AS REAL) / q.max_total_size as size_utilization,
  CAST(COUNT(c.id) AS REAL) / q.max_checkpoints as checkpoint_utilization
FROM storage_quotas q
LEFT JOIN sessions s ON q.id = 'global'
LEFT JOIN checkpoint_metadata c ON s.id = c.session_id
WHERE q.id = 'global'
GROUP BY q.id, q.max_total_size, q.max_checkpoints;

-- View for recent operation performance
-- FIX: the original used PERCENTILE_CONT(...) WITHIN GROUP (...), which
-- SQLite does not support, so the CREATE VIEW statement itself failed and
-- aborted the migration. The p50/p95 columns are kept (same names) but are
-- now computed with ordered-offset correlated subqueries over the same
-- 24-hour window; p95 yields NULL when the computed offset falls past the
-- last row, which is acceptable for a monitoring view.
CREATE VIEW IF NOT EXISTS recent_operation_performance AS
SELECT
  om.operation_type,
  COUNT(*) as operation_count,
  AVG(om.duration_ms) as avg_duration_ms,
  MIN(om.duration_ms) as min_duration_ms,
  MAX(om.duration_ms) as max_duration_ms,
  (SELECT m.duration_ms FROM operation_metrics m
     WHERE m.operation_type = om.operation_type
       AND m.timestamp > datetime('now', '-24 hours')
     ORDER BY m.duration_ms
     LIMIT 1
     OFFSET CAST((SELECT COUNT(*) FROM operation_metrics c
                    WHERE c.operation_type = om.operation_type
                      AND c.timestamp > datetime('now', '-24 hours')) * 0.50 AS INTEGER)
  ) as p50_duration_ms,
  (SELECT m.duration_ms FROM operation_metrics m
     WHERE m.operation_type = om.operation_type
       AND m.timestamp > datetime('now', '-24 hours')
     ORDER BY m.duration_ms
     LIMIT 1
     OFFSET CAST((SELECT COUNT(*) FROM operation_metrics c
                    WHERE c.operation_type = om.operation_type
                      AND c.timestamp > datetime('now', '-24 hours')) * 0.95 AS INTEGER)
  ) as p95_duration_ms,
  AVG(CASE WHEN om.compression_ratio IS NOT NULL THEN om.compression_ratio END) as avg_compression_ratio,
  COUNT(CASE WHEN om.duration_ms > 1000 THEN 1 END) as slow_operations_count
FROM operation_metrics om
WHERE om.timestamp > datetime('now', '-24 hours')
GROUP BY om.operation_type;

-- =============================================================================
-- INTEGRITY CONSTRAINTS
-- =============================================================================

-- Ensure no duplicate active operations per session for save operations
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_save_operations
ON active_operations(session_id)
WHERE operation_type = 'save';

-- Ensure checkpoint metadata integrity
CREATE INDEX IF NOT EXISTS idx_checkpoints_hash_unique ON checkpoint_metadata(context_hash, session_id);

-- =============================================================================
-- CLEANUP PROCEDURES
-- =============================================================================

-- Create a trigger for automatic maintenance
CREATE TRIGGER IF NOT EXISTS automatic_maintenance
AFTER INSERT ON sessions
FOR EACH ROW
WHEN (NEW.rowid % 50 = 0) -- Run maintenance every 50 new sessions
BEGIN
  -- Update storage quota usage tracking
  UPDATE storage_quotas
  SET updated_at = CURRENT_TIMESTAMP
  WHERE id = 'global';

  -- Clean up orphaned operations
  DELETE FROM active_operations
  WHERE session_id NOT IN (SELECT id FROM sessions);

  -- Clean up very old sessions with no checkpoints
  DELETE FROM sessions
  WHERE id NOT IN (SELECT DISTINCT session_id FROM checkpoint_metadata)
  AND last_accessed_at < datetime('now', '-90 days');
END;
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
-- Migration: Classification Overrides
-- Version: 3
-- Description: Adds table for session-specific classification overrides

-- Update schema version
-- NOTE(review): assumes the migration runner has already created the
-- schema_version table (it is not created in migration 001).
INSERT OR IGNORE INTO schema_version (version, description) VALUES (3, 'Classification overrides');

-- Classification overrides table
-- Lets a session pin the classification for a specific context key.
-- UNIQUE(session_id, context_key) means there is at most one override per key
-- per session; overrides are removed automatically when the session is
-- deleted via ON DELETE CASCADE.
CREATE TABLE IF NOT EXISTS classification_overrides (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  session_id TEXT NOT NULL,
  context_key TEXT NOT NULL,
  classification TEXT NOT NULL,
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  FOREIGN KEY (session_id) REFERENCES sessions(id) ON DELETE CASCADE,
  UNIQUE(session_id, context_key)
);

-- Index for performance
CREATE INDEX IF NOT EXISTS idx_classification_session ON classification_overrides(session_id);
|