@stackmemoryai/stackmemory 0.3.13 → 0.3.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,762 @@
1
+ import Database from "better-sqlite3";
2
+ import {
3
+ S3Client,
4
+ PutObjectCommand,
5
+ GetObjectCommand
6
+ } from "@aws-sdk/client-s3";
7
+ import { createClient as createRedisClient } from "redis";
8
+ import { Pool } from "pg";
9
+ import * as zlib from "zlib";
10
+ import { promisify } from "util";
11
+ import { v4 as uuidv4 } from "uuid";
12
+ import { Logger } from "../monitoring/logger.js";
13
// Stand-in for a real LZ4 codec: both directions are identity transforms, so
// items tagged "lz4" are stored as raw bytes even though compressData flags
// them compressed. decompressData relies on this same identity behavior.
const lz4 = {
  encode: (data) => data,
  // Placeholder
  decode: (data) => data
  // Placeholder
};
// Promise-returning wrappers over zlib's callback API; used for both the
// "gzip" codec and the "zstd" fallback (see compressData).
const gzip = promisify(zlib.gzip);
const gunzip = promisify(zlib.gunzip);
21
// Transpiled-enum pattern: augment any pre-existing binding, else start from
// an empty object. Values are the tier names used throughout the schema.
var StorageTier = /* @__PURE__ */ (() => {
  const tiers = StorageTier || {};
  tiers["YOUNG"] = "young";
  tiers["MATURE"] = "mature";
  tiers["OLD"] = "old";
  tiers["REMOTE"] = "remote";
  return tiers;
})();
28
class TwoTierStorageSystem {
  db;                 // better-sqlite3 handle for all local tiers
  redisClient;        // optional hot cache; connected lazily in initialize()
  timeseriesPool;     // optional pg Pool for remote timeseries; set in initialize()
  s3Client;           // S3 client for the remote archive tier
  logger;
  config;
  migrationTimer;     // setInterval handle owned by startMigrationWorker()
  offlineQueue = [];  // uploads deferred until connectivity returns
  stats;              // cached aggregates; refreshed by updateStats()
  /**
   * Wires up local SQLite storage and the S3 client synchronously.
   * Remote connections (Redis, timeseries) are deferred to initialize().
   * @param {object} config - storage configuration (see defaultTwoTierConfig)
   */
  constructor(config) {
    this.config = config;
    this.logger = new Logger("TwoTierStorage");
    this.db = new Database(config.local.dbPath);
    this.initializeLocalStorage();
    this.s3Client = new S3Client({
      region: config.remote.s3.region,
      // When explicit keys are absent, pass undefined so the SDK resolves
      // credentials from its environment.
      credentials: config.remote.s3.accessKeyId && config.remote.s3.secretAccessKey ? {
        accessKeyId: config.remote.s3.accessKeyId,
        secretAccessKey: config.remote.s3.secretAccessKey
      } : void 0
    });
    // remoteUsageMB is initialized here but never updated by updateStats().
    this.stats = {
      localUsageMB: 0,
      remoteUsageMB: 0,
      tierDistribution: {
        ["young" /* YOUNG */]: 0,
        ["mature" /* MATURE */]: 0,
        ["old" /* OLD */]: 0,
        ["remote" /* REMOTE */]: 0
      },
      compressionRatio: 1,
      migrationsPending: 0,
      lastMigration: null
    };
  }
64
  /**
   * Create the local SQLite schema and tune pragmas.
   * WAL + synchronous=NORMAL trades a little durability for write throughput.
   * Tables: storage_items (tiered payload blobs), migration_queue (pending
   * tier moves processed by the migration worker), storage_metrics (usage
   * snapshots appended by updateStats()).
   */
  initializeLocalStorage() {
    this.db.pragma("journal_mode = WAL");
    this.db.pragma("synchronous = NORMAL");
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS storage_items (
        id TEXT PRIMARY KEY,
        frame_id TEXT NOT NULL,
        tier TEXT NOT NULL,
        data BLOB NOT NULL,
        metadata TEXT,
        size_bytes INTEGER,
        importance_score REAL DEFAULT 0.5,
        access_count INTEGER DEFAULT 0,
        last_accessed DATETIME DEFAULT CURRENT_TIMESTAMP,
        created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
        compressed BOOLEAN DEFAULT FALSE,
        compression_type TEXT
      );

      CREATE INDEX IF NOT EXISTS idx_tier_age ON storage_items (tier, created_at);
      CREATE INDEX IF NOT EXISTS idx_frame ON storage_items (frame_id);
      CREATE INDEX IF NOT EXISTS idx_importance ON storage_items (importance_score DESC);

      CREATE TABLE IF NOT EXISTS migration_queue (
        id TEXT PRIMARY KEY,
        item_id TEXT NOT NULL,
        source_tier TEXT NOT NULL,
        target_tier TEXT NOT NULL,
        priority INTEGER DEFAULT 5,
        attempts INTEGER DEFAULT 0,
        status TEXT DEFAULT 'pending',
        created_at DATETIME DEFAULT CURRENT_TIMESTAMP
      );

      CREATE INDEX IF NOT EXISTS idx_status_priority ON migration_queue (status, priority DESC);

      CREATE TABLE IF NOT EXISTS storage_metrics (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        tier TEXT NOT NULL,
        item_count INTEGER,
        total_size_mb REAL,
        avg_compression_ratio REAL,
        measured_at DATETIME DEFAULT CURRENT_TIMESTAMP
      );
    `);
  }
110
  /**
   * Connect optional remote services and start background work.
   * Redis and the timeseries pool are created only when their config is
   * present; S3 is always available (constructed in the constructor).
   * Also starts the migration worker, restores the offline queue from disk,
   * and primes the stats cache.
   * @throws rethrows any initialization failure after logging it
   */
  async initialize() {
    try {
      if (this.config.remote.redis?.url) {
        this.redisClient = createRedisClient({
          url: this.config.remote.redis.url
        });
        await this.redisClient.connect();
        this.logger.info("Redis connected for hot cache");
      }
      if (this.config.remote.timeseries?.connectionString) {
        this.timeseriesPool = new Pool({
          connectionString: this.config.remote.timeseries.connectionString,
          max: 5
        });
        await this.initializeTimeseriesSchema();
        this.logger.info("TimeSeries DB connected");
      }
      this.startMigrationWorker();
      await this.loadOfflineQueue();
      await this.updateStats();
      this.logger.info("Two-tier storage system initialized");
    } catch (error) {
      this.logger.error("Failed to initialize storage", { error });
      throw error;
    }
  }
136
  /**
   * Create the remote timeseries table and its monthly hypertable if missing.
   * NOTE(review): create_hypertable is a TimescaleDB function, so the target
   * Postgres presumably has the TimescaleDB extension installed — confirm.
   */
  async initializeTimeseriesSchema() {
    if (!this.timeseriesPool) return;
    const client = await this.timeseriesPool.connect();
    try {
      await client.query(`
        CREATE TABLE IF NOT EXISTS frame_timeseries (
          time TIMESTAMPTZ NOT NULL,
          frame_id TEXT NOT NULL,
          project_id TEXT NOT NULL,
          data JSONB,
          metrics JSONB,
          importance_score REAL
        );

        SELECT create_hypertable('frame_timeseries', 'time',
          chunk_time_interval => INTERVAL '1 month',
          if_not_exists => TRUE
        );

        CREATE INDEX IF NOT EXISTS idx_frame_time
          ON frame_timeseries (frame_id, time DESC);
      `);
    } finally {
      // Always return the connection to the pool, even on query failure.
      client.release();
    }
  }
162
+ /**
163
+ * Store a frame with automatic tier selection
164
+ */
165
+ async storeFrame(frame, events, anchors) {
166
+ const storageId = uuidv4();
167
+ const data = { frame, events, anchors };
168
+ const tier = this.selectTier(frame);
169
+ const importanceScore = this.calculateImportance(frame, events, anchors);
170
+ const compressed = await this.compressData(data, tier);
171
+ const stmt = this.db.prepare(`
172
+ INSERT INTO storage_items (
173
+ id, frame_id, tier, data, metadata, size_bytes,
174
+ importance_score, compressed, compression_type
175
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
176
+ `);
177
+ stmt.run(
178
+ storageId,
179
+ frame.frame_id,
180
+ tier,
181
+ compressed.data,
182
+ JSON.stringify({
183
+ originalSize: compressed.originalSize,
184
+ compressedSize: compressed.compressedSize
185
+ }),
186
+ compressed.compressedSize,
187
+ importanceScore,
188
+ compressed.compressed ? 1 : 0,
189
+ compressed.compressionType
190
+ );
191
+ if (tier === "young" /* YOUNG */ && this.redisClient) {
192
+ await this.redisClient.setex(
193
+ `frame:${frame.frame_id}`,
194
+ this.config.remote.redis?.ttlSeconds || 300,
195
+ JSON.stringify(data)
196
+ );
197
+ }
198
+ if (importanceScore > 0.7) {
199
+ await this.queueRemoteUpload(storageId, data, "high");
200
+ }
201
+ return storageId;
202
+ }
203
+ /**
204
+ * Retrieve a frame from any tier
205
+ */
206
+ async retrieveFrame(frameId) {
207
+ if (this.redisClient) {
208
+ try {
209
+ const cached = await this.redisClient.get(`frame:${frameId}`);
210
+ if (cached) {
211
+ this.updateAccessCount(frameId);
212
+ return JSON.parse(cached);
213
+ }
214
+ } catch (error) {
215
+ this.logger.warn("Redis retrieval failed", { frameId, error });
216
+ }
217
+ }
218
+ const local = this.db.prepare(
219
+ `
220
+ SELECT data, compressed, compression_type
221
+ FROM storage_items
222
+ WHERE frame_id = ?
223
+ ORDER BY created_at DESC
224
+ LIMIT 1
225
+ `
226
+ ).get(frameId);
227
+ if (local) {
228
+ this.updateAccessCount(frameId);
229
+ const data = local.compressed ? await this.decompressData(local.data, local.compression_type) : JSON.parse(local.data);
230
+ if (this.redisClient) {
231
+ await this.redisClient.setex(
232
+ `frame:${frameId}`,
233
+ 300,
234
+ // 5 minute cache
235
+ JSON.stringify(data)
236
+ );
237
+ }
238
+ return data;
239
+ }
240
+ return this.retrieveFromRemote(frameId);
241
+ }
242
+ /**
243
+ * Select appropriate tier based on frame age and characteristics
244
+ */
245
+ selectTier(frame) {
246
+ if (!frame.created_at || isNaN(frame.created_at)) {
247
+ this.logger.warn("Invalid frame timestamp, defaulting to YOUNG tier");
248
+ return "young" /* YOUNG */;
249
+ }
250
+ const ageHours = (Date.now() - frame.created_at) / (1e3 * 60 * 60);
251
+ if (ageHours < 24) return "young" /* YOUNG */;
252
+ if (ageHours < 168) return "mature" /* MATURE */;
253
+ if (ageHours < 720) return "old" /* OLD */;
254
+ return "remote" /* REMOTE */;
255
+ }
256
+ /**
257
+ * Calculate importance score for migration decisions
258
+ */
259
+ calculateImportance(frame, events, anchors) {
260
+ let score = 0.5;
261
+ const decisions = anchors.filter((a) => a.type === "DECISION");
262
+ score += decisions.length * 0.1;
263
+ score += Math.min(events.length * 0.01, 0.2);
264
+ const ageHours = (Date.now() - frame.created_at) / (1e3 * 60 * 60);
265
+ if (ageHours < 24) score += 0.2;
266
+ else if (ageHours < 168) score += 0.1;
267
+ const errors = events.filter((e) => e.event_type === "error");
268
+ if (errors.length > 0) score += 0.2;
269
+ return Math.min(score, 1);
270
+ }
271
  /**
   * Compress a payload according to the tier's configured compression type.
   * Quirks preserved by decompressData: "lz4" uses the identity placeholder
   * codec above (bytes are stored raw but the result is still flagged
   * compressed/lz4), and "zstd" actually falls back to gzip level 9.
   * Unknown types fall through to raw JSON bytes, also flagged compressed.
   * @param {object} data - JSON-serializable payload
   * @param {string} tier - StorageTier value used to look up tier config
   * @returns {Promise<{data: Buffer, originalSize: number,
   *   compressedSize: number, compressed: boolean, compressionType: string}>}
   */
  async compressData(data, tier) {
    const json = JSON.stringify(data);
    const originalSize = Buffer.byteLength(json);
    const tierConfig = this.config.local.tiers.find((t) => t.name === tier);
    // No tier config (e.g. REMOTE has none in defaultTwoTierConfig) or an
    // explicit "none" means store uncompressed.
    if (!tierConfig || tierConfig.compressionType === "none") {
      return {
        data: Buffer.from(json),
        originalSize,
        compressedSize: originalSize,
        compressed: false,
        compressionType: "none"
      };
    }
    let compressed;
    switch (tierConfig.compressionType) {
      case "lz4":
        compressed = lz4.encode(Buffer.from(json));
        break;
      case "zstd":
        compressed = await gzip(json, { level: 9 });
        break;
      case "gzip":
        compressed = await gzip(json);
        break;
      default:
        compressed = Buffer.from(json);
    }
    return {
      data: compressed,
      originalSize,
      compressedSize: compressed.length,
      compressed: true,
      compressionType: tierConfig.compressionType
    };
  }
309
  /**
   * Inverse of compressData: "lz4" applies the identity placeholder codec,
   * "zstd" and "gzip" both gunzip (zstd payloads were actually gzipped),
   * anything else is treated as raw JSON bytes.
   * @param {Buffer} data
   * @param {string} compressionType
   * @returns {Promise<object>} the parsed payload
   */
  async decompressData(data, compressionType) {
    let decompressed;
    switch (compressionType) {
      case "lz4":
        decompressed = lz4.decode(data);
        break;
      case "zstd":
      case "gzip":
        decompressed = await gunzip(data);
        break;
      default:
        decompressed = data;
    }
    return JSON.parse(decompressed.toString());
  }
327
+ /**
328
+ * Start background migration worker
329
+ */
330
+ startMigrationWorker() {
331
+ this.migrationTimer = setInterval(async () => {
332
+ try {
333
+ await this.processMigrations();
334
+ await this.checkMigrationTriggers();
335
+ await this.processOfflineQueue();
336
+ } catch (error) {
337
+ this.logger.error("Migration worker error", { error });
338
+ }
339
+ }, this.config.migration.intervalMs);
340
+ this.logger.info("Migration worker started");
341
+ }
342
  /**
   * Drain one batch of pending migrations, highest priority first (ties
   * oldest-first). Successful migrations are marked completed; failures bump
   * `attempts`. Note: in the failure UPDATE, the CASE reads the pre-update
   * `attempts` value (SQLite evaluates the RHS against the old row), so an
   * item is only marked 'failed' on the attempt after `attempts` reaches 3.
   */
  async processMigrations() {
    const pending = this.db.prepare(
      `
      SELECT * FROM migration_queue
      WHERE status = 'pending'
      ORDER BY priority DESC, created_at ASC
      LIMIT ?
    `
    ).all(this.config.migration.batchSize);
    for (const migration of pending) {
      try {
        await this.executeMigration(migration);
        this.db.prepare(
          `
          UPDATE migration_queue
          SET status = 'completed'
          WHERE id = ?
        `
        ).run(migration.id);
      } catch (error) {
        this.logger.error("Migration failed", { migration, error });
        this.db.prepare(
          `
          UPDATE migration_queue
          SET attempts = attempts + 1,
              status = CASE WHEN attempts >= 3 THEN 'failed' ELSE 'pending' END
          WHERE id = ?
        `
        ).run(migration.id);
      }
    }
    this.stats.lastMigration = /* @__PURE__ */ new Date();
  }
378
  /**
   * Evaluate configured migration triggers and enqueue tier moves.
   * 'age': push items older than threshold (hours) toward REMOTE.
   * 'size': when combined young+mature usage exceeds threshold (MB), demote
   * the 50 oldest such items one tier (young→mature, mature→old).
   * NOTE(review): the 'importance' trigger present in defaultTwoTierConfig is
   * not handled here — confirm whether importance-based deletion is
   * implemented elsewhere or simply ignored.
   */
  async checkMigrationTriggers() {
    for (const trigger of this.config.migration.triggers) {
      if (trigger.type === "age") {
        const items = this.db.prepare(
          `
          SELECT id, frame_id, tier
          FROM storage_items
          WHERE julianday('now') - julianday(created_at) > ?
            AND tier != ?
          LIMIT 100
        `
        ).all(trigger.threshold / 24, "remote" /* REMOTE */);
        // threshold is in hours; julianday difference is in days.
        for (const item of items) {
          this.queueMigration(item.id, item.tier, "remote" /* REMOTE */);
        }
      }
      if (trigger.type === "size") {
        const stats = this.db.prepare(
          `
          SELECT SUM(size_bytes) as total_size
          FROM storage_items
          WHERE tier IN ('young', 'mature')
        `
        ).get();
        if (stats.total_size > trigger.threshold * 1024 * 1024) {
          const items = this.db.prepare(
            `
            SELECT id, tier FROM storage_items
            WHERE tier IN ('young', 'mature')
            ORDER BY created_at ASC
            LIMIT 50
          `
          ).all();
          for (const item of items) {
            const targetTier = item.tier === "young" /* YOUNG */ ? "mature" /* MATURE */ : "old" /* OLD */;
            this.queueMigration(item.id, item.tier, targetTier);
          }
        }
      }
    }
  }
422
+ /**
423
+ * Queue a migration
424
+ */
425
+ queueMigration(itemId, sourceTier, targetTier, priority = 5) {
426
+ const id = uuidv4();
427
+ this.db.prepare(
428
+ `
429
+ INSERT INTO migration_queue (id, item_id, source_tier, target_tier, priority)
430
+ VALUES (?, ?, ?, ?, ?)
431
+ `
432
+ ).run(id, itemId, sourceTier, targetTier, priority);
433
+ this.stats.migrationsPending++;
434
+ }
435
  /**
   * Execute a single migration.
   * Target REMOTE: upload the decompressed payload to S3, then delete the
   * local row. Any other target: re-compress with the target tier's codec
   * and update the row in place.
   * @param {object} migration - a migration_queue row
   * @throws {Error} when the referenced storage item no longer exists
   */
  async executeMigration(migration) {
    const item = this.db.prepare(
      `
      SELECT * FROM storage_items WHERE id = ?
    `
    ).get(migration.item_id);
    if (!item) {
      throw new Error(`Item not found: ${migration.item_id}`);
    }
    // Always round-trip through the decoded payload so the target codec is
    // applied to plain JSON, not to already-compressed bytes.
    const data = item.compressed ? await this.decompressData(item.data, item.compression_type) : JSON.parse(item.data);
    if (migration.target_tier === "remote" /* REMOTE */) {
      await this.uploadToS3(item.frame_id, data);
      this.db.prepare(
        `
        DELETE FROM storage_items WHERE id = ?
      `
      ).run(migration.item_id);
    } else {
      const compressed = await this.compressData(data, migration.target_tier);
      this.db.prepare(
        `
        UPDATE storage_items
        SET tier = ?, data = ?, size_bytes = ?,
            compressed = ?, compression_type = ?
        WHERE id = ?
      `
      ).run(
        migration.target_tier,
        compressed.data,
        compressed.compressedSize,
        compressed.compressed ? 1 : 0,
        compressed.compressionType,
        migration.item_id
      );
    }
    this.logger.info("Migration completed", {
      itemId: migration.item_id,
      from: migration.source_tier,
      to: migration.target_tier
    });
  }
479
+ /**
480
+ * Upload data to S3
481
+ */
482
+ async uploadToS3(frameId, data) {
483
+ const date = /* @__PURE__ */ new Date();
484
+ const partition = `${date.getFullYear()}/${String(date.getMonth() + 1).padStart(2, "0")}`;
485
+ const key = `frames/${partition}/${frameId}.json.gz`;
486
+ const compressed = await gzip(JSON.stringify(data));
487
+ const command = new PutObjectCommand({
488
+ Bucket: this.config.remote.s3.bucket,
489
+ Key: key,
490
+ Body: compressed,
491
+ ContentType: "application/json",
492
+ ContentEncoding: "gzip",
493
+ Metadata: {
494
+ frameId,
495
+ uploadedAt: date.toISOString()
496
+ }
497
+ });
498
+ await this.s3Client.send(command);
499
+ }
500
  /**
   * Retrieve a frame from remote storage.
   * Tries the timeseries DB first (latest row for the frame), then probes
   * the current and previous two monthly S3 partitions, since the S3 key
   * embeds the upload month which is unknown at read time.
   * @param {string} frameId
   * @returns {Promise<object|null>} payload or null when not found
   */
  async retrieveFromRemote(frameId) {
    if (this.timeseriesPool) {
      const client = await this.timeseriesPool.connect();
      try {
        const result = await client.query(
          `
          SELECT data FROM frame_timeseries
          WHERE frame_id = $1
          ORDER BY time DESC
          LIMIT 1
        `,
          [frameId]
        );
        if (result.rows.length > 0) {
          return result.rows[0].data;
        }
      } finally {
        client.release();
      }
    }
    const date = /* @__PURE__ */ new Date();
    for (let i = 0; i < 3; i++) {
      const checkDate = new Date(date);
      checkDate.setMonth(checkDate.getMonth() - i);
      const partition = `${checkDate.getFullYear()}/${String(checkDate.getMonth() + 1).padStart(2, "0")}`;
      const key = `frames/${partition}/${frameId}.json.gz`;
      try {
        const command = new GetObjectCommand({
          Bucket: this.config.remote.s3.bucket,
          Key: key
        });
        const response = await this.s3Client.send(command);
        if (!response.Body) continue;
        const body = await response.Body.transformToByteArray();
        const decompressed = await gunzip(Buffer.from(body));
        return JSON.parse(decompressed.toString());
      } catch {
        // Key absent (or fetch failed) for this month — try an older partition.
      }
    }
    return null;
  }
544
+ /**
545
+ * Queue for offline upload
546
+ */
547
+ async queueRemoteUpload(id, data, priority) {
548
+ this.offlineQueue.push({
549
+ id,
550
+ data,
551
+ priority,
552
+ timestamp: Date.now()
553
+ });
554
+ if (this.config.migration.offlineQueuePath) {
555
+ await this.saveOfflineQueue();
556
+ }
557
+ }
558
+ /**
559
+ * Process offline upload queue
560
+ */
561
+ async processOfflineQueue() {
562
+ if (this.offlineQueue.length === 0) return;
563
+ const isOnline = await this.checkConnectivity();
564
+ if (!isOnline) return;
565
+ this.offlineQueue.sort((a, b) => {
566
+ if (a.priority !== b.priority) {
567
+ return a.priority === "high" ? -1 : 1;
568
+ }
569
+ return a.timestamp - b.timestamp;
570
+ });
571
+ const batch = this.offlineQueue.splice(0, 10);
572
+ for (const item of batch) {
573
+ try {
574
+ await this.uploadToS3(item.id, item.data);
575
+ } catch {
576
+ this.offlineQueue.push(item);
577
+ }
578
+ }
579
+ await this.saveOfflineQueue();
580
+ }
581
+ /**
582
+ * Check connectivity to remote services
583
+ */
584
+ async checkConnectivity() {
585
+ try {
586
+ const response = await fetch("https://s3.amazonaws.com");
587
+ return response.ok;
588
+ } catch {
589
+ return false;
590
+ }
591
+ }
592
+ /**
593
+ * Update access count for cache promotion
594
+ */
595
+ updateAccessCount(frameId) {
596
+ this.db.prepare(
597
+ `
598
+ UPDATE storage_items
599
+ SET access_count = access_count + 1,
600
+ last_accessed = CURRENT_TIMESTAMP
601
+ WHERE frame_id = ?
602
+ `
603
+ ).run(frameId);
604
+ }
605
+ /**
606
+ * Save offline queue to disk
607
+ */
608
+ async saveOfflineQueue() {
609
+ if (!this.config.migration.offlineQueuePath) return;
610
+ const fs = await import("fs/promises");
611
+ await fs.writeFile(
612
+ this.config.migration.offlineQueuePath,
613
+ JSON.stringify(this.offlineQueue),
614
+ "utf-8"
615
+ );
616
+ }
617
+ /**
618
+ * Load offline queue from disk
619
+ */
620
+ async loadOfflineQueue() {
621
+ if (!this.config.migration.offlineQueuePath) return;
622
+ const fs = await import("fs/promises");
623
+ try {
624
+ const data = await fs.readFile(
625
+ this.config.migration.offlineQueuePath,
626
+ "utf-8"
627
+ );
628
+ this.offlineQueue = JSON.parse(data);
629
+ this.logger.info(
630
+ `Loaded ${this.offlineQueue.length} items from offline queue`
631
+ );
632
+ } catch {
633
+ }
634
+ }
635
  /**
   * Recompute aggregate statistics from the local database and append a
   * snapshot row to storage_metrics.
   * Mutates this.stats: per-tier item counts, local usage (MB), average
   * compression ratio (originalSize/compressedSize from item metadata), and
   * pending-migration count. remoteUsageMB is not updated here.
   */
  async updateStats() {
    const localStats = this.db.prepare(
      `
      SELECT
        tier,
        COUNT(*) as count,
        SUM(size_bytes) / 1048576.0 as size_mb
      FROM storage_items
      GROUP BY tier
    `
    ).all();
    this.stats.localUsageMB = 0;
    for (const stat of localStats) {
      this.stats.tierDistribution[stat.tier] = stat.count;
      this.stats.localUsageMB += stat.size_mb;
    }
    const compressionStats = this.db.prepare(
      `
      SELECT
        AVG(CAST(json_extract(metadata, '$.originalSize') AS REAL) /
            CAST(json_extract(metadata, '$.compressedSize') AS REAL)) as ratio
      FROM storage_items
      WHERE compressed = 1
    `
    ).get();
    // AVG over zero rows yields NULL; fall back to a neutral ratio of 1.
    this.stats.compressionRatio = compressionStats?.ratio || 1;
    const pending = this.db.prepare(
      `
      SELECT COUNT(*) as count
      FROM migration_queue
      WHERE status = 'pending'
    `
    ).get();
    this.stats.migrationsPending = pending.count;
    this.db.prepare(
      `
      INSERT INTO storage_metrics (tier, item_count, total_size_mb, avg_compression_ratio)
      VALUES ('all', ?, ?, ?)
    `
    ).run(
      Object.values(this.stats.tierDistribution).reduce((a, b) => a + b, 0),
      this.stats.localUsageMB,
      this.stats.compressionRatio
    );
  }
683
+ /**
684
+ * Get current storage statistics
685
+ */
686
+ async getStats() {
687
+ await this.updateStats();
688
+ return { ...this.stats };
689
+ }
690
  /**
   * Cleanup and shutdown: stop the migration worker, persist the offline
   * queue, then close Redis, the timeseries pool, and the SQLite handle.
   */
  async shutdown() {
    if (this.migrationTimer) {
      clearInterval(this.migrationTimer);
    }
    await this.saveOfflineQueue();
    if (this.redisClient) {
      await this.redisClient.quit();
    }
    if (this.timeseriesPool) {
      await this.timeseriesPool.end();
    }
    this.db.close();
    this.logger.info("Two-tier storage system shut down");
  }
}
708
// Ready-to-use configuration: ~2GB local budget across three age tiers, S3
// for the remote archive, and a once-a-minute migration worker.
const defaultTwoTierConfig = {
  local: {
    // NOTE(review): "~" is not expanded by SQLite — presumably callers expand
    // the home directory before constructing the system; verify.
    dbPath: "~/.stackmemory/two-tier.db",
    maxSizeGB: 2,
    tiers: [
      {
        name: "young" /* YOUNG */,
        maxAgeHours: 24,
        compressionType: "none",
        retentionPolicy: "complete",
        maxSizeMB: 500
      },
      {
        // "lz4" is currently the identity placeholder codec (no compression).
        name: "mature" /* MATURE */,
        maxAgeHours: 168,
        compressionType: "lz4",
        retentionPolicy: "selective",
        maxSizeMB: 1e3
      },
      {
        // "zstd" currently falls back to gzip level 9 (see compressData).
        name: "old" /* OLD */,
        maxAgeHours: 720,
        compressionType: "zstd",
        retentionPolicy: "critical",
        maxSizeMB: 500
      }
    ]
  },
  remote: {
    s3: {
      bucket: process.env.S3_BUCKET || "stackmemory-storage",
      region: process.env.AWS_REGION || "us-east-1"
    }
  },
  migration: {
    triggers: [
      { type: "age", threshold: 720, action: "migrate" },
      // 30 days
      { type: "size", threshold: 1500, action: "migrate" },
      // 1.5GB
      // NOTE(review): the 'importance' trigger is not handled by
      // checkMigrationTriggers — confirm intended behavior.
      { type: "importance", threshold: 0.3, action: "delete" }
      // Low importance
    ],
    batchSize: 50,
    intervalMs: 6e4,
    // 1 minute
    offlineQueuePath: "~/.stackmemory/offline-queue.json"
  }
};
757
+ export {
758
+ StorageTier,
759
+ TwoTierStorageSystem,
760
+ defaultTwoTierConfig
761
+ };
762
+ //# sourceMappingURL=two-tier-storage.js.map