@stackmemoryai/stackmemory 0.3.3 → 0.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +19 -18
  2. package/dist/cli/commands/chromadb.js +267 -0
  3. package/dist/cli/commands/chromadb.js.map +7 -0
  4. package/dist/cli/commands/context.js +15 -5
  5. package/dist/cli/commands/context.js.map +2 -2
  6. package/dist/cli/commands/infinite-storage.js +240 -0
  7. package/dist/cli/commands/infinite-storage.js.map +7 -0
  8. package/dist/cli/commands/skills.js +262 -0
  9. package/dist/cli/commands/skills.js.map +7 -0
  10. package/dist/cli/index.js +7 -0
  11. package/dist/cli/index.js.map +2 -2
  12. package/dist/core/context/frame-handoff-manager.js +399 -9
  13. package/dist/core/context/frame-handoff-manager.js.map +2 -2
  14. package/dist/core/context/frame-manager.js +11 -2
  15. package/dist/core/context/frame-manager.js.map +2 -2
  16. package/dist/core/storage/chromadb-adapter.js +346 -0
  17. package/dist/core/storage/chromadb-adapter.js.map +7 -0
  18. package/dist/core/storage/chromadb-simple.js +160 -0
  19. package/dist/core/storage/chromadb-simple.js.map +7 -0
  20. package/dist/core/storage/infinite-storage.js +443 -0
  21. package/dist/core/storage/infinite-storage.js.map +7 -0
  22. package/dist/core/utils/compression.js +79 -0
  23. package/dist/core/utils/compression.js.map +7 -0
  24. package/dist/features/tui/services/data-service.js +12 -40
  25. package/dist/features/tui/services/data-service.js.map +2 -2
  26. package/dist/features/tui/services/linear-task-reader.js +100 -0
  27. package/dist/features/tui/services/linear-task-reader.js.map +7 -0
  28. package/dist/features/web/client/stores/task-store.js +22 -0
  29. package/dist/features/web/client/stores/task-store.js.map +7 -0
  30. package/dist/features/web/server/index.js +171 -0
  31. package/dist/features/web/server/index.js.map +7 -0
  32. package/dist/skills/claude-skills.js +712 -0
  33. package/dist/skills/claude-skills.js.map +7 -0
  34. package/dist/skills/dashboard-launcher.js +212 -0
  35. package/dist/skills/dashboard-launcher.js.map +7 -0
  36. package/package.json +23 -31
package/dist/core/storage/chromadb-simple.js
@@ -0,0 +1,160 @@
+ import { ChromaClient } from "chromadb";
+ import { v4 as uuidv4 } from "uuid";
+ class ChromaDBAdapter {
+   client;
+   collection = null;
+   config;
+   fallbackStorage = /* @__PURE__ */ new Map();
+   constructor(config) {
+     this.config = config;
+     try {
+       if (config.apiUrl && config.apiUrl.includes("trychroma.com")) {
+         this.client = new ChromaClient({
+           ssl: true,
+           host: "api.trychroma.com",
+           port: 443,
+           headers: {
+             "X-Chroma-Token": config.apiKey
+           }
+         });
+       } else {
+         this.client = new ChromaClient();
+       }
+     } catch (error) {
+       console.log("Using in-memory ChromaDB client");
+       this.client = new ChromaClient();
+     }
+   }
+   async initialize() {
+     try {
+       const collectionName = this.config.collectionName || "stackmemory_context";
+       this.collection = await this.client.getOrCreateCollection({
+         name: collectionName,
+         metadata: {
+           description: "StackMemory Claude context",
+           version: "2.0.0"
+         }
+       });
+       console.log(`[${(/* @__PURE__ */ new Date()).toISOString()}] INFO: ChromaDB collection '${collectionName}' initialized`);
+     } catch (error) {
+       console.log(`ChromaDB service not available, using JSON fallback storage`);
+       this.collection = null;
+     }
+   }
+   async store(context) {
+     const id = context.id || `ctx_${uuidv4()}`;
+     const metadata = {
+       timestamp: context.timestamp || (/* @__PURE__ */ new Date()).toISOString(),
+       type: context.type || "context",
+       user_id: context.user_id || this.config.userId || "default",
+       project: context.project || "stackmemory"
+     };
+     if (context.session_id) metadata.session_id = context.session_id;
+     if (context.metadata) {
+       Object.entries(context.metadata).forEach(([key, value]) => {
+         if (value !== void 0 && value !== null) {
+           metadata[key] = value;
+         }
+       });
+     }
+     if (!this.collection) {
+       await this.initialize();
+     }
+     try {
+       if (this.collection) {
+         await this.collection.upsert({
+           ids: [id],
+           documents: [context.content || JSON.stringify(context)],
+           metadatas: [metadata]
+         });
+       } else {
+         await this.storeToJsonFallback(id, context, metadata);
+       }
+       return {
+         success: true,
+         id,
+         stored_at: (/* @__PURE__ */ new Date()).toISOString()
+       };
+     } catch (error) {
+       console.error("Failed to store context:", error.message);
+       try {
+         await this.storeToJsonFallback(id, context, metadata);
+         return { success: true, id, stored_at: (/* @__PURE__ */ new Date()).toISOString() };
+       } catch (fallbackError) {
+         return { success: false, error: fallbackError.message };
+       }
+     }
+   }
+   async storeToJsonFallback(id, context, metadata) {
+     const fs = await import("fs");
+     const path = await import("path");
+     const os = await import("os");
+     const storageDir = path.join(os.homedir(), ".stackmemory", "context-storage");
+     const storageFile = path.join(storageDir, "contexts.jsonl");
+     if (!fs.existsSync(storageDir)) {
+       fs.mkdirSync(storageDir, { recursive: true });
+     }
+     const entry = {
+       id,
+       content: context.content || JSON.stringify(context),
+       metadata,
+       stored_at: (/* @__PURE__ */ new Date()).toISOString()
+     };
+     fs.appendFileSync(storageFile, JSON.stringify(entry) + "\n");
+   }
+   async search(params) {
+     if (!this.collection) {
+       await this.initialize();
+     }
+     try {
+       const query = params.query || "";
+       const limit = params.limit || 10;
+       const where = {};
+       if (params.filter) {
+         Object.entries(params.filter).forEach(([key, value]) => {
+           if (value !== void 0 && value !== null) {
+             where[key] = value;
+           }
+         });
+       }
+       const results = await this.collection.query({
+         queryTexts: [query],
+         nResults: limit,
+         where: Object.keys(where).length > 0 ? where : void 0
+       });
+       const contexts = [];
+       if (results.documents && results.documents[0]) {
+         for (let i = 0; i < results.documents[0].length; i++) {
+           contexts.push({
+             id: results.ids[0][i],
+             content: results.documents[0][i],
+             metadata: results.metadatas?.[0]?.[i] || {},
+             distance: results.distances?.[0]?.[i] || 0
+           });
+         }
+       }
+       return contexts;
+     } catch (error) {
+       console.error("Failed to search contexts:", error.message);
+       return [];
+     }
+   }
+   async deleteCollection() {
+     if (this.collection) {
+       await this.client.deleteCollection({
+         name: this.config.collectionName || "stackmemory_context"
+       });
+       console.log("Collection deleted");
+     }
+   }
+   async listCollections() {
+     const collections = await this.client.listCollections();
+     return collections;
+   }
+ }
+ var chromadb_simple_default = ChromaDBAdapter;
+ export {
+   ChromaDBAdapter,
+   chromadb_simple_default as default
+ };
+ //# sourceMappingURL=chromadb-simple.js.map
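
For orientation, here is a minimal usage sketch of the adapter added above. It is an illustration, not part of the diff: the deep import path is an assumption about how the file might be reached, and the config fields (apiUrl, apiKey, collectionName, userId) are simply the ones the constructor and initialize() read.

// Hypothetical usage of the new ChromaDBAdapter (sketch, not package docs).
// The import path is an assumption; adjust to however the package exposes it.
import ChromaDBAdapter from "@stackmemoryai/stackmemory/dist/core/storage/chromadb-simple.js";

const adapter = new ChromaDBAdapter({
  apiUrl: "https://api.trychroma.com", // any URL containing "trychroma.com" selects the cloud client
  apiKey: process.env.CHROMA_API_KEY,
  collectionName: "stackmemory_context",
  userId: "default"
});

await adapter.initialize();

// store() upserts into the collection when ChromaDB is reachable; otherwise it
// appends to ~/.stackmemory/context-storage/contexts.jsonl (the JSON fallback above).
const result = await adapter.store({ content: "frame handoff notes", type: "context" });

// search() runs a semantic query; on failure it returns [] instead of throwing.
const hits = await adapter.search({ query: "frame handoff", limit: 5 });
console.log(result.id, hits.length);

Note that store() never rejects outright: it reports { success: false, error } only when both ChromaDB and the JSONL fallback fail.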
package/dist/core/storage/chromadb-simple.js.map
@@ -0,0 +1,7 @@
+ {
+ "version": 3,
+ "sources": ["../../../src/core/storage/chromadb-simple.ts"],
+ "sourcesContent": ["/**\n * Simplified ChromaDB adapter for Claude hooks\n */\n\nimport { ChromaClient } from 'chromadb';\nimport { v4 as uuidv4 } from 'uuid';\n\nexport class ChromaDBAdapter {\n private client: ChromaClient;\n private collection: any = null;\n private config: any;\n private fallbackStorage: Map<string, any> = new Map();\n \n constructor(config: any) {\n this.config = config;\n \n try {\n // Try to initialize ChromaDB client\n if (config.apiUrl && config.apiUrl.includes('trychroma.com')) {\n // Cloud configuration\n this.client = new ChromaClient({\n ssl: true,\n host: 'api.trychroma.com',\n port: 443,\n headers: {\n 'X-Chroma-Token': config.apiKey\n }\n });\n } else {\n // In-memory/local configuration (no external service needed)\n this.client = new ChromaClient();\n }\n } catch (error) {\n // Fallback to in-memory client\n console.log('Using in-memory ChromaDB client');\n this.client = new ChromaClient();\n }\n }\n\n async initialize(): Promise<void> {\n try {\n // Use a single collection for all context\n const collectionName = this.config.collectionName || 'stackmemory_context';\n \n // Get or create collection\n this.collection = await this.client.getOrCreateCollection({\n name: collectionName,\n metadata: {\n description: 'StackMemory Claude context',\n version: '2.0.0'\n }\n });\n \n console.log(`[${new Date().toISOString()}] INFO: ChromaDB collection '${collectionName}' initialized`);\n } catch (error: any) {\n console.log(`ChromaDB service not available, using JSON fallback storage`);\n this.collection = null; // Use fallback\n }\n }\n\n async store(context: any): Promise<any> {\n const id = context.id || `ctx_${uuidv4()}`;\n \n // Prepare metadata - only include non-undefined values\n const metadata: any = {\n timestamp: context.timestamp || new Date().toISOString(),\n type: context.type || 'context',\n user_id: context.user_id || this.config.userId || 'default',\n project: context.project || 'stackmemory'\n };\n\n // Add optional metadata if defined\n if (context.session_id) metadata.session_id = context.session_id;\n if (context.metadata) {\n Object.entries(context.metadata).forEach(([key, value]) => {\n if (value !== undefined && value !== null) {\n metadata[key] = value;\n }\n });\n }\n\n if (!this.collection) {\n await this.initialize();\n }\n\n try {\n if (this.collection) {\n // Store in ChromaDB if available\n await this.collection.upsert({\n ids: [id],\n documents: [context.content || JSON.stringify(context)],\n metadatas: [metadata]\n });\n } else {\n // Fallback to JSON file storage\n await this.storeToJsonFallback(id, context, metadata);\n }\n\n return { \n success: true, \n id,\n stored_at: new Date().toISOString()\n };\n } catch (error: any) {\n console.error('Failed to store context:', error.message);\n // Try fallback storage\n try {\n await this.storeToJsonFallback(id, context, metadata);\n return { success: true, id, stored_at: new Date().toISOString() };\n } catch (fallbackError: any) {\n return { success: false, error: fallbackError.message };\n }\n }\n }\n\n private async storeToJsonFallback(id: string, context: any, metadata: any): Promise<void> {\n const fs = await import('fs');\n const path = await import('path');\n const os = await import('os');\n \n const storageDir = path.join(os.homedir(), '.stackmemory', 'context-storage');\n const storageFile = path.join(storageDir, 'contexts.jsonl');\n\n // Ensure directory exists\n if (!fs.existsSync(storageDir)) {\n fs.mkdirSync(storageDir, { recursive: true });\n }\n\n const entry = {\n id,\n 
content: context.content || JSON.stringify(context),\n metadata,\n stored_at: new Date().toISOString()\n };\n\n // Append to JSONL file\n fs.appendFileSync(storageFile, JSON.stringify(entry) + '\\n');\n }\n\n async search(params: any): Promise<any[]> {\n if (!this.collection) {\n await this.initialize();\n }\n\n try {\n const query = params.query || '';\n const limit = params.limit || 10;\n \n // Build where clause\n const where: any = {};\n if (params.filter) {\n Object.entries(params.filter).forEach(([key, value]) => {\n if (value !== undefined && value !== null) {\n where[key] = value;\n }\n });\n }\n\n // Query collection\n const results = await this.collection.query({\n queryTexts: [query],\n nResults: limit,\n where: Object.keys(where).length > 0 ? where : undefined\n });\n\n // Format results\n const contexts: any[] = [];\n if (results.documents && results.documents[0]) {\n for (let i = 0; i < results.documents[0].length; i++) {\n contexts.push({\n id: results.ids[0][i],\n content: results.documents[0][i],\n metadata: results.metadatas?.[0]?.[i] || {},\n distance: results.distances?.[0]?.[i] || 0\n });\n }\n }\n\n return contexts;\n } catch (error: any) {\n console.error('Failed to search contexts:', error.message);\n return [];\n }\n }\n\n async deleteCollection(): Promise<void> {\n if (this.collection) {\n await this.client.deleteCollection({\n name: this.config.collectionName || 'stackmemory_context'\n });\n console.log('Collection deleted');\n }\n }\n\n async listCollections(): Promise<any[]> {\n const collections = await this.client.listCollections();\n return collections;\n }\n}\n\nexport default ChromaDBAdapter;"],
+ "mappings": "AAIA,SAAS,oBAAoB;AAC7B,SAAS,MAAM,cAAc;AAEtB,MAAM,gBAAgB;AAAA,EACnB;AAAA,EACA,aAAkB;AAAA,EAClB;AAAA,EACA,kBAAoC,oBAAI,IAAI;AAAA,EAEpD,YAAY,QAAa;AACvB,SAAK,SAAS;AAEd,QAAI;AAEF,UAAI,OAAO,UAAU,OAAO,OAAO,SAAS,eAAe,GAAG;AAE5D,aAAK,SAAS,IAAI,aAAa;AAAA,UAC7B,KAAK;AAAA,UACL,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,YACP,kBAAkB,OAAO;AAAA,UAC3B;AAAA,QACF,CAAC;AAAA,MACH,OAAO;AAEL,aAAK,SAAS,IAAI,aAAa;AAAA,MACjC;AAAA,IACF,SAAS,OAAO;AAEd,cAAQ,IAAI,iCAAiC;AAC7C,WAAK,SAAS,IAAI,aAAa;AAAA,IACjC;AAAA,EACF;AAAA,EAEA,MAAM,aAA4B;AAChC,QAAI;AAEF,YAAM,iBAAiB,KAAK,OAAO,kBAAkB;AAGrD,WAAK,aAAa,MAAM,KAAK,OAAO,sBAAsB;AAAA,QACxD,MAAM;AAAA,QACN,UAAU;AAAA,UACR,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,MACF,CAAC;AAED,cAAQ,IAAI,KAAI,oBAAI,KAAK,GAAE,YAAY,CAAC,gCAAgC,cAAc,eAAe;AAAA,IACvG,SAAS,OAAY;AACnB,cAAQ,IAAI,6DAA6D;AACzE,WAAK,aAAa;AAAA,IACpB;AAAA,EACF;AAAA,EAEA,MAAM,MAAM,SAA4B;AACtC,UAAM,KAAK,QAAQ,MAAM,OAAO,OAAO,CAAC;AAGxC,UAAM,WAAgB;AAAA,MACpB,WAAW,QAAQ,cAAa,oBAAI,KAAK,GAAE,YAAY;AAAA,MACvD,MAAM,QAAQ,QAAQ;AAAA,MACtB,SAAS,QAAQ,WAAW,KAAK,OAAO,UAAU;AAAA,MAClD,SAAS,QAAQ,WAAW;AAAA,IAC9B;AAGA,QAAI,QAAQ,WAAY,UAAS,aAAa,QAAQ;AACtD,QAAI,QAAQ,UAAU;AACpB,aAAO,QAAQ,QAAQ,QAAQ,EAAE,QAAQ,CAAC,CAAC,KAAK,KAAK,MAAM;AACzD,YAAI,UAAU,UAAa,UAAU,MAAM;AACzC,mBAAS,GAAG,IAAI;AAAA,QAClB;AAAA,MACF,CAAC;AAAA,IACH;AAEA,QAAI,CAAC,KAAK,YAAY;AACpB,YAAM,KAAK,WAAW;AAAA,IACxB;AAEA,QAAI;AACF,UAAI,KAAK,YAAY;AAEnB,cAAM,KAAK,WAAW,OAAO;AAAA,UAC3B,KAAK,CAAC,EAAE;AAAA,UACR,WAAW,CAAC,QAAQ,WAAW,KAAK,UAAU,OAAO,CAAC;AAAA,UACtD,WAAW,CAAC,QAAQ;AAAA,QACtB,CAAC;AAAA,MACH,OAAO;AAEL,cAAM,KAAK,oBAAoB,IAAI,SAAS,QAAQ;AAAA,MACtD;AAEA,aAAO;AAAA,QACL,SAAS;AAAA,QACT;AAAA,QACA,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,MACpC;AAAA,IACF,SAAS,OAAY;AACnB,cAAQ,MAAM,4BAA4B,MAAM,OAAO;AAEvD,UAAI;AACF,cAAM,KAAK,oBAAoB,IAAI,SAAS,QAAQ;AACpD,eAAO,EAAE,SAAS,MAAM,IAAI,YAAW,oBAAI,KAAK,GAAE,YAAY,EAAE;AAAA,MAClE,SAAS,eAAoB;AAC3B,eAAO,EAAE,SAAS,OAAO,OAAO,cAAc,QAAQ;AAAA,MACxD;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAc,oBAAoB,IAAY,SAAc,UAA8B;AACxF,UAAM,KAAK,MAAM,OAAO,IAAI;AAC5B,UAAM,OAAO,MAAM,OAAO,MAAM;AAChC,UAAM,KAAK,MAAM,OAAO,IAAI;AAE5B,UAAM,aAAa,KAAK,KAAK,GAAG,QAAQ,GAAG,gBAAgB,iBAAiB;AAC5E,UAAM,cAAc,KAAK,KAAK,YAAY,gBAAgB;AAG1D,QAAI,CAAC,GAAG,WAAW,UAAU,GAAG;AAC9B,SAAG,UAAU,YAAY,EAAE,WAAW,KAAK,CAAC;AAAA,IAC9C;AAEA,UAAM,QAAQ;AAAA,MACZ;AAAA,MACA,SAAS,QAAQ,WAAW,KAAK,UAAU,OAAO;AAAA,MAClD;AAAA,MACA,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,IACpC;AAGA,OAAG,eAAe,aAAa,KAAK,UAAU,KAAK,IAAI,IAAI;AAAA,EAC7D;AAAA,EAEA,MAAM,OAAO,QAA6B;AACxC,QAAI,CAAC,KAAK,YAAY;AACpB,YAAM,KAAK,WAAW;AAAA,IACxB;AAEA,QAAI;AACF,YAAM,QAAQ,OAAO,SAAS;AAC9B,YAAM,QAAQ,OAAO,SAAS;AAG9B,YAAM,QAAa,CAAC;AACpB,UAAI,OAAO,QAAQ;AACjB,eAAO,QAAQ,OAAO,MAAM,EAAE,QAAQ,CAAC,CAAC,KAAK,KAAK,MAAM;AACtD,cAAI,UAAU,UAAa,UAAU,MAAM;AACzC,kBAAM,GAAG,IAAI;AAAA,UACf;AAAA,QACF,CAAC;AAAA,MACH;AAGA,YAAM,UAAU,MAAM,KAAK,WAAW,MAAM;AAAA,QAC1C,YAAY,CAAC,KAAK;AAAA,QAClB,UAAU;AAAA,QACV,OAAO,OAAO,KAAK,KAAK,EAAE,SAAS,IAAI,QAAQ;AAAA,MACjD,CAAC;AAGD,YAAM,WAAkB,CAAC;AACzB,UAAI,QAAQ,aAAa,QAAQ,UAAU,CAAC,GAAG;AAC7C,iBAAS,IAAI,GAAG,IAAI,QAAQ,UAAU,CAAC,EAAE,QAAQ,KAAK;AACpD,mBAAS,KAAK;AAAA,YACZ,IAAI,QAAQ,IAAI,CAAC,EAAE,CAAC;AAAA,YACpB,SAAS,QAAQ,UAAU,CAAC,EAAE,CAAC;AAAA,YAC/B,UAAU,QAAQ,YAAY,CAAC,IAAI,CAAC,KAAK,CAAC;AAAA,YAC1C,UAAU,QAAQ,YAAY,CAAC,IAAI,CAAC,KAAK;AAAA,UAC3C,CAAC;AAAA,QACH;AAAA,MACF;AAEA,aAAO;AAAA,IACT,SAAS,OAAY;AACnB,cAAQ,MAAM,8BAA8B,MAAM,OAAO;AACzD,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AAAA,EAEA,MAAM,mBAAkC;AACtC,QAAI,KAAK,YAAY;AACnB,YAAM,KAAK,OAAO,iBAAiB;AAAA,QACjC,MAAM,KAAK,OAAO,kBAAkB;AAAA,MACtC,CAAC;AACD,cAAQ,IAAI,oBAAoB;AAAA,IAClC;AAAA,EACF;AAAA,EAEA,MAAM,kBAAkC;AACtC,UAAM,cAAc,MAAM,KAAK,OAAO,gBAAgB;A
ACtD,WAAO;AAAA,EACT;AACF;AAEA,IAAO,0BAAQ;",
+ "names": []
+ }
package/dist/core/storage/infinite-storage.js
@@ -0,0 +1,443 @@
+ import { S3Client, PutObjectCommand, GetObjectCommand } from "@aws-sdk/client-s3";
+ import { createClient as createRedisClient } from "redis";
+ import { Pool } from "pg";
+ import { Logger } from "../monitoring/logger.js";
+ import { compress, decompress } from "../utils/compression.js";
+ class InfiniteStorageSystem {
+   redisClient;
+   timeseriesPool;
+   s3Client;
+   logger;
+   config;
+   latencies = [];
+   migrationWorker = null;
+   constructor(config) {
+     this.config = config;
+     this.logger = new Logger("InfiniteStorage");
+     if (!config.tiers || config.tiers.length === 0) {
+       this.config.tiers = [
+         { name: "hot", ageThresholdHours: 1, storageClass: "MEMORY", accessLatencyMs: 5 },
+         { name: "warm", ageThresholdHours: 168, storageClass: "TIMESERIES", accessLatencyMs: 50 },
+         { name: "cold", ageThresholdHours: 720, storageClass: "S3_STANDARD", accessLatencyMs: 100 },
+         { name: "archive", ageThresholdHours: Infinity, storageClass: "S3_GLACIER", accessLatencyMs: 36e5 }
+       ];
+     }
+   }
+   async initialize() {
+     try {
+       if (this.config.redis?.url) {
+         this.redisClient = createRedisClient({
+           url: this.config.redis.url
+         });
+         await this.redisClient.connect();
+         await this.redisClient.configSet("maxmemory-policy", "allkeys-lru");
+         if (this.config.redis.maxMemoryMB) {
+           await this.redisClient.configSet("maxmemory", `${this.config.redis.maxMemoryMB}mb`);
+         }
+         this.logger.info("Redis client initialized for hot tier");
+       }
+       if (this.config.timeseries?.connectionString) {
+         this.timeseriesPool = new Pool({
+           connectionString: this.config.timeseries.connectionString,
+           max: 10,
+           idleTimeoutMillis: 3e4
+         });
+         await this.createTimeSeriesTables();
+         this.logger.info("TimeSeries DB initialized for warm tier");
+       }
+       if (this.config.s3?.bucket) {
+         this.s3Client = new S3Client({
+           region: this.config.s3.region || "us-east-1",
+           credentials: this.config.s3.accessKeyId ? {
+             accessKeyId: this.config.s3.accessKeyId,
+             secretAccessKey: this.config.s3.secretAccessKey
+           } : void 0
+         });
+         this.logger.info("S3 client initialized for cold/archive tiers");
+       }
+       this.startMigrationWorker();
+       this.logger.info("Infinite Storage System initialized");
+     } catch (error) {
+       this.logger.error("Failed to initialize storage system", error);
+       throw error;
+     }
+   }
+   /**
+    * Create TimeSeries tables for warm tier storage
+    */
+   async createTimeSeriesTables() {
+     const client = await this.timeseriesPool.connect();
+     try {
+       await client.query(`
+         CREATE TABLE IF NOT EXISTS frame_timeseries (
+           time TIMESTAMPTZ NOT NULL,
+           frame_id TEXT NOT NULL,
+           user_id TEXT NOT NULL,
+           project_name TEXT,
+           type TEXT,
+           data JSONB,
+           compressed_data BYTEA,
+           storage_tier TEXT DEFAULT 'warm',
+           access_count INTEGER DEFAULT 0,
+           last_accessed TIMESTAMPTZ DEFAULT NOW(),
+           PRIMARY KEY (time, frame_id)
+         )
+       `);
+       await client.query(`
+         SELECT create_hypertable('frame_timeseries', 'time',
+           chunk_time_interval => INTERVAL '1 day',
+           if_not_exists => TRUE)
+       `).catch(() => {
+         this.logger.info("Using standard PostgreSQL partitioning");
+       });
+       await client.query(`
+         CREATE INDEX IF NOT EXISTS idx_frame_user ON frame_timeseries (user_id, time DESC);
+         CREATE INDEX IF NOT EXISTS idx_frame_project ON frame_timeseries (project_name, time DESC);
+         CREATE INDEX IF NOT EXISTS idx_frame_tier ON frame_timeseries (storage_tier);
+       `);
+       await client.query(`
+         SELECT add_compression_policy('frame_timeseries', INTERVAL '7 days', if_not_exists => TRUE)
+       `).catch(() => {
+         this.logger.info("Compression policy not available");
+       });
+     } finally {
+       client.release();
+     }
+   }
+   /**
+    * Store a frame with automatic tier selection
+    */
+   async storeFrame(frame, userId) {
+     const startTime = Date.now();
+     try {
+       const frameData = JSON.stringify(frame);
+       const compressedData = await compress(frameData);
+       const frameKey = `frame:${userId}:${frame.frameId}`;
+       if (this.redisClient) {
+         await this.redisClient.setEx(
+           frameKey,
+           this.config.redis.ttlSeconds || 3600,
+           compressedData
+         );
+         await this.redisClient.hSet(`meta:${frameKey}`, {
+           userId,
+           projectName: frame.projectName || "default",
+           type: frame.type,
+           timestamp: frame.timestamp,
+           tier: "hot"
+         });
+       }
+       if (this.timeseriesPool) {
+         const client = await this.timeseriesPool.connect();
+         try {
+           await client.query(`
+             INSERT INTO frame_timeseries (time, frame_id, user_id, project_name, type, data, compressed_data, storage_tier)
+             VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+             ON CONFLICT (time, frame_id) DO UPDATE
+             SET data = EXCLUDED.data,
+                 compressed_data = EXCLUDED.compressed_data,
+                 last_accessed = NOW(),
+                 access_count = frame_timeseries.access_count + 1
+           `, [
+             new Date(frame.timestamp),
+             frame.frameId,
+             userId,
+             frame.projectName || "default",
+             frame.type,
+             frame,
+             compressedData,
+             "warm"
+           ]);
+         } finally {
+           client.release();
+         }
+       }
+       const latency = Date.now() - startTime;
+       this.trackLatency(latency);
+       this.logger.debug(`Stored frame ${frame.frameId} in ${latency}ms`);
+     } catch (error) {
+       this.logger.error(`Failed to store frame ${frame.frameId}`, error);
+       throw error;
+     }
+   }
+   /**
+    * Retrieve a frame with intelligent caching
+    */
+   async retrieveFrame(frameId, userId) {
+     const startTime = Date.now();
+     const frameKey = `frame:${userId}:${frameId}`;
+     try {
+       if (this.redisClient) {
+         const cached = await this.redisClient.get(frameKey);
+         if (cached) {
+           const decompressed = await decompress(cached);
+           const frame = JSON.parse(decompressed);
+           await this.redisClient.expire(frameKey, this.config.redis.ttlSeconds || 3600);
+           const latency = Date.now() - startTime;
+           this.trackLatency(latency);
+           this.logger.debug(`Retrieved frame ${frameId} from hot tier in ${latency}ms`);
+           return frame;
+         }
+       }
+       if (this.timeseriesPool) {
+         const client = await this.timeseriesPool.connect();
+         try {
+           const result = await client.query(`
+             SELECT data, compressed_data, storage_tier
+             FROM frame_timeseries
+             WHERE frame_id = $1 AND user_id = $2
+             ORDER BY time DESC
+             LIMIT 1
+           `, [frameId, userId]);
+           if (result.rows.length > 0) {
+             const row = result.rows[0];
+             let frame;
+             if (row.compressed_data) {
+               const decompressed = await decompress(row.compressed_data);
+               frame = JSON.parse(decompressed);
+             } else {
+               frame = row.data;
+             }
+             await client.query(`
+               UPDATE frame_timeseries
+               SET last_accessed = NOW(), access_count = access_count + 1
+               WHERE frame_id = $1 AND user_id = $2
+             `, [frameId, userId]);
+             if (this.redisClient) {
+               await this.promoteToHotTier(frame, userId);
+             }
+             const latency = Date.now() - startTime;
+             this.trackLatency(latency);
+             this.logger.debug(`Retrieved frame ${frameId} from warm tier in ${latency}ms`);
+             return frame;
+           }
+         } finally {
+           client.release();
+         }
+       }
+       if (this.s3Client && this.config.s3.bucket) {
+         const key = `frames/${userId}/${frameId}.json.gz`;
+         try {
+           const command = new GetObjectCommand({
+             Bucket: this.config.s3.bucket,
+             Key: key
+           });
+           const response = await this.s3Client.send(command);
+           const compressedData = await response.Body.transformToByteArray();
+           const decompressed = await decompress(Buffer.from(compressedData));
+           const frame = JSON.parse(decompressed);
+           await this.promoteFrame(frame, userId);
+           const latency = Date.now() - startTime;
+           this.trackLatency(latency);
+           this.logger.debug(`Retrieved frame ${frameId} from cold tier in ${latency}ms`);
+           return frame;
+         } catch (error) {
+           if (error.Code !== "NoSuchKey") {
+             throw error;
+           }
+         }
+       }
+       this.logger.debug(`Frame ${frameId} not found in any tier`);
+       return null;
+     } catch (error) {
+       this.logger.error(`Failed to retrieve frame ${frameId}`, error);
+       throw error;
+     }
+   }
+   /**
+    * Promote frame to hot tier for fast access
+    */
+   async promoteToHotTier(frame, userId) {
+     if (!this.redisClient) return;
+     try {
+       const frameKey = `frame:${userId}:${frame.frameId}`;
+       const frameData = JSON.stringify(frame);
+       const compressedData = await compress(frameData);
+       await this.redisClient.setEx(
+         frameKey,
+         this.config.redis.ttlSeconds || 3600,
+         compressedData
+       );
+       this.logger.debug(`Promoted frame ${frame.frameId} to hot tier`);
+     } catch (error) {
+       this.logger.error(`Failed to promote frame ${frame.frameId}`, error);
+     }
+   }
+   /**
+    * Promote frame through storage tiers
+    */
+   async promoteFrame(frame, userId) {
+     if (this.timeseriesPool) {
+       const client = await this.timeseriesPool.connect();
+       try {
+         const compressedData = await compress(JSON.stringify(frame));
+         await client.query(`
+           INSERT INTO frame_timeseries (time, frame_id, user_id, data, compressed_data, storage_tier)
+           VALUES ($1, $2, $3, $4, $5, $6)
+           ON CONFLICT (time, frame_id) DO UPDATE
+           SET storage_tier = 'warm',
+               last_accessed = NOW(),
+               access_count = frame_timeseries.access_count + 1
+         `, [
+           new Date(frame.timestamp),
+           frame.frameId,
+           userId,
+           frame,
+           compressedData,
+           "warm"
+         ]);
+       } finally {
+         client.release();
+       }
+     }
+     await this.promoteToHotTier(frame, userId);
+   }
+   /**
+    * Start background worker for tier migration
+    */
+   startMigrationWorker() {
+     this.migrationWorker = setInterval(async () => {
+       await this.migrateAgedData();
+     }, 60 * 60 * 1e3);
+     this.logger.info("Migration worker started");
+   }
+   /**
+    * Migrate aged data to appropriate storage tiers
+    */
+   async migrateAgedData() {
+     this.logger.info("Starting tier migration...");
+     if (!this.timeseriesPool) return;
+     const client = await this.timeseriesPool.connect();
+     try {
+       const coldEligible = await client.query(`
+         SELECT frame_id, user_id, data, compressed_data
+         FROM frame_timeseries
+         WHERE storage_tier = 'warm'
+           AND time < NOW() - INTERVAL '7 days'
+           AND last_accessed < NOW() - INTERVAL '7 days'
+         LIMIT 1000
+       `);
+       for (const row of coldEligible.rows) {
+         await this.migrateToS3(row, "STANDARD");
+         await client.query(`
+           UPDATE frame_timeseries
+           SET storage_tier = 'cold'
+           WHERE frame_id = $1 AND user_id = $2
+         `, [row.frame_id, row.user_id]);
+       }
+       const archiveEligible = await client.query(`
+         SELECT frame_id, user_id, data, compressed_data
+         FROM frame_timeseries
+         WHERE storage_tier = 'cold'
+           AND time < NOW() - INTERVAL '30 days'
+           AND last_accessed < NOW() - INTERVAL '30 days'
+         LIMIT 1000
+       `);
+       for (const row of archiveEligible.rows) {
+         await this.migrateToS3(row, "GLACIER");
+         await client.query(`
+           UPDATE frame_timeseries
+           SET storage_tier = 'archive'
+           WHERE frame_id = $1 AND user_id = $2
+         `, [row.frame_id, row.user_id]);
+       }
+       this.logger.info(`Migration completed: ${coldEligible.rows.length} to cold, ${archiveEligible.rows.length} to archive`);
+     } finally {
+       client.release();
+     }
+   }
+   /**
+    * Migrate data to S3 storage
+    */
+   async migrateToS3(row, storageClass) {
+     if (!this.s3Client || !this.config.s3.bucket) return;
+     try {
+       const key = `frames/${row.user_id}/${row.frame_id}.json.gz`;
+       const data = row.compressed_data || await compress(JSON.stringify(row.data));
+       const command = new PutObjectCommand({
+         Bucket: this.config.s3.bucket,
+         Key: key,
+         Body: data,
+         StorageClass: storageClass,
+         Metadata: {
+           userId: row.user_id,
+           frameId: row.frame_id,
+           migratedAt: (/* @__PURE__ */ new Date()).toISOString()
+         }
+       });
+       await this.s3Client.send(command);
+       this.logger.debug(`Migrated frame ${row.frame_id} to S3 ${storageClass}`);
+     } catch (error) {
+       this.logger.error(`Failed to migrate frame ${row.frame_id} to S3`, error);
+       throw error;
+     }
+   }
+   /**
+    * Track latency for performance monitoring
+    */
+   trackLatency(latencyMs) {
+     this.latencies.push(latencyMs);
+     if (this.latencies.length > 1e3) {
+       this.latencies.shift();
+     }
+   }
+   /**
+    * Get storage metrics
+    */
+   async getMetrics() {
+     const metrics = {
+       totalObjects: 0,
+       tierDistribution: {},
+       storageBytes: 0,
+       avgLatencyMs: 0,
+       p50LatencyMs: 0,
+       p99LatencyMs: 0
+     };
+     if (this.latencies.length > 0) {
+       const sorted = [...this.latencies].sort((a, b) => a - b);
+       metrics.avgLatencyMs = sorted.reduce((a, b) => a + b, 0) / sorted.length;
+       metrics.p50LatencyMs = sorted[Math.floor(sorted.length * 0.5)];
+       metrics.p99LatencyMs = sorted[Math.floor(sorted.length * 0.99)];
+     }
+     if (this.timeseriesPool) {
+       const client = await this.timeseriesPool.connect();
+       try {
+         const result = await client.query(`
+           SELECT
+             storage_tier,
+             COUNT(*) as count,
+             SUM(pg_column_size(compressed_data)) as bytes
+           FROM frame_timeseries
+           GROUP BY storage_tier
+         `);
+         for (const row of result.rows) {
+           metrics.tierDistribution[row.storage_tier] = parseInt(row.count);
+           metrics.storageBytes += parseInt(row.bytes || 0);
+           metrics.totalObjects += parseInt(row.count);
+         }
+       } finally {
+         client.release();
+       }
+     }
+     return metrics;
+   }
+   /**
+    * Cleanup and shutdown
+    */
+   async shutdown() {
+     if (this.migrationWorker) {
+       clearInterval(this.migrationWorker);
+     }
+     if (this.redisClient) {
+       await this.redisClient.quit();
+     }
+     if (this.timeseriesPool) {
+       await this.timeseriesPool.end();
+     }
+     this.logger.info("Infinite Storage System shut down");
+   }
+ }
+ export {
+   InfiniteStorageSystem
+ };
+ //# sourceMappingURL=infinite-storage.js.map
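
The new tiered store follows the same pattern: hot frames in Redis, warm frames in a TimescaleDB-style hypertable, cold and archive frames in S3. A hedged setup sketch follows, again an illustration only; the import path, endpoints, and bucket name are placeholders, while the config keys (redis.url, redis.ttlSeconds, timeseries.connectionString, s3.bucket, tiers) are the ones the constructor and initialize() read above.

// Hypothetical wiring of the InfiniteStorageSystem (sketch, not package docs).
// Import path, endpoints, and bucket name are assumptions.
import { InfiniteStorageSystem } from "@stackmemoryai/stackmemory/dist/core/storage/infinite-storage.js";

const storage = new InfiniteStorageSystem({
  redis: { url: "redis://localhost:6379", ttlSeconds: 3600, maxMemoryMB: 256 },
  timeseries: { connectionString: "postgres://localhost:5432/stackmemory" },
  s3: { bucket: "stackmemory-frames", region: "us-east-1" }
  // tiers omitted: the constructor falls back to the default hot (1h) /
  // warm (168h) / cold (720h) / archive thresholds defined above
});

await storage.initialize(); // also starts the hourly tier-migration worker

// Writes land in Redis (TTL-bound, compressed) and the frame_timeseries table.
await storage.storeFrame(
  { frameId: "frame-1", type: "context", timestamp: Date.now(), projectName: "demo" },
  "user-123"
);

// Reads check hot -> warm -> cold in order, promoting hits back up the tiers.
const frame = await storage.retrieveFrame("frame-1", "user-123");
console.log(frame?.frameId, await storage.getMetrics());

await storage.shutdown(); // stops the worker, closes Redis and Postgres

The migration worker moves warm rows untouched for 7 days to S3 STANDARD and cold rows untouched for 30 days to GLACIER, in batches of 1000 per hourly pass.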