@stackmemoryai/stackmemory 0.3.7 → 0.3.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/core/agent-task-manager.js +5 -5
- package/dist/agents/core/agent-task-manager.js.map +2 -2
- package/dist/agents/verifiers/base-verifier.js +2 -2
- package/dist/agents/verifiers/base-verifier.js.map +2 -2
- package/dist/cli/claude-sm.js +0 -11
- package/dist/cli/claude-sm.js.map +2 -2
- package/dist/cli/codex-sm.js +0 -11
- package/dist/cli/codex-sm.js.map +2 -2
- package/dist/cli/commands/chromadb.js +64 -34
- package/dist/cli/commands/chromadb.js.map +2 -2
- package/dist/cli/commands/clear.js +9 -13
- package/dist/cli/commands/clear.js.map +2 -2
- package/dist/cli/commands/config.js +43 -33
- package/dist/cli/commands/config.js.map +2 -2
- package/dist/cli/commands/context.js.map +2 -2
- package/dist/cli/commands/dashboard.js +41 -13
- package/dist/cli/commands/dashboard.js.map +2 -2
- package/dist/cli/commands/gc.js +69 -20
- package/dist/cli/commands/gc.js.map +2 -2
- package/dist/cli/commands/handoff.js.map +2 -2
- package/dist/cli/commands/infinite-storage.js +60 -19
- package/dist/cli/commands/infinite-storage.js.map +2 -2
- package/dist/cli/commands/linear-create.js +36 -8
- package/dist/cli/commands/linear-create.js.map +2 -2
- package/dist/cli/commands/linear-list.js +33 -10
- package/dist/cli/commands/linear-list.js.map +2 -2
- package/dist/cli/commands/linear-migrate.js +17 -4
- package/dist/cli/commands/linear-migrate.js.map +2 -2
- package/dist/cli/commands/linear-test.js +14 -6
- package/dist/cli/commands/linear-test.js.map +2 -2
- package/dist/cli/commands/linear-unified.js +123 -35
- package/dist/cli/commands/linear-unified.js.map +2 -2
- package/dist/cli/commands/linear.js.map +2 -2
- package/dist/cli/commands/monitor.js.map +2 -2
- package/dist/cli/commands/onboard.js +35 -8
- package/dist/cli/commands/onboard.js.map +2 -2
- package/dist/cli/commands/quality.js +2 -7
- package/dist/cli/commands/quality.js.map +2 -2
- package/dist/cli/commands/session.js +23 -6
- package/dist/cli/commands/session.js.map +2 -2
- package/dist/cli/commands/skills.js +72 -27
- package/dist/cli/commands/skills.js.map +2 -2
- package/dist/cli/commands/storage.js +108 -38
- package/dist/cli/commands/storage.js.map +2 -2
- package/dist/cli/commands/tui.js.map +2 -2
- package/dist/cli/commands/webhook.js +57 -18
- package/dist/cli/commands/webhook.js.map +2 -2
- package/dist/cli/commands/workflow.js +8 -15
- package/dist/cli/commands/workflow.js.map +2 -2
- package/dist/cli/commands/worktree.js +34 -13
- package/dist/cli/commands/worktree.js.map +2 -2
- package/dist/cli/index.js +0 -11
- package/dist/cli/index.js.map +2 -2
- package/dist/core/config/types.js.map +1 -1
- package/dist/core/context/auto-context.js +10 -6
- package/dist/core/context/auto-context.js.map +2 -2
- package/dist/core/context/context-bridge.js.map +2 -2
- package/dist/core/context/frame-database.js +13 -3
- package/dist/core/context/frame-database.js.map +2 -2
- package/dist/core/context/frame-digest.js +7 -5
- package/dist/core/context/frame-digest.js.map +2 -2
- package/dist/core/context/frame-manager.js.map +2 -2
- package/dist/core/context/frame-stack.js +16 -5
- package/dist/core/context/frame-stack.js.map +2 -2
- package/dist/core/context/incremental-gc.js +10 -3
- package/dist/core/context/incremental-gc.js.map +2 -2
- package/dist/core/context/index.js.map +1 -1
- package/dist/core/context/permission-manager.js.map +2 -2
- package/dist/core/context/refactored-frame-manager.js +12 -3
- package/dist/core/context/refactored-frame-manager.js.map +2 -2
- package/dist/core/context/shared-context-layer.js +4 -2
- package/dist/core/context/shared-context-layer.js.map +2 -2
- package/dist/core/database/batch-operations.js +112 -86
- package/dist/core/database/batch-operations.js.map +2 -2
- package/dist/core/database/query-cache.js +19 -9
- package/dist/core/database/query-cache.js.map +2 -2
- package/dist/core/database/sqlite-adapter.js +1 -1
- package/dist/core/database/sqlite-adapter.js.map +2 -2
- package/dist/core/digest/enhanced-hybrid-digest.js +8 -2
- package/dist/core/digest/enhanced-hybrid-digest.js.map +2 -2
- package/dist/core/errors/recovery.js +9 -2
- package/dist/core/errors/recovery.js.map +2 -2
- package/dist/core/frame/workflow-templates-stub.js.map +1 -1
- package/dist/core/frame/workflow-templates.js +40 -1
- package/dist/core/frame/workflow-templates.js.map +2 -2
- package/dist/core/monitoring/logger.js +6 -1
- package/dist/core/monitoring/logger.js.map +2 -2
- package/dist/core/monitoring/metrics.js.map +2 -2
- package/dist/core/monitoring/progress-tracker.js.map +2 -2
- package/dist/core/performance/context-cache.js.map +2 -2
- package/dist/core/performance/lazy-context-loader.js +24 -20
- package/dist/core/performance/lazy-context-loader.js.map +2 -2
- package/dist/core/performance/optimized-frame-context.js +27 -12
- package/dist/core/performance/optimized-frame-context.js.map +2 -2
- package/dist/core/performance/performance-benchmark.js +10 -6
- package/dist/core/performance/performance-benchmark.js.map +2 -2
- package/dist/core/performance/performance-profiler.js +51 -14
- package/dist/core/performance/performance-profiler.js.map +2 -2
- package/dist/core/performance/streaming-jsonl-parser.js +5 -1
- package/dist/core/performance/streaming-jsonl-parser.js.map +2 -2
- package/dist/core/projects/project-manager.js +14 -20
- package/dist/core/projects/project-manager.js.map +2 -2
- package/dist/core/retrieval/context-retriever.js.map +1 -1
- package/dist/core/retrieval/llm-context-retrieval.js.map +2 -2
- package/dist/core/session/clear-survival-stub.js +5 -1
- package/dist/core/session/clear-survival-stub.js.map +2 -2
- package/dist/core/session/clear-survival.js +35 -0
- package/dist/core/session/clear-survival.js.map +2 -2
- package/dist/core/session/index.js.map +1 -1
- package/dist/core/session/session-manager.js.map +2 -2
- package/dist/core/storage/chromadb-adapter.js +6 -2
- package/dist/core/storage/chromadb-adapter.js.map +2 -2
- package/dist/core/storage/chromadb-simple.js +17 -5
- package/dist/core/storage/chromadb-simple.js.map +2 -2
- package/dist/core/storage/infinite-storage.js +109 -46
- package/dist/core/storage/infinite-storage.js.map +2 -2
- package/dist/core/storage/railway-optimized-storage.js +48 -22
- package/dist/core/storage/railway-optimized-storage.js.map +2 -2
- package/dist/core/storage/remote-storage.js +41 -23
- package/dist/core/storage/remote-storage.js.map +2 -2
- package/dist/core/trace/cli-trace-wrapper.js +9 -2
- package/dist/core/trace/cli-trace-wrapper.js.map +2 -2
- package/dist/core/trace/db-trace-wrapper.js +96 -68
- package/dist/core/trace/db-trace-wrapper.js.map +2 -2
- package/dist/core/trace/debug-trace.js +25 -8
- package/dist/core/trace/debug-trace.js.map +2 -2
- package/dist/core/trace/index.js +6 -2
- package/dist/core/trace/index.js.map +2 -2
- package/dist/core/trace/linear-api-wrapper.js +10 -5
- package/dist/core/trace/linear-api-wrapper.js.map +2 -2
- package/dist/core/trace/trace-demo.js +14 -10
- package/dist/core/trace/trace-demo.js.map +2 -2
- package/dist/core/trace/trace-detector.js +9 -2
- package/dist/core/trace/trace-detector.js.map +2 -2
- package/dist/core/trace/types.js.map +1 -1
- package/dist/core/utils/compression.js.map +1 -1
- package/dist/core/utils/update-checker.js.map +1 -1
- package/dist/core/worktree/worktree-manager.js +18 -7
- package/dist/core/worktree/worktree-manager.js.map +2 -2
- package/dist/features/analytics/core/analytics-service.js.map +2 -2
- package/dist/features/analytics/queries/metrics-queries.js +1 -1
- package/dist/features/analytics/queries/metrics-queries.js.map +2 -2
- package/dist/features/tasks/pebbles-task-store.js.map +1 -1
- package/dist/features/tui/components/analytics-panel.js +36 -15
- package/dist/features/tui/components/analytics-panel.js.map +2 -2
- package/dist/features/tui/components/pr-tracker.js +19 -7
- package/dist/features/tui/components/pr-tracker.js.map +2 -2
- package/dist/features/tui/components/session-monitor.js +22 -9
- package/dist/features/tui/components/session-monitor.js.map +2 -2
- package/dist/features/tui/components/subagent-fleet.js +20 -13
- package/dist/features/tui/components/subagent-fleet.js.map +2 -2
- package/dist/features/tui/components/task-board.js +26 -10
- package/dist/features/tui/components/task-board.js.map +2 -2
- package/dist/features/tui/index.js.map +2 -2
- package/dist/features/tui/services/data-service.js +6 -2
- package/dist/features/tui/services/data-service.js.map +2 -2
- package/dist/features/tui/services/linear-task-reader.js +3 -1
- package/dist/features/tui/services/linear-task-reader.js.map +2 -2
- package/dist/features/tui/services/websocket-client.js +3 -1
- package/dist/features/tui/services/websocket-client.js.map +2 -2
- package/dist/features/tui/terminal-compat.js +6 -2
- package/dist/features/tui/terminal-compat.js.map +2 -2
- package/dist/features/web/client/stores/task-store.js.map +2 -2
- package/dist/features/web/server/index.js +18 -10
- package/dist/features/web/server/index.js.map +2 -2
- package/dist/integrations/linear/sync-service.js +12 -13
- package/dist/integrations/linear/sync-service.js.map +2 -2
- package/dist/integrations/linear/sync.js +174 -12
- package/dist/integrations/linear/sync.js.map +2 -2
- package/dist/integrations/linear/unified-sync.js +1 -1
- package/dist/integrations/linear/unified-sync.js.map +1 -1
- package/dist/integrations/linear/webhook-server.js +15 -16
- package/dist/integrations/linear/webhook-server.js.map +2 -2
- package/dist/mcp/stackmemory-mcp-server.js +0 -11
- package/dist/mcp/stackmemory-mcp-server.js.map +2 -2
- package/dist/servers/production/auth-middleware.js.map +2 -2
- package/dist/servers/railway/index.js.map +2 -2
- package/dist/services/config-service.js +6 -7
- package/dist/services/config-service.js.map +2 -2
- package/dist/services/context-service.js +11 -12
- package/dist/services/context-service.js.map +2 -2
- package/dist/skills/claude-skills.js +4 -2
- package/dist/skills/claude-skills.js.map +2 -2
- package/dist/skills/dashboard-launcher.js.map +2 -2
- package/dist/skills/repo-ingestion-skill.js.map +2 -2
- package/dist/utils/env.js +46 -0
- package/dist/utils/env.js.map +7 -0
- package/dist/utils/logger.js +0 -11
- package/dist/utils/logger.js.map +2 -2
- package/package.json +1 -1

package/dist/core/storage/infinite-storage.js

```diff
@@ -1,4 +1,8 @@
-import { S3Client, PutObjectCommand, GetObjectCommand } from "@aws-sdk/client-s3";
+import {
+  S3Client,
+  PutObjectCommand,
+  GetObjectCommand
+} from "@aws-sdk/client-s3";
 import { createClient as createRedisClient } from "redis";
 import { Pool } from "pg";
 import { Logger } from "../monitoring/logger.js";
```
```diff
@@ -16,10 +20,30 @@ class InfiniteStorageSystem {
     this.logger = new Logger("InfiniteStorage");
     if (!config.tiers || config.tiers.length === 0) {
       this.config.tiers = [
-        { name: "hot", ageThresholdHours: 1, storageClass: "MEMORY", accessLatencyMs: 5 },
-        { name: "warm", ageThresholdHours: 168, storageClass: "TIMESERIES", accessLatencyMs: 50 },
-        { name: "cold", ageThresholdHours: 720, storageClass: "S3_STANDARD", accessLatencyMs: 100 },
-        { name: "archive", ageThresholdHours: Infinity, storageClass: "S3_GLACIER", accessLatencyMs: 36e5 }
+        {
+          name: "hot",
+          ageThresholdHours: 1,
+          storageClass: "MEMORY",
+          accessLatencyMs: 5
+        },
+        {
+          name: "warm",
+          ageThresholdHours: 168,
+          storageClass: "TIMESERIES",
+          accessLatencyMs: 50
+        },
+        {
+          name: "cold",
+          ageThresholdHours: 720,
+          storageClass: "S3_STANDARD",
+          accessLatencyMs: 100
+        },
+        {
+          name: "archive",
+          ageThresholdHours: Infinity,
+          storageClass: "S3_GLACIER",
+          accessLatencyMs: 36e5
+        }
       ];
     }
   }
```
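The hunk above is formatting-only, but it spells out the default tier table: frames age from Redis (hot, under an hour) through a time-series table (warm, to 7 days) into S3 Standard (cold, to 30 days) and Glacier (archive). As a minimal sketch of how age thresholds like these can drive tier selection (the `selectTier` helper is illustrative, not part of the package):

```ts
// Hypothetical helper: pick a storage tier for a frame by age.
// StorageTier mirrors the interface embedded in the sourcemap; selectTier is not in the package.
interface StorageTier {
  name: "hot" | "warm" | "cold" | "archive";
  ageThresholdHours: number;
  storageClass: string;
  accessLatencyMs: number;
}

const tiers: StorageTier[] = [
  { name: "hot", ageThresholdHours: 1, storageClass: "MEMORY", accessLatencyMs: 5 },
  { name: "warm", ageThresholdHours: 168, storageClass: "TIMESERIES", accessLatencyMs: 50 },
  { name: "cold", ageThresholdHours: 720, storageClass: "S3_STANDARD", accessLatencyMs: 100 },
  { name: "archive", ageThresholdHours: Infinity, storageClass: "S3_GLACIER", accessLatencyMs: 3_600_000 },
];

function selectTier(frameTimestampMs: number, now = Date.now()): StorageTier {
  const ageHours = (now - frameTimestampMs) / 3_600_000;
  // Tiers are ordered by threshold, so the first match wins;
  // the Infinity threshold on "archive" guarantees a match.
  return tiers.find((t) => ageHours < t.ageThresholdHours) ?? tiers[tiers.length - 1];
}
```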
```diff
@@ -32,7 +56,10 @@ class InfiniteStorageSystem {
         await this.redisClient.connect();
         await this.redisClient.configSet("maxmemory-policy", "allkeys-lru");
         if (this.config.redis.maxMemoryMB) {
-          await this.redisClient.configSet("maxmemory", `${this.config.redis.maxMemoryMB}mb`);
+          await this.redisClient.configSet(
+            "maxmemory",
+            `${this.config.redis.maxMemoryMB}mb`
+          );
         }
         this.logger.info("Redis client initialized for hot tier");
       }
```
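The hot tier bounds Redis with `maxmemory` plus an `allkeys-lru` eviction policy, so under pressure the cache sheds old frames instead of running out of memory. A self-contained sketch of the same setup with node-redis v4 (the URL and the 256mb cap are placeholder values):

```ts
import { createClient } from "redis";

// Sketch: configure a bounded LRU hot tier, as the diff does.
async function initHotTier() {
  const redis = createClient({ url: "redis://localhost:6379" }); // placeholder URL
  await redis.connect();
  // Evict the least-recently-used key, considering all keys, not just TTL'd ones.
  await redis.configSet("maxmemory-policy", "allkeys-lru");
  // Hard cap: beyond this Redis evicts rather than grows.
  await redis.configSet("maxmemory", "256mb");
  return redis;
}
```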
```diff
@@ -83,11 +110,13 @@ class InfiniteStorageSystem {
         PRIMARY KEY (time, frame_id)
       )
     `);
-    await client.query(`
+    await client.query(
+      `
       SELECT create_hypertable('frame_timeseries', 'time',
         chunk_time_interval => INTERVAL '1 day',
         if_not_exists => TRUE)
-    `).catch(() => {
+    `
+    ).catch(() => {
       this.logger.info("Using standard PostgreSQL partitioning");
     });
     await client.query(`
```
```diff
@@ -95,9 +124,11 @@ class InfiniteStorageSystem {
       CREATE INDEX IF NOT EXISTS idx_frame_project ON frame_timeseries (project_name, time DESC);
       CREATE INDEX IF NOT EXISTS idx_frame_tier ON frame_timeseries (storage_tier);
     `);
-    await client.query(`
+    await client.query(
+      `
       SELECT add_compression_policy('frame_timeseries', INTERVAL '7 days', if_not_exists => TRUE)
-    `).catch(() => {
+    `
+    ).catch(() => {
       this.logger.info("Compression policy not available");
     });
   } finally {
```
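Both hunks in this method wrap TimescaleDB-specific statements (`create_hypertable`, `add_compression_policy`) in a `.catch()` so the schema still comes up on plain PostgreSQL. A sketch of that degrade-gracefully pattern with `pg`, reusing the table name from the diff:

```ts
import { Pool } from "pg";

// Sketch: attempt a TimescaleDB-only statement; fall back silently on plain Postgres.
async function tryTimescaleSetup(pool: Pool): Promise<void> {
  const client = await pool.connect();
  try {
    await client
      .query(`SELECT create_hypertable('frame_timeseries', 'time', if_not_exists => TRUE)`)
      .catch(() => {
        // Not TimescaleDB: the ordinary table created earlier remains usable.
        console.info("Using standard PostgreSQL partitioning");
      });
  } finally {
    client.release();
  }
}
```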
```diff
@@ -130,7 +161,8 @@ class InfiniteStorageSystem {
       if (this.timeseriesPool) {
         const client = await this.timeseriesPool.connect();
         try {
-          await client.query(`
+          await client.query(
+            `
             INSERT INTO frame_timeseries (time, frame_id, user_id, project_name, type, data, compressed_data, storage_tier)
             VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
             ON CONFLICT (time, frame_id) DO UPDATE
```
```diff
@@ -138,16 +170,18 @@ class InfiniteStorageSystem {
                 compressed_data = EXCLUDED.compressed_data,
                 last_accessed = NOW(),
                 access_count = frame_timeseries.access_count + 1
-          `, [
-            new Date(frame.timestamp),
-            frame.frameId,
-            userId,
-            frame.projectName || "default",
-            frame.type,
-            frame,
-            compressedData,
-            "warm",
-          ]);
+          `,
+            [
+              new Date(frame.timestamp),
+              frame.frameId,
+              userId,
+              frame.projectName || "default",
+              frame.type,
+              frame,
+              compressedData,
+              "warm"
+            ]
+          );
         } finally {
           client.release();
         }
```
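The reflowed call is a parameterized upsert: values travel as `$1..$8` bindings rather than being spliced into the SQL, and a second write of the same `(time, frame_id)` key updates the row and bumps `access_count`. A trimmed sketch of the same call shape with `pg` (fewer columns, illustrative types):

```ts
import { Pool } from "pg";

// Sketch: upsert one frame row with bound parameters, no string splicing.
async function upsertFrame(
  pool: Pool,
  frame: { frameId: string; timestamp: number },
  userId: string
): Promise<void> {
  await pool.query(
    `
    INSERT INTO frame_timeseries (time, frame_id, user_id, data, storage_tier)
    VALUES ($1, $2, $3, $4, $5)
    ON CONFLICT (time, frame_id) DO UPDATE
      SET data = EXCLUDED.data,
          last_accessed = NOW(),
          access_count = frame_timeseries.access_count + 1
    `,
    // pg serializes the object parameter to JSON for the JSONB column.
    [new Date(frame.timestamp), frame.frameId, userId, frame, "warm"]
  );
}
```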
```diff
@@ -172,23 +206,31 @@ class InfiniteStorageSystem {
         if (cached) {
           const decompressed = await decompress(cached);
           const frame = JSON.parse(decompressed);
-          await this.redisClient.expire(frameKey, this.config.redis.ttlSeconds || 3600);
+          await this.redisClient.expire(
+            frameKey,
+            this.config.redis.ttlSeconds || 3600
+          );
           const latency = Date.now() - startTime;
           this.trackLatency(latency);
-          this.logger.debug(`Retrieved frame ${frameId} from hot tier in ${latency}ms`);
+          this.logger.debug(
+            `Retrieved frame ${frameId} from hot tier in ${latency}ms`
+          );
           return frame;
         }
       }
       if (this.timeseriesPool) {
         const client = await this.timeseriesPool.connect();
         try {
-          const result = await client.query(`
+          const result = await client.query(
+            `
             SELECT data, compressed_data, storage_tier
             FROM frame_timeseries
             WHERE frame_id = $1 AND user_id = $2
             ORDER BY time DESC
             LIMIT 1
-          `, [frameId, userId]);
+          `,
+            [frameId, userId]
+          );
           if (result.rows.length > 0) {
             const row = result.rows[0];
             let frame;
```
```diff
@@ -198,17 +240,22 @@ class InfiniteStorageSystem {
           } else {
             frame = row.data;
           }
-          await client.query(`
+          await client.query(
+            `
             UPDATE frame_timeseries
             SET last_accessed = NOW(), access_count = access_count + 1
             WHERE frame_id = $1 AND user_id = $2
-          `, [frameId, userId]);
+          `,
+            [frameId, userId]
+          );
           if (this.redisClient) {
             await this.promoteToHotTier(frame, userId);
           }
           const latency = Date.now() - startTime;
           this.trackLatency(latency);
-          this.logger.debug(`Retrieved frame ${frameId} from warm tier in ${latency}ms`);
+          this.logger.debug(
+            `Retrieved frame ${frameId} from warm tier in ${latency}ms`
+          );
           return frame;
         }
       } finally {
```
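The retrieval path these hunks reformat is a read-through cache: try Redis first, fall back to the time-series table, and on a warm hit promote the frame back into Redis so the next read is hot. A condensed sketch of that control flow (compression is elided; the package also gzips payloads):

```ts
import type { Pool } from "pg";
import type { RedisClientType } from "redis";

// Sketch of the tiered read path: hot (Redis) first, then warm (Postgres),
// promoting warm hits back into the hot tier.
async function retrieveFrame(
  redis: RedisClientType,
  pool: Pool,
  frameId: string,
  userId: string
): Promise<unknown | null> {
  const key = `frame:${userId}:${frameId}`;

  const cached = await redis.get(key); // hot tier
  if (cached) {
    await redis.expire(key, 3600); // refresh TTL on access
    return JSON.parse(cached);
  }

  const { rows } = await pool.query( // warm tier
    `SELECT data FROM frame_timeseries
     WHERE frame_id = $1 AND user_id = $2
     ORDER BY time DESC LIMIT 1`,
    [frameId, userId]
  );
  if (rows.length === 0) return null;

  // Promote so the next read is a hot hit.
  await redis.set(key, JSON.stringify(rows[0].data), { EX: 3600 });
  return rows[0].data;
}
```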
```diff
@@ -229,7 +276,9 @@ class InfiniteStorageSystem {
         await this.promoteFrame(frame, userId);
         const latency = Date.now() - startTime;
         this.trackLatency(latency);
-        this.logger.debug(`Retrieved frame ${frameId} from cold tier in ${latency}ms`);
+        this.logger.debug(
+          `Retrieved frame ${frameId} from cold tier in ${latency}ms`
+        );
         return frame;
       } catch (error) {
         if (error.Code !== "NoSuchKey") {
```
```diff
@@ -271,21 +320,24 @@ class InfiniteStorageSystem {
       const client = await this.timeseriesPool.connect();
       try {
         const compressedData = await compress(JSON.stringify(frame));
-        await client.query(`
+        await client.query(
+          `
           INSERT INTO frame_timeseries (time, frame_id, user_id, data, compressed_data, storage_tier)
           VALUES ($1, $2, $3, $4, $5, $6)
           ON CONFLICT (time, frame_id) DO UPDATE
           SET storage_tier = 'warm',
              last_accessed = NOW(),
              access_count = frame_timeseries.access_count + 1
-        `, [
-          new Date(frame.timestamp),
-          frame.frameId,
-          userId,
-          frame,
-          compressedData,
-          "warm",
-        ]);
+        `,
+          [
+            new Date(frame.timestamp),
+            frame.frameId,
+            userId,
+            frame,
+            compressedData,
+            "warm"
+          ]
+        );
       } finally {
         client.release();
       }
```
```diff
@@ -296,9 +348,12 @@ class InfiniteStorageSystem {
    * Start background worker for tier migration
    */
   startMigrationWorker() {
-    this.migrationWorker = setInterval(async () => {
-      await this.migrateAgedData();
-    }, 60 * 60 * 1e3);
+    this.migrationWorker = setInterval(
+      async () => {
+        await this.migrateAgedData();
+      },
+      60 * 60 * 1e3
+    );
     this.logger.info("Migration worker started");
   }
   /**
```
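The migration worker is a plain hourly `setInterval`; the handle is kept on the instance so `shutdown()` can `clearInterval` it, as the embedded source in the sourcemap shows. A standalone sketch of that lifecycle (the migration body itself is elided):

```ts
// Sketch: hourly background migration with a handle kept for clean shutdown.
class MigrationWorker {
  private timer: NodeJS.Timeout | null = null;

  start(migrateAgedData: () => Promise<void>): void {
    this.timer = setInterval(() => {
      // Fire-and-forget, but surface failures instead of crashing the process.
      migrateAgedData().catch((err) => console.error("migration failed", err));
    }, 60 * 60 * 1000); // every hour
  }

  stop(): void {
    if (this.timer) clearInterval(this.timer); // mirrors shutdown() in the package
    this.timer = null;
  }
}
```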
```diff
@@ -319,11 +374,14 @@ class InfiniteStorageSystem {
       `);
       for (const row of coldEligible.rows) {
         await this.migrateToS3(row, "STANDARD");
-        await client.query(`
+        await client.query(
+          `
           UPDATE frame_timeseries
           SET storage_tier = 'cold'
           WHERE frame_id = $1 AND user_id = $2
-        `, [row.frame_id, row.user_id]);
+        `,
+          [row.frame_id, row.user_id]
+        );
       }
       const archiveEligible = await client.query(`
         SELECT frame_id, user_id, data, compressed_data
```
```diff
@@ -335,13 +393,18 @@ class InfiniteStorageSystem {
       `);
       for (const row of archiveEligible.rows) {
         await this.migrateToS3(row, "GLACIER");
-        await client.query(`
+        await client.query(
+          `
           UPDATE frame_timeseries
           SET storage_tier = 'archive'
           WHERE frame_id = $1 AND user_id = $2
-        `, [row.frame_id, row.user_id]);
+        `,
+          [row.frame_id, row.user_id]
+        );
       }
-      this.logger.info(`Migration completed: ${coldEligible.rows.length} to cold, ${archiveEligible.rows.length} to archive`);
+      this.logger.info(
+        `Migration completed: ${coldEligible.rows.length} to cold, ${archiveEligible.rows.length} to archive`
+      );
     } finally {
       client.release();
     }
```

package/dist/core/storage/infinite-storage.js.map

```diff
@@ -1,7 +1,7 @@
 {
   "version": 3,
   "sources": ["../../../src/core/storage/infinite-storage.ts"],
-
"sourcesContent": ["/**\n * Infinite Storage System for StackMemory\n * Implements STA-287: Remote storage with TimeSeries DB + S3 + Redis\n * \n * Storage Tiers:\n * - Hot: Redis (< 1 hour, frequently accessed)\n * - Warm: TimeSeries DB (1 hour - 7 days)\n * - Cold: S3 Standard (7 days - 30 days)\n * - Archive: S3 Glacier (> 30 days)\n */\n\nimport { S3Client, PutObjectCommand, GetObjectCommand, ListObjectsV2Command } from '@aws-sdk/client-s3';\nimport { createClient as createRedisClient } from 'redis';\nimport { Pool } from 'pg';\nimport { Logger } from '../monitoring/logger.js';\nimport { Frame } from '../context/frame-manager.js';\nimport { v4 as uuidv4 } from 'uuid';\nimport { compress, decompress } from '../utils/compression.js';\n\nexport interface StorageTier {\n name: 'hot' | 'warm' | 'cold' | 'archive';\n ageThresholdHours: number;\n storageClass: string;\n accessLatencyMs: number;\n}\n\nexport interface StorageConfig {\n redis: {\n url: string;\n ttlSeconds: number;\n maxMemoryMB: number;\n };\n timeseries: {\n connectionString: string;\n retentionDays: number;\n };\n s3: {\n bucket: string;\n region: string;\n accessKeyId?: string;\n secretAccessKey?: string;\n };\n tiers: StorageTier[];\n}\n\nexport interface StorageMetrics {\n totalObjects: number;\n tierDistribution: Record<string, number>;\n storageBytes: number;\n avgLatencyMs: number;\n p50LatencyMs: number;\n p99LatencyMs: number;\n}\n\nexport class InfiniteStorageSystem {\n private redisClient: any;\n private timeseriesPool: Pool;\n private s3Client: S3Client;\n private logger: Logger;\n private config: StorageConfig;\n private latencies: number[] = [];\n private migrationWorker: NodeJS.Timeout | null = null;\n\n constructor(config: StorageConfig) {\n this.config = config;\n this.logger = new Logger('InfiniteStorage');\n \n // Default storage tiers\n if (!config.tiers || config.tiers.length === 0) {\n this.config.tiers = [\n { name: 'hot', ageThresholdHours: 1, storageClass: 'MEMORY', accessLatencyMs: 5 },\n { name: 'warm', ageThresholdHours: 168, storageClass: 'TIMESERIES', accessLatencyMs: 50 },\n { name: 'cold', ageThresholdHours: 720, storageClass: 'S3_STANDARD', accessLatencyMs: 100 },\n { name: 'archive', ageThresholdHours: Infinity, storageClass: 'S3_GLACIER', accessLatencyMs: 3600000 },\n ];\n }\n }\n\n async initialize(): Promise<void> {\n try {\n // Initialize Redis (hot tier)\n if (this.config.redis?.url) {\n this.redisClient = createRedisClient({\n url: this.config.redis.url,\n });\n \n await this.redisClient.connect();\n \n // Configure Redis memory policy\n await this.redisClient.configSet('maxmemory-policy', 'allkeys-lru');\n if (this.config.redis.maxMemoryMB) {\n await this.redisClient.configSet('maxmemory', `${this.config.redis.maxMemoryMB}mb`);\n }\n \n this.logger.info('Redis client initialized for hot tier');\n }\n\n // Initialize TimeSeries DB (warm tier)\n if (this.config.timeseries?.connectionString) {\n this.timeseriesPool = new Pool({\n connectionString: this.config.timeseries.connectionString,\n max: 10,\n idleTimeoutMillis: 30000,\n });\n\n // Create TimeSeries tables if not exists\n await this.createTimeSeriesTables();\n this.logger.info('TimeSeries DB initialized for warm tier');\n }\n\n // Initialize S3 (cold/archive tiers)\n if (this.config.s3?.bucket) {\n this.s3Client = new S3Client({\n region: this.config.s3.region || 'us-east-1',\n credentials: this.config.s3.accessKeyId ? 
{\n accessKeyId: this.config.s3.accessKeyId,\n secretAccessKey: this.config.s3.secretAccessKey!,\n } : undefined,\n });\n \n this.logger.info('S3 client initialized for cold/archive tiers');\n }\n\n // Start background migration worker\n this.startMigrationWorker();\n \n this.logger.info('Infinite Storage System initialized');\n } catch (error: unknown) {\n this.logger.error('Failed to initialize storage system', error);\n throw error;\n }\n }\n\n /**\n * Create TimeSeries tables for warm tier storage\n */\n private async createTimeSeriesTables(): Promise<void> {\n const client = await this.timeseriesPool.connect();\n \n try {\n // Create hypertable for time-series data\n await client.query(`\n CREATE TABLE IF NOT EXISTS frame_timeseries (\n time TIMESTAMPTZ NOT NULL,\n frame_id TEXT NOT NULL,\n user_id TEXT NOT NULL,\n project_name TEXT,\n type TEXT,\n data JSONB,\n compressed_data BYTEA,\n storage_tier TEXT DEFAULT 'warm',\n access_count INTEGER DEFAULT 0,\n last_accessed TIMESTAMPTZ DEFAULT NOW(),\n PRIMARY KEY (time, frame_id)\n )\n `);\n\n // Create hypertable if using TimescaleDB\n await client.query(`\n SELECT create_hypertable('frame_timeseries', 'time', \n chunk_time_interval => INTERVAL '1 day',\n if_not_exists => TRUE)\n `).catch(() => {\n // Fallback to regular partitioning if not TimescaleDB\n this.logger.info('Using standard PostgreSQL partitioning');\n });\n\n // Create indexes\n await client.query(`\n CREATE INDEX IF NOT EXISTS idx_frame_user ON frame_timeseries (user_id, time DESC);\n CREATE INDEX IF NOT EXISTS idx_frame_project ON frame_timeseries (project_name, time DESC);\n CREATE INDEX IF NOT EXISTS idx_frame_tier ON frame_timeseries (storage_tier);\n `);\n\n // Create compression policy (TimescaleDB specific)\n await client.query(`\n SELECT add_compression_policy('frame_timeseries', INTERVAL '7 days', if_not_exists => TRUE)\n `).catch(() => {\n this.logger.info('Compression policy not available');\n });\n\n } finally {\n client.release();\n }\n }\n\n /**\n * Store a frame with automatic tier selection\n */\n async storeFrame(frame: Frame, userId: string): Promise<void> {\n const startTime = Date.now();\n \n try {\n const frameData = JSON.stringify(frame);\n const compressedData = await compress(frameData);\n const frameKey = `frame:${userId}:${frame.frameId}`;\n\n // Always store in hot tier first (Redis)\n if (this.redisClient) {\n await this.redisClient.setEx(\n frameKey,\n this.config.redis.ttlSeconds || 3600,\n compressedData\n );\n \n // Store metadata for quick lookups\n await this.redisClient.hSet(`meta:${frameKey}`, {\n userId,\n projectName: frame.projectName || 'default',\n type: frame.type,\n timestamp: frame.timestamp,\n tier: 'hot',\n });\n }\n\n // Also store in warm tier for durability\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n await client.query(`\n INSERT INTO frame_timeseries (time, frame_id, user_id, project_name, type, data, compressed_data, storage_tier)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\n ON CONFLICT (time, frame_id) DO UPDATE\n SET data = EXCLUDED.data,\n compressed_data = EXCLUDED.compressed_data,\n last_accessed = NOW(),\n access_count = frame_timeseries.access_count + 1\n `, [\n new Date(frame.timestamp),\n frame.frameId,\n userId,\n frame.projectName || 'default',\n frame.type,\n frame,\n compressedData,\n 'warm',\n ]);\n } finally {\n client.release();\n }\n }\n\n // Track latency\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n \n 
this.logger.debug(`Stored frame ${frame.frameId} in ${latency}ms`);\n } catch (error: unknown) {\n this.logger.error(`Failed to store frame ${frame.frameId}`, error);\n throw error;\n }\n }\n\n /**\n * Retrieve a frame with intelligent caching\n */\n async retrieveFrame(frameId: string, userId: string): Promise<Frame | null> {\n const startTime = Date.now();\n const frameKey = `frame:${userId}:${frameId}`;\n\n try {\n // Try hot tier first (Redis)\n if (this.redisClient) {\n const cached = await this.redisClient.get(frameKey);\n if (cached) {\n const decompressed = await decompress(cached);\n const frame = JSON.parse(decompressed);\n \n // Refresh TTL on access\n await this.redisClient.expire(frameKey, this.config.redis.ttlSeconds || 3600);\n \n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(`Retrieved frame ${frameId} from hot tier in ${latency}ms`);\n \n return frame;\n }\n }\n\n // Try warm tier (TimeSeries DB)\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n const result = await client.query(`\n SELECT data, compressed_data, storage_tier \n FROM frame_timeseries \n WHERE frame_id = $1 AND user_id = $2\n ORDER BY time DESC\n LIMIT 1\n `, [frameId, userId]);\n\n if (result.rows.length > 0) {\n const row = result.rows[0];\n let frame: Frame;\n \n if (row.compressed_data) {\n const decompressed = await decompress(row.compressed_data);\n frame = JSON.parse(decompressed);\n } else {\n frame = row.data;\n }\n\n // Update access stats\n await client.query(`\n UPDATE frame_timeseries \n SET last_accessed = NOW(), access_count = access_count + 1\n WHERE frame_id = $1 AND user_id = $2\n `, [frameId, userId]);\n\n // Promote to hot tier if frequently accessed\n if (this.redisClient) {\n await this.promoteToHotTier(frame, userId);\n }\n\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(`Retrieved frame ${frameId} from warm tier in ${latency}ms`);\n \n return frame;\n }\n } finally {\n client.release();\n }\n }\n\n // Try cold/archive tiers (S3)\n if (this.s3Client && this.config.s3.bucket) {\n const key = `frames/${userId}/${frameId}.json.gz`;\n \n try {\n const command = new GetObjectCommand({\n Bucket: this.config.s3.bucket,\n Key: key,\n });\n \n const response = await this.s3Client.send(command);\n const compressedData = await response.Body!.transformToByteArray();\n const decompressed = await decompress(Buffer.from(compressedData));\n const frame = JSON.parse(decompressed);\n\n // Promote to warmer tiers for future access\n await this.promoteFrame(frame, userId);\n\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(`Retrieved frame ${frameId} from cold tier in ${latency}ms`);\n \n return frame;\n } catch (error: any) {\n if (error.Code !== 'NoSuchKey') {\n throw error;\n }\n }\n }\n\n this.logger.debug(`Frame ${frameId} not found in any tier`);\n return null;\n } catch (error: unknown) {\n this.logger.error(`Failed to retrieve frame ${frameId}`, error);\n throw error;\n }\n }\n\n /**\n * Promote frame to hot tier for fast access\n */\n private async promoteToHotTier(frame: Frame, userId: string): Promise<void> {\n if (!this.redisClient) return;\n \n try {\n const frameKey = `frame:${userId}:${frame.frameId}`;\n const frameData = JSON.stringify(frame);\n const compressedData = await compress(frameData);\n \n await this.redisClient.setEx(\n frameKey,\n this.config.redis.ttlSeconds || 3600,\n compressedData\n );\n \n 
this.logger.debug(`Promoted frame ${frame.frameId} to hot tier`);\n } catch (error: unknown) {\n this.logger.error(`Failed to promote frame ${frame.frameId}`, error);\n }\n }\n\n /**\n * Promote frame through storage tiers\n */\n private async promoteFrame(frame: Frame, userId: string): Promise<void> {\n // Promote to warm tier\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n const compressedData = await compress(JSON.stringify(frame));\n \n await client.query(`\n INSERT INTO frame_timeseries (time, frame_id, user_id, data, compressed_data, storage_tier)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (time, frame_id) DO UPDATE\n SET storage_tier = 'warm',\n last_accessed = NOW(),\n access_count = frame_timeseries.access_count + 1\n `, [\n new Date(frame.timestamp),\n frame.frameId,\n userId,\n frame,\n compressedData,\n 'warm',\n ]);\n } finally {\n client.release();\n }\n }\n\n // Also promote to hot tier\n await this.promoteToHotTier(frame, userId);\n }\n\n /**\n * Start background worker for tier migration\n */\n private startMigrationWorker(): void {\n // Run migration every hour\n this.migrationWorker = setInterval(async () => {\n await this.migrateAgedData();\n }, 60 * 60 * 1000);\n \n this.logger.info('Migration worker started');\n }\n\n /**\n * Migrate aged data to appropriate storage tiers\n */\n private async migrateAgedData(): Promise<void> {\n this.logger.info('Starting tier migration...');\n \n if (!this.timeseriesPool) return;\n \n const client = await this.timeseriesPool.connect();\n \n try {\n // Find data eligible for cold storage (> 7 days old)\n const coldEligible = await client.query(`\n SELECT frame_id, user_id, data, compressed_data\n FROM frame_timeseries\n WHERE storage_tier = 'warm'\n AND time < NOW() - INTERVAL '7 days'\n AND last_accessed < NOW() - INTERVAL '7 days'\n LIMIT 1000\n `);\n\n // Migrate to S3 cold storage\n for (const row of coldEligible.rows) {\n await this.migrateToS3(row, 'STANDARD');\n \n // Update tier in database\n await client.query(`\n UPDATE frame_timeseries\n SET storage_tier = 'cold'\n WHERE frame_id = $1 AND user_id = $2\n `, [row.frame_id, row.user_id]);\n }\n\n // Find data eligible for archive (> 30 days old)\n const archiveEligible = await client.query(`\n SELECT frame_id, user_id, data, compressed_data\n FROM frame_timeseries\n WHERE storage_tier = 'cold'\n AND time < NOW() - INTERVAL '30 days'\n AND last_accessed < NOW() - INTERVAL '30 days'\n LIMIT 1000\n `);\n\n // Migrate to S3 Glacier\n for (const row of archiveEligible.rows) {\n await this.migrateToS3(row, 'GLACIER');\n \n // Update tier in database\n await client.query(`\n UPDATE frame_timeseries\n SET storage_tier = 'archive'\n WHERE frame_id = $1 AND user_id = $2\n `, [row.frame_id, row.user_id]);\n }\n\n this.logger.info(`Migration completed: ${coldEligible.rows.length} to cold, ${archiveEligible.rows.length} to archive`);\n } finally {\n client.release();\n }\n }\n\n /**\n * Migrate data to S3 storage\n */\n private async migrateToS3(row: any, storageClass: string): Promise<void> {\n if (!this.s3Client || !this.config.s3.bucket) return;\n \n try {\n const key = `frames/${row.user_id}/${row.frame_id}.json.gz`;\n const data = row.compressed_data || await compress(JSON.stringify(row.data));\n \n const command = new PutObjectCommand({\n Bucket: this.config.s3.bucket,\n Key: key,\n Body: data,\n StorageClass: storageClass,\n Metadata: {\n userId: row.user_id,\n frameId: row.frame_id,\n migratedAt: new Date().toISOString(),\n },\n 
});\n \n await this.s3Client.send(command);\n \n this.logger.debug(`Migrated frame ${row.frame_id} to S3 ${storageClass}`);\n } catch (error: unknown) {\n this.logger.error(`Failed to migrate frame ${row.frame_id} to S3`, error);\n throw error;\n }\n }\n\n /**\n * Track latency for performance monitoring\n */\n private trackLatency(latencyMs: number): void {\n this.latencies.push(latencyMs);\n \n // Keep only last 1000 measurements\n if (this.latencies.length > 1000) {\n this.latencies.shift();\n }\n }\n\n /**\n * Get storage metrics\n */\n async getMetrics(): Promise<StorageMetrics> {\n const metrics: StorageMetrics = {\n totalObjects: 0,\n tierDistribution: {},\n storageBytes: 0,\n avgLatencyMs: 0,\n p50LatencyMs: 0,\n p99LatencyMs: 0,\n };\n\n // Calculate latency percentiles\n if (this.latencies.length > 0) {\n const sorted = [...this.latencies].sort((a, b) => a - b);\n metrics.avgLatencyMs = sorted.reduce((a, b) => a + b, 0) / sorted.length;\n metrics.p50LatencyMs = sorted[Math.floor(sorted.length * 0.5)];\n metrics.p99LatencyMs = sorted[Math.floor(sorted.length * 0.99)];\n }\n\n // Get tier distribution from TimeSeries DB\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n const result = await client.query(`\n SELECT \n storage_tier,\n COUNT(*) as count,\n SUM(pg_column_size(compressed_data)) as bytes\n FROM frame_timeseries\n GROUP BY storage_tier\n `);\n\n for (const row of result.rows) {\n metrics.tierDistribution[row.storage_tier] = parseInt(row.count);\n metrics.storageBytes += parseInt(row.bytes || 0);\n metrics.totalObjects += parseInt(row.count);\n }\n } finally {\n client.release();\n }\n }\n\n return metrics;\n }\n\n /**\n * Cleanup and shutdown\n */\n async shutdown(): Promise<void> {\n if (this.migrationWorker) {\n clearInterval(this.migrationWorker);\n }\n\n if (this.redisClient) {\n await this.redisClient.quit();\n }\n\n if (this.timeseriesPool) {\n await this.timeseriesPool.end();\n }\n\n this.logger.info('Infinite Storage System shut down');\n }\n}"],
-
"mappings": "AAWA,
+
"sourcesContent": ["/**\n * Infinite Storage System for StackMemory\n * Implements STA-287: Remote storage with TimeSeries DB + S3 + Redis\n *\n * Storage Tiers:\n * - Hot: Redis (< 1 hour, frequently accessed)\n * - Warm: TimeSeries DB (1 hour - 7 days)\n * - Cold: S3 Standard (7 days - 30 days)\n * - Archive: S3 Glacier (> 30 days)\n */\n\nimport {\n S3Client,\n PutObjectCommand,\n GetObjectCommand,\n ListObjectsV2Command,\n} from '@aws-sdk/client-s3';\nimport { createClient as createRedisClient } from 'redis';\nimport { Pool } from 'pg';\nimport { Logger } from '../monitoring/logger.js';\nimport { Frame } from '../context/frame-manager.js';\nimport { v4 as uuidv4 } from 'uuid';\nimport { compress, decompress } from '../utils/compression.js';\n\nexport interface StorageTier {\n name: 'hot' | 'warm' | 'cold' | 'archive';\n ageThresholdHours: number;\n storageClass: string;\n accessLatencyMs: number;\n}\n\nexport interface StorageConfig {\n redis: {\n url: string;\n ttlSeconds: number;\n maxMemoryMB: number;\n };\n timeseries: {\n connectionString: string;\n retentionDays: number;\n };\n s3: {\n bucket: string;\n region: string;\n accessKeyId?: string;\n secretAccessKey?: string;\n };\n tiers: StorageTier[];\n}\n\nexport interface StorageMetrics {\n totalObjects: number;\n tierDistribution: Record<string, number>;\n storageBytes: number;\n avgLatencyMs: number;\n p50LatencyMs: number;\n p99LatencyMs: number;\n}\n\nexport class InfiniteStorageSystem {\n private redisClient: any;\n private timeseriesPool: Pool;\n private s3Client: S3Client;\n private logger: Logger;\n private config: StorageConfig;\n private latencies: number[] = [];\n private migrationWorker: NodeJS.Timeout | null = null;\n\n constructor(config: StorageConfig) {\n this.config = config;\n this.logger = new Logger('InfiniteStorage');\n\n // Default storage tiers\n if (!config.tiers || config.tiers.length === 0) {\n this.config.tiers = [\n {\n name: 'hot',\n ageThresholdHours: 1,\n storageClass: 'MEMORY',\n accessLatencyMs: 5,\n },\n {\n name: 'warm',\n ageThresholdHours: 168,\n storageClass: 'TIMESERIES',\n accessLatencyMs: 50,\n },\n {\n name: 'cold',\n ageThresholdHours: 720,\n storageClass: 'S3_STANDARD',\n accessLatencyMs: 100,\n },\n {\n name: 'archive',\n ageThresholdHours: Infinity,\n storageClass: 'S3_GLACIER',\n accessLatencyMs: 3600000,\n },\n ];\n }\n }\n\n async initialize(): Promise<void> {\n try {\n // Initialize Redis (hot tier)\n if (this.config.redis?.url) {\n this.redisClient = createRedisClient({\n url: this.config.redis.url,\n });\n\n await this.redisClient.connect();\n\n // Configure Redis memory policy\n await this.redisClient.configSet('maxmemory-policy', 'allkeys-lru');\n if (this.config.redis.maxMemoryMB) {\n await this.redisClient.configSet(\n 'maxmemory',\n `${this.config.redis.maxMemoryMB}mb`\n );\n }\n\n this.logger.info('Redis client initialized for hot tier');\n }\n\n // Initialize TimeSeries DB (warm tier)\n if (this.config.timeseries?.connectionString) {\n this.timeseriesPool = new Pool({\n connectionString: this.config.timeseries.connectionString,\n max: 10,\n idleTimeoutMillis: 30000,\n });\n\n // Create TimeSeries tables if not exists\n await this.createTimeSeriesTables();\n this.logger.info('TimeSeries DB initialized for warm tier');\n }\n\n // Initialize S3 (cold/archive tiers)\n if (this.config.s3?.bucket) {\n this.s3Client = new S3Client({\n region: this.config.s3.region || 'us-east-1',\n credentials: this.config.s3.accessKeyId\n ? 
{\n accessKeyId: this.config.s3.accessKeyId,\n secretAccessKey: this.config.s3.secretAccessKey!,\n }\n : undefined,\n });\n\n this.logger.info('S3 client initialized for cold/archive tiers');\n }\n\n // Start background migration worker\n this.startMigrationWorker();\n\n this.logger.info('Infinite Storage System initialized');\n } catch (error: unknown) {\n this.logger.error('Failed to initialize storage system', error);\n throw error;\n }\n }\n\n /**\n * Create TimeSeries tables for warm tier storage\n */\n private async createTimeSeriesTables(): Promise<void> {\n const client = await this.timeseriesPool.connect();\n\n try {\n // Create hypertable for time-series data\n await client.query(`\n CREATE TABLE IF NOT EXISTS frame_timeseries (\n time TIMESTAMPTZ NOT NULL,\n frame_id TEXT NOT NULL,\n user_id TEXT NOT NULL,\n project_name TEXT,\n type TEXT,\n data JSONB,\n compressed_data BYTEA,\n storage_tier TEXT DEFAULT 'warm',\n access_count INTEGER DEFAULT 0,\n last_accessed TIMESTAMPTZ DEFAULT NOW(),\n PRIMARY KEY (time, frame_id)\n )\n `);\n\n // Create hypertable if using TimescaleDB\n await client\n .query(\n `\n SELECT create_hypertable('frame_timeseries', 'time', \n chunk_time_interval => INTERVAL '1 day',\n if_not_exists => TRUE)\n `\n )\n .catch(() => {\n // Fallback to regular partitioning if not TimescaleDB\n this.logger.info('Using standard PostgreSQL partitioning');\n });\n\n // Create indexes\n await client.query(`\n CREATE INDEX IF NOT EXISTS idx_frame_user ON frame_timeseries (user_id, time DESC);\n CREATE INDEX IF NOT EXISTS idx_frame_project ON frame_timeseries (project_name, time DESC);\n CREATE INDEX IF NOT EXISTS idx_frame_tier ON frame_timeseries (storage_tier);\n `);\n\n // Create compression policy (TimescaleDB specific)\n await client\n .query(\n `\n SELECT add_compression_policy('frame_timeseries', INTERVAL '7 days', if_not_exists => TRUE)\n `\n )\n .catch(() => {\n this.logger.info('Compression policy not available');\n });\n } finally {\n client.release();\n }\n }\n\n /**\n * Store a frame with automatic tier selection\n */\n async storeFrame(frame: Frame, userId: string): Promise<void> {\n const startTime = Date.now();\n\n try {\n const frameData = JSON.stringify(frame);\n const compressedData = await compress(frameData);\n const frameKey = `frame:${userId}:${frame.frameId}`;\n\n // Always store in hot tier first (Redis)\n if (this.redisClient) {\n await this.redisClient.setEx(\n frameKey,\n this.config.redis.ttlSeconds || 3600,\n compressedData\n );\n\n // Store metadata for quick lookups\n await this.redisClient.hSet(`meta:${frameKey}`, {\n userId,\n projectName: frame.projectName || 'default',\n type: frame.type,\n timestamp: frame.timestamp,\n tier: 'hot',\n });\n }\n\n // Also store in warm tier for durability\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n\n try {\n await client.query(\n `\n INSERT INTO frame_timeseries (time, frame_id, user_id, project_name, type, data, compressed_data, storage_tier)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\n ON CONFLICT (time, frame_id) DO UPDATE\n SET data = EXCLUDED.data,\n compressed_data = EXCLUDED.compressed_data,\n last_accessed = NOW(),\n access_count = frame_timeseries.access_count + 1\n `,\n [\n new Date(frame.timestamp),\n frame.frameId,\n userId,\n frame.projectName || 'default',\n frame.type,\n frame,\n compressedData,\n 'warm',\n ]\n );\n } finally {\n client.release();\n }\n }\n\n // Track latency\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n\n 
this.logger.debug(`Stored frame ${frame.frameId} in ${latency}ms`);\n } catch (error: unknown) {\n this.logger.error(`Failed to store frame ${frame.frameId}`, error);\n throw error;\n }\n }\n\n /**\n * Retrieve a frame with intelligent caching\n */\n async retrieveFrame(frameId: string, userId: string): Promise<Frame | null> {\n const startTime = Date.now();\n const frameKey = `frame:${userId}:${frameId}`;\n\n try {\n // Try hot tier first (Redis)\n if (this.redisClient) {\n const cached = await this.redisClient.get(frameKey);\n if (cached) {\n const decompressed = await decompress(cached);\n const frame = JSON.parse(decompressed);\n\n // Refresh TTL on access\n await this.redisClient.expire(\n frameKey,\n this.config.redis.ttlSeconds || 3600\n );\n\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(\n `Retrieved frame ${frameId} from hot tier in ${latency}ms`\n );\n\n return frame;\n }\n }\n\n // Try warm tier (TimeSeries DB)\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n\n try {\n const result = await client.query(\n `\n SELECT data, compressed_data, storage_tier \n FROM frame_timeseries \n WHERE frame_id = $1 AND user_id = $2\n ORDER BY time DESC\n LIMIT 1\n `,\n [frameId, userId]\n );\n\n if (result.rows.length > 0) {\n const row = result.rows[0];\n let frame: Frame;\n\n if (row.compressed_data) {\n const decompressed = await decompress(row.compressed_data);\n frame = JSON.parse(decompressed);\n } else {\n frame = row.data;\n }\n\n // Update access stats\n await client.query(\n `\n UPDATE frame_timeseries \n SET last_accessed = NOW(), access_count = access_count + 1\n WHERE frame_id = $1 AND user_id = $2\n `,\n [frameId, userId]\n );\n\n // Promote to hot tier if frequently accessed\n if (this.redisClient) {\n await this.promoteToHotTier(frame, userId);\n }\n\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(\n `Retrieved frame ${frameId} from warm tier in ${latency}ms`\n );\n\n return frame;\n }\n } finally {\n client.release();\n }\n }\n\n // Try cold/archive tiers (S3)\n if (this.s3Client && this.config.s3.bucket) {\n const key = `frames/${userId}/${frameId}.json.gz`;\n\n try {\n const command = new GetObjectCommand({\n Bucket: this.config.s3.bucket,\n Key: key,\n });\n\n const response = await this.s3Client.send(command);\n const compressedData = await response.Body!.transformToByteArray();\n const decompressed = await decompress(Buffer.from(compressedData));\n const frame = JSON.parse(decompressed);\n\n // Promote to warmer tiers for future access\n await this.promoteFrame(frame, userId);\n\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(\n `Retrieved frame ${frameId} from cold tier in ${latency}ms`\n );\n\n return frame;\n } catch (error: any) {\n if (error.Code !== 'NoSuchKey') {\n throw error;\n }\n }\n }\n\n this.logger.debug(`Frame ${frameId} not found in any tier`);\n return null;\n } catch (error: unknown) {\n this.logger.error(`Failed to retrieve frame ${frameId}`, error);\n throw error;\n }\n }\n\n /**\n * Promote frame to hot tier for fast access\n */\n private async promoteToHotTier(frame: Frame, userId: string): Promise<void> {\n if (!this.redisClient) return;\n\n try {\n const frameKey = `frame:${userId}:${frame.frameId}`;\n const frameData = JSON.stringify(frame);\n const compressedData = await compress(frameData);\n\n await this.redisClient.setEx(\n frameKey,\n this.config.redis.ttlSeconds || 3600,\n 
compressedData\n );\n\n this.logger.debug(`Promoted frame ${frame.frameId} to hot tier`);\n } catch (error: unknown) {\n this.logger.error(`Failed to promote frame ${frame.frameId}`, error);\n }\n }\n\n /**\n * Promote frame through storage tiers\n */\n private async promoteFrame(frame: Frame, userId: string): Promise<void> {\n // Promote to warm tier\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n\n try {\n const compressedData = await compress(JSON.stringify(frame));\n\n await client.query(\n `\n INSERT INTO frame_timeseries (time, frame_id, user_id, data, compressed_data, storage_tier)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (time, frame_id) DO UPDATE\n SET storage_tier = 'warm',\n last_accessed = NOW(),\n access_count = frame_timeseries.access_count + 1\n `,\n [\n new Date(frame.timestamp),\n frame.frameId,\n userId,\n frame,\n compressedData,\n 'warm',\n ]\n );\n } finally {\n client.release();\n }\n }\n\n // Also promote to hot tier\n await this.promoteToHotTier(frame, userId);\n }\n\n /**\n * Start background worker for tier migration\n */\n private startMigrationWorker(): void {\n // Run migration every hour\n this.migrationWorker = setInterval(\n async () => {\n await this.migrateAgedData();\n },\n 60 * 60 * 1000\n );\n\n this.logger.info('Migration worker started');\n }\n\n /**\n * Migrate aged data to appropriate storage tiers\n */\n private async migrateAgedData(): Promise<void> {\n this.logger.info('Starting tier migration...');\n\n if (!this.timeseriesPool) return;\n\n const client = await this.timeseriesPool.connect();\n\n try {\n // Find data eligible for cold storage (> 7 days old)\n const coldEligible = await client.query(`\n SELECT frame_id, user_id, data, compressed_data\n FROM frame_timeseries\n WHERE storage_tier = 'warm'\n AND time < NOW() - INTERVAL '7 days'\n AND last_accessed < NOW() - INTERVAL '7 days'\n LIMIT 1000\n `);\n\n // Migrate to S3 cold storage\n for (const row of coldEligible.rows) {\n await this.migrateToS3(row, 'STANDARD');\n\n // Update tier in database\n await client.query(\n `\n UPDATE frame_timeseries\n SET storage_tier = 'cold'\n WHERE frame_id = $1 AND user_id = $2\n `,\n [row.frame_id, row.user_id]\n );\n }\n\n // Find data eligible for archive (> 30 days old)\n const archiveEligible = await client.query(`\n SELECT frame_id, user_id, data, compressed_data\n FROM frame_timeseries\n WHERE storage_tier = 'cold'\n AND time < NOW() - INTERVAL '30 days'\n AND last_accessed < NOW() - INTERVAL '30 days'\n LIMIT 1000\n `);\n\n // Migrate to S3 Glacier\n for (const row of archiveEligible.rows) {\n await this.migrateToS3(row, 'GLACIER');\n\n // Update tier in database\n await client.query(\n `\n UPDATE frame_timeseries\n SET storage_tier = 'archive'\n WHERE frame_id = $1 AND user_id = $2\n `,\n [row.frame_id, row.user_id]\n );\n }\n\n this.logger.info(\n `Migration completed: ${coldEligible.rows.length} to cold, ${archiveEligible.rows.length} to archive`\n );\n } finally {\n client.release();\n }\n }\n\n /**\n * Migrate data to S3 storage\n */\n private async migrateToS3(row: any, storageClass: string): Promise<void> {\n if (!this.s3Client || !this.config.s3.bucket) return;\n\n try {\n const key = `frames/${row.user_id}/${row.frame_id}.json.gz`;\n const data =\n row.compressed_data || (await compress(JSON.stringify(row.data)));\n\n const command = new PutObjectCommand({\n Bucket: this.config.s3.bucket,\n Key: key,\n Body: data,\n StorageClass: storageClass,\n Metadata: {\n userId: row.user_id,\n frameId: 
row.frame_id,\n migratedAt: new Date().toISOString(),\n },\n });\n\n await this.s3Client.send(command);\n\n this.logger.debug(`Migrated frame ${row.frame_id} to S3 ${storageClass}`);\n } catch (error: unknown) {\n this.logger.error(`Failed to migrate frame ${row.frame_id} to S3`, error);\n throw error;\n }\n }\n\n /**\n * Track latency for performance monitoring\n */\n private trackLatency(latencyMs: number): void {\n this.latencies.push(latencyMs);\n\n // Keep only last 1000 measurements\n if (this.latencies.length > 1000) {\n this.latencies.shift();\n }\n }\n\n /**\n * Get storage metrics\n */\n async getMetrics(): Promise<StorageMetrics> {\n const metrics: StorageMetrics = {\n totalObjects: 0,\n tierDistribution: {},\n storageBytes: 0,\n avgLatencyMs: 0,\n p50LatencyMs: 0,\n p99LatencyMs: 0,\n };\n\n // Calculate latency percentiles\n if (this.latencies.length > 0) {\n const sorted = [...this.latencies].sort((a, b) => a - b);\n metrics.avgLatencyMs = sorted.reduce((a, b) => a + b, 0) / sorted.length;\n metrics.p50LatencyMs = sorted[Math.floor(sorted.length * 0.5)];\n metrics.p99LatencyMs = sorted[Math.floor(sorted.length * 0.99)];\n }\n\n // Get tier distribution from TimeSeries DB\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n\n try {\n const result = await client.query(`\n SELECT \n storage_tier,\n COUNT(*) as count,\n SUM(pg_column_size(compressed_data)) as bytes\n FROM frame_timeseries\n GROUP BY storage_tier\n `);\n\n for (const row of result.rows) {\n metrics.tierDistribution[row.storage_tier] = parseInt(row.count);\n metrics.storageBytes += parseInt(row.bytes || 0);\n metrics.totalObjects += parseInt(row.count);\n }\n } finally {\n client.release();\n }\n }\n\n return metrics;\n }\n\n /**\n * Cleanup and shutdown\n */\n async shutdown(): Promise<void> {\n if (this.migrationWorker) {\n clearInterval(this.migrationWorker);\n }\n\n if (this.redisClient) {\n await this.redisClient.quit();\n }\n\n if (this.timeseriesPool) {\n await this.timeseriesPool.end();\n }\n\n this.logger.info('Infinite Storage System shut down');\n }\n}\n"],
+
"mappings": "AAWA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OAEK;AACP,SAAS,gBAAgB,yBAAyB;AAClD,SAAS,YAAY;AACrB,SAAS,cAAc;AAGvB,SAAS,UAAU,kBAAkB;AAqC9B,MAAM,sBAAsB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,YAAsB,CAAC;AAAA,EACvB,kBAAyC;AAAA,EAEjD,YAAY,QAAuB;AACjC,SAAK,SAAS;AACd,SAAK,SAAS,IAAI,OAAO,iBAAiB;AAG1C,QAAI,CAAC,OAAO,SAAS,OAAO,MAAM,WAAW,GAAG;AAC9C,WAAK,OAAO,QAAQ;AAAA,QAClB;AAAA,UACE,MAAM;AAAA,UACN,mBAAmB;AAAA,UACnB,cAAc;AAAA,UACd,iBAAiB;AAAA,QACnB;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,mBAAmB;AAAA,UACnB,cAAc;AAAA,UACd,iBAAiB;AAAA,QACnB;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,mBAAmB;AAAA,UACnB,cAAc;AAAA,UACd,iBAAiB;AAAA,QACnB;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,mBAAmB;AAAA,UACnB,cAAc;AAAA,UACd,iBAAiB;AAAA,QACnB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,aAA4B;AAChC,QAAI;AAEF,UAAI,KAAK,OAAO,OAAO,KAAK;AAC1B,aAAK,cAAc,kBAAkB;AAAA,UACnC,KAAK,KAAK,OAAO,MAAM;AAAA,QACzB,CAAC;AAED,cAAM,KAAK,YAAY,QAAQ;AAG/B,cAAM,KAAK,YAAY,UAAU,oBAAoB,aAAa;AAClE,YAAI,KAAK,OAAO,MAAM,aAAa;AACjC,gBAAM,KAAK,YAAY;AAAA,YACrB;AAAA,YACA,GAAG,KAAK,OAAO,MAAM,WAAW;AAAA,UAClC;AAAA,QACF;AAEA,aAAK,OAAO,KAAK,uCAAuC;AAAA,MAC1D;AAGA,UAAI,KAAK,OAAO,YAAY,kBAAkB;AAC5C,aAAK,iBAAiB,IAAI,KAAK;AAAA,UAC7B,kBAAkB,KAAK,OAAO,WAAW;AAAA,UACzC,KAAK;AAAA,UACL,mBAAmB;AAAA,QACrB,CAAC;AAGD,cAAM,KAAK,uBAAuB;AAClC,aAAK,OAAO,KAAK,yCAAyC;AAAA,MAC5D;AAGA,UAAI,KAAK,OAAO,IAAI,QAAQ;AAC1B,aAAK,WAAW,IAAI,SAAS;AAAA,UAC3B,QAAQ,KAAK,OAAO,GAAG,UAAU;AAAA,UACjC,aAAa,KAAK,OAAO,GAAG,cACxB;AAAA,YACE,aAAa,KAAK,OAAO,GAAG;AAAA,YAC5B,iBAAiB,KAAK,OAAO,GAAG;AAAA,UAClC,IACA;AAAA,QACN,CAAC;AAED,aAAK,OAAO,KAAK,8CAA8C;AAAA,MACjE;AAGA,WAAK,qBAAqB;AAE1B,WAAK,OAAO,KAAK,qCAAqC;AAAA,IACxD,SAAS,OAAgB;AACvB,WAAK,OAAO,MAAM,uCAAuC,KAAK;AAC9D,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,yBAAwC;AACpD,UAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,QAAI;AAEF,YAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAclB;AAGD,YAAM,OACH;AAAA,QACC;AAAA;AAAA;AAAA;AAAA;AAAA,MAKF,EACC,MAAM,MAAM;AAEX,aAAK,OAAO,KAAK,wCAAwC;AAAA,MAC3D,CAAC;AAGH,YAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA,OAIlB;AAGD,YAAM,OACH;AAAA,QACC;AAAA;AAAA;AAAA,MAGF,EACC,MAAM,MAAM;AACX,aAAK,OAAO,KAAK,kCAAkC;AAAA,MACrD,CAAC;AAAA,IACL,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,WAAW,OAAc,QAA+B;AAC5D,UAAM,YAAY,KAAK,IAAI;AAE3B,QAAI;AACF,YAAM,YAAY,KAAK,UAAU,KAAK;AACtC,YAAM,iBAAiB,MAAM,SAAS,SAAS;AAC/C,YAAM,WAAW,SAAS,MAAM,IAAI,MAAM,OAAO;AAGjD,UAAI,KAAK,aAAa;AACpB,cAAM,KAAK,YAAY;AAAA,UACrB;AAAA,UACA,KAAK,OAAO,MAAM,cAAc;AAAA,UAChC;AAAA,QACF;AAGA,cAAM,KAAK,YAAY,KAAK,QAAQ,QAAQ,IAAI;AAAA,UAC9C;AAAA,UACA,aAAa,MAAM,eAAe;AAAA,UAClC,MAAM,MAAM;AAAA,UACZ,WAAW,MAAM;AAAA,UACjB,MAAM;AAAA,QACR,CAAC;AAAA,MACH;AAGA,UAAI,KAAK,gBAAgB;AACvB,cAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,YAAI;AACF,gBAAM,OAAO;AAAA,YACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,YASA;AAAA,cACE,IAAI,KAAK,MAAM,SAAS;AAAA,cACxB,MAAM;AAAA,cACN;AAAA,cACA,MAAM,eAAe;AAAA,cACrB,MAAM;AAAA,cACN;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UACF;AAAA,QACF,UAAE;AACA,iBAAO,QAAQ;AAAA,QACjB;AAAA,MACF;AAGA,YAAM,UAAU,KAAK,IAAI,IAAI;AAC7B,WAAK,aAAa,OAAO;AAEzB,WAAK,OAAO,MAAM,gBAAgB,MAAM,OAAO,OAAO,OAAO,IAAI;AAAA,IACnE,SAAS,OAAgB;AACvB,WAAK,OAAO,MAAM,yBAAyB,MAAM,OAAO,IAAI,KAAK;AACjE,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,cAAc,SAAiB,QAAuC;AAC1E,UAAM,YAAY,KAAK,IAAI;AAC3B,UAAM,WAAW,SAAS,MAAM,IAAI,OAAO;AAE3C,QAAI;AAEF,UAAI,KAAK,aAAa;AACpB,cAAM,SAAS,MAAM,KAAK,YAAY,IAAI,QAAQ;AAClD,YAAI,QAAQ;AACV,gBAAM,eAAe,MAAM,WAAW,MAAM;AAC5C,gBAAM,QAAQ,KAAK,MAAM,YAAY;AAGrC,gBAAM,KAAK,YAAY;AAAA,YACrB;AAAA,YACA,KAAK,OAAO,MAAM,cAAc;AAAA,UAClC;AAEA,gBAAM,UAAU,KAAK,IAAI,IAA
 ...(tail of the regenerated base64-VLQ "mappings" string elided; machine-generated sourcemap data)
   "names": []
 }
@@ -1,5 +1,10 @@
 import { createClient } from "redis";
-import { S3Client, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from "@aws-sdk/client-s3";
+import {
+  S3Client,
+  PutObjectCommand,
+  GetObjectCommand,
+  DeleteObjectCommand
+} from "@aws-sdk/client-s3";
 import { Storage } from "@google-cloud/storage";
 import { logger } from "../monitoring/logger.js";
 import * as zlib from "zlib";
@@ -79,7 +84,10 @@ class RailwayOptimizedStorage {
         await this.redisClient.configSet("maxmemory-policy", "allkeys-lru");
         logger.info("Redis connected for hot tier storage");
       } catch (error) {
-        logger.warn("Redis connection failed, falling back to SQLite only", error);
+        logger.warn(
+          "Redis connection failed, falling back to SQLite only",
+          error
+        );
       }
     }
     if (this.config.railwayBuckets.accessKeyId) {
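
This hunk only rewraps the fallback warning, but it captures the intended degradation path: if Redis is unreachable, the hot tier is disabled and SQLite serves alone. A minimal sketch of that connect-with-fallback pattern with node-redis (the REDIS_URL environment variable is an assumption for illustration, not taken from this package):

    import { createClient } from "redis";

    let redisClient = null;
    try {
      redisClient = createClient({ url: process.env.REDIS_URL }); // assumed config source
      await redisClient.connect(); // rejects if the server is unreachable
      // match the diff: evict least-recently-used keys once maxmemory is reached
      await redisClient.configSet("maxmemory-policy", "allkeys-lru");
    } catch (error) {
      console.warn("Redis connection failed, falling back to SQLite only", error);
      redisClient = null; // callers treat null as "hot tier disabled"
    }
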
@@ -285,10 +293,12 @@ class RailwayOptimizedStorage {
     if (this.railwayS3) {
       try {
         const warmKey = this.getWarmTierKey(trace);
-        await this.railwayS3.send(new DeleteObjectCommand({
-          Bucket: this.config.railwayBuckets.bucket,
-          Key: warmKey
-        }));
+        await this.railwayS3.send(
+          new DeleteObjectCommand({
+            Bucket: this.config.railwayBuckets.bucket,
+            Key: warmKey
+          })
+        );
       } catch (error) {
       }
     }
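
The warm-tier delete uses the modular v3 command object imported at the top of the file. A self-contained sketch of the same send/command pattern (region, bucket, and key are illustrative values, not this package's configuration):

    import { S3Client, DeleteObjectCommand } from "@aws-sdk/client-s3";

    const s3 = new S3Client({ region: "us-east-1" }); // illustrative region
    await s3.send(
      new DeleteObjectCommand({
        Bucket: "example-bucket", // the package reads this from config.railwayBuckets
        Key: "warm/trace-123"     // illustrative warm-tier key
      })
    );
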
@@ -334,9 +344,7 @@ class RailwayOptimizedStorage {
    */
   async retrieveTrace(traceId) {
     await this.initialized;
-    const location = this.localDb.prepare(
-      "SELECT tier, location FROM storage_tiers WHERE trace_id = ?"
-    ).get(traceId);
+    const location = this.localDb.prepare("SELECT tier, location FROM storage_tiers WHERE trace_id = ?").get(traceId);
     if (!location) {
       return null;
     }
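
Collapsing prepare(...).get(...) onto one line is purely cosmetic. The synchronous prepare/get shape matches better-sqlite3's API; a sketch under that assumption, with an illustrative path and id, and a minimal schema (the real table carries more columns, as the trackStorage hunk below shows):

    import Database from "better-sqlite3"; // assumed driver, inferred from the synchronous API shape

    const db = new Database(":memory:"); // illustrative; the package uses a file-backed database
    db.exec("CREATE TABLE storage_tiers (trace_id TEXT PRIMARY KEY, tier TEXT, location TEXT)");
    const row = db
      .prepare("SELECT tier, location FROM storage_tiers WHERE trace_id = ?")
      .get("trace-123"); // first matching row, or undefined
    console.log(row ?? "trace not stored");
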
@@ -419,13 +427,15 @@ class RailwayOptimizedStorage {
   trackStorage(traceId, tier, trace) {
     const originalSize = JSON.stringify(trace).length;
     const compressedSize = Math.floor(originalSize * 0.3);
-    this.localDb.prepare(`
+    this.localDb.prepare(
+      `
       INSERT OR REPLACE INTO storage_tiers (
         trace_id, tier, location, original_size, compressed_size,
         compression_ratio, access_count, last_accessed, created_at,
         migrated_at, score
       ) VALUES (?, ?, ?, ?, ?, ?, 0, ?, ?, ?, ?)
-    `).run(
+      `
+    ).run(
       traceId,
       tier,
       this.getStorageLocation(trace, tier),
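
INSERT OR REPLACE keys on the table's primary key, so re-tracking a trace overwrites its previous tier row instead of duplicating it. A compact sketch of that upsert behavior (two-column schema for brevity; better-sqlite3 is again an assumption):

    import Database from "better-sqlite3";

    const db = new Database(":memory:");
    db.exec("CREATE TABLE storage_tiers (trace_id TEXT PRIMARY KEY, tier TEXT)");
    const upsert = db.prepare("INSERT OR REPLACE INTO storage_tiers (trace_id, tier) VALUES (?, ?)");
    upsert.run("trace-123", "hot");
    upsert.run("trace-123", "warm"); // replaces the 'hot' row rather than adding a second one
    console.log(db.prepare("SELECT COUNT(*) AS n FROM storage_tiers").get().n); // 1
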
@@ -472,12 +482,14 @@ class RailwayOptimizedStorage {
       errors: []
     };
     const now = Date.now();
-    const candidates = this.localDb.prepare(`
+    const candidates = this.localDb.prepare(
+      `
       SELECT trace_id, tier, created_at, score
       FROM storage_tiers
       WHERE tier != 'cold'
       ORDER BY created_at ASC
-    `).all();
+      `
+    ).all();
     for (const candidate of candidates) {
       const ageHours = (now - candidate.created_at) / (1e3 * 60 * 60);
       try {
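
The migration loop converts each row's age from milliseconds to hours with (now - candidate.created_at) / (1e3 * 60 * 60). The same arithmetic in isolation:

    const now = Date.now();
    const createdAt = now - 30 * 3600000; // illustrative: a trace created 30 hours ago
    const ageHours = (now - createdAt) / (1e3 * 60 * 60); // 1000 ms * 60 s * 60 min = 1 hour
    console.log(ageHours); // 30
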
@@ -497,7 +509,9 @@ class RailwayOptimizedStorage {
         }
       }
     } catch (error) {
-      results.errors.push(`Failed to migrate ${candidate.trace_id}: ${error}`);
+      results.errors.push(
+        `Failed to migrate ${candidate.trace_id}: ${error}`
+      );
     }
   }
   logger.info("Tier migration completed", results);
@@ -507,7 +521,8 @@ class RailwayOptimizedStorage {
    * Get storage statistics
    */
   getStorageStats() {
-    const tierStats = this.localDb.prepare(`
+    const tierStats = this.localDb.prepare(
+      `
       SELECT
         tier,
         COUNT(*) as count,
@@ -517,8 +532,10 @@ class RailwayOptimizedStorage {
         AVG(access_count) as avg_access
       FROM storage_tiers
       GROUP BY tier
-    `).all();
-    const ageDistribution = this.localDb.prepare(`
+      `
+    ).all();
+    const ageDistribution = this.localDb.prepare(
+      `
       SELECT
         CASE
           WHEN (? - created_at) / 3600000 < 24 THEN '< 24h'
@@ -529,13 +546,20 @@ class RailwayOptimizedStorage {
         COUNT(*) as count
       FROM storage_tiers
       GROUP BY age_group
-    `).all(Date.now(), Date.now(), Date.now());
+      `
+    ).all(Date.now(), Date.now(), Date.now());
     return {
       byTier: tierStats,
       byAge: ageDistribution,
       totalTraces: tierStats.reduce((sum, t) => sum + t.count, 0),
-      totalSize: tierStats.reduce((sum, t) => sum + t.total_original, 0),
-      compressedSize: tierStats.reduce((sum, t) => sum + t.total_compressed, 0)
+      totalSize: tierStats.reduce(
+        (sum, t) => sum + t.total_original,
+        0
+      ),
+      compressedSize: tierStats.reduce(
+        (sum, t) => sum + t.total_compressed,
+        0
+      )
     };
   }
   /**
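
The stats object folds the per-tier GROUP BY rows into package-wide totals, and dividing the two reductions yields the overall compression ratio. A standalone sketch with illustrative rows in the shape that query returns:

    const tierStats = [
      { tier: "hot", count: 10, total_original: 50000, total_compressed: 15000 },
      { tier: "warm", count: 4, total_original: 20000, total_compressed: 6000 }
    ];
    const totalSize = tierStats.reduce((sum, t) => sum + t.total_original, 0);
    const compressedSize = tierStats.reduce((sum, t) => sum + t.total_compressed, 0);
    console.log((compressedSize / totalSize).toFixed(2)); // "0.30", the same 0.3 ratio trackStorage estimates
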
@@ -544,10 +568,12 @@ class RailwayOptimizedStorage {
   async cleanup() {
     let cleaned = 0;
     const cutoff = Date.now() - 90 * 24 * 60 * 60 * 1e3;
-    const result = this.localDb.prepare(`
+    const result = this.localDb.prepare(
+      `
       DELETE FROM storage_tiers
       WHERE tier = 'cold' AND created_at < ? AND access_count = 0
-    `).run(cutoff);
+      `
+    ).run(cutoff);
     cleaned = result.changes;
     logger.info("Cleanup completed", { removed: cleaned });
     return cleaned;
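
cleanup() removes never-accessed cold rows older than 90 days; 1e3 is simply the minifier's spelling of 1000. The cutoff arithmetic on its own:

    const NINETY_DAYS_MS = 90 * 24 * 60 * 60 * 1e3; // days * hours * minutes * seconds * ms
    const cutoff = Date.now() - NINETY_DAYS_MS;
    console.log(new Date(cutoff).toISOString()); // rows created before this instant are eligible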