@stackmemoryai/stackmemory 0.3.5 → 0.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/commands/infinite-storage.js +240 -0
- package/dist/cli/commands/infinite-storage.js.map +7 -0
- package/dist/cli/index.js +2 -0
- package/dist/cli/index.js.map +2 -2
- package/dist/core/storage/chromadb-simple.js +160 -0
- package/dist/core/storage/chromadb-simple.js.map +7 -0
- package/dist/core/storage/infinite-storage.js +443 -0
- package/dist/core/storage/infinite-storage.js.map +7 -0
- package/dist/core/utils/compression.js +79 -0
- package/dist/core/utils/compression.js.map +7 -0
- package/dist/features/tui/services/data-service.js +12 -40
- package/dist/features/tui/services/data-service.js.map +2 -2
- package/dist/features/tui/services/linear-task-reader.js +100 -0
- package/dist/features/tui/services/linear-task-reader.js.map +7 -0
- package/dist/features/web/client/stores/task-store.js +22 -0
- package/dist/features/web/client/stores/task-store.js.map +7 -0
- package/dist/features/web/server/index.js +171 -0
- package/dist/features/web/server/index.js.map +7 -0
- package/dist/skills/claude-skills.js +47 -1
- package/dist/skills/claude-skills.js.map +2 -2
- package/dist/skills/dashboard-launcher.js +212 -0
- package/dist/skills/dashboard-launcher.js.map +7 -0
- package/package.json +3 -1
|
@@ -0,0 +1,443 @@
|
|
|
1
|
+
import { S3Client, PutObjectCommand, GetObjectCommand } from "@aws-sdk/client-s3";
|
|
2
|
+
import { createClient as createRedisClient } from "redis";
|
|
3
|
+
import { Pool } from "pg";
|
|
4
|
+
import { Logger } from "../monitoring/logger.js";
|
|
5
|
+
import { compress, decompress } from "../utils/compression.js";
|
|
6
|
+
/**
 * Tiered "infinite" frame storage: Redis (hot), TimeSeries/PostgreSQL
 * (warm) and S3 Standard/Glacier (cold/archive), plus a background worker
 * that migrates aging, rarely-accessed frames down the tiers.
 *
 * Each backend is optional: a client is only created for the parts of
 * `config` that are present, and every code path guards on the client
 * before using it, so the system degrades gracefully.
 */
class InfiniteStorageSystem {
  redisClient;            // hot tier; set in initialize() when config.redis.url is given
  timeseriesPool;         // warm tier pg Pool; set when a connection string is given
  s3Client;               // cold/archive S3 client; set when config.s3.bucket is given
  logger;
  config;
  latencies = [];         // rolling window (max 1000) of recent operation latencies in ms
  migrationWorker = null; // setInterval handle for the hourly migration job
  /**
   * @param {object} config - storage configuration ({redis, timeseries, s3, tiers}).
   *   When no tiers are supplied, a default 4-step ladder is installed.
   */
  constructor(config) {
    this.config = config;
    this.logger = new Logger("InfiniteStorage");
    // Fall back to the default tier ladder when none is configured.
    if (!config.tiers || config.tiers.length === 0) {
      this.config.tiers = [
        { name: "hot", ageThresholdHours: 1, storageClass: "MEMORY", accessLatencyMs: 5 },
        { name: "warm", ageThresholdHours: 168, storageClass: "TIMESERIES", accessLatencyMs: 50 },
        { name: "cold", ageThresholdHours: 720, storageClass: "S3_STANDARD", accessLatencyMs: 100 },
        { name: "archive", ageThresholdHours: Infinity, storageClass: "S3_GLACIER", accessLatencyMs: 36e5 }
      ];
    }
  }
  /**
   * Connect every configured backend and start the migration worker.
   * @throws rethrows any backend initialization failure after logging it.
   */
  async initialize() {
    try {
      if (this.config.redis?.url) {
        this.redisClient = createRedisClient({
          url: this.config.redis.url
        });
        await this.redisClient.connect();
        // LRU eviction keeps the hot tier bounded under memory pressure.
        await this.redisClient.configSet("maxmemory-policy", "allkeys-lru");
        if (this.config.redis.maxMemoryMB) {
          await this.redisClient.configSet("maxmemory", `${this.config.redis.maxMemoryMB}mb`);
        }
        this.logger.info("Redis client initialized for hot tier");
      }
      if (this.config.timeseries?.connectionString) {
        this.timeseriesPool = new Pool({
          connectionString: this.config.timeseries.connectionString,
          max: 10,
          idleTimeoutMillis: 3e4
        });
        await this.createTimeSeriesTables();
        this.logger.info("TimeSeries DB initialized for warm tier");
      }
      if (this.config.s3?.bucket) {
        this.s3Client = new S3Client({
          region: this.config.s3.region || "us-east-1",
          // Explicit credentials are optional; otherwise the SDK's default
          // provider chain (env vars, instance profile, ...) is used.
          credentials: this.config.s3.accessKeyId ? {
            accessKeyId: this.config.s3.accessKeyId,
            secretAccessKey: this.config.s3.secretAccessKey
          } : void 0
        });
        this.logger.info("S3 client initialized for cold/archive tiers");
      }
      this.startMigrationWorker();
      this.logger.info("Infinite Storage System initialized");
    } catch (error) {
      this.logger.error("Failed to initialize storage system", error);
      throw error;
    }
  }
  /**
   * Create TimeSeries tables for warm tier storage. TimescaleDB-specific
   * statements (hypertable, compression policy) are attempted and silently
   * downgraded to plain PostgreSQL behavior when the extension is absent.
   */
  async createTimeSeriesTables() {
    const client = await this.timeseriesPool.connect();
    try {
      await client.query(`
        CREATE TABLE IF NOT EXISTS frame_timeseries (
          time TIMESTAMPTZ NOT NULL,
          frame_id TEXT NOT NULL,
          user_id TEXT NOT NULL,
          project_name TEXT,
          type TEXT,
          data JSONB,
          compressed_data BYTEA,
          storage_tier TEXT DEFAULT 'warm',
          access_count INTEGER DEFAULT 0,
          last_accessed TIMESTAMPTZ DEFAULT NOW(),
          PRIMARY KEY (time, frame_id)
        )
      `);
      await client.query(`
        SELECT create_hypertable('frame_timeseries', 'time',
          chunk_time_interval => INTERVAL '1 day',
          if_not_exists => TRUE)
      `).catch(() => {
        this.logger.info("Using standard PostgreSQL partitioning");
      });
      await client.query(`
        CREATE INDEX IF NOT EXISTS idx_frame_user ON frame_timeseries (user_id, time DESC);
        CREATE INDEX IF NOT EXISTS idx_frame_project ON frame_timeseries (project_name, time DESC);
        CREATE INDEX IF NOT EXISTS idx_frame_tier ON frame_timeseries (storage_tier);
      `);
      await client.query(`
        SELECT add_compression_policy('frame_timeseries', INTERVAL '7 days', if_not_exists => TRUE)
      `).catch(() => {
        this.logger.info("Compression policy not available");
      });
    } finally {
      client.release();
    }
  }
  /**
   * Store a frame in the hot tier (Redis, TTL-bounded) and the warm tier
   * (TimeSeries DB, durable) in a single call.
   * @param {object} frame - frame payload; must carry frameId/timestamp/type.
   * @param {string} userId - owner, used to namespace keys and rows.
   * @throws rethrows any backend write failure after logging it.
   */
  async storeFrame(frame, userId) {
    const startTime = Date.now();
    try {
      const frameData = JSON.stringify(frame);
      const compressedData = await compress(frameData);
      const frameKey = `frame:${userId}:${frame.frameId}`;
      if (this.redisClient) {
        await this.redisClient.setEx(
          frameKey,
          this.config.redis.ttlSeconds || 3600,
          compressedData
        );
        // Side-car hash so metadata can be read without decompressing the frame.
        await this.redisClient.hSet(`meta:${frameKey}`, {
          userId,
          projectName: frame.projectName || "default",
          type: frame.type,
          timestamp: frame.timestamp,
          tier: "hot"
        });
      }
      if (this.timeseriesPool) {
        const client = await this.timeseriesPool.connect();
        try {
          await client.query(`
            INSERT INTO frame_timeseries (time, frame_id, user_id, project_name, type, data, compressed_data, storage_tier)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
            ON CONFLICT (time, frame_id) DO UPDATE
            SET data = EXCLUDED.data,
                compressed_data = EXCLUDED.compressed_data,
                last_accessed = NOW(),
                access_count = frame_timeseries.access_count + 1
          `, [
            new Date(frame.timestamp),
            frame.frameId,
            userId,
            frame.projectName || "default",
            frame.type,
            frame,
            compressedData,
            "warm"
          ]);
        } finally {
          client.release();
        }
      }
      const latency = Date.now() - startTime;
      this.trackLatency(latency);
      this.logger.debug(`Stored frame ${frame.frameId} in ${latency}ms`);
    } catch (error) {
      this.logger.error(`Failed to store frame ${frame.frameId}`, error);
      throw error;
    }
  }
  /**
   * Retrieve a frame, trying tiers hot -> warm -> cold/archive, promoting
   * it back to warmer tiers on a hit in a colder one.
   * @param {string} frameId
   * @param {string} userId
   * @returns the frame, or null when it exists in no tier.
   * @throws rethrows unexpected backend errors (an S3 "NoSuchKey" miss is
   *   treated as not-found, not an error).
   */
  async retrieveFrame(frameId, userId) {
    const startTime = Date.now();
    const frameKey = `frame:${userId}:${frameId}`;
    try {
      if (this.redisClient) {
        const cached = await this.redisClient.get(frameKey);
        if (cached) {
          const decompressed = await decompress(cached);
          const frame = JSON.parse(decompressed);
          // Sliding expiration: refresh the TTL on every hot-tier hit.
          await this.redisClient.expire(frameKey, this.config.redis.ttlSeconds || 3600);
          const latency = Date.now() - startTime;
          this.trackLatency(latency);
          this.logger.debug(`Retrieved frame ${frameId} from hot tier in ${latency}ms`);
          return frame;
        }
      }
      if (this.timeseriesPool) {
        const client = await this.timeseriesPool.connect();
        try {
          const result = await client.query(`
            SELECT data, compressed_data, storage_tier
            FROM frame_timeseries
            WHERE frame_id = $1 AND user_id = $2
            ORDER BY time DESC
            LIMIT 1
          `, [frameId, userId]);
          if (result.rows.length > 0) {
            const row = result.rows[0];
            let frame;
            if (row.compressed_data) {
              const decompressed = await decompress(row.compressed_data);
              frame = JSON.parse(decompressed);
            } else {
              frame = row.data;
            }
            await client.query(`
              UPDATE frame_timeseries
              SET last_accessed = NOW(), access_count = access_count + 1
              WHERE frame_id = $1 AND user_id = $2
            `, [frameId, userId]);
            if (this.redisClient) {
              await this.promoteToHotTier(frame, userId);
            }
            const latency = Date.now() - startTime;
            this.trackLatency(latency);
            this.logger.debug(`Retrieved frame ${frameId} from warm tier in ${latency}ms`);
            return frame;
          }
        } finally {
          client.release();
        }
      }
      if (this.s3Client && this.config.s3.bucket) {
        const key = `frames/${userId}/${frameId}.json.gz`;
        try {
          const command = new GetObjectCommand({
            Bucket: this.config.s3.bucket,
            Key: key
          });
          const response = await this.s3Client.send(command);
          const compressedData = await response.Body.transformToByteArray();
          const decompressed = await decompress(Buffer.from(compressedData));
          const frame = JSON.parse(decompressed);
          await this.promoteFrame(frame, userId);
          const latency = Date.now() - startTime;
          this.trackLatency(latency);
          this.logger.debug(`Retrieved frame ${frameId} from cold tier in ${latency}ms`);
          return frame;
        } catch (error) {
          // AWS SDK v3 exposes the S3 error code on `name`; `Code` is kept
          // as a fallback for older/mocked error shapes. A missing object
          // is an expected cache miss, not a failure.
          const code = error?.name ?? error?.Code;
          if (code !== "NoSuchKey") {
            throw error;
          }
        }
      }
      this.logger.debug(`Frame ${frameId} not found in any tier`);
      return null;
    } catch (error) {
      this.logger.error(`Failed to retrieve frame ${frameId}`, error);
      throw error;
    }
  }
  /**
   * Re-cache a frame in Redis (hot tier). Best-effort: failures are
   * logged but never propagated, since promotion is an optimization.
   */
  async promoteToHotTier(frame, userId) {
    if (!this.redisClient) return;
    try {
      const frameKey = `frame:${userId}:${frame.frameId}`;
      const frameData = JSON.stringify(frame);
      const compressedData = await compress(frameData);
      await this.redisClient.setEx(
        frameKey,
        this.config.redis.ttlSeconds || 3600,
        compressedData
      );
      this.logger.debug(`Promoted frame ${frame.frameId} to hot tier`);
    } catch (error) {
      this.logger.error(`Failed to promote frame ${frame.frameId}`, error);
    }
  }
  /**
   * Promote a frame recovered from cold storage back into the warm tier
   * (upsert into the TimeSeries DB) and then into the hot tier.
   */
  async promoteFrame(frame, userId) {
    if (this.timeseriesPool) {
      const client = await this.timeseriesPool.connect();
      try {
        const compressedData = await compress(JSON.stringify(frame));
        await client.query(`
          INSERT INTO frame_timeseries (time, frame_id, user_id, data, compressed_data, storage_tier)
          VALUES ($1, $2, $3, $4, $5, $6)
          ON CONFLICT (time, frame_id) DO UPDATE
          SET storage_tier = 'warm',
              last_accessed = NOW(),
              access_count = frame_timeseries.access_count + 1
        `, [
          new Date(frame.timestamp),
          frame.frameId,
          userId,
          frame,
          compressedData,
          "warm"
        ]);
      } finally {
        client.release();
      }
    }
    await this.promoteToHotTier(frame, userId);
  }
  /**
   * Start the hourly background tier-migration job. The async callback is
   * wrapped in try/catch so a failed migration pass is logged instead of
   * surfacing as an unhandled promise rejection.
   */
  startMigrationWorker() {
    this.migrationWorker = setInterval(async () => {
      try {
        await this.migrateAgedData();
      } catch (error) {
        this.logger.error("Tier migration failed", error);
      }
    }, 60 * 60 * 1e3);
    this.logger.info("Migration worker started");
  }
  /**
   * Move aged, rarely-accessed rows down the ladder: warm -> cold after
   * 7 idle days, cold -> archive after 30. Batched at 1000 rows per tier
   * per pass to bound each run's work.
   */
  async migrateAgedData() {
    this.logger.info("Starting tier migration...");
    if (!this.timeseriesPool) return;
    const client = await this.timeseriesPool.connect();
    try {
      const coldEligible = await client.query(`
        SELECT frame_id, user_id, data, compressed_data
        FROM frame_timeseries
        WHERE storage_tier = 'warm'
          AND time < NOW() - INTERVAL '7 days'
          AND last_accessed < NOW() - INTERVAL '7 days'
        LIMIT 1000
      `);
      for (const row of coldEligible.rows) {
        await this.migrateToS3(row, "STANDARD");
        await client.query(`
          UPDATE frame_timeseries
          SET storage_tier = 'cold'
          WHERE frame_id = $1 AND user_id = $2
        `, [row.frame_id, row.user_id]);
      }
      const archiveEligible = await client.query(`
        SELECT frame_id, user_id, data, compressed_data
        FROM frame_timeseries
        WHERE storage_tier = 'cold'
          AND time < NOW() - INTERVAL '30 days'
          AND last_accessed < NOW() - INTERVAL '30 days'
        LIMIT 1000
      `);
      for (const row of archiveEligible.rows) {
        await this.migrateToS3(row, "GLACIER");
        await client.query(`
          UPDATE frame_timeseries
          SET storage_tier = 'archive'
          WHERE frame_id = $1 AND user_id = $2
        `, [row.frame_id, row.user_id]);
      }
      this.logger.info(`Migration completed: ${coldEligible.rows.length} to cold, ${archiveEligible.rows.length} to archive`);
    } finally {
      client.release();
    }
  }
  /**
   * Upload one warm-tier row to S3 under the given storage class
   * ("STANDARD" or "GLACIER"). Rethrows upload failures so the caller can
   * skip the tier-update for that row.
   */
  async migrateToS3(row, storageClass) {
    if (!this.s3Client || !this.config.s3.bucket) return;
    try {
      const key = `frames/${row.user_id}/${row.frame_id}.json.gz`;
      // Reuse the stored compressed payload when available.
      const data = row.compressed_data || await compress(JSON.stringify(row.data));
      const command = new PutObjectCommand({
        Bucket: this.config.s3.bucket,
        Key: key,
        Body: data,
        StorageClass: storageClass,
        Metadata: {
          userId: row.user_id,
          frameId: row.frame_id,
          migratedAt: (/* @__PURE__ */ new Date()).toISOString()
        }
      });
      await this.s3Client.send(command);
      this.logger.debug(`Migrated frame ${row.frame_id} to S3 ${storageClass}`);
    } catch (error) {
      this.logger.error(`Failed to migrate frame ${row.frame_id} to S3`, error);
      throw error;
    }
  }
  /**
   * Record one operation latency, keeping only the most recent 1000
   * samples for percentile calculation in getMetrics().
   */
  trackLatency(latencyMs) {
    this.latencies.push(latencyMs);
    if (this.latencies.length > 1e3) {
      this.latencies.shift();
    }
  }
  /**
   * Aggregate storage metrics: latency percentiles from the in-memory
   * window plus object counts/bytes per tier from the TimeSeries DB.
   * @returns {Promise<object>} metrics snapshot.
   */
  async getMetrics() {
    const metrics = {
      totalObjects: 0,
      tierDistribution: {},
      storageBytes: 0,
      avgLatencyMs: 0,
      p50LatencyMs: 0,
      p99LatencyMs: 0
    };
    if (this.latencies.length > 0) {
      const sorted = [...this.latencies].sort((a, b) => a - b);
      metrics.avgLatencyMs = sorted.reduce((a, b) => a + b, 0) / sorted.length;
      metrics.p50LatencyMs = sorted[Math.floor(sorted.length * 0.5)];
      metrics.p99LatencyMs = sorted[Math.floor(sorted.length * 0.99)];
    }
    if (this.timeseriesPool) {
      const client = await this.timeseriesPool.connect();
      try {
        const result = await client.query(`
          SELECT
            storage_tier,
            COUNT(*) as count,
            SUM(pg_column_size(compressed_data)) as bytes
          FROM frame_timeseries
          GROUP BY storage_tier
        `);
        // pg returns COUNT/SUM as strings; parse with an explicit radix.
        for (const row of result.rows) {
          const count = Number.parseInt(row.count, 10);
          metrics.tierDistribution[row.storage_tier] = count;
          metrics.storageBytes += Number.parseInt(row.bytes ?? "0", 10);
          metrics.totalObjects += count;
        }
      } finally {
        client.release();
      }
    }
    return metrics;
  }
  /**
   * Stop the migration worker and close every backend connection.
   * Safe to call more than once.
   */
  async shutdown() {
    if (this.migrationWorker) {
      clearInterval(this.migrationWorker);
      this.migrationWorker = null;
    }
    if (this.redisClient) {
      await this.redisClient.quit();
    }
    if (this.timeseriesPool) {
      await this.timeseriesPool.end();
    }
    this.logger.info("Infinite Storage System shut down");
  }
}
|
|
440
|
+
// Public entry point: the tiered storage system class.
export {
  InfiniteStorageSystem
};
//# sourceMappingURL=infinite-storage.js.map
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../../../src/core/storage/infinite-storage.ts"],
|
|
4
|
+
"sourcesContent": ["/**\n * Infinite Storage System for StackMemory\n * Implements STA-287: Remote storage with TimeSeries DB + S3 + Redis\n * \n * Storage Tiers:\n * - Hot: Redis (< 1 hour, frequently accessed)\n * - Warm: TimeSeries DB (1 hour - 7 days)\n * - Cold: S3 Standard (7 days - 30 days)\n * - Archive: S3 Glacier (> 30 days)\n */\n\nimport { S3Client, PutObjectCommand, GetObjectCommand, ListObjectsV2Command } from '@aws-sdk/client-s3';\nimport { createClient as createRedisClient } from 'redis';\nimport { Pool } from 'pg';\nimport { Logger } from '../monitoring/logger.js';\nimport { Frame } from '../context/frame-manager.js';\nimport { v4 as uuidv4 } from 'uuid';\nimport { compress, decompress } from '../utils/compression.js';\n\nexport interface StorageTier {\n name: 'hot' | 'warm' | 'cold' | 'archive';\n ageThresholdHours: number;\n storageClass: string;\n accessLatencyMs: number;\n}\n\nexport interface StorageConfig {\n redis: {\n url: string;\n ttlSeconds: number;\n maxMemoryMB: number;\n };\n timeseries: {\n connectionString: string;\n retentionDays: number;\n };\n s3: {\n bucket: string;\n region: string;\n accessKeyId?: string;\n secretAccessKey?: string;\n };\n tiers: StorageTier[];\n}\n\nexport interface StorageMetrics {\n totalObjects: number;\n tierDistribution: Record<string, number>;\n storageBytes: number;\n avgLatencyMs: number;\n p50LatencyMs: number;\n p99LatencyMs: number;\n}\n\nexport class InfiniteStorageSystem {\n private redisClient: any;\n private timeseriesPool: Pool;\n private s3Client: S3Client;\n private logger: Logger;\n private config: StorageConfig;\n private latencies: number[] = [];\n private migrationWorker: NodeJS.Timeout | null = null;\n\n constructor(config: StorageConfig) {\n this.config = config;\n this.logger = new Logger('InfiniteStorage');\n \n // Default storage tiers\n if (!config.tiers || config.tiers.length === 0) {\n this.config.tiers = [\n { name: 'hot', ageThresholdHours: 1, storageClass: 'MEMORY', 
accessLatencyMs: 5 },\n { name: 'warm', ageThresholdHours: 168, storageClass: 'TIMESERIES', accessLatencyMs: 50 },\n { name: 'cold', ageThresholdHours: 720, storageClass: 'S3_STANDARD', accessLatencyMs: 100 },\n { name: 'archive', ageThresholdHours: Infinity, storageClass: 'S3_GLACIER', accessLatencyMs: 3600000 },\n ];\n }\n }\n\n async initialize(): Promise<void> {\n try {\n // Initialize Redis (hot tier)\n if (this.config.redis?.url) {\n this.redisClient = createRedisClient({\n url: this.config.redis.url,\n });\n \n await this.redisClient.connect();\n \n // Configure Redis memory policy\n await this.redisClient.configSet('maxmemory-policy', 'allkeys-lru');\n if (this.config.redis.maxMemoryMB) {\n await this.redisClient.configSet('maxmemory', `${this.config.redis.maxMemoryMB}mb`);\n }\n \n this.logger.info('Redis client initialized for hot tier');\n }\n\n // Initialize TimeSeries DB (warm tier)\n if (this.config.timeseries?.connectionString) {\n this.timeseriesPool = new Pool({\n connectionString: this.config.timeseries.connectionString,\n max: 10,\n idleTimeoutMillis: 30000,\n });\n\n // Create TimeSeries tables if not exists\n await this.createTimeSeriesTables();\n this.logger.info('TimeSeries DB initialized for warm tier');\n }\n\n // Initialize S3 (cold/archive tiers)\n if (this.config.s3?.bucket) {\n this.s3Client = new S3Client({\n region: this.config.s3.region || 'us-east-1',\n credentials: this.config.s3.accessKeyId ? 
{\n accessKeyId: this.config.s3.accessKeyId,\n secretAccessKey: this.config.s3.secretAccessKey!,\n } : undefined,\n });\n \n this.logger.info('S3 client initialized for cold/archive tiers');\n }\n\n // Start background migration worker\n this.startMigrationWorker();\n \n this.logger.info('Infinite Storage System initialized');\n } catch (error) {\n this.logger.error('Failed to initialize storage system', error);\n throw error;\n }\n }\n\n /**\n * Create TimeSeries tables for warm tier storage\n */\n private async createTimeSeriesTables(): Promise<void> {\n const client = await this.timeseriesPool.connect();\n \n try {\n // Create hypertable for time-series data\n await client.query(`\n CREATE TABLE IF NOT EXISTS frame_timeseries (\n time TIMESTAMPTZ NOT NULL,\n frame_id TEXT NOT NULL,\n user_id TEXT NOT NULL,\n project_name TEXT,\n type TEXT,\n data JSONB,\n compressed_data BYTEA,\n storage_tier TEXT DEFAULT 'warm',\n access_count INTEGER DEFAULT 0,\n last_accessed TIMESTAMPTZ DEFAULT NOW(),\n PRIMARY KEY (time, frame_id)\n )\n `);\n\n // Create hypertable if using TimescaleDB\n await client.query(`\n SELECT create_hypertable('frame_timeseries', 'time', \n chunk_time_interval => INTERVAL '1 day',\n if_not_exists => TRUE)\n `).catch(() => {\n // Fallback to regular partitioning if not TimescaleDB\n this.logger.info('Using standard PostgreSQL partitioning');\n });\n\n // Create indexes\n await client.query(`\n CREATE INDEX IF NOT EXISTS idx_frame_user ON frame_timeseries (user_id, time DESC);\n CREATE INDEX IF NOT EXISTS idx_frame_project ON frame_timeseries (project_name, time DESC);\n CREATE INDEX IF NOT EXISTS idx_frame_tier ON frame_timeseries (storage_tier);\n `);\n\n // Create compression policy (TimescaleDB specific)\n await client.query(`\n SELECT add_compression_policy('frame_timeseries', INTERVAL '7 days', if_not_exists => TRUE)\n `).catch(() => {\n this.logger.info('Compression policy not available');\n });\n\n } finally {\n client.release();\n }\n }\n\n 
/**\n * Store a frame with automatic tier selection\n */\n async storeFrame(frame: Frame, userId: string): Promise<void> {\n const startTime = Date.now();\n \n try {\n const frameData = JSON.stringify(frame);\n const compressedData = await compress(frameData);\n const frameKey = `frame:${userId}:${frame.frameId}`;\n\n // Always store in hot tier first (Redis)\n if (this.redisClient) {\n await this.redisClient.setEx(\n frameKey,\n this.config.redis.ttlSeconds || 3600,\n compressedData\n );\n \n // Store metadata for quick lookups\n await this.redisClient.hSet(`meta:${frameKey}`, {\n userId,\n projectName: frame.projectName || 'default',\n type: frame.type,\n timestamp: frame.timestamp,\n tier: 'hot',\n });\n }\n\n // Also store in warm tier for durability\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n await client.query(`\n INSERT INTO frame_timeseries (time, frame_id, user_id, project_name, type, data, compressed_data, storage_tier)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\n ON CONFLICT (time, frame_id) DO UPDATE\n SET data = EXCLUDED.data,\n compressed_data = EXCLUDED.compressed_data,\n last_accessed = NOW(),\n access_count = frame_timeseries.access_count + 1\n `, [\n new Date(frame.timestamp),\n frame.frameId,\n userId,\n frame.projectName || 'default',\n frame.type,\n frame,\n compressedData,\n 'warm',\n ]);\n } finally {\n client.release();\n }\n }\n\n // Track latency\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n \n this.logger.debug(`Stored frame ${frame.frameId} in ${latency}ms`);\n } catch (error) {\n this.logger.error(`Failed to store frame ${frame.frameId}`, error);\n throw error;\n }\n }\n\n /**\n * Retrieve a frame with intelligent caching\n */\n async retrieveFrame(frameId: string, userId: string): Promise<Frame | null> {\n const startTime = Date.now();\n const frameKey = `frame:${userId}:${frameId}`;\n\n try {\n // Try hot tier first (Redis)\n if (this.redisClient) {\n const 
cached = await this.redisClient.get(frameKey);\n if (cached) {\n const decompressed = await decompress(cached);\n const frame = JSON.parse(decompressed);\n \n // Refresh TTL on access\n await this.redisClient.expire(frameKey, this.config.redis.ttlSeconds || 3600);\n \n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(`Retrieved frame ${frameId} from hot tier in ${latency}ms`);\n \n return frame;\n }\n }\n\n // Try warm tier (TimeSeries DB)\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n const result = await client.query(`\n SELECT data, compressed_data, storage_tier \n FROM frame_timeseries \n WHERE frame_id = $1 AND user_id = $2\n ORDER BY time DESC\n LIMIT 1\n `, [frameId, userId]);\n\n if (result.rows.length > 0) {\n const row = result.rows[0];\n let frame: Frame;\n \n if (row.compressed_data) {\n const decompressed = await decompress(row.compressed_data);\n frame = JSON.parse(decompressed);\n } else {\n frame = row.data;\n }\n\n // Update access stats\n await client.query(`\n UPDATE frame_timeseries \n SET last_accessed = NOW(), access_count = access_count + 1\n WHERE frame_id = $1 AND user_id = $2\n `, [frameId, userId]);\n\n // Promote to hot tier if frequently accessed\n if (this.redisClient) {\n await this.promoteToHotTier(frame, userId);\n }\n\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(`Retrieved frame ${frameId} from warm tier in ${latency}ms`);\n \n return frame;\n }\n } finally {\n client.release();\n }\n }\n\n // Try cold/archive tiers (S3)\n if (this.s3Client && this.config.s3.bucket) {\n const key = `frames/${userId}/${frameId}.json.gz`;\n \n try {\n const command = new GetObjectCommand({\n Bucket: this.config.s3.bucket,\n Key: key,\n });\n \n const response = await this.s3Client.send(command);\n const compressedData = await response.Body!.transformToByteArray();\n const decompressed = await 
decompress(Buffer.from(compressedData));\n const frame = JSON.parse(decompressed);\n\n // Promote to warmer tiers for future access\n await this.promoteFrame(frame, userId);\n\n const latency = Date.now() - startTime;\n this.trackLatency(latency);\n this.logger.debug(`Retrieved frame ${frameId} from cold tier in ${latency}ms`);\n \n return frame;\n } catch (error: any) {\n if (error.Code !== 'NoSuchKey') {\n throw error;\n }\n }\n }\n\n this.logger.debug(`Frame ${frameId} not found in any tier`);\n return null;\n } catch (error) {\n this.logger.error(`Failed to retrieve frame ${frameId}`, error);\n throw error;\n }\n }\n\n /**\n * Promote frame to hot tier for fast access\n */\n private async promoteToHotTier(frame: Frame, userId: string): Promise<void> {\n if (!this.redisClient) return;\n \n try {\n const frameKey = `frame:${userId}:${frame.frameId}`;\n const frameData = JSON.stringify(frame);\n const compressedData = await compress(frameData);\n \n await this.redisClient.setEx(\n frameKey,\n this.config.redis.ttlSeconds || 3600,\n compressedData\n );\n \n this.logger.debug(`Promoted frame ${frame.frameId} to hot tier`);\n } catch (error) {\n this.logger.error(`Failed to promote frame ${frame.frameId}`, error);\n }\n }\n\n /**\n * Promote frame through storage tiers\n */\n private async promoteFrame(frame: Frame, userId: string): Promise<void> {\n // Promote to warm tier\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n const compressedData = await compress(JSON.stringify(frame));\n \n await client.query(`\n INSERT INTO frame_timeseries (time, frame_id, user_id, data, compressed_data, storage_tier)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (time, frame_id) DO UPDATE\n SET storage_tier = 'warm',\n last_accessed = NOW(),\n access_count = frame_timeseries.access_count + 1\n `, [\n new Date(frame.timestamp),\n frame.frameId,\n userId,\n frame,\n compressedData,\n 'warm',\n ]);\n } finally {\n client.release();\n }\n 
}\n\n // Also promote to hot tier\n await this.promoteToHotTier(frame, userId);\n }\n\n /**\n * Start background worker for tier migration\n */\n private startMigrationWorker(): void {\n // Run migration every hour\n this.migrationWorker = setInterval(async () => {\n await this.migrateAgedData();\n }, 60 * 60 * 1000);\n \n this.logger.info('Migration worker started');\n }\n\n /**\n * Migrate aged data to appropriate storage tiers\n */\n private async migrateAgedData(): Promise<void> {\n this.logger.info('Starting tier migration...');\n \n if (!this.timeseriesPool) return;\n \n const client = await this.timeseriesPool.connect();\n \n try {\n // Find data eligible for cold storage (> 7 days old)\n const coldEligible = await client.query(`\n SELECT frame_id, user_id, data, compressed_data\n FROM frame_timeseries\n WHERE storage_tier = 'warm'\n AND time < NOW() - INTERVAL '7 days'\n AND last_accessed < NOW() - INTERVAL '7 days'\n LIMIT 1000\n `);\n\n // Migrate to S3 cold storage\n for (const row of coldEligible.rows) {\n await this.migrateToS3(row, 'STANDARD');\n \n // Update tier in database\n await client.query(`\n UPDATE frame_timeseries\n SET storage_tier = 'cold'\n WHERE frame_id = $1 AND user_id = $2\n `, [row.frame_id, row.user_id]);\n }\n\n // Find data eligible for archive (> 30 days old)\n const archiveEligible = await client.query(`\n SELECT frame_id, user_id, data, compressed_data\n FROM frame_timeseries\n WHERE storage_tier = 'cold'\n AND time < NOW() - INTERVAL '30 days'\n AND last_accessed < NOW() - INTERVAL '30 days'\n LIMIT 1000\n `);\n\n // Migrate to S3 Glacier\n for (const row of archiveEligible.rows) {\n await this.migrateToS3(row, 'GLACIER');\n \n // Update tier in database\n await client.query(`\n UPDATE frame_timeseries\n SET storage_tier = 'archive'\n WHERE frame_id = $1 AND user_id = $2\n `, [row.frame_id, row.user_id]);\n }\n\n this.logger.info(`Migration completed: ${coldEligible.rows.length} to cold, ${archiveEligible.rows.length} to 
archive`);\n } finally {\n client.release();\n }\n }\n\n /**\n * Migrate data to S3 storage\n */\n private async migrateToS3(row: any, storageClass: string): Promise<void> {\n if (!this.s3Client || !this.config.s3.bucket) return;\n \n try {\n const key = `frames/${row.user_id}/${row.frame_id}.json.gz`;\n const data = row.compressed_data || await compress(JSON.stringify(row.data));\n \n const command = new PutObjectCommand({\n Bucket: this.config.s3.bucket,\n Key: key,\n Body: data,\n StorageClass: storageClass,\n Metadata: {\n userId: row.user_id,\n frameId: row.frame_id,\n migratedAt: new Date().toISOString(),\n },\n });\n \n await this.s3Client.send(command);\n \n this.logger.debug(`Migrated frame ${row.frame_id} to S3 ${storageClass}`);\n } catch (error) {\n this.logger.error(`Failed to migrate frame ${row.frame_id} to S3`, error);\n throw error;\n }\n }\n\n /**\n * Track latency for performance monitoring\n */\n private trackLatency(latencyMs: number): void {\n this.latencies.push(latencyMs);\n \n // Keep only last 1000 measurements\n if (this.latencies.length > 1000) {\n this.latencies.shift();\n }\n }\n\n /**\n * Get storage metrics\n */\n async getMetrics(): Promise<StorageMetrics> {\n const metrics: StorageMetrics = {\n totalObjects: 0,\n tierDistribution: {},\n storageBytes: 0,\n avgLatencyMs: 0,\n p50LatencyMs: 0,\n p99LatencyMs: 0,\n };\n\n // Calculate latency percentiles\n if (this.latencies.length > 0) {\n const sorted = [...this.latencies].sort((a, b) => a - b);\n metrics.avgLatencyMs = sorted.reduce((a, b) => a + b, 0) / sorted.length;\n metrics.p50LatencyMs = sorted[Math.floor(sorted.length * 0.5)];\n metrics.p99LatencyMs = sorted[Math.floor(sorted.length * 0.99)];\n }\n\n // Get tier distribution from TimeSeries DB\n if (this.timeseriesPool) {\n const client = await this.timeseriesPool.connect();\n \n try {\n const result = await client.query(`\n SELECT \n storage_tier,\n COUNT(*) as count,\n SUM(pg_column_size(compressed_data)) as bytes\n FROM 
frame_timeseries\n GROUP BY storage_tier\n `);\n\n for (const row of result.rows) {\n metrics.tierDistribution[row.storage_tier] = parseInt(row.count);\n metrics.storageBytes += parseInt(row.bytes || 0);\n metrics.totalObjects += parseInt(row.count);\n }\n } finally {\n client.release();\n }\n }\n\n return metrics;\n }\n\n /**\n * Cleanup and shutdown\n */\n async shutdown(): Promise<void> {\n if (this.migrationWorker) {\n clearInterval(this.migrationWorker);\n }\n\n if (this.redisClient) {\n await this.redisClient.quit();\n }\n\n if (this.timeseriesPool) {\n await this.timeseriesPool.end();\n }\n\n this.logger.info('Infinite Storage System shut down');\n }\n}"],
|
|
5
|
+
"mappings": "AAWA,SAAS,UAAU,kBAAkB,wBAA8C;AACnF,SAAS,gBAAgB,yBAAyB;AAClD,SAAS,YAAY;AACrB,SAAS,cAAc;AAGvB,SAAS,UAAU,kBAAkB;AAqC9B,MAAM,sBAAsB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,YAAsB,CAAC;AAAA,EACvB,kBAAyC;AAAA,EAEjD,YAAY,QAAuB;AACjC,SAAK,SAAS;AACd,SAAK,SAAS,IAAI,OAAO,iBAAiB;AAG1C,QAAI,CAAC,OAAO,SAAS,OAAO,MAAM,WAAW,GAAG;AAC9C,WAAK,OAAO,QAAQ;AAAA,QAClB,EAAE,MAAM,OAAO,mBAAmB,GAAG,cAAc,UAAU,iBAAiB,EAAE;AAAA,QAChF,EAAE,MAAM,QAAQ,mBAAmB,KAAK,cAAc,cAAc,iBAAiB,GAAG;AAAA,QACxF,EAAE,MAAM,QAAQ,mBAAmB,KAAK,cAAc,eAAe,iBAAiB,IAAI;AAAA,QAC1F,EAAE,MAAM,WAAW,mBAAmB,UAAU,cAAc,cAAc,iBAAiB,KAAQ;AAAA,MACvG;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,aAA4B;AAChC,QAAI;AAEF,UAAI,KAAK,OAAO,OAAO,KAAK;AAC1B,aAAK,cAAc,kBAAkB;AAAA,UACnC,KAAK,KAAK,OAAO,MAAM;AAAA,QACzB,CAAC;AAED,cAAM,KAAK,YAAY,QAAQ;AAG/B,cAAM,KAAK,YAAY,UAAU,oBAAoB,aAAa;AAClE,YAAI,KAAK,OAAO,MAAM,aAAa;AACjC,gBAAM,KAAK,YAAY,UAAU,aAAa,GAAG,KAAK,OAAO,MAAM,WAAW,IAAI;AAAA,QACpF;AAEA,aAAK,OAAO,KAAK,uCAAuC;AAAA,MAC1D;AAGA,UAAI,KAAK,OAAO,YAAY,kBAAkB;AAC5C,aAAK,iBAAiB,IAAI,KAAK;AAAA,UAC7B,kBAAkB,KAAK,OAAO,WAAW;AAAA,UACzC,KAAK;AAAA,UACL,mBAAmB;AAAA,QACrB,CAAC;AAGD,cAAM,KAAK,uBAAuB;AAClC,aAAK,OAAO,KAAK,yCAAyC;AAAA,MAC5D;AAGA,UAAI,KAAK,OAAO,IAAI,QAAQ;AAC1B,aAAK,WAAW,IAAI,SAAS;AAAA,UAC3B,QAAQ,KAAK,OAAO,GAAG,UAAU;AAAA,UACjC,aAAa,KAAK,OAAO,GAAG,cAAc;AAAA,YACxC,aAAa,KAAK,OAAO,GAAG;AAAA,YAC5B,iBAAiB,KAAK,OAAO,GAAG;AAAA,UAClC,IAAI;AAAA,QACN,CAAC;AAED,aAAK,OAAO,KAAK,8CAA8C;AAAA,MACjE;AAGA,WAAK,qBAAqB;AAE1B,WAAK,OAAO,KAAK,qCAAqC;AAAA,IACxD,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,uCAAuC,KAAK;AAC9D,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,yBAAwC;AACpD,UAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,QAAI;AAEF,YAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAclB;AAGD,YAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA,OAIlB,EAAE,MAAM,MAAM;AAEb,aAAK,OAAO,KAAK,wCAAwC;AAAA,MAC3D,CAAC;AAGD,YAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA,OAIlB;AAGD,YAAM,OAAO,MAAM;AAAA;AAAA,OAElB,EAAE,MAAM,MAAM;AACb,aAAK,OAAO,KAAK,kCAAkC;AAAA,MACrD,CAAC;AAAA,IA
EH,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,WAAW,OAAc,QAA+B;AAC5D,UAAM,YAAY,KAAK,IAAI;AAE3B,QAAI;AACF,YAAM,YAAY,KAAK,UAAU,KAAK;AACtC,YAAM,iBAAiB,MAAM,SAAS,SAAS;AAC/C,YAAM,WAAW,SAAS,MAAM,IAAI,MAAM,OAAO;AAGjD,UAAI,KAAK,aAAa;AACpB,cAAM,KAAK,YAAY;AAAA,UACrB;AAAA,UACA,KAAK,OAAO,MAAM,cAAc;AAAA,UAChC;AAAA,QACF;AAGA,cAAM,KAAK,YAAY,KAAK,QAAQ,QAAQ,IAAI;AAAA,UAC9C;AAAA,UACA,aAAa,MAAM,eAAe;AAAA,UAClC,MAAM,MAAM;AAAA,UACZ,WAAW,MAAM;AAAA,UACjB,MAAM;AAAA,QACR,CAAC;AAAA,MACH;AAGA,UAAI,KAAK,gBAAgB;AACvB,cAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,YAAI;AACF,gBAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aAQhB;AAAA,YACD,IAAI,KAAK,MAAM,SAAS;AAAA,YACxB,MAAM;AAAA,YACN;AAAA,YACA,MAAM,eAAe;AAAA,YACrB,MAAM;AAAA,YACN;AAAA,YACA;AAAA,YACA;AAAA,UACF,CAAC;AAAA,QACH,UAAE;AACA,iBAAO,QAAQ;AAAA,QACjB;AAAA,MACF;AAGA,YAAM,UAAU,KAAK,IAAI,IAAI;AAC7B,WAAK,aAAa,OAAO;AAEzB,WAAK,OAAO,MAAM,gBAAgB,MAAM,OAAO,OAAO,OAAO,IAAI;AAAA,IACnE,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,yBAAyB,MAAM,OAAO,IAAI,KAAK;AACjE,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,cAAc,SAAiB,QAAuC;AAC1E,UAAM,YAAY,KAAK,IAAI;AAC3B,UAAM,WAAW,SAAS,MAAM,IAAI,OAAO;AAE3C,QAAI;AAEF,UAAI,KAAK,aAAa;AACpB,cAAM,SAAS,MAAM,KAAK,YAAY,IAAI,QAAQ;AAClD,YAAI,QAAQ;AACV,gBAAM,eAAe,MAAM,WAAW,MAAM;AAC5C,gBAAM,QAAQ,KAAK,MAAM,YAAY;AAGrC,gBAAM,KAAK,YAAY,OAAO,UAAU,KAAK,OAAO,MAAM,cAAc,IAAI;AAE5E,gBAAM,UAAU,KAAK,IAAI,IAAI;AAC7B,eAAK,aAAa,OAAO;AACzB,eAAK,OAAO,MAAM,mBAAmB,OAAO,qBAAqB,OAAO,IAAI;AAE5E,iBAAO;AAAA,QACT;AAAA,MACF;AAGA,UAAI,KAAK,gBAAgB;AACvB,cAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,YAAI;AACF,gBAAM,SAAS,MAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aAM/B,CAAC,SAAS,MAAM,CAAC;AAEpB,cAAI,OAAO,KAAK,SAAS,GAAG;AAC1B,kBAAM,MAAM,OAAO,KAAK,CAAC;AACzB,gBAAI;AAEJ,gBAAI,IAAI,iBAAiB;AACvB,oBAAM,eAAe,MAAM,WAAW,IAAI,eAAe;AACzD,sBAAQ,KAAK,MAAM,YAAY;AAAA,YACjC,OAAO;AACL,sBAAQ,IAAI;AAAA,YACd;AAGA,kBAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA,eAIhB,CAAC,SAAS,MAAM,CAAC;AAGpB,gBAAI,KAAK,aAAa;AACpB,oBAAM,KAAK,iBAAiB,OAAO,MAAM;AAAA,YAC3C;AAEA,kBAAM,UAAU,KAAK,IAAI,IAAI;AA
C7B,iBAAK,aAAa,OAAO;AACzB,iBAAK,OAAO,MAAM,mBAAmB,OAAO,sBAAsB,OAAO,IAAI;AAE7E,mBAAO;AAAA,UACT;AAAA,QACF,UAAE;AACA,iBAAO,QAAQ;AAAA,QACjB;AAAA,MACF;AAGA,UAAI,KAAK,YAAY,KAAK,OAAO,GAAG,QAAQ;AAC1C,cAAM,MAAM,UAAU,MAAM,IAAI,OAAO;AAEvC,YAAI;AACF,gBAAM,UAAU,IAAI,iBAAiB;AAAA,YACnC,QAAQ,KAAK,OAAO,GAAG;AAAA,YACvB,KAAK;AAAA,UACP,CAAC;AAED,gBAAM,WAAW,MAAM,KAAK,SAAS,KAAK,OAAO;AACjD,gBAAM,iBAAiB,MAAM,SAAS,KAAM,qBAAqB;AACjE,gBAAM,eAAe,MAAM,WAAW,OAAO,KAAK,cAAc,CAAC;AACjE,gBAAM,QAAQ,KAAK,MAAM,YAAY;AAGrC,gBAAM,KAAK,aAAa,OAAO,MAAM;AAErC,gBAAM,UAAU,KAAK,IAAI,IAAI;AAC7B,eAAK,aAAa,OAAO;AACzB,eAAK,OAAO,MAAM,mBAAmB,OAAO,sBAAsB,OAAO,IAAI;AAE7E,iBAAO;AAAA,QACT,SAAS,OAAY;AACnB,cAAI,MAAM,SAAS,aAAa;AAC9B,kBAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAEA,WAAK,OAAO,MAAM,SAAS,OAAO,wBAAwB;AAC1D,aAAO;AAAA,IACT,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,4BAA4B,OAAO,IAAI,KAAK;AAC9D,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,iBAAiB,OAAc,QAA+B;AAC1E,QAAI,CAAC,KAAK,YAAa;AAEvB,QAAI;AACF,YAAM,WAAW,SAAS,MAAM,IAAI,MAAM,OAAO;AACjD,YAAM,YAAY,KAAK,UAAU,KAAK;AACtC,YAAM,iBAAiB,MAAM,SAAS,SAAS;AAE/C,YAAM,KAAK,YAAY;AAAA,QACrB;AAAA,QACA,KAAK,OAAO,MAAM,cAAc;AAAA,QAChC;AAAA,MACF;AAEA,WAAK,OAAO,MAAM,kBAAkB,MAAM,OAAO,cAAc;AAAA,IACjE,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,2BAA2B,MAAM,OAAO,IAAI,KAAK;AAAA,IACrE;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,aAAa,OAAc,QAA+B;AAEtE,QAAI,KAAK,gBAAgB;AACvB,YAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,UAAI;AACF,cAAM,iBAAiB,MAAM,SAAS,KAAK,UAAU,KAAK,CAAC;AAE3D,cAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAOhB;AAAA,UACD,IAAI,KAAK,MAAM,SAAS;AAAA,UACxB,MAAM;AAAA,UACN;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF,CAAC;AAAA,MACH,UAAE;AACA,eAAO,QAAQ;AAAA,MACjB;AAAA,IACF;AAGA,UAAM,KAAK,iBAAiB,OAAO,MAAM;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA,EAKQ,uBAA6B;AAEnC,SAAK,kBAAkB,YAAY,YAAY;AAC7C,YAAM,KAAK,gBAAgB;AAAA,IAC7B,GAAG,KAAK,KAAK,GAAI;AAEjB,SAAK,OAAO,KAAK,0BAA0B;AAAA,EAC7C;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,kBAAiC;AAC7C,SAAK,OAAO,KAAK,4BAA4B;AAE7C,QAAI,CAAC,KAAK,eAAgB;AAE1B,UAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,QAAI;AAEF,YAAM,eAAe,MAAM,OAAO,M
AAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAOvC;AAGD,iBAAW,OAAO,aAAa,MAAM;AACnC,cAAM,KAAK,YAAY,KAAK,UAAU;AAGtC,cAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA,WAIhB,CAAC,IAAI,UAAU,IAAI,OAAO,CAAC;AAAA,MAChC;AAGA,YAAM,kBAAkB,MAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAO1C;AAGD,iBAAW,OAAO,gBAAgB,MAAM;AACtC,cAAM,KAAK,YAAY,KAAK,SAAS;AAGrC,cAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA,WAIhB,CAAC,IAAI,UAAU,IAAI,OAAO,CAAC;AAAA,MAChC;AAEA,WAAK,OAAO,KAAK,wBAAwB,aAAa,KAAK,MAAM,aAAa,gBAAgB,KAAK,MAAM,aAAa;AAAA,IACxH,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,YAAY,KAAU,cAAqC;AACvE,QAAI,CAAC,KAAK,YAAY,CAAC,KAAK,OAAO,GAAG,OAAQ;AAE9C,QAAI;AACF,YAAM,MAAM,UAAU,IAAI,OAAO,IAAI,IAAI,QAAQ;AACjD,YAAM,OAAO,IAAI,mBAAmB,MAAM,SAAS,KAAK,UAAU,IAAI,IAAI,CAAC;AAE3E,YAAM,UAAU,IAAI,iBAAiB;AAAA,QACnC,QAAQ,KAAK,OAAO,GAAG;AAAA,QACvB,KAAK;AAAA,QACL,MAAM;AAAA,QACN,cAAc;AAAA,QACd,UAAU;AAAA,UACR,QAAQ,IAAI;AAAA,UACZ,SAAS,IAAI;AAAA,UACb,aAAY,oBAAI,KAAK,GAAE,YAAY;AAAA,QACrC;AAAA,MACF,CAAC;AAED,YAAM,KAAK,SAAS,KAAK,OAAO;AAEhC,WAAK,OAAO,MAAM,kBAAkB,IAAI,QAAQ,UAAU,YAAY,EAAE;AAAA,IAC1E,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,2BAA2B,IAAI,QAAQ,UAAU,KAAK;AACxE,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,aAAa,WAAyB;AAC5C,SAAK,UAAU,KAAK,SAAS;AAG7B,QAAI,KAAK,UAAU,SAAS,KAAM;AAChC,WAAK,UAAU,MAAM;AAAA,IACvB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,aAAsC;AAC1C,UAAM,UAA0B;AAAA,MAC9B,cAAc;AAAA,MACd,kBAAkB,CAAC;AAAA,MACnB,cAAc;AAAA,MACd,cAAc;AAAA,MACd,cAAc;AAAA,MACd,cAAc;AAAA,IAChB;AAGA,QAAI,KAAK,UAAU,SAAS,GAAG;AAC7B,YAAM,SAAS,CAAC,GAAG,KAAK,SAAS,EAAE,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AACvD,cAAQ,eAAe,OAAO,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,OAAO;AAClE,cAAQ,eAAe,OAAO,KAAK,MAAM,OAAO,SAAS,GAAG,CAAC;AAC7D,cAAQ,eAAe,OAAO,KAAK,MAAM,OAAO,SAAS,IAAI,CAAC;AAAA,IAChE;AAGA,QAAI,KAAK,gBAAgB;AACvB,YAAM,SAAS,MAAM,KAAK,eAAe,QAAQ;AAEjD,UAAI;AACF,cAAM,SAAS,MAAM,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,SAOjC;AAED,mBAAW,OAAO,OAAO,MAAM;AAC7B,kBAAQ,iBAAiB,IAAI,YAAY,IAAI,SAAS,IAAI,KAAK;AAC/D,kBAAQ,gBAAgB,SAAS,IAAI,SAAS,CAAC;AAC/C,kBAAQ,gBAAg
B,SAAS,IAAI,KAAK;AAAA,QAC5C;AAAA,MACF,UAAE;AACA,eAAO,QAAQ;AAAA,MACjB;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,WAA0B;AAC9B,QAAI,KAAK,iBAAiB;AACxB,oBAAc,KAAK,eAAe;AAAA,IACpC;AAEA,QAAI,KAAK,aAAa;AACpB,YAAM,KAAK,YAAY,KAAK;AAAA,IAC9B;AAEA,QAAI,KAAK,gBAAgB;AACvB,YAAM,KAAK,eAAe,IAAI;AAAA,IAChC;AAEA,SAAK,OAAO,KAAK,mCAAmC;AAAA,EACtD;AACF;",
|
|
6
|
+
"names": []
|
|
7
|
+
}
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import zlib from "zlib";
|
|
2
|
+
import { promisify } from "util";
|
|
3
|
+
const gzipAsync = promisify(zlib.gzip);
|
|
4
|
+
const gunzipAsync = promisify(zlib.gunzip);
|
|
5
|
+
const brotliCompressAsync = promisify(zlib.brotliCompress);
|
|
6
|
+
const brotliDecompressAsync = promisify(zlib.brotliDecompress);
|
|
7
|
+
var CompressionType = /* @__PURE__ */ ((CompressionType2) => {
|
|
8
|
+
CompressionType2["NONE"] = "none";
|
|
9
|
+
CompressionType2["GZIP"] = "gzip";
|
|
10
|
+
CompressionType2["BROTLI"] = "brotli";
|
|
11
|
+
return CompressionType2;
|
|
12
|
+
})(CompressionType || {});
|
|
13
|
+
async function compress(data, options = {}) {
|
|
14
|
+
const { type = "gzip" /* GZIP */, level = 6 } = options;
|
|
15
|
+
const input = typeof data === "string" ? Buffer.from(data, "utf8") : data;
|
|
16
|
+
switch (type) {
|
|
17
|
+
case "none" /* NONE */:
|
|
18
|
+
return input;
|
|
19
|
+
case "gzip" /* GZIP */:
|
|
20
|
+
return gzipAsync(input, { level });
|
|
21
|
+
case "brotli" /* BROTLI */:
|
|
22
|
+
return brotliCompressAsync(input, {
|
|
23
|
+
params: {
|
|
24
|
+
[zlib.constants.BROTLI_PARAM_QUALITY]: level
|
|
25
|
+
}
|
|
26
|
+
});
|
|
27
|
+
default:
|
|
28
|
+
throw new Error(`Unknown compression type: ${type}`);
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
async function decompress(data, type = "gzip" /* GZIP */) {
|
|
32
|
+
let decompressed;
|
|
33
|
+
switch (type) {
|
|
34
|
+
case "none" /* NONE */:
|
|
35
|
+
decompressed = data;
|
|
36
|
+
break;
|
|
37
|
+
case "gzip" /* GZIP */:
|
|
38
|
+
decompressed = await gunzipAsync(data);
|
|
39
|
+
break;
|
|
40
|
+
case "brotli" /* BROTLI */:
|
|
41
|
+
decompressed = await brotliDecompressAsync(data);
|
|
42
|
+
break;
|
|
43
|
+
default:
|
|
44
|
+
throw new Error(`Unknown compression type: ${type}`);
|
|
45
|
+
}
|
|
46
|
+
return decompressed.toString("utf8");
|
|
47
|
+
}
|
|
48
|
+
function compressionRatio(original, compressed) {
|
|
49
|
+
if (original === 0) return 0;
|
|
50
|
+
return (1 - compressed / original) * 100;
|
|
51
|
+
}
|
|
52
|
+
function detectCompressionType(data) {
|
|
53
|
+
if (data.length >= 2 && data[0] === 31 && data[1] === 139) {
|
|
54
|
+
return "gzip" /* GZIP */;
|
|
55
|
+
}
|
|
56
|
+
if (data.length >= 4 && data[0] === 206 && data[1] === 178) {
|
|
57
|
+
return "brotli" /* BROTLI */;
|
|
58
|
+
}
|
|
59
|
+
return "none" /* NONE */;
|
|
60
|
+
}
|
|
61
|
+
function chooseOptimalCompression(data, speedPriority = false) {
|
|
62
|
+
const size = typeof data === "string" ? Buffer.byteLength(data) : data.length;
|
|
63
|
+
if (size < 1024) {
|
|
64
|
+
return "none" /* NONE */;
|
|
65
|
+
}
|
|
66
|
+
if (speedPriority || size < 100 * 1024) {
|
|
67
|
+
return "gzip" /* GZIP */;
|
|
68
|
+
}
|
|
69
|
+
return "brotli" /* BROTLI */;
|
|
70
|
+
}
|
|
71
|
+
export {
|
|
72
|
+
CompressionType,
|
|
73
|
+
chooseOptimalCompression,
|
|
74
|
+
compress,
|
|
75
|
+
compressionRatio,
|
|
76
|
+
decompress,
|
|
77
|
+
detectCompressionType
|
|
78
|
+
};
|
|
79
|
+
//# sourceMappingURL=compression.js.map
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../../../src/core/utils/compression.ts"],
|
|
4
|
+
"sourcesContent": ["/**\n * Compression utilities for storage optimization\n */\n\nimport zlib from 'zlib';\nimport { promisify } from 'util';\n\nconst gzipAsync = promisify(zlib.gzip);\nconst gunzipAsync = promisify(zlib.gunzip);\nconst brotliCompressAsync = promisify(zlib.brotliCompress);\nconst brotliDecompressAsync = promisify(zlib.brotliDecompress);\n\nexport enum CompressionType {\n NONE = 'none',\n GZIP = 'gzip',\n BROTLI = 'brotli',\n}\n\nexport interface CompressionOptions {\n type?: CompressionType;\n level?: number;\n}\n\n/**\n * Compress data using specified algorithm\n */\nexport async function compress(\n data: string | Buffer,\n options: CompressionOptions = {}\n): Promise<Buffer> {\n const { type = CompressionType.GZIP, level = 6 } = options;\n \n const input = typeof data === 'string' ? Buffer.from(data, 'utf8') : data;\n \n switch (type) {\n case CompressionType.NONE:\n return input;\n \n case CompressionType.GZIP:\n return gzipAsync(input, { level });\n \n case CompressionType.BROTLI:\n return brotliCompressAsync(input, {\n params: {\n [zlib.constants.BROTLI_PARAM_QUALITY]: level,\n },\n });\n \n default:\n throw new Error(`Unknown compression type: ${type}`);\n }\n}\n\n/**\n * Decompress data\n */\nexport async function decompress(\n data: Buffer,\n type: CompressionType = CompressionType.GZIP\n): Promise<string> {\n let decompressed: Buffer;\n \n switch (type) {\n case CompressionType.NONE:\n decompressed = data;\n break;\n \n case CompressionType.GZIP:\n decompressed = await gunzipAsync(data);\n break;\n \n case CompressionType.BROTLI:\n decompressed = await brotliDecompressAsync(data);\n break;\n \n default:\n throw new Error(`Unknown compression type: ${type}`);\n }\n \n return decompressed.toString('utf8');\n}\n\n/**\n * Calculate compression ratio\n */\nexport function compressionRatio(original: number, compressed: number): number {\n if (original === 0) return 0;\n return (1 - compressed / original) * 100;\n}\n\n/**\n * Auto-detect 
compression type from buffer\n */\nexport function detectCompressionType(data: Buffer): CompressionType {\n // Check for gzip magic number\n if (data.length >= 2 && data[0] === 0x1f && data[1] === 0x8b) {\n return CompressionType.GZIP;\n }\n \n // Check for brotli\n // Brotli doesn't have a consistent magic number, but we can try to decompress\n // This is a heuristic approach\n if (data.length >= 4 && data[0] === 0xce && data[1] === 0xb2) {\n return CompressionType.BROTLI;\n }\n \n return CompressionType.NONE;\n}\n\n/**\n * Choose optimal compression based on data characteristics\n */\nexport function chooseOptimalCompression(\n data: string | Buffer,\n speedPriority: boolean = false\n): CompressionType {\n const size = typeof data === 'string' ? Buffer.byteLength(data) : data.length;\n \n // Don't compress small data\n if (size < 1024) {\n return CompressionType.NONE;\n }\n \n // Use gzip for speed priority or medium data\n if (speedPriority || size < 100 * 1024) {\n return CompressionType.GZIP;\n }\n \n // Use brotli for large data and better compression\n return CompressionType.BROTLI;\n}"],
|
|
5
|
+
"mappings": "AAIA,OAAO,UAAU;AACjB,SAAS,iBAAiB;AAE1B,MAAM,YAAY,UAAU,KAAK,IAAI;AACrC,MAAM,cAAc,UAAU,KAAK,MAAM;AACzC,MAAM,sBAAsB,UAAU,KAAK,cAAc;AACzD,MAAM,wBAAwB,UAAU,KAAK,gBAAgB;AAEtD,IAAK,kBAAL,kBAAKA,qBAAL;AACL,EAAAA,iBAAA,UAAO;AACP,EAAAA,iBAAA,UAAO;AACP,EAAAA,iBAAA,YAAS;AAHC,SAAAA;AAAA,GAAA;AAcZ,eAAsB,SACpB,MACA,UAA8B,CAAC,GACd;AACjB,QAAM,EAAE,OAAO,mBAAsB,QAAQ,EAAE,IAAI;AAEnD,QAAM,QAAQ,OAAO,SAAS,WAAW,OAAO,KAAK,MAAM,MAAM,IAAI;AAErE,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,aAAO;AAAA,IAET,KAAK;AACH,aAAO,UAAU,OAAO,EAAE,MAAM,CAAC;AAAA,IAEnC,KAAK;AACH,aAAO,oBAAoB,OAAO;AAAA,QAChC,QAAQ;AAAA,UACN,CAAC,KAAK,UAAU,oBAAoB,GAAG;AAAA,QACzC;AAAA,MACF,CAAC;AAAA,IAEH;AACE,YAAM,IAAI,MAAM,6BAA6B,IAAI,EAAE;AAAA,EACvD;AACF;AAKA,eAAsB,WACpB,MACA,OAAwB,mBACP;AACjB,MAAI;AAEJ,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,qBAAe;AACf;AAAA,IAEF,KAAK;AACH,qBAAe,MAAM,YAAY,IAAI;AACrC;AAAA,IAEF,KAAK;AACH,qBAAe,MAAM,sBAAsB,IAAI;AAC/C;AAAA,IAEF;AACE,YAAM,IAAI,MAAM,6BAA6B,IAAI,EAAE;AAAA,EACvD;AAEA,SAAO,aAAa,SAAS,MAAM;AACrC;AAKO,SAAS,iBAAiB,UAAkB,YAA4B;AAC7E,MAAI,aAAa,EAAG,QAAO;AAC3B,UAAQ,IAAI,aAAa,YAAY;AACvC;AAKO,SAAS,sBAAsB,MAA+B;AAEnE,MAAI,KAAK,UAAU,KAAK,KAAK,CAAC,MAAM,MAAQ,KAAK,CAAC,MAAM,KAAM;AAC5D,WAAO;AAAA,EACT;AAKA,MAAI,KAAK,UAAU,KAAK,KAAK,CAAC,MAAM,OAAQ,KAAK,CAAC,MAAM,KAAM;AAC5D,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAKO,SAAS,yBACd,MACA,gBAAyB,OACR;AACjB,QAAM,OAAO,OAAO,SAAS,WAAW,OAAO,WAAW,IAAI,IAAI,KAAK;AAGvE,MAAI,OAAO,MAAM;AACf,WAAO;AAAA,EACT;AAGA,MAAI,iBAAiB,OAAO,MAAM,MAAM;AACtC,WAAO;AAAA,EACT;AAGA,SAAO;AACT;",
|
|
6
|
+
"names": ["CompressionType"]
|
|
7
|
+
}
|