@wgtechlabs/nuvex 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +427 -0
- package/dist/.tsbuildinfo +1 -0
- package/dist/cjs/core/client.js +981 -0
- package/dist/cjs/core/client.js.map +1 -0
- package/dist/cjs/core/database.js +297 -0
- package/dist/cjs/core/database.js.map +1 -0
- package/dist/cjs/core/engine.js +1202 -0
- package/dist/cjs/core/engine.js.map +1 -0
- package/dist/cjs/core/index.js +35 -0
- package/dist/cjs/core/index.js.map +1 -0
- package/dist/cjs/index.js +109 -0
- package/dist/cjs/index.js.map +1 -0
- package/dist/cjs/interfaces/index.js +12 -0
- package/dist/cjs/interfaces/index.js.map +1 -0
- package/dist/cjs/layers/index.js +22 -0
- package/dist/cjs/layers/index.js.map +1 -0
- package/dist/cjs/layers/memory.js +388 -0
- package/dist/cjs/layers/memory.js.map +1 -0
- package/dist/cjs/layers/postgres.js +492 -0
- package/dist/cjs/layers/postgres.js.map +1 -0
- package/dist/cjs/layers/redis.js +388 -0
- package/dist/cjs/layers/redis.js.map +1 -0
- package/dist/cjs/types/index.js +52 -0
- package/dist/cjs/types/index.js.map +1 -0
- package/dist/esm/core/client.js +944 -0
- package/dist/esm/core/client.js.map +1 -0
- package/dist/esm/core/database.js +289 -0
- package/dist/esm/core/database.js.map +1 -0
- package/dist/esm/core/engine.js +1198 -0
- package/dist/esm/core/engine.js.map +1 -0
- package/dist/esm/core/index.js +16 -0
- package/dist/esm/core/index.js.map +1 -0
- package/dist/esm/index.js +87 -0
- package/dist/esm/index.js.map +1 -0
- package/dist/esm/interfaces/index.js +11 -0
- package/dist/esm/interfaces/index.js.map +1 -0
- package/dist/esm/layers/index.js +16 -0
- package/dist/esm/layers/index.js.map +1 -0
- package/dist/esm/layers/memory.js +384 -0
- package/dist/esm/layers/memory.js.map +1 -0
- package/dist/esm/layers/postgres.js +485 -0
- package/dist/esm/layers/postgres.js.map +1 -0
- package/dist/esm/layers/redis.js +384 -0
- package/dist/esm/layers/redis.js.map +1 -0
- package/dist/esm/types/index.js +49 -0
- package/dist/esm/types/index.js.map +1 -0
- package/dist/types/core/client.d.ts +561 -0
- package/dist/types/core/client.d.ts.map +1 -0
- package/dist/types/core/database.d.ts +130 -0
- package/dist/types/core/database.d.ts.map +1 -0
- package/dist/types/core/engine.d.ts +450 -0
- package/dist/types/core/engine.d.ts.map +1 -0
- package/dist/types/core/index.d.ts +13 -0
- package/dist/types/core/index.d.ts.map +1 -0
- package/dist/types/index.d.ts +85 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/interfaces/index.d.ts +209 -0
- package/dist/types/interfaces/index.d.ts.map +1 -0
- package/dist/types/layers/index.d.ts +16 -0
- package/dist/types/layers/index.d.ts.map +1 -0
- package/dist/types/layers/memory.d.ts +261 -0
- package/dist/types/layers/memory.d.ts.map +1 -0
- package/dist/types/layers/postgres.d.ts +313 -0
- package/dist/types/layers/postgres.d.ts.map +1 -0
- package/dist/types/layers/redis.d.ts +248 -0
- package/dist/types/layers/redis.d.ts.map +1 -0
- package/dist/types/types/index.d.ts +410 -0
- package/dist/types/types/index.d.ts.map +1 -0
- package/package.json +90 -0
|
@@ -0,0 +1,1198 @@
|
|
|
1
|
+
import { StorageLayer } from '../types/index.js';
|
|
2
|
+
import { MemoryStorage, RedisStorage, PostgresStorage } from '../layers/index.js';
|
|
3
|
+
/**
|
|
4
|
+
* # StorageEngine - Multi-layer Storage Architecture
|
|
5
|
+
*
|
|
6
|
+
* The core storage engine that implements Nuvex's intelligent three-tier storage system.
|
|
7
|
+
* Provides automatic data management, intelligent caching, and comprehensive fallback mechanisms
|
|
8
|
+
* across Memory, Redis, and PostgreSQL layers.
|
|
9
|
+
*
|
|
10
|
+
* ## Architecture Design
|
|
11
|
+
*
|
|
12
|
+
* The StorageEngine follows a hierarchical approach where data flows through layers based on
|
|
13
|
+
* access patterns and configured policies:
|
|
14
|
+
*
|
|
15
|
+
* ```
|
|
16
|
+
* ┌─────────────┐ ┌─────────────┐ ┌─────────────┐
|
|
17
|
+
* │ Memory │───▶│ Redis │───▶│ PostgreSQL │
|
|
18
|
+
* │ (Layer 1) │ │ (Layer 2) │ │ (Layer 3) │
|
|
19
|
+
* │ < 1ms │ │ 1-5ms │ │ 5-50ms │
|
|
20
|
+
* └─────────────┘ └─────────────┘ └─────────────┘
|
|
21
|
+
* ```
|
|
22
|
+
*
|
|
23
|
+
* ## Key Features
|
|
24
|
+
*
|
|
25
|
+
* ### Intelligent Data Management
|
|
26
|
+
* - **Automatic Promotion**: Frequently accessed data moves to faster layers
|
|
27
|
+
* - **Smart Demotion**: Unused data gracefully moves to persistent storage
|
|
28
|
+
* - **TTL Management**: Configurable time-to-live for each layer
|
|
29
|
+
* - **Memory Optimization**: LRU eviction and automatic cleanup
|
|
30
|
+
*
|
|
31
|
+
* ### Performance Optimization
|
|
32
|
+
* - **Sub-millisecond Access**: Memory cache for hot data
|
|
33
|
+
* - **Batch Operations**: Efficient bulk data operations
|
|
34
|
+
* - **Connection Pooling**: Optimized database connections
|
|
35
|
+
* - **Metrics Collection**: Real-time performance monitoring
|
|
36
|
+
*
|
|
37
|
+
* ### Reliability Features
|
|
38
|
+
* - **Graceful Degradation**: Automatic fallback when layers are unavailable
|
|
39
|
+
* - **Error Recovery**: Comprehensive error handling and logging
|
|
40
|
+
* - **Data Consistency**: Synchronization across all storage layers
|
|
41
|
+
* - **Health Monitoring**: Layer availability and performance tracking
|
|
42
|
+
*
|
|
43
|
+
* ## Usage Examples
|
|
44
|
+
*
|
|
45
|
+
* ### Basic Operations
|
|
46
|
+
* ```typescript
|
|
47
|
+
* const engine = new StorageEngine({
|
|
48
|
+
* memory: { ttl: 3600000, maxSize: 10000 },
|
|
49
|
+
* redis: { url: 'redis://localhost:6379' },
|
|
50
|
+
* postgres: { host: 'localhost', database: 'app' }
|
|
51
|
+
* });
|
|
52
|
+
*
|
|
53
|
+
* await engine.connect();
|
|
54
|
+
*
|
|
55
|
+
* // Set data across all layers
|
|
56
|
+
* await engine.set('user:123', userData);
|
|
57
|
+
*
|
|
58
|
+
* // Get data (checks Memory → Redis → PostgreSQL)
|
|
59
|
+
* const user = await engine.get('user:123');
|
|
60
|
+
* ```
|
|
61
|
+
*
|
|
62
|
+
* ### Layer-specific Operations
|
|
63
|
+
* ```typescript
|
|
64
|
+
* // Store only in Redis
|
|
65
|
+
* await engine.set('session:abc', sessionData, {
|
|
66
|
+
* layer: StorageLayer.REDIS
|
|
67
|
+
* });
|
|
68
|
+
*
|
|
69
|
+
* // Skip cache and go directly to PostgreSQL
|
|
70
|
+
* const criticalData = await engine.get('config:critical', {
|
|
71
|
+
* skipCache: true
|
|
72
|
+
* });
|
|
73
|
+
* ```
|
|
74
|
+
*
|
|
75
|
+
* ### Batch Operations
|
|
76
|
+
* ```typescript
|
|
77
|
+
* const operations = [
|
|
78
|
+
* { operation: 'set', key: 'key1', value: 'value1' },
|
|
79
|
+
* { operation: 'set', key: 'key2', value: 'value2' }
|
|
80
|
+
* ];
|
|
81
|
+
*
|
|
82
|
+
* const results = await engine.setBatch(operations);
|
|
83
|
+
* ```
|
|
84
|
+
*
|
|
85
|
+
* @example
|
|
86
|
+
* ```typescript
|
|
87
|
+
* // Initialize with full configuration
|
|
88
|
+
* const engine = new StorageEngine({
|
|
89
|
+
* memory: {
|
|
90
|
+
* ttl: 24 * 60 * 60 * 1000, // 24 hours
|
|
91
|
+
* maxSize: 10000
|
|
92
|
+
* },
|
|
93
|
+
* redis: {
|
|
94
|
+
* url: 'redis://localhost:6379',
|
|
95
|
+
* ttl: 3 * 24 * 60 * 60 // 3 days
|
|
96
|
+
* },
|
|
97
|
+
* postgres: {
|
|
98
|
+
* host: 'localhost',
|
|
99
|
+
* port: 5432,
|
|
100
|
+
* database: 'myapp',
|
|
101
|
+
* user: 'user',
|
|
102
|
+
* password: 'password'
|
|
103
|
+
* },
|
|
104
|
+
* logging: {
|
|
105
|
+
* enabled: true,
|
|
106
|
+
* logger: console
|
|
107
|
+
* }
|
|
108
|
+
* });
|
|
109
|
+
*
|
|
110
|
+
* await engine.connect();
|
|
111
|
+
*
|
|
112
|
+
* // Your storage operations here...
|
|
113
|
+
*
|
|
114
|
+
* await engine.disconnect();
|
|
115
|
+
* ```
|
|
116
|
+
*
|
|
117
|
+
* @see {@link NuvexClient} for high-level client operations
|
|
118
|
+
* @see {@link NuvexConfig} for configuration options
|
|
119
|
+
* @see {@link StorageOptions} for operation-specific options
|
|
120
|
+
*
|
|
121
|
+
* @public
|
|
122
|
+
* @category Core
|
|
123
|
+
*/
|
|
124
|
+
export class StorageEngine {
|
|
125
|
+
/**
|
|
126
|
+
* Creates a new StorageEngine instance with the specified configuration.
|
|
127
|
+
*
|
|
128
|
+
* The constructor initializes all three storage layers and sets up automatic
|
|
129
|
+
* memory cleanup intervals. No connections are established until `connect()` is called.
|
|
130
|
+
*
|
|
131
|
+
* @param config - Complete configuration object for all storage layers
|
|
132
|
+
*
|
|
133
|
+
* @example
|
|
134
|
+
* ```typescript
|
|
135
|
+
* const engine = new StorageEngine({
|
|
136
|
+
* memory: { ttl: 3600000, maxSize: 10000 },
|
|
137
|
+
* redis: { url: 'redis://localhost:6379', ttl: 86400 },
|
|
138
|
+
* postgres: { host: 'localhost', database: 'myapp' },
|
|
139
|
+
* logging: { enabled: true }
|
|
140
|
+
* });
|
|
141
|
+
* ```
|
|
142
|
+
*
|
|
143
|
+
* @throws {Error} When configuration is invalid
|
|
144
|
+
* @since 1.0.0
|
|
145
|
+
*/
|
|
146
|
+
constructor(config) {
|
|
147
|
+
this.config = config;
|
|
148
|
+
this.connected = false;
|
|
149
|
+
this.cleanupInterval = null;
|
|
150
|
+
// Logging setup
|
|
151
|
+
this.logger = config.logging?.enabled ? (config.logging.logger || null) : null;
|
|
152
|
+
// Layer 1: Memory storage with LRU eviction
|
|
153
|
+
const maxMemorySize = config.memory?.maxSize || 10000; // 10k entries default
|
|
154
|
+
this.l1Memory = new MemoryStorage(maxMemorySize, this.logger);
|
|
155
|
+
// Layer 2: Redis storage (optional)
|
|
156
|
+
this.l2Redis = config.redis?.url
|
|
157
|
+
? new RedisStorage(config.redis.url, this.logger)
|
|
158
|
+
: null;
|
|
159
|
+
// Layer 3: PostgreSQL storage
|
|
160
|
+
this.l3Postgres = config.postgres
|
|
161
|
+
? new PostgresStorage(config.postgres, this.logger)
|
|
162
|
+
: null;
|
|
163
|
+
// Metrics initialization
|
|
164
|
+
this.metrics = {
|
|
165
|
+
memoryHits: 0,
|
|
166
|
+
memoryMisses: 0,
|
|
167
|
+
redisHits: 0,
|
|
168
|
+
redisMisses: 0,
|
|
169
|
+
postgresHits: 0,
|
|
170
|
+
postgresMisses: 0,
|
|
171
|
+
totalOperations: 0,
|
|
172
|
+
averageResponseTime: 0
|
|
173
|
+
};
|
|
174
|
+
// Start memory cleanup interval
|
|
175
|
+
this.startMemoryCleanup();
|
|
176
|
+
}
|
|
177
|
+
log(level, message, meta) {
|
|
178
|
+
if (this.logger) {
|
|
179
|
+
this.logger[level](message, meta);
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
/**
|
|
183
|
+
* Establishes connections to all configured storage layers.
|
|
184
|
+
*
|
|
185
|
+
* This method initializes connections to Redis and PostgreSQL (if configured)
|
|
186
|
+
* and sets up the internal state for multi-layer operations. The memory layer
|
|
187
|
+
* is always available and doesn't require connection setup.
|
|
188
|
+
*
|
|
189
|
+
* @throws {Error} When connection to any configured layer fails
|
|
190
|
+
*
|
|
191
|
+
* @example
|
|
192
|
+
* ```typescript
|
|
193
|
+
* const engine = new StorageEngine(config);
|
|
194
|
+
* await engine.connect();
|
|
195
|
+
* console.log('All storage layers connected');
|
|
196
|
+
* ```
|
|
197
|
+
*
|
|
198
|
+
* @since 1.0.0
|
|
199
|
+
* @public
|
|
200
|
+
*/
|
|
201
|
+
    async connect() {
        try {
            // Connect to Redis (L2) first. Redis is optional: a failure here is
            // downgraded to a warning and the layer is disabled (set to null)
            // rather than aborting startup — the engine then runs as
            // Memory + PostgreSQL only.
            if (this.l2Redis) {
                try {
                    await this.l2Redis.connect();
                    this.log('info', 'Redis L2 connected for Nuvex storage');
                }
                catch (error) {
                    this.log('warn', 'Redis L2 not available, using Memory + PostgreSQL only', {
                        error: error instanceof Error ? error.message : String(error)
                    });
                    // Disable the layer so later operations skip it entirely.
                    this.l2Redis = null;
                }
            }
            else {
                this.log('info', 'Redis L2 URL not provided, using Memory + PostgreSQL only');
            }
            // Connect to PostgreSQL (L3). Unlike Redis, an L3 failure is NOT
            // caught here: it propagates to the outer catch and aborts
            // connect(), because L3 is the persistence source of truth.
            if (this.l3Postgres) {
                await this.l3Postgres.connect();
                this.log('info', 'PostgreSQL L3 connected for Nuvex storage');
            }
            // Mark the engine usable only after all mandatory layers are up.
            this.connected = true;
            this.log('info', 'Nuvex StorageEngine initialized with 3-layer architecture');
        }
        catch (error) {
            const err = error;
            this.log('error', 'Nuvex StorageEngine connection failed', {
                error: err.message,
                stack: err.stack
            });
            // Rethrow so callers can distinguish a failed startup.
            throw error;
        }
    }
|
|
236
|
+
async disconnect() {
|
|
237
|
+
// Disconnect from Redis (L2)
|
|
238
|
+
if (this.l2Redis) {
|
|
239
|
+
await this.l2Redis.disconnect();
|
|
240
|
+
}
|
|
241
|
+
// Disconnect from PostgreSQL (L3)
|
|
242
|
+
if (this.l3Postgres) {
|
|
243
|
+
await this.l3Postgres.disconnect();
|
|
244
|
+
}
|
|
245
|
+
// Stop memory cleanup interval
|
|
246
|
+
if (this.cleanupInterval) {
|
|
247
|
+
clearInterval(this.cleanupInterval);
|
|
248
|
+
this.cleanupInterval = null;
|
|
249
|
+
}
|
|
250
|
+
this.connected = false;
|
|
251
|
+
this.log('info', 'Nuvex StorageEngine disconnected');
|
|
252
|
+
}
|
|
253
|
+
isConnected() {
|
|
254
|
+
return this.connected;
|
|
255
|
+
}
|
|
256
|
+
/**
|
|
257
|
+
* Retrieves a value from storage using the intelligent layer hierarchy.
|
|
258
|
+
*
|
|
259
|
+
* The get operation follows the multi-layer approach:
|
|
260
|
+
* 1. **Memory Cache**: Checks in-memory storage first (fastest)
|
|
261
|
+
* 2. **Redis Cache**: Falls back to Redis if not in memory
|
|
262
|
+
* 3. **PostgreSQL**: Final fallback to persistent storage
|
|
263
|
+
*
|
|
264
|
+
* When data is found in a lower layer, it's automatically promoted to higher
|
|
265
|
+
* layers for faster future access (intelligent caching).
|
|
266
|
+
*
|
|
267
|
+
* @template T - The expected type of the stored value
|
|
268
|
+
* @param key - The storage key to retrieve
|
|
269
|
+
* @param options - Optional configuration for the get operation
|
|
270
|
+
* @returns Promise resolving to the stored value or null if not found
|
|
271
|
+
*
|
|
272
|
+
* @example
|
|
273
|
+
* ```typescript
|
|
274
|
+
* // Basic get operation
|
|
275
|
+
* const userData = await engine.get<UserData>('user:123');
|
|
276
|
+
*
|
|
277
|
+
* // Get from specific layer only
|
|
278
|
+
* const sessionData = await engine.get('session:abc', {
|
|
279
|
+
* layer: StorageLayer.REDIS
|
|
280
|
+
* });
|
|
281
|
+
*
|
|
282
|
+
* // Skip cache layers and get from PostgreSQL directly
|
|
283
|
+
* const criticalData = await engine.get('config:critical', {
|
|
284
|
+
* skipCache: true
|
|
285
|
+
* });
|
|
286
|
+
* ```
|
|
287
|
+
*
|
|
288
|
+
* @since 1.0.0
|
|
289
|
+
* @public
|
|
290
|
+
*/
|
|
291
|
+
    async get(key, options = {}) {
        // Per-call latency and aggregate operation counters feed the metrics.
        const startTime = Date.now();
        this.metrics.totalOperations++;
        try {
            // TTL used when warming caches on a lower-layer hit.
            // NOTE(review): this value follows the Redis convention (seconds),
            // but it is also passed to l1Memory.set below, while the memory
            // config example documents ttl in milliseconds — confirm which
            // unit each layer's set() expects.
            const defaultTTL = this.config.redis?.ttl || 3 * 24 * 60 * 60; // 3 days in seconds
            // skipCache bypasses L1/L2 entirely and reads the source of truth.
            // If no L3 is configured, the normal cascade below still runs.
            if (options?.skipCache && this.l3Postgres) {
                const value = await this.l3Postgres.get(key);
                this.updateResponseTime(Date.now() - startTime);
                return value;
            }
            // Explicit layer targeting: read exactly one layer, no promotion,
            // and no hit/miss metrics are recorded for targeted reads.
            if (options?.layer) {
                let value = null;
                switch (options.layer) {
                    case StorageLayer.MEMORY:
                        value = await this.l1Memory.get(key);
                        break;
                    case StorageLayer.REDIS:
                        // Absent layer yields null rather than an error.
                        value = this.l2Redis ? await this.l2Redis.get(key) : null;
                        break;
                    case StorageLayer.POSTGRES:
                        value = this.l3Postgres ? await this.l3Postgres.get(key) : null;
                        break;
                }
                this.updateResponseTime(Date.now() - startTime);
                return value;
            }
            // Layer 1: memory cache — fastest path.
            let data = await this.l1Memory.get(key);
            if (data !== null) {
                this.metrics.memoryHits++;
                this.updateResponseTime(Date.now() - startTime);
                return data;
            }
            this.metrics.memoryMisses++;
            // Layer 2: Redis. On a hit, promote the value into L1 so the next
            // read is served from memory.
            if (this.l2Redis) {
                data = await this.l2Redis.get(key);
                if (data !== null) {
                    this.metrics.redisHits++;
                    // Warm L1 cache for next access
                    await this.l1Memory.set(key, data, defaultTTL);
                    this.updateResponseTime(Date.now() - startTime);
                    return data;
                }
                this.metrics.redisMisses++;
            }
            // Layer 3: PostgreSQL — persistent source of truth. On a hit,
            // warm both caches; allSettled tolerates cache-write failures.
            if (this.l3Postgres) {
                data = await this.l3Postgres.get(key);
                if (data !== null) {
                    this.metrics.postgresHits++;
                    await Promise.allSettled([
                        this.l1Memory.set(key, data, defaultTTL),
                        this.l2Redis ? this.l2Redis.set(key, data, defaultTTL) : Promise.resolve()
                    ]);
                    this.updateResponseTime(Date.now() - startTime);
                    return data;
                }
                this.metrics.postgresMisses++;
            }
            // Not found in any layer.
            this.updateResponseTime(Date.now() - startTime);
            return null;
        }
        catch (error) {
            const err = error;
            this.log('error', `Error getting ${key}`, {
                error: err.message,
                stack: err.stack,
                operation: 'get',
                key
            });
            this.updateResponseTime(Date.now() - startTime);
            // Read errors are swallowed by design: callers see "not found".
            return null;
        }
    }
|
|
370
|
+
/**
|
|
371
|
+
* Set value in storage layers using L3-first write strategy
|
|
372
|
+
*
|
|
373
|
+
* **L3-First Write Strategy:**
|
|
374
|
+
* 1. Write to PostgreSQL (L3) first as source of truth
|
|
375
|
+
* 2. If L3 write succeeds, warm caches (L1, L2) using Promise.allSettled
|
|
376
|
+
* 3. Cache failures don't break the operation (graceful degradation)
|
|
377
|
+
*
|
|
378
|
+
* **Error Handling:**
|
|
379
|
+
* - Returns `false` if L3 (PostgreSQL) write fails - operation is aborted
|
|
380
|
+
* - Returns `false` if engine is not connected
|
|
381
|
+
* - Returns `true` if L3 write succeeds (cache failures are tolerated)
|
|
382
|
+
* - For memory/Redis-only deployments (no L3), cache write success determines result
|
|
383
|
+
*
|
|
384
|
+
* **Usage Recommendations:**
|
|
385
|
+
* ```typescript
|
|
386
|
+
* // Always check the return value for critical data
|
|
387
|
+
* const success = await engine.set('user:123', userData);
|
|
388
|
+
* if (!success) {
|
|
389
|
+
* // Handle failure: retry, log, alert, or use fallback
|
|
390
|
+
* console.error('Failed to persist user data');
|
|
391
|
+
* throw new Error('Storage operation failed');
|
|
392
|
+
* }
|
|
393
|
+
*
|
|
394
|
+
* // For non-critical data, you may proceed regardless
|
|
395
|
+
* await engine.set('cache:temp', tempData); // Fire and forget
|
|
396
|
+
* ```
|
|
397
|
+
*
|
|
398
|
+
* @param key - The key to store
|
|
399
|
+
* @param value - The value to store
|
|
400
|
+
* @param options - Optional storage options (ttl, layer targeting)
|
|
401
|
+
* @returns Promise resolving to true if operation succeeded, false otherwise
|
|
402
|
+
*/
|
|
403
|
+
    async set(key, value, options = {}) {
        // Refuse writes before connect() succeeds.
        if (!this.connected) {
            return false;
        }
        const startTime = Date.now();
        this.metrics.totalOperations++;
        try {
            // NOTE(review): ttl is forwarded unchanged to whichever layer is
            // written; the layers appear to use different TTL units (memory:
            // ms, redis: s per the class examples) — confirm expected unit.
            const ttl = options?.ttl;
            // Explicit layer targeting: write exactly one layer.
            // NOTE(review): if the targeted layer is not configured
            // (l2Redis/l3Postgres is null) nothing is written yet true is
            // still returned — callers cannot detect this; confirm intended.
            if (options?.layer) {
                switch (options.layer) {
                    case StorageLayer.MEMORY:
                        await this.l1Memory.set(key, value, ttl);
                        this.updateResponseTime(Date.now() - startTime);
                        return true;
                    case StorageLayer.REDIS:
                        if (this.l2Redis) {
                            await this.l2Redis.set(key, value, ttl);
                        }
                        this.updateResponseTime(Date.now() - startTime);
                        return true;
                    case StorageLayer.POSTGRES:
                        if (this.l3Postgres) {
                            await this.l3Postgres.set(key, value, ttl);
                        }
                        this.updateResponseTime(Date.now() - startTime);
                        return true;
                }
            }
            // L3-first write strategy: persist to PostgreSQL (source of
            // truth) before touching any cache. An L3 failure aborts the
            // whole operation with false.
            if (this.l3Postgres) {
                try {
                    await this.l3Postgres.set(key, value, ttl);
                }
                catch (error) {
                    const err = error;
                    this.log('error', `Critical: L3 PostgreSQL write failed for ${key}`, {
                        error: err.message,
                        stack: err.stack,
                        operation: 'set',
                        key,
                        layer: 'L3'
                    });
                    this.updateResponseTime(Date.now() - startTime);
                    return false; // L3 failure is critical - abort operation
                }
            }
            // Best-effort cache warming: allSettled means cache failures do
            // not affect the result once L3 has the data.
            await Promise.allSettled([
                this.l1Memory.set(key, value, ttl),
                this.l2Redis ? this.l2Redis.set(key, value, ttl) : Promise.resolve()
            ]);
            this.updateResponseTime(Date.now() - startTime);
            return true;
        }
        catch (error) {
            const err = error;
            this.log('error', `Error setting ${key}`, {
                error: err.message,
                stack: err.stack,
                operation: 'set',
                key
            });
            this.updateResponseTime(Date.now() - startTime);
            return false;
        }
    }
|
|
470
|
+
/**
|
|
471
|
+
* Delete from all storage layers using resilient approach
|
|
472
|
+
*
|
|
473
|
+
* Uses Promise.allSettled to attempt deletion from all layers without
|
|
474
|
+
* failing if individual layers are unavailable. This provides graceful
|
|
475
|
+
* degradation - even if cache layers fail, the operation continues.
|
|
476
|
+
*
|
|
477
|
+
* @param key - The key to delete
|
|
478
|
+
* @param options - Optional storage options (layer targeting)
|
|
479
|
+
* @returns Promise resolving to true if operation completed
|
|
480
|
+
*/
|
|
481
|
+
async delete(key, options = {}) {
|
|
482
|
+
const startTime = Date.now();
|
|
483
|
+
this.metrics.totalOperations++;
|
|
484
|
+
try {
|
|
485
|
+
// Specific layer requested
|
|
486
|
+
if (options?.layer) {
|
|
487
|
+
switch (options.layer) {
|
|
488
|
+
case StorageLayer.MEMORY:
|
|
489
|
+
await this.l1Memory.delete(key);
|
|
490
|
+
this.updateResponseTime(Date.now() - startTime);
|
|
491
|
+
return true;
|
|
492
|
+
case StorageLayer.REDIS:
|
|
493
|
+
if (this.l2Redis) {
|
|
494
|
+
await this.l2Redis.delete(key);
|
|
495
|
+
}
|
|
496
|
+
this.updateResponseTime(Date.now() - startTime);
|
|
497
|
+
return true;
|
|
498
|
+
case StorageLayer.POSTGRES:
|
|
499
|
+
if (this.l3Postgres) {
|
|
500
|
+
await this.l3Postgres.delete(key);
|
|
501
|
+
}
|
|
502
|
+
this.updateResponseTime(Date.now() - startTime);
|
|
503
|
+
return true;
|
|
504
|
+
}
|
|
505
|
+
}
|
|
506
|
+
// Delete from all layers using Promise.allSettled for resilience
|
|
507
|
+
// Even if some layers fail, we continue with others
|
|
508
|
+
await Promise.allSettled([
|
|
509
|
+
this.l1Memory.delete(key),
|
|
510
|
+
this.l2Redis ? this.l2Redis.delete(key) : Promise.resolve(),
|
|
511
|
+
this.l3Postgres ? this.l3Postgres.delete(key) : Promise.resolve()
|
|
512
|
+
]);
|
|
513
|
+
this.updateResponseTime(Date.now() - startTime);
|
|
514
|
+
return true;
|
|
515
|
+
}
|
|
516
|
+
catch (error) {
|
|
517
|
+
const err = error;
|
|
518
|
+
this.log('error', `Error deleting ${key}`, {
|
|
519
|
+
error: err.message,
|
|
520
|
+
stack: err.stack,
|
|
521
|
+
operation: 'delete',
|
|
522
|
+
key
|
|
523
|
+
});
|
|
524
|
+
this.updateResponseTime(Date.now() - startTime);
|
|
525
|
+
return false;
|
|
526
|
+
}
|
|
527
|
+
}
|
|
528
|
+
/**
|
|
529
|
+
* Check if key exists in any storage layer
|
|
530
|
+
*/
|
|
531
|
+
async exists(key, options = {}) {
|
|
532
|
+
try {
|
|
533
|
+
// Specific layer requested
|
|
534
|
+
if (options?.layer) {
|
|
535
|
+
switch (options.layer) {
|
|
536
|
+
case StorageLayer.MEMORY:
|
|
537
|
+
return await this.l1Memory.exists(key);
|
|
538
|
+
case StorageLayer.REDIS:
|
|
539
|
+
return this.l2Redis ? await this.l2Redis.exists(key) : false;
|
|
540
|
+
case StorageLayer.POSTGRES:
|
|
541
|
+
return this.l3Postgres ? await this.l3Postgres.exists(key) : false;
|
|
542
|
+
}
|
|
543
|
+
}
|
|
544
|
+
// Check L1 (Memory) first
|
|
545
|
+
if (await this.l1Memory.exists(key)) {
|
|
546
|
+
return true;
|
|
547
|
+
}
|
|
548
|
+
// Check L2 (Redis)
|
|
549
|
+
if (this.l2Redis && await this.l2Redis.exists(key)) {
|
|
550
|
+
return true;
|
|
551
|
+
}
|
|
552
|
+
// Check L3 (PostgreSQL)
|
|
553
|
+
if (this.l3Postgres && await this.l3Postgres.exists(key)) {
|
|
554
|
+
return true;
|
|
555
|
+
}
|
|
556
|
+
return false;
|
|
557
|
+
}
|
|
558
|
+
catch (error) {
|
|
559
|
+
const err = error;
|
|
560
|
+
this.log('error', `Error checking existence of ${key}`, {
|
|
561
|
+
error: err.message,
|
|
562
|
+
stack: err.stack,
|
|
563
|
+
operation: 'exists',
|
|
564
|
+
key
|
|
565
|
+
});
|
|
566
|
+
return false;
|
|
567
|
+
}
|
|
568
|
+
}
|
|
569
|
+
/**
|
|
570
|
+
* Set expiration for a key
|
|
571
|
+
*
|
|
572
|
+
* Note: This is a simplified implementation that re-sets the value with new TTL.
|
|
573
|
+
* For a more efficient implementation, layers would need an expire() method.
|
|
574
|
+
*/
|
|
575
|
+
async expire(key, ttl) {
|
|
576
|
+
try {
|
|
577
|
+
// Get current value
|
|
578
|
+
const value = await this.get(key);
|
|
579
|
+
if (value === null) {
|
|
580
|
+
return false;
|
|
581
|
+
}
|
|
582
|
+
// Re-set with new TTL
|
|
583
|
+
return await this.set(key, value, { ttl });
|
|
584
|
+
}
|
|
585
|
+
catch (error) {
|
|
586
|
+
const err = error;
|
|
587
|
+
this.log('error', `Error setting expiration for ${key}`, {
|
|
588
|
+
error: err.message,
|
|
589
|
+
operation: 'expire',
|
|
590
|
+
key,
|
|
591
|
+
ttl
|
|
592
|
+
});
|
|
593
|
+
return false;
|
|
594
|
+
}
|
|
595
|
+
}
|
|
596
|
+
/**
|
|
597
|
+
* Atomically increment a numeric value
|
|
598
|
+
*
|
|
599
|
+
* This method uses layer-specific atomic operations when available,
|
|
600
|
+
* providing thread-safe increments across all storage layers.
|
|
601
|
+
*
|
|
602
|
+
* **Important:** The key must contain a numeric value (or not exist).
|
|
603
|
+
* If the key contains a non-numeric value, the operation will fail:
|
|
604
|
+
* - Redis: Throws error "value is not an integer"
|
|
605
|
+
* - PostgreSQL: Throws error during numeric cast
|
|
606
|
+
* - Memory: Treats non-numeric values as 0
|
|
607
|
+
*
|
|
608
|
+
* The increment cascades through layers:
|
|
609
|
+
* 1. If L3 (PostgreSQL) is available, use its atomic UPSERT
|
|
610
|
+
* 2. Else if L2 (Redis) is available, use INCRBY
|
|
611
|
+
* 3. Else use L1 (Memory) increment
|
|
612
|
+
*
|
|
613
|
+
* After incrementing in the authoritative layer, the new value is
|
|
614
|
+
* propagated to higher layers for cache consistency.
|
|
615
|
+
*
|
|
616
|
+
* @param key - The key to increment (must contain numeric value or not exist)
|
|
617
|
+
* @param delta - The amount to increment by (default: 1)
|
|
618
|
+
* @param ttl - Optional TTL in milliseconds
|
|
619
|
+
* @returns Promise resolving to the new value after increment
|
|
620
|
+
* @throws {Error} If the key contains a non-numeric value (Redis/PostgreSQL)
|
|
621
|
+
* @throws {Error} If no storage layer is available
|
|
622
|
+
*
|
|
623
|
+
* @example
|
|
624
|
+
* ```typescript
|
|
625
|
+
* // Increment counter by 1
|
|
626
|
+
* const newValue = await engine.increment('page_views');
|
|
627
|
+
*
|
|
628
|
+
* // Increment with custom delta
|
|
629
|
+
* const credits = await engine.increment('user:credits', 10);
|
|
630
|
+
*
|
|
631
|
+
* // Decrement (negative delta)
|
|
632
|
+
* const remaining = await engine.increment('inventory', -1);
|
|
633
|
+
* ```
|
|
634
|
+
*/
|
|
635
|
+
    async increment(key, delta = 1, ttl) {
        const startTime = Date.now();
        // Convert the ms TTL to seconds for the layer calls.
        // NOTE(review): ttl === 0 is truthy-checked, so 0 means "no TTL";
        // also ttlSeconds is later passed to l1Memory.set, whose config
        // examples use milliseconds — confirm the unit each layer expects.
        const ttlSeconds = ttl ? Math.floor(ttl / 1000) : undefined;
        let newValue;
        try {
            // Use atomic increment from the most authoritative layer
            // available (L3 > L2 > L1), then push the resulting value up to
            // the faster layers so cached reads stay consistent.
            if (this.l3Postgres?.increment) {
                newValue = await this.l3Postgres.increment(key, delta, ttlSeconds);
                this.metrics.totalOperations++;
                // Propagate to upper layers for cache consistency.
                // NOTE(review): these cache writes are sequential and
                // unguarded — a Redis failure here aborts with a throw even
                // though L3 already holds the new value; confirm intended.
                if (this.l2Redis) {
                    await this.l2Redis.set(key, newValue, ttlSeconds);
                }
                if (this.l1Memory) {
                    await this.l1Memory.set(key, newValue, ttlSeconds);
                }
            }
            else if (this.l2Redis?.increment) {
                newValue = await this.l2Redis.increment(key, delta, ttlSeconds);
                this.metrics.totalOperations++;
                // Propagate to memory layer
                if (this.l1Memory) {
                    await this.l1Memory.set(key, newValue, ttlSeconds);
                }
            }
            else if (this.l1Memory?.increment) {
                newValue = await this.l1Memory.increment(key, delta, ttlSeconds);
                this.metrics.totalOperations++;
            }
            else {
                throw new Error('No storage layer available for increment operation');
            }
            const duration = Date.now() - startTime;
            this.log('debug', `Incremented ${key} by ${delta}`, {
                operation: 'increment',
                key,
                delta,
                newValue,
                duration,
                success: true
            });
            return newValue;
        }
        catch (error) {
            const err = error;
            const duration = Date.now() - startTime;
            this.log('error', `Error incrementing ${key}`, {
                error: err.message,
                operation: 'increment',
                key,
                delta,
                duration,
                success: false
            });
            // Unlike get/set, increment rethrows so callers never act on a
            // missing counter value.
            throw error;
        }
    }
|
|
692
|
+
// Batch operations
|
|
693
|
+
async setBatch(operations) {
|
|
694
|
+
const settled = await Promise.allSettled(operations.map(async (op) => {
|
|
695
|
+
if (op.operation === 'set' && op.value !== undefined) {
|
|
696
|
+
const success = await this.set(op.key, op.value, op.options);
|
|
697
|
+
return { key: op.key, success };
|
|
698
|
+
}
|
|
699
|
+
return { key: op.key, success: false, error: 'Invalid operation' };
|
|
700
|
+
}));
|
|
701
|
+
return settled.map((result, index) => result.status === 'fulfilled'
|
|
702
|
+
? result.value
|
|
703
|
+
: { key: operations[index].key, success: false, error: result.reason.message });
|
|
704
|
+
}
|
|
705
|
+
async getBatch(keys, options = {}) {
|
|
706
|
+
const settled = await Promise.allSettled(keys.map(async (key) => {
|
|
707
|
+
const value = await this.get(key, options);
|
|
708
|
+
return value !== null
|
|
709
|
+
? { key, success: true, value }
|
|
710
|
+
: { key, success: false, value: null };
|
|
711
|
+
}));
|
|
712
|
+
return settled.map((result, index) => result.status === 'fulfilled'
|
|
713
|
+
? result.value
|
|
714
|
+
: { key: keys[index], success: false, error: result.reason.message });
|
|
715
|
+
}
|
|
716
|
+
async deleteBatch(keys) {
|
|
717
|
+
const settled = await Promise.allSettled(keys.map(async (key) => {
|
|
718
|
+
const existed = await this.exists(key);
|
|
719
|
+
if (existed) {
|
|
720
|
+
const success = await this.delete(key);
|
|
721
|
+
return { key, success };
|
|
722
|
+
}
|
|
723
|
+
return { key, success: false };
|
|
724
|
+
}));
|
|
725
|
+
return settled.map((result, index) => result.status === 'fulfilled'
|
|
726
|
+
? result.value
|
|
727
|
+
: { key: keys[index], success: false, error: result.reason.message });
|
|
728
|
+
}
|
|
729
|
+
// Query operations
|
|
730
|
+
/**
 * Query stored items by key pattern, with optional sorting and pagination.
 *
 * Enumerates matching keys via keys(), fetches each value sequentially,
 * then applies applySorting() and applyPagination() to the collected items.
 *
 * @param options - { pattern?, sortBy?, sortOrder?, offset?, limit? }
 * @returns Promise resolving to { items, total, hasMore } where `total` is
 *   the count of matches BEFORE pagination.
 */
async query(options) {
    // This is a basic implementation - can be enhanced based on needs
    const keys = await this.keys(options.pattern);
    const items = [];
    // Values are fetched one at a time; for large result sets this is O(n)
    // sequential round-trips to the storage layers.
    for (const key of keys) {
        const value = await this.get(key);
        if (value !== null) {
            items.push({
                key,
                value,
                // NOTE(review): `value` is duplicated inside metadata and
                // `createdAt` is fabricated at query time — true creation
                // timestamps are not tracked yet.
                metadata: {
                    value,
                    createdAt: new Date(), // This would need to be tracked
                    // Falls back to MEMORY when no layer reports the key
                    // (e.g. it expired between keys() and here).
                    layer: (await this.getLayerInfo(key))?.layer || StorageLayer.MEMORY
                }
            });
        }
    }
    // Apply sorting and pagination
    const sortedItems = this.applySorting(items, options);
    const paginatedItems = this.applyPagination(sortedItems, options);
    return {
        items: paginatedItems,
        total: items.length,
        // With no explicit limit the whole set is returned, so hasMore is
        // false; otherwise true when offset+limit stops short of the total.
        hasMore: (options.offset || 0) + (options.limit || items.length) < items.length
    };
}
|
|
757
|
+
/**
|
|
758
|
+
* Get all keys matching a pattern across all storage layers
|
|
759
|
+
*
|
|
760
|
+
* Collects keys from all available layers (Memory, Redis, PostgreSQL) and
|
|
761
|
+
* returns deduplicated results. Currently implemented for the Memory layer;
|
|
762
|
+
* Redis and PostgreSQL layer support will be added in future versions.
|
|
763
|
+
*
|
|
764
|
+
* @param pattern - Glob pattern for key matching (default: '*' returns all keys)
|
|
765
|
+
* @returns Promise resolving to array of unique matching keys
|
|
766
|
+
*
|
|
767
|
+
* @since 1.0.0
|
|
768
|
+
*/
|
|
769
|
+
async keys(pattern = '*') {
|
|
770
|
+
const allKeys = new Set();
|
|
771
|
+
// Collect keys from Memory (L1) - always available
|
|
772
|
+
if (this.l1Memory.keys) {
|
|
773
|
+
try {
|
|
774
|
+
const memoryKeys = await this.l1Memory.keys(pattern);
|
|
775
|
+
for (const key of memoryKeys) {
|
|
776
|
+
allKeys.add(key);
|
|
777
|
+
}
|
|
778
|
+
}
|
|
779
|
+
catch (error) {
|
|
780
|
+
this.log('warn', 'Error collecting keys from Memory L1', {
|
|
781
|
+
error: error instanceof Error ? error.message : String(error)
|
|
782
|
+
});
|
|
783
|
+
}
|
|
784
|
+
}
|
|
785
|
+
// Collect keys from Redis (L2) - if keys() is implemented
|
|
786
|
+
const l2Layer = this.l2Redis;
|
|
787
|
+
if (l2Layer?.keys) {
|
|
788
|
+
try {
|
|
789
|
+
const redisKeys = await l2Layer.keys(pattern);
|
|
790
|
+
for (const key of redisKeys) {
|
|
791
|
+
allKeys.add(key);
|
|
792
|
+
}
|
|
793
|
+
}
|
|
794
|
+
catch (error) {
|
|
795
|
+
this.log('warn', 'Error collecting keys from Redis L2', {
|
|
796
|
+
error: error instanceof Error ? error.message : String(error)
|
|
797
|
+
});
|
|
798
|
+
}
|
|
799
|
+
}
|
|
800
|
+
// Collect keys from PostgreSQL (L3) - if keys() is implemented
|
|
801
|
+
const l3Layer = this.l3Postgres;
|
|
802
|
+
if (l3Layer?.keys) {
|
|
803
|
+
try {
|
|
804
|
+
const pgKeys = await l3Layer.keys(pattern);
|
|
805
|
+
for (const key of pgKeys) {
|
|
806
|
+
allKeys.add(key);
|
|
807
|
+
}
|
|
808
|
+
}
|
|
809
|
+
catch (error) {
|
|
810
|
+
this.log('warn', 'Error collecting keys from PostgreSQL L3', {
|
|
811
|
+
error: error instanceof Error ? error.message : String(error)
|
|
812
|
+
});
|
|
813
|
+
}
|
|
814
|
+
}
|
|
815
|
+
return Array.from(allKeys);
|
|
816
|
+
}
|
|
817
|
+
async clear(pattern = '*') {
|
|
818
|
+
let cleared = 0;
|
|
819
|
+
// For now, only support clearing all (pattern = '*')
|
|
820
|
+
// Pattern-based clearing would require keys() implementation in each layer
|
|
821
|
+
if (pattern === '*') {
|
|
822
|
+
// Clear memory (L1)
|
|
823
|
+
cleared = this.l1Memory.size();
|
|
824
|
+
await this.l1Memory.clear();
|
|
825
|
+
// Clear Redis (L2) - best effort
|
|
826
|
+
if (this.l2Redis) {
|
|
827
|
+
try {
|
|
828
|
+
await this.l2Redis.clear();
|
|
829
|
+
}
|
|
830
|
+
catch (error) {
|
|
831
|
+
this.log('warn', 'Error clearing Redis L2', {
|
|
832
|
+
error: error instanceof Error ? error.message : String(error)
|
|
833
|
+
});
|
|
834
|
+
}
|
|
835
|
+
}
|
|
836
|
+
// Clear PostgreSQL (L3) - best effort
|
|
837
|
+
if (this.l3Postgres) {
|
|
838
|
+
try {
|
|
839
|
+
await this.l3Postgres.clear();
|
|
840
|
+
}
|
|
841
|
+
catch (error) {
|
|
842
|
+
this.log('warn', 'Error clearing PostgreSQL L3', {
|
|
843
|
+
error: error instanceof Error ? error.message : String(error)
|
|
844
|
+
});
|
|
845
|
+
}
|
|
846
|
+
}
|
|
847
|
+
}
|
|
848
|
+
else {
|
|
849
|
+
// Pattern-based clearing - iterate through keys
|
|
850
|
+
const keys = await this.keys(pattern);
|
|
851
|
+
for (const key of keys) {
|
|
852
|
+
await this.delete(key);
|
|
853
|
+
cleared++;
|
|
854
|
+
}
|
|
855
|
+
}
|
|
856
|
+
return cleared;
|
|
857
|
+
}
|
|
858
|
+
// Metrics and monitoring
|
|
859
|
+
/**
|
|
860
|
+
* Get performance metrics for all layers or specific layer(s)
|
|
861
|
+
*
|
|
862
|
+
* Returns metrics about storage operations and performance. Can be filtered
|
|
863
|
+
* to return metrics for specific layers only.
|
|
864
|
+
*
|
|
865
|
+
* **Metrics by Layer:**
|
|
866
|
+
* - Memory: memoryHits, memoryMisses, memorySize, memoryMaxSize
|
|
867
|
+
* - Redis: redisHits, redisMisses
|
|
868
|
+
* - PostgreSQL: postgresHits, postgresMisses
|
|
869
|
+
* - Overall: totalOperations, cacheHitRatio, averageResponseTime
|
|
870
|
+
*
|
|
871
|
+
* @param layers - Optional layer(s) to get metrics for. If not provided, returns all metrics.
|
|
872
|
+
* Can be a single layer string, 'all', or array of layer strings.
|
|
873
|
+
* @returns Object containing requested metrics
|
|
874
|
+
*
|
|
875
|
+
* @example
|
|
876
|
+
* ```typescript
|
|
877
|
+
* // Get all metrics
|
|
878
|
+
* const metrics = engine.getMetrics();
|
|
879
|
+
* // { memoryHits, memoryMisses, redisHits, redisMisses, postgresHits, ... }
|
|
880
|
+
*
|
|
881
|
+
* // Get specific layer metrics
|
|
882
|
+
* const memoryMetrics = engine.getMetrics('memory');
|
|
883
|
+
* // { memoryHits, memoryMisses, memorySize, memoryMaxSize }
|
|
884
|
+
*
|
|
885
|
+
* // Get multiple layer metrics
|
|
886
|
+
* const cacheMetrics = engine.getMetrics(['memory', 'redis']);
|
|
887
|
+
* // { memoryHits, memoryMisses, memorySize, memoryMaxSize, redisHits, redisMisses, totalOperations, averageResponseTime, cacheHitRatio }
|
|
888
|
+
* ```
|
|
889
|
+
*
|
|
890
|
+
* @since 1.0.0
|
|
891
|
+
* @public
|
|
892
|
+
*/
|
|
893
|
+
getMetrics(layers) {
|
|
894
|
+
// If no layers specified or 'all' specified, return all metrics
|
|
895
|
+
if (!layers || layers === 'all') {
|
|
896
|
+
return {
|
|
897
|
+
...this.metrics,
|
|
898
|
+
memorySize: this.l1Memory.size(),
|
|
899
|
+
memoryMaxSize: this.l1Memory.getMaxSize(),
|
|
900
|
+
cacheHitRatio: this.calculateCacheHitRatio()
|
|
901
|
+
};
|
|
902
|
+
}
|
|
903
|
+
// Normalize layers to an array
|
|
904
|
+
const layersToGet = typeof layers === 'string' ? [layers] : layers;
|
|
905
|
+
// Build filtered metrics object
|
|
906
|
+
const filteredMetrics = {};
|
|
907
|
+
for (const layer of layersToGet) {
|
|
908
|
+
switch (layer) {
|
|
909
|
+
case 'memory':
|
|
910
|
+
filteredMetrics.memoryHits = this.metrics.memoryHits;
|
|
911
|
+
filteredMetrics.memoryMisses = this.metrics.memoryMisses;
|
|
912
|
+
filteredMetrics.memorySize = this.l1Memory.size();
|
|
913
|
+
filteredMetrics.memoryMaxSize = this.l1Memory.getMaxSize();
|
|
914
|
+
break;
|
|
915
|
+
case 'redis':
|
|
916
|
+
filteredMetrics.redisHits = this.metrics.redisHits;
|
|
917
|
+
filteredMetrics.redisMisses = this.metrics.redisMisses;
|
|
918
|
+
break;
|
|
919
|
+
case 'postgres':
|
|
920
|
+
filteredMetrics.postgresHits = this.metrics.postgresHits;
|
|
921
|
+
filteredMetrics.postgresMisses = this.metrics.postgresMisses;
|
|
922
|
+
break;
|
|
923
|
+
}
|
|
924
|
+
}
|
|
925
|
+
// Add overall metrics if multiple layers are requested
|
|
926
|
+
if (layersToGet.length > 1) {
|
|
927
|
+
filteredMetrics.totalOperations = this.metrics.totalOperations;
|
|
928
|
+
filteredMetrics.averageResponseTime = this.metrics.averageResponseTime;
|
|
929
|
+
filteredMetrics.cacheHitRatio = this.calculateCacheHitRatio(layersToGet);
|
|
930
|
+
}
|
|
931
|
+
return filteredMetrics;
|
|
932
|
+
}
|
|
933
|
+
resetMetrics() {
|
|
934
|
+
this.metrics = {
|
|
935
|
+
memoryHits: 0,
|
|
936
|
+
memoryMisses: 0,
|
|
937
|
+
redisHits: 0,
|
|
938
|
+
redisMisses: 0,
|
|
939
|
+
postgresHits: 0,
|
|
940
|
+
postgresMisses: 0,
|
|
941
|
+
totalOperations: 0,
|
|
942
|
+
averageResponseTime: 0
|
|
943
|
+
};
|
|
944
|
+
}
|
|
945
|
+
/**
|
|
946
|
+
* Perform health check on all storage layers or specific layer(s)
|
|
947
|
+
*
|
|
948
|
+
* Uses Promise.allSettled to check all layers independently without
|
|
949
|
+
* failing if one layer is down. Each layer's ping() method is called
|
|
950
|
+
* to verify its operational status.
|
|
951
|
+
*
|
|
952
|
+
* **Layer Health Checks:**
|
|
953
|
+
* - Memory (L1): Always healthy if app is running
|
|
954
|
+
* - Redis (L2): PING command verification
|
|
955
|
+
* - PostgreSQL (L3): SELECT 1 query verification
|
|
956
|
+
*
|
|
957
|
+
* @param layers - Optional layer(s) to check. If not provided, checks all layers.
|
|
958
|
+
* Can be a single layer string or array of layer strings.
|
|
959
|
+
* @returns Promise resolving to health status of requested layer(s)
|
|
960
|
+
*
|
|
961
|
+
* @example
|
|
962
|
+
* ```typescript
|
|
963
|
+
* // Check all layers
|
|
964
|
+
* const health = await engine.healthCheck();
|
|
965
|
+
* // { memory: true, redis: true, postgres: true }
|
|
966
|
+
*
|
|
967
|
+
* // Check specific layer
|
|
968
|
+
* const redisHealth = await engine.healthCheck('redis');
|
|
969
|
+
* // { redis: true }
|
|
970
|
+
*
|
|
971
|
+
* // Check multiple specific layers
|
|
972
|
+
* const cacheHealth = await engine.healthCheck(['memory', 'redis']);
|
|
973
|
+
* // { memory: true, redis: true }
|
|
974
|
+
*
|
|
975
|
+
* if (!health.redis) {
|
|
976
|
+
* console.warn('Redis layer is down, degraded performance expected');
|
|
977
|
+
* }
|
|
978
|
+
* ```
|
|
979
|
+
*
|
|
980
|
+
* @since 1.0.0
|
|
981
|
+
* @public
|
|
982
|
+
*/
|
|
983
|
+
async healthCheck(layers) {
|
|
984
|
+
// Normalize layers to an array
|
|
985
|
+
let layersToCheck;
|
|
986
|
+
if (!layers) {
|
|
987
|
+
// No layers specified - check all
|
|
988
|
+
layersToCheck = ['memory', 'redis', 'postgres'];
|
|
989
|
+
}
|
|
990
|
+
else if (typeof layers === 'string') {
|
|
991
|
+
// Single layer specified
|
|
992
|
+
layersToCheck = [layers];
|
|
993
|
+
}
|
|
994
|
+
else {
|
|
995
|
+
// Array of layers specified
|
|
996
|
+
layersToCheck = layers;
|
|
997
|
+
}
|
|
998
|
+
// Build promises only for requested layers
|
|
999
|
+
const promises = [];
|
|
1000
|
+
const layerNames = [];
|
|
1001
|
+
for (const layer of layersToCheck) {
|
|
1002
|
+
switch (layer) {
|
|
1003
|
+
case 'memory':
|
|
1004
|
+
promises.push(this.l1Memory.ping());
|
|
1005
|
+
layerNames.push('memory');
|
|
1006
|
+
break;
|
|
1007
|
+
case 'redis':
|
|
1008
|
+
promises.push(this.l2Redis ? this.l2Redis.ping() : Promise.resolve(false));
|
|
1009
|
+
layerNames.push('redis');
|
|
1010
|
+
break;
|
|
1011
|
+
case 'postgres':
|
|
1012
|
+
promises.push(this.l3Postgres ? this.l3Postgres.ping() : Promise.resolve(false));
|
|
1013
|
+
layerNames.push('postgres');
|
|
1014
|
+
break;
|
|
1015
|
+
}
|
|
1016
|
+
}
|
|
1017
|
+
const results = await Promise.allSettled(promises);
|
|
1018
|
+
// Build result object with only requested layers
|
|
1019
|
+
const healthStatus = {};
|
|
1020
|
+
for (let i = 0; i < layerNames.length; i++) {
|
|
1021
|
+
const result = results[i];
|
|
1022
|
+
healthStatus[layerNames[i]] = result.status === 'fulfilled' && result.value === true;
|
|
1023
|
+
}
|
|
1024
|
+
return healthStatus;
|
|
1025
|
+
}
|
|
1026
|
+
// Layer management
|
|
1027
|
+
async promote(key, targetLayer) {
|
|
1028
|
+
try {
|
|
1029
|
+
const value = await this.get(key);
|
|
1030
|
+
if (value === null)
|
|
1031
|
+
return false;
|
|
1032
|
+
switch (targetLayer) {
|
|
1033
|
+
case StorageLayer.MEMORY:
|
|
1034
|
+
await this.l1Memory.set(key, value);
|
|
1035
|
+
return true;
|
|
1036
|
+
case StorageLayer.REDIS:
|
|
1037
|
+
if (this.l2Redis) {
|
|
1038
|
+
await this.l2Redis.set(key, value);
|
|
1039
|
+
return true;
|
|
1040
|
+
}
|
|
1041
|
+
return false;
|
|
1042
|
+
case StorageLayer.POSTGRES:
|
|
1043
|
+
if (this.l3Postgres) {
|
|
1044
|
+
await this.l3Postgres.set(key, value);
|
|
1045
|
+
return true;
|
|
1046
|
+
}
|
|
1047
|
+
return false;
|
|
1048
|
+
default:
|
|
1049
|
+
return false;
|
|
1050
|
+
}
|
|
1051
|
+
}
|
|
1052
|
+
catch (error) {
|
|
1053
|
+
this.log('error', `Error promoting ${key} to ${targetLayer}`, {
|
|
1054
|
+
error: error instanceof Error ? error.message : String(error)
|
|
1055
|
+
});
|
|
1056
|
+
return false;
|
|
1057
|
+
}
|
|
1058
|
+
}
|
|
1059
|
+
async demote(key, targetLayer) {
|
|
1060
|
+
// For now, demote means remove from higher layers
|
|
1061
|
+
try {
|
|
1062
|
+
switch (targetLayer) {
|
|
1063
|
+
case StorageLayer.POSTGRES:
|
|
1064
|
+
// Remove from memory (L1) and Redis (L2)
|
|
1065
|
+
await this.l1Memory.delete(key);
|
|
1066
|
+
if (this.l2Redis) {
|
|
1067
|
+
await this.l2Redis.delete(key);
|
|
1068
|
+
}
|
|
1069
|
+
return true;
|
|
1070
|
+
case StorageLayer.REDIS:
|
|
1071
|
+
// Remove from memory (L1) only
|
|
1072
|
+
await this.l1Memory.delete(key);
|
|
1073
|
+
return true;
|
|
1074
|
+
default:
|
|
1075
|
+
return false;
|
|
1076
|
+
}
|
|
1077
|
+
}
|
|
1078
|
+
catch (error) {
|
|
1079
|
+
this.log('error', `Error demoting ${key} to ${targetLayer}`, {
|
|
1080
|
+
error: error instanceof Error ? error.message : String(error)
|
|
1081
|
+
});
|
|
1082
|
+
return false;
|
|
1083
|
+
}
|
|
1084
|
+
}
|
|
1085
|
+
async getLayerInfo(key) {
|
|
1086
|
+
// Check which layer has the key (L1 → L2 → L3)
|
|
1087
|
+
if (await this.l1Memory.exists(key)) {
|
|
1088
|
+
return { layer: StorageLayer.MEMORY };
|
|
1089
|
+
}
|
|
1090
|
+
if (this.l2Redis && await this.l2Redis.exists(key)) {
|
|
1091
|
+
return { layer: StorageLayer.REDIS };
|
|
1092
|
+
}
|
|
1093
|
+
if (this.l3Postgres && await this.l3Postgres.exists(key)) {
|
|
1094
|
+
return { layer: StorageLayer.POSTGRES };
|
|
1095
|
+
}
|
|
1096
|
+
return null;
|
|
1097
|
+
}
|
|
1098
|
+
// Private helper methods
|
|
1099
|
+
// Memory cleanup
|
|
1100
|
+
startMemoryCleanup() {
|
|
1101
|
+
const memoryTTL = this.config.memory?.ttl || 24 * 60 * 60 * 1000; // 24 hours default
|
|
1102
|
+
const cleanupInterval = memoryTTL / 24; // Clean up 24 times per TTL period
|
|
1103
|
+
this.cleanupInterval = setInterval(async () => {
|
|
1104
|
+
try {
|
|
1105
|
+
const cleaned = await this.l1Memory.cleanup();
|
|
1106
|
+
if (cleaned > 0) {
|
|
1107
|
+
this.log('debug', `Memory L1 cleanup completed - removed ${cleaned} expired entries`);
|
|
1108
|
+
}
|
|
1109
|
+
}
|
|
1110
|
+
catch (error) {
|
|
1111
|
+
this.log('error', 'Memory cleanup error', {
|
|
1112
|
+
error: error instanceof Error ? error.message : String(error)
|
|
1113
|
+
});
|
|
1114
|
+
}
|
|
1115
|
+
}, cleanupInterval);
|
|
1116
|
+
}
|
|
1117
|
+
// Utility methods
|
|
1118
|
+
updateResponseTime(duration) {
|
|
1119
|
+
// Exponential moving average with smoothing factor alpha
|
|
1120
|
+
const alpha = 0.2; // Smoothing factor (adjustable)
|
|
1121
|
+
this.metrics.averageResponseTime =
|
|
1122
|
+
alpha * duration + (1 - alpha) * this.metrics.averageResponseTime;
|
|
1123
|
+
}
|
|
1124
|
+
calculateCacheHitRatio(layers) {
|
|
1125
|
+
let totalHits = 0;
|
|
1126
|
+
let totalMisses = 0;
|
|
1127
|
+
// If no layers specified, calculate for all layers
|
|
1128
|
+
if (!layers) {
|
|
1129
|
+
totalHits = this.metrics.memoryHits + this.metrics.redisHits + this.metrics.postgresHits;
|
|
1130
|
+
totalMisses = this.metrics.memoryMisses + this.metrics.redisMisses + this.metrics.postgresMisses;
|
|
1131
|
+
}
|
|
1132
|
+
else {
|
|
1133
|
+
// Calculate only for specified layers
|
|
1134
|
+
for (const layer of layers) {
|
|
1135
|
+
switch (layer) {
|
|
1136
|
+
case 'memory':
|
|
1137
|
+
totalHits += this.metrics.memoryHits;
|
|
1138
|
+
totalMisses += this.metrics.memoryMisses;
|
|
1139
|
+
break;
|
|
1140
|
+
case 'redis':
|
|
1141
|
+
totalHits += this.metrics.redisHits;
|
|
1142
|
+
totalMisses += this.metrics.redisMisses;
|
|
1143
|
+
break;
|
|
1144
|
+
case 'postgres':
|
|
1145
|
+
totalHits += this.metrics.postgresHits;
|
|
1146
|
+
totalMisses += this.metrics.postgresMisses;
|
|
1147
|
+
break;
|
|
1148
|
+
}
|
|
1149
|
+
}
|
|
1150
|
+
}
|
|
1151
|
+
return totalHits + totalMisses > 0 ? totalHits / (totalHits + totalMisses) : 0;
|
|
1152
|
+
}
|
|
1153
|
+
applySorting(items, options) {
|
|
1154
|
+
if (!options.sortBy)
|
|
1155
|
+
return items;
|
|
1156
|
+
return items.sort((a, b) => {
|
|
1157
|
+
let aVal, bVal;
|
|
1158
|
+
switch (options.sortBy) {
|
|
1159
|
+
case 'key':
|
|
1160
|
+
aVal = a.key;
|
|
1161
|
+
bVal = b.key;
|
|
1162
|
+
break;
|
|
1163
|
+
case 'createdAt':
|
|
1164
|
+
aVal = a.metadata.createdAt;
|
|
1165
|
+
bVal = b.metadata.createdAt;
|
|
1166
|
+
break;
|
|
1167
|
+
default:
|
|
1168
|
+
return 0;
|
|
1169
|
+
}
|
|
1170
|
+
const order = options.sortOrder === 'desc' ? -1 : 1;
|
|
1171
|
+
return aVal < bVal ? -order : aVal > bVal ? order : 0;
|
|
1172
|
+
});
|
|
1173
|
+
}
|
|
1174
|
+
applyPagination(items, options) {
|
|
1175
|
+
const offset = options.offset || 0;
|
|
1176
|
+
const limit = options.limit;
|
|
1177
|
+
if (limit) {
|
|
1178
|
+
return items.slice(offset, offset + limit);
|
|
1179
|
+
}
|
|
1180
|
+
return items.slice(offset);
|
|
1181
|
+
}
|
|
1182
|
+
// Legacy compatibility methods (for migration)
|
|
1183
|
+
getStats() {
|
|
1184
|
+
return {
|
|
1185
|
+
memoryKeys: this.l1Memory.size(),
|
|
1186
|
+
connected: this.connected,
|
|
1187
|
+
layers: {
|
|
1188
|
+
memory: true,
|
|
1189
|
+
redis: this.l2Redis ? this.l2Redis.isConnected() : false,
|
|
1190
|
+
postgres: this.l3Postgres ? this.l3Postgres.isConnected() : false
|
|
1191
|
+
}
|
|
1192
|
+
};
|
|
1193
|
+
}
|
|
1194
|
+
async cleanupExpiredMemory() {
|
|
1195
|
+
return await this.l1Memory.cleanup();
|
|
1196
|
+
}
|
|
1197
|
+
}
|
|
1198
|
+
//# sourceMappingURL=engine.js.map
|