@wgtechlabs/nuvex 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +427 -0
- package/dist/.tsbuildinfo +1 -0
- package/dist/cjs/core/client.js +981 -0
- package/dist/cjs/core/client.js.map +1 -0
- package/dist/cjs/core/database.js +297 -0
- package/dist/cjs/core/database.js.map +1 -0
- package/dist/cjs/core/engine.js +1202 -0
- package/dist/cjs/core/engine.js.map +1 -0
- package/dist/cjs/core/index.js +35 -0
- package/dist/cjs/core/index.js.map +1 -0
- package/dist/cjs/index.js +109 -0
- package/dist/cjs/index.js.map +1 -0
- package/dist/cjs/interfaces/index.js +12 -0
- package/dist/cjs/interfaces/index.js.map +1 -0
- package/dist/cjs/layers/index.js +22 -0
- package/dist/cjs/layers/index.js.map +1 -0
- package/dist/cjs/layers/memory.js +388 -0
- package/dist/cjs/layers/memory.js.map +1 -0
- package/dist/cjs/layers/postgres.js +492 -0
- package/dist/cjs/layers/postgres.js.map +1 -0
- package/dist/cjs/layers/redis.js +388 -0
- package/dist/cjs/layers/redis.js.map +1 -0
- package/dist/cjs/types/index.js +52 -0
- package/dist/cjs/types/index.js.map +1 -0
- package/dist/esm/core/client.js +944 -0
- package/dist/esm/core/client.js.map +1 -0
- package/dist/esm/core/database.js +289 -0
- package/dist/esm/core/database.js.map +1 -0
- package/dist/esm/core/engine.js +1198 -0
- package/dist/esm/core/engine.js.map +1 -0
- package/dist/esm/core/index.js +16 -0
- package/dist/esm/core/index.js.map +1 -0
- package/dist/esm/index.js +87 -0
- package/dist/esm/index.js.map +1 -0
- package/dist/esm/interfaces/index.js +11 -0
- package/dist/esm/interfaces/index.js.map +1 -0
- package/dist/esm/layers/index.js +16 -0
- package/dist/esm/layers/index.js.map +1 -0
- package/dist/esm/layers/memory.js +384 -0
- package/dist/esm/layers/memory.js.map +1 -0
- package/dist/esm/layers/postgres.js +485 -0
- package/dist/esm/layers/postgres.js.map +1 -0
- package/dist/esm/layers/redis.js +384 -0
- package/dist/esm/layers/redis.js.map +1 -0
- package/dist/esm/types/index.js +49 -0
- package/dist/esm/types/index.js.map +1 -0
- package/dist/types/core/client.d.ts +561 -0
- package/dist/types/core/client.d.ts.map +1 -0
- package/dist/types/core/database.d.ts +130 -0
- package/dist/types/core/database.d.ts.map +1 -0
- package/dist/types/core/engine.d.ts +450 -0
- package/dist/types/core/engine.d.ts.map +1 -0
- package/dist/types/core/index.d.ts +13 -0
- package/dist/types/core/index.d.ts.map +1 -0
- package/dist/types/index.d.ts +85 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/interfaces/index.d.ts +209 -0
- package/dist/types/interfaces/index.d.ts.map +1 -0
- package/dist/types/layers/index.d.ts +16 -0
- package/dist/types/layers/index.d.ts.map +1 -0
- package/dist/types/layers/memory.d.ts +261 -0
- package/dist/types/layers/memory.d.ts.map +1 -0
- package/dist/types/layers/postgres.d.ts +313 -0
- package/dist/types/layers/postgres.d.ts.map +1 -0
- package/dist/types/layers/redis.d.ts +248 -0
- package/dist/types/layers/redis.d.ts.map +1 -0
- package/dist/types/types/index.d.ts +410 -0
- package/dist/types/types/index.d.ts.map +1 -0
- package/package.json +90 -0
|
@@ -0,0 +1,944 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Nuvex - Client Implementation
|
|
3
|
+
* Next-gen Unified Vault Experience
|
|
4
|
+
*
|
|
5
|
+
* High-level client operations for any Node.js application using the StorageEngine
|
|
6
|
+
* multi-layer architecture. Provides application-centric methods for storing and
|
|
7
|
+
* retrieving data with built-in health checks, metrics, and maintenance operations.
|
|
8
|
+
*
|
|
9
|
+
* Core Features:
|
|
10
|
+
* - Generic key-value operations with intelligent caching
|
|
11
|
+
* - Health monitoring and diagnostics
|
|
12
|
+
* - Automatic cleanup and maintenance
|
|
13
|
+
* - Configuration management
|
|
14
|
+
* - Backup and restore capabilities
|
|
15
|
+
*
|
|
16
|
+
* @author Waren Gonzaga, WG Technology Labs
|
|
17
|
+
* @since 2025
|
|
18
|
+
*/
|
|
19
|
+
import { StorageEngine } from './engine.js';
|
|
20
|
+
/**
|
|
21
|
+
* Nuvex Client - High-level storage operations
|
|
22
|
+
*
|
|
23
|
+
* Provides a high-level interface for interacting with the multi-layer storage
|
|
24
|
+
* architecture. Implements the Store interface with additional convenience methods,
|
|
25
|
+
* health monitoring, backup/restore capabilities, and singleton pattern support.
|
|
26
|
+
*
|
|
27
|
+
* @example
|
|
28
|
+
* ```typescript
|
|
29
|
+
* // Initialize as singleton
|
|
30
|
+
* const client = await NuvexClient.initialize({
|
|
31
|
+
* postgres: { host: 'localhost', port: 5432, database: 'myapp' },
|
|
32
|
+
* redis: { url: 'redis://localhost:6379' },
|
|
33
|
+
* memory: { ttl: 3600000, maxSize: 10000 }
|
|
34
|
+
* });
|
|
35
|
+
*
|
|
36
|
+
* // Store and retrieve data
|
|
37
|
+
* await client.set('user:123', { name: 'John', email: 'john@example.com' });
|
|
38
|
+
* const user = await client.get('user:123');
|
|
39
|
+
*
|
|
40
|
+
* // Use namespacing
|
|
41
|
+
* await client.setNamespaced('users', '123', userData);
|
|
42
|
+
* const userData = await client.getNamespaced('users', '123');
|
|
43
|
+
*
|
|
44
|
+
* // Perform health checks
|
|
45
|
+
* const health = await client.healthCheck();
|
|
46
|
+
* console.log('Storage layers healthy:', health.overall);
|
|
47
|
+
* ```
|
|
48
|
+
*
|
|
49
|
+
* @class NuvexClient
|
|
50
|
+
* @implements {IStore}
|
|
51
|
+
* @author Waren Gonzaga, WG Technology Labs
|
|
52
|
+
* @since 2025
|
|
53
|
+
*/
|
|
54
|
+
export class NuvexClient {
|
|
55
|
+
constructor(config) {
|
|
56
|
+
this.config = config;
|
|
57
|
+
this.logger = config.logging?.enabled ? (config.logging.logger || null) : null;
|
|
58
|
+
this.storage = new StorageEngine(config);
|
|
59
|
+
}
|
|
60
|
+
log(level, message, meta) {
|
|
61
|
+
if (this.logger) {
|
|
62
|
+
this.logger[level](message, meta);
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
/**
|
|
66
|
+
* Initialize the Store singleton instance
|
|
67
|
+
*
|
|
68
|
+
* Creates a new NuvexClient instance if one doesn't exist and connects to all
|
|
69
|
+
* configured storage layers. This method ensures only one instance exists
|
|
70
|
+
* throughout the application lifecycle.
|
|
71
|
+
*
|
|
72
|
+
* @param config - Configuration object for all storage layers
|
|
73
|
+
* @returns Promise that resolves to the initialized NuvexClient instance
|
|
74
|
+
*
|
|
75
|
+
* @example
|
|
76
|
+
* ```typescript
|
|
77
|
+
* const client = await NuvexClient.initialize({
|
|
78
|
+
* postgres: { host: 'localhost', port: 5432, database: 'myapp' },
|
|
79
|
+
* redis: { url: 'redis://localhost:6379' },
|
|
80
|
+
* memory: { ttl: 3600000, maxSize: 10000 }
|
|
81
|
+
* });
|
|
82
|
+
* ```
|
|
83
|
+
*
|
|
84
|
+
* @since 1.0.0
|
|
85
|
+
*/
|
|
86
|
+
static async initialize(config) {
|
|
87
|
+
if (!NuvexClient.instance) {
|
|
88
|
+
NuvexClient.instance = new NuvexClient(config);
|
|
89
|
+
await NuvexClient.instance.storage.connect();
|
|
90
|
+
}
|
|
91
|
+
return NuvexClient.instance;
|
|
92
|
+
}
|
|
93
|
+
/**
|
|
94
|
+
* Get the singleton instance
|
|
95
|
+
*
|
|
96
|
+
* Returns the existing NuvexClient instance. Must be called after initialize().
|
|
97
|
+
*
|
|
98
|
+
* @returns The singleton NuvexClient instance
|
|
99
|
+
* @throws {Error} If the store has not been initialized
|
|
100
|
+
*
|
|
101
|
+
* @example
|
|
102
|
+
* ```typescript
|
|
103
|
+
* // After initialization
|
|
104
|
+
* const client = NuvexClient.getInstance();
|
|
105
|
+
* await client.set('key', 'value');
|
|
106
|
+
* ```
|
|
107
|
+
*
|
|
108
|
+
* @since 1.0.0
|
|
109
|
+
*/
|
|
110
|
+
static getInstance() {
|
|
111
|
+
if (!NuvexClient.instance) {
|
|
112
|
+
throw new Error('Store not initialized. Call NuvexClient.initialize() first.');
|
|
113
|
+
}
|
|
114
|
+
return NuvexClient.instance;
|
|
115
|
+
}
|
|
116
|
+
/**
|
|
117
|
+
* Create a new Store instance (non-singleton)
|
|
118
|
+
*
|
|
119
|
+
* Creates a new NuvexClient instance without affecting the singleton.
|
|
120
|
+
* Useful for testing or when multiple isolated instances are needed.
|
|
121
|
+
*
|
|
122
|
+
* @param config - Configuration object for all storage layers
|
|
123
|
+
* @returns Promise that resolves to a new NuvexClient instance
|
|
124
|
+
*
|
|
125
|
+
* @example
|
|
126
|
+
* ```typescript
|
|
127
|
+
* const testClient = await NuvexClient.create({
|
|
128
|
+
* postgres: testDbConfig,
|
|
129
|
+
* memory: { ttl: 1000 }
|
|
130
|
+
* });
|
|
131
|
+
* ```
|
|
132
|
+
*
|
|
133
|
+
* @since 1.0.0
|
|
134
|
+
*/
|
|
135
|
+
static async create(config) {
|
|
136
|
+
const store = new NuvexClient(config);
|
|
137
|
+
await store.storage.connect();
|
|
138
|
+
return store;
|
|
139
|
+
}
|
|
140
|
+
// Connection management
|
|
141
|
+
/**
|
|
142
|
+
* Connect to all configured storage layers
|
|
143
|
+
*
|
|
144
|
+
* Establishes connections to PostgreSQL, Redis (if configured), and initializes
|
|
145
|
+
* the memory cache. This method is automatically called by initialize() and create().
|
|
146
|
+
*
|
|
147
|
+
* @returns Promise that resolves when all connections are established
|
|
148
|
+
* @throws {Error} If any required storage layer fails to connect
|
|
149
|
+
*
|
|
150
|
+
* @since 1.0.0
|
|
151
|
+
*/
|
|
152
|
+
async connect() {
|
|
153
|
+
await this.storage.connect();
|
|
154
|
+
this.log('info', 'Nuvex Client connected');
|
|
155
|
+
}
|
|
156
|
+
/**
|
|
157
|
+
* Disconnect from all storage layers
|
|
158
|
+
*
|
|
159
|
+
* Cleanly closes all connections and clears the memory cache.
|
|
160
|
+
* Should be called during application shutdown.
|
|
161
|
+
*
|
|
162
|
+
* @returns Promise that resolves when all connections are closed
|
|
163
|
+
*
|
|
164
|
+
* @since 1.0.0
|
|
165
|
+
*/
|
|
166
|
+
async disconnect() {
|
|
167
|
+
await this.storage.disconnect();
|
|
168
|
+
this.log('info', 'Nuvex Client disconnected');
|
|
169
|
+
}
|
|
170
|
+
/**
|
|
171
|
+
* Check if the client is connected to storage layers
|
|
172
|
+
*
|
|
173
|
+
* @returns True if connected to at least the primary storage layer
|
|
174
|
+
*
|
|
175
|
+
* @since 1.0.0
|
|
176
|
+
*/
|
|
177
|
+
isConnected() {
|
|
178
|
+
return this.storage.isConnected();
|
|
179
|
+
}
|
|
180
|
+
// Basic operations (delegated to UnifiedStorage)
|
|
181
|
+
/**
|
|
182
|
+
* Store a value in the multi-layer storage system
|
|
183
|
+
*
|
|
184
|
+
* Stores the value across all available storage layers (Memory → Redis → PostgreSQL)
|
|
185
|
+
* with intelligent TTL management and layer-specific optimizations.
|
|
186
|
+
*
|
|
187
|
+
* @template T - The type of the value being stored
|
|
188
|
+
* @param key - Unique identifier for the stored value
|
|
189
|
+
* @param value - The value to store (will be JSON serialized)
|
|
190
|
+
* @param options - Optional storage configuration
|
|
191
|
+
* @returns Promise that resolves to true if stored successfully
|
|
192
|
+
*
|
|
193
|
+
* @example
|
|
194
|
+
* ```typescript
|
|
195
|
+
* // Store with default TTL
|
|
196
|
+
* await client.set('user:123', { name: 'John', email: 'john@example.com' });
|
|
197
|
+
*
|
|
198
|
+
* // Store with custom TTL (60 seconds)
|
|
199
|
+
* await client.set('session:abc', sessionData, { ttl: 60 });
|
|
200
|
+
*
|
|
201
|
+
* // Store only in memory layer
|
|
202
|
+
* await client.set('cache:temp', data, { layer: StorageLayer.MEMORY });
|
|
203
|
+
* ```
|
|
204
|
+
*
|
|
205
|
+
* @since 1.0.0
|
|
206
|
+
*/
|
|
207
|
+
async set(key, value, options) {
|
|
208
|
+
return this.storage.set(key, value, options);
|
|
209
|
+
}
|
|
210
|
+
/**
|
|
211
|
+
* Retrieve a value from the multi-layer storage system
|
|
212
|
+
*
|
|
213
|
+
* Searches for the value across storage layers in order (Memory → Redis → PostgreSQL)
|
|
214
|
+
* and automatically promotes the value to higher layers for faster future access.
|
|
215
|
+
*
|
|
216
|
+
* @template T - The expected type of the retrieved value
|
|
217
|
+
* @param key - Unique identifier of the value to retrieve
|
|
218
|
+
* @param options - Optional retrieval configuration
|
|
219
|
+
* @returns Promise that resolves to the value or null if not found
|
|
220
|
+
*
|
|
221
|
+
* @example
|
|
222
|
+
* ```typescript
|
|
223
|
+
* // Get from any layer
|
|
224
|
+
* const user = await client.get<UserType>('user:123');
|
|
225
|
+
*
|
|
226
|
+
* // Get only from PostgreSQL, skip cache
|
|
227
|
+
* const freshData = await client.get('data:key', { skipCache: true });
|
|
228
|
+
*
|
|
229
|
+
* // Get only from memory layer
|
|
230
|
+
* const cachedData = await client.get('cache:key', { layer: StorageLayer.MEMORY });
|
|
231
|
+
* ```
|
|
232
|
+
*
|
|
233
|
+
* @since 1.0.0
|
|
234
|
+
*/
|
|
235
|
+
async get(key, options = {}) {
|
|
236
|
+
return this.storage.get(key, options);
|
|
237
|
+
}
|
|
238
|
+
/**
|
|
239
|
+
* Delete a value from all storage layers
|
|
240
|
+
*
|
|
241
|
+
* Removes the value from all storage layers to ensure consistency.
|
|
242
|
+
*
|
|
243
|
+
* @param key - Unique identifier of the value to delete
|
|
244
|
+
* @param options - Optional deletion configuration
|
|
245
|
+
* @returns Promise that resolves to true if deleted successfully
|
|
246
|
+
*
|
|
247
|
+
* @example
|
|
248
|
+
* ```typescript
|
|
249
|
+
* // Delete from all layers
|
|
250
|
+
* await client.delete('user:123');
|
|
251
|
+
*
|
|
252
|
+
* // Delete only from memory layer
|
|
253
|
+
* await client.delete('cache:temp', { layer: StorageLayer.MEMORY });
|
|
254
|
+
* ```
|
|
255
|
+
*
|
|
256
|
+
* @since 1.0.0
|
|
257
|
+
*/
|
|
258
|
+
async delete(key, options = {}) {
|
|
259
|
+
return this.storage.delete(key, options);
|
|
260
|
+
}
|
|
261
|
+
/**
|
|
262
|
+
* Check if a key exists in any storage layer
|
|
263
|
+
*
|
|
264
|
+
* @param key - Unique identifier to check for existence
|
|
265
|
+
* @param options - Optional configuration to check specific layer
|
|
266
|
+
* @returns Promise that resolves to true if the key exists
|
|
267
|
+
*
|
|
268
|
+
* @example
|
|
269
|
+
* ```typescript
|
|
270
|
+
* if (await client.exists('user:123')) {
|
|
271
|
+
* console.log('User exists');
|
|
272
|
+
* }
|
|
273
|
+
* ```
|
|
274
|
+
*
|
|
275
|
+
* @since 1.0.0
|
|
276
|
+
*/
|
|
277
|
+
async exists(key, options = {}) {
|
|
278
|
+
return this.storage.exists(key, options);
|
|
279
|
+
}
|
|
280
|
+
/**
|
|
281
|
+
* Set or update the expiration time for a key
|
|
282
|
+
*
|
|
283
|
+
* @param key - Unique identifier of the value
|
|
284
|
+
* @param ttl - Time to live in seconds
|
|
285
|
+
* @returns Promise that resolves to true if expiration was set successfully
|
|
286
|
+
*
|
|
287
|
+
* @example
|
|
288
|
+
* ```typescript
|
|
289
|
+
* // Expire in 1 hour
|
|
290
|
+
* await client.expire('session:abc', 3600);
|
|
291
|
+
* ```
|
|
292
|
+
*
|
|
293
|
+
* @since 1.0.0
|
|
294
|
+
*/
|
|
295
|
+
async expire(key, ttl) {
|
|
296
|
+
return this.storage.expire(key, ttl);
|
|
297
|
+
}
|
|
298
|
+
// Batch operations
|
|
299
|
+
/**
|
|
300
|
+
* Execute multiple set operations in a batch
|
|
301
|
+
*
|
|
302
|
+
* Efficiently executes multiple storage operations with automatic error handling
|
|
303
|
+
* and transaction-like behavior where possible.
|
|
304
|
+
*
|
|
305
|
+
* @param operations - Array of batch operations to execute
|
|
306
|
+
* @returns Promise that resolves to an array of results for each operation
|
|
307
|
+
*
|
|
308
|
+
* @example
|
|
309
|
+
* ```typescript
|
|
310
|
+
* const results = await client.setBatch([
|
|
311
|
+
* { operation: 'set', key: 'user:1', value: userData1 },
|
|
312
|
+
* { operation: 'set', key: 'user:2', value: userData2, options: { ttl: 3600 } }
|
|
313
|
+
* ]);
|
|
314
|
+
*
|
|
315
|
+
* results.forEach((result, index) => {
|
|
316
|
+
* console.log(`Operation ${index}: ${result.success ? 'Success' : 'Failed'}`);
|
|
317
|
+
* });
|
|
318
|
+
* ```
|
|
319
|
+
*
|
|
320
|
+
* @since 1.0.0
|
|
321
|
+
*/
|
|
322
|
+
async setBatch(operations) {
|
|
323
|
+
return this.storage.setBatch(operations);
|
|
324
|
+
}
|
|
325
|
+
/**
|
|
326
|
+
* Retrieve multiple values in a batch
|
|
327
|
+
*
|
|
328
|
+
* Efficiently retrieves multiple values with layer optimization and
|
|
329
|
+
* automatic cache promotion.
|
|
330
|
+
*
|
|
331
|
+
* @param keys - Array of keys to retrieve
|
|
332
|
+
* @param options - Optional configuration applied to all operations
|
|
333
|
+
* @returns Promise that resolves to an array of results for each key
|
|
334
|
+
*
|
|
335
|
+
* @example
|
|
336
|
+
* ```typescript
|
|
337
|
+
* const results = await client.getBatch(['user:1', 'user:2', 'user:3']);
|
|
338
|
+
* const users = results
|
|
339
|
+
* .filter(result => result.success && result.value)
|
|
340
|
+
* .map(result => result.value);
|
|
341
|
+
* ```
|
|
342
|
+
*
|
|
343
|
+
* @since 1.0.0
|
|
344
|
+
*/
|
|
345
|
+
async getBatch(keys, options = {}) {
|
|
346
|
+
return this.storage.getBatch(keys, options);
|
|
347
|
+
}
|
|
348
|
+
/**
|
|
349
|
+
* Delete multiple values in a batch
|
|
350
|
+
*
|
|
351
|
+
* Efficiently deletes multiple values from all storage layers.
|
|
352
|
+
*
|
|
353
|
+
* @param keys - Array of keys to delete
|
|
354
|
+
* @returns Promise that resolves to an array of results for each key
|
|
355
|
+
*
|
|
356
|
+
* @example
|
|
357
|
+
* ```typescript
|
|
358
|
+
* const results = await client.deleteBatch(['temp:1', 'temp:2', 'temp:3']);
|
|
359
|
+
* const deletedCount = results.filter(r => r.success).length;
|
|
360
|
+
* ```
|
|
361
|
+
*
|
|
362
|
+
* @since 1.0.0
|
|
363
|
+
*/
|
|
364
|
+
async deleteBatch(keys) {
|
|
365
|
+
return this.storage.deleteBatch(keys);
|
|
366
|
+
}
|
|
367
|
+
// Query operations
|
|
368
|
+
async query(options) {
|
|
369
|
+
return this.storage.query(options);
|
|
370
|
+
}
|
|
371
|
+
async keys(pattern) {
|
|
372
|
+
return this.storage.keys(pattern);
|
|
373
|
+
}
|
|
374
|
+
async clear(pattern) {
|
|
375
|
+
this.log('warn', `Clearing storage${pattern ? ` with pattern: ${pattern}` : ' (all keys)'}`);
|
|
376
|
+
return this.storage.clear(pattern);
|
|
377
|
+
}
|
|
378
|
+
// Metrics and monitoring
|
|
379
|
+
/**
|
|
380
|
+
* Get performance metrics for all layers or specific layer(s)
|
|
381
|
+
*
|
|
382
|
+
* Returns metrics about storage operations and performance. Can be filtered
|
|
383
|
+
* to return metrics for specific layers only.
|
|
384
|
+
*
|
|
385
|
+
* @param layers - Optional layer(s) to get metrics for. If not provided, returns all metrics.
|
|
386
|
+
* Can be a single layer string, 'all', or array of layer strings.
|
|
387
|
+
* @returns Object containing requested metrics
|
|
388
|
+
*
|
|
389
|
+
* @example
|
|
390
|
+
* ```typescript
|
|
391
|
+
* // Get all metrics
|
|
392
|
+
* const metrics = client.getMetrics();
|
|
393
|
+
*
|
|
394
|
+
* // Get specific layer metrics
|
|
395
|
+
* const memoryMetrics = client.getMetrics('memory');
|
|
396
|
+
* // { memoryHits, memoryMisses, memorySize, memoryMaxSize }
|
|
397
|
+
*
|
|
398
|
+
* // Get multiple layer metrics
|
|
399
|
+
* const cacheMetrics = client.getMetrics(['memory', 'redis']);
|
|
400
|
+
* // { memoryHits, memoryMisses, memorySize, memoryMaxSize, redisHits, redisMisses, totalOperations, averageResponseTime, cacheHitRatio }
|
|
401
|
+
* ```
|
|
402
|
+
*
|
|
403
|
+
* @since 1.0.0
|
|
404
|
+
*/
|
|
405
|
+
getMetrics(layers) {
|
|
406
|
+
return this.storage.getMetrics(layers);
|
|
407
|
+
}
|
|
408
|
+
resetMetrics() {
|
|
409
|
+
this.storage.resetMetrics();
|
|
410
|
+
this.log('info', 'Storage metrics reset');
|
|
411
|
+
}
|
|
412
|
+
// Layer management
|
|
413
|
+
async promote(key, targetLayer) {
|
|
414
|
+
return this.storage.promote(key, targetLayer);
|
|
415
|
+
}
|
|
416
|
+
async demote(key, targetLayer) {
|
|
417
|
+
return this.storage.demote(key, targetLayer);
|
|
418
|
+
}
|
|
419
|
+
async getLayerInfo(key) {
|
|
420
|
+
return this.storage.getLayerInfo(key);
|
|
421
|
+
}
|
|
422
|
+
// Store-specific methods
|
|
423
|
+
/**
|
|
424
|
+
* Configure the store with new settings
|
|
425
|
+
*/
|
|
426
|
+
async configure(config) {
|
|
427
|
+
// Merge with existing config
|
|
428
|
+
this.config = { ...this.config, ...config };
|
|
429
|
+
// Update logger if changed
|
|
430
|
+
if (config.logging) {
|
|
431
|
+
this.logger = config.logging.enabled ? (config.logging.logger || null) : null;
|
|
432
|
+
}
|
|
433
|
+
this.log('info', 'Client configuration updated');
|
|
434
|
+
}
|
|
435
|
+
/**
|
|
436
|
+
* Get the underlying storage engine
|
|
437
|
+
*
|
|
438
|
+
* @internal This method is intended for internal and testing use only.
|
|
439
|
+
* It provides direct access to the StorageEngine instance.
|
|
440
|
+
*
|
|
441
|
+
* @returns The StorageEngine instance used by this client
|
|
442
|
+
*/
|
|
443
|
+
getEngine() {
|
|
444
|
+
return this.storage;
|
|
445
|
+
}
|
|
446
|
+
/**
|
|
447
|
+
* Get current configuration
|
|
448
|
+
*/
|
|
449
|
+
getConfig() {
|
|
450
|
+
return { ...this.config };
|
|
451
|
+
}
|
|
452
|
+
/**
|
|
453
|
+
* Health check for all storage layers or specific layer(s)
|
|
454
|
+
*
|
|
455
|
+
* Performs comprehensive health checks on configured storage layers
|
|
456
|
+
* using the underlying engine's ping() methods for each layer.
|
|
457
|
+
*
|
|
458
|
+
* @param layers - Optional layer(s) to check. If not provided, checks all layers.
|
|
459
|
+
* Can be a single layer string or array of layer strings.
|
|
460
|
+
* @returns Promise that resolves to health status for requested layer(s)
|
|
461
|
+
*
|
|
462
|
+
* @example
|
|
463
|
+
* ```typescript
|
|
464
|
+
* // Check all layers
|
|
465
|
+
* const health = await client.healthCheck();
|
|
466
|
+
* // { memory: true, redis: true, postgres: true }
|
|
467
|
+
*
|
|
468
|
+
* // Check specific layer
|
|
469
|
+
* const redisHealth = await client.healthCheck('redis');
|
|
470
|
+
* // { redis: true }
|
|
471
|
+
*
|
|
472
|
+
* // Check multiple layers
|
|
473
|
+
* const cacheHealth = await client.healthCheck(['memory', 'redis']);
|
|
474
|
+
* // { memory: true, redis: true }
|
|
475
|
+
*
|
|
476
|
+
* if (!health.redis) {
|
|
477
|
+
* console.error('Redis layer is down');
|
|
478
|
+
* }
|
|
479
|
+
* ```
|
|
480
|
+
*
|
|
481
|
+
* @since 1.0.0
|
|
482
|
+
*/
|
|
483
|
+
async healthCheck(layers) {
|
|
484
|
+
return this.storage.healthCheck(layers);
|
|
485
|
+
}
|
|
486
|
+
/**
|
|
487
|
+
* Cleanup expired entries and optimize storage
|
|
488
|
+
*/
|
|
489
|
+
async cleanup() {
|
|
490
|
+
this.log('info', 'Starting storage cleanup');
|
|
491
|
+
let cleaned = 0;
|
|
492
|
+
let errors = 0;
|
|
493
|
+
try {
|
|
494
|
+
// Clean up expired memory entries
|
|
495
|
+
const memoryCleanup = await this.storage.cleanupExpiredMemory();
|
|
496
|
+
cleaned += memoryCleanup;
|
|
497
|
+
this.log('info', `Cleanup completed: ${cleaned} entries cleaned, ${errors} errors`);
|
|
498
|
+
return { cleaned, errors };
|
|
499
|
+
}
|
|
500
|
+
catch (error) {
|
|
501
|
+
errors++;
|
|
502
|
+
this.log('error', 'Cleanup failed', { error: error.message });
|
|
503
|
+
return { cleaned, errors };
|
|
504
|
+
}
|
|
505
|
+
}
|
|
506
|
+
/**
|
|
507
|
+
* Compact storage and optimize performance
|
|
508
|
+
*/
|
|
509
|
+
async compact() {
|
|
510
|
+
this.log('info', 'Starting storage compaction');
|
|
511
|
+
try {
|
|
512
|
+
// For now, compaction means cleanup + metrics reset
|
|
513
|
+
await this.cleanup();
|
|
514
|
+
this.resetMetrics();
|
|
515
|
+
this.log('info', 'Storage compaction completed');
|
|
516
|
+
}
|
|
517
|
+
catch (error) {
|
|
518
|
+
this.log('error', 'Storage compaction failed', { error: error.message });
|
|
519
|
+
throw error;
|
|
520
|
+
}
|
|
521
|
+
}
|
|
522
|
+
/**
|
|
523
|
+
* Backup storage data to external location with incremental support
|
|
524
|
+
*/
|
|
525
|
+
/**
 * Backup storage data, preferably to a JSON file under ./nuvex-backups.
 *
 * Walks every key reported by the engine (skipping internal `__nuvex_`/`__backup:`
 * bookkeeping keys), snapshots each value together with its layer info, and
 * writes the package to disk — gzip-compressed when `compression` is true.
 * When the filesystem modules are unavailable, only the backup *metadata* is
 * stored internally as a degraded fallback.
 *
 * @param {string} [destination] - Used as the backup id / file basename, not a
 *     full path; defaults to a timestamped `nuvex-backup-...` id.
 * @param {{incremental?: boolean, compression?: boolean}} [options]
 * @returns {Promise<string>} The backup id.
 * @throws Re-throws any failure after logging it.
 */
async backup(destination, options) {
    this.log('info', 'Starting storage backup');
    try {
        // Colons/dots are invalid in some filesystems' names, so sanitize the ISO stamp.
        const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
        const backupId = destination || `nuvex-backup-${timestamp}`;
        const { incremental = false, compression = true } = options || {};
        // For incremental backups, look up when the previous backup ran.
        let lastBackupTime = null;
        if (incremental) {
            try {
                const lastBackupMetadata = await this.storage.get('__nuvex_last_backup_metadata', {});
                if (lastBackupMetadata && typeof lastBackupMetadata === 'object' && 'timestamp' in lastBackupMetadata) {
                    lastBackupTime = new Date(lastBackupMetadata.timestamp);
                }
            }
            catch (error) {
                // Missing/broken metadata degrades gracefully to a full backup.
                this.log('warn', 'Could not retrieve last backup metadata, performing full backup', { error: error.message });
            }
        }
        // Snapshot every key the engine knows about.
        const allKeys = await this.storage.keys();
        const backupData = {};
        let keysProcessed = 0;
        let keysSkipped = 0;
        for (const key of allKeys) {
            // Skip internal backup bookkeeping keys so backups don't nest.
            if (key.startsWith('__nuvex_') || key.startsWith('__backup:')) {
                keysSkipped++;
                continue;
            }
            const value = await this.storage.get(key, {});
            const layerInfo = await this.storage.getLayerInfo(key);
            if (value !== null) {
                const itemData = {
                    value,
                    layerInfo,
                    createdAt: new Date().toISOString(),
                    version: '1.0.0'
                };
                // NOTE(review): "incremental" currently still includes every key —
                // items are only *marked* incremental because per-key modification
                // times are not tracked yet.
                if (incremental && lastBackupTime) {
                    const enhancedItemData = itemData;
                    enhancedItemData.backupType = 'incremental';
                    enhancedItemData.lastBackupTime = lastBackupTime.toISOString();
                }
                backupData[key] = itemData;
                keysProcessed++;
            }
        }
        // Describe the backup so restore() can validate and report on it.
        const backupMetadata = {
            id: backupId,
            createdAt: new Date().toISOString(),
            keyCount: keysProcessed,
            keysSkipped,
            version: '1.0.0',
            type: incremental ? 'incremental' : 'full',
            lastBackupTime: lastBackupTime?.toISOString() || null,
            compression,
            totalKeys: allKeys.length
        };
        const backupPackage = {
            metadata: backupMetadata,
            data: backupData
        };
        // Dynamic imports so the method degrades gracefully in environments
        // without filesystem access (e.g. restricted runtimes).
        const fs = await import('fs').catch(() => null);
        const path = await import('path').catch(() => null);
        if (fs && path) {
            const backupDir = path.join(process.cwd(), 'nuvex-backups');
            await fs.promises.mkdir(backupDir, { recursive: true });
            const backupFilePath = path.join(backupDir, `${backupId}.json`);
            const dataToWrite = JSON.stringify(backupPackage, null, 2);
            if (compression) {
                try {
                    // gzipSync blocks the event loop for the duration of the
                    // compression — acceptable for a maintenance operation.
                    const zlib = await import('zlib');
                    const compressed = zlib.gzipSync(Buffer.from(dataToWrite));
                    await fs.promises.writeFile(`${backupFilePath}.gz`, compressed);
                    this.log('info', `Backup compressed and saved to ${backupFilePath}.gz`);
                }
                catch (compressionError) {
                    // Fall back to plain JSON rather than failing the backup.
                    this.log('warn', 'Compression failed, saving uncompressed', { error: compressionError.message });
                    await fs.promises.writeFile(backupFilePath, dataToWrite);
                }
            }
            else {
                await fs.promises.writeFile(backupFilePath, dataToWrite);
            }
            // Record this run so a future incremental backup can reference it.
            await this.storage.set('__nuvex_last_backup_metadata', {
                backupId,
                timestamp: new Date().toISOString(),
                type: incremental ? 'incremental' : 'full'
            }, {});
            this.log('info', `Backup completed: ${backupId}`, {
                keyCount: keysProcessed,
                keysSkipped,
                type: incremental ? 'incremental' : 'full',
                compressed: compression
            });
            return backupId;
        }
        else {
            // Degraded fallback: only metadata is preserved, not the data itself.
            this.log('warn', 'File system not available, storing backup metadata internally (not recommended for production)');
            await this.storage.set(`__backup:${backupId}`, backupMetadata, {});
            return backupId;
        }
    }
    catch (error) {
        this.log('error', 'Backup failed', { error: error.message });
        throw error;
    }
}
|
|
644
|
+
/**
|
|
645
|
+
* Restore storage data from external backup location
|
|
646
|
+
*/
|
|
647
|
+
async restore(source, options) {
|
|
648
|
+
this.log('info', `Starting restore from backup: ${source}`);
|
|
649
|
+
try {
|
|
650
|
+
const { clearExisting = false, dryRun = false } = options || {};
|
|
651
|
+
// Try to load backup from external storage first
|
|
652
|
+
const fs = await import('fs').catch(() => null);
|
|
653
|
+
const path = await import('path').catch(() => null);
|
|
654
|
+
let backupPackage = null;
|
|
655
|
+
if (fs && path) {
|
|
656
|
+
const backupDir = path.join(process.cwd(), 'nuvex-backups');
|
|
657
|
+
const backupFilePath = path.join(backupDir, `${source}.json`);
|
|
658
|
+
const compressedFilePath = `${backupFilePath}.gz`;
|
|
659
|
+
// Try compressed file first
|
|
660
|
+
const compressedExists = await fs.promises.access(compressedFilePath).then(() => true).catch(() => false);
|
|
661
|
+
if (compressedExists) {
|
|
662
|
+
try {
|
|
663
|
+
const zlib = await import('zlib');
|
|
664
|
+
const compressedData = await fs.promises.readFile(compressedFilePath);
|
|
665
|
+
const decompressed = zlib.gunzipSync(compressedData);
|
|
666
|
+
backupPackage = JSON.parse(decompressed.toString());
|
|
667
|
+
this.log('info', 'Loaded compressed backup file');
|
|
668
|
+
}
|
|
669
|
+
catch (decompressionError) {
|
|
670
|
+
this.log('error', 'Failed to decompress backup file', { error: decompressionError.message });
|
|
671
|
+
throw decompressionError;
|
|
672
|
+
}
|
|
673
|
+
}
|
|
674
|
+
else {
|
|
675
|
+
const uncompressedExists = await fs.promises.access(backupFilePath).then(() => true).catch(() => false);
|
|
676
|
+
if (uncompressedExists) {
|
|
677
|
+
const backupData = await fs.promises.readFile(backupFilePath, 'utf8');
|
|
678
|
+
backupPackage = JSON.parse(backupData);
|
|
679
|
+
this.log('info', 'Loaded uncompressed backup file');
|
|
680
|
+
}
|
|
681
|
+
}
|
|
682
|
+
}
|
|
683
|
+
// Fallback: try to load from internal storage
|
|
684
|
+
if (!backupPackage) {
|
|
685
|
+
this.log('warn', 'External backup not found, checking internal storage');
|
|
686
|
+
const internalBackupMetadata = await this.storage.get(`__backup:${source}`, {});
|
|
687
|
+
if (!internalBackupMetadata) {
|
|
688
|
+
throw new Error(`Backup not found: ${source}`);
|
|
689
|
+
}
|
|
690
|
+
// For internal backups, we don't have the actual data, just metadata
|
|
691
|
+
this.log('warn', 'Internal backup found but data restoration from internal storage is limited');
|
|
692
|
+
return false;
|
|
693
|
+
}
|
|
694
|
+
const { metadata, data } = backupPackage;
|
|
695
|
+
if (!metadata || !data) {
|
|
696
|
+
throw new Error('Invalid backup format: missing metadata or data');
|
|
697
|
+
}
|
|
698
|
+
this.log('info', 'Backup metadata loaded', {
|
|
699
|
+
id: metadata.id,
|
|
700
|
+
createdAt: metadata.createdAt,
|
|
701
|
+
keyCount: metadata.keyCount,
|
|
702
|
+
type: metadata.type,
|
|
703
|
+
version: metadata.version
|
|
704
|
+
});
|
|
705
|
+
if (dryRun) {
|
|
706
|
+
this.log('info', 'Dry run mode: would restore the following keys', {
|
|
707
|
+
keys: Object.keys(data),
|
|
708
|
+
count: Object.keys(data).length
|
|
709
|
+
});
|
|
710
|
+
return true;
|
|
711
|
+
}
|
|
712
|
+
// Clear existing data if requested
|
|
713
|
+
if (clearExisting) {
|
|
714
|
+
this.log('warn', 'Clearing existing storage before restore');
|
|
715
|
+
await this.storage.clear();
|
|
716
|
+
}
|
|
717
|
+
// Restore data with proper metadata and TTL
|
|
718
|
+
let restoredCount = 0;
|
|
719
|
+
let errorCount = 0;
|
|
720
|
+
for (const [key, itemData] of Object.entries(data)) {
|
|
721
|
+
try {
|
|
722
|
+
const item = itemData;
|
|
723
|
+
const { value, layerInfo } = item;
|
|
724
|
+
// Restore to the original layer if possible
|
|
725
|
+
const restoreOptions = {};
|
|
726
|
+
if (layerInfo?.layer) {
|
|
727
|
+
restoreOptions.layer = layerInfo.layer;
|
|
728
|
+
}
|
|
729
|
+
// Restore TTL if available
|
|
730
|
+
if (layerInfo?.ttl && layerInfo.ttl > 0) {
|
|
731
|
+
restoreOptions.ttl = layerInfo.ttl;
|
|
732
|
+
}
|
|
733
|
+
const success = await this.storage.set(key, value, restoreOptions);
|
|
734
|
+
if (success) {
|
|
735
|
+
restoredCount++;
|
|
736
|
+
}
|
|
737
|
+
else {
|
|
738
|
+
errorCount++;
|
|
739
|
+
this.log('warn', `Failed to restore key: ${key}`);
|
|
740
|
+
}
|
|
741
|
+
}
|
|
742
|
+
catch (keyError) {
|
|
743
|
+
errorCount++;
|
|
744
|
+
this.log('error', `Error restoring key: ${key}`, { error: keyError.message });
|
|
745
|
+
}
|
|
746
|
+
}
|
|
747
|
+
// Update restoration metadata
|
|
748
|
+
await this.storage.set('__nuvex_last_restore_metadata', {
|
|
749
|
+
backupId: metadata.id,
|
|
750
|
+
restoredAt: new Date().toISOString(),
|
|
751
|
+
restoredCount,
|
|
752
|
+
errorCount,
|
|
753
|
+
totalKeys: metadata.keyCount
|
|
754
|
+
}, {});
|
|
755
|
+
this.log('info', `Restore completed from backup: ${source}`, {
|
|
756
|
+
restoredCount,
|
|
757
|
+
errorCount,
|
|
758
|
+
totalKeys: metadata.keyCount,
|
|
759
|
+
successRate: `${((restoredCount / metadata.keyCount) * 100).toFixed(2)}%`
|
|
760
|
+
});
|
|
761
|
+
return errorCount === 0;
|
|
762
|
+
}
|
|
763
|
+
catch (error) {
|
|
764
|
+
this.log('error', 'Restore failed', { error: error.message });
|
|
765
|
+
return false;
|
|
766
|
+
}
|
|
767
|
+
}
|
|
768
|
+
// Convenience methods for common patterns
|
|
769
|
+
/**
|
|
770
|
+
* Namespace-aware set operation
|
|
771
|
+
*/
|
|
772
|
+
async setNamespaced(namespace, key, value, options = {}) {
|
|
773
|
+
return this.storage.set(`${namespace}:${key}`, value, options);
|
|
774
|
+
}
|
|
775
|
+
/**
|
|
776
|
+
* Namespace-aware get operation
|
|
777
|
+
*/
|
|
778
|
+
async getNamespaced(namespace, key, options = {}) {
|
|
779
|
+
return this.storage.get(`${namespace}:${key}`, options);
|
|
780
|
+
}
|
|
781
|
+
/**
|
|
782
|
+
* Get all keys in a namespace
|
|
783
|
+
*/
|
|
784
|
+
async getNamespaceKeys(namespace) {
|
|
785
|
+
const allKeys = await this.keys(`${namespace}:*`);
|
|
786
|
+
return allKeys.map(key => key.replace(`${namespace}:`, ''));
|
|
787
|
+
}
|
|
788
|
+
/**
|
|
789
|
+
* Clear entire namespace
|
|
790
|
+
*/
|
|
791
|
+
async clearNamespace(namespace) {
|
|
792
|
+
return this.clear(`${namespace}:*`);
|
|
793
|
+
}
|
|
794
|
+
/**
|
|
795
|
+
* Atomically increment a numeric value
|
|
796
|
+
*
|
|
797
|
+
* This method provides true atomic increments that are safe for concurrent access
|
|
798
|
+
* across all storage layers. Uses native atomic operations from Redis (INCRBY) and
|
|
799
|
+
* PostgreSQL (UPDATE with row locks) when available.
|
|
800
|
+
*
|
|
801
|
+
* **Thread-Safety:**
|
|
802
|
+
* - ✅ Safe for concurrent increments to the same key
|
|
803
|
+
* - ✅ No lost updates in high-concurrency scenarios
|
|
804
|
+
* - ✅ Works correctly across multiple instances
|
|
805
|
+
*
|
|
806
|
+
* **Important:** The key must contain a numeric value (or not exist).
|
|
807
|
+
* Incrementing a non-numeric value will throw an error.
|
|
808
|
+
*
|
|
809
|
+
* **How It Works:**
|
|
810
|
+
* 1. Uses atomic increment at the authoritative layer (PostgreSQL or Redis)
|
|
811
|
+
* 2. Propagates the new value to cache layers for consistency
|
|
812
|
+
* 3. Returns the exact new value after increment
|
|
813
|
+
*
|
|
814
|
+
* **Example Usage:**
|
|
815
|
+
* ```typescript
|
|
816
|
+
* // ✅ SAFE: Concurrent increments work correctly
|
|
817
|
+
* await Promise.all([
|
|
818
|
+
* client.increment('counter'), // atomic: 5 → 6
|
|
819
|
+
* client.increment('counter') // atomic: 6 → 7
|
|
820
|
+
* ]);
|
|
821
|
+
* // Result: 7 (all increments counted correctly)
|
|
822
|
+
*
|
|
823
|
+
* // Custom delta
|
|
824
|
+
* await client.increment('page_views', 5);
|
|
825
|
+
*
|
|
826
|
+
* // With TTL (in milliseconds)
|
|
827
|
+
* await client.increment('rate_limit', 1, 3600000);
|
|
828
|
+
* ```
|
|
829
|
+
*
|
|
830
|
+
* **Use Cases:**
|
|
831
|
+
* - ✅ High-concurrency counters (page views, API calls)
|
|
832
|
+
* - ✅ Critical operations (user credits, inventory)
|
|
833
|
+
* - ✅ Financial operations requiring exactness
|
|
834
|
+
* - ✅ Distributed systems with multiple instances
|
|
835
|
+
*
|
|
836
|
+
* @param key - The key to increment (must contain numeric value or not exist)
|
|
837
|
+
* @param delta - The amount to increment by (default: 1)
|
|
838
|
+
* @param ttl - Optional TTL in milliseconds
|
|
839
|
+
* @returns Promise resolving to the new value after increment
|
|
840
|
+
* @throws {Error} If the key contains a non-numeric value
|
|
841
|
+
*/
|
|
842
|
+
async increment(key, delta = 1, ttl) {
|
|
843
|
+
return this.storage.increment(key, delta, ttl);
|
|
844
|
+
}
|
|
845
|
+
/**
|
|
846
|
+
* Atomically decrement a numeric value
|
|
847
|
+
*
|
|
848
|
+
* This is a convenience method that uses atomic increment with a negative delta.
|
|
849
|
+
* Provides the same thread-safety guarantees as increment().
|
|
850
|
+
*
|
|
851
|
+
* @param key - The key to decrement
|
|
852
|
+
* @param delta - The amount to decrement by (default: 1)
|
|
853
|
+
* @param ttl - Optional TTL in milliseconds
|
|
854
|
+
* @returns Promise resolving to the new value after decrement
|
|
855
|
+
*
|
|
856
|
+
* @example
|
|
857
|
+
* ```typescript
|
|
858
|
+
* // Decrement by 1
|
|
859
|
+
* await client.decrement('inventory');
|
|
860
|
+
*
|
|
861
|
+
* // Decrement by custom amount
|
|
862
|
+
* await client.decrement('stock', 5);
|
|
863
|
+
* ```
|
|
864
|
+
*/
|
|
865
|
+
async decrement(key, delta = 1, ttl) {
|
|
866
|
+
return this.increment(key, -delta, ttl);
|
|
867
|
+
}
|
|
868
|
+
/**
|
|
869
|
+
* Set if not exists
|
|
870
|
+
*
|
|
871
|
+
* Stores a value only if the key does not already exist. Useful for
|
|
872
|
+
* initializing values that should not be overwritten.
|
|
873
|
+
*
|
|
874
|
+
* @note This implementation uses a check-then-set pattern which has a
|
|
875
|
+
* TOCTOU (Time-of-Check to Time-of-Use) race condition. Between the
|
|
876
|
+
* `exists()` and `set()` calls, another client could write to the same key.
|
|
877
|
+
* For critical use cases requiring true atomic set-if-not-exists semantics,
|
|
878
|
+
* use Redis `SET key value NX` or PostgreSQL `INSERT ... ON CONFLICT DO NOTHING`
|
|
879
|
+
* directly via the storage layer.
|
|
880
|
+
*
|
|
881
|
+
* @template T - The type of the value being stored
|
|
882
|
+
* @param key - Unique identifier for the stored value
|
|
883
|
+
* @param value - The value to store if key doesn't exist
|
|
884
|
+
* @param options - Optional storage configuration
|
|
885
|
+
* @returns Promise resolving to true if value was set, false if key already existed
|
|
886
|
+
*
|
|
887
|
+
* @example
|
|
888
|
+
* ```typescript
|
|
889
|
+
* // Initialize a counter only if it doesn't exist
|
|
890
|
+
* await client.setIfNotExists('counter', 0);
|
|
891
|
+
* ```
|
|
892
|
+
*/
|
|
893
|
+
async setIfNotExists(key, value, options) {
|
|
894
|
+
const exists = await this.storage.exists(key, options);
|
|
895
|
+
if (!exists) {
|
|
896
|
+
return this.storage.set(key, value, options);
|
|
897
|
+
}
|
|
898
|
+
return false;
|
|
899
|
+
}
|
|
900
|
+
/**
|
|
901
|
+
* Get multiple keys with a common prefix
|
|
902
|
+
*/
|
|
903
|
+
async getByPrefix(prefix, options = {}) {
|
|
904
|
+
const keys = await this.keys(`${prefix}*`);
|
|
905
|
+
const result = {};
|
|
906
|
+
for (const key of keys) {
|
|
907
|
+
const value = await this.storage.get(key, options);
|
|
908
|
+
if (value !== null) {
|
|
909
|
+
result[key] = value;
|
|
910
|
+
}
|
|
911
|
+
}
|
|
912
|
+
return result;
|
|
913
|
+
}
|
|
914
|
+
// Static convenience methods
|
|
915
|
+
static async set(key, value, options) {
|
|
916
|
+
return NuvexClient.getInstance().set(key, value, options);
|
|
917
|
+
}
|
|
918
|
+
static async get(key, options = {}) {
|
|
919
|
+
return NuvexClient.getInstance().get(key, options);
|
|
920
|
+
}
|
|
921
|
+
static async delete(key, options = {}) {
|
|
922
|
+
return NuvexClient.getInstance().delete(key, options);
|
|
923
|
+
}
|
|
924
|
+
static async exists(key, options = {}) {
|
|
925
|
+
return NuvexClient.getInstance().exists(key, options);
|
|
926
|
+
}
|
|
927
|
+
static async healthCheck(layers) {
|
|
928
|
+
return NuvexClient.getInstance().healthCheck(layers);
|
|
929
|
+
}
|
|
930
|
+
static getMetrics(layers) {
|
|
931
|
+
return NuvexClient.getInstance().getMetrics(layers);
|
|
932
|
+
}
|
|
933
|
+
/**
|
|
934
|
+
* Shutdown the store and cleanup resources
|
|
935
|
+
*/
|
|
936
|
+
static async shutdown() {
|
|
937
|
+
if (NuvexClient.instance) {
|
|
938
|
+
await NuvexClient.instance.disconnect();
|
|
939
|
+
NuvexClient.instance = null;
|
|
940
|
+
}
|
|
941
|
+
}
|
|
942
|
+
}
|
|
943
|
+
NuvexClient.instance = null;
|
|
944
|
+
//# sourceMappingURL=client.js.map
|