@joystick.js/db-canary 0.0.0-canary.2270 → 0.0.0-canary.2272
This diff shows the changes between publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
- package/dist/server/lib/auto_index_manager.js +1 -1
- package/dist/server/lib/bulk_insert_optimizer.js +1 -0
- package/dist/server/lib/memory_efficient_bulk_insert.js +1 -0
- package/dist/server/lib/write_queue.js +1 -1
- package/package.json +10 -4
- package/src/server/lib/auto_index_manager.js +11 -4
- package/src/server/lib/bulk_insert_optimizer.js +559 -0
- package/src/server/lib/memory_efficient_bulk_insert.js +262 -0
- package/src/server/lib/write_queue.js +2 -137
- package/test_runner.js +353 -0
- package/tests/client/index.test.js +3 -1
- package/tests/performance/bulk_insert_1m_test.js +113 -0
- package/tests/performance/bulk_insert_benchmarks.test.js +570 -0
- package/tests/performance/bulk_insert_enterprise_isolated.test.js +469 -0
- package/tests/performance/bulk_insert_enterprise_scale_test.js +216 -0
- package/tests/server/integration/authentication_integration.test.js +3 -1
- package/tests/server/integration/auto_indexing_integration.test.js +1 -1
- package/tests/server/integration/development_mode_authentication.test.js +3 -1
- package/tests/server/integration/production_safety_integration.test.js +3 -1
- package/tests/server/lib/bulk_insert_optimizer.test.js +523 -0
- package/tests/server/lib/operations/admin.test.js +3 -1
- package/dist/server/lib/batched_write_queue.js +0 -1
- package/dist/server/lib/processing_lane.js +0 -1
- package/src/server/lib/batched_write_queue.js +0 -331
- package/src/server/lib/processing_lane.js +0 -417
- package/tests/server/lib/batched_write_queue.test.js +0 -402
- package/tests/server/lib/write_queue_integration.test.js +0 -186
package/src/server/lib/memory_efficient_bulk_insert.js

```diff
@@ -0,0 +1,262 @@
+/**
+ * @fileoverview Memory-efficient bulk insert utilities for very large datasets.
+ * Provides streaming document generation and processing to minimize memory usage.
+ */
+
+import { bulk_insert_optimized } from './bulk_insert_optimizer.js';
+import create_logger from './logger.js';
+
+const { create_context_logger } = create_logger('memory_efficient_bulk_insert');
+
+/**
+ * Memory-efficient document generator that yields documents in batches.
+ * @param {number} total_count - Total number of documents to generate
+ * @param {Object} [options={}] - Generation options
+ * @returns {AsyncGenerator<Array<Object>>} Generator yielding document batches
+ */
+const generate_documents_streaming = async function* (total_count, options = {}) {
+  const {
+    batch_size = 1000,
+    document_template = 'minimal',
+    test_id = Date.now().toString(36)
+  } = options;
+
+  for (let i = 0; i < total_count; i += batch_size) {
+    const current_batch_size = Math.min(batch_size, total_count - i);
+    const batch = [];
+
+    for (let j = 0; j < current_batch_size; j++) {
+      const doc_index = i + j;
+
+      let document;
+      if (document_template === 'minimal') {
+        document = {
+          _id: `mem_${test_id}_${doc_index.toString().padStart(8, '0')}`,
+          idx: doc_index,
+          cat: doc_index % 50,
+          val: doc_index % 1000
+        };
+      } else if (document_template === 'medium') {
+        document = {
+          _id: `mem_${test_id}_${doc_index.toString().padStart(8, '0')}`,
+          name: `Document ${doc_index}`,
+          index: doc_index,
+          category: `category_${doc_index % 100}`,
+          active: doc_index % 2 === 0,
+          priority: doc_index % 5,
+          score: Math.random() * 100,
+          created_timestamp: Date.now() + doc_index
+        };
+      } else if (document_template === 'large') {
+        document = {
+          _id: `mem_${test_id}_${doc_index.toString().padStart(8, '0')}`,
+          name: `Large Document ${doc_index}`,
+          index: doc_index,
+          category: `category_${doc_index % 100}`,
+          subcategory: `subcategory_${doc_index % 20}`,
+          active: doc_index % 2 === 0,
+          priority: doc_index % 5,
+          score: Math.random() * 100,
+          created_timestamp: Date.now() + doc_index,
+          description: `This is a large document with index ${doc_index} for performance testing purposes.`,
+          metadata: {
+            created_by: `user_${doc_index % 1000}`,
+            department: `dept_${doc_index % 50}`,
+            project: `project_${doc_index % 200}`,
+            tags: [`tag_${doc_index % 10}`, `tag_${(doc_index + 1) % 10}`]
+          },
+          measurements: Array.from({ length: 5 }, (_, k) => ({
+            timestamp: Date.now() + doc_index + k,
+            value: Math.random() * 1000
+          }))
+        };
+      }
+
+      batch.push(document);
+    }
+
+    yield batch;
+
+    // Yield to event loop every batch to prevent blocking
+    await new Promise(resolve => setImmediate(resolve));
+  }
+};
+
+/**
+ * Memory-efficient bulk insert that processes documents in streaming fashion.
+ * @param {string} database_name - Database name
+ * @param {string} collection_name - Collection name
+ * @param {number} document_count - Number of documents to insert
+ * @param {Object} [options={}] - Options
+ * @returns {Promise<Object>} Bulk insert results
+ */
+const memory_efficient_bulk_insert = async (database_name, collection_name, document_count, options = {}) => {
+  const {
+    generation_batch_size = 1000,
+    insert_batch_size = 250,
+    document_template = 'minimal',
+    disable_indexing = true,
+    pre_allocate_map_size = true,
+    sort_keys = true
+  } = options;
+
+  const log = create_context_logger();
+  const start_time = Date.now();
+  const start_memory = process.memoryUsage();
+
+  log.info('Starting memory-efficient bulk insert', {
+    database: database_name,
+    collection: collection_name,
+    document_count,
+    generation_batch_size,
+    insert_batch_size,
+    document_template
+  });
+
+  const all_inserted_ids = [];
+  let processed_count = 0;
+  let batch_number = 0;
+
+  try {
+    // Process documents in streaming fashion
+    for await (const document_batch of generate_documents_streaming(document_count, {
+      batch_size: generation_batch_size,
+      document_template
+    })) {
+
+      // Insert the batch using optimized bulk insert
+      const result = await bulk_insert_optimized(database_name, collection_name, document_batch, {
+        disable_indexing,
+        pre_allocate_map_size: batch_number === 0 ? pre_allocate_map_size : false, // Only pre-allocate on first batch
+        sort_keys,
+        stream_processing: true,
+        batch_size: insert_batch_size
+      });
+
+      all_inserted_ids.push(...result.inserted_ids);
+      processed_count += result.inserted_count;
+      batch_number++;
+
+      // Clear the batch to help GC
+      document_batch.length = 0;
+
+      // Log progress every 10 batches
+      if (batch_number % 10 === 0) {
+        const current_memory = process.memoryUsage();
+        log.info('Memory-efficient bulk insert progress', {
+          processed: processed_count,
+          total: document_count,
+          percentage: Math.round((processed_count / document_count) * 100),
+          current_heap_mb: Math.round(current_memory.heapUsed / (1024 * 1024)),
+          batches_processed: batch_number
+        });
+      }
+
+      // Optimized memory management for very large datasets
+      if (document_count >= 10000000) {
+        // For 10M+ documents, force GC every 20 batches with minimal delay
+        if (batch_number % 20 === 0 && global.gc) {
+          global.gc();
+          await new Promise(resolve => setTimeout(resolve, 25));
+        }
+        // Yield less frequently for 10M+ to improve performance
+        if (batch_number % 5 === 0) {
+          await new Promise(resolve => setImmediate(resolve));
+        }
+      } else if (document_count >= 5000000) {
+        // For 5M+ documents, force GC every 10 batches
+        if (batch_number % 10 === 0 && global.gc) {
+          global.gc();
+          await new Promise(resolve => setTimeout(resolve, 50));
+        }
+        // Yield every other batch
+        if (batch_number % 2 === 0) {
+          await new Promise(resolve => setImmediate(resolve));
+        }
+      } else if (document_count >= 1000000) {
+        // For 1M+ documents, force GC every 10 batches
+        if (batch_number % 10 === 0 && global.gc) {
+          global.gc();
+          await new Promise(resolve => setTimeout(resolve, 50));
+        }
+        // Always yield to event loop
+        await new Promise(resolve => setImmediate(resolve));
+      } else {
+        // For smaller datasets, yield every batch
+        await new Promise(resolve => setImmediate(resolve));
+      }
+    }
+
+    const end_time = Date.now();
+    const end_memory = process.memoryUsage();
+
+    const performance_metrics = {
+      duration_ms: end_time - start_time,
+      documents_per_second: Math.round(document_count / ((end_time - start_time) / 1000)),
+      memory_usage: {
+        start_heap_mb: Math.round(start_memory.heapUsed / (1024 * 1024)),
+        end_heap_mb: Math.round(end_memory.heapUsed / (1024 * 1024)),
+        delta_heap_mb: Math.round((end_memory.heapUsed - start_memory.heapUsed) / (1024 * 1024)),
+        peak_heap_mb: Math.round(end_memory.heapUsed / (1024 * 1024))
+      }
+    };
+
+    log.info('Memory-efficient bulk insert completed', {
+      database: database_name,
+      collection: collection_name,
+      inserted_count: all_inserted_ids.length,
+      performance: performance_metrics
+    });
+
+    return {
+      acknowledged: true,
+      inserted_count: all_inserted_ids.length,
+      inserted_ids: all_inserted_ids,
+      performance: performance_metrics
+    };
+
+  } catch (error) {
+    log.error('Memory-efficient bulk insert failed', {
+      database: database_name,
+      collection: collection_name,
+      error: error.message
+    });
+    throw error;
+  }
+};
+
+/**
+ * Estimates memory usage for a bulk insert operation.
+ * @param {number} document_count - Number of documents
+ * @param {string} document_template - Document template type
+ * @param {number} batch_size - Batch size for processing
+ * @returns {Object} Memory usage estimates
+ */
+const estimate_memory_usage = (document_count, document_template = 'minimal', batch_size = 1000) => {
+  const doc_sizes = {
+    minimal: 50, // ~50 bytes per document
+    medium: 200, // ~200 bytes per document
+    large: 500 // ~500 bytes per document
+  };
+
+  const avg_doc_size = doc_sizes[document_template] || doc_sizes.minimal;
+  const batch_memory_mb = Math.round((batch_size * avg_doc_size) / (1024 * 1024));
+  const total_data_size_mb = Math.round((document_count * avg_doc_size) / (1024 * 1024));
+
+  // Estimate peak memory usage (batch + overhead + LMDB buffers)
+  const estimated_peak_mb = batch_memory_mb * 3 + 100; // 3x batch size + 100MB overhead
+
+  return {
+    avg_document_size_bytes: avg_doc_size,
+    total_data_size_mb,
+    batch_memory_mb,
+    estimated_peak_memory_mb: estimated_peak_mb,
+    recommended_batch_size: document_count >= 10000000 ? 2000 : document_count >= 5000000 ? 1000 : document_count >= 1000000 ? 750 : 1000
+  };
+};
+
+export {
+  memory_efficient_bulk_insert,
+  generate_documents_streaming,
+  estimate_memory_usage
+};
```
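For reference, the three exports above compose as follows. A minimal, hypothetical usage sketch: the `test_db` and `documents` names are placeholders, and the `global.gc` branches in the module only fire when Node is started with `--expose-gc`.

```js
import {
  memory_efficient_bulk_insert,
  estimate_memory_usage
} from './memory_efficient_bulk_insert.js';

// Size the run before committing to it: for 1M 'medium' documents the
// estimator reports ~191MB of raw data and recommends a batch size of 750.
const estimate = estimate_memory_usage(1000000, 'medium', 1000);
console.log(estimate.estimated_peak_memory_mb, estimate.recommended_batch_size);

// Stream-generate and insert 1M synthetic documents; the database and
// collection names here are placeholders.
const result = await memory_efficient_bulk_insert('test_db', 'documents', 1000000, {
  generation_batch_size: estimate.recommended_batch_size,
  document_template: 'medium'
});
console.log(result.inserted_count, result.performance.documents_per_second);
```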
package/src/server/lib/write_queue.js

```diff
@@ -2,13 +2,9 @@
  * @fileoverview Write queue system for JoystickDB providing serialized write operations.
  * Ensures write operations are processed sequentially to maintain data consistency and ACID properties.
  * Includes retry logic, backoff strategies, performance monitoring, and graceful shutdown capabilities.
- *
- * Now supports both traditional sequential processing and high-performance batched processing
- * with automatic fallback and transparent integration.
  */
 
 import create_logger from './logger.js';
-import { get_batched_write_queue, shutdown_batched_write_queue } from './batched_write_queue.js';
 
 const { create_context_logger } = create_logger('write_queue');
 
```
```diff
@@ -316,26 +312,13 @@ class WriteQueue {
 /** @type {WriteQueue|null} Singleton instance of the write queue */
 let write_queue_instance = null;
 
-/** @type {boolean} Whether to use batched write queue for improved performance */
-let use_batched_queue = true;
-
 /**
  * Gets the singleton write queue instance, creating it if it doesn't exist.
- * Automatically uses batched write queue for improved performance while maintaining
- * complete backward compatibility.
- * @param {Object} [options] - Configuration options for batched queue
  * @returns {WriteQueue} The write queue instance
  */
-export const get_write_queue = (options = {}) => {
+export const get_write_queue = () => {
   if (!write_queue_instance) {
-    if (use_batched_queue) {
-      // Use batched write queue with WriteQueue-compatible wrapper
-      const batched_queue = get_batched_write_queue(options);
-      write_queue_instance = new WriteQueueWrapper(batched_queue);
-    } else {
-      // Use traditional sequential write queue
-      write_queue_instance = new WriteQueue();
-    }
+    write_queue_instance = new WriteQueue();
   }
   return write_queue_instance;
 };
```
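After this hunk, `get_write_queue()` takes no arguments and always returns the plain sequential `WriteQueue` singleton, so callers still passing batched-queue options should drop them. A minimal sketch of a call site after the revert; the operation body and context fields are illustrative:

```js
import { get_write_queue } from './write_queue.js';

// Lazily creates the sequential WriteQueue singleton; options are no longer accepted.
const queue = get_write_queue();

// enqueue_write_operation keeps its original signature: an async operation
// plus an optional context object for logging and debugging.
const result = await queue.enqueue_write_operation(
  async () => ({ acknowledged: true }), // stand-in for a real write operation
  { operation: 'insert', collection: 'documents' } // illustrative context fields
);
```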
```diff
@@ -349,122 +332,4 @@ export const shutdown_write_queue = async () => {
     await write_queue_instance.shutdown();
     write_queue_instance = null;
   }
-
-  // Also shutdown batched queue if it was used
-  if (use_batched_queue) {
-    await shutdown_batched_write_queue();
-  }
 };
-
-/**
- * Enables or disables batched write queue usage.
- * @param {boolean} enabled - Whether to use batched queue
- */
-export const set_batched_queue_enabled = (enabled) => {
-  use_batched_queue = enabled;
-};
-
-/**
- * Wrapper class that provides WriteQueue-compatible API while using BatchedWriteQueue internally.
- * Ensures complete backward compatibility with existing code.
- */
-class WriteQueueWrapper {
-  /**
-   * Creates a new WriteQueueWrapper instance.
-   * @param {BatchedWriteQueue} batched_queue - The batched write queue instance to wrap
-   */
-  constructor(batched_queue) {
-    this.batched_queue = batched_queue;
-    this.log = create_context_logger('write_queue_wrapper');
-  }
-
-  /**
-   * Enqueues a write operation using the batched queue.
-   * Maintains identical API to original WriteQueue.
-   * @param {function} operation_fn - Async function that performs the write operation
-   * @param {Object} [context={}] - Additional context for logging and debugging
-   * @returns {Promise<*>} Promise that resolves with the operation result
-   */
-  async enqueue_write_operation(operation_fn, context = {}) {
-    return this.batched_queue.enqueue_write_operation(operation_fn, context);
-  }
-
-  /**
-   * Gets queue statistics with backward-compatible format.
-   * @returns {Object} Statistics object matching original WriteQueue format
-   */
-  get_stats() {
-    const batched_stats = this.batched_queue.get_stats();
-
-    // Return stats in original WriteQueue format for backward compatibility
-    return {
-      total_operations: batched_stats.total_operations,
-      completed_operations: batched_stats.completed_operations,
-      failed_operations: batched_stats.failed_operations,
-      current_queue_depth: batched_stats.current_queue_depth,
-      max_queue_depth: batched_stats.max_queue_depth,
-      avg_wait_time_ms: batched_stats.avg_wait_time_ms,
-      avg_processing_time_ms: batched_stats.avg_processing_time_ms,
-      success_rate: batched_stats.success_rate
-    };
-  }
-
-  /**
-   * Clears all statistics.
-   */
-  clear_stats() {
-    this.batched_queue.clear_stats();
-  }
-
-  /**
-   * Gracefully shuts down the wrapper and underlying batched queue.
-   * @returns {Promise<void>} Promise that resolves when shutdown is complete
-   */
-  async shutdown() {
-    await this.batched_queue.shutdown();
-  }
-
-  /**
-   * Determines if an error is retryable based on error patterns.
-   * Exposed for backward compatibility with existing tests.
-   * @param {Error} error - Error to check
-   * @returns {boolean} True if error is retryable, false otherwise
-   */
-  is_retryable_error(error) {
-    const retryable_patterns = [
-      'MDB_MAP_FULL',
-      'MDB_TXN_FULL',
-      'MDB_READERS_FULL',
-      'EAGAIN',
-      'EBUSY'
-    ];
-
-    return retryable_patterns.some(pattern =>
-      error.message.includes(pattern) || error.code === pattern
-    );
-  }
-
-  /**
-   * Calculates exponential backoff delay with jitter for retry attempts.
-   * Exposed for backward compatibility with existing tests.
-   * @param {number} attempt - Current attempt number (1-based)
-   * @returns {number} Delay in milliseconds
-   */
-  calculate_backoff_delay(attempt) {
-    const base_delay = 100;
-    const max_delay = 5000;
-    const exponential_delay = base_delay * Math.pow(2, attempt - 1);
-    const jitter = Math.random() * 0.1 * exponential_delay;
-
-    return Math.min(exponential_delay + jitter, max_delay);
-  }
-
-  /**
-   * Generates a unique operation ID for tracking.
-   * Exposed for backward compatibility with existing tests.
-   * @returns {string} Unique operation identifier
-   */
-  generate_operation_id() {
-    return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
-  }
-}
```
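With `WriteQueueWrapper` and `set_batched_queue_enabled` removed, shutdown has only the singleton queue to drain. A sketch of wiring `shutdown_write_queue` into process teardown; the signal handling is illustrative and not part of this module:

```js
import { shutdown_write_queue } from './write_queue.js';

// shutdown_write_queue() awaits write_queue_instance.shutdown() and resets the
// singleton to null, so a later get_write_queue() call starts a fresh queue.
process.on('SIGTERM', async () => {
  await shutdown_write_queue();
  process.exit(0);
});
```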