@joystick.js/db-canary 0.0.0-canary.2270 → 0.0.0-canary.2272

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/dist/server/lib/auto_index_manager.js +1 -1
  2. package/dist/server/lib/bulk_insert_optimizer.js +1 -0
  3. package/dist/server/lib/memory_efficient_bulk_insert.js +1 -0
  4. package/dist/server/lib/write_queue.js +1 -1
  5. package/package.json +10 -4
  6. package/src/server/lib/auto_index_manager.js +11 -4
  7. package/src/server/lib/bulk_insert_optimizer.js +559 -0
  8. package/src/server/lib/memory_efficient_bulk_insert.js +262 -0
  9. package/src/server/lib/write_queue.js +2 -137
  10. package/test_runner.js +353 -0
  11. package/tests/client/index.test.js +3 -1
  12. package/tests/performance/bulk_insert_1m_test.js +113 -0
  13. package/tests/performance/bulk_insert_benchmarks.test.js +570 -0
  14. package/tests/performance/bulk_insert_enterprise_isolated.test.js +469 -0
  15. package/tests/performance/bulk_insert_enterprise_scale_test.js +216 -0
  16. package/tests/server/integration/authentication_integration.test.js +3 -1
  17. package/tests/server/integration/auto_indexing_integration.test.js +1 -1
  18. package/tests/server/integration/development_mode_authentication.test.js +3 -1
  19. package/tests/server/integration/production_safety_integration.test.js +3 -1
  20. package/tests/server/lib/bulk_insert_optimizer.test.js +523 -0
  21. package/tests/server/lib/operations/admin.test.js +3 -1
  22. package/dist/server/lib/batched_write_queue.js +0 -1
  23. package/dist/server/lib/processing_lane.js +0 -1
  24. package/src/server/lib/batched_write_queue.js +0 -331
  25. package/src/server/lib/processing_lane.js +0 -417
  26. package/tests/server/lib/batched_write_queue.test.js +0 -402
  27. package/tests/server/lib/write_queue_integration.test.js +0 -186
package/tests/server/lib/batched_write_queue.test.js
@@ -1,402 +0,0 @@
- /**
-  * @fileoverview Tests for batched write queue system.
-  * Comprehensive test suite covering functionality, performance, and edge cases.
-  */
-
- import test from 'ava';
- import BatchedWriteQueue, { get_batched_write_queue, shutdown_batched_write_queue } from '../../../src/server/lib/batched_write_queue.js';
- import ProcessingLane from '../../../src/server/lib/processing_lane.js';
-
- test('ProcessingLane should create with correct configuration', (t) => {
-   const lane = new ProcessingLane({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_id: 0
-   });
-
-   t.is(lane.batch_size, 3);
-   t.is(lane.batch_timeout, 50);
-   t.is(lane.lane_id, 0);
-   t.is(lane.current_batch.length, 0);
-   t.is(lane.processing, false);
- });
-
- test('ProcessingLane should add operations to batch and process when batch size is reached', async (t) => {
-   const lane = new ProcessingLane({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_id: 0
-   });
-
-   try {
-     const operations = [];
-
-     // Create 3 operations (batch size)
-     for (let i = 0; i < 3; i++) {
-       const operation_fn = async () => `result_${i}`;
-       const context = { test: `operation_${i}` };
-
-       operations.push(lane.add_operation({ operation_fn, context }));
-     }
-
-     // Wait for all operations to complete
-     const completed_results = await Promise.all(operations);
-
-     t.deepEqual(completed_results, ['result_0', 'result_1', 'result_2']);
-
-     // Check statistics
-     const stats = lane.get_stats();
-     t.is(stats.total_operations, 3);
-     t.is(stats.completed_operations, 3);
-     t.is(stats.batches_processed, 1);
-   } finally {
-     await lane.shutdown();
-   }
- });
-
- test('ProcessingLane should process partial batch on timeout', async (t) => {
-   const lane = new ProcessingLane({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_id: 0
-   });
-
-   try {
-     const operation_fn = async () => 'timeout_result';
-     const context = { test: 'timeout_operation' };
-
-     // Add single operation (less than batch size)
-     const result_promise = lane.add_operation({ operation_fn, context });
-
-     // Wait for timeout to trigger processing
-     const result = await result_promise;
-
-     t.is(result, 'timeout_result');
-
-     const stats = lane.get_stats();
-     t.is(stats.total_operations, 1);
-     t.is(stats.completed_operations, 1);
-     t.is(stats.batches_processed, 1);
-   } finally {
-     await lane.shutdown();
-   }
- });
-
- test('ProcessingLane should handle operation failures correctly', async (t) => {
-   const lane = new ProcessingLane({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_id: 0
-   });
-
-   try {
-     const error_message = 'Test operation error';
-     const operation_fn = async () => {
-       throw new Error(error_message);
-     };
-
-     const error = await t.throwsAsync(
-       lane.add_operation({ operation_fn, context: {} })
-     );
-
-     t.is(error.message, error_message);
-
-     const stats = lane.get_stats();
-     t.is(stats.total_operations, 1);
-     t.is(stats.failed_operations, 1);
-     t.is(stats.completed_operations, 0);
-   } finally {
-     await lane.shutdown();
-   }
- });
-
- test('ProcessingLane should flush batch manually', async (t) => {
-   const lane = new ProcessingLane({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_id: 0
-   });
-
-   try {
-     const operation_fn = async () => 'flush_result';
-
-     // Add operation but don't wait for timeout
-     const result_promise = lane.add_operation({ operation_fn, context: {} });
-
-     // Manually flush the batch
-     await lane.flush_batch();
-
-     const result = await result_promise;
-     t.is(result, 'flush_result');
-   } finally {
-     await lane.shutdown();
-   }
- });
-
- test('ProcessingLane should reject operations during shutdown', async (t) => {
-   const lane = new ProcessingLane({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_id: 0
-   });
-
-   // Start shutdown
-   const shutdown_promise = lane.shutdown();
-
-   // Try to add operation during shutdown
-   const error = await t.throwsAsync(
-     lane.add_operation({
-       operation_fn: async () => 'should_not_execute',
-       context: {}
-     })
-   );
-
-   t.is(error.message, 'Processing lane shutting down');
-
-   await shutdown_promise;
- });
-
- test('BatchedWriteQueue should create with correct configuration', (t) => {
-   const queue = new BatchedWriteQueue({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_count: 2,
-     queue_limit: 100
-   });
-
-   t.is(queue.batch_size, 3);
-   t.is(queue.batch_timeout, 50);
-   t.is(queue.lane_count, 2);
-   t.is(queue.lanes.length, 2);
-   t.is(queue.queue_limit, 100);
- });
-
- test('BatchedWriteQueue should distribute operations across lanes consistently', async (t) => {
-   const queue = new BatchedWriteQueue({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_count: 2,
-     queue_limit: 100
-   });
-
-   try {
-     const operations = [];
-
-     // Create operations with different contexts
-     for (let i = 0; i < 10; i++) {
-       const operation_fn = async () => `result_${i}`;
-       const context = {
-         collection: 'test_collection',
-         document_id: `doc_${i % 3}` // This should create consistent distribution
-       };
-
-       operations.push(queue.enqueue_write_operation(operation_fn, context));
-     }
-
-     // Wait for all operations to complete
-     const results = await Promise.all(operations);
-
-     t.is(results.length, 10);
-
-     // Check that operations were distributed across lanes
-     const stats = queue.get_stats();
-     t.is(stats.total_operations, 10);
-     t.is(stats.completed_operations, 10);
-
-     // Verify lane distribution exists and totals correctly
-     t.truthy(stats.lane_distribution);
-     t.is(Array.isArray(stats.lane_distribution), true);
-     const total_distributed = stats.lane_distribution.reduce((sum, count) => sum + count, 0);
-     t.is(total_distributed, 10);
-   } finally {
-     await queue.shutdown();
-   }
- });
-
- test('BatchedWriteQueue should maintain backward compatibility with WriteQueue API', async (t) => {
-   const queue = new BatchedWriteQueue({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_count: 2,
-     queue_limit: 100
-   });
-
-   try {
-     const operation_fn = async () => 'compatible_result';
-     const context = { test: 'compatibility' };
-
-     // Test the main API method
-     const result = await queue.enqueue_write_operation(operation_fn, context);
-     t.is(result, 'compatible_result');
-
-     // Test statistics format
-     const stats = queue.get_stats();
-     t.is(typeof stats.total_operations, 'number');
-     t.is(typeof stats.completed_operations, 'number');
-     t.is(typeof stats.failed_operations, 'number');
-     t.is(typeof stats.current_queue_depth, 'number');
-     t.is(typeof stats.max_queue_depth, 'number');
-     t.is(typeof stats.avg_wait_time_ms, 'number');
-     t.is(typeof stats.avg_processing_time_ms, 'number');
-     t.is(typeof stats.success_rate, 'number');
-   } finally {
-     await queue.shutdown();
-   }
- });
-
- test('BatchedWriteQueue should clear statistics correctly', async (t) => {
-   const queue = new BatchedWriteQueue({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_count: 2,
-     queue_limit: 100
-   });
-
-   try {
-     // Add some operations first
-     queue.stats.total_operations = 10;
-     queue.stats.completed_operations = 8;
-     queue.stats.failed_operations = 2;
-
-     queue.clear_stats();
-
-     const stats = queue.get_stats();
-     t.is(stats.total_operations, 0);
-     t.is(stats.completed_operations, 0);
-     t.is(stats.failed_operations, 0);
-   } finally {
-     await queue.shutdown();
-   }
- });
-
- test('BatchedWriteQueue should flush all batches correctly', async (t) => {
-   const queue = new BatchedWriteQueue({
-     batch_size: 3,
-     batch_timeout: 50,
-     lane_count: 2,
-     queue_limit: 100
-   });
-
-   try {
-     const operations = [];
-
-     // Add operations to different lanes
-     for (let i = 0; i < 4; i++) {
-       const operation_fn = async () => `flush_result_${i}`;
-       const context = { collection: 'test', document_id: `doc_${i}` };
-
-       operations.push(queue.enqueue_write_operation(operation_fn, context));
-     }
-
-     // Flush all batches
-     await queue.flush_all_batches();
-
-     // All operations should complete
-     const results = await Promise.all(operations);
-     t.is(results.length, 4);
-   } finally {
-     await queue.shutdown();
-   }
- });
-
- test.afterEach(async () => {
-   await shutdown_batched_write_queue();
- });
-
- test('BatchedWriteQueue singleton should create singleton instance', (t) => {
-   const queue1 = get_batched_write_queue();
-   const queue2 = get_batched_write_queue();
-
-   t.is(queue1, queue2);
- });
-
- test('BatchedWriteQueue singleton should shutdown correctly', async (t) => {
-   const queue = get_batched_write_queue();
-   t.truthy(queue);
-
-   await shutdown_batched_write_queue();
-
-   // Getting queue again should create new instance
-   const new_queue = get_batched_write_queue();
-   t.not(new_queue, queue);
-
-   await shutdown_batched_write_queue();
- });
-
- test('BatchedWriteQueue should handle high throughput operations', async (t) => {
-   const queue = new BatchedWriteQueue({
-     batch_size: 50,
-     batch_timeout: 10,
-     lane_count: 4
-   });
-
-   try {
-     const operation_count = 500; // Reduced for faster testing
-     const operations = [];
-     const start_time = Date.now();
-
-     // Create many fast operations
-     for (let i = 0; i < operation_count; i++) {
-       const operation_fn = async () => `result_${i}`;
-       const context = {
-         collection: 'perf_test',
-         document_id: `doc_${i % 100}` // Distribute across 100 different documents
-       };
-
-       operations.push(queue.enqueue_write_operation(operation_fn, context));
-     }
-
-     // Wait for all operations to complete
-     const results = await Promise.all(operations);
-     const end_time = Date.now();
-
-     t.is(results.length, operation_count);
-
-     const duration_ms = end_time - start_time;
-     const throughput = Math.round(operation_count / (duration_ms / 1000));
-
-     console.log(`Processed ${operation_count} operations in ${duration_ms}ms (${throughput} ops/sec)`);
-
-     // Verify statistics
-     const stats = queue.get_stats();
-     t.is(stats.total_operations, operation_count);
-     t.is(stats.completed_operations, operation_count);
-     t.is(stats.success_rate, 100);
-
-   } finally {
-     await queue.shutdown();
-   }
- });
-
- test('BatchedWriteQueue should demonstrate batching efficiency', async (t) => {
-   const queue = new BatchedWriteQueue({
-     batch_size: 100,
-     batch_timeout: 5,
-     lane_count: 4
-   });
-
-   try {
-     const operation_count = 300; // Reduced for faster testing
-     const operations = [];
-
-     for (let i = 0; i < operation_count; i++) {
-       const operation_fn = async () => `batch_result_${i}`;
-       const context = { collection: 'batch_test', document_id: `doc_${i}` };
-
-       operations.push(queue.enqueue_write_operation(operation_fn, context));
-     }
-
-     await Promise.all(operations);
-
-     const stats = queue.get_stats();
-
-     // Should have processed significantly fewer batches than operations
-     t.true(stats.total_batches_processed < operation_count);
-     t.true(stats.avg_batch_size > 1);
-
-     console.log(`Batching efficiency: ${operation_count} operations in ${stats.total_batches_processed} batches (avg ${stats.avg_batch_size} ops/batch)`);
-
-   } finally {
-     await queue.shutdown();
-   }
- });
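
For reference, the API surface this removed test file exercised can be reconstructed from its assertions. The sketch below is illustrative only, not the package's shipping API: the batched_write_queue.js module is deleted in canary.2272, the import path simply mirrors the one used in the tests above, and the comments about lane selection are inferred from the "distribute operations across lanes consistently" test.

```js
// Minimal usage sketch reconstructed from the removed batched_write_queue.test.js.
// Option names and methods mirror the assertions above; the module itself no
// longer ships in this version, so the import path is illustrative.
import BatchedWriteQueue from '../../../src/server/lib/batched_write_queue.js';

const queue = new BatchedWriteQueue({
  batch_size: 50,    // a lane's batch runs once 50 operations are queued...
  batch_timeout: 10, // ...or after 10ms, whichever comes first
  lane_count: 4,     // lane picked consistently from collection/document_id (per the tests)
  queue_limit: 100
});

// Each write is an async function plus a context object used for lane selection.
const result = await queue.enqueue_write_operation(
  async () => 'saved',
  { collection: 'posts', document_id: 'abc123' }
);

const stats = queue.get_stats(); // total_operations, completed_operations, success_rate, ...
await queue.flush_all_batches(); // force any partially filled batches to run
await queue.shutdown();

console.log(result, stats.success_rate);
```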
package/tests/server/lib/write_queue_integration.test.js
@@ -1,186 +0,0 @@
- /**
-  * @fileoverview Integration tests for write queue backward compatibility.
-  * Ensures the batched write queue maintains complete API compatibility.
-  */
-
- import test from 'ava';
- import { get_write_queue, shutdown_write_queue, set_batched_queue_enabled } from '../../../src/server/lib/write_queue.js';
-
- test.afterEach(async () => {
-   await shutdown_write_queue();
- });
-
- test('WriteQueue should use batched queue by default', async (t) => {
-   const write_queue = get_write_queue();
-
-   // Should be using the wrapper
-   t.is(write_queue.constructor.name, 'WriteQueueWrapper');
-
-   // API should work identically
-   const result = await write_queue.enqueue_write_operation(
-     async () => 'test_result',
-     { test: 'context' }
-   );
-
-   t.is(result, 'test_result');
- });
-
- test('WriteQueue should maintain backward compatible statistics format', async (t) => {
-   const write_queue = get_write_queue();
-
-   // Add some operations
-   await write_queue.enqueue_write_operation(async () => 'result1', {});
-   await write_queue.enqueue_write_operation(async () => 'result2', {});
-
-   const stats = write_queue.get_stats();
-
-   // Check all expected properties exist with correct types
-   t.is(typeof stats.total_operations, 'number');
-   t.is(typeof stats.completed_operations, 'number');
-   t.is(typeof stats.failed_operations, 'number');
-   t.is(typeof stats.current_queue_depth, 'number');
-   t.is(typeof stats.max_queue_depth, 'number');
-   t.is(typeof stats.avg_wait_time_ms, 'number');
-   t.is(typeof stats.avg_processing_time_ms, 'number');
-   t.is(typeof stats.success_rate, 'number');
-
-   // Verify values
-   t.is(stats.total_operations, 2);
-   t.is(stats.completed_operations, 2);
-   t.is(stats.success_rate, 100);
- });
-
- test('WriteQueue should support traditional queue when batched is disabled', async (t) => {
-   // Disable batched queue
-   set_batched_queue_enabled(false);
-
-   const write_queue = get_write_queue();
-
-   // Should be using traditional WriteQueue
-   t.is(write_queue.constructor.name, 'WriteQueue');
-
-   // API should work identically
-   const result = await write_queue.enqueue_write_operation(
-     async () => 'traditional_result',
-     { test: 'traditional' }
-   );
-
-   t.is(result, 'traditional_result');
-
-   // Re-enable for other tests
-   set_batched_queue_enabled(true);
- });
-
- test('WriteQueue should handle errors consistently', async (t) => {
-   const write_queue = get_write_queue();
-
-   const error_message = 'Test error';
-
-   const error = await t.throwsAsync(
-     write_queue.enqueue_write_operation(
-       async () => {
-         throw new Error(error_message);
-       },
-       { test: 'error_handling' }
-     )
-   );
-
-   t.is(error.message, error_message);
-
-   const stats = write_queue.get_stats();
-   t.is(stats.failed_operations, 1);
- });
-
- test('WriteQueue should clear statistics correctly', async (t) => {
-   const write_queue = get_write_queue();
-
-   // Add some operations
-   await write_queue.enqueue_write_operation(async () => 'result', {});
-
-   let stats = write_queue.get_stats();
-   t.is(stats.total_operations, 1);
-
-   // Clear stats
-   write_queue.clear_stats();
-
-   stats = write_queue.get_stats();
-   t.is(stats.total_operations, 0);
-   t.is(stats.completed_operations, 0);
- });
-
- test('WriteQueue should shutdown gracefully', async (t) => {
-   const write_queue = get_write_queue();
-
-   // Add operation
-   await write_queue.enqueue_write_operation(async () => 'result', {});
-
-   // Shutdown should complete without errors
-   await shutdown_write_queue();
-
-   // Getting queue again should create new instance
-   const new_queue = get_write_queue();
-   t.not(new_queue, write_queue);
-
-   await shutdown_write_queue();
- });
-
- test('WriteQueue should handle concurrent operations correctly', async (t) => {
-   const write_queue = get_write_queue();
-   const operation_count = 50; // Reduced for faster testing
-   const operations = [];
-
-   // Create many concurrent operations
-   for (let i = 0; i < operation_count; i++) {
-     operations.push(
-       write_queue.enqueue_write_operation(
-         async () => `concurrent_result_${i}`,
-         { operation_id: i }
-       )
-     );
-   }
-
-   // Wait for all to complete
-   const results = await Promise.all(operations);
-
-   t.is(results.length, operation_count);
-
-   // Verify all results are unique and correct
-   const expected_results = Array.from({ length: operation_count }, (_, i) => `concurrent_result_${i}`);
-   results.sort();
-   expected_results.sort();
-
-   t.deepEqual(results, expected_results);
-
-   const stats = write_queue.get_stats();
-   t.is(stats.total_operations, operation_count);
-   t.is(stats.completed_operations, operation_count);
-   t.is(stats.success_rate, 100);
- });
-
- test('WriteQueue should maintain operation ordering within same context', async (t) => {
-   const write_queue = get_write_queue();
-   const results = [];
-
-   // Create operations that will go to the same lane (same collection/document)
-   const operations = [];
-   for (let i = 0; i < 10; i++) {
-     operations.push(
-       write_queue.enqueue_write_operation(
-         async () => {
-           results.push(i);
-           return `ordered_result_${i}`;
-         },
-         {
-           collection: 'test_collection',
-           document_id: 'same_document' // Same context = same lane
-         }
-       )
-     );
-   }
-
-   await Promise.all(operations);
-
-   // Results should be in order for same lane
-   // Note: This test verifies that operations with same context maintain order
-   t.is(results.length, 10);
- });
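
These removed integration tests covered the compatibility wrapper in write_queue.js, which this release trims by 135 lines. The following is a minimal sketch of the surface those tests relied on, reconstructed from the assertions above and assuming the same export names; the set_batched_queue_enabled toggle and WriteQueueWrapper behaviour are removed in canary.2272, so this only documents the old usage, not the current module.

```js
// Sketch of the backward-compatibility surface exercised by the removed
// write_queue_integration.test.js. These exports were trimmed from
// write_queue.js in this release; shown here only as reconstructed usage.
import {
  get_write_queue,
  shutdown_write_queue,
  set_batched_queue_enabled
} from '../../../src/server/lib/write_queue.js';

set_batched_queue_enabled(true);       // default: route writes through the batched queue
const write_queue = get_write_queue(); // singleton; recreated after shutdown_write_queue()

// Identical call shape whether the batched or traditional queue backs it.
const value = await write_queue.enqueue_write_operation(
  async () => 'test_result',
  { collection: 'test_collection', document_id: 'same_document' }
);

const stats = write_queue.get_stats(); // total_operations, failed_operations, avg_wait_time_ms, ...
write_queue.clear_stats();
await shutdown_write_queue();

console.log(value, stats.success_rate);
```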