@joystick.js/db-canary 0.0.0-canary.2269 → 0.0.0-canary.2271
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/server/lib/auto_index_manager.js +1 -1
- package/dist/server/lib/write_queue.js +1 -1
- package/package.json +2 -2
- package/src/server/lib/auto_index_manager.js +11 -4
- package/src/server/lib/write_queue.js +2 -137
- package/tests/server/integration/auto_indexing_integration.test.js +1 -1
- package/dist/server/lib/batched_write_queue.js +0 -1
- package/dist/server/lib/processing_lane.js +0 -1
- package/src/server/lib/batched_write_queue.js +0 -331
- package/src/server/lib/processing_lane.js +0 -417
- package/tests/server/lib/batched_write_queue.test.js +0 -402
- package/tests/server/lib/write_queue_integration.test.js +0 -186
|
@@ -1 +1 @@
|
|
|
1
|
-
import{get_database as I}from"./query_engine.js";import{create_index as M,get_indexes as q,drop_index as w}from"./index_manager.js";import{get_settings as S}from"./load_settings.js";import A from"./logger.js";const{create_context_logger:
|
|
1
|
+
// Minified dist build of src/server/lib/auto_index_manager.js — generated artifact, do not hand-edit; regenerate from source.
// Purpose: tracks per-collection/per-field query statistics and automatically creates/drops indexes on the "default" db.
// Mangled-name map (taken from the export statement at the end of this bundle):
//   k = initialize_auto_index_database (opens the "auto_indexes" sub-db, clears in-memory maps, loads persisted state, starts timer)
//   g = get_auto_index_database (throws if k was never called), f = settings loader (falls back to built-in defaults on any error)
//   E = record_query, B/C = save/load query stats, p/N = save/load auto-index metadata
//   b = candidate selection (sorted by priority = slow_query_count*2 + query_count/frequency_threshold, via J/K)
//   F = create one automatic index, D = evaluate candidates (caps at 5 per pass, 100ms pause between creations)
//   L = drop indexes unused for cleanup_unused_after_hours, U = record_index_usage
//   O/j = start/stop the 60s (6e4 ms) evaluation timer, V/W = stats getters, X = force_index_evaluation,
//   Y = remove_automatic_indexes, Z = cleanup_auto_index_database (also wipes persisted keys).
// NOTE(review): all state lives in module-level singletons — u = db handle, d = query-stats Map, c = index-metadata Map,
// m = interval handle — so this module is per-process global; presumably callers rely on that. Verify against src/ on changes.
import{get_database as I}from"./query_engine.js";import{create_index as M,get_indexes as q,drop_index as w}from"./index_manager.js";import{get_settings as S}from"./load_settings.js";import A from"./logger.js";const{create_context_logger:_}=A("auto_index_manager");let u=null,d=new Map,c=new Map,m=null;const k=()=>(u||(u=I().openDB("auto_indexes",{create:!0}),d.clear(),c.clear(),N(),C(),O()),u),g=()=>{if(!u)throw new Error("Auto index database not initialized. Call initialize_auto_index_database first.");return u},f=()=>{try{return S().auto_indexing||{enabled:!0,frequency_threshold:100,performance_threshold_ms:50,max_auto_indexes_per_collection:10,monitoring_window_hours:24,cleanup_unused_after_hours:168,excluded_fields:["_id","created_at"],included_collections:["*"],excluded_collections:[]}}catch{return{enabled:!0,frequency_threshold:100,performance_threshold_ms:50,max_auto_indexes_per_collection:10,monitoring_window_hours:24,cleanup_unused_after_hours:168,excluded_fields:["_id","created_at"],included_collections:["*"],excluded_collections:[]}}},z=e=>{const o=f();return o.excluded_collections.includes(e)?!1:o.included_collections.includes("*")?!0:o.included_collections.includes(e)},Q=e=>!f().excluded_fields.includes(e),R=e=>{const o=[];if(!e||typeof e!="object")return o;for(const[t,a]of Object.entries(e))Q(t)&&o.push(t);return o},E=(e,o,t,a=!1,n=null)=>{const r=_(),s=f();if(!s.enabled||!z(e)||!u)return;const i=R(o),x=new Date;d.has(e)||d.set(e,new Map);const y=d.get(e);for(const h of i){y.has(h)||y.set(h,{query_count:0,total_time_ms:0,avg_time_ms:0,last_queried:x,slow_query_count:0,used_index_count:0});const l=y.get(h);l.query_count++,l.total_time_ms+=t,l.avg_time_ms=l.total_time_ms/l.query_count,l.last_queried=x,t>s.performance_threshold_ms&&l.slow_query_count++,a&&(n===h||n===null)&&l.used_index_count++}r.debug("Query recorded for auto-indexing analysis",{collection:e,fields:i,execution_time_ms:t,used_index:a,indexed_field:n})},B=()=>{const e=_();try{const 
o=g(),t={};for(const[a,n]of d.entries()){t[a]={};for(const[r,s]of n.entries())t[a][r]={...s,last_queried:s.last_queried.toISOString()}}o.put("query_stats",t),e.debug("Query statistics saved to database")}catch(o){e.error("Failed to save query statistics",{error:o.message})}},C=()=>{const e=_();try{const t=g().get("query_stats");if(t){d.clear();for(const[a,n]of Object.entries(t)){const r=new Map;for(const[s,i]of Object.entries(n))r.set(s,{...i,last_queried:new Date(i.last_queried)});d.set(a,r)}e.debug("Query statistics loaded from database")}}catch(o){e.error("Failed to load query statistics",{error:o.message})}},p=()=>{const e=_();try{const o=g(),t={};for(const[a,n]of c.entries()){t[a]={};for(const[r,s]of n.entries())t[a][r]={...s,created_at:s.created_at.toISOString(),last_used:s.last_used?s.last_used.toISOString():null}}o.put("auto_index_metadata",t),e.debug("Auto index metadata saved to database")}catch(o){e.error("Failed to save auto index metadata",{error:o.message})}},N=()=>{const e=_();try{const t=g().get("auto_index_metadata");if(t){c.clear();for(const[a,n]of Object.entries(t)){const r=new Map;for(const[s,i]of Object.entries(n))r.set(s,{...i,created_at:new Date(i.created_at),last_used:i.last_used?new Date(i.last_used):null});c.set(a,r)}e.debug("Auto index metadata loaded from database")}}catch(o){e.error("Failed to load auto index metadata",{error:o.message})}},v=(e,o)=>{try{const t=c.get(e);return!!(t&&t.has(o))}catch{return!1}},P=(e,o)=>q("default",e).filter(n=>v(e,n.field)).length>=o.max_auto_indexes_per_collection,T=(e,o)=>new Date-e.last_queried<=o,G=(e,o)=>o.some(t=>t.field===e),H=(e,o)=>{const t=e.query_count>=o.frequency_threshold,a=e.avg_time_ms>=o.performance_threshold_ms,n=e.slow_query_count>0;return t||a&&n},J=(e,o)=>e.slow_query_count*2+e.query_count/o.frequency_threshold,K=(e,o,t,a)=>({collection:e,field:o,stats:{...t},priority:J(t,a)}),b=()=>{const e=f(),o=[],t=e.monitoring_window_hours*60*60*1e3;for(const[a,n]of 
d.entries()){if(P(a,e))continue;const r=q("default",a);for(const[s,i]of n.entries())T(i,t)&&(G(s,r)||H(i,e)&&o.push(K(a,s,i,e)))}return o.sort((a,n)=>n.priority-a.priority)},F=async(e,o,t)=>{const a=_();try{return await M("default",e,o,{sparse:!0}),c.has(e)||c.set(e,new Map),c.get(e).set(o,{created_at:new Date,query_count_at_creation:t.query_count,avg_performance_improvement_ms:0,last_used:null,usage_count:0,auto_created:!0}),p(),a.info("Automatic index created",{collection:e,field:o,query_count:t.query_count,avg_time_ms:t.avg_time_ms,slow_query_count:t.slow_query_count}),!0}catch(n){return a.error("Failed to create automatic index",{collection:e,field:o,error:n.message}),!1}},D=async()=>{const e=_();if(f().enabled)try{const t=b();if(t.length===0){e.debug("No automatic index candidates found");return}e.info("Evaluating automatic index candidates",{candidate_count:t.length});for(const a of t.slice(0,5))await F(a.collection,a.field,a.stats)&&await new Promise(r=>setTimeout(r,100))}catch(t){e.error("Failed to evaluate automatic indexes",{error:t.message})}},L=async()=>{const e=_(),o=f(),t=new Date,a=o.cleanup_unused_after_hours*60*60*1e3;try{for(const[n,r]of c.entries())for(const[s,i]of r.entries())i.last_used?t-i.last_used>a&&(await w("default",n,s),r.delete(s),e.info("Removed unused automatic index",{collection:n,field:s,last_used:i.last_used,usage_count:i.usage_count})):t-i.created_at>a&&(await w("default",n,s),r.delete(s),e.info("Removed unused automatic index",{collection:n,field:s,created_at:i.created_at,usage_count:i.usage_count}));p()}catch(n){e.error("Failed to cleanup unused indexes",{error:n.message})}},U=(e,o)=>{const t=c.get(e);if(t&&t.has(o)){const a=t.get(o);a.last_used=new Date,a.usage_count++}},O=()=>{const e=f();m&&clearInterval(m),e.enabled&&(m=setInterval(async()=>{B(),await D(),await L()},6e4))},j=()=>{m&&(clearInterval(m),m=null)},V=(e=null)=>{if(e){const t=d.get(e);if(!t)return{};const a={};for(const[n,r]of t.entries())a[n]={...r};return a}const 
o={};for(const[t,a]of d.entries()){o[t]={};for(const[n,r]of a.entries())o[t][n]={...r}}return o},W=()=>{const e={total_auto_indexes:0,collections:{}};for(const[o,t]of c.entries()){e.collections[o]={};for(const[a,n]of t.entries())e.total_auto_indexes++,e.collections[o][a]={...n}}return e},X=async(e=null)=>{const o=_();try{if(e){const t=b().filter(a=>a.collection===e);for(const a of t)await F(a.collection,a.field,a.stats);o.info("Forced index evaluation completed",{collection:e,candidates_processed:t.length})}else await D(),o.info("Forced index evaluation completed for all collections");return{acknowledged:!0}}catch(t){throw o.error("Failed to force index evaluation",{error:t.message}),t}},Y=async(e,o=null)=>{const t=_();try{const a=c.get(e);if(!a)return{acknowledged:!0,removed_count:0};const n=o||Array.from(a.keys());let r=0;for(const s of n)a.has(s)&&(await w("default",e,s),a.delete(s),r++,t.info("Removed automatic index",{collection:e,field:s}));return p(),{acknowledged:!0,removed_count:r}}catch(a){throw t.error("Failed to remove automatic indexes",{collection:e,field_names:o,error:a.message}),a}},Z=()=>{if(j(),d.clear(),c.clear(),u){try{u.remove("query_stats"),u.remove("auto_index_metadata")}catch{}u=null}};export{Z as cleanup_auto_index_database,X as force_index_evaluation,g as get_auto_index_database,W as get_auto_index_statistics,V as get_query_statistics,k as initialize_auto_index_database,v as is_auto_created_index,U as record_index_usage,E as record_query,Y as remove_automatic_indexes,O as start_evaluation_timer,j as stop_evaluation_timer};
|
|
@@ -1 +1 @@
|
|
|
1
|
-
import
|
|
1
|
+
// Minified dist build of src/server/lib/write_queue.js — generated artifact, do not hand-edit; regenerate from source.
// Purpose: serializes write operations through a single FIFO queue to keep writes sequential.
// Mangled-name map: class h = WriteQueue; p = get_write_queue (lazy process-wide singleton r); d = shutdown_write_queue.
// Behavior visible in this bundle:
//   - enqueue_write_operation(fn, context) rejects immediately with "Server shutting down" once shutdown has begun,
//     otherwise queues {operation_fn, context, resolve, reject, enqueued_at, id} and kicks process_queue().
//   - process_queue drains one operation at a time (this.processing acts as a re-entrancy guard) and records
//     wait/processing times plus completed/failed counts in this.stats.
//   - execute_with_retry: up to 3 attempts; retries only errors matching MDB_MAP_FULL / MDB_TXN_FULL /
//     MDB_READERS_FULL / EAGAIN / EBUSY (by message substring or .code), with exponential backoff
//     (100ms base, 10% jitter, capped at 5e3 ms); rethrows the last error otherwise.
//   - shutdown() waits (50ms polls) for the in-flight operation, then rejects everything still queued.
// NOTE(review): process_queue() is fire-and-forget from enqueue — its promise is intentionally not awaited.
import _ from"./logger.js";const{create_context_logger:u}=_("write_queue");class h{constructor(){this.queue=[],this.processing=!1,this.shutting_down=!1,this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:0,max_queue_depth:0,total_wait_time_ms:0,total_processing_time_ms:0},this.log=u()}async enqueue_write_operation(t,s={}){if(this.shutting_down)throw new Error("Server shutting down");return new Promise((o,i)=>{if(this.shutting_down){i(new Error("Server shutting down"));return}const e={operation_fn:t,context:s,resolve:o,reject:i,enqueued_at:Date.now(),id:this.generate_operation_id()};this.queue.push(e),this.stats.total_operations++,this.stats.current_queue_depth=this.queue.length,this.stats.current_queue_depth>this.stats.max_queue_depth&&(this.stats.max_queue_depth=this.stats.current_queue_depth),this.log.debug("Write operation enqueued",{operation_id:e.id,queue_depth:this.stats.current_queue_depth,context:s}),this.process_queue()})}async process_queue(){if(!(this.processing||this.queue.length===0||this.shutting_down)){for(this.processing=!0;this.queue.length>0&&!this.shutting_down;){const t=this.queue.shift();this.stats.current_queue_depth=this.queue.length;const s=Date.now()-t.enqueued_at;this.stats.total_wait_time_ms+=s;const o=Date.now();try{this.log.debug("Processing write operation",{operation_id:t.id,wait_time_ms:s,context:t.context});const i=await this.execute_with_retry(t.operation_fn,t.context),e=Date.now()-o;this.stats.total_processing_time_ms+=e,this.stats.completed_operations++,this.log.debug("Write operation completed",{operation_id:t.id,wait_time_ms:s,processing_time_ms:e,context:t.context}),t.resolve(i)}catch(i){const e=Date.now()-o;this.stats.total_processing_time_ms+=e,this.stats.failed_operations++,this.log.error("Write operation failed",{operation_id:t.id,wait_time_ms:s,processing_time_ms:e,error:i.message,context:t.context}),t.reject(i)}}this.processing=!1}}async execute_with_retry(t,s,o=3){let 
i=null;for(let e=1;e<=o;e++)try{return await t()}catch(n){if(i=n,this.is_retryable_error(n)&&e<o){const a=this.calculate_backoff_delay(e);this.log.warn("Write operation failed, retrying",{attempt:e,max_retries:o,delay_ms:a,error:n.message,context:s}),await this.sleep(a);continue}break}throw i}is_retryable_error(t){return["MDB_MAP_FULL","MDB_TXN_FULL","MDB_READERS_FULL","EAGAIN","EBUSY"].some(o=>t.message.includes(o)||t.code===o)}calculate_backoff_delay(t){const i=100*Math.pow(2,t-1),e=Math.random()*.1*i;return Math.min(i+e,5e3)}sleep(t){return new Promise(s=>setTimeout(s,t))}generate_operation_id(){return`${Date.now()}-${Math.random().toString(36).substr(2,9)}`}get_stats(){const t=this.stats.completed_operations>0?Math.round(this.stats.total_wait_time_ms/this.stats.completed_operations):0,s=this.stats.completed_operations>0?Math.round(this.stats.total_processing_time_ms/this.stats.completed_operations):0;return{...this.stats,avg_wait_time_ms:t,avg_processing_time_ms:s,success_rate:this.stats.total_operations>0?Math.round(this.stats.completed_operations/this.stats.total_operations*100):100}}clear_stats(){this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:this.queue.length,max_queue_depth:0,total_wait_time_ms:0,total_processing_time_ms:0}}async shutdown(){for(this.log.info("Shutting down write queue",{pending_operations:this.queue.length,currently_processing:this.processing}),this.shutting_down=!0;this.processing;)await this.sleep(50);this.queue.forEach(t=>{t.reject(new Error("Server shutting down"))}),this.queue=[],this.processing=!1}}let r=null;const p=()=>(r||(r=new h),r),d=async()=>{r&&(await r.shutdown(),r=null)};export{p as get_write_queue,d as shutdown_write_queue};
|
package/package.json
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@joystick.js/db-canary",
|
|
3
3
|
"type": "module",
|
|
4
|
-
"version": "0.0.0-canary.
|
|
5
|
-
"canary_version": "0.0.0-canary.
|
|
4
|
+
"version": "0.0.0-canary.2271",
|
|
5
|
+
"canary_version": "0.0.0-canary.2270",
|
|
6
6
|
"description": "JoystickDB - A minimalist database server for the Joystick framework",
|
|
7
7
|
"main": "./dist/server/index.js",
|
|
8
8
|
"scripts": {
|
|
@@ -34,6 +34,10 @@ const initialize_auto_index_database = () => {
|
|
|
34
34
|
const main_db = get_database();
|
|
35
35
|
auto_index_db = main_db.openDB('auto_indexes', { create: true });
|
|
36
36
|
|
|
37
|
+
// Clear in-memory data first to ensure clean state
|
|
38
|
+
query_stats.clear();
|
|
39
|
+
auto_index_metadata.clear();
|
|
40
|
+
|
|
37
41
|
load_auto_index_metadata();
|
|
38
42
|
load_query_stats();
|
|
39
43
|
start_evaluation_timer();
|
|
@@ -713,18 +717,21 @@ const remove_automatic_indexes = async (collection_name, field_names = null) =>
|
|
|
713
717
|
|
|
714
718
|
const cleanup_auto_index_database = () => {
|
|
715
719
|
stop_evaluation_timer();
|
|
720
|
+
|
|
721
|
+
// Clear in-memory data first
|
|
722
|
+
query_stats.clear();
|
|
723
|
+
auto_index_metadata.clear();
|
|
724
|
+
|
|
716
725
|
if (auto_index_db) {
|
|
717
726
|
try {
|
|
718
|
-
// Clear database entries
|
|
727
|
+
// Clear database entries to prevent data persistence between tests
|
|
719
728
|
auto_index_db.remove('query_stats');
|
|
720
729
|
auto_index_db.remove('auto_index_metadata');
|
|
721
730
|
} catch (error) {
|
|
722
731
|
// Ignore errors during cleanup
|
|
723
732
|
}
|
|
733
|
+
auto_index_db = null;
|
|
724
734
|
}
|
|
725
|
-
query_stats.clear();
|
|
726
|
-
auto_index_metadata.clear();
|
|
727
|
-
auto_index_db = null;
|
|
728
735
|
};
|
|
729
736
|
|
|
730
737
|
export {
|
|
@@ -2,13 +2,9 @@
|
|
|
2
2
|
* @fileoverview Write queue system for JoystickDB providing serialized write operations.
|
|
3
3
|
* Ensures write operations are processed sequentially to maintain data consistency and ACID properties.
|
|
4
4
|
* Includes retry logic, backoff strategies, performance monitoring, and graceful shutdown capabilities.
|
|
5
|
-
*
|
|
6
|
-
* Now supports both traditional sequential processing and high-performance batched processing
|
|
7
|
-
* with automatic fallback and transparent integration.
|
|
8
5
|
*/
|
|
9
6
|
|
|
10
7
|
import create_logger from './logger.js';
|
|
11
|
-
import { get_batched_write_queue, shutdown_batched_write_queue } from './batched_write_queue.js';
|
|
12
8
|
|
|
13
9
|
const { create_context_logger } = create_logger('write_queue');
|
|
14
10
|
|
|
@@ -316,26 +312,13 @@ class WriteQueue {
|
|
|
316
312
|
/** @type {WriteQueue|null} Singleton instance of the write queue */
|
|
317
313
|
let write_queue_instance = null;
|
|
318
314
|
|
|
319
|
-
/** @type {boolean} Whether to use batched write queue for improved performance */
|
|
320
|
-
let use_batched_queue = true;
|
|
321
|
-
|
|
322
315
|
/**
|
|
323
316
|
* Gets the singleton write queue instance, creating it if it doesn't exist.
|
|
324
|
-
* Automatically uses batched write queue for improved performance while maintaining
|
|
325
|
-
* complete backward compatibility.
|
|
326
|
-
* @param {Object} [options] - Configuration options for batched queue
|
|
327
317
|
* @returns {WriteQueue} The write queue instance
|
|
328
318
|
*/
|
|
329
|
-
export const get_write_queue = (
|
|
319
|
+
export const get_write_queue = () => {
|
|
330
320
|
if (!write_queue_instance) {
|
|
331
|
-
|
|
332
|
-
// Use batched write queue with WriteQueue-compatible wrapper
|
|
333
|
-
const batched_queue = get_batched_write_queue(options);
|
|
334
|
-
write_queue_instance = new WriteQueueWrapper(batched_queue);
|
|
335
|
-
} else {
|
|
336
|
-
// Use traditional sequential write queue
|
|
337
|
-
write_queue_instance = new WriteQueue();
|
|
338
|
-
}
|
|
321
|
+
write_queue_instance = new WriteQueue();
|
|
339
322
|
}
|
|
340
323
|
return write_queue_instance;
|
|
341
324
|
};
|
|
@@ -349,122 +332,4 @@ export const shutdown_write_queue = async () => {
|
|
|
349
332
|
await write_queue_instance.shutdown();
|
|
350
333
|
write_queue_instance = null;
|
|
351
334
|
}
|
|
352
|
-
|
|
353
|
-
// Also shutdown batched queue if it was used
|
|
354
|
-
if (use_batched_queue) {
|
|
355
|
-
await shutdown_batched_write_queue();
|
|
356
|
-
}
|
|
357
335
|
};
|
|
358
|
-
|
|
359
|
-
/**
|
|
360
|
-
* Enables or disables batched write queue usage.
|
|
361
|
-
* @param {boolean} enabled - Whether to use batched queue
|
|
362
|
-
*/
|
|
363
|
-
export const set_batched_queue_enabled = (enabled) => {
|
|
364
|
-
use_batched_queue = enabled;
|
|
365
|
-
};
|
|
366
|
-
|
|
367
|
-
/**
|
|
368
|
-
* Wrapper class that provides WriteQueue-compatible API while using BatchedWriteQueue internally.
|
|
369
|
-
* Ensures complete backward compatibility with existing code.
|
|
370
|
-
*/
|
|
371
|
-
class WriteQueueWrapper {
|
|
372
|
-
/**
|
|
373
|
-
* Creates a new WriteQueueWrapper instance.
|
|
374
|
-
* @param {BatchedWriteQueue} batched_queue - The batched write queue instance to wrap
|
|
375
|
-
*/
|
|
376
|
-
constructor(batched_queue) {
|
|
377
|
-
this.batched_queue = batched_queue;
|
|
378
|
-
this.log = create_context_logger('write_queue_wrapper');
|
|
379
|
-
}
|
|
380
|
-
|
|
381
|
-
/**
|
|
382
|
-
* Enqueues a write operation using the batched queue.
|
|
383
|
-
* Maintains identical API to original WriteQueue.
|
|
384
|
-
* @param {function} operation_fn - Async function that performs the write operation
|
|
385
|
-
* @param {Object} [context={}] - Additional context for logging and debugging
|
|
386
|
-
* @returns {Promise<*>} Promise that resolves with the operation result
|
|
387
|
-
*/
|
|
388
|
-
async enqueue_write_operation(operation_fn, context = {}) {
|
|
389
|
-
return this.batched_queue.enqueue_write_operation(operation_fn, context);
|
|
390
|
-
}
|
|
391
|
-
|
|
392
|
-
/**
|
|
393
|
-
* Gets queue statistics with backward-compatible format.
|
|
394
|
-
* @returns {Object} Statistics object matching original WriteQueue format
|
|
395
|
-
*/
|
|
396
|
-
get_stats() {
|
|
397
|
-
const batched_stats = this.batched_queue.get_stats();
|
|
398
|
-
|
|
399
|
-
// Return stats in original WriteQueue format for backward compatibility
|
|
400
|
-
return {
|
|
401
|
-
total_operations: batched_stats.total_operations,
|
|
402
|
-
completed_operations: batched_stats.completed_operations,
|
|
403
|
-
failed_operations: batched_stats.failed_operations,
|
|
404
|
-
current_queue_depth: batched_stats.current_queue_depth,
|
|
405
|
-
max_queue_depth: batched_stats.max_queue_depth,
|
|
406
|
-
avg_wait_time_ms: batched_stats.avg_wait_time_ms,
|
|
407
|
-
avg_processing_time_ms: batched_stats.avg_processing_time_ms,
|
|
408
|
-
success_rate: batched_stats.success_rate
|
|
409
|
-
};
|
|
410
|
-
}
|
|
411
|
-
|
|
412
|
-
/**
|
|
413
|
-
* Clears all statistics.
|
|
414
|
-
*/
|
|
415
|
-
clear_stats() {
|
|
416
|
-
this.batched_queue.clear_stats();
|
|
417
|
-
}
|
|
418
|
-
|
|
419
|
-
/**
|
|
420
|
-
* Gracefully shuts down the wrapper and underlying batched queue.
|
|
421
|
-
* @returns {Promise<void>} Promise that resolves when shutdown is complete
|
|
422
|
-
*/
|
|
423
|
-
async shutdown() {
|
|
424
|
-
await this.batched_queue.shutdown();
|
|
425
|
-
}
|
|
426
|
-
|
|
427
|
-
/**
|
|
428
|
-
* Determines if an error is retryable based on error patterns.
|
|
429
|
-
* Exposed for backward compatibility with existing tests.
|
|
430
|
-
* @param {Error} error - Error to check
|
|
431
|
-
* @returns {boolean} True if error is retryable, false otherwise
|
|
432
|
-
*/
|
|
433
|
-
is_retryable_error(error) {
|
|
434
|
-
const retryable_patterns = [
|
|
435
|
-
'MDB_MAP_FULL',
|
|
436
|
-
'MDB_TXN_FULL',
|
|
437
|
-
'MDB_READERS_FULL',
|
|
438
|
-
'EAGAIN',
|
|
439
|
-
'EBUSY'
|
|
440
|
-
];
|
|
441
|
-
|
|
442
|
-
return retryable_patterns.some(pattern =>
|
|
443
|
-
error.message.includes(pattern) || error.code === pattern
|
|
444
|
-
);
|
|
445
|
-
}
|
|
446
|
-
|
|
447
|
-
/**
|
|
448
|
-
* Calculates exponential backoff delay with jitter for retry attempts.
|
|
449
|
-
* Exposed for backward compatibility with existing tests.
|
|
450
|
-
* @param {number} attempt - Current attempt number (1-based)
|
|
451
|
-
* @returns {number} Delay in milliseconds
|
|
452
|
-
*/
|
|
453
|
-
calculate_backoff_delay(attempt) {
|
|
454
|
-
const base_delay = 100;
|
|
455
|
-
const max_delay = 5000;
|
|
456
|
-
const exponential_delay = base_delay * Math.pow(2, attempt - 1);
|
|
457
|
-
const jitter = Math.random() * 0.1 * exponential_delay;
|
|
458
|
-
|
|
459
|
-
return Math.min(exponential_delay + jitter, max_delay);
|
|
460
|
-
}
|
|
461
|
-
|
|
462
|
-
/**
|
|
463
|
-
* Generates a unique operation ID for tracking.
|
|
464
|
-
* Exposed for backward compatibility with existing tests.
|
|
465
|
-
* @returns {string} Unique operation identifier
|
|
466
|
-
*/
|
|
467
|
-
generate_operation_id() {
|
|
468
|
-
return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
|
469
|
-
}
|
|
470
|
-
}
|
|
@@ -186,7 +186,7 @@ test('should accumulate statistics across multiple operations', async (t) => {
|
|
|
186
186
|
const stats = await admin('get_query_stats', { collection: 'employees' });
|
|
187
187
|
|
|
188
188
|
t.is(stats.name.query_count, 3);
|
|
189
|
-
t.true(stats.name.total_time_ms
|
|
189
|
+
t.true(stats.name.total_time_ms >= 0);
|
|
190
190
|
});
|
|
191
191
|
|
|
192
192
|
test('should handle empty result sets', async (t) => {
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
import l from"./processing_lane.js";import d from"./logger.js";const{create_context_logger:p}=d("batched_write_queue");class h{constructor(t={}){this.batch_size=t.batch_size||100,this.batch_timeout=t.batch_timeout||10,this.lane_count=t.lane_count||4,this.queue_limit=t.queue_limit||1e4,this.overflow_strategy=t.overflow_strategy||"block",this.lanes=Array(this.lane_count).fill(null).map((s,o)=>new l({batch_size:this.batch_size,batch_timeout:this.batch_timeout,lane_id:o})),this.shutting_down=!1,this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:0,max_queue_depth:0,total_wait_time_ms:0,total_processing_time_ms:0,lane_distribution:new Array(this.lane_count).fill(0)},this.log=p()}async enqueue_write_operation(t,s={}){if(this.shutting_down)throw new Error("Server shutting down");if(this.get_current_queue_depth()>=this.queue_limit){if(this.overflow_strategy==="drop")throw new Error("Queue full, operation dropped");this.overflow_strategy==="block"&&await this.wait_for_queue_space()}const i={operation_fn:t,context:s,enqueued_at:Date.now()},_=this.get_lane_for_operation(i),e=this.lanes[_];this.stats.total_operations++,this.stats.lane_distribution[_]++,this.update_queue_depth_stats(),this.log.debug("Operation enqueued to lane",{lane_id:_,total_operations:this.stats.total_operations,context:s});try{const a=await e.add_operation(i);this.stats.completed_operations++;const r=Date.now()-i.enqueued_at;return this.stats.total_wait_time_ms+=r,a}catch(a){throw this.stats.failed_operations++,a}}get_lane_for_operation(t){const s=t.context||{},o=s.collection||"",i=s.document_id||s.id||"",_=`${o}:${i}`;let e=0;for(let r=0;r<_.length;r++){const c=_.charCodeAt(r);e=(e<<5)-e+c,e=e&e}return Math.abs(e)%this.lane_count}get_current_queue_depth(){return 
this.lanes.reduce((t,s)=>t+s.stats.current_batch_size,0)}update_queue_depth_stats(){this.stats.current_queue_depth=this.get_current_queue_depth(),this.stats.current_queue_depth>this.stats.max_queue_depth&&(this.stats.max_queue_depth=this.stats.current_queue_depth)}async wait_for_queue_space(){const o=Date.now();for(;this.get_current_queue_depth()>=this.queue_limit;){if(Date.now()-o>5e3)throw new Error("Queue full, timeout waiting for space");if(await new Promise(i=>setTimeout(i,10)),this.shutting_down)throw new Error("Server shutting down")}}async flush_all_batches(){const t=this.lanes.map(s=>s.flush_batch());await Promise.all(t)}get_stats(){const t=this.lanes.map(e=>e.get_stats()),s=this.stats.completed_operations>0?Math.round(this.stats.total_wait_time_ms/this.stats.completed_operations):0,o=t.reduce((e,a)=>e+a.total_batch_processing_time_ms,0),i=this.stats.completed_operations>0?Math.round(o/this.stats.completed_operations):0,_=this.stats.lane_distribution.map((e,a)=>({lane_id:a,operations:e,percentage:this.stats.total_operations>0?Math.round(e/this.stats.total_operations*100):0}));return{total_operations:this.stats.total_operations,completed_operations:this.stats.completed_operations,failed_operations:this.stats.failed_operations,current_queue_depth:this.get_current_queue_depth(),max_queue_depth:this.stats.max_queue_depth,avg_wait_time_ms:s,avg_processing_time_ms:i,success_rate:this.stats.total_operations>0?Math.round(this.stats.completed_operations/this.stats.total_operations*100):100,lane_count:this.lane_count,batch_size:this.batch_size,batch_timeout:this.batch_timeout,lane_distribution:this.stats.lane_distribution,lane_utilization:_,lane_stats:t,total_batches_processed:t.reduce((e,a)=>e+a.batches_processed,0),avg_batch_size:t.length>0?Math.round(t.reduce((e,a)=>e+a.avg_batch_size,0)/t.length):0}}clear_stats(){this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:this.get_current_queue_depth(),max_queue_depth:0,total_wai
t_time_ms:0,total_processing_time_ms:0,lane_distribution:new Array(this.lane_count).fill(0)},this.lanes.forEach(t=>t.clear_stats())}async shutdown(){this.log.info("Shutting down batched write queue",{pending_operations:this.get_current_queue_depth(),lane_count:this.lane_count}),this.shutting_down=!0,await this.flush_all_batches();const t=this.lanes.map(s=>s.shutdown());await Promise.all(t),this.log.info("Batched write queue shutdown complete")}}let n=null;const g=u=>(n||(n=new h(u)),n),f=async()=>{n&&(await n.shutdown(),n=null)};var q=h;export{q as default,g as get_batched_write_queue,f as shutdown_batched_write_queue};
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
import c from"./logger.js";const{create_context_logger:n}=c("processing_lane");class o{constructor(t={}){this.batch_size=t.batch_size||100,this.batch_timeout=t.batch_timeout||10,this.lane_id=t.lane_id||0,this.current_batch=[],this.processing=!1,this.shutting_down=!1,this.batch_timeout_handle=null,this.stats={total_operations:0,completed_operations:0,failed_operations:0,batches_processed:0,current_batch_size:0,max_batch_size:0,total_batch_wait_time_ms:0,total_batch_processing_time_ms:0},this.log=n(`lane_${this.lane_id}`)}async add_operation(t){if(this.shutting_down)throw new Error("Processing lane shutting down");return new Promise((a,s)=>{if(this.shutting_down){s(new Error("Processing lane shutting down"));return}const e={...t,resolve:a,reject:s,enqueued_at:Date.now(),id:this.generate_operation_id()};this.current_batch.push(e),this.stats.total_operations++,this.stats.current_batch_size=this.current_batch.length,this.stats.current_batch_size>this.stats.max_batch_size&&(this.stats.max_batch_size=this.stats.current_batch_size),this.log.debug("Operation added to batch",{lane_id:this.lane_id,operation_id:e.id,batch_size:this.stats.current_batch_size,context:t.context}),this.current_batch.length>=this.batch_size?this.process_current_batch():this.current_batch.length===1&&this.start_batch_timeout()})}start_batch_timeout(){this.batch_timeout_handle&&clearTimeout(this.batch_timeout_handle),this.batch_timeout_handle=setTimeout(()=>{this.current_batch.length>0&&!this.processing&&(this.log.debug("Batch timeout triggered",{lane_id:this.lane_id,batch_size:this.current_batch.length}),this.process_current_batch())},this.batch_timeout)}async process_current_batch(){if(this.processing||this.current_batch.length===0||this.shutting_down)return;this.batch_timeout_handle&&(clearTimeout(this.batch_timeout_handle),this.batch_timeout_handle=null),this.processing=!0;const t=[...this.current_batch];this.current_batch=[],this.stats.current_batch_size=0;const 
a=Date.now(),s=Math.min(...t.map(i=>i.enqueued_at)),e=a-s;this.stats.total_batch_wait_time_ms+=e,this.stats.batches_processed++,this.log.debug("Processing batch",{lane_id:this.lane_id,batch_size:t.length,batch_wait_time_ms:e});try{const i=await this.execute_batch_transaction(t),h=Date.now()-a;this.stats.total_batch_processing_time_ms+=h,this.stats.completed_operations+=t.length,t.forEach((_,r)=>{_.resolve(i[r])}),this.log.debug("Batch completed successfully",{lane_id:this.lane_id,batch_size:t.length,batch_wait_time_ms:e,batch_processing_time_ms:h})}catch(i){const h=Date.now()-a;this.stats.total_batch_processing_time_ms+=h,this.stats.failed_operations+=t.length,t.forEach(_=>{_.reject(i)}),this.log.error("Batch processing failed",{lane_id:this.lane_id,batch_size:t.length,batch_wait_time_ms:e,batch_processing_time_ms:h,error:i.message})}this.processing=!1,this.current_batch.length>0&&(this.current_batch.length>=this.batch_size?setImmediate(()=>this.process_current_batch()):this.start_batch_timeout())}async execute_batch_transaction(t){const a=[];for(const s of t)try{const e=await this.execute_with_retry(s.operation_fn,s.context);a.push(e)}catch(e){throw e}return a}async execute_with_retry(t,a,s=3){let e=null;for(let i=1;i<=s;i++)try{return await t()}catch(h){if(e=h,this.is_retryable_error(h)&&i<s){const _=this.calculate_backoff_delay(i);this.log.warn("Operation failed, retrying",{lane_id:this.lane_id,attempt:i,max_retries:s,delay_ms:_,error:h.message,context:a}),await this.sleep(_);continue}break}throw e}is_retryable_error(t){return["MDB_MAP_FULL","MDB_TXN_FULL","MDB_READERS_FULL","EAGAIN","EBUSY"].some(s=>t.message.includes(s)||t.code===s)}calculate_backoff_delay(t){const e=100*Math.pow(2,t-1),i=Math.random()*.1*e;return Math.min(e+i,5e3)}sleep(t){return new Promise(a=>setTimeout(a,t))}generate_operation_id(){return`lane_${this.lane_id}_${Date.now()}_${Math.random().toString(36).substr(2,9)}`}get_stats(){const 
t=this.stats.batches_processed>0?Math.round(this.stats.total_batch_wait_time_ms/this.stats.batches_processed):0,a=this.stats.batches_processed>0?Math.round(this.stats.total_batch_processing_time_ms/this.stats.batches_processed):0,s=this.stats.batches_processed>0?Math.round(this.stats.completed_operations/this.stats.batches_processed):0;return{lane_id:this.lane_id,...this.stats,avg_batch_wait_time_ms:t,avg_batch_processing_time_ms:a,avg_batch_size:s,success_rate:this.stats.total_operations>0?Math.round(this.stats.completed_operations/this.stats.total_operations*100):100}}clear_stats(){this.stats={total_operations:0,completed_operations:0,failed_operations:0,batches_processed:0,current_batch_size:this.current_batch.length,max_batch_size:0,total_batch_wait_time_ms:0,total_batch_processing_time_ms:0}}async flush_batch(){this.current_batch.length>0&&!this.processing&&await this.process_current_batch()}async shutdown(){for(this.log.info("Shutting down processing lane",{lane_id:this.lane_id,pending_operations:this.current_batch.length,currently_processing:this.processing}),this.shutting_down=!0,this.batch_timeout_handle&&(clearTimeout(this.batch_timeout_handle),this.batch_timeout_handle=null),this.current_batch.length>0&&!this.processing&&await this.process_current_batch();this.processing;)await new Promise(t=>setTimeout(t,10));this.current_batch.forEach(t=>{t.reject(new Error("Processing lane shutting down"))}),this.current_batch=[],this.processing=!1}}var b=o;export{b as default};
|
|
@@ -1,331 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* @fileoverview Batched write queue system with parallel processing lanes.
|
|
3
|
-
* Provides 3-4x performance improvement by batching operations and processing
|
|
4
|
-
* them in parallel lanes while maintaining backward compatibility.
|
|
5
|
-
*/
|
|
6
|
-
|
|
7
|
-
import ProcessingLane from './processing_lane.js';
|
|
8
|
-
import create_logger from './logger.js';
|
|
9
|
-
|
|
10
|
-
const { create_context_logger } = create_logger('batched_write_queue');
|
|
11
|
-
|
|
12
|
-
/**
|
|
13
|
-
* Batched write queue that distributes operations across parallel processing lanes.
|
|
14
|
-
* Maintains backward compatibility with existing WriteQueue API while providing
|
|
15
|
-
* significant performance improvements through batching and parallelization.
|
|
16
|
-
*/
|
|
17
|
-
class BatchedWriteQueue {
|
|
18
|
-
/**
|
|
19
|
-
* Creates a new BatchedWriteQueue instance.
|
|
20
|
-
* @param {Object} options - Configuration options
|
|
21
|
-
* @param {number} [options.batch_size=100] - Operations per batch
|
|
22
|
-
* @param {number} [options.batch_timeout=10] - Max wait time in milliseconds
|
|
23
|
-
* @param {number} [options.lane_count=4] - Number of parallel processing lanes
|
|
24
|
-
* @param {number} [options.queue_limit=10000] - Max queued operations
|
|
25
|
-
* @param {string} [options.overflow_strategy='block'] - 'block' | 'drop' | 'expand'
|
|
26
|
-
*/
|
|
27
|
-
constructor(options = {}) {
|
|
28
|
-
this.batch_size = options.batch_size || 100;
|
|
29
|
-
this.batch_timeout = options.batch_timeout || 10;
|
|
30
|
-
this.lane_count = options.lane_count || 4;
|
|
31
|
-
this.queue_limit = options.queue_limit || 10000;
|
|
32
|
-
this.overflow_strategy = options.overflow_strategy || 'block';
|
|
33
|
-
|
|
34
|
-
/** @type {Array<ProcessingLane>} Array of processing lanes */
|
|
35
|
-
this.lanes = Array(this.lane_count).fill(null).map((_, index) =>
|
|
36
|
-
new ProcessingLane({
|
|
37
|
-
batch_size: this.batch_size,
|
|
38
|
-
batch_timeout: this.batch_timeout,
|
|
39
|
-
lane_id: index
|
|
40
|
-
})
|
|
41
|
-
);
|
|
42
|
-
|
|
43
|
-
/** @type {boolean} Whether queue is shutting down */
|
|
44
|
-
this.shutting_down = false;
|
|
45
|
-
|
|
46
|
-
/** @type {Object} Overall queue statistics */
|
|
47
|
-
this.stats = {
|
|
48
|
-
total_operations: 0,
|
|
49
|
-
completed_operations: 0,
|
|
50
|
-
failed_operations: 0,
|
|
51
|
-
current_queue_depth: 0,
|
|
52
|
-
max_queue_depth: 0,
|
|
53
|
-
total_wait_time_ms: 0,
|
|
54
|
-
total_processing_time_ms: 0,
|
|
55
|
-
lane_distribution: new Array(this.lane_count).fill(0)
|
|
56
|
-
};
|
|
57
|
-
|
|
58
|
-
this.log = create_context_logger();
|
|
59
|
-
}
|
|
60
|
-
|
|
61
|
-
/**
|
|
62
|
-
* Enqueues a write operation for batched processing.
|
|
63
|
-
* Maintains backward compatibility with existing WriteQueue API.
|
|
64
|
-
* @param {function} operation_fn - Async function that performs the write operation
|
|
65
|
-
* @param {Object} [context={}] - Additional context for logging and debugging
|
|
66
|
-
* @returns {Promise<*>} Promise that resolves with the operation result
|
|
67
|
-
* @throws {Error} When server is shutting down or queue is full
|
|
68
|
-
*/
|
|
69
|
-
async enqueue_write_operation(operation_fn, context = {}) {
|
|
70
|
-
if (this.shutting_down) {
|
|
71
|
-
throw new Error('Server shutting down');
|
|
72
|
-
}
|
|
73
|
-
|
|
74
|
-
// Check queue limits
|
|
75
|
-
const current_depth = this.get_current_queue_depth();
|
|
76
|
-
if (current_depth >= this.queue_limit) {
|
|
77
|
-
if (this.overflow_strategy === 'drop') {
|
|
78
|
-
throw new Error('Queue full, operation dropped');
|
|
79
|
-
} else if (this.overflow_strategy === 'block') {
|
|
80
|
-
// Wait for queue to have space (simple backpressure)
|
|
81
|
-
await this.wait_for_queue_space();
|
|
82
|
-
}
|
|
83
|
-
// 'expand' strategy allows unlimited growth
|
|
84
|
-
}
|
|
85
|
-
|
|
86
|
-
const operation = {
|
|
87
|
-
operation_fn,
|
|
88
|
-
context,
|
|
89
|
-
enqueued_at: Date.now()
|
|
90
|
-
};
|
|
91
|
-
|
|
92
|
-
// Select lane for this operation
|
|
93
|
-
const lane_index = this.get_lane_for_operation(operation);
|
|
94
|
-
const selected_lane = this.lanes[lane_index];
|
|
95
|
-
|
|
96
|
-
// Update statistics
|
|
97
|
-
this.stats.total_operations++;
|
|
98
|
-
this.stats.lane_distribution[lane_index]++;
|
|
99
|
-
this.update_queue_depth_stats();
|
|
100
|
-
|
|
101
|
-
this.log.debug('Operation enqueued to lane', {
|
|
102
|
-
lane_id: lane_index,
|
|
103
|
-
total_operations: this.stats.total_operations,
|
|
104
|
-
context: context
|
|
105
|
-
});
|
|
106
|
-
|
|
107
|
-
try {
|
|
108
|
-
const result = await selected_lane.add_operation(operation);
|
|
109
|
-
|
|
110
|
-
// Update completion statistics
|
|
111
|
-
this.stats.completed_operations++;
|
|
112
|
-
const wait_time_ms = Date.now() - operation.enqueued_at;
|
|
113
|
-
this.stats.total_wait_time_ms += wait_time_ms;
|
|
114
|
-
|
|
115
|
-
return result;
|
|
116
|
-
} catch (error) {
|
|
117
|
-
this.stats.failed_operations++;
|
|
118
|
-
throw error;
|
|
119
|
-
}
|
|
120
|
-
}
|
|
121
|
-
|
|
122
|
-
/**
|
|
123
|
-
* Determines which lane should process the given operation.
|
|
124
|
-
* Uses consistent hashing based on operation context to ensure
|
|
125
|
-
* operations for the same collection/document go to the same lane.
|
|
126
|
-
* @param {Object} operation - Operation to assign to a lane
|
|
127
|
-
* @returns {number} Lane index (0 to lane_count-1)
|
|
128
|
-
*/
|
|
129
|
-
get_lane_for_operation(operation) {
|
|
130
|
-
// Extract collection and document identifiers for consistent hashing
|
|
131
|
-
const context = operation.context || {};
|
|
132
|
-
const collection = context.collection || '';
|
|
133
|
-
const document_id = context.document_id || context.id || '';
|
|
134
|
-
|
|
135
|
-
// Create hash key for consistent distribution
|
|
136
|
-
const hash_key = `${collection}:${document_id}`;
|
|
137
|
-
|
|
138
|
-
// Simple hash function for consistent distribution
|
|
139
|
-
let hash = 0;
|
|
140
|
-
for (let i = 0; i < hash_key.length; i++) {
|
|
141
|
-
const char = hash_key.charCodeAt(i);
|
|
142
|
-
hash = ((hash << 5) - hash) + char;
|
|
143
|
-
hash = hash & hash; // Convert to 32-bit integer
|
|
144
|
-
}
|
|
145
|
-
|
|
146
|
-
// Ensure positive value and map to lane index
|
|
147
|
-
const lane_index = Math.abs(hash) % this.lane_count;
|
|
148
|
-
|
|
149
|
-
return lane_index;
|
|
150
|
-
}
|
|
151
|
-
|
|
152
|
-
/**
|
|
153
|
-
* Gets the current total queue depth across all lanes.
|
|
154
|
-
* @returns {number} Total number of queued operations
|
|
155
|
-
*/
|
|
156
|
-
get_current_queue_depth() {
|
|
157
|
-
return this.lanes.reduce((total, lane) => {
|
|
158
|
-
return total + lane.stats.current_batch_size;
|
|
159
|
-
}, 0);
|
|
160
|
-
}
|
|
161
|
-
|
|
162
|
-
/**
|
|
163
|
-
* Updates queue depth statistics.
|
|
164
|
-
*/
|
|
165
|
-
update_queue_depth_stats() {
|
|
166
|
-
this.stats.current_queue_depth = this.get_current_queue_depth();
|
|
167
|
-
if (this.stats.current_queue_depth > this.stats.max_queue_depth) {
|
|
168
|
-
this.stats.max_queue_depth = this.stats.current_queue_depth;
|
|
169
|
-
}
|
|
170
|
-
}
|
|
171
|
-
|
|
172
|
-
/**
|
|
173
|
-
* Waits for queue to have available space (backpressure mechanism).
|
|
174
|
-
* @returns {Promise<void>} Promise that resolves when space is available
|
|
175
|
-
*/
|
|
176
|
-
async wait_for_queue_space() {
|
|
177
|
-
const check_interval = 10; // ms
|
|
178
|
-
const max_wait_time = 5000; // 5 seconds max wait
|
|
179
|
-
const start_time = Date.now();
|
|
180
|
-
|
|
181
|
-
while (this.get_current_queue_depth() >= this.queue_limit) {
|
|
182
|
-
if (Date.now() - start_time > max_wait_time) {
|
|
183
|
-
throw new Error('Queue full, timeout waiting for space');
|
|
184
|
-
}
|
|
185
|
-
|
|
186
|
-
await new Promise(resolve => setTimeout(resolve, check_interval));
|
|
187
|
-
|
|
188
|
-
if (this.shutting_down) {
|
|
189
|
-
throw new Error('Server shutting down');
|
|
190
|
-
}
|
|
191
|
-
}
|
|
192
|
-
}
|
|
193
|
-
|
|
194
|
-
/**
|
|
195
|
-
* Forces processing of all current batches across all lanes.
|
|
196
|
-
* Useful for ensuring all operations are processed before shutdown.
|
|
197
|
-
* @returns {Promise<void>} Promise that resolves when all batches are flushed
|
|
198
|
-
*/
|
|
199
|
-
async flush_all_batches() {
|
|
200
|
-
const flush_promises = this.lanes.map(lane => lane.flush_batch());
|
|
201
|
-
await Promise.all(flush_promises);
|
|
202
|
-
}
|
|
203
|
-
|
|
204
|
-
/**
|
|
205
|
-
* Gets comprehensive queue statistics including per-lane metrics.
|
|
206
|
-
* Maintains backward compatibility with existing WriteQueue stats format.
|
|
207
|
-
* @returns {Object} Statistics object with performance metrics
|
|
208
|
-
*/
|
|
209
|
-
get_stats() {
|
|
210
|
-
// Aggregate lane statistics
|
|
211
|
-
const lane_stats = this.lanes.map(lane => lane.get_stats());
|
|
212
|
-
|
|
213
|
-
// Calculate overall averages
|
|
214
|
-
const avg_wait_time = this.stats.completed_operations > 0
|
|
215
|
-
? Math.round(this.stats.total_wait_time_ms / this.stats.completed_operations)
|
|
216
|
-
: 0;
|
|
217
|
-
|
|
218
|
-
const total_processing_time = lane_stats.reduce((sum, stats) =>
|
|
219
|
-
sum + stats.total_batch_processing_time_ms, 0);
|
|
220
|
-
|
|
221
|
-
const avg_processing_time = this.stats.completed_operations > 0
|
|
222
|
-
? Math.round(total_processing_time / this.stats.completed_operations)
|
|
223
|
-
: 0;
|
|
224
|
-
|
|
225
|
-
// Calculate lane utilization
|
|
226
|
-
const lane_utilization = this.stats.lane_distribution.map((count, index) => ({
|
|
227
|
-
lane_id: index,
|
|
228
|
-
operations: count,
|
|
229
|
-
percentage: this.stats.total_operations > 0
|
|
230
|
-
? Math.round((count / this.stats.total_operations) * 100)
|
|
231
|
-
: 0
|
|
232
|
-
}));
|
|
233
|
-
|
|
234
|
-
return {
|
|
235
|
-
// Backward compatible stats
|
|
236
|
-
total_operations: this.stats.total_operations,
|
|
237
|
-
completed_operations: this.stats.completed_operations,
|
|
238
|
-
failed_operations: this.stats.failed_operations,
|
|
239
|
-
current_queue_depth: this.get_current_queue_depth(),
|
|
240
|
-
max_queue_depth: this.stats.max_queue_depth,
|
|
241
|
-
avg_wait_time_ms: avg_wait_time,
|
|
242
|
-
avg_processing_time_ms: avg_processing_time,
|
|
243
|
-
success_rate: this.stats.total_operations > 0
|
|
244
|
-
? Math.round((this.stats.completed_operations / this.stats.total_operations) * 100)
|
|
245
|
-
: 100,
|
|
246
|
-
|
|
247
|
-
// Batched queue specific stats
|
|
248
|
-
lane_count: this.lane_count,
|
|
249
|
-
batch_size: this.batch_size,
|
|
250
|
-
batch_timeout: this.batch_timeout,
|
|
251
|
-
lane_distribution: this.stats.lane_distribution,
|
|
252
|
-
lane_utilization,
|
|
253
|
-
lane_stats,
|
|
254
|
-
|
|
255
|
-
// Performance metrics
|
|
256
|
-
total_batches_processed: lane_stats.reduce((sum, stats) => sum + stats.batches_processed, 0),
|
|
257
|
-
avg_batch_size: lane_stats.length > 0
|
|
258
|
-
? Math.round(lane_stats.reduce((sum, stats) => sum + stats.avg_batch_size, 0) / lane_stats.length)
|
|
259
|
-
: 0
|
|
260
|
-
};
|
|
261
|
-
}
|
|
262
|
-
|
|
263
|
-
/**
|
|
264
|
-
* Clears all statistics across the queue and all lanes.
|
|
265
|
-
*/
|
|
266
|
-
clear_stats() {
|
|
267
|
-
this.stats = {
|
|
268
|
-
total_operations: 0,
|
|
269
|
-
completed_operations: 0,
|
|
270
|
-
failed_operations: 0,
|
|
271
|
-
current_queue_depth: this.get_current_queue_depth(),
|
|
272
|
-
max_queue_depth: 0,
|
|
273
|
-
total_wait_time_ms: 0,
|
|
274
|
-
total_processing_time_ms: 0,
|
|
275
|
-
lane_distribution: new Array(this.lane_count).fill(0)
|
|
276
|
-
};
|
|
277
|
-
|
|
278
|
-
this.lanes.forEach(lane => lane.clear_stats());
|
|
279
|
-
}
|
|
280
|
-
|
|
281
|
-
/**
|
|
282
|
-
* Gracefully shuts down the batched write queue.
|
|
283
|
-
* Processes all remaining operations and shuts down all lanes.
|
|
284
|
-
* @returns {Promise<void>} Promise that resolves when shutdown is complete
|
|
285
|
-
*/
|
|
286
|
-
async shutdown() {
|
|
287
|
-
this.log.info('Shutting down batched write queue', {
|
|
288
|
-
pending_operations: this.get_current_queue_depth(),
|
|
289
|
-
lane_count: this.lane_count
|
|
290
|
-
});
|
|
291
|
-
|
|
292
|
-
this.shutting_down = true;
|
|
293
|
-
|
|
294
|
-
// Flush all remaining batches
|
|
295
|
-
await this.flush_all_batches();
|
|
296
|
-
|
|
297
|
-
// Shutdown all lanes
|
|
298
|
-
const shutdown_promises = this.lanes.map(lane => lane.shutdown());
|
|
299
|
-
await Promise.all(shutdown_promises);
|
|
300
|
-
|
|
301
|
-
this.log.info('Batched write queue shutdown complete');
|
|
302
|
-
}
|
|
303
|
-
}
|
|
304
|
-
|
|
305
|
-
/** @type {BatchedWriteQueue|null} Singleton instance of the batched write queue */
|
|
306
|
-
let batched_write_queue_instance = null;
|
|
307
|
-
|
|
308
|
-
/**
|
|
309
|
-
* Gets the singleton batched write queue instance, creating it if it doesn't exist.
|
|
310
|
-
* @param {Object} [options] - Configuration options for new instance
|
|
311
|
-
* @returns {BatchedWriteQueue} The batched write queue instance
|
|
312
|
-
*/
|
|
313
|
-
export const get_batched_write_queue = (options) => {
|
|
314
|
-
if (!batched_write_queue_instance) {
|
|
315
|
-
batched_write_queue_instance = new BatchedWriteQueue(options);
|
|
316
|
-
}
|
|
317
|
-
return batched_write_queue_instance;
|
|
318
|
-
};
|
|
319
|
-
|
|
320
|
-
/**
|
|
321
|
-
* Shuts down the batched write queue and clears the singleton instance.
|
|
322
|
-
* @returns {Promise<void>} Promise that resolves when shutdown is complete
|
|
323
|
-
*/
|
|
324
|
-
export const shutdown_batched_write_queue = async () => {
|
|
325
|
-
if (batched_write_queue_instance) {
|
|
326
|
-
await batched_write_queue_instance.shutdown();
|
|
327
|
-
batched_write_queue_instance = null;
|
|
328
|
-
}
|
|
329
|
-
};
|
|
330
|
-
|
|
331
|
-
export default BatchedWriteQueue;
|