@joystick.js/db-canary 0.0.0-canary.2267 → 0.0.0-canary.2269
This diff shows the contents of publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
- package/dist/server/cluster/master.js +2 -2
- package/dist/server/lib/batched_write_queue.js +1 -0
- package/dist/server/lib/processing_lane.js +1 -0
- package/dist/server/lib/write_queue.js +1 -1
- package/package.json +2 -2
- package/src/server/cluster/master.js +6 -1
- package/src/server/lib/batched_write_queue.js +331 -0
- package/src/server/lib/processing_lane.js +417 -0
- package/src/server/lib/write_queue.js +137 -2
- package/tests/server/lib/batched_write_queue.test.js +402 -0
- package/tests/server/lib/write_queue_integration.test.js +186 -0
package/dist/server/cluster/master.js
CHANGED
@@ -1,4 +1,4 @@
-import n from"cluster";import h from"os";import{EventEmitter as d}from"events";import{writeFileSync as
+import n from"cluster";import h from"os";import{EventEmitter as d}from"events";import{writeFileSync as u}from"fs";import{load_settings as c,get_settings as p,get_port_configuration as l}from"../lib/load_settings.js";import{restore_backup as w,start_backup_schedule as g,stop_backup_schedule as f}from"../lib/backup_manager.js";import m from"../lib/logger.js";import{initialize_database as k,check_and_grow_map_size as y,cleanup_database as b}from"../lib/query_engine.js";import{setup_authentication as v,verify_password as q,initialize_auth_manager as D}from"../lib/auth_manager.js";import x from"../lib/operations/insert_one.js";import E from"../lib/operations/update_one.js";import T from"../lib/operations/delete_one.js";import S from"../lib/operations/delete_many.js";import z from"../lib/operations/bulk_write.js";import W from"../lib/operations/find_one.js";import F from"../lib/operations/find.js";import N from"../lib/operations/count_documents.js";import P from"../lib/operations/create_index.js";import O from"../lib/operations/drop_index.js";import R from"../lib/operations/get_indexes.js";import{start_http_server as A,stop_http_server as I,is_setup_required as j}from"../lib/http_server.js";import{is_development_mode as L,display_development_startup_message as M}from"../lib/development_mode.js";import{initialize_api_key_manager as $}from"../lib/api_key_manager.js";class H extends d{constructor(e={}){super(),this.workers=new Map,this.write_queue=[],this.processing_writes=!1,this.authenticated_sessions=new Map,this.worker_count=e.worker_count||h.cpus().length,this.port=e.port||1983,this.settings_file=e.settings_file||"settings.db.json",this.settings=null,this.pending_writes=new Map,this.write_id_counter=0,this.shutting_down=!1,this.master_id=`master_${Date.now()}_${Math.random()}`;const{create_context_logger:t}=m("master");this.log=t({port:this.port,worker_count:this.worker_count,master_id:this.master_id}),this.setup_master()}setup_master(){n.setupPrimary({exec:new URL("./index.js",import.meta.url).pathname,args:[],silent:!1}),n.on("exit",(e,t,s)=>{this.log.warn("Worker died",{worker_pid:e.process.pid,exit_code:t,signal:s}),this.handle_worker_death(e)}),n.on("message",(e,t)=>{this.handle_worker_message(e,t)})}get_database_path(){let e="./data";try{const t=p();t?.data_path&&(e=t.data_path)}catch{}return e}async initialize_core_systems(){const e=this.get_database_path();k(e),D(),await $(),this.log.info("Database and auth manager initialized")}async handle_startup_restore(){if(this.settings?.restore_from)try{this.log.info("Startup restore requested",{backup_filename:this.settings.restore_from});const e=await w(this.settings.restore_from);this.log.info("Startup restore completed",{backup_filename:this.settings.restore_from,duration_ms:e.duration_ms}),this.remove_restore_from_settings()}catch(e){this.log.error("Startup restore failed",{backup_filename:this.settings.restore_from,error:e.message}),this.log.info("Continuing with existing database after restore failure")}}remove_restore_from_settings(){const e={...this.settings};delete e.restore_from,u(this.settings_file,JSON.stringify(e,null,2)),this.settings=c(this.settings_file),this.log.info("Removed restore_from from settings after successful restore")}start_backup_scheduling(){if(this.settings?.s3)try{g(),this.log.info("Backup scheduling started")}catch(e){this.log.warn("Failed to start backup scheduling",{error:e.message})}}async start_setup_server(){if(j())try{const{http_port:e}=l();await A(e)&&this.log.info("HTTP setup server started",{http_port:e})}catch(e){this.log.warn("Failed to start HTTP setup server",{error:e.message})}}spawn_all_workers(){for(let e=0;e<this.worker_count;e++)this.spawn_worker()}display_development_message(){if(L()){const{tcp_port:e,http_port:t}=l();M(e,t)}}async start(){const e=Date.now();try{this.settings=c(this.settings_file),this.log.info("Settings loaded successfully",{settings_file:this.settings_file}),await this.initialize_core_systems(),await this.handle_startup_restore(),this.start_backup_scheduling(),await this.start_setup_server(),this.spawn_all_workers(),this.display_development_message();const t=Date.now()-e;this.log.info("Master process started successfully",{workers_spawned:this.worker_count,startup_duration_ms:t})}catch(t){this.log.error("Failed to start master process",{error:t.message}),process.exit(1)}}spawn_worker(){const e=Date.now();this.log.info("Spawning worker");const t=n.fork({WORKER_PORT:this.port,WORKER_SETTINGS:JSON.stringify(this.settings)});this.workers.set(t.id,{worker:t,connections:0,last_heartbeat:Date.now(),status:"starting"});const s=Date.now()-e;return this.log.info("Worker spawned successfully",{worker_id:t.id,worker_pid:t.process.pid,spawn_duration_ms:s}),t}handle_worker_death(e){this.workers.delete(e.id),this.shutting_down||(this.log.info("Respawning worker after death",{dead_worker_id:e.id,respawn_delay_ms:1e3}),setTimeout(()=>{this.spawn_worker()},1e3))}handle_worker_message(e,t){switch(t.type){case"worker_ready":this.handle_worker_ready_for_config(e,t);break;case"server_ready":this.handle_worker_server_ready(e,t);break;case"write_request":this.handle_write_request(e,t);break;case"auth_request":this.handle_auth_request(e,t);break;case"setup_request":this.handle_setup_request(e,t);break;case"connection_count":this.update_worker_connections(e,t);break;case"heartbeat":this.handle_worker_heartbeat(e,t);break;default:this.log.warn("Unknown message type received",{message_type:t.type,worker_id:e.id})}}handle_worker_ready_for_config(e,t){this.log.info("Worker ready for config, sending configuration",{worker_id:e.id,worker_pid:e.process.pid,master_id:this.master_id}),e.send({type:"config",data:{port:this.port,settings:this.settings,master_id:this.master_id}})}handle_worker_server_ready(e,t){const s=this.workers.get(e.id);s&&(s.status="ready",this.log.info("Worker server ready",{worker_id:e.id,worker_pid:e.process.pid}))}async handle_write_request(e,t){if(this.shutting_down){e.send({type:"write_response",data:{write_id:t.data.write_id,success:!1,error:"Server is shutting down"}});return}const{write_id:s,op_type:r,data:i,socket_id:o}=t.data;try{const a={write_id:s,worker_id:e.id,op_type:r,data:i,socket_id:o,timestamp:Date.now()};this.write_queue.push(a),this.process_write_queue()}catch(a){e.send({type:"write_response",data:{write_id:s,success:!1,error:a.message}})}}async process_write_queue(){if(!(this.processing_writes||this.write_queue.length===0)){for(this.processing_writes=!0;this.write_queue.length>0;){const e=this.write_queue.shift();await this.execute_write_operation(e)}this.processing_writes=!1,this.shutting_down&&this.write_queue.length===0&&this.emit("writes_completed")}}async execute_write_operation(e){const{write_id:t,worker_id:s,op_type:r,data:i,socket_id:o}=e,a=this.workers.get(s);if(!a){this.log.error("Worker not found for write operation",{worker_id:s});return}try{const _=await this.perform_database_operation(r,i);a.worker.send({type:"write_response",data:{write_id:t,success:!0,result:_}}),this.broadcast_write_notification(r,i,s)}catch(_){this.log.error("Write operation failed",{write_id:t,op_type:r,worker_id:s,error_message:_.message}),a.worker.send({type:"write_response",data:{write_id:t,success:!1,error:_.message}})}}async perform_database_operation(e,t){const s=Date.now();this.log.info("Executing database operation",{op_type:e});try{let r;const i=t.database||"default";switch(e){case"insert_one":r=await x(i,t.collection,t.document,t.options);break;case"update_one":r=await E(i,t.collection,t.filter,t.update,t.options);break;case"delete_one":r=await T(i,t.collection,t.filter,t.options);break;case"delete_many":r=await S(i,t.collection,t.filter,t.options);break;case"bulk_write":r=await z(i,t.collection,t.operations,t.options);break;case"find_one":r=await W(i,t.collection,t.filter,t.options);break;case"find":r=await F(i,t.collection,t.filter,t.options);break;case"count_documents":r=await N(i,t.collection,t.filter,t.options);break;case"create_index":r=await P(i,t.collection,t.field,t.options);break;case"drop_index":r=await O(i,t.collection,t.field);break;case"get_indexes":r=await R(i,t.collection);break;default:throw new Error(`Unsupported database operation: ${e}`)}const o=Date.now()-s;return this.log.log_operation(e,o,{result:r}),["find_one","find","count_documents","get_indexes"].includes(e)||setImmediate(()=>y()),r}catch(r){const i=Date.now()-s;throw this.log.error("Database operation failed",{op_type:e,duration_ms:i,error_message:r.message}),r}}broadcast_write_notification(e,t,s){const r={type:"write_notification",data:{op_type:e,data:t,timestamp:Date.now()}};for(const[i,o]of this.workers)i!==s&&o.status==="ready"&&o.worker.send(r)}async handle_auth_request(e,t){const{auth_id:s,socket_id:r,password:i}=t.data;try{const o=await q(i,"cluster_client");o&&this.authenticated_sessions.set(r,{authenticated_at:Date.now(),worker_id:e.id}),e.send({type:"auth_response",data:{auth_id:s,success:o,message:o?"Authentication successful":"Authentication failed"}})}catch(o){e.send({type:"auth_response",data:{auth_id:s,success:!1,message:`Authentication error: ${o.message}`}})}}handle_setup_request(e,t){const{setup_id:s,socket_id:r}=t.data;try{const i=v(),o=`===
 
 JoystickDB Setup
 
 Your database has been setup. Follow the instructions below carefully to avoid issues.
@@ -17,4 +17,4 @@ const client = joystickdb.client({
 });
 
 await client.ping();
-===`;e.send({type:"setup_response",data:{setup_id:s,success:!0,password:i,instructions:o,message:"Authentication setup completed successfully"}})}catch(i){e.send({type:"setup_response",data:{setup_id:s,success:!1,error:i.message}})}}update_worker_connections(e,t){const s=this.workers.get(e.id);s&&(s.connections=t.data.count)}handle_worker_heartbeat(e,t){const s=this.workers.get(e.id);s&&(s.last_heartbeat=Date.now())}get_cluster_stats(){const e={master_pid:process.pid,worker_count:this.workers.size,total_connections:0,write_queue_length:this.write_queue.length,authenticated_sessions:this.authenticated_sessions.size,workers:[]};for(const[t,s]of this.workers)e.total_connections+=s.connections,e.workers.push({id:t,pid:s.worker.process.pid,connections:s.connections,status:s.status,last_heartbeat:s.last_heartbeat});return e}async stop_http_server_gracefully(){try{await
+===`;e.send({type:"setup_response",data:{setup_id:s,success:!0,password:i,instructions:o,message:"Authentication setup completed successfully"}})}catch(i){e.send({type:"setup_response",data:{setup_id:s,success:!1,error:i.message}})}}update_worker_connections(e,t){const s=this.workers.get(e.id);s&&(s.connections=t.data.count)}handle_worker_heartbeat(e,t){const s=this.workers.get(e.id);s&&(s.last_heartbeat=Date.now())}get_cluster_stats(){const e={master_pid:process.pid,worker_count:this.workers.size,total_connections:0,write_queue_length:this.write_queue.length,authenticated_sessions:this.authenticated_sessions.size,workers:[]};for(const[t,s]of this.workers)e.total_connections+=s.connections,e.workers.push({id:t,pid:s.worker.process.pid,connections:s.connections,status:s.status,last_heartbeat:s.last_heartbeat});return e}async stop_http_server_gracefully(){try{await I(),this.log.info("HTTP server stopped")}catch(e){this.log.warn("Failed to stop HTTP server",{error:e.message})}}stop_backup_scheduling_gracefully(){try{f(),this.log.info("Backup scheduling stopped")}catch(e){this.log.warn("Failed to stop backup scheduling",{error:e.message})}}send_shutdown_signals(){for(const[e,t]of this.workers)try{t.worker.send({type:"shutdown"})}catch(s){this.log.warn("Error sending shutdown signal to worker",{worker_id:e,error:s.message})}}async wait_for_pending_writes(){this.write_queue.length!==0&&(this.log.info("Waiting for pending writes to complete",{pending_writes:this.write_queue.length}),await new Promise(e=>{const t=setTimeout(()=>{this.log.warn("Timeout waiting for writes to complete, proceeding with shutdown"),e()},process.env.NODE_ENV==="test"?1e3:5e3);this.once("writes_completed",()=>{clearTimeout(t),e()})}))}disconnect_all_workers(){for(const[e,t]of this.workers)try{t.worker.disconnect()}catch(s){this.log.warn("Error disconnecting worker",{worker_id:e,error:s.message})}}force_kill_remaining_workers(){for(const[e,t]of this.workers){this.log.warn("Force killing worker after timeout",{worker_id:e});try{t.worker.kill("SIGKILL")}catch(s){this.log.warn("Error force killing worker",{worker_id:e,error:s.message})}}this.workers.clear()}async wait_for_workers_to_exit(){const e=process.env.NODE_ENV==="test"?500:3e3;await new Promise(t=>{const s=setTimeout(()=>{this.force_kill_remaining_workers(),t()},e),r=()=>{this.workers.size===0?(clearTimeout(s),t()):setTimeout(r,50)};r()})}cleanup_database_connections(){try{b(),this.log.info("Database cleanup completed")}catch(e){this.log.warn("Error during database cleanup",{error:e.message})}}clear_internal_state(){this.authenticated_sessions.clear(),this.write_queue.length=0,this.pending_writes.clear()}perform_test_environment_cleanup(){if(process.env.NODE_ENV==="test")try{for(const e in n.workers){const t=n.workers[e];if(t&&!t.isDead()){this.log.info("Force killing remaining cluster worker",{worker_id:e,worker_pid:t.process.pid});try{t.kill("SIGKILL")}catch(s){this.log.warn("Error force killing remaining worker",{worker_id:e,error:s.message})}}}for(const e in n.workers)delete n.workers[e];n.removeAllListeners(),this.log.info("Aggressive cluster cleanup completed for test environment")}catch(e){this.log.warn("Error during aggressive cluster cleanup",{error:e.message})}}async shutdown(){const e=Date.now();this.log.info("Initiating graceful shutdown"),this.shutting_down=!0,await this.stop_http_server_gracefully(),this.stop_backup_scheduling_gracefully(),this.send_shutdown_signals(),await this.wait_for_pending_writes(),this.log.info("All writes completed, disconnecting workers"),this.disconnect_all_workers(),await this.wait_for_workers_to_exit(),this.cleanup_database_connections(),this.clear_internal_state(),this.perform_test_environment_cleanup();const t=Date.now()-e;this.log.info("Shutdown complete",{shutdown_duration_ms:t})}}var pe=H;export{pe as default};
package/dist/server/lib/batched_write_queue.js
ADDED
@@ -0,0 +1 @@
+import l from"./processing_lane.js";import d from"./logger.js";const{create_context_logger:p}=d("batched_write_queue");class h{constructor(t={}){this.batch_size=t.batch_size||100,this.batch_timeout=t.batch_timeout||10,this.lane_count=t.lane_count||4,this.queue_limit=t.queue_limit||1e4,this.overflow_strategy=t.overflow_strategy||"block",this.lanes=Array(this.lane_count).fill(null).map((s,o)=>new l({batch_size:this.batch_size,batch_timeout:this.batch_timeout,lane_id:o})),this.shutting_down=!1,this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:0,max_queue_depth:0,total_wait_time_ms:0,total_processing_time_ms:0,lane_distribution:new Array(this.lane_count).fill(0)},this.log=p()}async enqueue_write_operation(t,s={}){if(this.shutting_down)throw new Error("Server shutting down");if(this.get_current_queue_depth()>=this.queue_limit){if(this.overflow_strategy==="drop")throw new Error("Queue full, operation dropped");this.overflow_strategy==="block"&&await this.wait_for_queue_space()}const i={operation_fn:t,context:s,enqueued_at:Date.now()},_=this.get_lane_for_operation(i),e=this.lanes[_];this.stats.total_operations++,this.stats.lane_distribution[_]++,this.update_queue_depth_stats(),this.log.debug("Operation enqueued to lane",{lane_id:_,total_operations:this.stats.total_operations,context:s});try{const a=await e.add_operation(i);this.stats.completed_operations++;const r=Date.now()-i.enqueued_at;return this.stats.total_wait_time_ms+=r,a}catch(a){throw this.stats.failed_operations++,a}}get_lane_for_operation(t){const s=t.context||{},o=s.collection||"",i=s.document_id||s.id||"",_=`${o}:${i}`;let e=0;for(let r=0;r<_.length;r++){const c=_.charCodeAt(r);e=(e<<5)-e+c,e=e&e}return Math.abs(e)%this.lane_count}get_current_queue_depth(){return this.lanes.reduce((t,s)=>t+s.stats.current_batch_size,0)}update_queue_depth_stats(){this.stats.current_queue_depth=this.get_current_queue_depth(),this.stats.current_queue_depth>this.stats.max_queue_depth&&(this.stats.max_queue_depth=this.stats.current_queue_depth)}async wait_for_queue_space(){const o=Date.now();for(;this.get_current_queue_depth()>=this.queue_limit;){if(Date.now()-o>5e3)throw new Error("Queue full, timeout waiting for space");if(await new Promise(i=>setTimeout(i,10)),this.shutting_down)throw new Error("Server shutting down")}}async flush_all_batches(){const t=this.lanes.map(s=>s.flush_batch());await Promise.all(t)}get_stats(){const t=this.lanes.map(e=>e.get_stats()),s=this.stats.completed_operations>0?Math.round(this.stats.total_wait_time_ms/this.stats.completed_operations):0,o=t.reduce((e,a)=>e+a.total_batch_processing_time_ms,0),i=this.stats.completed_operations>0?Math.round(o/this.stats.completed_operations):0,_=this.stats.lane_distribution.map((e,a)=>({lane_id:a,operations:e,percentage:this.stats.total_operations>0?Math.round(e/this.stats.total_operations*100):0}));return{total_operations:this.stats.total_operations,completed_operations:this.stats.completed_operations,failed_operations:this.stats.failed_operations,current_queue_depth:this.get_current_queue_depth(),max_queue_depth:this.stats.max_queue_depth,avg_wait_time_ms:s,avg_processing_time_ms:i,success_rate:this.stats.total_operations>0?Math.round(this.stats.completed_operations/this.stats.total_operations*100):100,lane_count:this.lane_count,batch_size:this.batch_size,batch_timeout:this.batch_timeout,lane_distribution:this.stats.lane_distribution,lane_utilization:_,lane_stats:t,total_batches_processed:t.reduce((e,a)=>e+a.batches_processed,0),avg_batch_size:t.length>0?Math.round(t.reduce((e,a)=>e+a.avg_batch_size,0)/t.length):0}}clear_stats(){this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:this.get_current_queue_depth(),max_queue_depth:0,total_wait_time_ms:0,total_processing_time_ms:0,lane_distribution:new Array(this.lane_count).fill(0)},this.lanes.forEach(t=>t.clear_stats())}async shutdown(){this.log.info("Shutting down batched write queue",{pending_operations:this.get_current_queue_depth(),lane_count:this.lane_count}),this.shutting_down=!0,await this.flush_all_batches();const t=this.lanes.map(s=>s.shutdown());await Promise.all(t),this.log.info("Batched write queue shutdown complete")}}let n=null;const g=u=>(n||(n=new h(u)),n),f=async()=>{n&&(await n.shutdown(),n=null)};var q=h;export{q as default,g as get_batched_write_queue,f as shutdown_batched_write_queue};
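The bundle exports a lazy singleton (get_batched_write_queue) and a matching teardown (shutdown_batched_write_queue). A minimal usage sketch, assuming a hypothetical perform_insert write function:

    import { get_batched_write_queue, shutdown_batched_write_queue } from './batched_write_queue.js';

    // The first call creates the singleton; later calls ignore the options.
    const queue = get_batched_write_queue({ batch_size: 100, batch_timeout: 10, lane_count: 4 });

    // Operations with the same collection/document_id hash to the same lane,
    // so writes to one document keep their relative order.
    const result = await queue.enqueue_write_operation(
      async () => perform_insert(),                    // hypothetical write function
      { collection: 'users', document_id: 'abc123' }
    );

    await shutdown_batched_write_queue();              // flushes all lanes, clears the singleton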
package/dist/server/lib/processing_lane.js
ADDED
@@ -0,0 +1 @@
+import c from"./logger.js";const{create_context_logger:n}=c("processing_lane");class o{constructor(t={}){this.batch_size=t.batch_size||100,this.batch_timeout=t.batch_timeout||10,this.lane_id=t.lane_id||0,this.current_batch=[],this.processing=!1,this.shutting_down=!1,this.batch_timeout_handle=null,this.stats={total_operations:0,completed_operations:0,failed_operations:0,batches_processed:0,current_batch_size:0,max_batch_size:0,total_batch_wait_time_ms:0,total_batch_processing_time_ms:0},this.log=n(`lane_${this.lane_id}`)}async add_operation(t){if(this.shutting_down)throw new Error("Processing lane shutting down");return new Promise((a,s)=>{if(this.shutting_down){s(new Error("Processing lane shutting down"));return}const e={...t,resolve:a,reject:s,enqueued_at:Date.now(),id:this.generate_operation_id()};this.current_batch.push(e),this.stats.total_operations++,this.stats.current_batch_size=this.current_batch.length,this.stats.current_batch_size>this.stats.max_batch_size&&(this.stats.max_batch_size=this.stats.current_batch_size),this.log.debug("Operation added to batch",{lane_id:this.lane_id,operation_id:e.id,batch_size:this.stats.current_batch_size,context:t.context}),this.current_batch.length>=this.batch_size?this.process_current_batch():this.current_batch.length===1&&this.start_batch_timeout()})}start_batch_timeout(){this.batch_timeout_handle&&clearTimeout(this.batch_timeout_handle),this.batch_timeout_handle=setTimeout(()=>{this.current_batch.length>0&&!this.processing&&(this.log.debug("Batch timeout triggered",{lane_id:this.lane_id,batch_size:this.current_batch.length}),this.process_current_batch())},this.batch_timeout)}async process_current_batch(){if(this.processing||this.current_batch.length===0||this.shutting_down)return;this.batch_timeout_handle&&(clearTimeout(this.batch_timeout_handle),this.batch_timeout_handle=null),this.processing=!0;const t=[...this.current_batch];this.current_batch=[],this.stats.current_batch_size=0;const a=Date.now(),s=Math.min(...t.map(i=>i.enqueued_at)),e=a-s;this.stats.total_batch_wait_time_ms+=e,this.stats.batches_processed++,this.log.debug("Processing batch",{lane_id:this.lane_id,batch_size:t.length,batch_wait_time_ms:e});try{const i=await this.execute_batch_transaction(t),h=Date.now()-a;this.stats.total_batch_processing_time_ms+=h,this.stats.completed_operations+=t.length,t.forEach((_,r)=>{_.resolve(i[r])}),this.log.debug("Batch completed successfully",{lane_id:this.lane_id,batch_size:t.length,batch_wait_time_ms:e,batch_processing_time_ms:h})}catch(i){const h=Date.now()-a;this.stats.total_batch_processing_time_ms+=h,this.stats.failed_operations+=t.length,t.forEach(_=>{_.reject(i)}),this.log.error("Batch processing failed",{lane_id:this.lane_id,batch_size:t.length,batch_wait_time_ms:e,batch_processing_time_ms:h,error:i.message})}this.processing=!1,this.current_batch.length>0&&(this.current_batch.length>=this.batch_size?setImmediate(()=>this.process_current_batch()):this.start_batch_timeout())}async execute_batch_transaction(t){const a=[];for(const s of t)try{const e=await this.execute_with_retry(s.operation_fn,s.context);a.push(e)}catch(e){throw e}return a}async execute_with_retry(t,a,s=3){let e=null;for(let i=1;i<=s;i++)try{return await t()}catch(h){if(e=h,this.is_retryable_error(h)&&i<s){const _=this.calculate_backoff_delay(i);this.log.warn("Operation failed, retrying",{lane_id:this.lane_id,attempt:i,max_retries:s,delay_ms:_,error:h.message,context:a}),await this.sleep(_);continue}break}throw e}is_retryable_error(t){return["MDB_MAP_FULL","MDB_TXN_FULL","MDB_READERS_FULL","EAGAIN","EBUSY"].some(s=>t.message.includes(s)||t.code===s)}calculate_backoff_delay(t){const e=100*Math.pow(2,t-1),i=Math.random()*.1*e;return Math.min(e+i,5e3)}sleep(t){return new Promise(a=>setTimeout(a,t))}generate_operation_id(){return`lane_${this.lane_id}_${Date.now()}_${Math.random().toString(36).substr(2,9)}`}get_stats(){const t=this.stats.batches_processed>0?Math.round(this.stats.total_batch_wait_time_ms/this.stats.batches_processed):0,a=this.stats.batches_processed>0?Math.round(this.stats.total_batch_processing_time_ms/this.stats.batches_processed):0,s=this.stats.batches_processed>0?Math.round(this.stats.completed_operations/this.stats.batches_processed):0;return{lane_id:this.lane_id,...this.stats,avg_batch_wait_time_ms:t,avg_batch_processing_time_ms:a,avg_batch_size:s,success_rate:this.stats.total_operations>0?Math.round(this.stats.completed_operations/this.stats.total_operations*100):100}}clear_stats(){this.stats={total_operations:0,completed_operations:0,failed_operations:0,batches_processed:0,current_batch_size:this.current_batch.length,max_batch_size:0,total_batch_wait_time_ms:0,total_batch_processing_time_ms:0}}async flush_batch(){this.current_batch.length>0&&!this.processing&&await this.process_current_batch()}async shutdown(){for(this.log.info("Shutting down processing lane",{lane_id:this.lane_id,pending_operations:this.current_batch.length,currently_processing:this.processing}),this.shutting_down=!0,this.batch_timeout_handle&&(clearTimeout(this.batch_timeout_handle),this.batch_timeout_handle=null),this.current_batch.length>0&&!this.processing&&await this.process_current_batch();this.processing;)await new Promise(t=>setTimeout(t,10));this.current_batch.forEach(t=>{t.reject(new Error("Processing lane shutting down"))}),this.current_batch=[],this.processing=!1}}var b=o;export{b as default};
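Each lane retries LMDB-style transient errors (MDB_MAP_FULL, MDB_TXN_FULL, MDB_READERS_FULL, EAGAIN, EBUSY) with exponential backoff. The schedule, extracted from calculate_backoff_delay in the blob above:

    // Backoff used by the lane's execute_with_retry (default 3 attempts):
    // base 100ms doubling per attempt, plus up to 10% random jitter, capped at 5s.
    const calculate_backoff_delay = (attempt) => {
      const base_delay = 100 * Math.pow(2, attempt - 1); // 100, 200, 400, ...
      const jitter = Math.random() * 0.1 * base_delay;
      return Math.min(base_delay + jitter, 5000);
    };

So a default run waits roughly 100ms and then 200ms between its three attempts before giving up.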
package/dist/server/lib/write_queue.js
CHANGED
@@ -1 +1 @@
-import
+import c from"./logger.js";import{get_batched_write_queue as l,shutdown_batched_write_queue as p}from"./batched_write_queue.js";const{create_context_logger:h}=c("write_queue");class d{constructor(){this.queue=[],this.processing=!1,this.shutting_down=!1,this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:0,max_queue_depth:0,total_wait_time_ms:0,total_processing_time_ms:0},this.log=h()}async enqueue_write_operation(t,e={}){if(this.shutting_down)throw new Error("Server shutting down");return new Promise((i,a)=>{if(this.shutting_down){a(new Error("Server shutting down"));return}const s={operation_fn:t,context:e,resolve:i,reject:a,enqueued_at:Date.now(),id:this.generate_operation_id()};this.queue.push(s),this.stats.total_operations++,this.stats.current_queue_depth=this.queue.length,this.stats.current_queue_depth>this.stats.max_queue_depth&&(this.stats.max_queue_depth=this.stats.current_queue_depth),this.log.debug("Write operation enqueued",{operation_id:s.id,queue_depth:this.stats.current_queue_depth,context:e}),this.process_queue()})}async process_queue(){if(!(this.processing||this.queue.length===0||this.shutting_down)){for(this.processing=!0;this.queue.length>0&&!this.shutting_down;){const t=this.queue.shift();this.stats.current_queue_depth=this.queue.length;const e=Date.now()-t.enqueued_at;this.stats.total_wait_time_ms+=e;const i=Date.now();try{this.log.debug("Processing write operation",{operation_id:t.id,wait_time_ms:e,context:t.context});const a=await this.execute_with_retry(t.operation_fn,t.context),s=Date.now()-i;this.stats.total_processing_time_ms+=s,this.stats.completed_operations++,this.log.debug("Write operation completed",{operation_id:t.id,wait_time_ms:e,processing_time_ms:s,context:t.context}),t.resolve(a)}catch(a){const s=Date.now()-i;this.stats.total_processing_time_ms+=s,this.stats.failed_operations++,this.log.error("Write operation failed",{operation_id:t.id,wait_time_ms:e,processing_time_ms:s,error:a.message,context:t.context}),t.reject(a)}}this.processing=!1}}async execute_with_retry(t,e,i=3){let a=null;for(let s=1;s<=i;s++)try{return await t()}catch(_){if(a=_,this.is_retryable_error(_)&&s<i){const u=this.calculate_backoff_delay(s);this.log.warn("Write operation failed, retrying",{attempt:s,max_retries:i,delay_ms:u,error:_.message,context:e}),await this.sleep(u);continue}break}throw a}is_retryable_error(t){return["MDB_MAP_FULL","MDB_TXN_FULL","MDB_READERS_FULL","EAGAIN","EBUSY"].some(i=>t.message.includes(i)||t.code===i)}calculate_backoff_delay(t){const a=100*Math.pow(2,t-1),s=Math.random()*.1*a;return Math.min(a+s,5e3)}sleep(t){return new Promise(e=>setTimeout(e,t))}generate_operation_id(){return`${Date.now()}-${Math.random().toString(36).substr(2,9)}`}get_stats(){const t=this.stats.completed_operations>0?Math.round(this.stats.total_wait_time_ms/this.stats.completed_operations):0,e=this.stats.completed_operations>0?Math.round(this.stats.total_processing_time_ms/this.stats.completed_operations):0;return{...this.stats,avg_wait_time_ms:t,avg_processing_time_ms:e,success_rate:this.stats.total_operations>0?Math.round(this.stats.completed_operations/this.stats.total_operations*100):100}}clear_stats(){this.stats={total_operations:0,completed_operations:0,failed_operations:0,current_queue_depth:this.queue.length,max_queue_depth:0,total_wait_time_ms:0,total_processing_time_ms:0}}async shutdown(){for(this.log.info("Shutting down write queue",{pending_operations:this.queue.length,currently_processing:this.processing}),this.shutting_down=!0;this.processing;)await this.sleep(50);this.queue.forEach(t=>{t.reject(new Error("Server shutting down"))}),this.queue=[],this.processing=!1}}let o=null,n=!0;const q=r=>{if(!o)if(n){const t=l(r);o=new m(t)}else o=new d;return o},y=async()=>{o&&(await o.shutdown(),o=null),n&&await p()},f=r=>{n=r};class m{constructor(t){this.batched_queue=t,this.log=h("write_queue_wrapper")}async enqueue_write_operation(t,e={}){return this.batched_queue.enqueue_write_operation(t,e)}get_stats(){const t=this.batched_queue.get_stats();return{total_operations:t.total_operations,completed_operations:t.completed_operations,failed_operations:t.failed_operations,current_queue_depth:t.current_queue_depth,max_queue_depth:t.max_queue_depth,avg_wait_time_ms:t.avg_wait_time_ms,avg_processing_time_ms:t.avg_processing_time_ms,success_rate:t.success_rate}}clear_stats(){this.batched_queue.clear_stats()}async shutdown(){await this.batched_queue.shutdown()}is_retryable_error(t){return["MDB_MAP_FULL","MDB_TXN_FULL","MDB_READERS_FULL","EAGAIN","EBUSY"].some(i=>t.message.includes(i)||t.code===i)}calculate_backoff_delay(t){const a=100*Math.pow(2,t-1),s=Math.random()*.1*a;return Math.min(a+s,5e3)}generate_operation_id(){return`${Date.now()}-${Math.random().toString(36).substr(2,9)}`}}export{q as get_write_queue,f as set_batched_queue_enabled,y as shutdown_write_queue};
package/package.json
CHANGED
@@ -1,8 +1,8 @@
 {
   "name": "@joystick.js/db-canary",
   "type": "module",
-  "version": "0.0.0-canary.
-  "canary_version": "0.0.0-canary.
+  "version": "0.0.0-canary.2269",
+  "canary_version": "0.0.0-canary.2268",
   "description": "JoystickDB - A minimalist database server for the Joystick framework",
   "main": "./dist/server/index.js",
   "scripts": {
package/src/server/cluster/master.js
CHANGED
@@ -22,6 +22,7 @@ import delete_many from '../lib/operations/delete_many.js';
 import bulk_write from '../lib/operations/bulk_write.js';
 import find_one from '../lib/operations/find_one.js';
 import find from '../lib/operations/find.js';
+import count_documents from '../lib/operations/count_documents.js';
 import create_index_operation from '../lib/operations/create_index.js';
 import drop_index_operation from '../lib/operations/drop_index.js';
 import get_indexes_operation from '../lib/operations/get_indexes.js';
@@ -528,6 +529,10 @@ class ClusterMaster extends EventEmitter {
         result = await find(database_name, data.collection, data.filter, data.options);
         break;
 
+      case 'count_documents':
+        result = await count_documents(database_name, data.collection, data.filter, data.options);
+        break;
+
       case 'create_index':
         result = await create_index_operation(database_name, data.collection, data.field, data.options);
         break;
@@ -548,7 +553,7 @@ class ClusterMaster extends EventEmitter {
     this.log.log_operation(op_type, duration_ms, { result });
 
     // NOTE: Trigger map size check after write operations (not read operations).
-    const read_operations = ['find_one', 'find', 'get_indexes'];
+    const read_operations = ['find_one', 'find', 'count_documents', 'get_indexes'];
     if (!read_operations.includes(op_type)) {
       setImmediate(() => check_and_grow_map_size());
     }
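With count_documents wired into the master's operation switch, workers can route counts over the same IPC path as every other operation. A worker-side sketch of the message shape, with field names taken from handle_write_request in the dist bundle above (next_write_id and socket_id are illustrative):

    // Worker process: ask the master to run a count and await its reply.
    process.send({
      type: 'write_request',
      data: {
        write_id: next_write_id(),          // hypothetical ID generator
        op_type: 'count_documents',
        socket_id,                          // assumed to be in scope
        data: {
          database: 'default',
          collection: 'users',
          filter: { status: 'active' },
          options: {}
        }
      }
    });
    // The master replies with { type: 'write_response',
    //   data: { write_id, success, result } }.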
package/src/server/lib/batched_write_queue.js
ADDED
@@ -0,0 +1,331 @@
+/**
+ * @fileoverview Batched write queue system with parallel processing lanes.
+ * Provides 3-4x performance improvement by batching operations and processing
+ * them in parallel lanes while maintaining backward compatibility.
+ */
+
+import ProcessingLane from './processing_lane.js';
+import create_logger from './logger.js';
+
+const { create_context_logger } = create_logger('batched_write_queue');
+
+/**
+ * Batched write queue that distributes operations across parallel processing lanes.
+ * Maintains backward compatibility with existing WriteQueue API while providing
+ * significant performance improvements through batching and parallelization.
+ */
+class BatchedWriteQueue {
+  /**
+   * Creates a new BatchedWriteQueue instance.
+   * @param {Object} options - Configuration options
+   * @param {number} [options.batch_size=100] - Operations per batch
+   * @param {number} [options.batch_timeout=10] - Max wait time in milliseconds
+   * @param {number} [options.lane_count=4] - Number of parallel processing lanes
+   * @param {number} [options.queue_limit=10000] - Max queued operations
+   * @param {string} [options.overflow_strategy='block'] - 'block' | 'drop' | 'expand'
+   */
+  constructor(options = {}) {
+    this.batch_size = options.batch_size || 100;
+    this.batch_timeout = options.batch_timeout || 10;
+    this.lane_count = options.lane_count || 4;
+    this.queue_limit = options.queue_limit || 10000;
+    this.overflow_strategy = options.overflow_strategy || 'block';
+
+    /** @type {Array<ProcessingLane>} Array of processing lanes */
+    this.lanes = Array(this.lane_count).fill(null).map((_, index) =>
+      new ProcessingLane({
+        batch_size: this.batch_size,
+        batch_timeout: this.batch_timeout,
+        lane_id: index
+      })
+    );
+
+    /** @type {boolean} Whether queue is shutting down */
+    this.shutting_down = false;
+
+    /** @type {Object} Overall queue statistics */
+    this.stats = {
+      total_operations: 0,
+      completed_operations: 0,
+      failed_operations: 0,
+      current_queue_depth: 0,
+      max_queue_depth: 0,
+      total_wait_time_ms: 0,
+      total_processing_time_ms: 0,
+      lane_distribution: new Array(this.lane_count).fill(0)
+    };
+
+    this.log = create_context_logger();
+  }
+
+  /**
+   * Enqueues a write operation for batched processing.
+   * Maintains backward compatibility with existing WriteQueue API.
+   * @param {function} operation_fn - Async function that performs the write operation
+   * @param {Object} [context={}] - Additional context for logging and debugging
+   * @returns {Promise<*>} Promise that resolves with the operation result
+   * @throws {Error} When server is shutting down or queue is full
+   */
+  async enqueue_write_operation(operation_fn, context = {}) {
+    if (this.shutting_down) {
+      throw new Error('Server shutting down');
+    }
+
+    // Check queue limits
+    const current_depth = this.get_current_queue_depth();
+    if (current_depth >= this.queue_limit) {
+      if (this.overflow_strategy === 'drop') {
+        throw new Error('Queue full, operation dropped');
+      } else if (this.overflow_strategy === 'block') {
+        // Wait for queue to have space (simple backpressure)
+        await this.wait_for_queue_space();
+      }
+      // 'expand' strategy allows unlimited growth
+    }
+
+    const operation = {
+      operation_fn,
+      context,
+      enqueued_at: Date.now()
+    };
+
+    // Select lane for this operation
+    const lane_index = this.get_lane_for_operation(operation);
+    const selected_lane = this.lanes[lane_index];
+
+    // Update statistics
+    this.stats.total_operations++;
+    this.stats.lane_distribution[lane_index]++;
+    this.update_queue_depth_stats();
+
+    this.log.debug('Operation enqueued to lane', {
+      lane_id: lane_index,
+      total_operations: this.stats.total_operations,
+      context: context
+    });
+
+    try {
+      const result = await selected_lane.add_operation(operation);
+
+      // Update completion statistics
+      this.stats.completed_operations++;
+      const wait_time_ms = Date.now() - operation.enqueued_at;
+      this.stats.total_wait_time_ms += wait_time_ms;
+
+      return result;
+    } catch (error) {
+      this.stats.failed_operations++;
+      throw error;
+    }
+  }
+
+  /**
+   * Determines which lane should process the given operation.
+   * Uses consistent hashing based on operation context to ensure
+   * operations for the same collection/document go to the same lane.
+   * @param {Object} operation - Operation to assign to a lane
+   * @returns {number} Lane index (0 to lane_count-1)
+   */
+  get_lane_for_operation(operation) {
+    // Extract collection and document identifiers for consistent hashing
+    const context = operation.context || {};
+    const collection = context.collection || '';
+    const document_id = context.document_id || context.id || '';
+
+    // Create hash key for consistent distribution
+    const hash_key = `${collection}:${document_id}`;
+
+    // Simple hash function for consistent distribution
+    let hash = 0;
+    for (let i = 0; i < hash_key.length; i++) {
+      const char = hash_key.charCodeAt(i);
+      hash = ((hash << 5) - hash) + char;
+      hash = hash & hash; // Convert to 32-bit integer
+    }
+
+    // Ensure positive value and map to lane index
+    const lane_index = Math.abs(hash) % this.lane_count;
+
+    return lane_index;
+  }
+
+  /**
+   * Gets the current total queue depth across all lanes.
+   * @returns {number} Total number of queued operations
+   */
+  get_current_queue_depth() {
+    return this.lanes.reduce((total, lane) => {
+      return total + lane.stats.current_batch_size;
+    }, 0);
+  }
+
+  /**
+   * Updates queue depth statistics.
+   */
+  update_queue_depth_stats() {
+    this.stats.current_queue_depth = this.get_current_queue_depth();
+    if (this.stats.current_queue_depth > this.stats.max_queue_depth) {
+      this.stats.max_queue_depth = this.stats.current_queue_depth;
+    }
+  }
+
+  /**
+   * Waits for queue to have available space (backpressure mechanism).
+   * @returns {Promise<void>} Promise that resolves when space is available
+   */
+  async wait_for_queue_space() {
+    const check_interval = 10; // ms
+    const max_wait_time = 5000; // 5 seconds max wait
+    const start_time = Date.now();
+
+    while (this.get_current_queue_depth() >= this.queue_limit) {
+      if (Date.now() - start_time > max_wait_time) {
+        throw new Error('Queue full, timeout waiting for space');
+      }
+
+      await new Promise(resolve => setTimeout(resolve, check_interval));
+
+      if (this.shutting_down) {
+        throw new Error('Server shutting down');
+      }
+    }
+  }
+
+  /**
+   * Forces processing of all current batches across all lanes.
+   * Useful for ensuring all operations are processed before shutdown.
+   * @returns {Promise<void>} Promise that resolves when all batches are flushed
+   */
+  async flush_all_batches() {
+    const flush_promises = this.lanes.map(lane => lane.flush_batch());
+    await Promise.all(flush_promises);
+  }
+
+  /**
+   * Gets comprehensive queue statistics including per-lane metrics.
+   * Maintains backward compatibility with existing WriteQueue stats format.
+   * @returns {Object} Statistics object with performance metrics
+   */
+  get_stats() {
+    // Aggregate lane statistics
+    const lane_stats = this.lanes.map(lane => lane.get_stats());
+
+    // Calculate overall averages
+    const avg_wait_time = this.stats.completed_operations > 0
+      ? Math.round(this.stats.total_wait_time_ms / this.stats.completed_operations)
+      : 0;
+
+    const total_processing_time = lane_stats.reduce((sum, stats) =>
+      sum + stats.total_batch_processing_time_ms, 0);
+
+    const avg_processing_time = this.stats.completed_operations > 0
+      ? Math.round(total_processing_time / this.stats.completed_operations)
+      : 0;
+
+    // Calculate lane utilization
+    const lane_utilization = this.stats.lane_distribution.map((count, index) => ({
+      lane_id: index,
+      operations: count,
+      percentage: this.stats.total_operations > 0
+        ? Math.round((count / this.stats.total_operations) * 100)
+        : 0
+    }));
+
+    return {
+      // Backward compatible stats
+      total_operations: this.stats.total_operations,
+      completed_operations: this.stats.completed_operations,
+      failed_operations: this.stats.failed_operations,
+      current_queue_depth: this.get_current_queue_depth(),
+      max_queue_depth: this.stats.max_queue_depth,
+      avg_wait_time_ms: avg_wait_time,
+      avg_processing_time_ms: avg_processing_time,
+      success_rate: this.stats.total_operations > 0
+        ? Math.round((this.stats.completed_operations / this.stats.total_operations) * 100)
+        : 100,
+
+      // Batched queue specific stats
+      lane_count: this.lane_count,
+      batch_size: this.batch_size,
+      batch_timeout: this.batch_timeout,
+      lane_distribution: this.stats.lane_distribution,
+      lane_utilization,
+      lane_stats,
+
+      // Performance metrics
+      total_batches_processed: lane_stats.reduce((sum, stats) => sum + stats.batches_processed, 0),
+      avg_batch_size: lane_stats.length > 0
+        ? Math.round(lane_stats.reduce((sum, stats) => sum + stats.avg_batch_size, 0) / lane_stats.length)
+        : 0
+    };
+  }
+
+  /**
+   * Clears all statistics across the queue and all lanes.
+   */
+  clear_stats() {
+    this.stats = {
+      total_operations: 0,
+      completed_operations: 0,
+      failed_operations: 0,
+      current_queue_depth: this.get_current_queue_depth(),
+      max_queue_depth: 0,
+      total_wait_time_ms: 0,
+      total_processing_time_ms: 0,
+      lane_distribution: new Array(this.lane_count).fill(0)
+    };
+
+    this.lanes.forEach(lane => lane.clear_stats());
+  }
+
+  /**
+   * Gracefully shuts down the batched write queue.
+   * Processes all remaining operations and shuts down all lanes.
+   * @returns {Promise<void>} Promise that resolves when shutdown is complete
+   */
+  async shutdown() {
+    this.log.info('Shutting down batched write queue', {
+      pending_operations: this.get_current_queue_depth(),
+      lane_count: this.lane_count
+    });
+
+    this.shutting_down = true;
+
+    // Flush all remaining batches
+    await this.flush_all_batches();
+
+    // Shutdown all lanes
+    const shutdown_promises = this.lanes.map(lane => lane.shutdown());
+    await Promise.all(shutdown_promises);
+
+    this.log.info('Batched write queue shutdown complete');
+  }
+}
+
+/** @type {BatchedWriteQueue|null} Singleton instance of the batched write queue */
+let batched_write_queue_instance = null;
+
+/**
+ * Gets the singleton batched write queue instance, creating it if it doesn't exist.
+ * @param {Object} [options] - Configuration options for new instance
+ * @returns {BatchedWriteQueue} The batched write queue instance
+ */
+export const get_batched_write_queue = (options) => {
+  if (!batched_write_queue_instance) {
+    batched_write_queue_instance = new BatchedWriteQueue(options);
+  }
+  return batched_write_queue_instance;
+};
+
+/**
+ * Shuts down the batched write queue and clears the singleton instance.
+ * @returns {Promise<void>} Promise that resolves when shutdown is complete
+ */
+export const shutdown_batched_write_queue = async () => {
+  if (batched_write_queue_instance) {
+    await batched_write_queue_instance.shutdown();
+    batched_write_queue_instance = null;
+  }
+};
+
+export default BatchedWriteQueue;
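To see the lane hashing from get_lane_for_operation in isolation, a small sketch that instantiates the class directly (the contexts are illustrative):

    import BatchedWriteQueue from './batched_write_queue.js';

    const queue = new BatchedWriteQueue({ lane_count: 4 });

    // The same collection + document_id always hashes to the same lane, so
    // writes to one document stay ordered even though lanes run in parallel.
    const lane_a = queue.get_lane_for_operation({ context: { collection: 'users', document_id: '42' } });
    const lane_b = queue.get_lane_for_operation({ context: { collection: 'users', document_id: '42' } });
    console.log(lane_a === lane_b); // always true

    await queue.shutdown();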