@joystick.js/db-canary 0.0.0-canary.2218 → 0.0.0-canary.2220

@@ -1,4 +1,4 @@
- import a from"cluster";import d from"os";import{EventEmitter as l}from"events";import{writeFileSync as u}from"fs";import{load_settings as _,get_port_configuration as w}from"../lib/load_settings.js";import{restore_backup as p,start_backup_schedule as g,stop_backup_schedule as f}from"../lib/backup_manager.js";import m from"../lib/logger.js";import{initialize_database as k,check_and_grow_map_size as y,cleanup_database as b}from"../lib/query_engine.js";import{setup_authentication as v,verify_password as q,initialize_auth_manager as D}from"../lib/auth_manager.js";import x from"../lib/operations/insert_one.js";import E from"../lib/operations/update_one.js";import T from"../lib/operations/delete_one.js";import S from"../lib/operations/bulk_write.js";import W from"../lib/operations/find_one.js";import F from"../lib/operations/find.js";import N from"../lib/operations/create_index.js";import P from"../lib/operations/drop_index.js";import O from"../lib/operations/get_indexes.js";import{start_http_server as R,stop_http_server as z,is_setup_required as A}from"../lib/http_server.js";class I extends l{constructor(t={}){super(),this.workers=new Map,this.write_queue=[],this.processing_writes=!1,this.authenticated_sessions=new Map,this.worker_count=t.worker_count||d.cpus().length,this.port=t.port||1983,this.settings_file=t.settings_file||"settings.db.json",this.settings=null,this.pending_writes=new Map,this.write_id_counter=0,this.shutting_down=!1,this.master_id=`master_${Date.now()}_${Math.random()}`;const{create_context_logger:e}=m("master");this.log=e({port:this.port,worker_count:this.worker_count,master_id:this.master_id}),this.setup_master()}setup_master(){a.setupPrimary({exec:new URL("./index.js",import.meta.url).pathname,args:[],silent:!1}),a.on("exit",(t,e,r)=>{this.log.warn("Worker died",{worker_pid:t.process.pid,exit_code:e,signal:r}),this.handle_worker_death(t)}),a.on("message",(t,e)=>{this.handle_worker_message(t,e)})}async start(){const t=Date.now();try{if(this.settings=_(this.settings_file),this.log.info("Settings loaded successfully",{settings_file:this.settings_file}),k(),D(),this.log.info("Database and auth manager initialized"),this.settings?.restore_from)try{this.log.info("Startup restore requested",{backup_filename:this.settings.restore_from});const r=await p(this.settings.restore_from);this.log.info("Startup restore completed",{backup_filename:this.settings.restore_from,duration_ms:r.duration_ms});const s={...this.settings};delete s.restore_from,u(this.settings_file,JSON.stringify(s,null,2)),this.settings=_(this.settings_file),this.log.info("Removed restore_from from settings after successful restore")}catch(r){this.log.error("Startup restore failed",{backup_filename:this.settings.restore_from,error:r.message}),this.log.info("Continuing with existing database after restore failure")}if(this.settings?.s3)try{g(),this.log.info("Backup scheduling started")}catch(r){this.log.warn("Failed to start backup scheduling",{error:r.message})}if(A())try{const{http_port:r}=w();await R(r)&&this.log.info("HTTP setup server started",{http_port:r})}catch(r){this.log.warn("Failed to start HTTP setup server",{error:r.message})}for(let r=0;r<this.worker_count;r++)this.spawn_worker();const e=Date.now()-t;this.log.info("Master process started successfully",{workers_spawned:this.worker_count,startup_duration_ms:e})}catch(e){this.log.error("Failed to start master process",{error:e.message}),process.exit(1)}}spawn_worker(){const t=Date.now();this.log.info("Spawning worker");const 
e=a.fork({WORKER_PORT:this.port,WORKER_SETTINGS:JSON.stringify(this.settings)});this.workers.set(e.id,{worker:e,connections:0,last_heartbeat:Date.now(),status:"starting"});const r=Date.now()-t;return this.log.info("Worker spawned successfully",{worker_id:e.id,worker_pid:e.process.pid,spawn_duration_ms:r}),e}handle_worker_death(t){this.workers.delete(t.id),this.shutting_down||(this.log.info("Respawning worker after death",{dead_worker_id:t.id,respawn_delay_ms:1e3}),setTimeout(()=>{this.spawn_worker()},1e3))}handle_worker_message(t,e){switch(e.type){case"worker_ready":this.handle_worker_ready_for_config(t,e);break;case"server_ready":this.handle_worker_server_ready(t,e);break;case"write_request":this.handle_write_request(t,e);break;case"auth_request":this.handle_auth_request(t,e);break;case"setup_request":this.handle_setup_request(t,e);break;case"connection_count":this.update_worker_connections(t,e);break;case"heartbeat":this.handle_worker_heartbeat(t,e);break;default:this.log.warn("Unknown message type received",{message_type:e.type,worker_id:t.id})}}handle_worker_ready_for_config(t,e){this.log.info("Worker ready for config, sending configuration",{worker_id:t.id,worker_pid:t.process.pid,master_id:this.master_id}),t.send({type:"config",data:{port:this.port,settings:this.settings,master_id:this.master_id}})}handle_worker_server_ready(t,e){const r=this.workers.get(t.id);r&&(r.status="ready",this.log.info("Worker server ready",{worker_id:t.id,worker_pid:t.process.pid}))}async handle_write_request(t,e){if(this.shutting_down){t.send({type:"write_response",data:{write_id:e.data.write_id,success:!1,error:"Server is shutting down"}});return}const{write_id:r,op_type:s,data:o,socket_id:i}=e.data;try{const n={write_id:r,worker_id:t.id,op_type:s,data:o,socket_id:i,timestamp:Date.now()};this.write_queue.push(n),this.process_write_queue()}catch(n){t.send({type:"write_response",data:{write_id:r,success:!1,error:n.message}})}}async process_write_queue(){if(!(this.processing_writes||this.write_queue.length===0)){for(this.processing_writes=!0;this.write_queue.length>0;){const t=this.write_queue.shift();await this.execute_write_operation(t)}this.processing_writes=!1,this.shutting_down&&this.write_queue.length===0&&this.emit("writes_completed")}}async execute_write_operation(t){const{write_id:e,worker_id:r,op_type:s,data:o,socket_id:i}=t,n=this.workers.get(r);if(!n){this.log.error("Worker not found for write operation",{worker_id:r});return}try{const c=await this.perform_database_operation(s,o);n.worker.send({type:"write_response",data:{write_id:e,success:!0,result:c}}),this.broadcast_write_notification(s,o,r)}catch(c){this.log.error("Write operation failed",{write_id:e,op_type:s,worker_id:r,error_message:c.message}),n.worker.send({type:"write_response",data:{write_id:e,success:!1,error:c.message}})}}async perform_database_operation(t,e){const r=Date.now();this.log.info("Executing database operation",{op_type:t});try{let s;const o=e.database||"default";switch(t){case"insert_one":s=await x(o,e.collection,e.document,e.options);break;case"update_one":s=await E(o,e.collection,e.filter,e.update,e.options);break;case"delete_one":s=await T(o,e.collection,e.filter,e.options);break;case"bulk_write":s=await S(o,e.collection,e.operations,e.options);break;case"find_one":s=await W(o,e.collection,e.filter,e.options);break;case"find":s=await F(o,e.collection,e.filter,e.options);break;case"create_index":s=await N(o,e.collection,e.field,e.options);break;case"drop_index":s=await 
P(o,e.collection,e.field);break;case"get_indexes":s=await O(o,e.collection);break;default:throw new Error(`Unsupported database operation: ${t}`)}const i=Date.now()-r;return this.log.log_operation(t,i,{result:s}),["find_one","find","get_indexes"].includes(t)||setImmediate(()=>y()),s}catch(s){const o=Date.now()-r;throw this.log.error("Database operation failed",{op_type:t,duration_ms:o,error_message:s.message}),s}}broadcast_write_notification(t,e,r){const s={type:"write_notification",data:{op_type:t,data:e,timestamp:Date.now()}};for(const[o,i]of this.workers)o!==r&&i.status==="ready"&&i.worker.send(s)}async handle_auth_request(t,e){const{auth_id:r,socket_id:s,password:o}=e.data;try{const i=await q(o,"cluster_client");i&&this.authenticated_sessions.set(s,{authenticated_at:Date.now(),worker_id:t.id}),t.send({type:"auth_response",data:{auth_id:r,success:i,message:i?"Authentication successful":"Authentication failed"}})}catch(i){t.send({type:"auth_response",data:{auth_id:r,success:!1,message:`Authentication error: ${i.message}`}})}}handle_setup_request(t,e){const{setup_id:r,socket_id:s}=e.data;try{const o=v(),i=`===
+ import a from"cluster";import l from"os";import{EventEmitter as u}from"events";import{writeFileSync as p}from"fs";import{load_settings as _,get_port_configuration as h}from"../lib/load_settings.js";import{restore_backup as w,start_backup_schedule as g,stop_backup_schedule as f}from"../lib/backup_manager.js";import m from"../lib/logger.js";import{initialize_database as k,check_and_grow_map_size as y,cleanup_database as b}from"../lib/query_engine.js";import{setup_authentication as v,verify_password as q,initialize_auth_manager as D}from"../lib/auth_manager.js";import x from"../lib/operations/insert_one.js";import E from"../lib/operations/update_one.js";import T from"../lib/operations/delete_one.js";import S from"../lib/operations/bulk_write.js";import W from"../lib/operations/find_one.js";import F from"../lib/operations/find.js";import N from"../lib/operations/create_index.js";import P from"../lib/operations/drop_index.js";import z from"../lib/operations/get_indexes.js";import{start_http_server as O,stop_http_server as R,is_setup_required as A}from"../lib/http_server.js";import{is_development_mode as I,display_development_startup_message as j}from"../lib/development_mode.js";import{initialize_api_key_manager as L}from"../lib/api_key_manager.js";class M extends u{constructor(t={}){super(),this.workers=new Map,this.write_queue=[],this.processing_writes=!1,this.authenticated_sessions=new Map,this.worker_count=t.worker_count||l.cpus().length,this.port=t.port||1983,this.settings_file=t.settings_file||"settings.db.json",this.settings=null,this.pending_writes=new Map,this.write_id_counter=0,this.shutting_down=!1,this.master_id=`master_${Date.now()}_${Math.random()}`;const{create_context_logger:e}=m("master");this.log=e({port:this.port,worker_count:this.worker_count,master_id:this.master_id}),this.setup_master()}setup_master(){a.setupPrimary({exec:new URL("./index.js",import.meta.url).pathname,args:[],silent:!1}),a.on("exit",(t,e,r)=>{this.log.warn("Worker died",{worker_pid:t.process.pid,exit_code:e,signal:r}),this.handle_worker_death(t)}),a.on("message",(t,e)=>{this.handle_worker_message(t,e)})}async start(){const t=Date.now();try{if(this.settings=_(this.settings_file),this.log.info("Settings loaded successfully",{settings_file:this.settings_file}),k(),D(),await L(),this.log.info("Database and auth manager initialized"),this.settings?.restore_from)try{this.log.info("Startup restore requested",{backup_filename:this.settings.restore_from});const r=await w(this.settings.restore_from);this.log.info("Startup restore completed",{backup_filename:this.settings.restore_from,duration_ms:r.duration_ms});const s={...this.settings};delete s.restore_from,p(this.settings_file,JSON.stringify(s,null,2)),this.settings=_(this.settings_file),this.log.info("Removed restore_from from settings after successful restore")}catch(r){this.log.error("Startup restore failed",{backup_filename:this.settings.restore_from,error:r.message}),this.log.info("Continuing with existing database after restore failure")}if(this.settings?.s3)try{g(),this.log.info("Backup scheduling started")}catch(r){this.log.warn("Failed to start backup scheduling",{error:r.message})}if(A())try{const{http_port:r}=h();await O(r)&&this.log.info("HTTP setup server started",{http_port:r})}catch(r){this.log.warn("Failed to start HTTP setup server",{error:r.message})}for(let r=0;r<this.worker_count;r++)this.spawn_worker();if(I()){const{tcp_port:r,http_port:s}=h();j(r,s)}const e=Date.now()-t;this.log.info("Master process started 
successfully",{workers_spawned:this.worker_count,startup_duration_ms:e})}catch(e){this.log.error("Failed to start master process",{error:e.message}),process.exit(1)}}spawn_worker(){const t=Date.now();this.log.info("Spawning worker");const e=a.fork({WORKER_PORT:this.port,WORKER_SETTINGS:JSON.stringify(this.settings)});this.workers.set(e.id,{worker:e,connections:0,last_heartbeat:Date.now(),status:"starting"});const r=Date.now()-t;return this.log.info("Worker spawned successfully",{worker_id:e.id,worker_pid:e.process.pid,spawn_duration_ms:r}),e}handle_worker_death(t){this.workers.delete(t.id),this.shutting_down||(this.log.info("Respawning worker after death",{dead_worker_id:t.id,respawn_delay_ms:1e3}),setTimeout(()=>{this.spawn_worker()},1e3))}handle_worker_message(t,e){switch(e.type){case"worker_ready":this.handle_worker_ready_for_config(t,e);break;case"server_ready":this.handle_worker_server_ready(t,e);break;case"write_request":this.handle_write_request(t,e);break;case"auth_request":this.handle_auth_request(t,e);break;case"setup_request":this.handle_setup_request(t,e);break;case"connection_count":this.update_worker_connections(t,e);break;case"heartbeat":this.handle_worker_heartbeat(t,e);break;default:this.log.warn("Unknown message type received",{message_type:e.type,worker_id:t.id})}}handle_worker_ready_for_config(t,e){this.log.info("Worker ready for config, sending configuration",{worker_id:t.id,worker_pid:t.process.pid,master_id:this.master_id}),t.send({type:"config",data:{port:this.port,settings:this.settings,master_id:this.master_id}})}handle_worker_server_ready(t,e){const r=this.workers.get(t.id);r&&(r.status="ready",this.log.info("Worker server ready",{worker_id:t.id,worker_pid:t.process.pid}))}async handle_write_request(t,e){if(this.shutting_down){t.send({type:"write_response",data:{write_id:e.data.write_id,success:!1,error:"Server is shutting down"}});return}const{write_id:r,op_type:s,data:o,socket_id:i}=e.data;try{const n={write_id:r,worker_id:t.id,op_type:s,data:o,socket_id:i,timestamp:Date.now()};this.write_queue.push(n),this.process_write_queue()}catch(n){t.send({type:"write_response",data:{write_id:r,success:!1,error:n.message}})}}async process_write_queue(){if(!(this.processing_writes||this.write_queue.length===0)){for(this.processing_writes=!0;this.write_queue.length>0;){const t=this.write_queue.shift();await this.execute_write_operation(t)}this.processing_writes=!1,this.shutting_down&&this.write_queue.length===0&&this.emit("writes_completed")}}async execute_write_operation(t){const{write_id:e,worker_id:r,op_type:s,data:o,socket_id:i}=t,n=this.workers.get(r);if(!n){this.log.error("Worker not found for write operation",{worker_id:r});return}try{const c=await this.perform_database_operation(s,o);n.worker.send({type:"write_response",data:{write_id:e,success:!0,result:c}}),this.broadcast_write_notification(s,o,r)}catch(c){this.log.error("Write operation failed",{write_id:e,op_type:s,worker_id:r,error_message:c.message}),n.worker.send({type:"write_response",data:{write_id:e,success:!1,error:c.message}})}}async perform_database_operation(t,e){const r=Date.now();this.log.info("Executing database operation",{op_type:t});try{let s;const o=e.database||"default";switch(t){case"insert_one":s=await x(o,e.collection,e.document,e.options);break;case"update_one":s=await E(o,e.collection,e.filter,e.update,e.options);break;case"delete_one":s=await T(o,e.collection,e.filter,e.options);break;case"bulk_write":s=await S(o,e.collection,e.operations,e.options);break;case"find_one":s=await 
W(o,e.collection,e.filter,e.options);break;case"find":s=await F(o,e.collection,e.filter,e.options);break;case"create_index":s=await N(o,e.collection,e.field,e.options);break;case"drop_index":s=await P(o,e.collection,e.field);break;case"get_indexes":s=await z(o,e.collection);break;default:throw new Error(`Unsupported database operation: ${t}`)}const i=Date.now()-r;return this.log.log_operation(t,i,{result:s}),["find_one","find","get_indexes"].includes(t)||setImmediate(()=>y()),s}catch(s){const o=Date.now()-r;throw this.log.error("Database operation failed",{op_type:t,duration_ms:o,error_message:s.message}),s}}broadcast_write_notification(t,e,r){const s={type:"write_notification",data:{op_type:t,data:e,timestamp:Date.now()}};for(const[o,i]of this.workers)o!==r&&i.status==="ready"&&i.worker.send(s)}async handle_auth_request(t,e){const{auth_id:r,socket_id:s,password:o}=e.data;try{const i=await q(o,"cluster_client");i&&this.authenticated_sessions.set(s,{authenticated_at:Date.now(),worker_id:t.id}),t.send({type:"auth_response",data:{auth_id:r,success:i,message:i?"Authentication successful":"Authentication failed"}})}catch(i){t.send({type:"auth_response",data:{auth_id:r,success:!1,message:`Authentication error: ${i.message}`}})}}handle_setup_request(t,e){const{setup_id:r,socket_id:s}=e.data;try{const o=v(),i=`===
  JoystickDB Setup

  Your database has been setup. Follow the instructions below carefully to avoid issues.
@@ -17,4 +17,4 @@ const client = joystickdb.client({
  });

  await client.ping();
- ===`;t.send({type:"setup_response",data:{setup_id:r,success:!0,password:o,instructions:i,message:"Authentication setup completed successfully"}})}catch(o){t.send({type:"setup_response",data:{setup_id:r,success:!1,error:o.message}})}}update_worker_connections(t,e){const r=this.workers.get(t.id);r&&(r.connections=e.data.count)}handle_worker_heartbeat(t,e){const r=this.workers.get(t.id);r&&(r.last_heartbeat=Date.now())}get_cluster_stats(){const t={master_pid:process.pid,worker_count:this.workers.size,total_connections:0,write_queue_length:this.write_queue.length,authenticated_sessions:this.authenticated_sessions.size,workers:[]};for(const[e,r]of this.workers)t.total_connections+=r.connections,t.workers.push({id:e,pid:r.worker.process.pid,connections:r.connections,status:r.status,last_heartbeat:r.last_heartbeat});return t}async shutdown(){const t=Date.now();this.log.info("Initiating graceful shutdown"),this.shutting_down=!0;try{await z(),this.log.info("HTTP server stopped")}catch(s){this.log.warn("Failed to stop HTTP server",{error:s.message})}try{f(),this.log.info("Backup scheduling stopped")}catch(s){this.log.warn("Failed to stop backup scheduling",{error:s.message})}for(const[s,o]of this.workers)try{o.worker.send({type:"shutdown"})}catch(i){this.log.warn("Error sending shutdown signal to worker",{worker_id:s,error:i.message})}this.write_queue.length>0&&(this.log.info("Waiting for pending writes to complete",{pending_writes:this.write_queue.length}),await new Promise(s=>{const o=setTimeout(()=>{this.log.warn("Timeout waiting for writes to complete, proceeding with shutdown"),s()},process.env.NODE_ENV==="test"?1e3:5e3);this.once("writes_completed",()=>{clearTimeout(o),s()})})),this.log.info("All writes completed, disconnecting workers");for(const[s,o]of this.workers)try{o.worker.disconnect()}catch(i){this.log.warn("Error disconnecting worker",{worker_id:s,error:i.message})}const e=process.env.NODE_ENV==="test"?500:3e3;await new Promise(s=>{const o=setTimeout(()=>{for(const[n,c]of this.workers){this.log.warn("Force killing worker after timeout",{worker_id:n});try{c.worker.kill("SIGKILL")}catch(h){this.log.warn("Error force killing worker",{worker_id:n,error:h.message})}}this.workers.clear(),s()},e),i=()=>{this.workers.size===0?(clearTimeout(o),s()):setTimeout(i,50)};i()});try{b(),this.log.info("Database cleanup completed")}catch(s){this.log.warn("Error during database cleanup",{error:s.message})}if(this.authenticated_sessions.clear(),this.write_queue.length=0,this.pending_writes.clear(),process.env.NODE_ENV==="test")try{for(const s in a.workers){const o=a.workers[s];if(o&&!o.isDead()){this.log.info("Force killing remaining cluster worker",{worker_id:s,worker_pid:o.process.pid});try{o.kill("SIGKILL")}catch(i){this.log.warn("Error force killing remaining worker",{worker_id:s,error:i.message})}}}for(const s in a.workers)delete a.workers[s];a.removeAllListeners(),this.log.info("Aggressive cluster cleanup completed for test environment")}catch(s){this.log.warn("Error during aggressive cluster cleanup",{error:s.message})}const r=Date.now()-t;this.log.info("Shutdown complete",{shutdown_duration_ms:r})}}var oe=I;export{oe as default};
+ ===`;t.send({type:"setup_response",data:{setup_id:r,success:!0,password:o,instructions:i,message:"Authentication setup completed successfully"}})}catch(o){t.send({type:"setup_response",data:{setup_id:r,success:!1,error:o.message}})}}update_worker_connections(t,e){const r=this.workers.get(t.id);r&&(r.connections=e.data.count)}handle_worker_heartbeat(t,e){const r=this.workers.get(t.id);r&&(r.last_heartbeat=Date.now())}get_cluster_stats(){const t={master_pid:process.pid,worker_count:this.workers.size,total_connections:0,write_queue_length:this.write_queue.length,authenticated_sessions:this.authenticated_sessions.size,workers:[]};for(const[e,r]of this.workers)t.total_connections+=r.connections,t.workers.push({id:e,pid:r.worker.process.pid,connections:r.connections,status:r.status,last_heartbeat:r.last_heartbeat});return t}async shutdown(){const t=Date.now();this.log.info("Initiating graceful shutdown"),this.shutting_down=!0;try{await R(),this.log.info("HTTP server stopped")}catch(s){this.log.warn("Failed to stop HTTP server",{error:s.message})}try{f(),this.log.info("Backup scheduling stopped")}catch(s){this.log.warn("Failed to stop backup scheduling",{error:s.message})}for(const[s,o]of this.workers)try{o.worker.send({type:"shutdown"})}catch(i){this.log.warn("Error sending shutdown signal to worker",{worker_id:s,error:i.message})}this.write_queue.length>0&&(this.log.info("Waiting for pending writes to complete",{pending_writes:this.write_queue.length}),await new Promise(s=>{const o=setTimeout(()=>{this.log.warn("Timeout waiting for writes to complete, proceeding with shutdown"),s()},process.env.NODE_ENV==="test"?1e3:5e3);this.once("writes_completed",()=>{clearTimeout(o),s()})})),this.log.info("All writes completed, disconnecting workers");for(const[s,o]of this.workers)try{o.worker.disconnect()}catch(i){this.log.warn("Error disconnecting worker",{worker_id:s,error:i.message})}const e=process.env.NODE_ENV==="test"?500:3e3;await new Promise(s=>{const o=setTimeout(()=>{for(const[n,c]of this.workers){this.log.warn("Force killing worker after timeout",{worker_id:n});try{c.worker.kill("SIGKILL")}catch(d){this.log.warn("Error force killing worker",{worker_id:n,error:d.message})}}this.workers.clear(),s()},e),i=()=>{this.workers.size===0?(clearTimeout(o),s()):setTimeout(i,50)};i()});try{b(),this.log.info("Database cleanup completed")}catch(s){this.log.warn("Error during database cleanup",{error:s.message})}if(this.authenticated_sessions.clear(),this.write_queue.length=0,this.pending_writes.clear(),process.env.NODE_ENV==="test")try{for(const s in a.workers){const o=a.workers[s];if(o&&!o.isDead()){this.log.info("Force killing remaining cluster worker",{worker_id:s,worker_pid:o.process.pid});try{o.kill("SIGKILL")}catch(i){this.log.warn("Error force killing remaining worker",{worker_id:s,error:i.message})}}}for(const s in a.workers)delete a.workers[s];a.removeAllListeners(),this.log.info("Aggressive cluster cleanup completed for test environment")}catch(s){this.log.warn("Error during aggressive cluster cleanup",{error:s.message})}const r=Date.now()-t;this.log.info("Shutdown complete",{shutdown_duration_ms:r})}}var he=M;export{he as default};
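Most of the textual churn in the minified bundle above comes from the minifier reassigning single-letter names (get_indexes moves from O to z, start_http_server from R to O, stop_http_server from z to R, and so on). The substantive additions are two new imports — ../lib/development_mode.js and ../lib/api_key_manager.js — and two new call sites in start(). Deminified, with identifiers mapped from the new bundle's import statements (L → initialize_api_key_manager, I → is_development_mode, j → display_development_startup_message, h → get_port_configuration), the added logic is:

// After initialize_database() and initialize_auth_manager():
await initialize_api_key_manager();

// After the worker-spawn loop, before the "Master process started" log:
if (is_development_mode()) {
  const { tcp_port, http_port } = get_port_configuration();
  display_development_startup_message(tcp_port, http_port);
}

The same two changes appear in readable form in the source hunks below.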
package/package.json CHANGED
@@ -1,8 +1,8 @@
  {
    "name": "@joystick.js/db-canary",
    "type": "module",
-   "version": "0.0.0-canary.2218",
-   "canary_version": "0.0.0-canary.2217",
+   "version": "0.0.0-canary.2220",
+   "canary_version": "0.0.0-canary.2219",
    "description": "JoystickDB - A minimalist database server for the Joystick framework",
    "main": "./dist/server/index.js",
    "scripts": {
@@ -25,6 +25,12 @@ import create_index_operation from '../lib/operations/create_index.js';
  import drop_index_operation from '../lib/operations/drop_index.js';
  import get_indexes_operation from '../lib/operations/get_indexes.js';
  import { start_http_server, stop_http_server, is_setup_required } from '../lib/http_server.js';
+ import {
+   is_development_mode,
+   display_development_startup_message,
+   warn_undefined_node_env
+ } from '../lib/development_mode.js';
+ import { initialize_api_key_manager } from '../lib/api_key_manager.js';

  /**
   * @typedef {Object} ClusterMasterOptions
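development_mode.js itself is not part of this diff, so only the three export names above and their call signatures later in this file are confirmed; warn_undefined_node_env is imported but never called in the hunks shown. A minimal sketch of what such a module could look like, assuming (the diff does not confirm this) that detection is based on NODE_ENV:

// Hypothetical sketch of ../lib/development_mode.js. The export names and
// argument shapes come from this diff; the bodies are illustrative guesses.
const is_development_mode = () => {
  return process.env.NODE_ENV !== 'production';
};

const display_development_startup_message = (tcp_port, http_port) => {
  console.log(`JoystickDB running in development mode (TCP: ${tcp_port}, HTTP: ${http_port}).`);
};

const warn_undefined_node_env = () => {
  if (!process.env.NODE_ENV) {
    console.warn('NODE_ENV is not set; assuming development mode.');
  }
};

export {
  is_development_mode,
  display_development_startup_message,
  warn_undefined_node_env
};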
@@ -103,6 +109,7 @@ class ClusterMaster extends EventEmitter {
    // NOTE: Initialize database and auth manager.
    initialize_database();
    initialize_auth_manager();
+   await initialize_api_key_manager();
    this.log.info('Database and auth manager initialized');

    // NOTE: Handle startup restore if configured.
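Note that, unlike the synchronous initialize_database() and initialize_auth_manager() calls, the new initializer is awaited, so start() cannot reach the worker-spawn loop until API keys are ready. A sketch of a shape consistent with that ordering — the persistence details and the api_keys.json filename are assumptions, not confirmed by the diff:

// Hypothetical sketch of ../lib/api_key_manager.js. Only the export name and
// the fact that start() awaits it are confirmed by this diff.
import { readFile } from 'fs/promises';

let api_keys = new Map();

const initialize_api_key_manager = async () => {
  try {
    // Load persisted keys before any worker accepts connections, so no
    // request races key availability.
    const raw = await readFile('api_keys.json', 'utf8');
    api_keys = new Map(Object.entries(JSON.parse(raw)));
  } catch {
    api_keys = new Map();
  }
};

export { initialize_api_key_manager };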
@@ -165,6 +172,12 @@ class ClusterMaster extends EventEmitter {
      this.spawn_worker();
    }

+   // NOTE: Display development mode startup message if in development.
+   if (is_development_mode()) {
+     const { tcp_port, http_port } = get_port_configuration();
+     display_development_startup_message(tcp_port, http_port);
+   }
+
    const duration_ms = Date.now() - start_time;
    this.log.info('Master process started successfully', {
      workers_spawned: this.worker_count,
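get_port_configuration() is now destructured in two places in start(): earlier as { http_port } when starting the HTTP setup server, and here as { tcp_port, http_port }. A sketch of a return shape consistent with both call sites; only the two property names are confirmed by the diff, and the environment variable names and fallback values (1983 is the master's default TCP port in the constructor, the HTTP port is a pure guess) are placeholders:

// Hypothetical sketch of get_port_configuration from ../lib/load_settings.js.
// Env var names and default port values are placeholders, not from the diff.
const get_port_configuration = () => {
  return {
    tcp_port: Number(process.env.JOYSTICKDB_PORT) || 1983,
    http_port: Number(process.env.JOYSTICKDB_HTTP_PORT) || 2600,
  };
};

export { get_port_configuration };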