harperdb 4.5.34 → 4.5.35

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
@@ -21,9 +21,9 @@ Caused by:`));else if(typeof u=="object")try{n+=JSON.stringify(u)}catch{n+="Obje
  `,""));return r.replace(`
  `,"")}a(ZH,"runCommand");async function nse(){try{await Mne.access(hO)}catch{return!1}let e=await ZH(`${hO} --version`,void 0),t=e.substring(e.lastIndexOf("v")+1,e.length);return vne.eq(t,tse)}a(nse,"checkNATSServerInstalled");async function SO(e,t,r,n=!0,s="127.0.0.1"){if(!t&&!r){let o=await JH.getClusterUser();if(nl(o))throw new Error("Unable to get nats connection. Cluster user is undefined.");t=o.username,r=o.decrypt_hash}Js.trace("create nats connection called");let i=await Yne({name:s,port:e,user:t,pass:r,maxReconnectAttempts:-1,waitOnFirstConnect:n,timeout:2e5,tls:{keyFile:wr.get(ze.CONFIG_PARAMS.CLUSTERING_TLS_PRIVATEKEY),certFile:wr.get(ze.CONFIG_PARAMS.CLUSTERING_TLS_CERTIFICATE),caFile:wr.get(ze.CONFIG_PARAMS.CLUSTERING_TLS_CERT_AUTH),rejectUnauthorized:!1}});return i.protocol.transport.socket.unref(),Js.trace("create connection established a nats client connection with id",i?.info?.client_id),i.closed().then(o=>{o&&Js.error("Error with Nats client connection, connection closed",o),i===Xr&&ek()}),i}a(SO,"createConnection");function ek(){Xr=void 0,el=void 0,tl=void 0,rl=void 0}a(ek,"clearClientCache");async function sse(){Xr&&(await Xr.drain(),Xr=void 0,el=void 0,tl=void 0,rl=void 0)}a(sse,"closeConnection");var Xr,rl;async function Y_(){return rl||(rl=SO(wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),void 0,void 0),Xr=await rl),Xr||rl}a(Y_,"getConnection");async function W_(){if(el)return el;nl(Xr)&&await Y_();let{domain:e}=Gu(ze.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(nl(e))throw new Error("Error getting JetStream domain. Unable to get JetStream manager.");return el=await Xr.jetstreamManager({domain:e,timeout:6e4}),el}a(W_,"getJetStreamManager");async function tk(){if(tl)return tl;nl(Xr)&&await Y_();let{domain:e}=Gu(ze.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(nl(e))throw new Error("Error getting JetStream domain. 
Unable to get JetStream manager.");return tl=Xr.jetstream({domain:e,timeout:6e4}),tl}a(tk,"getJetStream");async function Bi(){let e=Xr||await Y_(),t=el||await W_(),r=tl||await tk();return{connection:e,jsm:t,js:r}}a(Bi,"getNATSReferences");async function ise(e){let t=wr.get(ze.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),{sys_name:r,decrypt_hash:n}=await JH.getClusterUser(),s=await SO(t,r,n),i=gO(),o=s.subscribe(i),c=[],l,u=(async()=>{for await(let f of o){let d=XH.decode(f.data);d.response_time=Date.now()-l,c.push(d)}})();return l=Date.now(),await s.publish("$SYS.REQ.SERVER.PING.VARZ",void 0,{reply:i}),await s.publish("$SYS.REQ.SERVER.PING",void 0,{reply:i}),await s.flush(),await JE.async_set_timeout(e),await o.drain(),await s.close(),await u,c}a(ise,"getServerList");async function TO(e,t){let{jsm:r}=await Bi(),n=wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);s=s===null?-1:s;let i=wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);i=i===null?-1:i,await r.streams.add({name:e,storage:Wne.File,retention:zne.Limits,subjects:t,discard:jne.Old,max_msgs:s,max_bytes:i,max_age:n})}a(TO,"createLocalStream");async function rk(){let{jsm:e}=await Bi(),t=await e.streams.list().next(),r=[];return t.forEach(n=>{r.push(n)}),r}a(rk,"listStreams");async function ose(e){let{jsm:t}=await Bi();await t.streams.delete(e)}a(ose,"deleteLocalStream");async function ase(e){let{connection:t}=await Bi(),r=[],n=gO(),s=t.subscribe(n),i=(async()=>{for await(let o of s)r.push(XH.decode(o.data))})();return await t.publish(`$JS.${e}.API.STREAM.LIST`,void 0,{reply:n}),await t.flush(),await s.drain(),await i,r}a(ase,"listRemoteStreams");async function cse(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Bi(),i=WH(),o={durable_name:i,ack_policy:pO.Explicit};t&&(o.deliver_policy=EO.StartTime,o.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,o);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];let u=[];for await(let f of l){let d=mO(f.data),_={nats_timestamp:f.info.timestampNanos,nats_sequence:f.info.streamSequence,entry:d};if(f.headers&&(_.origin=f.headers.get(Hr.MSG_HEADERS.ORIGIN)),u.push(_),f.ack(),f.info.pending===0)break}return await c.delete(),u}a(cse,"viewStream");async function*lse(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Bi(),i=WH(),o={durable_name:i,ack_policy:pO.Explicit};t&&(o.deliver_policy=EO.StartTime,o.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,o);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];for await(let u of l){let f=mO(u.data);f[0]||(f=[f]);for(let d of f){let _={nats_timestamp:u.info.timestampNanos,nats_sequence:u.info.streamSequence,entry:d};u.headers&&(_.origin=u.headers.get(Hr.MSG_HEADERS.ORIGIN)),yield _}if(u.ack(),u.info.pending===0)break}await c.delete()}a(lse,"viewStreamIterator");async function use(e,t,r,n){Js.trace(`publishToStream called with subject: ${e}, stream: ${t}, entries:`,n.operation),r=nk(n,r);let{js:s}=await Bi(),i=await ZE(),o=`${e}.${i}`,c=await Zne(()=>n instanceof Uint8Array?n:QH.encode(n));try{Js.trace(`publishToStream publishing to subject: ${o}`),Xne(c.length,"bytes-sent",e,n.operation,"replication"),await s.publish(o,c,{headers:r})}catch(l){if(l.code&&l.code.toString()==="503")return ik(async()=>{try{await 
s.publish(o,c,{headers:r})}catch{if(l.code&&l.code.toString()==="503"){Js.trace(`publishToStream creating stream: ${t}`);let f=o.split(".");f[2]="*",await TO(t,[o]),await s.publish(o,c,{headers:r})}else throw l}});throw l}}a(use,"publishToStream");function nk(e,t){t===void 0&&(t=Jne());let r=wr.get(ze.CONFIG_PARAMS.CLUSTERING_NODENAME);return!t.has(Hr.MSG_HEADERS.ORIGIN)&&r&&t.append(Hr.MSG_HEADERS.ORIGIN,r),t}a(nk,"addNatsMsgHeader");function Gu(e){e=e.toLowerCase();let t=K_.join(wr.get(ze.CONFIG_PARAMS.ROOTPATH),ese);if(e===ze.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())return nl(_O)&&(_O={port:$_.getConfigFromFile(ze.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),server_name:$_.getConfigFromFile(ze.CONFIG_PARAMS.CLUSTERING_NODENAME)+Hr.SERVER_SUFFIX.HUB,config_file:Hr.NATS_CONFIG_FILES.HUB_SERVER,pid_file_path:K_.join(t,Hr.PID_FILES.HUB),hdb_nats_path:t}),_O;if(e===ze.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())return nl(fO)&&(fO={port:$_.getConfigFromFile(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),server_name:$_.getConfigFromFile(ze.CONFIG_PARAMS.CLUSTERING_NODENAME)+Hr.SERVER_SUFFIX.LEAF,config_file:Hr.NATS_CONFIG_FILES.LEAF_SERVER,domain:$_.getConfigFromFile(ze.CONFIG_PARAMS.CLUSTERING_NODENAME)+Hr.SERVER_SUFFIX.LEAF,pid_file_path:K_.join(t,Hr.PID_FILES.LEAF),hdb_nats_path:t}),fO;Js.error(`Unable to get Nats server config. Unrecognized process: ${e}`)}a(Gu,"getServerConfig");async function sk(e,t,r,n){try{await e.consumers.add(t,{ack_policy:pO.Explicit,durable_name:r,deliver_policy:EO.StartTime,opt_start_time:n})}catch(s){if(s.message!=="consumer already exists")throw s}}a(sk,"createConsumer");async function dse(e,t,r){await e.consumers.delete(t,r)}a(dse,"removeConsumer");function fse(e){return e.split(".")[1]}a(fse,"extractServerName");async function _se(e,t,r=6e4,n=gO()){if(!JE.isObject(t))throw new Error("data param must be an object");let s=QH.encode(t),{connection:i}=await Bi(),o={timeout:r};n&&(o.reply=n,o.noMux=!0);let c=await i.request(e,s,o);return mO(c.data)}a(_se,"request");function AO(e){return new Promise(async(t,r)=>{let n=Hne(hO,["--signal",`reload=${e}`],{cwd:__dirname}),s,i;n.on("error",o=>{r(o)}),n.stdout.on("data",o=>{i+=o.toString()}),n.stderr.on("data",o=>{s+=o.toString()}),n.stderr.on("close",o=>{s&&r(s),t(i)})})}a(AO,"reloadNATS");async function hse(){let{pid_file_path:e}=Gu(ze.PROCESS_DESCRIPTORS.CLUSTERING_HUB);await AO(e)}a(hse,"reloadNATSHub");async function mse(){let{pid_file_path:e}=Gu(ze.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await AO(e)}a(mse,"reloadNATSLeaf");function pse(e,t,r){let n;switch(e.code){case YH.NoResponders:n=`Unable to ${t}, node '${r}' is not listening.`;break;case YH.Timeout:n=`Unable to ${t}, node '${r}' is listening but did not respond.`;break;default:n=e.message;break}return n}a(pse,"requestErrorHandler");async function Ese(e,t){let r=t+Hr.SERVER_SUFFIX.LEAF,{connection:n}=await Bi(),{jsm:s}=await Ose(r),{schema:i,table:o}=e,c=XE.createNatsTableStreamName(i,o),l=e.start_time?e.start_time:new Date(Date.now()).toISOString();await ik(async()=>{if(e.subscribe===!0)await sk(s,c,n.info.server_name,l);else try{await dse(s,c,n.info.server_name)}catch(u){Js.trace(u)}})}a(Ese,"updateRemoteConsumer");async function gse(e,t,r,n){let s=XE.createNatsTableStreamName(e,t),i=r+Hr.SERVER_SUFFIX.LEAF,o={type:ze.ITC_EVENT_TYPES.NATS_CONSUMER_UPDATE,status:n,stream_name:s,node_domain_name:i};if(!jH&&Vne()<wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXINGESTTHREADS)){let{updateConsumer:c}=dO();await c(o)}await 
Gne(o),n==="stop"&&await JE.async_set_timeout(1e3)}a(gse,"updateConsumerIterator");function ik(e){return qne.writeTransaction(ze.SYSTEM_SCHEMA_NAME,ze.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,e)}a(ik,"exclusiveLock");async function ok(e,t){let r=XE.createNatsTableStreamName(e,t),n=await ZE(),s=Rse(e,t,n);await TO(r,[s])}a(ok,"createLocalTableStream");async function Sse(e){for(let t=0,r=e.length;t<r;t++){let n=e[t].schema,s=e[t].table;await ok(n,s)}}a(Sse,"createTableStreams");async function ak(e,t,r=void 0){if(wr.get(ze.CONFIG_PARAMS.CLUSTERING_ENABLED))try{let n=XE.createNatsTableStreamName(e,t),{domain:s}=Gu(ze.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await(await(await Y_()).jetstreamManager({domain:s,timeout:24e4})).streams.purge(n,r)}catch(n){if(n.message==="stream not found")Js.warn(n);else throw n}}a(ak,"purgeTableStream");async function Tse(e,t){if(wr.get(ze.CONFIG_PARAMS.CLUSTERING_ENABLED))for(let r=0,n=t.length;r<n;r++)await ak(e,t[r])}a(Tse,"purgeSchemaTableStreams");async function Ase(e){return(await W_()).streams.info(e)}a(Ase,"getStreamInfo");function Rse(e,t,r){return`${Hr.SUBJECT_PREFIXES.TXN}.${e}${t?"."+t:""}.${r}`}a(Rse,"createSubjectName");async function ZE(){if(V_)return V_;if(V_=(await W_())?.nc?.info?.server_name,V_===void 0)throw new Error("Unable to get jetstream manager server name");return V_}a(ZE,"getJsmServerName");async function yse(){let e=await W_(),t=await ZE(),r=await rk();for(let n of r){let s=n.config,i=s.subjects[0];if(!i)continue;let o=bse(n),c=i.split(".");if(c[c.length-1]===t&&!o||s.name==="__HARPERDB_WORK_QUEUE__")continue;let u=i.split(".");u[u.length-1]=t;let f=u.join(".");Js.trace(`Updating stream subject name from: ${i} to: ${f}`),s.subjects[0]=f,await e.streams.update(s.name,s)}}a(yse,"updateLocalStreams");function bse(e){let{config:t}=e,r=!1,n=wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);s=s===null?-1:s;let i=wr.get(ze.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);return i=i===null?-1:i,n!==t.max_age&&(t.max_age=n,r=!0),s!==t.max_bytes&&(t.max_bytes=s,r=!0),i!==t.max_msgs&&(t.max_msgs=i,r=!0),r}a(bse,"updateStreamLimits");async function Ose(e){let t,r;try{t=await Xr.jetstream({domain:e}),r=await Xr.jetstreamManager({domain:e,checkAPI:!1})}catch(n){throw Js.error("Unable to connect to:",e),n}return{js:t,jsm:r}}a(Ose,"connectToRemoteJS")});function RO(e){let t=e.get(eg),r=t?(0,$u.unpack)(t):null;r||(r={remoteNameToId:{}});let n=rt(),s=!1;r.nodeName=rt();let i=r.remoteNameToId;if(i[n]!==0){let o=0,c;for(let l in i){let u=i[l];u===0?c=l:u>o&&(o=u)}if(c){o++,i[c]=o;let l=[Symbol.for("seq"),o];e.rootStore.dbisDb.transactionSync(()=>{e.rootStore.dbisDb.get(l)||e.rootStore.dbisDb.putSync(l,{seqId:j_(e)??1,nodes:[]})})}i[n]=0,e.putSync(eg,(0,$u.pack)(r))}return r}function z_(e){return RO(e).remoteNameToId}function uk(e,t){let r=RO(t),n=r.remoteNameToId,s=new Map,i=!1;for(let o in e){let c=e[o],l=n[o];if(l==null){let u=0;for(let f in n){let d=n[f];d>u&&(u=d)}l=u+1,n[o]=l,i=!0}s.set(c,l)}return i&&t.putSync(eg,(0,$u.pack)(r)),s}function tg(e,t){let r=RO(t),n=r.remoteNameToId,s=n[e];if(s==null){let i=0;for(let o in n){let c=n[o];c>i&&(i=c)}s=i+1,n[e]=s,t.putSync(eg,(0,$u.pack)(r))}return lk.trace?.("The remote node name map",e,n,s),s}var lk,$u,eg,yO=Re(()=>{lk=M(qs());Es();$u=require("msgpackr"),eg=Symbol.for("remote-ids");a(RO,"getIdMappingRecord");a(z_,"exportIdMapping");a(uk,"remoteToLocalNodeId");a(tg,"getIdOfRemoteNode")});var 
bO={};Ue(bO,{commits_awaiting_replication:()=>Vu,getHDBNodeTable:()=>or,getReplicationSharedStatus:()=>rg,iterateRoutes:()=>J_,shouldReplicateToNode:()=>Q_,subscribeToNodeUpdates:()=>Ku});function or(){return dk||(dk=_t({table:"hdb_nodes",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"subscriptions"},{attribute:"system_info"},{attribute:"url"},{attribute:"routes"},{attribute:"ca"},{attribute:"ca_info"},{attribute:"replicates"},{attribute:"revoked_certificates"},{attribute:"__createdtime__"},{attribute:"__updatedtime__"}]}))}function rg(e,t,r,n){return new Float64Array(e.getUserSharedBuffer(["replicated",t,r],new ArrayBuffer(48),n&&{callback:n}))}function Ku(e){or().subscribe({}).then(async t=>{for await(let r of t){let n=r?.value?.name;mk.debug?.("adding node",n,"on node",rt()," on process",process.pid),server.nodes=server.nodes.filter(i=>i.name!==n),r.type==="put"&&n!==rt()&&(r.value?server.nodes.push(r.value):console.error("Invalid node update event",r));let s=new Map;for await(let i of or().search({}))if(i.shard!=null){let o=s.get(i.shard);o||s.set(i.shard,o=[]),o.push(i)}server.shards=s,(r.type==="put"||r.type==="delete")&&e(r.value,r.id)}})}function Q_(e,t){let r=Ta.default.get(x.REPLICATION_DATABASES);return(e.replicates===!0||e.replicates?.sends)&&databases[t]&&(r==="*"||r?.find?.(n=>n.name===t&&(!n.sharded||e.shard===Ta.default.get(x.REPLICATION_SHARD))))&&or().primaryStore.get(rt())?.replicates||e.subscriptions?.some(n=>(n.database||n.schema)===t&&n.subscribe)}function Nse(){Ku(e=>{Aa({},(t,r)=>{let n=e.name,s=fk.get(n);if(s||fk.set(n,s=new Map),s.has(r))return;let i;for(let o in t)if(i=t[o].auditStore,i)break;if(i){let o=rg(i,r,n,()=>{let c=o[0],l=o.lastTime;for(let{txnTime:u,onConfirm:f}of Vu.get(r)||[])u>l&&u<=c&&f();o.lastTime=c});o.lastTime=0,s.set(r,o)}})})}function*J_(e){for(let t of e.routes||[]){let r=t.url,n;if(typeof t=="string"?t.includes("://")?r=t:n=t:n=t.hostname??t.host,n&&!r){let s=Ta.default.get(x.REPLICATION_SECUREPORT)??(!Ta.default.get(x.REPLICATION_PORT)&&Ta.default.get(x.OPERATIONSAPI_NETWORK_SECUREPORT)),i;(i=n.match(/:(\d+)$/)?.[1])?n=n.slice(0,-i[0].length-1):t.port?i=t.port:i=s||Ta.default.get(x.REPLICATION_PORT)||Ta.default.get(x.OPERATIONSAPI_NETWORK_PORT);let o=i?.lastIndexOf?.(":");o>0&&(i=+i.slice(o+1).replace(/[\[\]]/g,"")),r=(s?"wss://":"ws://")+n+":"+i}if(!r){_k.isMainThread&&console.error("Invalid route, must specify a url or host (with port)");continue}yield{replicates:!t.subscriptions,url:r,subscription:t.subscriptions,routes:t.routes,start_time:t.startTime,revoked_certificates:t.revokedCertificates}}}var _k,hk,Ta,mk,dk,fk,Vu,sl=Re(()=>{xe();Es();np();_k=require("worker_threads"),hk=M(he()),Ta=M(oe());k();mk=M(qs());server.nodes=[];a(or,"getHDBNodeTable");a(rg,"getReplicationSharedStatus");a(Ku,"subscribeToNodeUpdates");a(Q_,"shouldReplicateToNode");fk=new Map;KD((e,t,r)=>{if(r>server.nodes.length)throw new hk.ClientError(`Cannot confirm replication to more nodes (${r}) than are in the network (${server.nodes.length})`);Vu||(Vu=new Map,Nse());let n=Vu.get(e);return n||(n=[],Vu.set(e,n)),new Promise(s=>{let i=0;n.push({txnTime:t,onConfirm:a(()=>{++i===r&&s()},"onConfirm")})})});a(Nse,"startSubscriptionToReplications");a(J_,"iterateRoutes")});var gk={};Ue(gk,{connectedToNode:()=>il,disconnectedFromNode:()=>Wu,ensureNode:()=>To,requestClusterStatus:()=>Ek,startOnMainThread:()=>wO});async function wO(e){let t=0,r=Xe();for(let i of Object.getOwnPropertyNames(r)){let o=r[i];for(let c in o){let 
l=o[c];if(l.auditStore){ng.set(i,j_(l.auditStore));break}}}ki.whenThreadsStarted.then(async()=>{let i=[];for await(let o of r.system.hdb_nodes?.search([])||[])i.push(o);for(let o of J_(e))try{let c=!o.subscriptions;if(c){let u=rt(),f=or().primaryStore.get(u);if(f!==null){let d=e.url??Ra();(f===void 0||f.url!==d||f.shard!==e.shard)&&await To(u,{name:u,url:d,shard:e.shard,replicates:!0})}}let l=o.trusted!==!1;if(c&&o.replicates==null&&(o.replicates=!0),i.find(u=>u.url===o.url))continue;s(o)}catch(c){console.error(c)}Ku(s)});let n;function s(i,o=i?.name){let c=rt()&&o===rt()||Ra()&&i?.url===Ra();if(c){let d=!!i?.replicates;if(n!==void 0&&n!==d)for(let _ of or().search([]))_.replicates&&_.name!==o&&s(_,_.name);n=d}if(it.trace("Setting up node replication for",i),!i){for(let[d,_]of Hi){let h;for(let[m,{worker:S,nodes:g}]of _){let R=g[0];if(R&&R.name==o){h=!0;for(let[E,{worker:T}]of _)_.delete(E),it.warn("Node was deleted, unsubscribing from node",o,E,d),T?.postMessage({type:"unsubscribe-from-node",node:o,database:E,url:d});break}}if(h){Hi.get(d).iterator.remove(),Hi.delete(d);return}}return}if(c)return;if(!i.url){it.info(`Node ${i.name} is missing url`);return}let l=Hi.get(i.url);if(l&&l.iterator.remove(),!(i.replicates===!0||i.replicates?.sends)&&!i.subscriptions?.length&&!l)return;if(it.info(`Added node ${i.name} at ${i.url} for process ${rt()}`),i.replicates&&i.subscriptions&&(i={...i,subscriptions:null}),i.name){for(let[d,_]of Yu)if(i.url===_.url){Yu.delete(d);break}Yu.set(i.name,i)}let u=Xe();if(l||(l=new Map,Hi.set(i.url,l)),l.iterator=Aa(e,(d,_,h)=>{h?f(_,!0):f(_,!1)}),i.subscriptions)for(let d of i.subscriptions){let _=d.database||d.schema;u[_]||(it.warn(`Database ${_} not found for node ${i.name}, making a subscription anyway`),f(_,!1))}function f(d,_){it.trace("Setting up replication for database",d,"on node",i.name);let h=l.get(d),m,S=[{replicateByDefault:_,...i}];ng.has(d)&&(S.push({replicateByDefault:_,name:rt(),start_time:ng.get(d),end_time:Date.now(),replicates:!0}),ng.delete(d));let g=Q_(i,d),R=ki.workers.filter(E=>E.name==="http");if(h?(m=h.worker,h.nodes=S):g&&(t=t%R.length,m=R[t++],l.set(d,{worker:m,nodes:S,url:i.url}),m?.on("exit",()=>{l.get(d)?.worker===m&&(l.delete(d),f(d,_))})),g)setTimeout(()=>{let E={type:"subscribe-to-node",database:d,nodes:S};m?m.postMessage(E):X_(E)},wse);else{it.info("Node no longer should be used, unsubscribing from node",i.replicates,!!u[d],or().primaryStore.get(rt())?.replicates),or().primaryStore.get(rt())?.replicates||(n=!1);let E={type:"unsubscribe-from-node",database:d,url:i.url,name:i.name};m?m.postMessage(E):ig(E)}}a(f,"onDatabase")}a(s,"onNodeUpdate"),Wu=a(function(i){try{it.info("Disconnected from node",i.name,i.url,"finished",!!i.finished);let o=Array.from(Yu.keys()),c=o.sort(),l=c.indexOf(i.name||Xs(i.url));if(l===-1){it.warn("Disconnected node not found in node map",i.name,o);return}let u=Hi.get(i.url),f=u?.get(i.database);if(!f){it.warn("Disconnected node not found in replication map",i.database,u);return}if(f.connected=!1,i.finished||!NO.default.get(x.REPLICATION_FAILOVER))return;let d=f.nodes[0];if(!(d.replicates===!0||d.replicates?.sends||d.subscriptions?.length))return;let _=d.shard,h=(l+1)%c.length;for(;l!==h;){let m=c[h],S=Yu.get(m);u=Hi.get(S.url);let g=u?.get(i.database);if(!g||g.connected===!1||g.nodes[0].shard!==_){h=(h+1)%c.length;continue}let{worker:R,nodes:E}=g,T=!1;for(let N of f.nodes){if(E.some(v=>v.name===N.name)){it.info(`Disconnected node is already failing over to ${m} for 
${i.database}`);continue}N.end_time<Date.now()||(E.push(N),T=!0)}if(f.nodes=[f.nodes[0]],!T){it.info(`Disconnected node ${i.name} has no nodes to fail over to ${m}`);return}it.info(`Failing over ${i.database} from ${i.name} to ${m}`),R?R.postMessage({type:"subscribe-to-node",database:i.database,nodes:E}):X_({database:i.database,nodes:E});return}it.warn("Unable to find any other node to fail over to",i.name,i.url)}catch(o){it.error("Error failing over node",o)}},"disconnectedFromNode"),il=a(function(i){let o=Hi.get(i.url),c=o?.get(i.database);if(!c){it.warn("Connected node not found in replication map, this may be because the node is being removed",i.database,o);return}c.connected=!0,c.latency=i.latency;let l=c.nodes[0];if(!l){it.info("Connected node has no nodes",i.database,c);return}if(!l.name){it.debug("Connected node is not named yet",i.database,c);return}c.nodes=[l];let u=!1;for(let f of Hi.values()){let d=f.get(i.database);if(!d||d==c)continue;let{worker:_,nodes:h,connected:m}=d;if(h)if(m===!1&&h[0].shard===l.shard)u=!0,c.nodes.push(h[0]);else{let S=h.filter(g=>g.name!==l.name);S.length<h.length&&(d.nodes=S,_.postMessage({type:"subscribe-to-node",database:i.database,nodes:h}))}}u&&c.worker&&c.worker.postMessage({type:"subscribe-to-node",database:i.database,nodes:c.nodes})},"connectedToNode"),(0,ki.onMessageByType)("disconnected-from-node",Wu),(0,ki.onMessageByType)("connected-to-node",il),(0,ki.onMessageByType)("request-cluster-status",Ek)}function Ek(e,t){let r=[];for(let[n,s]of Yu)try{let i=Hi.get(s.url);it.info("Getting cluster status for",n,s.url,"has dbs",i?.size);let o=[];if(i){for(let[l,{worker:u,connected:f,nodes:d,latency:_}]of i)o.push({database:l,connected:f,latency:_,thread_id:u?.threadId,nodes:d.filter(h=>!(h.end_time<Date.now())).map(h=>h.name)});let c=(0,OO.cloneDeep)(s);c.database_sockets=o,delete c.ca,delete c.node_name,delete c.__updatedtime__,delete c.__createdtime__,r.push(c)}}catch(i){it.warn("Error getting cluster status for",s?.url,i)}return t?.postMessage({type:"cluster-status",connections:r}),{connections:r}}async function To(e,t){let r=or();e=e??Xs(t.url),t.name=e;try{if(t.ca){let s=new pk.X509Certificate(t.ca);t.ca_info={issuer:s.issuer.replace(/\n/g," "),subject:s.subject.replace(/\n/g," "),subject_alt_name:s.subjectAltName,serial_number:s.serialNumber,valid_from:s.validFrom,valid_to:s.validTo}}}catch(s){it.error("Error parsing replication CA info for hdb_nodes table",s.message)}let n=r.primaryStore.get(e);if(it.debug(`Ensuring node ${e} at ${t.url}, existing record:`,n,"new record:",t),!n)await r.patch(t);else{t.replicates&&!NO.default.get(x.CLUSTERING_ENABLED)&&(t.subscriptions=null);for(let s in t)if(n[s]!==t[s]&&s==="subscriptions"&&t[s]&&n[s]){let i=[],o=(0,OO.cloneDeep)(n[s]);for(let c of t[s]){let l=!1;for(let u of o)if((c.database??c.schema)===(u.database??u.schema)&&c.table===u.table){u.publish=c.publish,u.subscribe=c.subscribe,l=!0;break}l||i.push(c)}t.subscriptions=[...o,...i];break}if(Array.isArray(t.revoked_certificates)){let s=n.revoked_certificates||[];t.revoked_certificates=[...new Set([...s,...t.revoked_certificates])]}it.info(`Updating node ${e} at ${t.url}`),await r.patch(t)}}var ki,sg,it,OO,NO,pk,wse,Hi,Wu,il,Yu,ng,Z_=Re(()=>{xe();ki=M(et());Es();sg=require("worker_threads");sl();it=M(X()),OO=require("lodash"),NO=M(oe());k();pk=require("crypto"),wse=200,Hi=new Map,Yu=new Map,ng=new 
Map;a(wO,"startOnMainThread");a(Ek,"requestClusterStatus");sg.parentPort&&(Wu=a(e=>{sg.parentPort.postMessage({type:"disconnected-from-node",...e})},"disconnectedFromNode"),il=a(e=>{sg.parentPort.postMessage({type:"connected-to-node",...e})},"connectedToNode"),(0,ki.onMessageByType)("subscribe-to-node",e=>{X_(e)}),(0,ki.onMessageByType)("unsubscribe-from-node",e=>{ig(e)}));a(To,"ensureNode")});var ti=C(qk=>{"use strict";var ar=require("path"),{watch:Ise}=require("chokidar"),bn=require("fs-extra"),zu=require("node-forge"),bk=require("net"),{generateKeyPair:CO,X509Certificate:Ao,createPrivateKey:Ok}=require("crypto"),Cse=require("util");CO=Cse.promisify(CO);var Ot=zu.pki,Zs=require("joi"),{v4:Nk}=require("uuid"),{validateBySchema:MO}=tt(),At=X(),Jn=oe(),gs=(k(),P(q)),{CONFIG_PARAMS:al}=gs,ei=Ey(),{ClientError:ba}=he(),og=require("node:tls"),{relative:wk,join:Pse}=require("node:path"),{CERT_PREFERENCE_APP:zCe,CERTIFICATE_VALUES:Sk}=ei,Dse=pc(),PO=Lt(),{table:Lse,getDatabases:Mse,databases:IO}=(xe(),P(ct)),{getJWTRSAKeys:Tk}=(Hu(),P(q_));Object.assign(qk,{generateKeys:xO,updateConfigCert:Uk,createCsr:Fse,signCertificate:qse,setCertTable:ju,loadCertificates:Lk,reviewSelfSignedCert:HO,createTLSSelector:Bk,listCertificates:kk,addCertificate:Wse,removeCertificate:jse,createNatsCerts:Vse,generateCertsKeys:$se,getReplicationCert:th,getReplicationCertAuth:kse,renewSelfSigned:Kse,hostnamesFromCert:kO,getKey:Qse});var{urlToNodeName:Ik,getThisNodeUrl:vse,getThisNodeName:cg,clearThisNodeName:Use}=(Es(),P(wa)),{readFileSync:xse,statSync:Ck}=require("node:fs"),jCe=oe(),{getTicketKeys:Bse,onMessageFromWorkers:Hse}=et(),ya=X(),{isMainThread:Pk}=require("worker_threads"),{TLSSocket:Dk,createSecureContext:QCe}=require("node:tls"),vO=3650,eh=["127.0.0.1","localhost","::1"],UO=[{name:"countryName",value:"USA"},{name:"stateOrProvinceName",value:"Colorado"},{name:"localityName",value:"Denver"},{name:"organizationName",value:"HarperDB, Inc."}];Hse(async e=>{e.type===gs.ITC_EVENT_TYPES.RESTART&&(Jn.initSync(!0),await HO())});var kr;function Na(){return kr||(kr=Mse().system.hdb_certificate,kr||(kr=Lse({table:"hdb_certificate",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"uses"},{attribute:"certificate"},{attribute:"is_authority"},{attribute:"private_key_name"},{attribute:"details"},{attribute:"is_self_signed"},{attribute:"__updatedtime__"}]}))),kr}a(Na,"getCertTable");async function th(){let e=Bk("operations-api"),t={secureContexts:null,setSecureContext:a(s=>{},"setSecureContext")};await e.initialize(t);let r=t.secureContexts.get(cg());if(!r)return;let n=new Ao(r.options.cert);return r.cert_parsed=n,r.issuer=n.issuer,r}a(th,"getReplicationCert");async function kse(){Na();let e=(await th()).options.cert,r=new Ao(e).issuer.match(/CN=(.*)/)?.[1];return kr.get(r)}a(kse,"getReplicationCertAuth");var Ak,Oa=new Map;function Lk(){if(Ak)return;Ak=!0;let e=[{configKey:al.TLS},{configKey:al.OPERATIONSAPI_TLS}];Na();let t=ar.dirname(PO.getConfigFilePath()),r;for(let{configKey:n}of e){let s=PO.getConfigFromFile(n);if(s){Array.isArray(s)||(s=[s]);for(let i of s){let o=i.privateKey,c=o&&wk(Pse(t,"keys"),o);c&&Rk(o,l=>{Oa.set(c,l)},"private key");for(let l of[!1,!0]){let u=i[l?"certificateAuthority":"certificate"];if(u&&Pk){let f;Rk(u,d=>{if(Sk.cert===d)return;let _=i.hostname??i.hostnames??i.host??i.hosts;_&&!Array.isArray(_)&&(_=[_]);let h=xk(u),m=new Ao(h),S;try{S=Fk(m)}catch(T){logger.error("error extracting host name from certificate",T);return}if(S==null){logger.error("No host name found on 
certificate");return}if(m.checkIssued(new Ao(Sk.cert)))return;let g=kr.primaryStore.get(S),R=Ck(u).mtimeMs,E=!g||g.is_self_signed?1:g.file_timestamp??g.__updatedtime__;if(g&&R<=E){R<E&&At.info(`Certificate ${S} at ${u} is older (${new Date(R)}) than the certificate in the database (${E>1?new Date(E):"only self signed certificate available"})`);return}r=kr.put({name:S,uses:["https",...n.includes("operations")?["operations"]:[]],ciphers:i.ciphers,certificate:h,private_key_name:c,is_authority:l,hostnames:_,file_timestamp:R,details:{issuer:m.issuer.replace(/\n/g," "),subject:m.subject?.replace(/\n/g," "),subject_alt_name:m.subjectAltName,serial_number:m.serialNumber,valid_from:m.validFrom,valid_to:m.validTo}})},l?"certificate authority":"certificate")}}}}}return r}a(Lk,"loadCertificates");function Rk(e,t,r){let n,s=a((i,o)=>{try{let c=o.mtimeMs;c&&c!==n&&(n&&Pk&&At.warn(`Reloading ${r}:`,i),n=c,t(xk(i)))}catch(c){At.error(`Error loading ${r}:`,i,c)}},"loadFile");bn.existsSync(e)?s(e,Ck(e)):At.error(`${r} file not found:`,e),Ise(e,{persistent:!1}).on("change",s)}a(Rk,"loadAndWatch");function DO(){let e=vse();if(e==null){let t=eh[0];return At.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return Ik(e)}a(DO,"getHost");function ag(){let e=cg();if(e==null){let t=eh[0];return At.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return e}a(ag,"getCommonName");async function Fse(){let e=await th(),t=Ot.certificateFromPem(e.options.cert),r=Ot.privateKeyFromPem(e.options.key);At.info("Creating CSR with cert named:",e.name);let n=Ot.createCertificationRequest();n.publicKey=t.publicKey;let s=[{name:"commonName",value:ag()},...UO];At.info("Creating CSR with subject",s),n.setSubject(s);let i=[{name:"unstructuredName",value:"HarperDB, Inc."},{name:"extensionRequest",extensions:Mk()}];return At.info("Creating CSR with attributes",i),n.setAttributes(i),n.sign(r),zu.pki.certificationRequestToPem(n)}a(Fse,"createCsr");function Mk(){let e=eh.includes(ag())?eh:[...eh,ag()];return e.includes(DO())||e.push(DO()),[{name:"basicConstraints",cA:!1,critical:!0},{name:"keyUsage",digitalSignature:!0,keyEncipherment:!0,critical:!0},{name:"extKeyUsage",serverAuth:!0,clientAuth:!0},{name:"nsCertType",client:!0,server:!0},{name:"subjectAltName",altNames:e.map(t=>bk.isIP(t)?{type:7,ip:t}:{type:2,value:t})}]}a(Mk,"certExtensions");async function qse(e){let t={},r=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME);if(e.csr){let n,s;Na();for await(let f of kr.search([]))if(f.is_authority&&!f.details.issuer.includes("HarperDB-Certificate-Authority")){if(Oa.has(f.private_key_name)){n=Oa.get(f.private_key_name),s=f;break}else if(f.private_key_name&&await bn.exists(ar.join(r,f.private_key_name))){n=bn.readFile(ar.join(r,f.private_key_name)),s=f;break}}if(!n){let f=await LO();s=f.ca,n=f.private_key}n=Ot.privateKeyFromPem(n),t.signingCA=s.certificate;let i=Ot.certificateFromPem(s.certificate);At.info("Signing CSR with cert named",s.name);let o=Ot.certificationRequestFromPem(e.csr);try{o.verify()}catch(f){return At.error(f),new Error("Error verifying CSR: "+f.message)}let c=zu.pki.createCertificate();c.serialNumber=Math.random().toString().slice(2,10),c.validity.notBefore=new Date;let l=new Date;c.validity.notAfter=l,c.validity.notAfter.setDate(l.getDate()+vO),At.info("sign cert setting validity:",c.validity),At.info("sign cert setting subject from CSR:",o.subject.attributes),c.setSubject(o.subject.attributes),At.info("sign cert setting 
issuer:",i.subject.attributes),c.setIssuer(i.subject.attributes);let u=o.getAttribute({name:"extensionRequest"}).extensions;At.info("sign cert adding extensions from CSR:",u),c.setExtensions(u),c.publicKey=o.publicKey,c.sign(n,zu.md.sha256.create()),t.certificate=Ot.certificateToPem(c)}else At.info("Sign cert did not receive a CSR from:",e.url,"only the CA will be returned");return t}a(qse,"signCertificate");async function Gse(e,t){await ju({name:cg(),uses:["https","wss"],certificate:e,private_key_name:"privateKey.pem",is_authority:!1,is_self_signed:!0}),await ju({name:t.subject.getField("CN").value,uses:["https","wss"],certificate:Ot.certificateToPem(t),private_key_name:"privateKey.pem",is_authority:!0,is_self_signed:!0})}a(Gse,"createCertificateTable");async function ju(e){let t=new Ao(e.certificate);e.details={issuer:t.issuer.replace(/\n/g," "),subject:t.subject?.replace(/\n/g," "),subject_alt_name:t.subjectAltName,serial_number:t.serialNumber,valid_from:t.validFrom,valid_to:t.validTo},Na(),await kr.patch(e)}a(ju,"setCertTable");async function xO(){let e=await CO("rsa",{modulusLength:4096,publicKeyEncoding:{type:"spki",format:"pem"},privateKeyEncoding:{type:"pkcs8",format:"pem"}});return{public_key:Ot.publicKeyFromPem(e.publicKey),private_key:Ot.privateKeyFromPem(e.privateKey)}}a(xO,"generateKeys");async function BO(e,t,r){let n=Ot.createCertificate();if(!t){let o=await th();t=Ot.certificateFromPem(o.options.cert).publicKey}n.publicKey=t,n.serialNumber=Math.random().toString().slice(2,10),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+vO);let i=[{name:"commonName",value:ag()},...UO];return n.setSubject(i),n.setIssuer(r.subject.attributes),n.setExtensions(Mk()),n.sign(e,zu.md.sha256.create()),Ot.certificateToPem(n)}a(BO,"generateCertificates");async function LO(){let e=await kk(),t;for(let r of e){if(!r.is_authority)continue;let n=await Hk(r.private_key_name);if(r.private_key_name&&n&&new Ao(r.certificate).checkPrivateKey(Ok(n))){At.trace(`CA named: ${r.name} found with matching private key`),t={ca:r,private_key:n};break}}if(t)return t;At.trace("No CA found with matching private key")}a(LO,"getCertAuthority");async function vk(e,t,r=!0){let n=Ot.createCertificate();n.publicKey=t,n.serialNumber=Math.random().toString().slice(2,10),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+vO);let i=[{name:"commonName",value:`HarperDB-Certificate-Authority-${Jn.get(al.REPLICATION_HOSTNAME)??Ik(Jn.get(al.REPLICATION_URL))??Nk().split("-")[0]}`},...UO];n.setSubject(i),n.setIssuer(i),n.setExtensions([{name:"basicConstraints",cA:!0,critical:!0},{name:"keyUsage",keyCertSign:!0,critical:!0}]),n.sign(e,zu.md.sha256.create());let o=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),c=ar.join(o,ei.PRIVATEKEY_PEM_NAME);return r&&await bn.writeFile(c,Ot.privateKeyToPem(e)),n}a(vk,"generateCertAuthority");async function $se(){let{private_key:e,public_key:t}=await xO(),r=await vk(e,t),n=await BO(e,t,r);await Gse(n,r),Uk()}a($se,"generateCertsKeys");async function Vse(){let e=await BO(Ot.privateKeyFromPem(ei.CERTIFICATE_VALUES.key),void 0,Ot.certificateFromPem(ei.CERTIFICATE_VALUES.cert)),t=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),r=ar.join(t,ei.NATS_CERTIFICATE_PEM_NAME);await bn.exists(r)||await bn.writeFile(r,e);let n=ar.join(t,ei.NATS_CA_PEM_NAME);await bn.exists(n)||await bn.writeFile(n,ei.CERTIFICATE_VALUES.cert)}a(Vse,"createNatsCerts");async function Kse(){Na();for await(let 
e of kr.search([{attribute:"is_self_signed",value:!0}]))await kr.delete(e.name);await HO()}a(Kse,"renewSelfSigned");async function HO(){Use(),await Lk(),Na();let e=await LO();if(!e){At.notify("A matching Certificate Authority and key was not found. A new CA will be created in advance, so it's available if needed.");let r=a(u=>{try{return{key:Ot.privateKeyFromPem(bn.readFileSync(u)),keyPath:u}}catch(f){return At.warn(`Failed to parse private key from ${u}:`,f.message),{key:null,keyPath:u}}},"tryToParseKey"),n=Jn.get(al.TLS),s,i;if(Array.isArray(n)){for(let u of n)if(u.privateKey){let f=r(u.privateKey);if(s=f.key,i=f.keyPath,f.key)break}}else{let u=Jn.get(al.TLS_PRIVATEKEY),f=r(u);s=f.key,i=f.keyPath}let o=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),c=wk(o,i);s||(At.warn("Unable to parse the TLS key",i,"A new key will be generated and used to create Certificate Authority"),{private_key:s}=await xO(),bn.existsSync(ar.join(o,ei.PRIVATEKEY_PEM_NAME))&&(c=`privateKey${Nk().split("-")[0]}.pem`),await bn.writeFile(ar.join(o,c),Ot.privateKeyToPem(s)));let l=await vk(s,Ot.setRsaPublicKey(s.n,s.e),!1);await ju({name:l.subject.getField("CN").value,uses:["https"],certificate:Ot.certificateToPem(l),private_key_name:c,is_authority:!0,is_self_signed:!0})}if(!await th()){let r=cg();At.notify(`A suitable replication certificate was not found, creating new self singed cert named: ${r}`),e=e??await LO();let n=Ot.certificateFromPem(e.ca.certificate),s=n.publicKey,i=await BO(Ot.privateKeyFromPem(e.private_key),s,n);await ju({name:r,uses:["https","operations","wss"],certificate:i,is_authority:!1,private_key_name:e.ca.private_key_name,is_self_signed:!0})}}a(HO,"reviewSelfSignedCert");function Uk(){let e=Dse(Object.keys(gs.CONFIG_PARAM_MAP),!0),t=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),r=ar.join(t,ei.PRIVATEKEY_PEM_NAME),n=ar.join(t,ei.NATS_CERTIFICATE_PEM_NAME),s=ar.join(t,ei.NATS_CA_PEM_NAME),i=gs.CONFIG_PARAMS,o={[i.TLS_PRIVATEKEY]:e[i.TLS_PRIVATEKEY.toLowerCase()]?e[i.TLS_PRIVATEKEY.toLowerCase()]:r};e[i.TLS_CERTIFICATE.toLowerCase()]&&(o[i.TLS_CERTIFICATE]=e[i.TLS_CERTIFICATE.toLowerCase()]),e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(o[i.TLS_CERTIFICATEAUTHORITY]=e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]&&(o[i.OPERATIONSAPI_TLS_CERTIFICATE]=e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]),e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]&&(o[i.OPERATIONSAPI_TLS_PRIVATEKEY]=e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(o[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY]=e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]),(e[i.CLUSTERING_ENABLED.toLowerCase()]||e.clustering)&&(o[i.CLUSTERING_TLS_CERTIFICATE]=e[i.CLUSTERING_TLS_CERTIFICATE.toLowerCase()]??n,o[i.CLUSTERING_TLS_CERT_AUTH]=e[i.CLUSTERING_TLS_CERT_AUTH.toLowerCase()]??s,o[i.CLUSTERING_TLS_PRIVATEKEY]=e[i.CLUSTERING_TLS_PRIVATEKEY.toLowerCase()]??r),PO.updateConfigValue(void 0,void 0,o,!1,!0)}a(Uk,"updateConfigCert");function xk(e){return e.startsWith("-----BEGIN")?e:xse(e,"utf8")}a(xk,"readPEM");var yk=og.createSecureContext;og.createSecureContext=function(e){if(!e.cert||!e.key)return yk(e);let t={...e};delete t.key,delete t.cert;let r=yk(t);return r.context.setCert(e.cert),r.context.setKey(e.key,void 0),r};var Yse=Dk.prototype._init;Dk.prototype._init=function(e,t){Yse.call(this,e,t);let r=this;this._handle.oncertcb=function(n){let 
s=n.servername;r._SNICallback(s,(i,o)=>{this.sni_context=o?.context||o,this.certCbDone()})}};var ol=new Map;function Bk(e,t){let r=new Map,n,s=!1;return i.initialize=o=>i.ready?i.ready:(o&&(o.secureContexts=r,o.secureContextsListeners=[]),i.ready=new Promise((c,l)=>{async function u(){try{r.clear(),ol.clear();let f=0;for await(let d of IO.system.hdb_certificate.search([])){let _=d.certificate,h=new Ao(_);d.is_authority&&(h.asString=_,ol.set(h.subject,_))}for await(let d of IO.system.hdb_certificate.search([]))try{if(d.is_authority)continue;let _=e==="operations-api",h=d.is_self_signed?1:2;_&&d.uses?.includes?.("operations")&&(h+=1);let m=await Hk(d.private_key_name),S=d.certificate,g=new Ao(S);if(ol.has(g.issuer)&&(S+=`
  `+ol.get(g.issuer)),!m||!S)throw new Error("Missing private key or certificate for secure server");let R={ciphers:d.ciphers,ticketKeys:Bse(),availableCAs:ol,ca:t&&Array.from(ol.values()),cert:S,key:m,key_file:d.private_key_name,is_self_signed:d.is_self_signed};o&&(R.sessionIdContext=o.sessionIdContext);let E=og.createSecureContext(R);E.name=d.name,E.options=R,E.quality=h,E.certificateAuthorities=Array.from(ol),E.certStart=S.toString().slice(0,100);let T=d.hostnames??kO(g);Array.isArray(T)||(T=[T]);let N;for(let v of T)if(v){v[0]==="*"&&(s=!0,v=v.slice(1)),v===DO()&&(h+=2),bk.isIP(v)&&(N=!0);let H=r.get(v)?.quality??0;h>H&&r.set(v,E)}else ya.error("No hostname found for certificate at",og.certificate);ya.trace("Adding TLS",E.name,"for",o.ports||"client","cert named",d.name,"hostnames",T,"quality",h,"best quality",f),h>f&&(i.defaultContext=n=E,f=h,o&&(o.defaultContext=E))}catch(_){ya.error("Error applying TLS for",d.name,_)}o?.secureContextsListeners.forEach(d=>d()),c(n)}catch(f){l(f)}}a(u,"updateTLS"),IO.system.hdb_certificate.subscribe({listener:a(()=>setTimeout(()=>u(),1500).unref(),"listener"),omitCurrent:!0}),u()})),i;function i(o,c){ya.info("TLS requested for",o||"(no SNI)");let l=o;for(;;){let f=r.get(l);if(f)return ya.debug("Found certificate for",o,f.certStart),f.updatedContext&&(f=f.updatedContext),c(null,f);if(s&&l){let d=l.indexOf(".",1);d<0?l="":l=l.slice(d)}else break}o?ya.debug("No certificate found to match",o,"using the default certificate"):ya.debug("No SNI, using the default certificate",n?.name);let u=n;u?u.updatedContext&&(u=u.updatedContext):ya.info("No default certificate found"),c(null,u)}a(i,"SNICallback")}a(Bk,"createTLSSelector");async function Hk(e){let t=Oa.get(e);return!t&&e?await bn.readFile(ar.join(Jn.get(al.ROOTPATH),gs.LICENSE_KEY_DIR_NAME,e),"utf8"):t}a(Hk,"getPrivateKeyByName");async function kk(){Na();let e=[];for await(let t of kr.search([]))e.push(t);return e}a(kk,"listCertificates");async function Wse(e){let t=MO(e,Zs.object({name:Zs.string().required(),certificate:Zs.string().required(),is_authority:Zs.boolean().required(),private_key:Zs.string(),hosts:Zs.array(),uses:Zs.array()}));if(t)throw new ba(t.message);let{name:r,certificate:n,private_key:s,is_authority:i}=e,o=new Ao(n),c=!1,l=!1,u;for(let[h,m]of Oa)!s&&!c&&o.checkPrivateKey(Ok(m))&&(c=!0,u=h),s&&s===m&&(l=!0,u=h);if(!i&&!s&&!c)throw new ba("A suitable private key was not found for this certificate");let f;if(!r){try{f=Fk(o)}catch(h){At.error(h)}if(f==null)throw new ba("Error extracting certificate common name, please provide a name parameter")}let d=zse(r??f);s&&!c&&!l&&(await bn.writeFile(ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME,d+".pem"),s),Oa.set(d,s));let _={name:r??f,certificate:n,is_authority:i,hosts:e.hosts,uses:e.uses};return(!i||i&&u||i&&s)&&(_.private_key_name=u??d+".pem"),e.ciphers&&(_.ciphers=e.ciphers),await ju(_),"Successfully added certificate: "+d}a(Wse,"addCertificate");function zse(e){return e.replace(/[^a-z0-9\.]/gi,"-")}a(zse,"sanitizeName");async function jse(e){let t=MO(e,Zs.object({name:Zs.string().required()}));if(t)throw new ba(t.message);let{name:r}=e;Na();let n=await kr.get(r);if(!n)throw new ba(r+" not found");let{private_key_name:s}=n;if(s){let i=Array.from(await kr.search([{attribute:"private_key_name",value:s}]));i.length===1&&i[0].name===r&&(At.info("Removing private key named",s),await bn.remove(ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME,s)))}return await kr.delete(r),"Successfully removed "+r}a(jse,"removeCertificate");function Fk(e){let 
t=e.subject?.match(/CN=(.*)/)?.[1];return t||kO(e)[0]}a(Fk,"getPrimaryHostName");function kO(e){if(e.subjectAltName)return e.subjectAltName.split(",").map(r=>{let n=r.indexOf(":");if(r=r.slice(n+1),r=r.trim(),r[0]==='"')try{r=JSON.parse(r)}catch{}return r.indexOf("=")>-1?r.match(/CN=([^,]*)/)?.[1]:r}).filter(r=>r);let t=certObj.subject?.match(/CN=(.*)/)?.[1];return t?[t]:[]}a(kO,"hostnamesFromCert");async function Qse(e){if(e.bypass_auth!==!0)throw new ba("Unauthorized","401");let t=MO(e,Zs.object({name:Zs.string().required()}));if(t)throw new ba(t.message);let{name:r}=e;if(r===".jwtPrivate")return(await Tk()).privateKey;if(r===".jwtPublic")return(await Tk()).publicKey;if(Oa.get(r))return Oa.get(e.name);throw new ba("Key not found")}a(Qse,"getKey")});var lF={};Ue(lF,{CONFIRMATION_STATUS_POSITION:()=>oF,NodeReplicationConnection:()=>Ju,OPERATION_REQUEST:()=>$O,RECEIVED_TIME_POSITION:()=>KO,RECEIVED_VERSION_POSITION:()=>VO,RECEIVING_STATUS_POSITION:()=>YO,RECEIVING_STATUS_RECEIVING:()=>cF,RECEIVING_STATUS_WAITING:()=>aF,SENDING_TIME_POSITION:()=>rh,createWebSocket:()=>pg,database_subscriptions:()=>Ca,replicateOverWS:()=>nh,table_update_listeners:()=>zO});async function pg(e,t){let{authorization:r,rejectUnauthorized:n}=t||{},s=rt(),i;if(e==null)throw new TypeError(`Invalid URL: Expected a string URL for node "${s}" but received ${e}`);if(e.includes("wss://")){if(!qO){let l=(0,rF.createTLSSelector)("operations-api"),u={secureContexts:null};await l.initialize(u),qO=u.secureContexts}if(i=qO.get(s),i&&le.debug?.("Creating web socket for URL",e,"with certificate named:",i.name),!i&&n!==!1)throw new Error("Unable to find a valid certificate to use for replication to connect to "+e)}let o={};r&&(o.Authorization=r);let c={headers:o,localAddress:s?.startsWith("127.0")?s:void 0,servername:(0,sF.isIP)(t?.serverName)?void 0:t?.serverName,noDelay:!0,highWaterMark:128*1024,rejectUnauthorized:n!==!1,secureContext:void 0};return i&&(hg?.caCount!==Ro.size&&(hg=nF.createSecureContext({...i.options,ca:[...Ro,...i.options.availableCAs.values()]}),hg.caCount=Ro.size),c.secureContext=hg),new eF.WebSocket(e,"harperdb-replication-v1",c)}function nh(e,t,r){let n=t.port||t.securePort,s=cl.pid%1e3+"-"+tF.threadId+(n?"s:"+n:"c:"+t.url?.slice(-4))+" "+Math.random().toString().slice(2,3),i=0,o=Buffer.allocUnsafeSlow(1024),c=0,l=new DataView(o.buffer,0,1024),u=t.database,f=t.databaseSubscriptions||Ca,d,_,h=!1,m=t.subscription;m?.then&&m.then(A=>m=A);let S=t.tables||u&&Xe()[u];if(!r){le.error?.("No authorization provided"),ln(1008,"Unauthorized");return}let g=new Map,R=[],E=r.name;E&&t.connection&&(t.connection.nodeName=E);let T,N,v,H,Z,W,$,se=6e4,z,fe=0,ue=0,ee=0,Ae=Zk.default.get(x.REPLICATION_BLOBTIMEOUT)??12e4,me=new Map,ye=[],Ht=0,ft;if(t.url){let A=a(()=>{Z&&ue===e._socket?.bytesRead&&ee===e._socket?.bytesWritten?e.terminate():(Z=performance.now(),e.ping(),ue=e._socket?.bytesRead,ee=e._socket?.bytesWritten)},"send_ping");v=setInterval(A,Jk).unref(),A()}else Ln();e._socket?.setMaxListeners(200);function Ln(){clearTimeout(H),ue=e._socket?.bytesRead,ee=e._socket?.bytesWritten,H=setTimeout(()=>{ue===e._socket?.bytesRead&&ee===e._socket?.bytesWritten&&(le.warn?.(`Timeout waiting for ping from ${E}, terminating connection and reconnecting`),e.terminate())},Jk*2).unref()}a(Ln,"resetPingTimer");function cn(){return _||(_=rg(d,u,E)),_}a(cn,"getSharedStatus"),u&&Jo(u);let Cr,tf,lc=[],cA=[],lA,kt=[],rf=[],nf=[],uA=150,Um=25,sf=0,Ce=0,of=!1,Zi,Pr,Mn,uc;e.on("message",A=>{fe=performance.now();try{let y=A.dataView=new 
ll(A.buffer,A.byteOffset,A.byteLength);if(A[0]>127){let B=(0,Ze.decode)(A),[w,D,G]=B;switch(w){case $k:{if(D){if(E){if(E!==D){le.error?.(s,`Node name mismatch, expecting to connect to ${E}, but peer reported name as ${D}, disconnecting`),e.send((0,Ze.encode)([Qu])),ln(1008,"Node name mismatch");return}}else if(E=D,t.connection?.tentativeNode){let ae=t.connection.tentativeNode;ae.name=E,t.connection.tentativeNode=null,To(E,ae)}if(t.connection&&(t.connection.nodeName=E),le.debug?.(s,"received node name:",E,"db:",u),!u)try{Jo(u=B[2]),u==="system"&&(Cr=Aa(t,(ae,V)=>{Qo(V)&&lf(V)}),e.on("close",()=>{Cr?.remove()}))}catch(ae){le.warn?.(s,"Error setting database",ae),e.send((0,Ze.encode)([Qu])),ln(1008,ae.message);return}jo()}break}case jk:{le.debug?.(s,"Received table definitions for",D.map(ae=>ae.table));for(let ae of D){let V=B[2];ae.database=V;let J;Qo(V)&&(V==="system"?We[V]?.[ae.table]||(J=L(ae,We[V]?.[ae.table])):J=L(ae,We[V]?.[ae.table]),d||(d=J?.auditStore),S||(S=Xe()?.[V]))}break}case Qu:ln();break;case $O:try{let ae=r?.replicates||r?.subscribers||r?.name;server.operation(D,{user:r},!ae).then(V=>{Array.isArray(V)&&(V={results:V}),V.requestId=D.requestId,e.send((0,Ze.encode)([ug,V]))},V=>{e.send((0,Ze.encode)([ug,{requestId:D.requestId,error:V instanceof Error?V.toString():V}]))})}catch(ae){e.send((0,Ze.encode)([ug,{requestId:D.requestId,error:ae instanceof Error?ae.toString():ae}]))}break;case ug:let{resolve:Q,reject:j}=g.get(D.requestId);D.error?j(new Error(D.error)):Q(D),g.delete(D.requestId);break;case FO:let F=B[3];S||(u?le.error?.(s,"No tables found for",u):le.error?.(s,"Database name never received"));let ve=S[F];ve=L({table:F,database:u,attributes:D.attributes,schemaDefined:D.schemaDefined},ve),lc[G]={name:F,decoder:new Ze.Packr({useBigIntExtension:!0,randomAccessStructure:!0,freezeData:!0,typedStructs:D.typedStructs,structures:D.structures}),getEntry(ae){return ve.primaryStore.getEntry(ae)},rootStore:ve.primaryStore.rootStore};break;case Vk:uc=d?uk(D,d):new Map,lA=B[2],le.debug?.(s,`Acknowledged subscription request, receiving messages for nodes: ${lA}`);break;case Kk:let Ee=G;nf[Ee]=D;break;case zk:cn()[oF]=D,le.trace?.(s,"received and broadcasting committed update",D),cn().buffer.notify();break;case Wk:T=D,m.send({type:"end_txn",localTime:T,remoteNodeIds:R});break;case dg:{let ae=B[1],{fileId:V,size:J,finished:de,error:pe}=ae,ce=me.get(V);le.debug?.("Received blob",V,"has stream",!!ce,"connectedToBlob",!!ce?.connectedToBlob,"length",B[2].length,"finished",de),ce||(ce=new GO.PassThrough,ce.expectedSize=J,me.set(V,ce)),ce.lastChunk=Date.now();let Ie=B[2];Mt(Ie.byteLength,"bytes-received",`${E}.${u}`,"replication","blob");try{de?(pe?(ce.on("error",()=>{}),ce.destroy(new Error("Blob error: "+pe+" for record "+(ce.recordId??"unknown")+" from "+E))):ce.end(Ie),ce.connectedToBlob&&me.delete(V)):ce.write(Ie)}catch(He){le.error?.(`Error receiving blob for ${ce.recordId} from ${E} and streaming to storage`,He),me.delete(V)}break}case Yk:{let ae=D,V;try{let J=B[3],de=cA[G]||(cA[G]=S[B[4]]);if(!de)return le.warn?.("Unknown table id trying to handle record request",G);let pe=de.primaryStore.getBinaryFast(Symbol.for("structures")),ce=pe?.length;if(ce>0&&ce!==Ce){Ce=ce;let He=(0,Ze.decode)(pe);e.send((0,Ze.encode)([FO,{typedStructs:He.typed,structures:He.named},G,de.tableName]))}let Ie=de.primaryStore.getBinaryFast(J);if(Ie){let 
He=de.primaryStore.decoder.decode(Ie,{valueAsBuffer:!0}),De=He.value;He[Cc]&Ur&&(De=Buffer.from(De),xf(()=>de.primaryStore.decoder.decode(Ie),st=>dc(st,J),de.primaryStore.rootStore)),V=(0,Ze.encode)([lg,ae,{value:De,expiresAt:He.expiresAt,version:He.version,residencyId:He.residencyId,nodeId:He.nodeId,user:He.user}])}else V=(0,Ze.encode)([lg,ae])}catch(J){V=(0,Ze.encode)([lg,ae,{error:J.message}])}e.send(V);break}case lg:{let{resolve:ae,reject:V,tableId:J,key:de}=g.get(B[1]),pe=B[2];if(pe?.error)V(new Error(pe.error));else if(pe){let ce;Sp(()=>{let Ie=lc[J].decoder.decode(pe.value);pe.value=Ie,pe.key=de,ae(pe)||ce&&setTimeout(()=>ce.forEach(pp),6e4).unref()},d?.rootStore,Ie=>{let He=af(Ie,de);return ce||(ce=[]),ce.push(He),He})}else ae();g.delete(B[1]);break}case Gk:{Mn=D;let ae,V,J=!1;if(m){if(u!==m.databaseName&&!m.then){le.error?.("Subscription request for wrong database",u,m.databaseName);return}}else m=f.get(u);if(le.debug?.(s,"received subscription request for",u,"at",Mn),!m){let Oe;m=new Promise(Ft=>{le.debug?.("Waiting for subscription to database "+u),Oe=Ft}),m.ready=Oe,Ca.set(u,m)}if(r.name)V=or().subscribe(r.name),V.then(async Oe=>{ae=Oe;for await(let Ft of ae){let wt=Ft.value;if(!(wt?.replicates===!0||wt?.replicates?.receives||wt?.subscriptions?.some(yr=>(yr.database||yr.schema)===u&&yr.publish!==!1))){J=!0,e.send((0,Ze.encode)([Qu])),ln(1008,`Unauthorized database subscription to ${u}`);return}}},Oe=>{le.error?.(s,"Error subscribing to HDB nodes",Oe)});else if(!(r?.role?.permission?.super_user||r.replicates)){e.send((0,Ze.encode)([Qu])),ln(1008,`Unauthorized database subscription to ${u}`);return}if(Pr&&(le.debug?.(s,"stopping previous subscription",u),Pr.emit("close")),Mn.length===0)return;let de=Mn[0],pe=a(Oe=>{if(Oe&&(de.replicateByDefault?!de.tables.includes(Oe.tableName):de.tables.includes(Oe.tableName)))return{table:Oe}},"tableToTableEntry"),ce={txnTime:0},Ie,He,De=1/0,st,vn=a((Oe,Ft)=>{if(Oe.type==="end_txn"){ce.txnTime&&(o[i]!==66&&le.error?.("Invalid encoding of message"),eo(9),eo(mg),p(st=Ft),fi()),i=c,ce.txnTime=0;return}let wt=Oe.nodeId,yr=Oe.tableId,yt=He[yr];if(!yt&&(yt=He[yr]=pe(m.tableById[yr]),!yt))return le.debug?.("Not subscribed to table",yr);let ls=yt.table,It=ls.primaryStore,xs=It.encoder;(Oe.extendedType&Ip||!xs.typedStructs)&&(xs._mergeStructures(xs.getStructures()),xs.typedStructs&&(xs.lastTypedStructuresLength=xs.typedStructs.length));let zl=Ie[wt];if(!(zl&&zl.startTime<Ft&&(!zl.endTime||zl.endTime>Ft)))return _g&&le.trace?.(s,"skipping replication update",Oe.recordId,"to:",E,"from:",wt,"subscribed:",Ie),CP();_g&&le.trace?.(s,"sending replication update",Oe.recordId,"to:",E,"from:",wt,"subscribed:",Ie);let dA=Oe.version;ce.txnTime!==dA&&(ce.txnTime&&(_g&&le.trace?.(s,"new txn time, sending queued txn",ce.txnTime),o[i]!==66&&le.error?.("Invalid encoding of message"),fi()),ce.txnTime=dA,i=c,p(dA));let hc=Oe.residencyId,fA=cf(hc,ls),Bm;if(fA&&!fA.includes(E)){let Bs=cf(Oe.previousResidencyId,ls);if(Bs&&!Bs.includes(E)&&(Oe.type==="put"||Oe.type==="patch")||ls.getResidencyById)return CP();let uf=Oe.recordId;le.trace?.(s,"sending invalidation",uf,E,"from",wt);let df=0;hc&&(df|=Lc),Oe.previousResidencyId&&(df|=Mc);let mA,Hm=null;for(let PP in ls.indices){if(!Hm){if(mA=Oe.getValue(It,!0),!mA)break;Hm={}}Hm[PP]=mA[PP]}Bm=Pc(Oe.version,yr,uf,null,wt,Oe.user,Oe.type==="put"||Oe.type==="patch"?"invalidate":Oe.type,xs.encode(Hm),df,hc,Oe.previousResidencyId,Oe.expiresAt)}function CP(){return le.trace?.(s,"skipping audit 
record",Oe.recordId),W||(W=setTimeout(()=>{W=null,(st||0)+Qk/2<De&&(_g&&le.trace?.(s,"sending skipped sequence update",De),e.send((0,Ze.encode)([Wk,De])))},Qk).unref()),new Promise(setImmediate)}a(CP,"skipAuditRecord");let _A=xs.typedStructs,hA=xs.structures;if((_A?.length!=yt.typed_length||hA?.length!=yt.structure_length)&&(yt.typed_length=_A?.length,yt.structure_length=hA.length,le.debug?.(s,"send table struct",yt.typed_length,yt.structure_length),yt.sentName||(yt.sentName=!0),e.send((0,Ze.encode)([FO,{typedStructs:_A,structures:hA,attributes:ls.attributes,schemaDefined:ls.schemaDefined},yr,yt.table.tableName]))),hc&&!rf[hc]&&(e.send((0,Ze.encode)([Kk,fA,hc])),rf[hc]=!0),Bm)eo(Bm.length),K(Bm);else{let Bs=Oe.encoded;Oe.extendedType&Ur&&xf(()=>Oe.getValue(It),df=>dc(df,Oe.recordId),It.rootStore);let uf=Bs[0]===66?8:0;eo(Bs.length-uf),K(Bs,uf),le.trace?.("wrote record",Oe.recordId,"length:",Bs.length)}return e._socket.writableNeedDrain?new Promise(Bs=>{le.debug?.(`Waiting for remote node ${E} to allow more commits ${e._socket.writableNeedDrain?"due to network backlog":"due to requested flow directive"}`),e._socket.once("drain",Bs)}):Ht>Um?new Promise(Bs=>{ft=Bs}):new Promise(setImmediate)},"sendAuditRecord"),fi=a(()=>{c-i>8?(e.send(o.subarray(i,c)),le.debug?.(s,"Sent message, size:",c-i),Mt(c-i,"bytes-sent",`${E}.${u}`,"replication","egress")):le.debug?.(s,"skipping empty transaction")},"sendQueuedData");Pr=new WO.EventEmitter,Pr.once("close",()=>{J=!0,ae?.end()});for(let{startTime:Oe}of Mn)Oe<De&&(De=Oe);(V||Promise.resolve()).then(async()=>{m=await m,d=m.auditStore,He=m.tableById.map(pe),Ie=[];for(let{name:Ft,startTime:wt,endTime:yr}of Mn){let yt=tg(Ft,d);le.debug?.("subscription to",Ft,"using local id",yt,"starting",wt),Ie[yt]={startTime:wt,endTime:yr}}lf(u),Cr||(Cr=ul(Ft=>{Ft.databaseName===u&&lf(u)}),tf=sh(Ft=>{Ft===u&&(e.send((0,Ze.encode)([Qu])),ln())}),e.on("close",()=>{Cr?.remove(),tf?.remove()})),e.send((0,Ze.encode)([Vk,z_(m.auditStore),Mn.map(({name:Ft})=>Ft)]));let Oe=!0;do{isFinite(De)||(le.warn?.("Invalid sequence id "+De),ln(1008,"Invalid sequence id"+De));let Ft;if(Oe&&!J&&(Oe=!1,De===0)){let wt=De,yr=Eg(d);for(let yt in S){if(!pe(yt))continue;let ls=S[yt];le.warn?.(`Fully copying ${yt} table to ${E}`);for(let It of ls.primaryStore.getRange({snapshot:!1,versions:!0})){if(J)return;if(It.localTime>=De){le.trace?.(s,"Copying record from",u,yt,It.key,It.localTime),wt=Math.max(It.localTime,wt),Ft=!0,cn()[rh]=1;let xs=Pc(It.version,ls.tableId,It.key,null,yr,null,"put",xf(()=>ls.primaryStore.encoder.encode(It.value),zl=>dc(zl,It.key)),It.metadataFlags&-256,It.residencyId,null,It.expiresAt);await vn({recordId:It.key,tableId:ls.tableId,type:"put",getValue(){return It.value},encoded:xs,version:It.version,residencyId:It.residencyId,nodeId:yr,extendedType:It.metadataFlags},It.localTime)}}}Ft&&vn({type:"end_txn"},De),cn()[rh]=0,De=wt}for(let{key:wt,value:yr}of d.getRange({start:De||1,exclusiveStart:!0,snapshot:!1})){if(J)return;let yt=Tt(yr);le.debug?.("sending audit record",new Date(wt)),cn()[rh]=wt,De=wt,await vn(yt,wt),Pr.startTime=wt,Ft=!0}Ft&&vn({type:"end_txn"},De),cn()[rh]=0,await uF(d)}while(!J)}).catch(Oe=>{le.error?.(s,"Error handling subscription to node",Oe),ln(1008,"Error handling subscription to node")});break}}return}y.position=8;let I=!0,b,U;do{cn();let B=y.readInt();if(B===9&&y.getUint8(y.position)==mg){y.position++,T=U=y.readFloat64(),_[VO]=T,_[KO]=Date.now(),_[YO]=aF,le.trace?.("received remote sequence update",T,u);break}let 
w=y.position,D=Tt(A,w,w+B),G=lc[D.tableId];G||le.error?.(`No table found with an id of ${D.tableId}`);let Q;D.residencyId&&(Q=nf[D.residencyId],le.trace?.(s,"received residency list",Q,D.type,D.recordId));try{let j=D.recordId;Sp(()=>{b={table:G.name,id:j,type:D.type,nodeId:uc.get(D.nodeId),residencyList:Q,timestamp:D.version,value:D.getValue(G),user:D.user,beginTxn:I,expiresAt:D.expiresAt}},d?.rootStore,F=>af(F,j))}catch(j){throw j.message+="typed structures for current decoder"+JSON.stringify(G.decoder.typedStructs),j}I=!1,le.trace?.(s,"received replication message",D.type,"id",b.id,"version",new Date(D.version),"nodeId",b.nodeId),_[VO]=D.version,_[KO]=Date.now(),_[YO]=cF,m.send(b),y.position=w+B}while(y.position<A.byteLength);sf++,Mt(A.byteLength,"bytes-received",`${E}.${u}.${b?.table||"unknown_table"}`,"replication","ingest"),sf>uA&&!of&&(of=!0,e.pause(),le.debug?.(`Commit backlog causing replication back-pressure, requesting that ${E} pause replication`)),m.send({type:"end_txn",localTime:T,remoteNodeIds:R,async onCommit(){if(b){let B=Date.now()-b.timestamp;Mt(B,"replication-latency",E+"."+u+"."+b.table,b.type,"ingest")}sf--,of&&(of=!1,e.resume(),le.debug?.(`Replication resuming ${E}`)),ye.length>0&&await Promise.all(ye),le.trace?.("All blobs finished"),!N&&U&&(le.trace?.(s,"queuing confirmation of a commit at",U),setTimeout(()=>{e.send((0,Ze.encode)([zk,N])),le.trace?.(s,"sent confirmation of a commit at",N),N=null},Xse)),N=U,le.debug?.("last sequence committed",new Date(U),u)}})}catch(y){le.error?.(s,"Error handling incoming replication message",y)}}),e.on("ping",Ln),e.on("pong",()=>{t.connection&&(t.connection.latency=performance.now()-Z,t.isSubscriptionConnection&&il({name:E,database:u,url:t.url,latency:t.connection.latency})),Z=null}),e.on("close",(A,y)=>{clearInterval(v),clearTimeout(H),clearInterval($),Pr&&Pr.emit("close"),Zi&&Zi.end();for(let[I,{reject:b}]of g)b(new Error(`Connection closed ${y?.toString()} ${A}`));le.debug?.(s,"closed",A,y?.toString())});function ln(A,y){e.isFinished=!0,e.close(A,y),t.connection?.emit("finished")}a(ln,"close");let Dr=new Set;async function dc(A,y){let I=Ep(A);if(Dr.has(I)){le.debug?.("Blob already being sent",I);return}Dr.add(I);try{let b;Ht++;for await(let U of A.stream())b&&(le.debug?.("Sending blob chunk",I,"length",b.length),e.send((0,Ze.encode)([dg,{fileId:I,size:A.size},b]))),b=U,e._socket.writableNeedDrain&&(le.debug?.("draining",I),await new Promise(B=>e._socket.once("drain",B)),le.debug?.("drained",I)),Mt(U.length,"bytes-sent",`${E}.${u}`,"replication","blob");le.debug?.("Sending final blob chunk",I,"length",b.length),e.send((0,Ze.encode)([dg,{fileId:I,size:A.size,finished:!0},b]))}catch(b){le.warn?.("Error sending blob",b,"blob id",I,"for record",y),e.send((0,Ze.encode)([dg,{fileId:I,finished:!0,error:b.toString()},Buffer.alloc(0)]))}finally{Dr.delete(I),Ht--,Ht<Um&&ft?.()}}a(dc,"sendBlobs");function af(A,y){let I=Ep(A),b=me.get(I);le.debug?.("Received transaction for record",y,"with blob",I,"has stream",!!b,"ended",!!b?.writableEnded),b?b.writableEnded&&me.delete(I):(b=new GO.PassThrough,me.set(I,b)),b.connectedToBlob=!0,b.lastChunk=Date.now(),b.recordId=y,A.size===void 0&&b.expectedSize&&(A.size=b.expectedSize);let U=b.blob??createBlob(b,A);b.blob=U;let B=so(()=>Uf(U).saving,m.auditStore?.rootStore);return B&&(B.blobId=I,ye.push(B),B.finally(()=>{le.debug?.(`Finished receiving blob stream ${I}`),ye.splice(ye.indexOf(B),1)})),U}a(af,"receiveBlobs");function 
jo(){if(h||(h=!0,t.connection?.on("subscriptions-updated",jo)),t.connection?.isFinished)throw new Error("Can not make a subscription request on a connection that is already closed");let A=new Map;d||(d=m?.auditStore);try{for(let b of m?.dbisDB?.getRange({start:Symbol.for("seq"),end:[Symbol.for("seq"),Buffer.from([255])]})||[])for(let U of b.value.nodes||[])U.lastTxnTime>(A.get(U.id)??0)&&A.set(U.id,U.lastTxnTime)}catch(b){if(!b.message.includes("Can not re"))throw b}let y=t.connection?.nodeSubscriptions?.[0];R=[];let I=t.connection?.nodeSubscriptions?.map((b,U)=>{let B=[],{replicateByDefault:w}=b;if(b.subscriptions){for(let j of b.subscriptions)if(j.subscribe&&(j.schema||j.database)===u){let F=j.table;S?.[F]?.replicate!==!1&&B.push(F)}w=!1}else for(let j in S)(w?S[j].replicate===!1:S[j].replicate)&&B.push(j);let D=d&&tg(b.name,d),G=m?.dbisDB?.get([Symbol.for("seq"),D])??1,Q=Math.max(G?.seqId??1,(typeof b.start_time=="string"?new Date(b.start_time).getTime():b.start_time)??1);if(le.debug?.("Starting time recorded in db",b.name,D,u,G?.seqId,"start time:",Q,new Date(Q)),y!==b){let j=d&&tg(y.name,d),F=m?.dbisDB?.get([Symbol.for("seq"),j])??1;for(let ve of F?.nodes||[])ve.name===b.name&&(Q=ve.seqId,le.debug?.("Using sequence id from proxy node",y.name,Q))}if(D===void 0?le.warn("Starting subscription request from node",b,"but no node id found"):R.push(D),A.get(D)>Q&&(Q=A.get(D),le.debug?.("Updating start time from more recent txn recorded",y.name,Q)),Q===1&&fg)try{new URL(fg).hostname===b.name&&E===b.name?(le.warn?.(`Requesting full copy of database ${u} from ${fg}`),Q=0):Q=Date.now()-6e4}catch(j){le.error?.("Error parsing leader URL",fg,j)}return le.trace?.(s,"defining subscription request",b.name,u,new Date(Q)),{name:b.name,replicateByDefault:w,tables:B,startTime:Q,endTime:b.end_time}});if(I)if(le.debug?.(s,"sending subscription request",I,m?.dbisDB?.path),clearTimeout(z),I.length>0)e.send((0,Ze.encode)([Gk,I]));else{let b=a(()=>{let U=performance.now();z=setTimeout(()=>{fe<=U?ln(1008,"Connection has no subscriptions and is no longer used"):b()},se).unref()},"schedule_close");b()}}a(jo,"sendSubscriptionRequestUpdate");function cf(A,y){if(!A)return;let I=kt[A];return I||(I=y.getResidencyRecord(A),kt[A]=I),I}a(cf,"getResidence");function Qo(A){return!(Ia&&Ia!="*"&&!Ia[A]&&!Ia.includes?.(A)&&!Ia.some?.(y=>y.name===A))}a(Qo,"checkDatabaseAccess");function Jo(A){if(m=m||f.get(A),!Qo(A))throw new Error(`Access to database "${A}" is not permitted`);m||le.warn?.(`No database named "${A}" was declared and registered`),d=m?.auditStore,S||(S=Xe()?.[A]);let y=rt();if(y===E)throw y?new Error("Should not connect to self",y):new Error("Node name not defined");return xm(y,A),!0}a(Jo,"setDatabase");function xm(A,y){let I=Xe()?.[y],b=[];for(let U in I){let B=I[U];b.push({table:U,schemaDefined:B.schemaDefined,attributes:B.attributes.map(w=>({name:w.name,type:w.type,isPrimaryKey:w.isPrimaryKey}))})}le.trace?.("Sending database info for node",A,"database name",y),e.send((0,Ze.encode)([$k,A,y,b]))}a(xm,"sendNodeDBName");function lf(A){let y=Xe()?.[A],I=[];for(let b in y){if(Mn&&!Mn.some(B=>B.replicateByDefault?!B.tables.includes(b):B.tables.includes(b)))continue;let U=y[b];I.push({table:b,schemaDefined:U.schemaDefined,attributes:U.attributes.map(B=>({name:B.name,type:B.type,isPrimaryKey:B.isPrimaryKey}))})}e.send((0,Ze.encode)([jk,I,A]))}a(lf,"sendDBSchema"),$=setInterval(()=>{for(let[A,y]of me)y.lastChunk+Ae<Date.now()&&(le.warn?.(`Timeout waiting for blob stream to finish ${A} for record ${y.recordId??"unknown"} 
from ${E}`),me.delete(A),y.end())},Ae).unref();let fc=1,_c=[];return{end(){Zi&&Zi.end(),Pr&&Pr.emit("close")},getRecord(A){let y=fc++;return new Promise((I,b)=>{let U=[Yk,y,A.table.tableId,A.id];_c[A.table.tableId]||(U.push(A.table.tableName),_c[A.table.tableId]=!0),e.send((0,Ze.encode)(U)),fe=performance.now(),g.set(y,{tableId:A.table.tableId,key:A.id,resolve(B){let{table:w,entry:D}=A;if(I(B),B)return w._recordRelocate(D,B)},reject:b})})},sendOperation(A){let y=fc++;return A.requestId=y,e.send((0,Ze.encode)([$O,A])),new Promise((I,b)=>{g.set(y,{resolve:I,reject:b})})}};function eo(A){O(5),A<128?o[c++]=A:A<16384?(l.setUint16(c,A|32768),c+=2):A<1056964608?(l.setUint32(c,A|3221225472),c+=4):(o[c]=255,l.setUint32(c+1,A),c+=5)}function K(A,y=0,I=A.length){let b=I-y;O(b),A.copy(o,c,y,I),c+=b}function p(A){O(8),l.setFloat64(c,A),c+=8}function O(A){if(A+16>o.length-c){let y=Buffer.allocUnsafeSlow(c+A-i+65536>>10<<11);o.copy(y,0,i,c),c=c-i,i=0,o=y,l=new DataView(o.buffer,0,o.length)}}function L(A,y){let I=A.database??"data";if(I!=="data"&&!We[I]){le.warn?.("Database not found",A.database);return}y||(y={});let b=y.schemaDefined,U=!1,B=A.schemaDefined,w=y.attributes||[];for(let D=0;D<A.attributes?.length;D++){let G=A.attributes[D],Q=w.find(j=>j.name===G.name);(!Q||Q.type!==G.type)&&(b?le.error?.(`Schema for '${u}.${A.table}' is defined locally, but attribute '${G.name}: ${G.type}' from '${E}' does not match local attribute ${Q?"'"+Q.name+": "+Q.type+"'":"which does not exist"}`):(U=!0,B||(G.indexed=!0),Q?w[w.indexOf(Q)]=G:w.push(G)))}return U?(le.debug?.("(Re)creating",A),_t({table:A.table,database:A.database,schemaDefined:A.schemaDefined,attributes:w,...y})):y}}var Zk,Ze,eF,tF,le,WO,rF,nF,cl,sF,GO,iF,Gk,$k,Vk,Qu,Kk,FO,Yk,lg,$O,ug,Wk,zk,jk,dg,oF,VO,KO,rh,YO,aF,cF,Jse,fg,zO,Ca,_g,Qk,Xse,Jk,qO,hg,Xk,Ju,jO=Re(()=>{xe();Ai();yO();QO();Es();Zk=M(oe());k();fu();Ze=require("msgpackr"),eF=require("ws"),tF=require("worker_threads"),le=M(qs());Z_();WO=require("events"),rF=M(ti()),nF=M(require("node:tls"));sl();cl=M(require("node:process")),sF=require("node:net");vi();En();GO=require("node:stream"),iF=M(require("minimist")),Gk=129,$k=140,Vk=141,Qu=142,Kk=130,FO=132,Yk=133,lg=134,$O=136,ug=137,Wk=143,zk=144,jk=145,dg=146,oF=0,VO=1,KO=2,rh=3,YO=4,aF=0,cF=1,Jse=(0,iF.default)(cl.argv),fg=Jse.HDB_LEADER_URL??cl.env.HDB_LEADER_URL,zO=new Map,Ca=new Map,_g=!0,Qk=300,Xse=2,Jk=3e4;a(pg,"createWebSocket");Xk=500,Ju=class extends WO.EventEmitter{constructor(r,n,s,i,o){super();this.url=r;this.subscription=n;this.databaseName=s;this.nodeName=i;this.authorization=o;this.nodeName=this.nodeName??Xs(r)}static{a(this,"NodeReplicationConnection")}socket;startTime;retryTime=Xk;retries=0;isConnected=!0;isFinished=!1;nodeSubscriptions;latency=0;replicateTablesByDefault;session;sessionResolve;sessionReject;async connect(){this.session||this.resetSession();let r=[];this.socket=await pg(this.url,{serverName:this.nodeName,authorization:this.authorization});let n;le.debug?.(`Connecting to ${this.url}, db: ${this.databaseName}, process ${cl.pid}`),this.socket.on("open",()=>{this.socket._socket.unref(),le[this.isConnected?"info":"warn"]?.(`Connected to ${this.url}, db: ${this.databaseName}`),this.retries=0,this.retryTime=Xk,this.nodeSubscriptions&&il({name:this.nodeName,database:this.databaseName,url:this.url}),this.isConnected=!0,n=nh(this.socket,{database:this.databaseName,subscription:this.subscription,url:this.url,connection:this,isSubscriptionConnection:this.nodeSubscriptions!==void 
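/*
getRecord and sendOperation above correlate replies with requests by handing out an
incrementing request id, stashing the promise callbacks in a Map, and rejecting every
pending entry when the socket closes. A compact sketch of that bookkeeping (GET_RECORD
mirrors the bundle's numeric opcode for record requests; makeRequester and settle are
illustrative names):

  const { encode } = require("msgpackr");
  const GET_RECORD = 133;

  function makeRequester(ws) {
    let nextId = 1;
    const pending = new Map();
    ws.on("close", () => {
      for (const [, { reject }] of pending) reject(new Error("Connection closed"));
      pending.clear();
    });
    return {
      getRecord(tableId, key) {
        const id = nextId++;
        ws.send(encode([GET_RECORD, id, tableId, key]));
        return new Promise((resolve, reject) => pending.set(id, { resolve, reject }));
      },
      // The receive path looks up the id and settles the matching promise.
      settle(id, value) {
        pending.get(id)?.resolve(value);
        pending.delete(id);
      },
    };
  }
*/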
0},{replicates:!0}),this.sessionResolve(n)}),this.socket.on("error",s=>{s.code==="SELF_SIGNED_CERT_IN_CHAIN"?(le.warn?.(`Can not connect to ${this.url}, this server does not have a certificate authority for the certificate provided by ${this.url}`),s.isHandled=!0):s.code!=="ECONNREFUSED"&&(s.code==="UNABLE_TO_VERIFY_LEAF_SIGNATURE"?le.error?.(`Can not connect to ${this.url}, the certificate provided by ${this.url} is not trusted, this node needs to be added to the cluster, or a certificate authority needs to be added`):le.error?.(`Error in connection to ${this.url} due to ${s.message}`)),this.sessionReject(s)}),this.socket.on("close",(s,i)=>{if(this.isConnected&&(this.nodeSubscriptions&&Wu({name:this.nodeName,database:this.databaseName,url:this.url,finished:this.socket.isFinished}),this.isConnected=!1),this.removeAllListeners("subscriptions-updated"),this.socket.isFinished){this.isFinished=!0,n?.end(),this.emit("finished");return}if(++this.retries%20===1){let o=i?.toString();le.warn?.(`${n?"Disconnected from":"Failed to connect to"} ${this.url} (db: "${this.databaseName}"), due to ${o?'"'+o+'" ':""}(code: ${s})`)}n=null,this.resetSession(),setTimeout(()=>{this.connect()},this.retryTime).unref(),this.retryTime+=this.retryTime>>8})}resetSession(){this.session=new Promise((r,n)=>{this.sessionResolve=r,this.sessionReject=n})}subscribe(r,n){this.nodeSubscriptions=r,this.replicateTablesByDefault=n,this.emit("subscriptions-updated",r)}unsubscribe(){this.socket.isFinished=!0,this.socket.close(1008,"No longer subscribed")}getRecord(r){return this.session.then(n=>n.getRecord(r))}};a(nh,"replicateOverWS")});var wa={};Ue(wa,{clearThisNodeName:()=>aie,disableReplication:()=>rie,enabled_databases:()=>Ia,forEachReplicatedDatabase:()=>Aa,getThisNodeId:()=>Eg,getThisNodeName:()=>rt,getThisNodeUrl:()=>Ra,hostnameToUrl:()=>Rg,lastTimeInAuditStore:()=>j_,monitorNodeCAs:()=>gF,replicateOperation:()=>lie,replication_certificate_authorities:()=>Ro,sendOperationToNode:()=>ih,servers:()=>eie,setReplicator:()=>TF,start:()=>tie,startOnMainThread:()=>wO,subscribeToNode:()=>X_,unsubscribeFromNode:()=>ig,urlToNodeName:()=>Xs});function tie(e){if(e.port||(e.port=Ss.default.get(x.OPERATIONSAPI_NETWORK_PORT)),e.securePort||(e.securePort=Ss.default.get(x.OPERATIONSAPI_NETWORK_SECUREPORT)),!rt())throw new Error("Can not load replication without a url (see replication.url in the config)");let t=new Map;for(let s of J_(e))t.set(Xs(s.url),s);nie(e),e={mtls:!0,isOperationsServer:!0,maxPayload:10*1024*1024*1024,...e};let r=Ye.ws(async(s,i,o,c)=>{if(i.headers.get("sec-websocket-protocol")!=="harperdb-replication-v1")return c(s,i,o);await o,s._socket.unref(),nh(s,e,i?.user),s.on("error",l=>{l.code!=="ECONNREFUSED"&&cr.error("Error in connection to "+this.url,l.message)})},e);e.runFirst=!0,Ye.http((s,i)=>{if(s.isWebSocket&&s.headers.get("Sec-WebSocket-Protocol")==="harperdb-replication-v1"){!s.authorized&&s._nodeRequest.socket.authorizationError&&cr.error(`Incoming client connection from ${s.ip} did not have valid certificate, you may need turn on enableRootCAs in the config if you are using a publicly signed certificate, or add the CA to the server's trusted CAs`,s._nodeRequest.socket.authorizationError);let o=or().primaryStore;if(s.authorized&&s.peerCertificate.subject){let c=s.peerCertificate.subject,l=c&&(o.get(c.CN)||t.get(c.CN));if(l)if(l?.revoked_certificates?.includes(s.peerCertificate.serialNumber)){cr.warn("Revoked certificate used in attempt to connect to node",l.name,"certificate serial 
number",s.peerCertificate.serialNumber);return}else s.user=l;else cr.warn(`No node found for certificate common name ${c.CN}, available nodes are ${Array.from(o.getRange({}).filter(({value:u})=>u).map(({key:u})=>u)).join(", ")} and routes ${Array.from(t.keys()).join(", ")}, connection will require credentials.`)}else{let c=o.get(s.ip)||t.get(s.ip);c?s.user=c:cr.warn(`No node found for IP address ${s.ip}, available nodes are ${Array.from(new Set([...o.getKeys(),...t.keys()])).join(", ")}, connection will require credentials.`)}}return i(s)},e);let n=[];for(let s of r)if(s.secureContexts){let i=a(()=>{let o=new Set(s.secureContexts.values());s.defaultContext&&o.add(s.defaultContext);for(let c of o)try{let l=Array.from(Ro);c.options.availableCAs&&l.push(...c.options.availableCAs.values());let u={...c.options,ca:l};c.updatedContext=Ag.createSecureContext(u)}catch(l){cr.error("Error creating replication TLS config",l)}},"updateContexts");s.secureContextsListeners.push(i),n.push(i),Ss.default.get(x.REPLICATION_ENABLEROOTCAS)!==!1&&i()}gF(()=>{for(let s of n)s()})}function gF(e){let t=0;Ku(r=>{r?.ca&&(Ro.add(r.ca),Ro.size!==t&&(t=Ro.size,e?.()))})}function rie(e=!0){EF=e}function nie(e){EF||(Xe(),Ia=e.databases,Aa(e,(t,r)=>{if(!t){let n=e.databaseSubscriptions||Ca;for(let[s,i]of Sg){let o=i.get(r);o&&(o.subscribe([],!1),i.delete(r))}n.delete(r);return}for(let n in t){let s=t[n];TF(r,s,e),zO.get(s)?.forEach(i=>i(s))}}))}function TF(e,t,r){if(!t)return console.error(`Attempt to replicate non-existent table ${t.name} from database ${e}`);if(t.replicate===!1||t.sources?.some(s=>s.isReplicator))return;let n;t.sourcedFrom(class SF extends Vr{static{a(this,"Replicator")}static connection;static subscription;static async subscribe(){let i=r.databaseSubscriptions||Ca,o=i.get(e),c=o?.tableById||[];c[t.tableId]=t;let l=o?.ready;if(cr.trace("Setting up replicator subscription to database",e),!o?.auditStore)return this.subscription=o=new xn,i.set(e,o),o.tableById=c,o.auditStore=t.auditStore,o.dbisDB=t.dbisDB,o.databaseName=e,l&&l(o),o;this.subscription=o}static subscribeOnThisThread(i,o){return!0}static async load(i){if(i){let o=i.residencyId,c=i.residency||t.dbisDB.get([Symbol.for("residency_by_id"),o]);if(c){let l,u=new Set;do{let f;for(let _ of c){if(_===Ye.hostname)continue;let h=iie(_,SF.subscription,e);h?.isConnected&&!u.has(h)&&(!f||h.latency<f.latency)&&(f=h)}if(!f)throw l||new mF.ServerError("No connection to any other nodes are available",502);let d={requestId:Zse++,table:t,entry:i,id:i.key};u.add(f);try{return await f.getRecord(d)}catch(_){if(f.isConnected)throw _;cr.warn("Error in load from node",Tg,_),l||(l=_)}}while(!0)}}}static isReplicator=!0},{intermediateSource:!0})}function sie(e,t,r,n,s){let i=Sg.get(e);i||(i=new Map,Sg.set(e,i));let o=i.get(r);if(o)return o;if(t)return i.set(r,o=new Ju(e,t,r,n,s)),o.connect(),o.once("finished",()=>i.delete(r)),o}function iie(e,t,r){let n=dF.get(e);n||(n=new Map,dF.set(e,n));let s=n.get(r);if(s)return s;let i=or().primaryStore.get(e);return i?.url&&(s=new Ju(i.url,t,r,e,i.authorization),n.set(r,s),s.connect(),s.once("finished",()=>n.delete(r))),s}async function ih(e,t,r){r||(r={}),r.serverName=e.name;let n=await pg(e.url,r),s=nh(n,{},{});return new Promise((i,o)=>{n.on("open",()=>{i(s.sendOperation(t))}),n.on("error",c=>{o(c)}),n.on("close",c=>{cr.info("Sending operation connection to "+e.url+" closed",c)})}).finally(()=>{n.close()})}function X_(e){try{pF.isMainThread&&cr.trace("Subscribing on main thread (should not happen in multi-threaded 
instance)",e.nodes[0].url,e.database);let t=Ca.get(e.database);if(!t){let n;t=new Promise(s=>{cr.info("Waiting for subscription to database "+e.database),n=s}),t.ready=n,Ca.set(e.database,t)}let r=sie(e.nodes[0].url,t,e.database,e.nodes[0].name,e.nodes[0].authorization);e.nodes[0].name===void 0?r.tentativeNode=e.nodes[0]:r.nodeName=e.nodes[0].name,r.subscribe(e.nodes.filter(n=>Q_(n,e.database)),e.replicateByDefault)}catch(t){cr.error("Error in subscription to node",e.nodes[0]?.url,t)}}async function ig({name:e,url:t,database:r}){cr.trace("Unsubscribing from node",e,t,r,"nodes",Array.from(or().primaryStore.getRange({})));let n=Sg.get(t);if(n){let s=n.get(r);s&&(s.unsubscribe(),n.delete(r))}}function oie(){if(JO!==void 0)return JO;let e=Ss.default.get(x.OPERATIONSAPI_TLS_CERTIFICATE)||Ss.default.get(x.TLS_CERTIFICATE);if(e)return JO=new _F.X509Certificate((0,hF.readFileSync)(e)).subject?.match(/CN=(.*)/)?.[1]??null}function rt(){return Tg||(Tg=Ss.default.get("replication_hostname")??Xs(Ss.default.get("replication_url"))??oie()??fF("operationsapi_network_secureport")??fF("operationsapi_network_port")??"127.0.0.1")}function aie(){Tg=void 0}function fF(e){let t=Ss.default.get(e),r=t?.lastIndexOf?.(":");if(r>0)return t.slice(0,r)}function gg(e){let t=Ss.default.get(e),r=t?.lastIndexOf?.(":");return r>0?+t.slice(r+1).replace(/[\[\]]/g,""):+t}function Eg(e){return z_(e)?.[rt()]}function Ra(){let e=Ss.default.get("replication_url");return e||Rg(rt())}function Rg(e){let t=gg("replication_port");if(t)return`ws://${e}:${t}`;if(t=gg("replication_secureport"),t)return`wss://${e}:${t}`;if(t=gg("operationsapi_network_port"),t)return`ws://${e}:${t}`;if(t=gg("operationsapi_network_secureport"),t)return`wss://${e}:${t}`}function Xs(e){if(e)return new URL(e).hostname}function Aa(e,t){for(let n of Object.getOwnPropertyNames(We))r(n);return sh(n=>{r(n)}),ul((n,s)=>{r(n.databaseName)});function r(n){let s=We[n];cr.trace("Checking replication status of ",n,e?.databases),e?.databases===void 0||e.databases==="*"||e.databases.includes(n)||e.databases.some?.(i=>i.name===n)||!s?t(s,n,!0):cie(n)&&t(s,n,!1)}a(r,"forDatabase")}function cie(e){let t=We[e];for(let r in t)if(t[r].replicate)return!0}function j_(e){for(let t of e.getKeys({limit:1,reverse:!0}))return t}async function lie(e){let t={message:""};if(e.replicated){e.replicated=!1,cr.trace?.("Replicating operation",e.operation,"to nodes",Ye.nodes.map(n=>n.name));let r=await Promise.allSettled(Ye.nodes.map(n=>ih(n,e)));t.replicated=r.map((n,s)=>{let i=n.status==="rejected"?{status:"failed",reason:n.reason.toString()}:n.value;return i.node=Ye.nodes[s]?.name,i})}return t}var Ss,cr,_F,hF,Ag,mF,pF,EF,Zse,eie,Ro,Ia,Sg,dF,JO,Tg,Es=Re(()=>{xe();sa();Jl();jO();Mr();Ss=M(oe()),cr=M(X()),_F=require("crypto"),hF=require("fs");Z_();sl();k();yO();Ag=M(require("node:tls")),mF=M(he()),pF=require("worker_threads"),Zse=1,eie=[],Ro=Ss.default.get(x.REPLICATION_ENABLEROOTCAS)!==!1?new Set(Ag.rootCertificates):new Set;a(tie,"start");a(gF,"monitorNodeCAs");a(rie,"disableReplication");a(nie,"assignReplicationSource");a(TF,"setReplicator");Sg=new Map;a(sie,"getSubscriptionConnection");dF=new Map;a(iie,"getRetrievalConnectionByName");a(ih,"sendOperationToNode");a(X_,"subscribeToNode");a(ig,"unsubscribeFromNode");a(oie,"getCommonNameFromCert");a(rt,"getThisNodeName");a(aie,"clearThisNodeName");Object.defineProperty(Ye,"hostname",{get(){return 
rt()}});a(fF,"getHostFromListeningPort");a(gg,"getPortFromListeningPort");a(Eg,"getThisNodeId");Ye.replication={getThisNodeId:Eg,exportIdMapping:z_};a(Ra,"getThisNodeUrl");a(Rg,"hostnameToUrl");a(Xs,"urlToNodeName");a(Aa,"forEachReplicatedDatabase");a(cie,"hasExplicitlyReplicatedTable");a(j_,"lastTimeInAuditStore");a(lie,"replicateOperation")});var rd=C((yPe,OF)=>{"use strict";var Xu=MH(),{validateBySchema:oh}=tt(),{common_validators:Zu,schema_regex:XO}=Oi(),lr=require("joi"),uie=X(),die=require("uuid").v4,Og=po(),ed=(k(),P(q)),fie=require("util"),Pa=Gn(),{handleHDBError:yo,hdb_errors:_ie,ClientError:dl}=he(),{HDB_ERROR_MSGS:yg,HTTP_STATUS_CODES:bo}=_ie,{SchemaEventMsg:Ng}=Ws(),AF=Gt(),{getDatabases:hie}=(xe(),P(ct)),{transformReq:td}=ie(),{replicateOperation:RF}=(Es(),P(wa)),{cleanupOrphans:RPe}=(En(),P(uu)),bg=lr.string().min(1).max(Zu.schema_length.maximum).pattern(XO).messages({"string.pattern.base":"{:#label} "+Zu.schema_format.message}),mie=lr.string().min(1).max(Zu.schema_length.maximum).pattern(XO).messages({"string.pattern.base":"{:#label} "+Zu.schema_format.message}).required(),pie=lr.string().min(1).max(Zu.schema_length.maximum).pattern(XO).messages({"string.pattern.base":"{:#label} "+Zu.schema_format.message,"any.required":"'primary_key' is required","string.base":"'primary_key' must be a string"}).required();OF.exports={createSchema:Eie,createSchemaStructure:yF,createTable:gie,createTableStructure:bF,createAttribute:yie,dropSchema:Sie,dropTable:Tie,dropAttribute:Aie,getBackup:bie,cleanupOrphanBlobs:Oie};async function Eie(e){let t=await yF(e);return Og.signalSchemaChange(new Ng(process.pid,e.operation,e.schema)),t}a(Eie,"createSchema");async function yF(e){let t=oh(e,lr.object({database:bg,schema:bg}));if(t)throw new dl(t.message);if(td(e),!await Xu.checkSchemaExists(e.schema))throw yo(new Error,yg.SCHEMA_EXISTS_ERR(e.schema),bo.BAD_REQUEST,ed.LOG_LEVELS.ERROR,yg.SCHEMA_EXISTS_ERR(e.schema),!0);return await Pa.createSchema(e),`database '${e.schema}' successfully created`}a(yF,"createSchemaStructure");async function gie(e){return td(e),e.hash_attribute=e.primary_key??e.hash_attribute,await bF(e)}a(gie,"createTable");async function bF(e){let t=oh(e,lr.object({database:bg,schema:bg,table:mie,residence:lr.array().items(lr.string().min(1)).optional(),hash_attribute:pie}));if(t)throw new dl(t.message);if(!await Xu.checkSchemaTableExists(e.schema,e.table))throw yo(new Error,yg.TABLE_EXISTS_ERR(e.schema,e.table),bo.BAD_REQUEST,ed.LOG_LEVELS.ERROR,yg.TABLE_EXISTS_ERR(e.schema,e.table),!0);let n={name:e.table,schema:e.schema,id:die(),hash_attribute:e.hash_attribute};try{if(e.residence)if(global.clustering_on)n.residence=e.residence,await Pa.createTable(n,e);else throw yo(new Error,"Clustering does not appear to be enabled. 
Cannot insert table with property 'residence'.",bo.BAD_REQUEST);else await Pa.createTable(n,e);return`table '${e.schema}.${e.table}' successfully created.`}catch(s){throw s}}a(bF,"createTableStructure");async function Sie(e){let t=oh(e,lr.object({database:lr.string(),schema:lr.string()}).or("database","schema").messages({"object.missing":"'database' is required"}));if(t)throw new dl(t.message);td(e);let r=await Xu.checkSchemaExists(e.schema);if(r)throw yo(new Error,r,bo.NOT_FOUND,ed.LOG_LEVELS.ERROR,r,!0);let n=await Xu.schema_describe.describeSchema({schema:e.schema}),s=Object.keys(global.hdb_schema[e.schema]);await Pa.dropSchema(e),Og.signalSchemaChange(new Ng(process.pid,e.operation,e.schema)),await AF.purgeSchemaTableStreams(e.schema,s);let i=await RF(e);return i.message=`successfully deleted '${e.schema}'`,i}a(Sie,"dropSchema");async function Tie(e){let t=oh(e,lr.object({database:lr.string(),schema:lr.string(),table:lr.string().required()}));if(t)throw new dl(t.message);td(e);let r=await Xu.checkSchemaTableExists(e.schema,e.table);if(r)throw yo(new Error,r,bo.NOT_FOUND,ed.LOG_LEVELS.ERROR,r,!0);await Pa.dropTable(e),await AF.purgeTableStream(e.schema,e.table);let n=await RF(e);return n.message=`successfully deleted table '${e.schema}.${e.table}'`,n}a(Tie,"dropTable");async function Aie(e){let t=oh(e,lr.object({database:lr.string(),schema:lr.string(),table:lr.string().required(),attribute:lr.string().required()}));if(t)throw new dl(t.message);td(e);let r=await Xu.checkSchemaTableExists(e.schema,e.table);if(r)throw yo(new Error,r,bo.NOT_FOUND,ed.LOG_LEVELS.ERROR,r,!0);if(e.attribute===global.hdb_schema[e.schema][e.table].hash_attribute)throw yo(new Error,"You cannot drop a hash attribute",bo.BAD_REQUEST,void 0,void 0,!0);if(ed.TIME_STAMP_NAMES.indexOf(e.attribute)>=0)throw yo(new Error,`cannot drop internal timestamp attribute: ${e.attribute}`,bo.BAD_REQUEST,void 0,void 0,!0);try{return await Pa.dropAttribute(e),Rie(e),Og.signalSchemaChange(new Ng(process.pid,e.operation,e.schema,e.table,e.attribute)),`successfully deleted attribute '${e.attribute}'`}catch(n){throw uie.error(`Got an error deleting attribute ${fie.inspect(e)}.`),n}}a(Aie,"dropAttribute");function Rie(e){let t=Object.values(global.hdb_schema[e.schema][e.table].attributes);for(let r=0;r<t.length;r++)t[r].attribute===e.attribute&&global.hdb_schema[e.schema][e.table].attributes.splice(r,1)}a(Rie,"dropAttributeFromGlobal");async function yie(e){td(e);let t=hie()[e.schema][e.table].attributes;for(let{name:r}of t)if(r===e.attribute)throw yo(new Error,`attribute '${e.attribute}' already exists in ${e.schema}.${e.table}`,bo.BAD_REQUEST,void 0,void 0,!0);return await Pa.createAttribute(e),Og.signalSchemaChange(new Ng(process.pid,e.operation,e.schema,e.table,e.attribute)),`attribute '${e.schema}.${e.table}.${e.attribute}' successfully created.`}a(yie,"createAttribute");function bie(e){return Pa.getBackup(e)}a(bie,"getBackup");function Oie(e){if(!e.database)throw new dl('Must provide "database" name for search for orphaned blobs');if(!databases[e.database])throw new dl(`Unknown database '${e.database}'`);let{cleanupOrphans:r}=(En(),P(uu));return r(databases[e.database],e.database),{message:"Orphaned blobs cleanup started, check logs for progress"}}a(Oie,"cleanupOrphanBlobs")});var wF=C((OPe,NF)=>{"use strict";var{OPERATIONS_ENUM:Nie}=(k(),P(q)),ZO=class{static{a(this,"ReadAuditLogObject")}constructor(t,r,n=void 0,s=void 
0){this.operation=Nie.READ_AUDIT_LOG,this.schema=t,this.table=r,this.search_type=n,this.search_values=s}};NF.exports=ZO});var eN=C((IPe,LF)=>{"use strict";var wie=Gn(),wPe=wF(),wg=ie(),Ig=(k(),P(q)),Iie=oe(),{handleHDBError:IF,hdb_errors:Cie}=he(),{HDB_ERROR_MSGS:CF,HTTP_STATUS_CODES:PF}=Cie,Pie=Object.values(Ig.READ_AUDIT_LOG_SEARCH_TYPES_ENUM),DF="To use this operation audit log must be enabled in harperdb-config.yaml";LF.exports=Die;async function Die(e){if(wg.isEmpty(e.schema))throw new Error(CF.SCHEMA_REQUIRED_ERR);if(wg.isEmpty(e.table))throw new Error(CF.TABLE_REQUIRED_ERR);if(!Iie.get(Ig.CONFIG_PARAMS.LOGGING_AUDITLOG))throw IF(new Error,DF,PF.BAD_REQUEST,Ig.LOG_LEVELS.ERROR,DF,!0);let t=wg.checkSchemaTableExist(e.schema,e.table);if(t)throw IF(new Error,t,PF.NOT_FOUND,Ig.LOG_LEVELS.ERROR,t,!0);if(!wg.isEmpty(e.search_type)&&Pie.indexOf(e.search_type)<0)throw new Error(`Invalid search_type '${e.search_type}'`);return await wie.readAuditLog(e)}a(Die,"readAuditLog")});var vF=C((PPe,MF)=>{"use strict";var{OPERATIONS_ENUM:Lie}=(k(),P(q)),tN=class{static{a(this,"GetBackupObject")}constructor(t,r,n=void 0,s=void 0){this.operation=Lie.GET_BACKUP,this.schema=t,this.table=r}};MF.exports=tN});var BF=C((vPe,xF)=>{"use strict";var Mie=Gn(),LPe=vF(),rN=ie(),vie=(k(),P(q)),MPe=oe(),{handleHDBError:Uie,hdb_errors:xie}=he(),{HDB_ERROR_MSGS:UF,HTTP_STATUS_CODES:Bie}=xie;xF.exports=Hie;async function Hie(e){if(rN.isEmpty(e.schema))throw new Error(UF.SCHEMA_REQUIRED_ERR);if(rN.isEmpty(e.table))throw new Error(UF.TABLE_REQUIRED_ERR);let t=rN.checkSchemaTableExist(e.schema,e.table);if(t)throw Uie(new Error,t,Bie.NOT_FOUND,vie.LOG_LEVELS.ERROR,t,!0);return await Mie.getBackup(read_audit_log_object)}a(Hie,"getBackup")});var qF=C((xPe,FF)=>{"use strict";var kie=oe(),Da=require("joi"),Fie=tt(),HF=require("moment"),qie=require("fs-extra"),nN=require("path"),Gie=require("lodash"),ah=(k(),P(q)),{LOG_LEVELS:fl}=(k(),P(q)),$ie="YYYY-MM-DD hh:mm:ss",Vie=nN.resolve(__dirname,"../logs");FF.exports=function(e){return Fie.validateBySchema(e,Kie)};var Kie=Da.object({from:Da.custom(kF),until:Da.custom(kF),level:Da.valid(fl.NOTIFY,fl.FATAL,fl.ERROR,fl.WARN,fl.INFO,fl.DEBUG,fl.TRACE),order:Da.valid("asc","desc"),limit:Da.number().min(1),start:Da.number().min(0),log_name:Da.custom(Yie)});function kF(e,t){if(HF(e,HF.ISO_8601).format($ie)==="Invalid date")return t.message(`'${t.state.path[0]}' date '${e}' is invalid.`)}a(kF,"validateDatetime");function Yie(e,t){if(Gie.invert(ah.LOG_NAMES)[e]===void 0)return t.message(`'log_name' '${e}' is invalid.`);let n=kie.get(ah.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e===void 0?ah.LOG_NAMES.HDB:e,i=s===ah.LOG_NAMES.INSTALL?nN.join(Vie,ah.LOG_NAMES.INSTALL):nN.join(n,s);return qie.existsSync(i)?null:t.message(`'log_name' '${e}' does not exist.`)}a(Yie,"validateReadLogPath")});var iN=C((HPe,$F)=>{"use strict";var Cg=(k(),P(q)),Wie=X(),zie=oe(),jie=qF(),sN=require("path"),GF=require("fs-extra"),{once:Qie}=require("events"),{handleHDBError:Jie,hdb_errors:Xie}=he(),{PACKAGE_ROOT:Zie}=gt(),eoe=sN.join(Zie,"logs"),toe=1e3,roe=200;$F.exports=noe;async function noe(e){let t=jie(e);if(t)throw Jie(t,t.message,Xie.HTTP_STATUS_CODES.BAD_REQUEST,void 0,void 0,!0);let r=zie.get(Cg.HDB_SETTINGS_NAMES.LOG_PATH_KEY),n=e.log_name===void 0?Cg.LOG_NAMES.HDB:e.log_name,s=n===Cg.LOG_NAMES.INSTALL?sN.join(eoe,Cg.LOG_NAMES.INSTALL):sN.join(r,n),i=e.level!==void 0,o=i?e.level:void 0,c=e.from!==void 0,l=c?new Date(e.from):void 0,u=e.until!==void 0,f=u?new Date(e.until):void 0,d=e.limit===void 
0?toe:e.limit,_=e.order===void 0?void 0:e.order,h=e.start===void 0?0:e.start,m=h+d,S=0;_==="desc"&&!l&&!f&&(S=Math.max(GF.statSync(s).size-(m+5)*roe,0));let g=GF.createReadStream(s,{start:S});g.on("error",H=>{Wie.error(H)});let R=0,E=[],T="",N;g.on("data",H=>{let Z=/(?:^|\n)(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:[\d\.]+Z) \[(.+?)]: /g;H=T+H;let W=0,$;for(;($=Z.exec(H))&&!g.destroyed;){N&&(N.message=H.slice(W,$.index),v(N));let[se,z,fe]=$,ue=fe.split("] ["),ee=ue[0],Ae=ue[1];ue.splice(0,2),N={timestamp:z,thread:ee,level:Ae,tags:ue,message:""},W=$.index+se.length}T=H.slice(W)}),g.on("end",H=>{g.destroyed||N&&(N.message=T.trim(),v(N))}),g.resume();function v(H){let Z,W,$;switch(!0){case(i&&c&&u):Z=new Date(H.timestamp),W=new Date(l),$=new Date(f),H.level===o&&Z>=W&&Z<=$&&R<h?R++:H.level===o&&Z>=W&&Z<=$&&(La(H,_,E),R++,R===m&&g.destroy());break;case(i&&c):Z=new Date(H.timestamp),W=new Date(l),H.level===o&&Z>=W&&R<h?R++:H.level===o&&Z>=W&&(La(H,_,E),R++,R===m&&g.destroy());break;case(i&&u):Z=new Date(H.timestamp),$=new Date(f),H.level===o&&Z<=$&&R<h?R++:H.level===o&&Z<=$&&(La(H,_,E),R++,R===m&&g.destroy());break;case(c&&u):Z=new Date(H.timestamp),W=new Date(l),$=new Date(f),Z>=W&&Z<=$&&R<h?R++:Z>=W&&Z<=$&&(La(H,_,E),R++,R===m&&g.destroy());break;case i:H.level===o&&R<h?R++:H.level===o&&(La(H,_,E),R++,R===m&&g.destroy());break;case c:Z=new Date(H.timestamp),W=new Date(l),Z>=W&&R<h?R++:Z>=W&&R>=h&&(La(H,_,E),R++,R===m&&g.destroy());break;case u:Z=new Date(H.timestamp),$=new Date(f),Z<=$&&R<h?R++:Z<=$&&R>=h&&(La(H,_,E),R++,R===m&&g.destroy());break;default:R<h?R++:(La(H,_,E),R++,R===m&&g.destroy())}}return a(v,"onLogMessage"),await Qie(g,"close"),E}a(noe,"readLog");function La(e,t,r){t==="desc"?soe(e,r):t==="asc"?ioe(e,r):r.push(e)}a(La,"pushLineToResult");function soe(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)>r?n=i+1:s=i}t.splice(n,0,e)}a(soe,"insertDescending");function ioe(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)<r?n=i+1:s=i}t.splice(n,0,e)}a(ioe,"insertAscending")});var Pg=C((VPe,WF)=>{"use strict";var oN=require("joi"),{string:nd,boolean:VF,date:ooe}=oN.types(),aoe=tt(),{validateSchemaExists:FPe,validateTableExists:qPe,validateSchemaName:GPe}=Oi(),coe=(k(),P(q)),loe=lt(),KF=oe();KF.initSync();var $Pe=nd.invalid(KF.get(coe.CONFIG_PARAMS.CLUSTERING_NODENAME)??"node_name").pattern(loe.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),YF={operation:nd.valid("add_node","update_node","set_node_replication"),node_name:nd.optional(),subscriptions:oN.array().items({table:nd.optional(),schema:nd.optional(),database:nd.optional(),subscribe:VF.required(),publish:VF.required().custom(doe),start_time:ooe.iso()})};function uoe(e){return aoe.validateBySchema(e,oN.object(YF))}a(uoe,"addUpdateNodeValidator");function doe(e,t){if(t.state.ancestors[2].operation==="add_node"&&e===!1&&t.state.ancestors[0].subscribe===!1)return t.message(`'subscriptions[${t.state.path[1]}]' subscribe and/or publish must be set to true when adding a node`)}a(doe,"checkForFalsy");WF.exports={addUpdateNodeValidator:uoe,validation_schema:YF}});var Ma=C((YPe,zF)=>{"use strict";var 
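/*
read_log keeps its results ordered while it scans the file: insertAscending and
insertDescending binary-search the insertion point by timestamp and splice the entry in,
so no final sort pass is needed. The ascending case, restated readably (the descending
variant only flips the comparison):

  function insertAscending(entry, results) {
    const ts = new Date(entry.timestamp);
    let low = 0, high = results.length;
    while (low < high) {
      const mid = (low + high) >>> 1;
      if (new Date(results[mid].timestamp) < ts) low = mid + 1;
      else high = mid;
    }
    results.splice(low, 0, entry);
  }
*/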
aN=class{static{a(this,"Node")}constructor(t,r,n){this.name=t,this.subscriptions=r,this.system_info=n}},cN=class{static{a(this,"NodeSubscription")}constructor(t,r,n,s){this.schema=t,this.table=r,this.publish=n,this.subscribe=s}};zF.exports={Node:aN,NodeSubscription:cN}});var QF=C((zPe,jF)=>{"use strict";var foe=(k(),P(q)).OPERATIONS_ENUM,lN=class{static{a(this,"UpsertObject")}constructor(t,r,n,s=void 0){this.operation=foe.UPSERT,this.schema=t,this.table=r,this.records=n,this.__origin=s}};jF.exports=lN});var ch=C((QPe,JF)=>{"use strict";var uN=class{static{a(this,"RemotePayloadObject")}constructor(t,r,n,s){this.operation=t,this.node_name=r,this.subscriptions=n,this.system_info=s}},dN=class{static{a(this,"RemotePayloadSubscription")}constructor(t,r,n,s,i,o,c){this.schema=t,this.table=r,this.hash_attribute=n,this.publish=s,this.subscribe=i,this.start_time=o,c!==void 0&&(this.attributes=c)}};JF.exports={RemotePayloadObject:uN,RemotePayloadSubscription:dN}});var ZF=C((XPe,XF)=>{"use strict";var fN=class{static{a(this,"TableSizeObject")}constructor(t,r,n=0,s=0,i=0,o=0){this.schema=t,this.table=r,this.table_size=n,this.record_count=s,this.transaction_log_size=i,this.transaction_log_record_count=o}};XF.exports=fN});var tq=C((sDe,eq)=>{"use strict";var _oe=ZF(),eDe=qt(),tDe=ht(),hoe=X(),{getSchemaPath:rDe,getTransactionAuditStorePath:nDe}=St(),{getDatabases:moe}=(xe(),P(ct));eq.exports=poe;async function poe(e){let t=new _oe;try{let r=moe()[e.schema]?.[e.name],n=r.primaryStore.getStats(),s=r.auditStore?.getStats();t.schema=e.schema,t.table=e.name,t.record_count=n.entryCount,t.transaction_log_record_count=s.entryCount}catch(r){hoe.warn(`unable to stat table dbi due to ${r}`)}return t}a(poe,"lmdbGetTableSize")});var nq=C((oDe,rq)=>{"use strict";var _N=class{static{a(this,"SystemInformationObject")}constructor(t,r,n,s,i,o,c){this.system=t,this.time=r,this.cpu=n,this.memory=s,this.disk=i,this.network=o,this.harperdb_processes=c}};rq.exports=_N});var id=C((fDe,aq)=>{"use strict";var Eoe=require("fs-extra"),goe=require("path"),Zr=require("systeminformation"),va=X(),sq=Gt(),cDe=lt(),sd=(k(),P(q)),Soe=tq(),Toe=Ii(),{getThreadInfo:iq}=et(),lh=oe();lh.initSync();var Aoe=nq(),{openEnvironment:lDe}=ht(),{getSchemaPath:uDe}=St(),{database:dDe,databases:hN}=(xe(),P(ct)),Dg;aq.exports={getHDBProcessInfo:gN,getNetworkInfo:TN,getDiskInfo:SN,getMemoryInfo:EN,getCPUInfo:pN,getTimeInfo:mN,getSystemInformation:AN,systemInformation:Roe,getTableSize:RN,getMetrics:yN};function mN(){return Zr.time()}a(mN,"getTimeInfo");async function pN(){try{let{family:e,model:t,stepping:r,revision:n,voltage:s,speedmin:i,speedmax:o,governor:c,socket:l,cache:u,...f}=await Zr.cpu();f.cpu_speed=await Zr.cpuCurrentSpeed();let{raw_currentload:d,raw_currentload_idle:_,raw_currentload_irq:h,raw_currentload_nice:m,raw_currentload_system:S,raw_currentload_user:g,cpus:R,...E}=await Zr.currentLoad();return E.cpus=[],R.forEach(T=>{let{raw_load:N,raw_load_idle:v,raw_load_irq:H,raw_load_nice:Z,raw_load_system:W,raw_load_user:$,...se}=T;E.cpus.push(se)}),f.current_load=E,f}catch(e){return va.error(`error in getCPUInfo: ${e}`),{}}}a(pN,"getCPUInfo");async function EN(){try{let{buffers:e,cached:t,slab:r,buffcache:n,...s}=await Zr.mem();return Object.assign(s,process.memoryUsage())}catch(e){return va.error(`error in getMemoryInfo: ${e}`),{}}}a(EN,"getMemoryInfo");async function gN(){let e={core:[],clustering:[]};try{let t=await Zr.processes(),r;try{r=Number.parseInt(await 
Eoe.readFile(goe.join(lh.get(sd.CONFIG_PARAMS.ROOTPATH),sd.HDB_PID_FILE),"utf8"))}catch(n){if(n.code===sd.NODE_ERROR_CODES.ENOENT)va.warn("Unable to locate 'hdb.pid' file, try stopping and starting HarperDB. This could be because HarperDB is not running.");else throw n}t.list.forEach(n=>{n.pid===r?e.core.push(n):n.name==="nats-server"&&e.clustering.push(n)});for(let n of e.core)for(let s of t.list)s.pid===n.parentPid&&(s.name==="PM2"||s.command==="PM2")&&(n.parent="PM2");return e}catch(t){return va.error(`error in getHDBProcessInfo: ${t}`),e}}a(gN,"getHDBProcessInfo");async function SN(){let e={};try{if(!lh.get(sd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_DISK))return e;let{rIO_sec:t,wIO_sec:r,tIO_sec:n,ms:s,...i}=await Zr.disksIO();e.io=i;let{rx_sec:o,tx_sec:c,wx_sec:l,...u}=await Zr.fsStats();return e.read_write=u,e.size=await Zr.fsSize(),e}catch(t){return va.error(`error in getDiskInfo: ${t}`),e}}a(SN,"getDiskInfo");async function TN(){let e={default_interface:null,latency:{},interfaces:[],stats:[],connections:[]};try{return lh.get(sd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_NETWORK)&&(e.default_interface=await Zr.networkInterfaceDefault(),e.latency=await Zr.inetChecksite("google.com"),(await Zr.networkInterfaces()).forEach(n=>{let{internal:s,virtual:i,mtu:o,dhcp:c,dnsSuffix:l,ieee8021xAuth:u,ieee8021xState:f,carrier_changes:d,..._}=n;e.interfaces.push(_)}),(await Zr.networkStats()).forEach(n=>{let{rx_sec:s,tx_sec:i,ms:o,...c}=n;e.stats.push(c)})),e}catch(t){return va.error(`error in getNetworkInfo: ${t}`),e}}a(TN,"getNetworkInfo");async function AN(){if(Dg!==void 0)return Dg;let e={};try{let{codepage:t,logofile:r,serial:n,build:s,servicepack:i,uefi:o,...c}=await Zr.osInfo();e=c;let l=await Zr.versions("node, npm");return e.node_version=l.node,e.npm_version=l.npm,Dg=e,Dg}catch(t){return va.error(`error in getSystemInformation: ${t}`),e}}a(AN,"getSystemInformation");async function RN(){let e=[],t=await Toe.describeAll();for(let r of Object.values(t))for(let n of Object.values(r))e.push(await Soe(n));return e}a(RN,"getTableSize");async function yN(){let e={};for(let t in hN){let r=e[t]={},n=r.tables={};for(let s in hN[t])try{let i=hN[t][s];if(!r.readers&&(Object.assign(r,i.primaryStore.rootStore.getStats()),delete r.root,r.readers=i.primaryStore.rootStore.readerList().split(/\n\s+/).slice(1).map(l=>{let[u,f,d]=l.trim().split(" ");return{pid:u,thread:f,txnid:d}}),i.auditStore)){let{treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:f,entryCount:d,overflowPages:_}=i.auditStore.getStats();r.audit={treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:f,entryCount:d,overflowPages:_}}let o=i.primaryStore.getStats(),c={};for(let l of["treeDepth","treeBranchPageCount","treeLeafPageCount","entryCount","overflowPages"])c[l]=o[l];n[s]=c}catch(i){va.notify(`Error getting stats for table ${s}: ${i}`)}}return e}a(yN,"getMetrics");async function oq(){if(lh.get(sd.CONFIG_PARAMS.CLUSTERING_ENABLED)){let{jsm:e}=await sq.getNATSReferences(),t=await sq.listStreams(),r=[];for(let n of t){let s=[],i=await e.consumers.list(n.config.name);for await(let c of i)s.push({name:c.name,created:c.created,num_ack_pending:c.num_ack_pending,num_redelivered:c.num_redelivered,num_waiting:c.num_waiting,num_pending:c.num_pending});let o={stream_name:n.config.name,database:n.config.subjects[0].split(".")[1],table:n.config.subjects[0].split(".")[2],state:n.state,consumers:s};r.push(o)}return r}}a(oq,"getNatsStreamInfo");async function Roe(e){let t=new Aoe;if(!Array.isArray(e.attributes)||e.attributes.length===0)return t.system=await 
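/*
The system_information helpers above lean on the systeminformation package and trim
fields the operation does not report before returning them. A small sketch of the same
shape for the memory and OS pieces (getMemorySnapshot and getOsSnapshot are illustrative
names; the destructured field names are the ones the bundled code discards):

  const si = require("systeminformation");

  async function getMemorySnapshot() {
    const { buffers, cached, slab, buffcache, ...memory } = await si.mem();
    // Merge in this process's own usage, as the bundled getMemoryInfo does.
    return Object.assign(memory, process.memoryUsage());
  }

  async function getOsSnapshot() {
    const { codepage, logofile, serial, build, servicepack, uefi, ...os } = await si.osInfo();
    const versions = await si.versions("node, npm");
    os.node_version = versions.node;
    os.npm_version = versions.npm;
    return os;
  }
*/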
AN(),t.time=mN(),t.cpu=await pN(),t.memory=await EN(),t.disk=await SN(),t.network=await TN(),t.harperdb_processes=await gN(),t.table_size=await RN(),t.metrics=await yN(),t.threads=await iq(),t.replication=await oq(),t;for(let r=0;r<e.attributes.length;r++)switch(e.attributes[r]){case"system":t.system=await AN();break;case"time":t.time=mN();break;case"cpu":t.cpu=await pN();break;case"memory":t.memory=await EN();break;case"disk":t.disk=await SN();break;case"network":t.network=await TN();break;case"harperdb_processes":t.harperdb_processes=await gN();break;case"table_size":t.table_size=await RN();break;case"database_metrics":case"metrics":t.metrics=await yN();break;case"threads":t.threads=await iq();break;case"replication":t.replication=await oq();break;default:break}return t}a(Roe,"systemInformation")});var Ts=C((EDe,dq)=>{"use strict";var yoe=jr(),bN=ie(),boe=require("util"),_l=(k(),P(q)),cq=oe();cq.initSync();var Ooe=rO(),lq=Yr(),{Node:hDe,NodeSubscription:mDe}=Ma(),Noe=Ou(),woe=QF(),{RemotePayloadObject:Ioe,RemotePayloadSubscription:Coe}=ch(),{handleHDBError:Poe,hdb_errors:Doe}=he(),{HTTP_STATUS_CODES:Loe,HDB_ERROR_MSGS:Moe}=Doe,voe=zs(),Uoe=id(),{packageJson:xoe}=gt(),{getDatabases:Boe}=(xe(),P(ct)),pDe=boe.promisify(Ooe.authorize),Hoe=lq.searchByHash,koe=lq.searchByValue;dq.exports={isEmpty:Foe,getNodeRecord:qoe,upsertNodeRecord:Goe,buildNodePayloads:$oe,checkClusteringEnabled:Voe,getAllNodeRecords:Koe,getSystemInfo:Yoe,reverseSubscription:uq};function Foe(e){return e==null}a(Foe,"isEmpty");async function qoe(e){let t=new Noe(_l.SYSTEM_SCHEMA_NAME,_l.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e],["*"]);return Hoe(t)}a(qoe,"getNodeRecord");async function Goe(e){let t=new woe(_l.SYSTEM_SCHEMA_NAME,_l.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e]);return yoe.upsert(t)}a(Goe,"upsertNodeRecord");function uq(e){if(bN.isEmpty(e.subscribe)||bN.isEmpty(e.publish))throw new Error("Received invalid subscription object");let{schema:t,table:r,hash_attribute:n}=e,s={schema:t,table:r,hash_attribute:n};return e.subscribe===!0&&e.publish===!1?(s.subscribe=!1,s.publish=!0):e.subscribe===!1&&e.publish===!0?(s.subscribe=!0,s.publish=!1):(s.subscribe=e.subscribe,s.publish=e.publish),s}a(uq,"reverseSubscription");function $oe(e,t,r,n){let s=[];for(let i=0,o=e.length;i<o;i++){let c=e[i],{schema:l,table:u}=c,f=bN.getTableHashAttribute(l,u),{subscribe:d,publish:_}=uq(c),h=Boe()[l]?.[u],m=new Coe(l,u,f,_,d,c.start_time,h.schemaDefined?h.attributes:void 0);s.push(m)}return new Ioe(r,t,s,n)}a($oe,"buildNodePayloads");function Voe(){if(!cq.get(_l.CONFIG_PARAMS.CLUSTERING_ENABLED))throw Poe(new Error,Moe.CLUSTERING_NOT_ENABLED,Loe.BAD_REQUEST,void 0,void 0,!0)}a(Voe,"checkClusteringEnabled");async function Koe(){let e=new voe(_l.SYSTEM_SCHEMA_NAME,_l.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,"name","*",void 0,["*"]);return Array.from(await koe(e))}a(Koe,"getAllNodeRecords");async function Yoe(){let e=await Uoe.getSystemInformation();return{hdb_version:xoe.version,node_version:e.node_version,platform:e.platform}}a(Yoe,"getSystemInfo")});var ON=C((SDe,Sq)=>{"use strict";var Lg=Gt(),fq=ie(),_q=lt(),hq=(k(),P(q)),Mg=X(),mq=rd(),Woe=wu(),{RemotePayloadObject:zoe}=ch(),{handleHDBError:pq,hdb_errors:joe}=he(),{HTTP_STATUS_CODES:Eq}=joe,{NodeSubscription:gq}=Ma();Sq.exports=Qoe;async function Qoe(e,t){let r;try{r=await Lg.request(`${t}.${_q.REQUEST_SUFFIX}`,new zoe(hq.OPERATIONS_ENUM.DESCRIBE_ALL,t,void 0,void 0)),Mg.trace("Response from remote describe all request:",r)}catch(o){Mg.error(`addNode received error from describe all request to 
remote node: ${o}`);let c=Lg.requestErrorHandler(o,"add_node",t);throw pq(new Error,c,Eq.INTERNAL_SERVER_ERROR,"error",c)}if(r.status===_q.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let o=`Error returned from remote node ${t}: ${r.message}`;throw pq(new Error,o,Eq.INTERNAL_SERVER_ERROR,"error",o)}let n=r.message,s=[],i=[];for(let o of e){let{table:c}=o,l=o.database??o.schema??"data";if(l===hq.SYSTEM_SCHEMA_NAME){await Lg.createLocalTableStream(l,c);let m=new gq(l,c,o.publish,o.subscribe);m.start_time=o.start_time,i.push(m);continue}let u=fq.doesSchemaExist(l),f=n[l]!==void 0,d=c?fq.doesTableExist(l,c):!0,_=c?n?.[l]?.[c]!==void 0:!0;if(!u&&!f||!d&&!_){s.push(o);continue}if(!u&&f&&(Mg.trace(`addNode creating schema: ${l}`),await mq.createSchema({operation:"create_schema",schema:l})),!d&&_){Mg.trace(`addNode creating table: ${c} in schema: ${l} with attributes ${JSON.stringify(n[l][c].attributes)}`);let m=new Woe(l,c,n[l][c].hash_attribute);n[l][c].attributes&&(m.attributes=n[l][c].attributes),await mq.createTable(m)}await Lg.createLocalTableStream(l,c);let h=new gq(l,c,o.publish,o.subscribe);h.start_time=o.start_time,i.push(h)}return{added:i,skipped:s}}a(Qoe,"reviewSubscriptions")});var hl={};Ue(hl,{addNodeBack:()=>tae,removeNodeBack:()=>rae,setNode:()=>eae});async function eae(e){e.node_name&&!e.hostname&&(e.hostname=e.node_name),e.verify_tls!==void 0&&(e.rejectUnauthorized=e.verify_tls);let{url:t,hostname:r}=e;t?r||(r=e.hostname=Xs(t)):t=Rg(r);let n=(0,Aq.validateBySchema)(e,Zoe);if(n)throw(0,Oo.handleHDBError)(n,n.message,Xoe.BAD_REQUEST,void 0,void 0,!0);if(e.operation==="remove_node"){if(!t&&!r)throw new Oo.ClientError("url or hostname is required for remove_node operation");let h=r,m=or(),S=await m.get(h);if(!S)throw new Oo.ClientError(h+" does not exist");try{await ih({url:S.url},{operation:zt.REMOVE_NODE_BACK,name:S?.subscriptions?.length>0?rt():h},void 0)}catch(g){Xn.warn(`Error removing node from target node ${h}, if it is offline and we be online in the future, you may need to clean up this node manually, or retry:`,g)}return await m.delete(h),`Successfully removed '${h}' from cluster`}if(!t)throw new Oo.ClientError("url required for this operation");let s=Ra();if(s==null)throw new Oo.ClientError("replication url is missing from harperdb-config.yaml");let i,o,c;if(t?.startsWith("wss:")){i=await(0,As.getReplicationCert)();let h=await(0,As.getReplicationCertAuth)();if(!i)throw new Error("Unable to find a certificate to use for replication");i.options.is_self_signed?(o=await(0,As.createCsr)(),Xn.info("Sending CSR to target node:",t)):h&&(c=h.certificate,Xn.info("Sending CA named",h.name,"to target node",t))}let l={operation:zt.ADD_NODE_BACK,hostname:(0,xa.get)(x.REPLICATION_HOSTNAME),target_hostname:r,url:s,csr:o,cert_auth:c,authorization:e.retain_authorization?e.authorization:null};if((0,xa.get)(x.REPLICATION_SHARD)!==void 0&&(l.shard=(0,xa.get)(x.REPLICATION_SHARD)),e.subscriptions?l.subscriptions=e.subscriptions.map(Tq):l.subscriptions=null,e.hasOwnProperty("subscribe")||e.hasOwnProperty("publish")){let h=Tq(e);l.subscribe=h.subscribe,l.publish=h.publish}e?.authorization?.username&&e?.authorization?.password&&(e.authorization="Basic "+Buffer.from(e.authorization.username+":"+e.authorization.password).toString("base64"));let u,f;try{u=await ih({url:t},l,e)}catch(h){h.message=`Error returned from ${t}: `+h.message,Xn.warn("Error adding node:",t,"to cluster:",h),f=h}if(o&&(!u?.certificate||!u?.certificate?.includes?.("BEGIN CERTIFICATE")))throw f?(f.message+=" and connection was required 
to sign certificate",f):new Error(`Unexpected certificate signature response from node ${t} response: ${JSON.stringify(u)}`);o&&(Xn.info("CSR response received from node:",t,"saving certificate and CA in hdb_certificate"),await(0,As.setCertTable)({name:Joe.certificateFromPem(u.signingCA).issuer.getField("CN").value,certificate:u.signingCA,is_authority:!0}),u.certificate&&await(0,As.setCertTable)({name:rt(),uses:["https","operations","wss"],certificate:u.certificate,private_key_name:i?.options?.key_file,is_authority:!1,is_self_signed:!1}),c=u.signingCA);let d={url:t,ca:u?.usingCA};if(e.hostname&&(d.name=e.hostname),e.subscriptions?d.subscriptions=e.subscriptions:d.replicates=!0,e.start_time&&(d.start_time=typeof e.start_time=="string"?new Date(e.start_time).getTime():e.start_time),e.retain_authorization&&(d.authorization=e.authorization),e.revoked_certificates&&(d.revoked_certificates=e.revoked_certificates),u?.shard!==void 0?d.shard=u.shard:e.shard!==void 0&&(d.shard=e.shard),d.replicates){let h={url:s,ca:c,replicates:!0,subscriptions:null};(0,xa.get)(x.REPLICATION_SHARD)!==void 0&&(h.shard=(0,xa.get)(x.REPLICATION_SHARD)),e.retain_authorization&&(h.authorization=e.authorization),e.start_time&&(h.start_time=e.start_time),await To(rt(),h)}await To(u?u.nodeName:d.name??Xs(t),d);let _;return e.operation==="update_node"?_=`Successfully updated '${t}'`:_=`Successfully added '${t}' to cluster`,f&&(_+=" but there was an error updating target node: "+f.message),_}async function tae(e){Xn.trace("addNodeBack received request:",e);let t=await(0,As.signCertificate)(e),r;e.csr?(r=t.signingCA,Xn.info("addNodeBack received CSR from node:",e.url,"this node will use and respond with CA that was used to issue CSR")):(r=e?.cert_auth,Xn.info("addNodeBack received CA from node:",e.url));let n={url:e.url,ca:r};e.subscriptions?n.subscriptions=e.subscriptions:(n.replicates=!0,n.subscriptions=null),e.start_time&&(n.start_time=e.start_time),e.authorization&&(n.authorization=e.authorization),e.shard!==void 0&&(n.shard=e.shard);let s=await(0,As.getReplicationCertAuth)();if(n.replicates){let i={url:Ra(),ca:s?.certificate,replicates:!0,subscriptions:null};(0,xa.get)(x.REPLICATION_SHARD)!==void 0&&(i.shard=(0,xa.get)(x.REPLICATION_SHARD),t.shard=i.shard),e.start_time&&(i.start_time=e.start_time),e.authorization&&(i.authorization=e.authorization),await To(rt(),i)}return await To(e.hostname,n),t.nodeName=rt(),t.usingCA=s?.certificate,Xn.info("addNodeBack responding to:",e.url,"with CA named:",s?.name),t}async function rae(e){Xn.trace("removeNodeBack received request:",e),await or().delete(e.name)}function Tq(e){let{subscribe:t,publish:r}=e;return{...e,subscribe:r,publish:t}}var As,Aq,Ua,xa,Xn,Oo,Joe,Xoe,Zoe,ml=Re(()=>{As=M(ti()),Aq=M(tt()),Ua=M(require("joi")),xa=M(oe());k();Z_();sl();Es();Xn=M(X()),Oo=M(he()),{pki:Joe}=require("node-forge"),{HTTP_STATUS_CODES:Xoe}=Oo.hdb_errors,Zoe=Ua.default.object({hostname:Ua.default.string(),verify_tls:Ua.default.boolean(),replicates:Ua.default.boolean(),subscriptions:Ua.default.array(),revoked_certificates:Ua.default.array(),shard:Ua.default.number()});a(eae,"setNode");a(tae,"addNodeBack");a(rae,"removeNodeBack");a(Tq,"reverseSubscription")});var Hg=C((IDe,yq)=>{"use strict";var{handleHDBError:vg,hdb_errors:nae}=he(),{HTTP_STATUS_CODES:Ug}=nae,{addUpdateNodeValidator:sae}=Pg(),xg=X(),Bg=(k(),P(q)),Rq=lt(),iae=ie(),uh=Gt(),dh=Ts(),NN=oe(),oae=ON(),{Node:aae,NodeSubscription:cae}=Ma(),{broadcast:lae}=et(),{setNode:uae}=(ml(),P(hl)),NDe=oe(),wDe=(k(),P(q)),dae="Unable to create 
subscriptions due to schema and/or tables not existing on the local or remote node",fae="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",_ae=NN.get(Bg.CONFIG_PARAMS.CLUSTERING_NODENAME);yq.exports=hae;async function hae(e,t=!1){if(xg.trace("addNode called with:",e),NN.get(Bg.CONFIG_PARAMS.REPLICATION_URL)||NN.get(Bg.CONFIG_PARAMS.REPLICATION_HOSTNAME))return uae(e);dh.checkClusteringEnabled();let r=sae(e);if(r)throw vg(r,r.message,Ug.BAD_REQUEST,void 0,void 0,!0);let n=e.node_name;if(!t){let d=await dh.getNodeRecord(n);if(!iae.isEmptyOrZeroLength(d))throw vg(new Error,`Node '${n}' has already been added, perform update_node to proceed.`,Ug.BAD_REQUEST,void 0,void 0,!0)}let{added:s,skipped:i}=await oae(e.subscriptions,n),o={message:void 0,added:s,skipped:i};if(s.length===0)return o.message=dae,o;let c=dh.buildNodePayloads(s,_ae,Bg.OPERATIONS_ENUM.ADD_NODE,await dh.getSystemInfo()),l=[];for(let d=0,_=s.length;d<_;d++){let h=s[d];s[d].start_time===void 0&&delete s[d].start_time,l.push(new cae(h.schema,h.table,h.publish,h.subscribe))}xg.trace("addNode sending remote payload:",c);let u;try{u=await uh.request(`${n}.${Rq.REQUEST_SUFFIX}`,c)}catch(d){xg.error(`addNode received error from request: ${d}`);for(let h=0,m=s.length;h<m;h++){let S=s[h];S.publish=!1,S.subscribe=!1,await uh.updateRemoteConsumer(S,n)}let _=uh.requestErrorHandler(d,"add_node",n);throw vg(new Error,_,Ug.INTERNAL_SERVER_ERROR,"error",_)}if(u.status===Rq.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let d=`Error returned from remote node ${n}: ${u.message}`;throw vg(new Error,d,Ug.INTERNAL_SERVER_ERROR,"error",d)}xg.trace(u);for(let d=0,_=s.length;d<_;d++){let h=s[d];await uh.updateRemoteConsumer(h,n),h.subscribe===!0&&await uh.updateConsumerIterator(h.schema,h.table,n,"start")}let f=new aae(n,l,u.system_info);return await dh.upsertNodeRecord(f),lae({type:"nats_update"}),i.length>0?o.message=fae:o.message=`Successfully added '${n}' to manifest`,o}a(hae,"addNode")});var PN=C((DDe,Oq)=>{"use strict";var{handleHDBError:wN,hdb_errors:mae}=he(),{HTTP_STATUS_CODES:IN}=mae,{addUpdateNodeValidator:pae}=Pg(),fh=X(),kg=(k(),P(q)),bq=lt(),PDe=ie(),_h=Gt(),hh=Ts(),CN=oe(),{cloneDeep:Eae}=require("lodash"),gae=ON(),{Node:Sae,NodeSubscription:Tae}=Ma(),{broadcast:Aae}=et(),{setNode:Rae}=(ml(),P(hl)),yae="Unable to update subscriptions due to schema and/or tables not existing on the local or remote node",bae="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",Oae=CN.get(kg.CONFIG_PARAMS.CLUSTERING_NODENAME);Oq.exports=Nae;async function Nae(e){if(fh.trace("updateNode called with:",e),CN.get(kg.CONFIG_PARAMS.REPLICATION_URL)??CN.get(kg.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Rae(e);hh.checkClusteringEnabled();let t=pae(e);if(t)throw wN(t,t.message,IN.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n,s=await hh.getNodeRecord(r);s.length>0&&(n=Eae(s));let{added:i,skipped:o}=await gae(e.subscriptions,r),c={message:void 0,updated:i,skipped:o};if(i.length===0)return c.message=yae,c;let l=hh.buildNodePayloads(i,Oae,kg.OPERATIONS_ENUM.UPDATE_NODE,await hh.getSystemInfo());for(let f=0,d=i.length;f<d;f++){let _=i[f];fh.trace(`updateNode updating work stream for node: ${r} subscription:`,_),i[f].start_time===void 0&&delete i[f].start_time}fh.trace("updateNode sending remote payload:",l);let u;try{u=await _h.request(`${r}.${bq.REQUEST_SUFFIX}`,l)}catch(f){fh.error(`updateNode received error from request: ${f}`);let 
d=_h.requestErrorHandler(f,"update_node",r);throw wN(new Error,d,IN.INTERNAL_SERVER_ERROR,"error",d)}if(u.status===bq.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let f=`Error returned from remote node ${r}: ${u.message}`;throw wN(new Error,f,IN.INTERNAL_SERVER_ERROR,"error",f)}fh.trace(u);for(let f=0,d=i.length;f<d;f++){let _=i[f];await _h.updateRemoteConsumer(_,r),_.subscribe===!0?await _h.updateConsumerIterator(_.schema,_.table,r,"start"):await _h.updateConsumerIterator(_.schema,_.table,r,"stop")}return n||(n=[new Sae(r,[],u.system_info)]),await wae(n[0],i,u.system_info),o.length>0?c.message=bae:c.message=`Successfully updated '${r}'`,c}a(Nae,"updateNode");async function wae(e,t,r){let n=e;for(let s=0,i=t.length;s<i;s++){let o=t[s],c=!1;for(let l=0,u=e.subscriptions.length;l<u;l++){let f=n.subscriptions[l];if(f.schema===o.schema&&f.table===o.table){f.publish=o.publish,f.subscribe=o.subscribe,c=!0;break}}c||n.subscriptions.push(new Tae(o.schema,o.table,o.publish,o.subscribe))}n.system_info=r,await hh.upsertNodeRecord(n),Aae({type:"nats_update"})}a(wae,"updateNodeTable")});var Pq=C((MDe,Cq)=>{"use strict";var Iq=require("joi"),{string:Nq}=Iq.types(),Iae=tt(),wq=(k(),P(q)),Cae=oe(),Pae=lt();Cq.exports=Dae;function Dae(e){let t=Nq.invalid(Cae.get(wq.CONFIG_PARAMS.CLUSTERING_NODENAME)).pattern(Pae.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),r=Iq.object({operation:Nq.valid(wq.OPERATIONS_ENUM.REMOVE_NODE).required(),node_name:t});return Iae.validateBySchema(e,r)}a(Dae,"removeNodeValidator")});var Fg=C((UDe,Uq)=>{"use strict";var{handleHDBError:Dq,hdb_errors:Lae}=he(),{HTTP_STATUS_CODES:Lq}=Lae,Mae=Pq(),mh=X(),Mq=Ts(),vae=ie(),od=(k(),P(q)),vq=lt(),DN=Gt(),LN=oe(),{RemotePayloadObject:Uae}=ch(),{NodeSubscription:xae}=Ma(),Bae=Nu(),Hae=_a(),{broadcast:kae}=et(),{setNode:Fae}=(ml(),P(hl)),qae=LN.get(od.CONFIG_PARAMS.CLUSTERING_NODENAME);Uq.exports=Gae;async function Gae(e){if(mh.trace("removeNode called with:",e),LN.get(od.CONFIG_PARAMS.REPLICATION_URL)??LN.get(od.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Fae(e);Mq.checkClusteringEnabled();let t=Mae(e);if(t)throw Dq(t,t.message,Lq.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n=await Mq.getNodeRecord(r);if(vae.isEmptyOrZeroLength(n))throw Dq(new Error,`Node '${r}' was not found.`,Lq.BAD_REQUEST,void 0,void 0,!0);n=n[0];let s=new Uae(od.OPERATIONS_ENUM.REMOVE_NODE,qae,[]),i,o=!1;for(let l=0,u=n.subscriptions.length;l<u;l++){let f=n.subscriptions[l];f.subscribe===!0&&await DN.updateConsumerIterator(f.schema,f.table,r,"stop");try{await DN.updateRemoteConsumer(new xae(f.schema,f.table,!1,!1),r)}catch(d){mh.error(d)}}try{i=await DN.request(`${r}.${vq.REQUEST_SUFFIX}`,s),mh.trace("Remove node reply from remote node:",r,i)}catch(l){mh.error("removeNode received error from request:",l),o=!0}let c=new Bae(od.SYSTEM_SCHEMA_NAME,od.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[r]);return await Hae.deleteRecord(c),kae({type:"nats_update"}),i?.status===vq.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR||o?(mh.error("Error returned from remote node:",r,i?.message),`Successfully removed '${r}' from local manifest, however there was an error reaching remote node. 
Check the logs for more details.`):`Successfully removed '${r}' from manifest`}a(Gae,"removeNode")});var Hq=C((BDe,Bq)=>{"use strict";var xq=require("joi"),{string:$ae,array:Vae}=xq.types(),Kae=tt(),Yae=Pg();Bq.exports=Wae;function Wae(e){let t=xq.object({operation:$ae.valid("configure_cluster").required(),connections:Vae.items(Yae.validation_schema).required()});return Kae.validateBySchema(e,t)}a(Wae,"configureClusterValidator")});var MN=C((kDe,$q)=>{"use strict";var kq=(k(),P(q)),qg=X(),zae=ie(),jae=oe(),Qae=Fg(),Jae=Hg(),Xae=Ts(),Zae=Hq(),{handleHDBError:Fq,hdb_errors:ece}=he(),{HTTP_STATUS_CODES:qq}=ece,tce="Configure cluster complete.",rce="Failed to configure the cluster. Check the logs for more details.",nce="Configure cluster was partially successful. Errors occurred when attempting to configure the following nodes. Check the logs for more details.";$q.exports=sce;async function sce(e){qg.trace("configure cluster called with:",e);let t=Zae(e);if(t)throw Fq(t,t.message,qq.BAD_REQUEST,void 0,void 0,!0);let r=await Xae.getAllNodeRecords(),n=[];if(jae.get(kq.CONFIG_PARAMS.CLUSTERING_ENABLED)){for(let f=0,d=r.length;f<d;f++){let _=await Gq(Qae,{operation:kq.OPERATIONS_ENUM.REMOVE_NODE,node_name:r[f].name},r[f].name);n.push(_)}qg.trace("All results from configure_cluster remove node:",n)}let s=[],i=e.connections.length;for(let f=0;f<i;f++){let d=e.connections[f],_=await Gq(Jae,d,d.node_name);s.push(_)}qg.trace("All results from configure_cluster add node:",s);let o=[],c=[],l=!1,u=n.concat(s);for(let f=0,d=u.length;f<d;f++){let _=u[f];_.status==="rejected"&&(qg.error(_.node_name,_?.error?.message,_?.error?.stack),o.includes(_.node_name)||o.push(_.node_name)),(_?.result?.message?.includes?.("Successfully")||_?.result?.includes?.("Successfully"))&&(l=!0),!(typeof _.result=="string"&&_.result.includes("Successfully removed")||_.status==="rejected")&&c.push({node_name:_?.node_name,response:_?.result})}if(zae.isEmptyOrZeroLength(o))return{message:tce,connections:c};if(l)return{message:nce,failed_nodes:o,connections:c};throw Fq(new Error,rce,qq.INTERNAL_SERVER_ERROR,void 0,void 0,!0)}a(sce,"configureCluster");async function Gq(e,t,r){try{return{node_name:r,result:await e(t)}}catch(n){return{node_name:r,error:n,status:"rejected"}}}a(Gq,"functionWrapper")});var Wq=C((qDe,Yq)=>{"use strict";var ph=require("joi"),ice=tt(),{validateSchemaExists:Vq,validateTableExists:oce,validateSchemaName:Kq}=Oi(),ace=ph.object({operation:ph.string().valid("purge_stream"),schema:ph.string().custom(Vq).custom(Kq).optional(),database:ph.string().custom(Vq).custom(Kq).optional(),table:ph.string().custom(oce).required()});function cce(e){return ice.validateBySchema(e,ace)}a(cce,"purgeStreamValidator");Yq.exports=cce});var vN=C(($De,zq)=>{"use strict";var{handleHDBError:lce,hdb_errors:uce}=he(),{HTTP_STATUS_CODES:dce}=uce,fce=Wq(),_ce=Gt(),hce=Ts();zq.exports=mce;async function mce(e){e.schema=e.schema??e.database;let t=fce(e);if(t)throw lce(t,t.message,dce.BAD_REQUEST,void 0,void 0,!0);hce.checkClusteringEnabled();let{schema:r,table:n,options:s}=e;return await _ce.purgeTableStream(r,n,s),`Successfully purged table '${r}.${n}'`}a(mce,"purgeStream")});var BN=C((KDe,tG)=>{"use strict";var 
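/*
configure_cluster above removes the existing node records and re-adds each requested
connection, wrapping every call so one failing node cannot abort the batch; each entry
ends up as either { node_name, result } or { node_name, error, status: "rejected" } and
the aggregate decides between full success, partial success, and failure. A sketch of
that per-node wrapper (runForNode is an illustrative name):

  async function runForNode(operation, request, nodeName) {
    try {
      return { node_name: nodeName, result: await operation(request) };
    } catch (error) {
      return { node_name: nodeName, error, status: "rejected" };
    }
  }
*/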
xN=Ts(),pce=Gt(),$g=oe(),ad=(k(),P(q)),pl=lt(),Ece=ie(),UN=X(),{RemotePayloadObject:gce}=ch(),{ErrorCode:jq}=require("nats"),{parentPort:Qq}=require("worker_threads"),{onMessageByType:Sce}=et(),{getThisNodeName:Tce}=(Es(),P(wa)),{requestClusterStatus:Ace}=(Z_(),P(gk)),{getReplicationSharedStatus:Rce,getHDBNodeTable:yce}=(sl(),P(bO)),{CONFIRMATION_STATUS_POSITION:bce,RECEIVED_VERSION_POSITION:Oce,RECEIVED_TIME_POSITION:Nce,SENDING_TIME_POSITION:wce,RECEIVING_STATUS_POSITION:Ice,RECEIVING_STATUS_RECEIVING:Cce}=(jO(),P(lF)),Jq=$g.get(ad.CONFIG_PARAMS.CLUSTERING_ENABLED),Xq=$g.get(ad.CONFIG_PARAMS.CLUSTERING_NODENAME);tG.exports={clusterStatus:Pce,buildNodeStatus:eG};var Zq;Sce("cluster-status",async e=>{Zq(e)});async function Pce(){if($g.get(ad.CONFIG_PARAMS.REPLICATION_URL)||$g.get(ad.CONFIG_PARAMS.REPLICATION_HOSTNAME)){let n;if(Qq){Qq.postMessage({type:"request-cluster-status"}),n=await new Promise(i=>{Zq=i});for(let i of n.connections){let o=i.name;for(let c of i.database_sockets){let l=c.database,u;for(let d of Object.values(databases[l]||{}))if(u=d.auditStore,u)break;if(!u)continue;let f=Rce(u,l,o);c.lastCommitConfirmed=Gg(f[bce]),c.lastReceivedRemoteTime=Gg(f[Oce]),c.lastReceivedLocalTime=Gg(f[Nce]),c.sendingMessage=Gg(f[wce]),c.lastReceivedStatus=f[Ice]===Cce?"Receiving":"Waiting"}}}else n=Ace();n.node_name=Tce();let s=yce().primaryStore.get(n.node_name);return s?.shard&&(n.shard=s.shard),s?.url&&(n.url=s.url),n.is_enabled=!0,n}let e={node_name:Xq,is_enabled:Jq,connections:[]};if(!Jq)return e;let t=await xN.getAllNodeRecords();if(Ece.isEmptyOrZeroLength(t))return e;let r=[];for(let n=0,s=t.length;n<s;n++)r.push(eG(t[n],e.connections));return await Promise.allSettled(r),e}a(Pce,"clusterStatus");function Gg(e){return e?e===1?"Copying":new Date(e).toUTCString():void 0}a(Gg,"asDate");async function eG(e,t){let r=e.name,n=new gce(ad.OPERATIONS_ENUM.CLUSTER_STATUS,Xq,void 0,await xN.getSystemInfo()),s,i,o=pl.CLUSTER_STATUS_STATUSES.OPEN;try{let l=Date.now();s=await pce.request(pl.REQUEST_SUBJECT(r),n),i=Date.now()-l,s.status===pl.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR&&(o=pl.CLUSTER_STATUS_STATUSES.CLOSED,UN.error(`Error getting node status from ${r} `,s))}catch(l){UN.warn(`Error getting node status from ${r}`,l),l.code===jq.NoResponders?o=pl.CLUSTER_STATUS_STATUSES.NO_RESPONDERS:l.code===jq.Timeout?o=pl.CLUSTER_STATUS_STATUSES.TIMEOUT:o=pl.CLUSTER_STATUS_STATUSES.CLOSED}let c=new Dce(r,o,s?.message?.ports?.clustering,s?.message?.ports?.operations_api,i,s?.message?.uptime,e.subscriptions,s?.message?.system_info);try{let l={name:r,system_info:s?.message?.system_info};e.system_info?.hdb_version!==ad.PRE_4_0_0_VERSION&&await xN.upsertNodeRecord(l)}catch(l){UN.error("Cluster status encountered an error updating system info for node:",r,l)}t.push(c)}a(eG,"buildNodeStatus");function Dce(e,t,r,n,s,i,o,c){this.node_name=e,this.status=t,this.ports={clustering:r,operations_api:n},this.latency_ms=s,this.uptime=i,this.subscriptions=o,this.system_info=c}a(Dce,"NodeStatusObject")});var kN=C((WDe,rG)=>{"use strict";var{handleHDBError:Lce,hdb_errors:Mce}=he(),{HTTP_STATUS_CODES:vce}=Mce,Uce=Gt(),xce=Ts(),HN=ie(),Vg=require("joi"),Bce=tt(),Hce=2e3,kce=Vg.object({timeout:Vg.number().min(1),connected_nodes:Vg.boolean(),routes:Vg.boolean()});rG.exports=Fce;async function Fce(e){xce.checkClusteringEnabled();let t=Bce.validateBySchema(e,kce);if(t)throw Lce(t,t.message,vce.BAD_REQUEST,void 0,void 0,!0);let{timeout:r,connected_nodes:n,routes:s}=e,i=n===void 0||HN.autoCastBoolean(n),o=s===void 
0||HN.autoCastBoolean(s),c={nodes:[]},l=await Uce.getServerList(r??Hce),u={};if(i)for(let f=0,d=l.length;f<d;f++){let _=l[f].statsz;_&&(u[l[f].server.name]=_.routes)}for(let f=0,d=l.length;f<d;f++){if(l[f].statsz)continue;let _=l[f].server,h=l[f].data;if(_.name.endsWith("-hub")){let m={name:_.name.slice(0,-4),response_time:l[f].response_time};i&&(m.connected_nodes=[],u[_.name]&&u[_.name].forEach(S=>{m.connected_nodes.includes(S.name.slice(0,-4))||m.connected_nodes.push(S.name.slice(0,-4))})),o&&(m.routes=h.cluster?.urls?h.cluster?.urls.map(S=>({host:S.split(":")[0],port:HN.autoCast(S.split(":")[1])})):[]),c.nodes.push(m)}}return c}a(Fce,"clusterNetwork")});var oG=C((jDe,iG)=>{"use strict";var FN=require("joi"),nG=tt(),{route_constraints:sG}=Sy();iG.exports={setRoutesValidator:qce,deleteRoutesValidator:Gce};function qce(e){let t=FN.object({server:FN.valid("hub","leaf"),routes:sG.required()});return nG.validateBySchema(e,t)}a(qce,"setRoutesValidator");function Gce(e){let t=FN.object({routes:sG.required()});return nG.validateBySchema(e,t)}a(Gce,"deleteRoutesValidator")});var Kg=C((JDe,_G)=>{"use strict";var No=Lt(),qN=ie(),Rs=(k(),P(q)),cd=oe(),aG=oG(),{handleHDBError:cG,hdb_errors:$ce}=he(),{HTTP_STATUS_CODES:lG}=$ce,uG="cluster routes successfully set",dG="cluster routes successfully deleted";_G.exports={setRoutes:Kce,getRoutes:Yce,deleteRoutes:Wce};function Vce(e){let t=No.getClusteringRoutes(),r=e.server==="hub"?t.hub_routes:t.leaf_routes,n=e.server==="hub"?t.leaf_routes:t.hub_routes,s=[],i=[];for(let o=0,c=e.routes.length;o<c;o++){let l=e.routes[o];l.port=qN.autoCast(l.port);let u=r.some(d=>d.host===l.host&&d.port===l.port),f=n.some(d=>d.host===l.host&&d.port===l.port);u||f?s.push(l):(r.push(l),i.push(l))}return e.server==="hub"?No.updateConfigValue(Rs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r):No.updateConfigValue(Rs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,r),{message:uG,set:i,skipped:s}}a(Vce,"setRoutesNats");function Kce(e){let t=aG.setRoutesValidator(e);if(t)throw cG(t,t.message,lG.BAD_REQUEST,void 0,void 0,!0);if(cd.get(Rs.CONFIG_PARAMS.CLUSTERING_ENABLED))return Vce(e);let r=[],n=[],s=cd.get(Rs.CONFIG_PARAMS.REPLICATION_ROUTES)??[];return e.routes.forEach(i=>{fG(s,i)?n.push(i):(s.push(i),r.push(i))}),No.updateConfigValue(Rs.CONFIG_PARAMS.REPLICATION_ROUTES,s),{message:uG,set:r,skipped:n}}a(Kce,"setRoutes");function fG(e,t){return typeof t=="string"?e.includes(t):typeof t=="object"&&t!==null?e.some(r=>(r.host===t.host||r.hostname===t.hostname)&&r.port===t.port):!1}a(fG,"existsInArray");function Yce(){if(cd.get(Rs.CONFIG_PARAMS.CLUSTERING_ENABLED)){let e=No.getClusteringRoutes();return{hub:e.hub_routes,leaf:e.leaf_routes}}else return cd.get(Rs.CONFIG_PARAMS.REPLICATION_ROUTES)??[]}a(Yce,"getRoutes");function Wce(e){let t=aG.deleteRoutesValidator(e);if(t)throw cG(t,t.message,lG.BAD_REQUEST,void 0,void 0,!0);if(cd.get(Rs.CONFIG_PARAMS.CLUSTERING_ENABLED))return zce(e);let r=[],n=[],s=cd.get(Rs.CONFIG_PARAMS.REPLICATION_ROUTES)??[],i=[];return s.forEach(o=>{fG(e.routes,o)?r.push(o):(i.push(o),n.push(o))}),No.updateConfigValue(Rs.CONFIG_PARAMS.REPLICATION_ROUTES,i),{message:dG,deleted:r,skipped:n}}a(Wce,"deleteRoutes");function zce(e){let t=No.getClusteringRoutes(),r=t.hub_routes,n=t.leaf_routes,s=[],i=[],o=!1,c=!1;for(let l=0,u=e.routes.length;l<u;l++){let f=e.routes[l],d=!1;for(let _=0,h=r.length;_<h;_++){let m=r[_];if(f.host===m.host&&f.port===m.port){r.splice(_,1),d=!0,o=!0,s.push(f);break}}if(!d){let _=!0;for(let h=0,m=n.length;h<m;h++){let 
S=n[h];if(f.host===S.host&&f.port===S.port){n.splice(h,1),c=!0,_=!1,s.push(f);break}}_&&i.push(f)}}return o&&(r=qN.isEmptyOrZeroLength(r)?null:r,No.updateConfigValue(Rs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r)),c&&(n=qN.isEmptyOrZeroLength(n)?null:n,No.updateConfigValue(Rs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,n)),{message:dG,deleted:s,skipped:i}}a(zce,"deleteRoutesNats")});var mG=C((ZDe,hG)=>{"use strict";var Eh=require("alasql"),El=require("recursive-iterator"),ri=X(),jce=ie(),gh=(k(),P(q)),GN=class{static{a(this,"sql_statement_bucket")}constructor(t){this.ast=t,this.affected_attributes=new Map,this.table_lookup=new Map,this.schema_lookup=new Map,this.table_to_schema_lookup=new Map,Jce(this.ast,this.affected_attributes,this.table_lookup,this.schema_lookup,this.table_to_schema_lookup)}getAttributesBySchemaTableName(t,r){if(!t||!r||!this.affected_attributes)return[];if(this.affected_attributes.has(t))return!this.affected_attributes.get(t).has(r)&&(r=this.table_lookup.get(r),!r)?[]:this.affected_attributes.get(t).get(r)}getAllTables(){let t=[];if(!this.affected_attributes)return t;for(let r of this.affected_attributes.keys())t.push(Array.from(this.affected_attributes.get(r).keys()));return t}getTablesBySchemaName(t){return!t||!this.affected_attributes?[]:Array.from(this.affected_attributes.get(t).keys())}getSchemas(){return this.affected_attributes?Array.from(this.affected_attributes.keys()):[]}getAst(){return this.ast}updateAttributeWildcardsForRolePerms(t){let r=this.ast.columns.filter(s=>gh.SEARCH_WILDCARDS.includes(s.columnid));if(r.length===0)return this.ast;let n=this.ast.from[0].databaseid;return this.ast.columns=this.ast.columns.filter(s=>!gh.SEARCH_WILDCARDS.includes(s.columnid)),r.forEach(s=>{let i=this.table_to_schema_lookup.has(s.tableid)?this.table_to_schema_lookup.get(s.tableid):n,o=this.table_lookup.has(s.tableid)?this.table_lookup.get(s.tableid):this.ast.from[0].tableid;if(t[i]&&t[i].tables[o]&&t[i].tables[o][gh.PERMS_CRUD_ENUM.READ]){let c;t[i].tables[o].attribute_permissions.length>0?c=Qce(t[i].tables[o].attribute_permissions):c=global.hdb_schema[i][o].attributes.map(u=>({attribute_name:u.attribute}));let l=this.affected_attributes.get(i).get(o).filter(u=>!gh.SEARCH_WILDCARDS.includes(u));c.forEach(({attribute_name:u})=>{let f=new Eh.yy.Column({columnid:u});s.tableid&&(f.tableid=s.tableid),this.ast.columns.push(f),l.includes(u)||l.push(u)}),this.affected_attributes.get(i).set(o,l)}}),this.ast}};function Qce(e){return e.filter(t=>t[gh.PERMS_CRUD_ENUM.READ])}a(Qce,"filterReadRestrictedAttrs");function Jce(e,t,r,n,s){Xce(e,t,r,n,s)}a(Jce,"interpretAST");function Sh(e,t,r,n,s){if(!(!e||!e.databaseid)&&(t.has(e.databaseid)||t.set(e.databaseid,new Map),t.get(e.databaseid).has(e.tableid)||t.get(e.databaseid).set(e.tableid,[]),e.as&&(r.has(e.as)||r.set(e.as,e.tableid),n&&!n.has(e.as)&&n.set(e.as,e.databaseid)),s)){let i=e.databaseid,o=e.tableid;e.as&&(o=e.as),s.set(o,i)}}a(Sh,"addSchemaTableToMap");function Xce(e,t,r,n,s){if(!e){ri.info("getRecordAttributesAST: invalid SQL syntax tree");return}e instanceof Eh.yy.Insert?rle(e,t,r):e instanceof Eh.yy.Select?Zce(e,t,r,n,s):e instanceof Eh.yy.Update?ele(e,t,r):e instanceof Eh.yy.Delete?tle(e,t,r):ri.error("AST in getRecordAttributesAST() is not a valid SQL type.")}a(Xce,"getRecordAttributesAST");function Zce(e,t,r,n,s){if(!e){ri.info("getSelectAttributes: invalid SQL syntax tree");return}if(!e.from||e.from[0]===void 0)return;let i=e.from[0].databaseid;if(jce.isEmptyOrZeroLength(i)){ri.error("No schema 
specified");return}e.from.forEach(c=>{Sh(c,t,r,n,s)}),e.joins&&e.joins.forEach(c=>{c.as&&(c.table.as=c.as),Sh(c.table,t,r,n,s)});let o=new El(e.columns);for(let{node:c}of o)if(c&&c.columnid){let l=c.tableid,u=n.has(l)?n.get(l):i;if(l||(l=e.from[0].tableid),!t.get(u).has(l))if(r.has(l))l=r.get(l);else{ri.info(`table specified as ${l} not found.`);return}t.get(u).get(l).indexOf(c.columnid)<0&&t.get(u).get(l).push(c.columnid)}if(e.where){let c=new El(e.where),l=e.from[0].tableid;for(let{node:u}of c)if(u&&u.columnid){let f=u.tableid?u.tableid:l;if(!t.get(i).has(f))if(r.has(f))f=r.get(f);else{ri.info(`table specified as ${f} not found.`);continue}t.get(i).get(f).indexOf(u.columnid)<0&&t.get(i).get(f).push(u.columnid)}}if(e.joins&&e.joins.forEach(c=>{let l=new El(c.on);for(let{node:u}of l)if(u&&u.columnid){let f=u.tableid,d=s.get(f);if(!t.get(d).has(f))if(r.has(f))f=r.get(f);else{ri.info(`table specified as ${f} not found.`);continue}t.get(d).get(f).indexOf(u.columnid)<0&&t.get(d).get(f).push(u.columnid)}}),e.order){let c=new El(e.order);for(let{node:l}of c)if(l&&l.columnid){let u=l.tableid,f=n.has(u)?n.get(u):i;if(u||(u=e.from[0].tableid),!t.get(f).has(u))if(r.has(u))u=r.get(u);else{ri.info(`table specified as ${u} not found.`);return}t.get(f).get(u).indexOf(l.columnid)<0&&t.get(f).get(u).push(l.columnid)}}}a(Zce,"getSelectAttributes");function ele(e,t,r){if(!e){ri.info("getUpdateAttributes: invalid SQL syntax tree");return}let n=new El(e.columns),s=e.table.databaseid;Sh(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&$N(e.table.tableid,s,i.columnid,t,r)}a(ele,"getUpdateAttributes");function tle(e,t,r){if(!e){ri.info("getDeleteAttributes: invalid SQL syntax tree");return}let n=new El(e.where),s=e.table.databaseid;Sh(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&$N(e.table.tableid,s,i.columnid,t,r)}a(tle,"getDeleteAttributes");function rle(e,t,r){if(!e){ri.info("getInsertAttributes: invalid SQL syntax tree");return}let n=new El(e.columns),s=e.into.databaseid;Sh(e.into,t,r);for(let{node:i}of n)i&&i.columnid&&$N(e.into.tableid,s,i.columnid,t,r)}a(rle,"getInsertAttributes");function $N(e,t,r,n,s){if(!n.get(t))return;let i=e;n.get(t).has(i)||(i=s.get(i)),n.get(t).get(i).push(r)}a($N,"pushAttribute");hG.exports=GN});var EG=C((tLe,pG)=>{"use strict";var Yg=(k(),P(q)),Wg=class{static{a(this,"BaseLicense")}constructor(t=0,r=Yg.RAM_ALLOCATION_ENUM.DEFAULT,n=Yg.LICENSE_VALUES.VERSION_DEFAULT,s){this.exp_date=t,this.ram_allocation=r,this.version=n,this.fingerprint=s}},VN=class extends Wg{static{a(this,"ExtendedLicense")}constructor(t=0,r=Yg.RAM_ALLOCATION_ENUM.DEFAULT,n=Yg.LICENSE_VALUES.VERSION_DEFAULT,s,i=!1){super(t,r,n,s),this.enterprise=i}};pG.exports={BaseLicense:Wg,ExtendedLicense:VN}});var dd=C((nLe,yG)=>{"use strict";var ud=require("fs-extra"),zg=(zp(),P(Wp)),SG=require("crypto"),nle=require("moment"),sle=require("uuid").v4,en=X(),YN=require("path"),ile=ie(),gl=(k(),P(q)),{totalmem:gG}=require("os"),ole=EG().ExtendedLicense,ld="invalid license key format",ale="061183",cle="mofi25",lle="aes-256-cbc",ule=16,dle=32,TG=oe(),{resolvePath:AG}=Lt();TG.initSync();var KN;yG.exports={validateLicense:RG,generateFingerPrint:_le,licenseSearch:jN,getLicense:ple,checkMemoryLimit:Ele};function WN(){return YN.join(TG.getHdbBasePath(),gl.LICENSE_KEY_DIR_NAME,gl.LICENSE_FILE_NAME)}a(WN,"getLicenseDirPath");function fle(){let e=WN();return AG(YN.join(e,gl.LICENSE_FILE_NAME))}a(fle,"getLicenseFilePath");function zN(){let e=WN();return AG(YN.join(e,gl.REG_KEY_FILE_NAME))}a(zN,"getFingerPrintFilePath");async function 
_le(){let e=zN();try{return await ud.readFile(e,"utf8")}catch(t){if(t.code==="ENOENT")return await hle();throw en.error(`Error writing fingerprint file to ${e}`),en.error(t),new Error("There was an error generating the fingerprint")}}a(_le,"generateFingerPrint");async function hle(){let e=sle(),t=zg.hash(e,zg.HASH_FUNCTION.MD5),r=zN();try{await ud.mkdirp(WN()),await ud.writeFile(r,t)}catch(n){if(n.code==="EEXIST")return t;throw en.error(`Error writing fingerprint file to ${r}`),en.error(n),new Error("There was an error generating the fingerprint")}return t}a(hle,"writeFingerprint");function RG(e,t){let r={valid_license:!1,valid_date:!1,valid_machine:!1,exp_date:null,ram_allocation:gl.RAM_ALLOCATION_ENUM.DEFAULT,version:gl.LICENSE_VALUES.VERSION_DEFAULT};if(!e)return en.error("empty license key passed to validate."),r;let n=zN(),s=!1;try{s=ud.statSync(n)}catch(i){en.error(i)}if(s){let i;try{i=ud.readFileSync(n,"utf8")}catch{en.error("error validating this machine in the license"),r.valid_machine=!1;return}let o=e.split(cle),c=o[1];c=Buffer.concat([Buffer.from(c)],ule);let l=Buffer.concat([Buffer.from(i)],dle),u=SG.createDecipheriv(lle,l,c);r.valid_date=!0,r.valid_license=!0,r.valid_machine=!0;let f=null;try{f=u.update(o[0],"hex","utf8"),f.trim(),f+=u.final("utf8")}catch{let h=mle(o[0],i);if(h)f=h;else throw r.valid_license=!1,r.valid_machine=!1,console.error(ld),en.error(ld),new Error(ld)}let d;if(isNaN(f))try{d=JSON.parse(f),r.version=d.version,r.exp_date=d.exp_date,isNaN(r.exp_date)&&(r.exp_date=new Date(r.exp_date).getTime()),d.ram_allocation&&(r.ram_allocation=d.ram_allocation)}catch{throw console.error(ld),en.error(ld),new Error(ld)}else r.exp_date=f;r.exp_date<nle().valueOf()&&(r.valid_date=!1),zg.validate(o[1],`${ale}${i}${t}`,zg.HASH_FUNCTION.MD5)||(r.valid_license=!1)}else r.valid_license=!1,r.valid_machine=!1;return r.valid_license&&r.valid_machine&&r.valid_date||en.error("Invalid licence"),r}a(RG,"validateLicense");function mle(e,t){try{let r=SG.createDecipher("aes192",t),n=r.update(e,"hex","utf8");return n.trim(),n+=r.final("utf8"),n}catch{en.warn("Check old license failed")}}a(mle,"checkOldLicense");function jN(){let e=new ole,t=[];try{t=ud.readFileSync(fle(),"utf-8").split(`\r
24
- `)}catch(r){r.code==="ENOENT"?en.debug("no license file found"):en.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(ile.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=RG(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){en.error("There was an error parsing the license string."),en.error(s),e.ram_allocation=gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return KN=e,e}a(jN,"licenseSearch");async function ple(){return KN||await jN(),KN}a(ple,"getLicense");function Ele(){let e=jN().ram_allocation,t=process.constrainedMemory?.()||gG();if(t=Math.round(Math.min(t,gG())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}a(Ele,"checkMemoryLimit")});var XN=C((iLe,wG)=>{var jg=dd(),bG=require("chalk"),Zn=X(),OG=require("prompt"),{promisify:gle}=require("util"),QN=(k(),P(q)),Sle=require("fs-extra"),Tle=require("path"),Ale=ie(),{packageJson:Rle}=gt(),NG=oe();NG.initSync();var yle=require("moment"),ble=gle(OG.get),Ole=Tle.join(NG.getHdbBasePath(),QN.LICENSE_KEY_DIR_NAME,QN.LICENSE_FILE_NAME,QN.LICENSE_FILE_NAME);wG.exports={getFingerprint:wle,setLicense:Nle,parseLicense:JN,register:Ile,getRegistrationInfo:Ple};async function Nle(e){if(e&&e.key&&e.company){try{Zn.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await JN(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw Zn.error(r),Zn.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}a(Nle,"setLicense");async function wle(){let e={};try{e=await jg.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw Zn.error(r),Zn.error(t),new Error(r)}return e}a(wle,"getFingerprint");async function JN(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");Zn.info("Validating license input...");let r=jg.validateLicense(e,t);if(Zn.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(Zn.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(Zn.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{Zn.info("writing license to disk"),await Sle.writeFile(Ole,JSON.stringify({license_key:e,company:t}))}catch(n){throw Zn.error("Failed to write License"),n}return"Registration successful."}a(JN,"parseLicense");async function Ile(){let e=await Cle();return JN(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}a(Ile,"register");async function Cle(){let e=await jg.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:bG.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:bG.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{OG.start()}catch(n){Zn.error(n)}let r;try{r=await ble(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}a(Cle,"promptForRegistration");async function Ple(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await jg.getLicense()}catch(r){throw Zn.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Ale.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Rle.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=yle.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}a(Ple,"getRegistrationInfo")});var CG=C((aLe,IG)=>{"use strict";var Dle=lt(),ZN=class{static{a(this,"HubConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d,_,h,m){this.port=t,o===null&&(o=void 0),this.server_name=r+Dle.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c}},this.cluster={name:f,port:d,routes:_,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:m}},this.system_account="SYS"}};IG.exports=ZN});var LG=C((lLe,DG)=>{"use strict";var PG=lt(),ew=class{static{a(this,"LeafConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d){this.port=t,d===null&&(d=void 0),this.server_name=r+PG.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+PG.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:f,ca_file:d,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:d,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:d,insecure:!0},urls:o,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};DG.exports=ew});var vG=C((dLe,MG)=>{"use strict";var tw=class{static{a(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};MG.exports=tw});var xG=C((_Le,UG)=>{"use strict";var Lle=lt(),rw=class{static{a(this,"SysUserObject")}constructor(t,r){this.user=t+Lle.SERVER_SUFFIX.ADMIN,this.password=r}};UG.exports=rw});var Zg=C((mLe,kG)=>{"use strict";var Sl=require("path"),Tl=require("fs-extra"),Mle=CG(),vle=LG(),Ule=vG(),xle=xG(),nw=Kn(),_d=ie(),On=Lt(),Jg=(k(),P(q)),Th=lt(),{CONFIG_PARAMS:Xt}=Jg,hd=X(),Ah=oe(),BG=ho(),sw=Gt(),Ble=ti(),fd="clustering",Hle=1e4,HG=50;kG.exports={generateNatsConfig:Fle,removeNatsConfig:qle,getHubConfigPath:kle};function kle(){let e=Ah.get(Xt.ROOTPATH);return Sl.join(e,fd,Th.NATS_CONFIG_FILES.HUB_SERVER)}a(kle,"getHubConfigPath");async function Fle(e=!1,t=void 0){let r=Ah.get(Xt.ROOTPATH);Tl.ensureDirSync(Sl.join(r,"clustering","leaf")),Ah.initSync();let n=On.getConfigFromFile(Xt.CLUSTERING_TLS_CERT_AUTH),s=On.getConfigFromFile(Xt.CLUSTERING_TLS_PRIVATEKEY),i=On.getConfigFromFile(Xt.CLUSTERING_TLS_CERTIFICATE);!await Tl.exists(i)&&!await Tl.exists(!n)&&await Ble.createNatsCerts();let o=Sl.join(r,fd,Th.PID_FILES.HUB),c=Sl.join(r,fd,Th.PID_FILES.LEAF),l=On.getConfigFromFile(Xt.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=Sl.join(r,fd,Th.NATS_CONFIG_FILES.HUB_SERVER),f=Sl.join(r,fd,Th.NATS_CONFIG_FILES.LEAF_SERVER),d=On.getConfigFromFile(Xt.CLUSTERING_TLS_INSECURE),_=On.getConfigFromFile(Xt.CLUSTERING_TLS_VERIFY),h=On.getConfigFromFile(Xt.CLUSTERING_NODENAME),m=On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await sw.checkNATSServerInstalled()||Xg("nats-server dependency is either missing or the wrong 
version. Run 'npm install' to fix");let S=await nw.listUsers(),g=On.getConfigFromFile(Xt.CLUSTERING_USER),R=await nw.getClusterUser();(_d.isEmpty(R)||R.active!==!0)&&Xg(`Invalid cluster user '${g}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await Qg(Xt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await Qg(Xt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await Qg(Xt.CLUSTERING_HUBSERVER_NETWORK_PORT),await Qg(Xt.CLUSTERING_LEAFSERVER_NETWORK_PORT));let E=[],T=[];for(let[se,z]of S.entries())z.role?.role===Jg.ROLE_TYPES_ENUM.CLUSTER_USER&&z.active&&(E.push(new xle(z.username,BG.decrypt(z.hash))),T.push(new Ule(z.username,BG.decrypt(z.hash))));let N=[],{hub_routes:v}=On.getClusteringRoutes();if(!_d.isEmptyOrZeroLength(v))for(let se of v)N.push(`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@${se.host}:${se.port}`);let H=new Mle(On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_NETWORK_PORT),h,o,i,s,n,d,_,m,On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_CLUSTER_NAME),On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,E,T);n==null&&(delete H.tls.ca_file,delete H.leafnodes.tls.ca_file),t=_d.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===Jg.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Tl.writeJson(u,H),hd.trace(`Hub server config written to ${u}`));let Z=`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,W=`tls://${R.uri_encoded_name}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,$=new vle(On.getConfigFromFile(Xt.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[W],E,T,i,s,n,d);n==null&&delete $.tls.ca_file,(t===void 0||t===Jg.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Tl.writeJson(f,$),hd.trace(`Leaf server config written to ${f}`))}a(Fle,"generateNatsConfig");async function Qg(e){let t=Ah.get(e);return _d.isEmpty(t)&&Xg(`port undefined for '${e}'`),await _d.isPortTaken(t)&&Xg(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}a(Qg,"isPortAvailable");function Xg(e){let t=`Error generating clustering config: ${e}`;hd.error(t),console.error(t),process.exit(1)}a(Xg,"generateNatsConfigError");async function qle(e){let{port:t,config_file:r}=sw.getServerConfig(e),{username:n,decrypt_hash:s}=await nw.getClusterUser(),i=0,o=2e3;for(;i<HG;){try{let f=await sw.createConnection(t,n,s,!1);if(f.protocol.connected===!0){f.close();break}}catch(f){hd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${f}`)}if(i++,i>=HG)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=o*(i*2);u>3e4&&hd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await _d.async_set_timeout(u)}let c="0".repeat(Hle),l=Sl.join(Ah.get(Xt.ROOTPATH),fd,r);await Tl.writeFile(l,c),await Tl.remove(l),hd.notify(e,"started.")}a(qle,"removeNatsConfig")});var KG=C((ELe,VG)=>{"use strict";var es=oe(),Gle=dd(),Ve=(k(),P(q)),Rh=lt(),wo=require("path"),{PACKAGE_ROOT:tS}=gt(),FG=oe(),eS=ie(),md="/dev/null",$le=wo.join(tS,"launchServiceScripts"),qG=wo.join(tS,"utility/scripts"),Vle=wo.join(qG,Ve.HDB_RESTART_SCRIPT),GG=wo.resolve(tS,"dependencies",`${process.platform}-${process.arch}`,Rh.NATS_BINARY_NAME);function $G(){let t=Gle.licenseSearch().ram_allocation||Ve.RAM_ALLOCATION_ENUM.DEFAULT,r=Ve.MEM_SETTING_KEY+t,n={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return eS.noBootFile()&&(n[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=eS.getEnvCliRootPath()),{name:Ve.PROCESS_DESCRIPTORS.HDB,script:Ve.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:tS}}a($G,"generateMainServerConfig");var Kle=9930;function Yle(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=wo.join(e,"clustering",Rh.NATS_CONFIG_FILES.HUB_SERVER),r=wo.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=FG.get(Ve.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=Rh.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==Kle?"-"+n:""),script:GG,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=md,i.error_file=md),i}a(Yle,"generateNatsHubServerConfig");var Wle=9940;function zle(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=wo.join(e,"clustering",Rh.NATS_CONFIG_FILES.LEAF_SERVER),r=wo.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=FG.get(Ve.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=Rh.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==Wle?"-"+n:""),script:GG,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=md,i.error_file=md),i}a(zle,"generateNatsLeafServerConfig");function jle(){es.initSync();let e=wo.join(es.get(Ve.CONFIG_PARAMS.LOGGING_ROOT),Ve.LOG_NAMES.HDB),t={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ve.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:$le,autorestart:!1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=md,t.error_file=md),t}a(jle,"generateClusteringUpgradeV4ServiceConfig");function Qle(){let e={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.RESTART_HDB};return eS.noBootFile()&&(e[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=eS.getEnvCliRootPath()),{...{name:Ve.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:qG},script:Vle}}a(Qle,"generateRestart");function 
Jle(){return{apps:[$G()]}}a(Jle,"generateAllServiceConfigs");VG.exports={generateAllServiceConfigs:Jle,generateMainServerConfig:$G,generateRestart:Qle,generateNatsHubServerConfig:Yle,generateNatsLeafServerConfig:zle,generateClusteringUpgradeV4ServiceConfig:jle}});var bh=C((TLe,i$)=>{"use strict";var nt=(k(),P(q)),Xle=ie(),Co=Zg(),rS=Gt(),Io=lt(),Ba=KG(),nS=oe(),Al=X(),Zle=Ts(),{startWorker:YG,onMessageFromWorkers:eue}=et(),tue=id(),SLe=require("util"),rue=require("child_process"),nue=require("fs"),{execFile:sue}=rue,je;i$.exports={enterPM2Mode:iue,start:Ha,stop:iw,reload:zG,restart:jG,list:ow,describe:XG,connect:Po,kill:uue,startAllServices:due,startService:aw,getUniqueServicesList:ZG,restartAllServices:fue,isServiceRegistered:e$,reloadStopStart:t$,restartHdb:JG,deleteProcess:cue,startClusteringProcesses:n$,startClusteringThreads:s$,isHdbRestartRunning:lue,isClusteringRunning:hue,stopClustering:_ue,reloadClustering:mue,expectedRestartOfChildren:QG};var yh=!1;eue(e=>{e.type==="restart"&&nS.initSync(!0)});function iue(){yh=!0}a(iue,"enterPM2Mode");function Po(){return je||(je=require("pm2")),new Promise((e,t)=>{je.connect((r,n)=>{r&&t(r),e(n)})})}a(Po,"connect");var tn,oue=10,WG;function Ha(e,t=!1){if(yh)return aue(e);let r=sue(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let o=tn.indexOf(r);o>-1&&tn.splice(o,1),!WG&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<oue&&(nue.existsSync(Co.getHubConfigPath())?Ha(e):(await Co.generateNatsConfig(!0),Ha(e),await new Promise(c=>setTimeout(c,3e3)),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let o=nS.get(nt.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,f;for(;l=c.exec(i);){if(l.index&&Io.LOG_LEVEL_HIERARCHY[o]>=Io.LOG_LEVEL_HIERARCHY[f||"info"]){let h=f===Io.LOG_LEVELS.ERR||f===Io.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",h,n,i.slice(u,l.index).trim())}let[d,_]=l;u=l.index+d.length,f=Io.LOG_LEVELS[_]}if(Io.LOG_LEVEL_HIERARCHY[o]>=Io.LOG_LEVEL_HIERARCHY[f||"info"]){let d=f===Io.LOG_LEVELS.ERR||f===Io.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",d,n,i.slice(u).trim())}}if(a(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!tn&&(tn=[],!t)){let i=a(()=>{WG=!0,tn&&(tn.map(o=>o.kill()),process.exit(0))},"kill_children");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}tn.push(r)}a(Ha,"start");function aue(e){return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.start(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(aue,"startWithPM2");function iw(e){if(!yh){for(let t of tn||[])t.name===e&&(tn.splice(tn.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.stop(e,async(n,s)=>{n&&(je.disconnect(),r(n)),je.delete(e,(i,o)=>{i&&(je.disconnect(),r(n)),je.disconnect(),t(o)})})})}a(iw,"stop");function zG(e){return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.reload(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(zG,"reload");function jG(e){if(!yh){QG();for(let t of tn||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.restart(e,(n,s)=>{je.disconnect(),t(s)})})}a(jG,"restart");function QG(){for(let e of tn||[])e.config&&(e.config.restarts=0)}a(QG,"expectedRestartOfChildren");function cue(e){return 
new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.delete(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(cue,"deleteProcess");async function JG(){await Ha(Ba.generateRestart())}a(JG,"restartHdb");async function lue(){let e=await ow();for(let t in e)if(e[t].name===nt.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}a(lue,"isHdbRestartRunning");function ow(){return new Promise(async(e,t)=>{try{await Po()}catch(r){t(r)}je.list((r,n)=>{r&&(je.disconnect(),t(r)),je.disconnect(),e(n)})})}a(ow,"list");function XG(e){return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.describe(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(XG,"describe");function uue(){if(!yh){for(let e of tn||[])e.kill();tn=[];return}return new Promise(async(e,t)=>{try{await Po()}catch(r){t(r)}je.killDaemon((r,n)=>{r&&(je.disconnect(),t(r)),je.disconnect(),e(n)})})}a(uue,"kill");async function due(){try{await n$(),await s$(),await Ha(Ba.generateAllServiceConfigs())}catch(e){throw je?.disconnect(),e}}a(due,"startAllServices");async function aw(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case nt.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=Ba.generateMainServerConfig();break;case nt.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=Ba.generateNatsIngestServiceConfig();break;case nt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=Ba.generateNatsReplyServiceConfig();break;case nt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=Ba.generateNatsHubServerConfig(),await Ha(r,t),await Co.removeNatsConfig(e);return;case nt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=Ba.generateNatsLeafServerConfig(),await Ha(r,t),await Co.removeNatsConfig(e);return;case nt.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=Ba.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await Ha(r)}catch(r){throw je?.disconnect(),r}}a(aw,"startService");async function ZG(){try{let e=await ow(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw je?.disconnect(),e}}a(ZG,"getUniqueServicesList");async function fue(e=[]){try{let t=!1,r=await ZG();for(let n=0,s=Object.values(r).length;n<s;n++){let o=Object.values(r)[n].name;e.includes(o)||(o===nt.PROCESS_DESCRIPTORS.HDB?t=!0:await jG(o))}t&&await t$(nt.PROCESS_DESCRIPTORS.HDB)}catch(t){throw je?.disconnect(),t}}a(fue,"restartAllServices");async function e$(e){if(tn?.find(r=>r.name===e))return!0;let t=await tue.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}a(e$,"isServiceRegistered");async function t$(e){let t=nS.get(nt.CONFIG_PARAMS.THREADS_COUNT)??nS.get(nt.CONFIG_PARAMS.THREADS),r=await XG(e),n=Xle.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await iw(e),await aw(e)):e===nt.PROCESS_DESCRIPTORS.HDB?await JG():await zG(e)}a(t$,"reloadStopStart");var r$;async function n$(e=!1){for(let t in nt.CLUSTERING_PROCESSES){let r=nt.CLUSTERING_PROCESSES[t];await aw(r,e)}}a(n$,"startClusteringProcesses");async function s$(){r$=YG(nt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:nt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await rS.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await rS.updateLocalStreams();let e=await Zle.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===nt.PRE_4_0_0_VERSION){Al.info("Starting clustering upgrade 4.0.0 
process"),YG(nt.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}a(s$,"startClusteringThreads");async function _ue(){for(let e in nt.CLUSTERING_PROCESSES)if(e!==nt.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===nt.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await r$.terminate();else{let t=nt.CLUSTERING_PROCESSES[e];await iw(t)}}a(_ue,"stopClustering");async function hue(){for(let e in nt.CLUSTERING_PROCESSES){let t=nt.CLUSTERING_PROCESSES[e];if(await e$(t)===!1)return!1}return!0}a(hue,"isClusteringRunning");async function mue(){await Co.generateNatsConfig(!0),await rS.reloadNATSHub(),await rS.reloadNATSLeaf(),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}a(mue,"reloadClustering")});var uw={};Ue(uw,{compactOnStart:()=>pue,copyDb:()=>d$});async function pue(){ka.notify("Running compact on start"),console.log("Running compact on start");let e=(0,cw.get)(x.ROOTPATH),t=new Map,r=Xe();(0,lw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,sS.join)(e,"backup",n+".mdb"),o=(0,sS.join)(e,mc,n+"-copy.mdb"),c=0;try{c=await o$(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){ka.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{db_path:s,copy_dest:o,backup_dest:i,record_count:c}),await d$(n,o),console.log("Backing up",n,"to",i),await(0,Rl.move)(s,i,{overwrite:!0})}try{Ed()}catch(n){ka.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}for(let[n,{db_path:s,copy_dest:i}]of t)console.log("Moving copy compacted",n,"to",s),await(0,Rl.move)(i,s,{overwrite:!0}),await(0,Rl.remove)((0,sS.join)(e,mc,`${n}-copy.mdb-lock`));try{Ed()}catch(n){ka.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){ka.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,lw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);for(let[s,{db_path:i,backup_dest:o}]of t){console.error("Moving backup database",o,"back to",i);try{await(0,Rl.move)(o,i,{overwrite:!0})}catch(c){console.error(c)}}throw Ed(),n}for(let[n,{backup_dest:s,record_count:i}]of t){let o=!0,c=await o$(n);if(console.log("Database",n,"after compact has a total record count of",c),i!==c){o=!1;let l=`There is a discrepancy between pre and post compact record count for database ${n}.
25
- Total record count before compaction: ${i}, total after: ${c}.
26
- Database backup has not been removed and can be found here: ${s}`;ka.error(l),console.error(l)}(0,cw.get)(x.STORAGE_COMPACTONSTARTKEEPBACKUP)===!0||o===!1||(console.log("Removing backup",s),await(0,Rl.remove)(s))}}async function o$(e){let t=await(0,u$.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function pd(){}async function d$(e,t){console.log(`Copying database ${e} to ${t}`);let r=Xe()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let d in r){let _=r[d];_.primaryStore.put=pd,_.primaryStore.remove=pd;for(let h in _.indices){let m=_.indices[h];m.put=pd,m.remove=pd}_.auditStore&&(_.auditStore.put=pd,_.auditStore.remove=pd),n=_.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,o=(0,a$.open)(new c$.default(t)),c=o.openDB(iS.INTERNAL_DBIS_NAME),l,u=0,f=s.useReadTransaction();try{for(let{key:_,value:h}of s.getRange({transaction:f})){let m=h.is_hash_attribute||h.isPrimaryKey,S,g;if(m&&(S=h.compression,g=oS(),g?h.compression=g:delete h.compression,S?.dictionary?.toString()===g?.dictionary?.toString()&&(S=null,g=null)),c.put(_,h),!(m||h.indexed))continue;let R=new l$.default(!m,m);R.encoding="binary",R.compression=S;let E=n.openDB(_,R);E.decoder=null,E.decoderCopies=!1,E.encoding="binary",R.compression=g;let T=o.openDB(_,R);T.encoder=null,console.log("copying",_,"from",e,"to",t),await d(E,T,m,f)}if(i){let _=n.openDB(iS.AUDIT_STORE_NAME,Oh);console.log("copying audit log for",e,"to",t),d(i,_,!1,f)}async function d(_,h,m,S){let g=0,R=0,E=0,T=1e7,N=null;for(;T-- >0;)try{for(let v of _.getKeys({start:N,transaction:S}))try{N=v;let{value:H,version:Z}=_.getEntry(v,{transaction:S});if(H?.length<14&&m){E++;continue}l=h.put(v,H,m?Z:void 0),g++,S.openTimer&&(S.openTimer=0),R+=(v?.length||10)+H.length,u++>5e3&&(await l,console.log("copied",g,"entries, skipped",E,"delete records,",R,"bytes"),u=0)}catch(H){console.error("Error copying record",typeof v=="symbol"?"symbol":v,"from",e,"to",t,H)}console.log("finish copying, copied",g,"entries, skipped",E,"delete records,",R,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}a(d,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{f.done(),o.close()}}var a$,sS,Rl,cw,c$,l$,iS,u$,lw,ka,dw=Re(()=>{xe();a$=require("lmdb"),sS=require("path"),Rl=require("fs-extra"),cw=M(oe()),c$=M(zf()),l$=M(Wf()),iS=M(qt());k();Ai();u$=M(Ii()),lw=M(Lt()),ka=M(X());a(pue,"compactOnStart");a(o$,"getTotalDBRecordCount");a(pd,"noop");a(d$,"copyDb")});var Sd=C((CLe,g$)=>{"use strict";var Eue=require("minimist"),{isMainThread:_w,parentPort:wh,threadId:NLe}=require("worker_threads"),at=(k(),P(q)),Fi=X(),hw=ie(),cS=Zg(),aS=Gt(),wLe=lt(),m$=Lt(),ni=bh(),f$=id(),{compactOnStart:gue}=(dw(),P(uw)),Sue=pc(),{restartWorkers:lS,onMessageByType:Tue}=et(),{handleHDBError:Aue,hdb_errors:Rue}=he(),{HTTP_STATUS_CODES:yue}=Rue,Ih=oe(),{sendOperationToNode:_$,getThisNodeName:bue,monitorNodeCAs:Oue}=(Es(),P(wa)),{getHDBNodeTable:ILe}=(sl(),P(bO));Ih.initSync();var Nh=`Restarting HarperDB. This may take up to ${at.RESTART_TIMEOUT_MS/1e3} seconds.`,Nue="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",h$="Clustering is not enabled so cannot be restarted",wue="Invalid service",gd,ys;g$.exports={restart:p$,restartService:mw};_w&&Tue(at.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await mw({service:e.workerType}):p$({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function p$(e){ys=Object.keys(e).length===0,gd=await ni.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB);let t=Eue(process.argv);if(t.service){await mw(t);return}if(ys&&!gd){console.error(Nue);return}if(ys&&console.log(Nh),gd){ni.enterPM2Mode(),Fi.notify(Nh);let r=Sue(Object.keys(at.CONFIG_PARAM_MAP),!0);return hw.isEmptyOrZeroLength(Object.keys(r))||m$.updateConfigValue(void 0,void 0,r,!0,!0),Iue(),Nh}return _w?(Fi.notify(Nh),Ih.get(at.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await gue(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{lS()},50)):wh.postMessage({type:at.ITC_EVENT_TYPES.RESTART}),Nh}a(p$,"restart");async function mw(e){let{service:t}=e;if(at.HDB_PROCESS_SERVICES[t]===void 0)throw Aue(new Error,wue,yue.BAD_REQUEST,void 0,void 0,!0);if(ni.expectedRestartOfChildren(),gd=await ni.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB),!_w){e.replicated&&Oue(),wh.postMessage({type:at.ITC_EVENT_TYPES.RESTART,workerType:t}),wh.ref(),await new Promise(s=>{wh.on("message",i=>{i.type==="restart-complete"&&(s(),wh.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===bue())continue;let i;try{({job_id:i}=await _$(s,e))}catch(o){n.push({node:s.name,message:o.message});continue}n.push(await new Promise((o,c)=>{let u=2400,f=setInterval(async()=>{if(u--<=0){clearInterval(f);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let _=(await _$(s,{operation:"get_job",id:i})).results[0];if(_.status==="COMPLETE"&&(clearInterval(f),o({node:s.name,message:_.message})),_.status==="ERROR"){clearInterval(f);let h=new Error(_.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case at.HDB_PROCESS_SERVICES.clustering:if(!Ih.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=h$;break}ys&&console.log("Restarting clustering"),Fi.notify("Restarting clustering"),await E$();break;case at.HDB_PROCESS_SERVICES.clustering_config:case at.HDB_PROCESS_SERVICES["clustering config"]:if(!Ih.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=h$;break}ys&&console.log("Restarting clustering_config"),Fi.notify("Restarting clustering_config"),await ni.reloadClustering();break;case"custom_functions":case"custom functions":case at.HDB_PROCESS_SERVICES.harperdb:case at.HDB_PROCESS_SERVICES.http_workers:case at.HDB_PROCESS_SERVICES.http:if(ys&&!gd){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}ys&&console.log("Restarting http_workers"),Fi.notify("Restarting http_workers"),ys?await ni.restart(at.PROCESS_DESCRIPTORS.HDB):await lS("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(Fi.error(r),ys&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}a(mw,"restartService");async function Iue(){await E$(),await ni.restart(at.PROCESS_DESCRIPTORS.HDB),await hw.async_set_timeout(2e3),Ih.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await fw(),ys&&(await aS.closeConnection(),process.exit(0))}a(Iue,"restartPM2Mode");async function E$(){if(!m$.getConfigFromFile(at.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await f$.getHDBProcessInfo()).clustering.length===0)Fi.trace("Clustering not running, restart will start clustering services"),await cS.generateNatsConfig(!0),await ni.startClusteringProcesses(),await ni.startClusteringThreads(),await fw(),ys&&await aS.closeConnection();else{await cS.generateNatsConfig(!0),gd?(Fi.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await ni.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await ni.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await f$.getHDBProcessInfo()).clustering.forEach(s=>{Fi.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await hw.async_set_timeout(3e3),await fw(),await aS.updateLocalStreams(),ys&&await aS.closeConnection(),Fi.trace("Restart clustering restarting ingest and reply service threads");let t=lS(at.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=lS(at.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}a(E$,"restartClustering");async function fw(){await cS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await cS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}a(fw,"removeNatsConfig")});var C$=C((LLe,I$)=>{"use strict";var DLe=require("lodash"),Nn=(k(),P(q)),{handleHDBError:S$,hdb_errors:Cue}=he(),{HDB_ERROR_MSGS:Pue,HTTP_STATUS_CODES:Due}=Cue,pw=X();I$.exports={getRolePermissions:Mue};var yl=Object.create(null),Lue=a(e=>({key:e,perms:{}}),"perms_template_obj"),y$=a((e=!1)=>({describe:e,tables:{}}),"schema_perms_template"),b$=a((e=!1,t=!1,r=!1,n=!1)=>({[Nn.PERMS_CRUD_ENUM.READ]:e,[Nn.PERMS_CRUD_ENUM.INSERT]:t,[Nn.PERMS_CRUD_ENUM.UPDATE]:r,[Nn.PERMS_CRUD_ENUM.DELETE]:n}),"permissions_template"),Ew=a((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...b$(t,r,n,s)}),"table_perms_template"),T$=a((e,t=b$())=>({attribute_name:e,describe:w$(t),[Ch]:t[Ch],[gw]:t[gw],[Sw]:t[Sw]}),"attr_perms_template"),A$=a((e,t=!1)=>({attribute_name:e,describe:t,[Ch]:t}),"timestamp_attr_perms_template"),{READ:Ch,INSERT:gw,UPDATE:Sw}=Nn.PERMS_CRUD_ENUM,O$=Object.values(Nn.PERMS_CRUD_ENUM),N$=[Ch,gw,Sw];function Mue(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Nn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(yl[t]&&yl[t].key===n)return yl[t].perms;let s=vue(e,r);return yl[t]?yl[t].key=n:yl[t]=Lue(n),yl[t].perms=s,s}catch(r){if(!e[Nn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Nn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Nn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${t}' must be updated to align with new structure from the 2.2.0 release.`;throw pw.error(n),pw.debug(r),S$(new Error,Pue.OUTDATED_PERMS_TRANSLATION_ERROR,Due.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
24
+ `)}catch(r){r.code==="ENOENT"?en.debug("no license file found"):en.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(ile.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=RG(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){en.error("There was an error parsing the license string."),en.error(s),e.ram_allocation=gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return KN=e,e}a(jN,"licenseSearch");async function ple(){return KN||await jN(),KN}a(ple,"getLicense");function Ele(){let e=jN().ram_allocation,t=process.constrainedMemory?.()||gG();if(t=Math.round(Math.min(t,gG())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}a(Ele,"checkMemoryLimit")});var XN=C((iLe,wG)=>{var jg=dd(),bG=require("chalk"),Zn=X(),OG=require("prompt"),{promisify:gle}=require("util"),QN=(k(),P(q)),Sle=require("fs-extra"),Tle=require("path"),Ale=ie(),{packageJson:Rle}=gt(),NG=oe();NG.initSync();var yle=require("moment"),ble=gle(OG.get),Ole=Tle.join(NG.getHdbBasePath(),QN.LICENSE_KEY_DIR_NAME,QN.LICENSE_FILE_NAME,QN.LICENSE_FILE_NAME);wG.exports={getFingerprint:wle,setLicense:Nle,parseLicense:JN,register:Ile,getRegistrationInfo:Ple};async function Nle(e){if(e&&e.key&&e.company){try{Zn.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await JN(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw Zn.error(r),Zn.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}a(Nle,"setLicense");async function wle(){let e={};try{e=await jg.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw Zn.error(r),Zn.error(t),new Error(r)}return e}a(wle,"getFingerprint");async function JN(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");Zn.info("Validating license input...");let r=jg.validateLicense(e,t);if(Zn.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(Zn.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(Zn.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{Zn.info("writing license to disk"),await Sle.writeFile(Ole,JSON.stringify({license_key:e,company:t}))}catch(n){throw Zn.error("Failed to write License"),n}return"Registration successful."}a(JN,"parseLicense");async function Ile(){let e=await Cle();return JN(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}a(Ile,"register");async function Cle(){let e=await jg.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:bG.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:bG.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{OG.start()}catch(n){Zn.error(n)}let r;try{r=await ble(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}a(Cle,"promptForRegistration");async function Ple(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await jg.getLicense()}catch(r){throw Zn.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Ale.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Rle.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=yle.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}a(Ple,"getRegistrationInfo")});var CG=C((aLe,IG)=>{"use strict";var Dle=lt(),ZN=class{static{a(this,"HubConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d,_,h,m){this.port=t,o===null&&(o=void 0),this.server_name=r+Dle.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c}},this.cluster={name:f,port:d,routes:_,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:m}},this.system_account="SYS"}};IG.exports=ZN});var LG=C((lLe,DG)=>{"use strict";var PG=lt(),ew=class{static{a(this,"LeafConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d){this.port=t,d===null&&(d=void 0),this.server_name=r+PG.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+PG.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:f,ca_file:d,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:d,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:d,insecure:!0},urls:o,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};DG.exports=ew});var vG=C((dLe,MG)=>{"use strict";var tw=class{static{a(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};MG.exports=tw});var xG=C((_Le,UG)=>{"use strict";var Lle=lt(),rw=class{static{a(this,"SysUserObject")}constructor(t,r){this.user=t+Lle.SERVER_SUFFIX.ADMIN,this.password=r}};UG.exports=rw});var Zg=C((mLe,kG)=>{"use strict";var Sl=require("path"),Tl=require("fs-extra"),Mle=CG(),vle=LG(),Ule=vG(),xle=xG(),nw=Kn(),_d=ie(),On=Lt(),Jg=(k(),P(q)),Th=lt(),{CONFIG_PARAMS:Xt}=Jg,hd=X(),Ah=oe(),BG=ho(),sw=Gt(),Ble=ti(),fd="clustering",Hle=1e4,HG=50;kG.exports={generateNatsConfig:Fle,removeNatsConfig:qle,getHubConfigPath:kle};function kle(){let e=Ah.get(Xt.ROOTPATH);return Sl.join(e,fd,Th.NATS_CONFIG_FILES.HUB_SERVER)}a(kle,"getHubConfigPath");async function Fle(e=!1,t=void 0){let r=Ah.get(Xt.ROOTPATH);Tl.ensureDirSync(Sl.join(r,"clustering","leaf")),Ah.initSync();let n=On.getConfigFromFile(Xt.CLUSTERING_TLS_CERT_AUTH),s=On.getConfigFromFile(Xt.CLUSTERING_TLS_PRIVATEKEY),i=On.getConfigFromFile(Xt.CLUSTERING_TLS_CERTIFICATE);!await Tl.exists(i)&&!await Tl.exists(!n)&&await Ble.createNatsCerts();let o=Sl.join(r,fd,Th.PID_FILES.HUB),c=Sl.join(r,fd,Th.PID_FILES.LEAF),l=On.getConfigFromFile(Xt.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=Sl.join(r,fd,Th.NATS_CONFIG_FILES.HUB_SERVER),f=Sl.join(r,fd,Th.NATS_CONFIG_FILES.LEAF_SERVER),d=On.getConfigFromFile(Xt.CLUSTERING_TLS_INSECURE),_=On.getConfigFromFile(Xt.CLUSTERING_TLS_VERIFY),h=On.getConfigFromFile(Xt.CLUSTERING_NODENAME),m=On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await sw.checkNATSServerInstalled()||Xg("nats-server dependency is either missing or the wrong 
version. Run 'npm install' to fix");let S=await nw.listUsers(),g=On.getConfigFromFile(Xt.CLUSTERING_USER),R=await nw.getClusterUser();(_d.isEmpty(R)||R.active!==!0)&&Xg(`Invalid cluster user '${g}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await Qg(Xt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await Qg(Xt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await Qg(Xt.CLUSTERING_HUBSERVER_NETWORK_PORT),await Qg(Xt.CLUSTERING_LEAFSERVER_NETWORK_PORT));let E=[],T=[];for(let[se,z]of S.entries())z.role?.role===Jg.ROLE_TYPES_ENUM.CLUSTER_USER&&z.active&&(E.push(new xle(z.username,BG.decrypt(z.hash))),T.push(new Ule(z.username,BG.decrypt(z.hash))));let N=[],{hub_routes:v}=On.getClusteringRoutes();if(!_d.isEmptyOrZeroLength(v))for(let se of v)N.push(`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@${se.host}:${se.port}`);let H=new Mle(On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_NETWORK_PORT),h,o,i,s,n,d,_,m,On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_CLUSTER_NAME),On.getConfigFromFile(Xt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,E,T);n==null&&(delete H.tls.ca_file,delete H.leafnodes.tls.ca_file),t=_d.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===Jg.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Tl.writeJson(u,H),hd.trace(`Hub server config written to ${u}`));let Z=`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,W=`tls://${R.uri_encoded_name}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,$=new vle(On.getConfigFromFile(Xt.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[W],E,T,i,s,n,d);n==null&&delete $.tls.ca_file,(t===void 0||t===Jg.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Tl.writeJson(f,$),hd.trace(`Leaf server config written to ${f}`))}a(Fle,"generateNatsConfig");async function Qg(e){let t=Ah.get(e);return _d.isEmpty(t)&&Xg(`port undefined for '${e}'`),await _d.isPortTaken(t)&&Xg(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}a(Qg,"isPortAvailable");function Xg(e){let t=`Error generating clustering config: ${e}`;hd.error(t),console.error(t),process.exit(1)}a(Xg,"generateNatsConfigError");async function qle(e){let{port:t,config_file:r}=sw.getServerConfig(e),{username:n,decrypt_hash:s}=await nw.getClusterUser(),i=0,o=2e3;for(;i<HG;){try{let f=await sw.createConnection(t,n,s,!1);if(f.protocol.connected===!0){f.close();break}}catch(f){hd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${f}`)}if(i++,i>=HG)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=o*(i*2);u>3e4&&hd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await _d.async_set_timeout(u)}let c="0".repeat(Hle),l=Sl.join(Ah.get(Xt.ROOTPATH),fd,r);await Tl.writeFile(l,c),await Tl.remove(l),hd.notify(e,"started.")}a(qle,"removeNatsConfig")});var KG=C((ELe,VG)=>{"use strict";var es=oe(),Gle=dd(),Ve=(k(),P(q)),Rh=lt(),wo=require("path"),{PACKAGE_ROOT:tS}=gt(),FG=oe(),eS=ie(),md="/dev/null",$le=wo.join(tS,"launchServiceScripts"),qG=wo.join(tS,"utility/scripts"),Vle=wo.join(qG,Ve.HDB_RESTART_SCRIPT),GG=wo.resolve(tS,"dependencies",`${process.platform}-${process.arch}`,Rh.NATS_BINARY_NAME);function $G(){let t=Gle.licenseSearch().ram_allocation||Ve.RAM_ALLOCATION_ENUM.DEFAULT,r=Ve.MEM_SETTING_KEY+t,n={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return eS.noBootFile()&&(n[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=eS.getEnvCliRootPath()),{name:Ve.PROCESS_DESCRIPTORS.HDB,script:Ve.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:tS}}a($G,"generateMainServerConfig");var Kle=9930;function Yle(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=wo.join(e,"clustering",Rh.NATS_CONFIG_FILES.HUB_SERVER),r=wo.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=FG.get(Ve.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=Rh.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==Kle?"-"+n:""),script:GG,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=md,i.error_file=md),i}a(Yle,"generateNatsHubServerConfig");var Wle=9940;function zle(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=wo.join(e,"clustering",Rh.NATS_CONFIG_FILES.LEAF_SERVER),r=wo.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=FG.get(Ve.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=Rh.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==Wle?"-"+n:""),script:GG,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=md,i.error_file=md),i}a(zle,"generateNatsLeafServerConfig");function jle(){es.initSync();let e=wo.join(es.get(Ve.CONFIG_PARAMS.LOGGING_ROOT),Ve.LOG_NAMES.HDB),t={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ve.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:$le,autorestart:!1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=md,t.error_file=md),t}a(jle,"generateClusteringUpgradeV4ServiceConfig");function Qle(){let e={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.RESTART_HDB};return eS.noBootFile()&&(e[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=eS.getEnvCliRootPath()),{...{name:Ve.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:qG},script:Vle}}a(Qle,"generateRestart");function 
Jle(){return{apps:[$G()]}}a(Jle,"generateAllServiceConfigs");VG.exports={generateAllServiceConfigs:Jle,generateMainServerConfig:$G,generateRestart:Qle,generateNatsHubServerConfig:Yle,generateNatsLeafServerConfig:zle,generateClusteringUpgradeV4ServiceConfig:jle}});var bh=C((TLe,i$)=>{"use strict";var nt=(k(),P(q)),Xle=ie(),Co=Zg(),rS=Gt(),Io=lt(),Ba=KG(),nS=oe(),Al=X(),Zle=Ts(),{startWorker:YG,onMessageFromWorkers:eue}=et(),tue=id(),SLe=require("util"),rue=require("child_process"),nue=require("fs"),{execFile:sue}=rue,je;i$.exports={enterPM2Mode:iue,start:Ha,stop:iw,reload:zG,restart:jG,list:ow,describe:XG,connect:Po,kill:uue,startAllServices:due,startService:aw,getUniqueServicesList:ZG,restartAllServices:fue,isServiceRegistered:e$,reloadStopStart:t$,restartHdb:JG,deleteProcess:cue,startClusteringProcesses:n$,startClusteringThreads:s$,isHdbRestartRunning:lue,isClusteringRunning:hue,stopClustering:_ue,reloadClustering:mue,expectedRestartOfChildren:QG};var yh=!1;eue(e=>{e.type==="restart"&&nS.initSync(!0)});function iue(){yh=!0}a(iue,"enterPM2Mode");function Po(){return je||(je=require("pm2")),new Promise((e,t)=>{je.connect((r,n)=>{r&&t(r),e(n)})})}a(Po,"connect");var tn,oue=10,WG;function Ha(e,t=!1){if(yh)return aue(e);let r=sue(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let o=tn.indexOf(r);o>-1&&tn.splice(o,1),!WG&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<oue&&(nue.existsSync(Co.getHubConfigPath())?Ha(e):(await Co.generateNatsConfig(!0),Ha(e),await new Promise(c=>setTimeout(c,3e3)),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let o=nS.get(nt.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,f;for(;l=c.exec(i);){if(l.index&&Io.LOG_LEVEL_HIERARCHY[o]>=Io.LOG_LEVEL_HIERARCHY[f||"info"]){let h=f===Io.LOG_LEVELS.ERR||f===Io.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",h,n,i.slice(u,l.index).trim())}let[d,_]=l;u=l.index+d.length,f=Io.LOG_LEVELS[_]}if(Io.LOG_LEVEL_HIERARCHY[o]>=Io.LOG_LEVEL_HIERARCHY[f||"info"]){let d=f===Io.LOG_LEVELS.ERR||f===Io.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",d,n,i.slice(u).trim())}}if(a(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!tn&&(tn=[],!t)){let i=a(()=>{WG=!0,tn&&(tn.map(o=>o.kill()),process.exit(0))},"kill_children");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}tn.push(r)}a(Ha,"start");function aue(e){return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.start(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(aue,"startWithPM2");function iw(e){if(!yh){for(let t of tn||[])t.name===e&&(tn.splice(tn.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.stop(e,async(n,s)=>{n&&(je.disconnect(),r(n)),je.delete(e,(i,o)=>{i&&(je.disconnect(),r(n)),je.disconnect(),t(o)})})})}a(iw,"stop");function zG(e){return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.reload(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(zG,"reload");function jG(e){if(!yh){QG();for(let t of tn||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.restart(e,(n,s)=>{je.disconnect(),t(s)})})}a(jG,"restart");function QG(){for(let e of tn||[])e.config&&(e.config.restarts=0)}a(QG,"expectedRestartOfChildren");function cue(e){return 
new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.delete(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(cue,"deleteProcess");async function JG(){await Ha(Ba.generateRestart())}a(JG,"restartHdb");async function lue(){let e=await ow();for(let t in e)if(e[t].name===nt.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}a(lue,"isHdbRestartRunning");function ow(){return new Promise(async(e,t)=>{try{await Po()}catch(r){t(r)}je.list((r,n)=>{r&&(je.disconnect(),t(r)),je.disconnect(),e(n)})})}a(ow,"list");function XG(e){return new Promise(async(t,r)=>{try{await Po()}catch(n){r(n)}je.describe(e,(n,s)=>{n&&(je.disconnect(),r(n)),je.disconnect(),t(s)})})}a(XG,"describe");function uue(){if(!yh){for(let e of tn||[])e.kill();tn=[];return}return new Promise(async(e,t)=>{try{await Po()}catch(r){t(r)}je.killDaemon((r,n)=>{r&&(je.disconnect(),t(r)),je.disconnect(),e(n)})})}a(uue,"kill");async function due(){try{await n$(),await s$(),await Ha(Ba.generateAllServiceConfigs())}catch(e){throw je?.disconnect(),e}}a(due,"startAllServices");async function aw(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case nt.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=Ba.generateMainServerConfig();break;case nt.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=Ba.generateNatsIngestServiceConfig();break;case nt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=Ba.generateNatsReplyServiceConfig();break;case nt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=Ba.generateNatsHubServerConfig(),await Ha(r,t),await Co.removeNatsConfig(e);return;case nt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=Ba.generateNatsLeafServerConfig(),await Ha(r,t),await Co.removeNatsConfig(e);return;case nt.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=Ba.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await Ha(r)}catch(r){throw je?.disconnect(),r}}a(aw,"startService");async function ZG(){try{let e=await ow(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw je?.disconnect(),e}}a(ZG,"getUniqueServicesList");async function fue(e=[]){try{let t=!1,r=await ZG();for(let n=0,s=Object.values(r).length;n<s;n++){let o=Object.values(r)[n].name;e.includes(o)||(o===nt.PROCESS_DESCRIPTORS.HDB?t=!0:await jG(o))}t&&await t$(nt.PROCESS_DESCRIPTORS.HDB)}catch(t){throw je?.disconnect(),t}}a(fue,"restartAllServices");async function e$(e){if(tn?.find(r=>r.name===e))return!0;let t=await tue.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}a(e$,"isServiceRegistered");async function t$(e){let t=nS.get(nt.CONFIG_PARAMS.THREADS_COUNT)??nS.get(nt.CONFIG_PARAMS.THREADS),r=await XG(e),n=Xle.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await iw(e),await aw(e)):e===nt.PROCESS_DESCRIPTORS.HDB?await JG():await zG(e)}a(t$,"reloadStopStart");var r$;async function n$(e=!1){for(let t in nt.CLUSTERING_PROCESSES){let r=nt.CLUSTERING_PROCESSES[t];await aw(r,e)}}a(n$,"startClusteringProcesses");async function s$(){r$=YG(nt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:nt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await rS.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await rS.updateLocalStreams();let e=await Zle.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===nt.PRE_4_0_0_VERSION){Al.info("Starting clustering upgrade 4.0.0 
process"),YG(nt.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}a(s$,"startClusteringThreads");async function _ue(){for(let e in nt.CLUSTERING_PROCESSES)if(e!==nt.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===nt.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await r$.terminate();else{let t=nt.CLUSTERING_PROCESSES[e];await iw(t)}}a(_ue,"stopClustering");async function hue(){for(let e in nt.CLUSTERING_PROCESSES){let t=nt.CLUSTERING_PROCESSES[e];if(await e$(t)===!1)return!1}return!0}a(hue,"isClusteringRunning");async function mue(){await Co.generateNatsConfig(!0),await rS.reloadNATSHub(),await rS.reloadNATSLeaf(),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await Co.removeNatsConfig(nt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}a(mue,"reloadClustering")});var uw={};Ue(uw,{compactOnStart:()=>pue,copyDb:()=>d$});async function pue(){ka.notify("Running compact on start"),console.log("Running compact on start");let e=(0,cw.get)(x.ROOTPATH),t=new Map,r=Xe();(0,lw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,sS.join)(e,"backup",n+".mdb"),o=(0,sS.join)(e,mc,n+"-copy.mdb"),c=0;try{c=await o$(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){ka.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{db_path:s,copy_dest:o,backup_dest:i,record_count:c}),await d$(n,o),console.log("Backing up",n,"to",i),await(0,Rl.move)(s,i,{overwrite:!0}),console.log("Moving copy compacted",n,"to",s),await(0,Rl.move)(o,s,{overwrite:!0}),await(0,Rl.remove)((0,sS.join)(e,mc,`${n}-copy.mdb-lock`))}try{Ed()}catch(n){ka.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}try{Ed()}catch(n){ka.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){ka.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,lw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);for(let[s,{db_path:i,backup_dest:o}]of t){console.error("Moving backup database",o,"back to",i);try{await(0,Rl.move)(o,i,{overwrite:!0})}catch(c){console.error(c)}}throw Ed(),n}for(let[n,{backup_dest:s,record_count:i}]of t){let o=await o$(n);if(console.log("Database",n,"after compact has a total record count of",o),i!==o){let c=`There is a discrepancy between pre and post compact record count for database ${n}.
+ Total record count before compaction: ${i}, total after: ${o}.
+ Database backup has not been removed and can be found here: ${s}`;ka.error(c),console.error(c)}(0,cw.get)(x.STORAGE_COMPACTONSTARTKEEPBACKUP)!==!0&&(console.log("Removing backup",s),await(0,Rl.remove)(s))}}async function o$(e){let t=await(0,u$.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function pd(){}async function d$(e,t){console.log(`Copying database ${e} to ${t}`);let r=Xe()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let d in r){let _=r[d];_.primaryStore.put=pd,_.primaryStore.remove=pd;for(let h in _.indices){let m=_.indices[h];m.put=pd,m.remove=pd}_.auditStore&&(_.auditStore.put=pd,_.auditStore.remove=pd),n=_.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,o=(0,a$.open)(new c$.default(t)),c=o.openDB(iS.INTERNAL_DBIS_NAME),l,u=0,f=s.useReadTransaction();try{for(let{key:_,value:h}of s.getRange({transaction:f})){let m=h.is_hash_attribute||h.isPrimaryKey,S,g;if(m&&(S=h.compression,g=oS(),g?h.compression=g:delete h.compression,S?.dictionary?.toString()===g?.dictionary?.toString()&&(S=null,g=null)),c.put(_,h),!(m||h.indexed))continue;let R=new l$.default(!m,m);R.encoding="binary",R.compression=S;let E=n.openDB(_,R);E.decoder=null,E.decoderCopies=!1,E.encoding="binary",R.compression=g;let T=o.openDB(_,R);T.encoder=null,console.log("copying",_,"from",e,"to",t),await d(E,T,m,f)}if(i){let _=n.openDB(iS.AUDIT_STORE_NAME,Oh);console.log("copying audit log for",e,"to",t),d(i,_,!1,f)}async function d(_,h,m,S){let g=0,R=0,E=0,T=1e7,N=null;for(;T-- >0;)try{for(let v of _.getKeys({start:N,transaction:S}))try{N=v;let{value:H,version:Z}=_.getEntry(v,{transaction:S});if(H?.length<14&&m){E++;continue}l=h.put(v,H,m?Z:void 0),g++,S.openTimer&&(S.openTimer=0),R+=(v?.length||10)+H.length,u++>5e3&&(await l,console.log("copied",g,"entries, skipped",E,"delete records,",R,"bytes"),u=0)}catch(H){console.error("Error copying record",typeof v=="symbol"?"symbol":v,"from",e,"to",t,H)}console.log("finish copying, copied",g,"entries, skipped",E,"delete records,",R,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}a(d,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{f.done(),o.close()}}var a$,sS,Rl,cw,c$,l$,iS,u$,lw,ka,dw=Re(()=>{xe();a$=require("lmdb"),sS=require("path"),Rl=require("fs-extra"),cw=M(oe()),c$=M(zf()),l$=M(Wf()),iS=M(qt());k();Ai();u$=M(Ii()),lw=M(Lt()),ka=M(X());a(pue,"compactOnStart");a(o$,"getTotalDBRecordCount");a(pd,"noop");a(d$,"copyDb")});var Sd=C((CLe,g$)=>{"use strict";var Eue=require("minimist"),{isMainThread:_w,parentPort:wh,threadId:NLe}=require("worker_threads"),at=(k(),P(q)),Fi=X(),hw=ie(),cS=Zg(),aS=Gt(),wLe=lt(),m$=Lt(),ni=bh(),f$=id(),{compactOnStart:gue}=(dw(),P(uw)),Sue=pc(),{restartWorkers:lS,onMessageByType:Tue}=et(),{handleHDBError:Aue,hdb_errors:Rue}=he(),{HTTP_STATUS_CODES:yue}=Rue,Ih=oe(),{sendOperationToNode:_$,getThisNodeName:bue,monitorNodeCAs:Oue}=(Es(),P(wa)),{getHDBNodeTable:ILe}=(sl(),P(bO));Ih.initSync();var Nh=`Restarting HarperDB. This may take up to ${at.RESTART_TIMEOUT_MS/1e3} seconds.`,Nue="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",h$="Clustering is not enabled so cannot be restarted",wue="Invalid service",gd,ys;g$.exports={restart:p$,restartService:mw};_w&&Tue(at.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await mw({service:e.workerType}):p$({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function p$(e){ys=Object.keys(e).length===0,gd=await ni.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB);let t=Eue(process.argv);if(t.service){await mw(t);return}if(ys&&!gd){console.error(Nue);return}if(ys&&console.log(Nh),gd){ni.enterPM2Mode(),Fi.notify(Nh);let r=Sue(Object.keys(at.CONFIG_PARAM_MAP),!0);return hw.isEmptyOrZeroLength(Object.keys(r))||m$.updateConfigValue(void 0,void 0,r,!0,!0),Iue(),Nh}return _w?(Fi.notify(Nh),Ih.get(at.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await gue(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{lS()},50)):wh.postMessage({type:at.ITC_EVENT_TYPES.RESTART}),Nh}a(p$,"restart");async function mw(e){let{service:t}=e;if(at.HDB_PROCESS_SERVICES[t]===void 0)throw Aue(new Error,wue,yue.BAD_REQUEST,void 0,void 0,!0);if(ni.expectedRestartOfChildren(),gd=await ni.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB),!_w){e.replicated&&Oue(),wh.postMessage({type:at.ITC_EVENT_TYPES.RESTART,workerType:t}),wh.ref(),await new Promise(s=>{wh.on("message",i=>{i.type==="restart-complete"&&(s(),wh.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===bue())continue;let i;try{({job_id:i}=await _$(s,e))}catch(o){n.push({node:s.name,message:o.message});continue}n.push(await new Promise((o,c)=>{let u=2400,f=setInterval(async()=>{if(u--<=0){clearInterval(f);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let _=(await _$(s,{operation:"get_job",id:i})).results[0];if(_.status==="COMPLETE"&&(clearInterval(f),o({node:s.name,message:_.message})),_.status==="ERROR"){clearInterval(f);let h=new Error(_.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case at.HDB_PROCESS_SERVICES.clustering:if(!Ih.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=h$;break}ys&&console.log("Restarting clustering"),Fi.notify("Restarting clustering"),await E$();break;case at.HDB_PROCESS_SERVICES.clustering_config:case at.HDB_PROCESS_SERVICES["clustering config"]:if(!Ih.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=h$;break}ys&&console.log("Restarting clustering_config"),Fi.notify("Restarting clustering_config"),await ni.reloadClustering();break;case"custom_functions":case"custom functions":case at.HDB_PROCESS_SERVICES.harperdb:case at.HDB_PROCESS_SERVICES.http_workers:case at.HDB_PROCESS_SERVICES.http:if(ys&&!gd){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}ys&&console.log("Restarting http_workers"),Fi.notify("Restarting http_workers"),ys?await ni.restart(at.PROCESS_DESCRIPTORS.HDB):await lS("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(Fi.error(r),ys&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}a(mw,"restartService");async function Iue(){await E$(),await ni.restart(at.PROCESS_DESCRIPTORS.HDB),await hw.async_set_timeout(2e3),Ih.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await fw(),ys&&(await aS.closeConnection(),process.exit(0))}a(Iue,"restartPM2Mode");async function E$(){if(!m$.getConfigFromFile(at.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await f$.getHDBProcessInfo()).clustering.length===0)Fi.trace("Clustering not running, restart will start clustering services"),await cS.generateNatsConfig(!0),await ni.startClusteringProcesses(),await ni.startClusteringThreads(),await fw(),ys&&await aS.closeConnection();else{await cS.generateNatsConfig(!0),gd?(Fi.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await ni.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await ni.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await f$.getHDBProcessInfo()).clustering.forEach(s=>{Fi.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await hw.async_set_timeout(3e3),await fw(),await aS.updateLocalStreams(),ys&&await aS.closeConnection(),Fi.trace("Restart clustering restarting ingest and reply service threads");let t=lS(at.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=lS(at.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}a(E$,"restartClustering");async function fw(){await cS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await cS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}a(fw,"removeNatsConfig")});var C$=C((LLe,I$)=>{"use strict";var DLe=require("lodash"),Nn=(k(),P(q)),{handleHDBError:S$,hdb_errors:Cue}=he(),{HDB_ERROR_MSGS:Pue,HTTP_STATUS_CODES:Due}=Cue,pw=X();I$.exports={getRolePermissions:Mue};var yl=Object.create(null),Lue=a(e=>({key:e,perms:{}}),"perms_template_obj"),y$=a((e=!1)=>({describe:e,tables:{}}),"schema_perms_template"),b$=a((e=!1,t=!1,r=!1,n=!1)=>({[Nn.PERMS_CRUD_ENUM.READ]:e,[Nn.PERMS_CRUD_ENUM.INSERT]:t,[Nn.PERMS_CRUD_ENUM.UPDATE]:r,[Nn.PERMS_CRUD_ENUM.DELETE]:n}),"permissions_template"),Ew=a((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...b$(t,r,n,s)}),"table_perms_template"),T$=a((e,t=b$())=>({attribute_name:e,describe:w$(t),[Ch]:t[Ch],[gw]:t[gw],[Sw]:t[Sw]}),"attr_perms_template"),A$=a((e,t=!1)=>({attribute_name:e,describe:t,[Ch]:t}),"timestamp_attr_perms_template"),{READ:Ch,INSERT:gw,UPDATE:Sw}=Nn.PERMS_CRUD_ENUM,O$=Object.values(Nn.PERMS_CRUD_ENUM),N$=[Ch,gw,Sw];function Mue(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Nn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(yl[t]&&yl[t].key===n)return yl[t].perms;let s=vue(e,r);return yl[t]?yl[t].key=n:yl[t]=Lue(n),yl[t].perms=s,s}catch(r){if(!e[Nn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Nn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Nn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${t}' must be updated to align with new structure from the 2.2.0 release.`;throw pw.error(n),pw.debug(r),S$(new Error,Pue.OUTDATED_PERMS_TRANSLATION_ERROR,Due.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
  ${r.stack}`;throw pw.error(n),S$(new Error)}}}a(Mue,"getRolePermissions");function vue(e,t){let r=Object.create(null);r.super_user=!1;let n=e.permission;r[Nn.SYSTEM_SCHEMA_NAME]=n[Nn.SYSTEM_SCHEMA_NAME],r.structure_user=n.structure_user;let s=Array.isArray(e.permission.structure_user)||e.permission.structure_user===!0?e.permission.structure_user:[];return Object.keys(t).forEach(i=>{if(s===!0||s.indexOf(i)>-1){r[i]=Uue(t[i]);return}r[i]=y$(),n[i]?(n[i].describe&&(r[i].describe=!0),Object.keys(t[i]).forEach(o=>{if(n[i].tables[o]){let c=n[i].tables[o],l=t[i][o],u=xue(c,l);r[i].describe||O$.forEach(f=>{u[f]&&(r[i].describe=!0)}),r[i].tables[o]=u}else r[i].tables[o]=Ew()})):Object.keys(t[i]).forEach(o=>{r[i].tables[o]=Ew()})}),r}a(vue,"translateRolePermissions");function Uue(e){let t=y$(!0);return Object.keys(e).forEach(r=>{t.tables[r]=Ew(!0,!0,!0,!0,!0)}),t}a(Uue,"createStructureUserPermissions");function xue(e,t){let{attribute_permissions:r}=e;if(r?.length>0){let s={...e};s.attribute_permissions=[];let i=r.reduce((u,f)=>{let{attribute_name:d}=f,_=f;return Nn.TIME_STAMP_NAMES.includes(d)&&(_=A$(d,f[Ch])),u[d]=_,u},{}),o=t.primaryKey||t.hash_attribute,c=!!i[o],l=T$(o);return t.attributes.forEach(({attribute:u})=>{if(i[u]){let f=i[u];f.describe=w$(f),s.attribute_permissions.push(f),c||Bue(f,l)}else if(u!==o){let f;Nn.TIME_STAMP_NAMES.includes(u)?f=A$(u):f=T$(u),s.attribute_permissions.push(f)}}),c||s.attribute_permissions.push(l),s.describe=R$(s),s}else return e.describe=R$(e),e}a(xue,"getTableAttrPerms");function R$(e){return O$.filter(t=>e[t]).length>0}a(R$,"getSchemaTableDescribePerm");function w$(e){return N$.filter(t=>e[t]).length>0}a(w$,"getAttributeDescribePerm");function Bue(e,t){N$.forEach(r=>{e[r]&&!t[r]&&(t[r]=!0,t.describe=!0)})}a(Bue,"checkForHashPerms")});var Ph={};Ue(Ph,{authentication:()=>x$,bypassAuth:()=>Kue,login:()=>Wue,logout:()=>zue,start:()=>Yue});function Kue(){U$=!0}async function x$(e,t){let r=e.headers.asObject,n=r.authorization,s=r.cookie,i=r.origin,o=[];try{if(i){let h=e.isOperationsServer?que?Fue:[]:kue?Hue:[];if(h.includes(i)||h.includes("*")){if(e.method==="OPTIONS"){let m=rn.get(x.HTTP_CORSACCESSCONTROLALLOWHEADERS)??"Accept, Content-Type, Authorization",S=new So([["Access-Control-Allow-Methods","POST, GET, PUT, DELETE, PATCH, OPTIONS"],["Access-Control-Allow-Headers",m],["Access-Control-Allow-Origin",i]]);return uS&&S.set("Access-Control-Allow-Credentials","true"),{status:200,headers:S}}o.push("Access-Control-Allow-Origin",i),uS&&o.push("Access-Control-Allow-Credentials","true")}}let l,u;if(uS){i||(i=r.host);let h=(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session=",m=s?.split(/;\s+/)||[];for(let S of m)if(S.startsWith(h)){let g=S.indexOf(";");l=S.slice(h.length,g===-1?S.length:g),u=await P$.get(l);break}e.session=u||(u={})}let f=a((h,m,S)=>{let g=new Td.AuthAuditLog(h,m,Xo.AUTHENTICATION,r["x-forwarded-for"]??e.ip,e.method,e.pathname);g.auth_strategy=S,l&&(g.session_id=l),r.referer&&(g.referer=r.referer),r.origin&&(g.origin=r.origin),m===Hs.SUCCESS?Tw.notify(g):Tw.error(g)},"authAuditLog");if(!e.authorized&&e.mtlsConfig&&e.peerCertificate.subject&&e?._nodeRequest?.socket?.authorizationError&&Tw.error("Authorization error:",e._nodeRequest.socket.authorizationError),e.mtlsConfig&&e.authorized&&e.peerCertificate.subject){let h=e.mtlsConfig.user;h!==null?((h===void 0||h==="Common Name"||h==="CN")&&(h=e.peerCertificate.subject.CN),e.user=await Ye.getUser(h,null,e),f(h,Hs.SUCCESS,"mTLS")):(0,Td.debug)("HTTPS/WSS mTLS authorized connection (mTLS 
did not authorize a user)","from",e.ip)}let d;if(!e.user)if(n){if(d=bl.get(n),!d){let h=n.indexOf(" "),m=n.slice(0,h),S=n.slice(h+1),g,R;try{switch(m){case"Basic":let E=atob(S),T=E.indexOf(":");g=E.slice(0,T),R=E.slice(T+1),d=g||R?await Ye.getUser(g,R,e):null;break;case"Bearer":try{d=await tO(S)}catch(N){if(N.message==="invalid token")try{return await $E(S),c({status:-1})}catch{throw N}}break}}catch(E){return $ue&&(bl.get(S)||(bl.set(S,S),f(g,Hs.FAILURE,m))),c({status:401,body:ca({error:E.message},e)})}bl.set(n,d),Gue&&f(d.username,Hs.SUCCESS,m)}e.user=d}else u?.user?e.user=await Ye.getUser(u.user,null,e):(U$&&(e.ip?.includes("127.0.0.")||e.ip=="::1")||e?._nodeRequest?.socket?.server?._pipeName&&e.ip===void 0)&&(e.user=await(0,L$.getSuperUser)());uS&&(e.session.update=function(h){let m=rn.get(x.AUTHENTICATION_COOKIE_EXPIRES);if(!l){l=(0,M$.v4)();let S=rn.get(x.AUTHENTICATION_COOKIE_DOMAINS),g=m?new Date(Date.now()+(0,Aw.convertToMS)(m)).toUTCString():Vue,R=S?.find(N=>r.host?.endsWith(N)),T=`${(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session="}${l}; Path=/; Expires=${g}; ${R?"Domain="+R+"; ":""}HttpOnly${e.protocol==="https"?"; SameSite=None; Secure":""}`;o?o.push("Set-Cookie",T):_?.headers?.set&&_.headers.set("Set-Cookie",T)}return e.protocol==="https"&&(o?(i&&o.push("Access-Control-Expose-Headers","X-Hdb-Session"),o.push("X-Hdb-Session","Secure")):_?.headers?.set&&(i&&_.headers.set("Access-Control-Expose-Headers","X-Hdb-Session"),_.headers.set("X-Hdb-Session","Secure"))),h.id=l,P$.put(h,{expiresAt:m?Date.now()+(0,Aw.convertToMS)(m):void 0})},e.login=async function(h,m){let S=e.user=await Ye.authenticateUser(h,m,e);e.session.update({user:S&&(S.getId?.()??S.username)})});let _=await t(e);return _&&(_.status===401&&(r["user-agent"]?.startsWith("Mozilla")&&r.accept?.startsWith("text/html")&&fs.loginPath?(_.status=302,_.headers.set("Location",fs.loginPath(e))):_.headers.set("WWW-Authenticate","Basic")),c(_))}catch(l){throw c(l)}function c(l){let u=o.length;if(u>0){let f=l.headers;f||(l.headers=f=new So);for(let d=0;d<u;){let _=o[d++];f.set(_,o[d++])}}return o=null,l}a(c,"applyResponseHeaders")}function Yue({server:e,port:t,securePort:r}){e.http(x$,t||r?{port:t,securePort:r}:{port:"all"}),D$||(D$=!0,setInterval(()=>{bl=new Map},rn.get(x.AUTHENTICATION_CACHETTL)).unref(),v$.user.addListener(()=>{bl=new Map}))}async function Wue(e){if(!e.baseRequest?.login)throw new Error("No session for login");return e.baseResponse.headers.set=(t,r)=>{e.fastifyResponse.header(t,r)},await e.baseRequest.login(e.username,e.password??""),"Login successful"}async function zue(e){if(!e.baseRequest.session)throw new Error("No session for logout");return await e.baseRequest.session.update({user:null}),"Logout successful"}var L$,M$,rn,Td,v$,Aw,Tw,Hue,kue,Fue,que,P$,uS,U$,Gue,$ue,Vue,bl,D$,dS=Re(()=>{L$=M(Kn());Mr();su();Hu();xe();M$=require("uuid"),rn=M(oe());k();Td=M(X()),v$=M(__());U_();Aw=M(ie());io();Tw=(0,Td.loggerWithTag)("auth-event");rn.initSync();Hue=rn.get(x.HTTP_CORSACCESSLIST),kue=rn.get(x.HTTP_CORS),Fue=rn.get(x.OPERATIONSAPI_NETWORK_CORSACCESSLIST),que=rn.get(x.OPERATIONSAPI_NETWORK_CORS),P$=_t({table:"hdb_session",database:"system",attributes:[{name:"id",isPrimaryKey:!0},{name:"user"}]}),uS=rn.get(x.AUTHENTICATION_ENABLESESSIONS)??!0,U$=process.env.AUTHENTICATION_AUTHORIZELOCAL??rn.get(x.AUTHENTICATION_AUTHORIZELOCAL)??process.env.DEV_MODE,Gue=rn.get(x.LOGGING_AUDITAUTHEVENTS_LOGSUCCESSFUL)??!1,$ue=rn.get(x.LOGGING_AUDITAUTHEVENTS_LOGFAILED)??!1,Vue="Tue, 01 Oct 8307 19:33:20 
GMT",bl=new Map;Ye.onInvalidatedUser(()=>{bl=new Map});a(Kue,"bypassAuth");a(x$,"authentication");a(Yue,"start");a(Wue,"login");a(zue,"logout")});var $$=C((GLe,G$)=>{"use strict";var Ne=require("joi"),B$=require("fs-extra"),H$=require("path"),ts=tt(),k$=oe(),F$=(k(),P(q)),q$=X(),{hdb_errors:jue}=he(),{HDB_ERROR_MSGS:nn}=jue,Do=/^[a-zA-Z0-9-_]+$/,Que=/^[a-zA-Z0-9-_]+$/;G$.exports={getDropCustomFunctionValidator:Xue,setCustomFunctionValidator:Zue,addComponentValidator:nde,dropCustomFunctionProjectValidator:sde,packageComponentValidator:ide,deployComponentValidator:ode,setComponentFileValidator:ede,getComponentFileValidator:rde,dropComponentFileValidator:tde,addSSHKeyValidator:ade,updateSSHKeyValidator:cde,deleteSSHKeyValidator:lde,setSSHKnownHostsValidator:ude};function fS(e,t,r){try{let n=k$.get(F$.CONFIG_PARAMS.COMPONENTSROOT),s=H$.join(n,t);return B$.existsSync(s)?e?t:r.message(nn.PROJECT_EXISTS):e?r.message(nn.NO_PROJECT):t}catch(n){return q$.error(n),r.message(nn.VALIDATION_ERR)}}a(fS,"checkProjectExists");function Dh(e,t){return e.includes("..")?t.message("Invalid file path"):e}a(Dh,"checkFilePath");function Jue(e,t,r,n){try{let s=k$.get(F$.CONFIG_PARAMS.COMPONENTSROOT),i=H$.join(s,e,t,r+".js");return B$.existsSync(i)?r:n.message(nn.NO_FILE)}catch(s){return q$.error(s),n.message(nn.VALIDATION_ERR)}}a(Jue,"checkFileExists");function Xue(e){let t=Ne.object({project:Ne.string().pattern(Do).custom(fS.bind(null,!0)).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME}),type:Ne.string().valid("helpers","routes").required(),file:Ne.string().pattern(Do).custom(Jue.bind(null,e.project,e.type)).custom(Dh).required().messages({"string.pattern.base":nn.BAD_FILE_NAME})});return ts.validateBySchema(e,t)}a(Xue,"getDropCustomFunctionValidator");function Zue(e){let t=Ne.object({project:Ne.string().pattern(Do).custom(fS.bind(null,!0)).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME}),type:Ne.string().valid("helpers","routes").required(),file:Ne.string().custom(Dh).required(),function_content:Ne.string().required()});return ts.validateBySchema(e,t)}a(Zue,"setCustomFunctionValidator");function ede(e){let t=Ne.object({project:Ne.string().pattern(Do).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME}),file:Ne.string().custom(Dh).required(),payload:Ne.string().allow("").optional(),encoding:Ne.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ts.validateBySchema(e,t)}a(ede,"setComponentFileValidator");function tde(e){let t=Ne.object({project:Ne.string().pattern(Do).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME}),file:Ne.string().custom(Dh).optional()});return ts.validateBySchema(e,t)}a(tde,"dropComponentFileValidator");function rde(e){let t=Ne.object({project:Ne.string().required(),file:Ne.string().custom(Dh).required(),encoding:Ne.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ts.validateBySchema(e,t)}a(rde,"getComponentFileValidator");function nde(e){let t=Ne.object({project:Ne.string().pattern(Do).custom(fS.bind(null,!1)).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME})});return ts.validateBySchema(e,t)}a(nde,"addComponentValidator");function sde(e){let t=Ne.object({project:Ne.string().pattern(Do).custom(fS.bind(null,!0)).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME})});return ts.validateBySchema(e,t)}a(sde,"dropCustomFunctionProjectValidator");function ide(e){let 
t=Ne.object({project:Ne.string().pattern(Do).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME}),skip_node_modules:Ne.boolean(),skip_symlinks:Ne.boolean()});return ts.validateBySchema(e,t)}a(ide,"packageComponentValidator");function ode(e){let t=Ne.object({project:Ne.string().pattern(Do).required().messages({"string.pattern.base":nn.BAD_PROJECT_NAME}),package:Ne.string().optional(),restart:Ne.alternatives().try(Ne.boolean(),Ne.string().valid("rolling")).optional()});return ts.validateBySchema(e,t)}a(ode,"deployComponentValidator");function ade(e){let t=Ne.object({name:Ne.string().pattern(Que).required().messages({"string.pattern.base":nn.BAD_SSH_KEY_NAME}),key:Ne.string().required(),host:Ne.string().required(),hostname:Ne.string().required(),known_hosts:Ne.string().optional()});return ts.validateBySchema(e,t)}a(ade,"addSSHKeyValidator");function cde(e){let t=Ne.object({name:Ne.string().required(),key:Ne.string().required()});return ts.validateBySchema(e,t)}a(cde,"updateSSHKeyValidator");function lde(e){let t=Ne.object({name:Ne.string().required()});return ts.validateBySchema(e,t)}a(lde,"deleteSSHKeyValidator");function ude(e){let t=Ne.object({known_hosts:Ne.string().required()});return ts.validateBySchema(e,t)}a(ude,"setSSHKnownHostsValidator")});var vh=C((VLe,z$)=>{"use strict";var _S=require("joi"),Fa=require("path"),Ad=require("fs-extra"),{exec:dde,spawn:fde}=require("child_process"),_de=require("util"),hde=_de.promisify(dde),Rd=(k(),P(q)),{PACKAGE_ROOT:mde}=gt(),{handleHDBError:Lh,hdb_errors:pde}=he(),{HTTP_STATUS_CODES:Mh}=pde,Ol=oe(),Ede=tt(),qa=X(),{once:gde}=require("events");Ol.initSync();var Rw=Ol.get(Rd.CONFIG_PARAMS.COMPONENTSROOT),V$="npm install --force --omit=dev --json",Sde=`${V$} --dry-run`,Tde=Ol.get(Rd.CONFIG_PARAMS.ROOTPATH),hS=Fa.join(Tde,"ssh");z$.exports={installModules:bde,auditModules:Ode,installAllRootModules:Ade,uninstallRootModule:Rde,linkHarperdb:yde,runCommand:yd};async function Ade(e=!1,t=Ol.get(Rd.CONFIG_PARAMS.ROOTPATH)){await mS();let r=!1,n=process.env;Ad.pathExistsSync(hS)&&Ad.readdirSync(hS).forEach(s=>{s.includes(".key")&&!r&&(n={GIT_SSH_COMMAND:"ssh -F "+Fa.join(hS,"config")+" -o UserKnownHostsFile="+Fa.join(hS,"known_hosts"),...process.env},r=!0)});try{let s=Ol.get(Rd.CONFIG_PARAMS.ROOTPATH),i=Fa.join(s,"node_modules","harperdb");Ad.lstatSync(i).isSymbolicLink()&&Ad.unlinkSync(i)}catch(s){s.code!=="ENOENT"&&qa.error("Error removing symlink:",s)}await yd(e?"npm install --force --ignore-scripts":"npm install --force",t,n)}a(Ade,"installAllRootModules");async function Rde(e){await yd(`npm uninstall ${e}`,Ol.get(Rd.CONFIG_PARAMS.ROOTPATH))}a(Rde,"uninstallRootModule");async function yde(){await mS(),await yd(`npm link ${mde}`,Ol.get(Rd.CONFIG_PARAMS.ROOTPATH))}a(yde,"linkHarperdb");async function yd(e,t=void 0,r=process.env){qa.debug({tagName:"npm_run_command"},`running command: \`${e}\``);let n=fde(e,{shell:!0,cwd:t,env:r,stdio:["ignore","pipe","pipe"]}),s="",i="";n.stdout.on("data",c=>{let l=c.toString();qa.debug({tagName:"npm_run_command:stdout"},l),s+=l}),n.stderr.on("data",c=>{let l=c.toString();qa.error({tagName:"npm_run_command:stderr"},l),i+=l});let[o]=await gde(n,"close");if(o!==0)throw new Error(`Command \`${e}\` exited with code ${o}.${i===""?"":` Error: ${i}`}`);return s||void 0}a(yd,"runCommand");async function bde(e){let t="install_node_modules is deprecated. 
Dependencies are automatically installed on deploy, and install_node_modules can lead to inconsistent behavior";qa.warn(t,e.projects);let r=W$(e);if(r)throw Lh(r,r.message,Mh.BAD_REQUEST);let{projects:n,dry_run:s}=e,i=s===!0?Sde:V$;await mS(),await Y$(n);let o={};for(let c=0,l=n.length;c<l;c++){let u=n[c];o[u]={npm_output:null,npm_error:null};let f=Fa.join(Rw,u),d,_=null;try{let{stdout:h,stderr:m}=await hde(i,{cwd:f});d=h?h.replace(`
  `,""):null,_=m?m.replace(`
  `,""):null}catch(h){h.stderr?o[u].npm_error=K$(h.stderr):o[u].npm_error=h.message;continue}try{o[u].npm_output=JSON.parse(d)}catch{o[u].npm_output=d}try{o[u].npm_error=JSON.parse(_)}catch{o[u].npm_error=_}}return qa.info(`finished installModules with response ${o}`),o.warning=t,o}a(bde,"installModules");function K$(e){let t='"error": {',r=e.indexOf('"error": {'),n=e.indexOf(`}