harperdb 4.5.34 → 4.5.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "harperdb",
-  "version": "4.5.34",
+  "version": "4.5.35",
   "description": "HarperDB is a distributed database, caching service, streaming broker, and application development platform focused on performance and ease of use.",
   "keywords": [
     "database",
@@ -21,9 +21,9 @@ Caused by:`));else if(typeof u=="object")try{n+=JSON.stringify(u)}catch{n+="Obje
  `,""));return r.replace(`
  `,"")}a(qH,"runCommand");async function Nne(){try{await rne.access(lO)}catch{return!1}let e=await qH(`${lO} --version`,void 0),t=e.substring(e.lastIndexOf("v")+1,e.length);return nne.eq(t,bne)}a(Nne,"checkNATSServerInstalled");async function hO(e,t,r,n=!0,s="127.0.0.1"){if(!t&&!r){let o=await FH.getClusterUser();if(nl(o))throw new Error("Unable to get nats connection. Cluster user is undefined.");t=o.username,r=o.decrypt_hash}Qs.trace("create nats connection called");let i=await mne({name:s,port:e,user:t,pass:r,maxReconnectAttempts:-1,waitOnFirstConnect:n,timeout:2e5,tls:{keyFile:wr.get(je.CONFIG_PARAMS.CLUSTERING_TLS_PRIVATEKEY),certFile:wr.get(je.CONFIG_PARAMS.CLUSTERING_TLS_CERTIFICATE),caFile:wr.get(je.CONFIG_PARAMS.CLUSTERING_TLS_CERT_AUTH),rejectUnauthorized:!1}});return i.protocol.transport.socket.unref(),Qs.trace("create connection established a nats client connection with id",i?.info?.client_id),i.closed().then(o=>{o&&Qs.error("Error with Nats client connection, connection closed",o),i===Jr&&$H()}),i}a(hO,"createConnection");function $H(){Jr=void 0,el=void 0,tl=void 0,rl=void 0}a($H,"clearClientCache");async function wne(){Jr&&(await Jr.drain(),Jr=void 0,el=void 0,tl=void 0,rl=void 0)}a(wne,"closeConnection");var Jr,rl;async function V_(){return rl||(rl=hO(wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),void 0,void 0),Jr=await rl),Jr||rl}a(V_,"getConnection");async function K_(){if(el)return el;nl(Jr)&&await V_();let{domain:e}=Hu(je.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(nl(e))throw new Error("Error getting JetStream domain. Unable to get JetStream manager.");return el=await Jr.jetstreamManager({domain:e,timeout:6e4}),el}a(K_,"getJetStreamManager");async function VH(){if(tl)return tl;nl(Jr)&&await V_();let{domain:e}=Hu(je.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(nl(e))throw new Error("Error getting JetStream domain. 
Unable to get JetStream manager.");return tl=Jr.jetstream({domain:e,timeout:6e4}),tl}a(VH,"getJetStream");async function Ui(){let e=Jr||await V_(),t=el||await K_(),r=tl||await VH();return{connection:e,jsm:t,js:r}}a(Ui,"getNATSReferences");async function Ine(e){let t=wr.get(je.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),{sys_name:r,decrypt_hash:n}=await FH.getClusterUser(),s=await hO(t,r,n),i=_O(),o=s.subscribe(i),c=[],l,u=(async()=>{for await(let f of o){let d=GH.decode(f.data);d.response_time=Date.now()-l,c.push(d)}})();return l=Date.now(),await s.publish("$SYS.REQ.SERVER.PING.VARZ",void 0,{reply:i}),await s.publish("$SYS.REQ.SERVER.PING",void 0,{reply:i}),await s.flush(),await jE.async_set_timeout(e),await o.drain(),await s.close(),await u,c}a(Ine,"getServerList");async function mO(e,t){let{jsm:r}=await Ui(),n=wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);s=s===null?-1:s;let i=wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);i=i===null?-1:i,await r.streams.add({name:e,storage:pne.File,retention:Ene.Limits,subjects:t,discard:gne.Old,max_msgs:s,max_bytes:i,max_age:n})}a(mO,"createLocalStream");async function KH(){let{jsm:e}=await Ui(),t=await e.streams.list().next(),r=[];return t.forEach(n=>{r.push(n)}),r}a(KH,"listStreams");async function Cne(e){let{jsm:t}=await Ui();await t.streams.delete(e)}a(Cne,"deleteLocalStream");async function Pne(e){let{connection:t}=await Ui(),r=[],n=_O(),s=t.subscribe(n),i=(async()=>{for await(let o of s)r.push(GH.decode(o.data))})();return await t.publish(`$JS.${e}.API.STREAM.LIST`,void 0,{reply:n}),await t.flush(),await s.drain(),await i,r}a(Pne,"listRemoteStreams");async function Dne(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Ui(),i=xH(),o={durable_name:i,ack_policy:dO.Explicit};t&&(o.deliver_policy=fO.StartTime,o.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,o);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];let u=[];for await(let f of l){let d=uO(f.data),_={nats_timestamp:f.info.timestampNanos,nats_sequence:f.info.streamSequence,entry:d};if(f.headers&&(_.origin=f.headers.get(Hr.MSG_HEADERS.ORIGIN)),u.push(_),f.ack(),f.info.pending===0)break}return await c.delete(),u}a(Dne,"viewStream");async function*Lne(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Ui(),i=xH(),o={durable_name:i,ack_policy:dO.Explicit};t&&(o.deliver_policy=fO.StartTime,o.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,o);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];for await(let u of l){let f=uO(u.data);f[0]||(f=[f]);for(let d of f){let _={nats_timestamp:u.info.timestampNanos,nats_sequence:u.info.streamSequence,entry:d};u.headers&&(_.origin=u.headers.get(Hr.MSG_HEADERS.ORIGIN)),yield _}if(u.ack(),u.info.pending===0)break}await c.delete()}a(Lne,"viewStreamIterator");async function Mne(e,t,r,n){Qs.trace(`publishToStream called with subject: ${e}, stream: ${t}, entries:`,n.operation),r=YH(n,r);let{js:s}=await Ui(),i=await JE(),o=`${e}.${i}`,c=await Rne(()=>n instanceof Uint8Array?n:kH.encode(n));try{Qs.trace(`publishToStream publishing to subject: ${o}`),Ane(c.length,"bytes-sent",e,n.operation,"replication"),await s.publish(o,c,{headers:r})}catch(l){if(l.code&&l.code.toString()==="503")return zH(async()=>{try{await 
s.publish(o,c,{headers:r})}catch{if(l.code&&l.code.toString()==="503"){Qs.trace(`publishToStream creating stream: ${t}`);let f=o.split(".");f[2]="*",await mO(t,[o]),await s.publish(o,c,{headers:r})}else throw l}});throw l}}a(Mne,"publishToStream");function YH(e,t){t===void 0&&(t=Tne());let r=wr.get(je.CONFIG_PARAMS.CLUSTERING_NODENAME);return!t.has(Hr.MSG_HEADERS.ORIGIN)&&r&&t.append(Hr.MSG_HEADERS.ORIGIN,r),t}a(YH,"addNatsMsgHeader");function Hu(e){e=e.toLowerCase();let t=$_.join(wr.get(je.CONFIG_PARAMS.ROOTPATH),yne);if(e===je.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())return nl(cO)&&(cO={port:G_.getConfigFromFile(je.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),server_name:G_.getConfigFromFile(je.CONFIG_PARAMS.CLUSTERING_NODENAME)+Hr.SERVER_SUFFIX.HUB,config_file:Hr.NATS_CONFIG_FILES.HUB_SERVER,pid_file_path:$_.join(t,Hr.PID_FILES.HUB),hdb_nats_path:t}),cO;if(e===je.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())return nl(aO)&&(aO={port:G_.getConfigFromFile(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),server_name:G_.getConfigFromFile(je.CONFIG_PARAMS.CLUSTERING_NODENAME)+Hr.SERVER_SUFFIX.LEAF,config_file:Hr.NATS_CONFIG_FILES.LEAF_SERVER,domain:G_.getConfigFromFile(je.CONFIG_PARAMS.CLUSTERING_NODENAME)+Hr.SERVER_SUFFIX.LEAF,pid_file_path:$_.join(t,Hr.PID_FILES.LEAF),hdb_nats_path:t}),aO;Qs.error(`Unable to get Nats server config. Unrecognized process: ${e}`)}a(Hu,"getServerConfig");async function WH(e,t,r,n){try{await e.consumers.add(t,{ack_policy:dO.Explicit,durable_name:r,deliver_policy:fO.StartTime,opt_start_time:n})}catch(s){if(s.message!=="consumer already exists")throw s}}a(WH,"createConsumer");async function vne(e,t,r){await e.consumers.delete(t,r)}a(vne,"removeConsumer");function Une(e){return e.split(".")[1]}a(Une,"extractServerName");async function xne(e,t,r=6e4,n=_O()){if(!jE.isObject(t))throw new Error("data param must be an object");let s=kH.encode(t),{connection:i}=await Ui(),o={timeout:r};n&&(o.reply=n,o.noMux=!0);let c=await i.request(e,s,o);return uO(c.data)}a(xne,"request");function pO(e){return new Promise(async(t,r)=>{let n=ane(lO,["--signal",`reload=${e}`],{cwd:__dirname}),s,i;n.on("error",o=>{r(o)}),n.stdout.on("data",o=>{i+=o.toString()}),n.stderr.on("data",o=>{s+=o.toString()}),n.stderr.on("close",o=>{s&&r(s),t(i)})})}a(pO,"reloadNATS");async function Bne(){let{pid_file_path:e}=Hu(je.PROCESS_DESCRIPTORS.CLUSTERING_HUB);await pO(e)}a(Bne,"reloadNATSHub");async function Hne(){let{pid_file_path:e}=Hu(je.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await pO(e)}a(Hne,"reloadNATSLeaf");function kne(e,t,r){let n;switch(e.code){case UH.NoResponders:n=`Unable to ${t}, node '${r}' is not listening.`;break;case UH.Timeout:n=`Unable to ${t}, node '${r}' is listening but did not respond.`;break;default:n=e.message;break}return n}a(kne,"requestErrorHandler");async function Fne(e,t){let r=t+Hr.SERVER_SUFFIX.LEAF,{connection:n}=await Ui(),{jsm:s}=await zne(r),{schema:i,table:o}=e,c=QE.createNatsTableStreamName(i,o),l=e.start_time?e.start_time:new Date(Date.now()).toISOString();await zH(async()=>{if(e.subscribe===!0)await WH(s,c,n.info.server_name,l);else try{await vne(s,c,n.info.server_name)}catch(u){Qs.trace(u)}})}a(Fne,"updateRemoteConsumer");async function Gne(e,t,r,n){let s=QE.createNatsTableStreamName(e,t),i=r+Hr.SERVER_SUFFIX.LEAF,o={type:je.ITC_EVENT_TYPES.NATS_CONSUMER_UPDATE,status:n,stream_name:s,node_domain_name:i};if(!HH&&_ne()<wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXINGESTTHREADS)){let{updateConsumer:c}=oO();await c(o)}await 
dne(o),n==="stop"&&await jE.async_set_timeout(1e3)}a(Gne,"updateConsumerIterator");function zH(e){return une.writeTransaction(je.SYSTEM_SCHEMA_NAME,je.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,e)}a(zH,"exclusiveLock");async function jH(e,t){let r=QE.createNatsTableStreamName(e,t),n=await JE(),s=Kne(e,t,n);await mO(r,[s])}a(jH,"createLocalTableStream");async function qne(e){for(let t=0,r=e.length;t<r;t++){let n=e[t].schema,s=e[t].table;await jH(n,s)}}a(qne,"createTableStreams");async function QH(e,t,r=void 0){if(wr.get(je.CONFIG_PARAMS.CLUSTERING_ENABLED))try{let n=QE.createNatsTableStreamName(e,t),{domain:s}=Hu(je.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await(await(await V_()).jetstreamManager({domain:s,timeout:24e4})).streams.purge(n,r)}catch(n){if(n.message==="stream not found")Qs.warn(n);else throw n}}a(QH,"purgeTableStream");async function $ne(e,t){if(wr.get(je.CONFIG_PARAMS.CLUSTERING_ENABLED))for(let r=0,n=t.length;r<n;r++)await QH(e,t[r])}a($ne,"purgeSchemaTableStreams");async function Vne(e){return(await K_()).streams.info(e)}a(Vne,"getStreamInfo");function Kne(e,t,r){return`${Hr.SUBJECT_PREFIXES.TXN}.${e}${t?"."+t:""}.${r}`}a(Kne,"createSubjectName");async function JE(){if(q_)return q_;if(q_=(await K_())?.nc?.info?.server_name,q_===void 0)throw new Error("Unable to get jetstream manager server name");return q_}a(JE,"getJsmServerName");async function Yne(){let e=await K_(),t=await JE(),r=await KH();for(let n of r){let s=n.config,i=s.subjects[0];if(!i)continue;let o=Wne(n),c=i.split(".");if(c[c.length-1]===t&&!o||s.name==="__HARPERDB_WORK_QUEUE__")continue;let u=i.split(".");u[u.length-1]=t;let f=u.join(".");Qs.trace(`Updating stream subject name from: ${i} to: ${f}`),s.subjects[0]=f,await e.streams.update(s.name,s)}}a(Yne,"updateLocalStreams");function Wne(e){let{config:t}=e,r=!1,n=wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);s=s===null?-1:s;let i=wr.get(je.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);return i=i===null?-1:i,n!==t.max_age&&(t.max_age=n,r=!0),s!==t.max_bytes&&(t.max_bytes=s,r=!0),i!==t.max_msgs&&(t.max_msgs=i,r=!0),r}a(Wne,"updateStreamLimits");async function zne(e){let t,r;try{t=await Jr.jetstream({domain:e}),r=await Jr.jetstreamManager({domain:e,checkAPI:!1})}catch(n){throw Qs.error("Unable to connect to:",e),n}return{js:t,jsm:r}}a(zne,"connectToRemoteJS")});function EO(e){let t=e.get(XE),r=t?(0,ku.unpack)(t):null;r||(r={remoteNameToId:{}});let n=et(),s=!1;r.nodeName=et();let i=r.remoteNameToId;if(i[n]!==0){let o=0,c;for(let l in i){let u=i[l];u===0?c=l:u>o&&(o=u)}if(c){o++,i[c]=o;let l=[Symbol.for("seq"),o];e.rootStore.dbisDb.transactionSync(()=>{e.rootStore.dbisDb.get(l)||e.rootStore.dbisDb.putSync(l,{seqId:W_(e)??1,nodes:[]})})}i[n]=0,e.putSync(XE,(0,ku.pack)(r))}return r}function Y_(e){return EO(e).remoteNameToId}function ZH(e,t){let r=EO(t),n=r.remoteNameToId,s=new Map,i=!1;for(let o in e){let c=e[o],l=n[o];if(l==null){let u=0;for(let f in n){let d=n[f];d>u&&(u=d)}l=u+1,n[o]=l,i=!0}s.set(c,l)}return i&&t.putSync(XE,(0,ku.pack)(r)),s}function ZE(e,t){let r=EO(t),n=r.remoteNameToId,s=n[e];if(s==null){let i=0;for(let o in n){let c=n[o];c>i&&(i=c)}s=i+1,n[e]=s,t.putSync(XE,(0,ku.pack)(r))}return XH.trace?.("The remote node name map",e,n,s),s}var XH,ku,XE,gO=Re(()=>{XH=M(Gs());Es();ku=require("msgpackr"),XE=Symbol.for("remote-ids");a(EO,"getIdMappingRecord");a(Y_,"exportIdMapping");a(ZH,"remoteToLocalNodeId");a(ZE,"getIdOfRemoteNode")});var 
SO={};Ue(SO,{commits_awaiting_replication:()=>Fu,getHDBNodeTable:()=>or,getReplicationSharedStatus:()=>eg,iterateRoutes:()=>j_,shouldReplicateToNode:()=>z_,subscribeToNodeUpdates:()=>Gu});function or(){return ek||(ek=_t({table:"hdb_nodes",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"subscriptions"},{attribute:"system_info"},{attribute:"url"},{attribute:"routes"},{attribute:"ca"},{attribute:"ca_info"},{attribute:"replicates"},{attribute:"revoked_certificates"},{attribute:"__createdtime__"},{attribute:"__updatedtime__"}]}))}function eg(e,t,r,n){return new Float64Array(e.getUserSharedBuffer(["replicated",t,r],new ArrayBuffer(48),n&&{callback:n}))}function Gu(e){or().subscribe({}).then(async t=>{for await(let r of t){let n=r?.value?.name;sk.debug?.("adding node",n,"on node",et()," on process",process.pid),server.nodes=server.nodes.filter(i=>i.name!==n),r.type==="put"&&n!==et()&&(r.value?server.nodes.push(r.value):console.error("Invalid node update event",r));let s=new Map;for await(let i of or().search({}))if(i.shard!=null){let o=s.get(i.shard);o||s.set(i.shard,o=[]),o.push(i)}server.shards=s,(r.type==="put"||r.type==="delete")&&e(r.value,r.id)}})}function z_(e,t){let r=ga.default.get(x.REPLICATION_DATABASES);return(e.replicates===!0||e.replicates?.sends)&&databases[t]&&(r==="*"||r?.find?.(n=>n.name===t&&(!n.sharded||e.shard===ga.default.get(x.REPLICATION_SHARD))))&&or().primaryStore.get(et())?.replicates||e.subscriptions?.some(n=>(n.database||n.schema)===t&&n.subscribe)}function jne(){Gu(e=>{Sa({},(t,r)=>{let n=e.name,s=tk.get(n);if(s||tk.set(n,s=new Map),s.has(r))return;let i;for(let o in t)if(i=t[o].auditStore,i)break;if(i){let o=eg(i,r,n,()=>{let c=o[0],l=o.lastTime;for(let{txnTime:u,onConfirm:f}of Fu.get(r)||[])u>l&&u<=c&&f();o.lastTime=c});o.lastTime=0,s.set(r,o)}})})}function*j_(e){for(let t of e.routes||[]){let r=t.url,n;if(typeof t=="string"?t.includes("://")?r=t:n=t:n=t.hostname??t.host,n&&!r){let s=ga.default.get(x.REPLICATION_SECUREPORT)??(!ga.default.get(x.REPLICATION_PORT)&&ga.default.get(x.OPERATIONSAPI_NETWORK_SECUREPORT)),i;(i=n.match(/:(\d+)$/)?.[1])?n=n.slice(0,-i[0].length-1):t.port?i=t.port:i=s||ga.default.get(x.REPLICATION_PORT)||ga.default.get(x.OPERATIONSAPI_NETWORK_PORT);let o=i?.lastIndexOf?.(":");o>0&&(i=+i.slice(o+1).replace(/[\[\]]/g,"")),r=(s?"wss://":"ws://")+n+":"+i}if(!r){rk.isMainThread&&console.error("Invalid route, must specify a url or host (with port)");continue}yield{replicates:!t.subscriptions,url:r,subscription:t.subscriptions,routes:t.routes,start_time:t.startTime,revoked_certificates:t.revokedCertificates}}}var rk,nk,ga,sk,ek,tk,Fu,sl=Re(()=>{xe();Es();sp();rk=require("worker_threads"),nk=M(he()),ga=M(le());k();sk=M(Gs());server.nodes=[];a(or,"getHDBNodeTable");a(eg,"getReplicationSharedStatus");a(Gu,"subscribeToNodeUpdates");a(z_,"shouldReplicateToNode");tk=new Map;tL((e,t,r)=>{if(r>server.nodes.length)throw new nk.ClientError(`Cannot confirm replication to more nodes (${r}) than are in the network (${server.nodes.length})`);Fu||(Fu=new Map,jne());let n=Fu.get(e);return n||(n=[],Fu.set(e,n)),new Promise(s=>{let i=0;n.push({txnTime:t,onConfirm:a(()=>{++i===r&&s()},"onConfirm")})})});a(jne,"startSubscriptionToReplications");a(j_,"iterateRoutes")});var ak={};Ue(ak,{connectedToNode:()=>il,disconnectedFromNode:()=>$u,ensureNode:()=>To,requestClusterStatus:()=>ok,startOnMainThread:()=>RO});async function RO(e){let t=0,r=Xe();for(let i of Object.getOwnPropertyNames(r)){let o=r[i];for(let c in o){let 
l=o[c];if(l.auditStore){tg.set(i,W_(l.auditStore));break}}}Bi.whenThreadsStarted.then(async()=>{let i=[];for await(let o of r.system.hdb_nodes?.search([])||[])i.push(o);for(let o of j_(e))try{let c=!o.subscriptions;if(c){let u=et(),f=or().primaryStore.get(u);if(f!==null){let d=e.url??Ta();(f===void 0||f.url!==d||f.shard!==e.shard)&&await To(u,{name:u,url:d,shard:e.shard,replicates:!0})}}let l=o.trusted!==!1;if(c&&o.replicates==null&&(o.replicates=!0),i.find(u=>u.url===o.url))continue;s(o)}catch(c){console.error(c)}Gu(s)});let n;function s(i,o=i?.name){let c=et()&&o===et()||Ta()&&i?.url===Ta();if(c){let d=!!i?.replicates;if(n!==void 0&&n!==d)for(let _ of or().search([]))_.replicates&&_.name!==o&&s(_,_.name);n=d}if(nt.trace("Setting up node replication for",i),!i){for(let[d,_]of xi){let h;for(let[m,{worker:S,nodes:g}]of _){let R=g[0];if(R&&R.name==o){h=!0;for(let[E,{worker:T}]of _)_.delete(E),nt.warn("Node was deleted, unsubscribing from node",o,E,d),T?.postMessage({type:"unsubscribe-from-node",node:o,database:E,url:d});break}}if(h){xi.get(d).iterator.remove(),xi.delete(d);return}}return}if(c)return;if(!i.url){nt.info(`Node ${i.name} is missing url`);return}let l=xi.get(i.url);if(l&&l.iterator.remove(),!(i.replicates===!0||i.replicates?.sends)&&!i.subscriptions?.length&&!l)return;if(nt.info(`Added node ${i.name} at ${i.url} for process ${et()}`),i.replicates&&i.subscriptions&&(i={...i,subscriptions:null}),i.name){for(let[d,_]of qu)if(i.url===_.url){qu.delete(d);break}qu.set(i.name,i)}let u=Xe();if(l||(l=new Map,xi.set(i.url,l)),l.iterator=Sa(e,(d,_,h)=>{h?f(_,!0):f(_,!1)}),i.subscriptions)for(let d of i.subscriptions){let _=d.database||d.schema;u[_]||(nt.warn(`Database ${_} not found for node ${i.name}, making a subscription anyway`),f(_,!1))}function f(d,_){nt.trace("Setting up replication for database",d,"on node",i.name);let h=l.get(d),m,S=[{replicateByDefault:_,...i}];tg.has(d)&&(S.push({replicateByDefault:_,name:et(),start_time:tg.get(d),end_time:Date.now(),replicates:!0}),tg.delete(d));let g=z_(i,d),R=Bi.workers.filter(E=>E.name==="http");if(h?(m=h.worker,h.nodes=S):g&&(t=t%R.length,m=R[t++],l.set(d,{worker:m,nodes:S,url:i.url}),m?.on("exit",()=>{l.get(d)?.worker===m&&(l.delete(d),f(d,_))})),g)setTimeout(()=>{let E={type:"subscribe-to-node",database:d,nodes:S};m?m.postMessage(E):Q_(E)},Qne);else{nt.info("Node no longer should be used, unsubscribing from node",i.replicates,!!u[d],or().primaryStore.get(et())?.replicates),or().primaryStore.get(et())?.replicates||(n=!1);let E={type:"unsubscribe-from-node",database:d,url:i.url,name:i.name};m?m.postMessage(E):ng(E)}}a(f,"onDatabase")}a(s,"onNodeUpdate"),$u=a(function(i){try{nt.info("Disconnected from node",i.name,i.url,"finished",!!i.finished);let o=Array.from(qu.keys()),c=o.sort(),l=c.indexOf(i.name||Js(i.url));if(l===-1){nt.warn("Disconnected node not found in node map",i.name,o);return}let u=xi.get(i.url),f=u?.get(i.database);if(!f){nt.warn("Disconnected node not found in replication map",i.database,u);return}if(f.connected=!1,i.finished||!AO.default.get(x.REPLICATION_FAILOVER))return;let d=f.nodes[0];if(!(d.replicates===!0||d.replicates?.sends||d.subscriptions?.length))return;let _=d.shard,h=(l+1)%c.length;for(;l!==h;){let m=c[h],S=qu.get(m);u=xi.get(S.url);let g=u?.get(i.database);if(!g||g.connected===!1||g.nodes[0].shard!==_){h=(h+1)%c.length;continue}let{worker:R,nodes:E}=g,T=!1;for(let N of f.nodes){if(E.some(v=>v.name===N.name)){nt.info(`Disconnected node is already failing over to ${m} for 
${i.database}`);continue}N.end_time<Date.now()||(E.push(N),T=!0)}if(f.nodes=[f.nodes[0]],!T){nt.info(`Disconnected node ${i.name} has no nodes to fail over to ${m}`);return}nt.info(`Failing over ${i.database} from ${i.name} to ${m}`),R?R.postMessage({type:"subscribe-to-node",database:i.database,nodes:E}):Q_({database:i.database,nodes:E});return}nt.warn("Unable to find any other node to fail over to",i.name,i.url)}catch(o){nt.error("Error failing over node",o)}},"disconnectedFromNode"),il=a(function(i){let o=xi.get(i.url),c=o?.get(i.database);if(!c){nt.warn("Connected node not found in replication map, this may be because the node is being removed",i.database,o);return}c.connected=!0,c.latency=i.latency;let l=c.nodes[0];if(!l){nt.info("Connected node has no nodes",i.database,c);return}if(!l.name){nt.debug("Connected node is not named yet",i.database,c);return}c.nodes=[l];let u=!1;for(let f of xi.values()){let d=f.get(i.database);if(!d||d==c)continue;let{worker:_,nodes:h,connected:m}=d;if(h)if(m===!1&&h[0].shard===l.shard)u=!0,c.nodes.push(h[0]);else{let S=h.filter(g=>g.name!==l.name);S.length<h.length&&(d.nodes=S,_.postMessage({type:"subscribe-to-node",database:i.database,nodes:h}))}}u&&c.worker&&c.worker.postMessage({type:"subscribe-to-node",database:i.database,nodes:c.nodes})},"connectedToNode"),(0,Bi.onMessageByType)("disconnected-from-node",$u),(0,Bi.onMessageByType)("connected-to-node",il),(0,Bi.onMessageByType)("request-cluster-status",ok)}function ok(e,t){let r=[];for(let[n,s]of qu)try{let i=xi.get(s.url);nt.info("Getting cluster status for",n,s.url,"has dbs",i?.size);let o=[];if(i){for(let[l,{worker:u,connected:f,nodes:d,latency:_}]of i)o.push({database:l,connected:f,latency:_,thread_id:u?.threadId,nodes:d.filter(h=>!(h.end_time<Date.now())).map(h=>h.name)});let c=(0,TO.cloneDeep)(s);c.database_sockets=o,delete c.ca,delete c.node_name,delete c.__updatedtime__,delete c.__createdtime__,r.push(c)}}catch(i){nt.warn("Error getting cluster status for",s?.url,i)}return t?.postMessage({type:"cluster-status",connections:r}),{connections:r}}async function To(e,t){let r=or();e=e??Js(t.url),t.name=e;try{if(t.ca){let s=new ik.X509Certificate(t.ca);t.ca_info={issuer:s.issuer.replace(/\n/g," "),subject:s.subject.replace(/\n/g," "),subject_alt_name:s.subjectAltName,serial_number:s.serialNumber,valid_from:s.validFrom,valid_to:s.validTo}}}catch(s){nt.error("Error parsing replication CA info for hdb_nodes table",s.message)}let n=r.primaryStore.get(e);if(nt.debug(`Ensuring node ${e} at ${t.url}, existing record:`,n,"new record:",t),!n)await r.patch(t);else{t.replicates&&!AO.default.get(x.CLUSTERING_ENABLED)&&(t.subscriptions=null);for(let s in t)if(n[s]!==t[s]&&s==="subscriptions"&&t[s]&&n[s]){let i=[],o=(0,TO.cloneDeep)(n[s]);for(let c of t[s]){let l=!1;for(let u of o)if((c.database??c.schema)===(u.database??u.schema)&&c.table===u.table){u.publish=c.publish,u.subscribe=c.subscribe,l=!0;break}l||i.push(c)}t.subscriptions=[...o,...i];break}if(Array.isArray(t.revoked_certificates)){let s=n.revoked_certificates||[];t.revoked_certificates=[...new Set([...s,...t.revoked_certificates])]}nt.info(`Updating node ${e} at ${t.url}`),await r.patch(t)}}var Bi,rg,nt,TO,AO,ik,Qne,xi,$u,il,qu,tg,J_=Re(()=>{xe();Bi=M(ot());Es();rg=require("worker_threads");sl();nt=M(ee()),TO=require("lodash"),AO=M(le());k();ik=require("crypto"),Qne=200,xi=new Map,qu=new Map,tg=new 
Map;a(RO,"startOnMainThread");a(ok,"requestClusterStatus");rg.parentPort&&($u=a(e=>{rg.parentPort.postMessage({type:"disconnected-from-node",...e})},"disconnectedFromNode"),il=a(e=>{rg.parentPort.postMessage({type:"connected-to-node",...e})},"connectedToNode"),(0,Bi.onMessageByType)("subscribe-to-node",e=>{Q_(e)}),(0,Bi.onMessageByType)("unsubscribe-from-node",e=>{ng(e)}));a(To,"ensureNode")});var ei=C(Pk=>{"use strict";var ar=require("path"),{watch:Jne}=require("chokidar"),On=require("fs-extra"),Vu=require("node-forge"),_k=require("net"),{generateKeyPair:bO,X509Certificate:Ao,createPrivateKey:hk}=require("crypto"),Xne=require("util");bO=Xne.promisify(bO);var bt=Vu.pki,Xs=require("joi"),{v4:mk}=require("uuid"),{validateBySchema:IO}=it(),Tt=ee(),Jn=le(),gs=(k(),P(q)),{CONFIG_PARAMS:al}=gs,Zs=CA(),{ClientError:Ra}=he(),sg=require("node:tls"),{relative:pk,join:Zne}=require("node:path"),{CERT_PREFERENCE_APP:bIe,CERTIFICATE_VALUES:ck}=Zs,ese=hc(),OO=Ct(),{table:tse,getDatabases:rse,databases:yO}=(xe(),P(ct)),{getJWTRSAKeys:lk}=(vu(),P(k_));Object.assign(Pk,{generateKeys:DO,updateConfigCert:bk,createCsr:lse,signCertificate:use,setCertTable:Ku,loadCertificates:Ak,reviewSelfSignedCert:MO,createTLSSelector:Nk,listCertificates:Ik,addCertificate:pse,removeCertificate:gse,createNatsCerts:_se,generateCertsKeys:fse,getReplicationCert:Z_,getReplicationCertAuth:cse,renewSelfSigned:hse,hostnamesFromCert:vO,getKey:Sse});var{urlToNodeName:Ek,getThisNodeUrl:nse,getThisNodeName:og,clearThisNodeName:sse}=(Es(),P(Oa)),{readFileSync:ise,statSync:gk}=require("node:fs"),OIe=le(),{getTicketKeys:ose,onMessageFromWorkers:ase}=ot(),Aa=ee(),{isMainThread:Sk}=require("worker_threads"),{TLSSocket:Tk,createSecureContext:NIe}=require("node:tls"),CO=3650,X_=["127.0.0.1","localhost","::1"],PO=[{name:"countryName",value:"USA"},{name:"stateOrProvinceName",value:"Colorado"},{name:"localityName",value:"Denver"},{name:"organizationName",value:"HarperDB, Inc."}];ase(async e=>{e.type===gs.ITC_EVENT_TYPES.RESTART&&(Jn.initSync(!0),await MO())});var kr;function ba(){return kr||(kr=rse().system.hdb_certificate,kr||(kr=tse({table:"hdb_certificate",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"uses"},{attribute:"certificate"},{attribute:"is_authority"},{attribute:"private_key_name"},{attribute:"details"},{attribute:"is_self_signed"},{attribute:"__updatedtime__"}]}))),kr}a(ba,"getCertTable");async function Z_(){let e=Nk("operations-api"),t={secureContexts:null,setSecureContext:a(s=>{},"setSecureContext")};await e.initialize(t);let r=t.secureContexts.get(og());if(!r)return;let n=new Ao(r.options.cert);return r.cert_parsed=n,r.issuer=n.issuer,r}a(Z_,"getReplicationCert");async function cse(){ba();let e=(await Z_()).options.cert,r=new Ao(e).issuer.match(/CN=(.*)/)?.[1];return kr.get(r)}a(cse,"getReplicationCertAuth");var uk,ya=new Map;function Ak(){if(uk)return;uk=!0;let e=[{configKey:al.TLS},{configKey:al.OPERATIONSAPI_TLS}];ba();let t=ar.dirname(OO.getConfigFilePath()),r;for(let{configKey:n}of e){let s=OO.getConfigFromFile(n);if(s){Array.isArray(s)||(s=[s]);for(let i of s){let o=i.privateKey,c=o&&pk(Zne(t,"keys"),o);c&&dk(o,l=>{ya.set(c,l)},"private key");for(let l of[!1,!0]){let u=i[l?"certificateAuthority":"certificate"];if(u&&Sk){let f;dk(u,d=>{if(ck.cert===d)return;let _=i.hostname??i.hostnames??i.host??i.hosts;_&&!Array.isArray(_)&&(_=[_]);let h=Ok(u),m=new Ao(h),S;try{S=Ck(m)}catch(T){logger.error("error extracting host name from certificate",T);return}if(S==null){logger.error("No host name found on 
certificate");return}if(m.checkIssued(new Ao(ck.cert)))return;let g=kr.primaryStore.get(S),R=gk(u).mtimeMs,E=!g||g.is_self_signed?1:g.file_timestamp??g.__updatedtime__;if(g&&R<=E){R<E&&Tt.info(`Certificate ${S} at ${u} is older (${new Date(R)}) than the certificate in the database (${E>1?new Date(E):"only self signed certificate available"})`);return}r=kr.put({name:S,uses:["https",...n.includes("operations")?["operations"]:[]],ciphers:i.ciphers,certificate:h,private_key_name:c,is_authority:l,hostnames:_,file_timestamp:R,details:{issuer:m.issuer.replace(/\n/g," "),subject:m.subject?.replace(/\n/g," "),subject_alt_name:m.subjectAltName,serial_number:m.serialNumber,valid_from:m.validFrom,valid_to:m.validTo}})},l?"certificate authority":"certificate")}}}}}return r}a(Ak,"loadCertificates");function dk(e,t,r){let n,s=a((i,o)=>{try{let c=o.mtimeMs;c&&c!==n&&(n&&Sk&&Tt.warn(`Reloading ${r}:`,i),n=c,t(Ok(i)))}catch(c){Tt.error(`Error loading ${r}:`,i,c)}},"loadFile");On.existsSync(e)?s(e,gk(e)):Tt.error(`${r} file not found:`,e),Jne(e,{persistent:!1}).on("change",s)}a(dk,"loadAndWatch");function NO(){let e=nse();if(e==null){let t=X_[0];return Tt.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return Ek(e)}a(NO,"getHost");function ig(){let e=og();if(e==null){let t=X_[0];return Tt.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return e}a(ig,"getCommonName");async function lse(){let e=await Z_(),t=bt.certificateFromPem(e.options.cert),r=bt.privateKeyFromPem(e.options.key);Tt.info("Creating CSR with cert named:",e.name);let n=bt.createCertificationRequest();n.publicKey=t.publicKey;let s=[{name:"commonName",value:ig()},...PO];Tt.info("Creating CSR with subject",s),n.setSubject(s);let i=[{name:"unstructuredName",value:"HarperDB, Inc."},{name:"extensionRequest",extensions:Rk()}];return Tt.info("Creating CSR with attributes",i),n.setAttributes(i),n.sign(r),Vu.pki.certificationRequestToPem(n)}a(lse,"createCsr");function Rk(){let e=X_.includes(ig())?X_:[...X_,ig()];return e.includes(NO())||e.push(NO()),[{name:"basicConstraints",cA:!1,critical:!0},{name:"keyUsage",digitalSignature:!0,keyEncipherment:!0,critical:!0},{name:"extKeyUsage",serverAuth:!0,clientAuth:!0},{name:"nsCertType",client:!0,server:!0},{name:"subjectAltName",altNames:e.map(t=>_k.isIP(t)?{type:7,ip:t}:{type:2,value:t})}]}a(Rk,"certExtensions");async function use(e){let t={},r=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME);if(e.csr){let n,s;ba();for await(let f of kr.search([]))if(f.is_authority&&!f.details.issuer.includes("HarperDB-Certificate-Authority")){if(ya.has(f.private_key_name)){n=ya.get(f.private_key_name),s=f;break}else if(f.private_key_name&&await On.exists(ar.join(r,f.private_key_name))){n=On.readFile(ar.join(r,f.private_key_name)),s=f;break}}if(!n){let f=await wO();s=f.ca,n=f.private_key}n=bt.privateKeyFromPem(n),t.signingCA=s.certificate;let i=bt.certificateFromPem(s.certificate);Tt.info("Signing CSR with cert named",s.name);let o=bt.certificationRequestFromPem(e.csr);try{o.verify()}catch(f){return Tt.error(f),new Error("Error verifying CSR: "+f.message)}let c=Vu.pki.createCertificate();c.serialNumber=Math.random().toString().slice(2,10),c.validity.notBefore=new Date;let l=new Date;c.validity.notAfter=l,c.validity.notAfter.setDate(l.getDate()+CO),Tt.info("sign cert setting validity:",c.validity),Tt.info("sign cert setting subject from CSR:",o.subject.attributes),c.setSubject(o.subject.attributes),Tt.info("sign cert setting 
issuer:",i.subject.attributes),c.setIssuer(i.subject.attributes);let u=o.getAttribute({name:"extensionRequest"}).extensions;Tt.info("sign cert adding extensions from CSR:",u),c.setExtensions(u),c.publicKey=o.publicKey,c.sign(n,Vu.md.sha256.create()),t.certificate=bt.certificateToPem(c)}else Tt.info("Sign cert did not receive a CSR from:",e.url,"only the CA will be returned");return t}a(use,"signCertificate");async function dse(e,t){await Ku({name:og(),uses:["https","wss"],certificate:e,private_key_name:"privateKey.pem",is_authority:!1,is_self_signed:!0}),await Ku({name:t.subject.getField("CN").value,uses:["https","wss"],certificate:bt.certificateToPem(t),private_key_name:"privateKey.pem",is_authority:!0,is_self_signed:!0})}a(dse,"createCertificateTable");async function Ku(e){let t=new Ao(e.certificate);e.details={issuer:t.issuer.replace(/\n/g," "),subject:t.subject?.replace(/\n/g," "),subject_alt_name:t.subjectAltName,serial_number:t.serialNumber,valid_from:t.validFrom,valid_to:t.validTo},ba(),await kr.patch(e)}a(Ku,"setCertTable");async function DO(){let e=await bO("rsa",{modulusLength:4096,publicKeyEncoding:{type:"spki",format:"pem"},privateKeyEncoding:{type:"pkcs8",format:"pem"}});return{public_key:bt.publicKeyFromPem(e.publicKey),private_key:bt.privateKeyFromPem(e.privateKey)}}a(DO,"generateKeys");async function LO(e,t,r){let n=bt.createCertificate();if(!t){let o=await Z_();t=bt.certificateFromPem(o.options.cert).publicKey}n.publicKey=t,n.serialNumber=Math.random().toString().slice(2,10),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+CO);let i=[{name:"commonName",value:ig()},...PO];return n.setSubject(i),n.setIssuer(r.subject.attributes),n.setExtensions(Rk()),n.sign(e,Vu.md.sha256.create()),bt.certificateToPem(n)}a(LO,"generateCertificates");async function wO(){let e=await Ik(),t;for(let r of e){if(!r.is_authority)continue;let n=await wk(r.private_key_name);if(r.private_key_name&&n&&new Ao(r.certificate).checkPrivateKey(hk(n))){Tt.trace(`CA named: ${r.name} found with matching private key`),t={ca:r,private_key:n};break}}if(t)return t;Tt.trace("No CA found with matching private key")}a(wO,"getCertAuthority");async function yk(e,t,r=!0){let n=bt.createCertificate();n.publicKey=t,n.serialNumber=Math.random().toString().slice(2,10),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+CO);let i=[{name:"commonName",value:`HarperDB-Certificate-Authority-${Jn.get(al.REPLICATION_HOSTNAME)??Ek(Jn.get(al.REPLICATION_URL))??mk().split("-")[0]}`},...PO];n.setSubject(i),n.setIssuer(i),n.setExtensions([{name:"basicConstraints",cA:!0,critical:!0},{name:"keyUsage",keyCertSign:!0,critical:!0}]),n.sign(e,Vu.md.sha256.create());let o=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),c=ar.join(o,Zs.PRIVATEKEY_PEM_NAME);return r&&await On.writeFile(c,bt.privateKeyToPem(e)),n}a(yk,"generateCertAuthority");async function fse(){let{private_key:e,public_key:t}=await DO(),r=await yk(e,t),n=await LO(e,t,r);await dse(n,r),bk()}a(fse,"generateCertsKeys");async function _se(){let e=await LO(bt.privateKeyFromPem(Zs.CERTIFICATE_VALUES.key),void 0,bt.certificateFromPem(Zs.CERTIFICATE_VALUES.cert)),t=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),r=ar.join(t,Zs.NATS_CERTIFICATE_PEM_NAME);await On.exists(r)||await On.writeFile(r,e);let n=ar.join(t,Zs.NATS_CA_PEM_NAME);await On.exists(n)||await On.writeFile(n,Zs.CERTIFICATE_VALUES.cert)}a(_se,"createNatsCerts");async function hse(){ba();for await(let 
e of kr.search([{attribute:"is_self_signed",value:!0}]))await kr.delete(e.name);await MO()}a(hse,"renewSelfSigned");async function MO(){sse(),await Ak(),ba();let e=await wO();if(!e){Tt.notify("A matching Certificate Authority and key was not found. A new CA will be created in advance, so it's available if needed.");let r=a(u=>{try{return{key:bt.privateKeyFromPem(On.readFileSync(u)),keyPath:u}}catch(f){return Tt.warn(`Failed to parse private key from ${u}:`,f.message),{key:null,keyPath:u}}},"tryToParseKey"),n=Jn.get(al.TLS),s,i;if(Array.isArray(n)){for(let u of n)if(u.privateKey){let f=r(u.privateKey);if(s=f.key,i=f.keyPath,f.key)break}}else{let u=Jn.get(al.TLS_PRIVATEKEY),f=r(u);s=f.key,i=f.keyPath}let o=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),c=pk(o,i);s||(Tt.warn("Unable to parse the TLS key",i,"A new key will be generated and used to create Certificate Authority"),{private_key:s}=await DO(),On.existsSync(ar.join(o,Zs.PRIVATEKEY_PEM_NAME))&&(c=`privateKey${mk().split("-")[0]}.pem`),await On.writeFile(ar.join(o,c),bt.privateKeyToPem(s)));let l=await yk(s,bt.setRsaPublicKey(s.n,s.e),!1);await Ku({name:l.subject.getField("CN").value,uses:["https"],certificate:bt.certificateToPem(l),private_key_name:c,is_authority:!0,is_self_signed:!0})}if(!await Z_()){let r=og();Tt.notify(`A suitable replication certificate was not found, creating new self singed cert named: ${r}`),e=e??await wO();let n=bt.certificateFromPem(e.ca.certificate),s=n.publicKey,i=await LO(bt.privateKeyFromPem(e.private_key),s,n);await Ku({name:r,uses:["https","operations","wss"],certificate:i,is_authority:!1,private_key_name:e.ca.private_key_name,is_self_signed:!0})}}a(MO,"reviewSelfSignedCert");function bk(){let e=ese(Object.keys(gs.CONFIG_PARAM_MAP),!0),t=ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME),r=ar.join(t,Zs.PRIVATEKEY_PEM_NAME),n=ar.join(t,Zs.NATS_CERTIFICATE_PEM_NAME),s=ar.join(t,Zs.NATS_CA_PEM_NAME),i=gs.CONFIG_PARAMS,o={[i.TLS_PRIVATEKEY]:e[i.TLS_PRIVATEKEY.toLowerCase()]?e[i.TLS_PRIVATEKEY.toLowerCase()]:r};e[i.TLS_CERTIFICATE.toLowerCase()]&&(o[i.TLS_CERTIFICATE]=e[i.TLS_CERTIFICATE.toLowerCase()]),e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(o[i.TLS_CERTIFICATEAUTHORITY]=e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]&&(o[i.OPERATIONSAPI_TLS_CERTIFICATE]=e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]),e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]&&(o[i.OPERATIONSAPI_TLS_PRIVATEKEY]=e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(o[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY]=e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]),(e[i.CLUSTERING_ENABLED.toLowerCase()]||e.clustering)&&(o[i.CLUSTERING_TLS_CERTIFICATE]=e[i.CLUSTERING_TLS_CERTIFICATE.toLowerCase()]??n,o[i.CLUSTERING_TLS_CERT_AUTH]=e[i.CLUSTERING_TLS_CERT_AUTH.toLowerCase()]??s,o[i.CLUSTERING_TLS_PRIVATEKEY]=e[i.CLUSTERING_TLS_PRIVATEKEY.toLowerCase()]??r),OO.updateConfigValue(void 0,void 0,o,!1,!0)}a(bk,"updateConfigCert");function Ok(e){return e.startsWith("-----BEGIN")?e:ise(e,"utf8")}a(Ok,"readPEM");var fk=sg.createSecureContext;sg.createSecureContext=function(e){if(!e.cert||!e.key)return fk(e);let t={...e};delete t.key,delete t.cert;let r=fk(t);return r.context.setCert(e.cert),r.context.setKey(e.key,void 0),r};var mse=Tk.prototype._init;Tk.prototype._init=function(e,t){mse.call(this,e,t);let r=this;this._handle.oncertcb=function(n){let 
s=n.servername;r._SNICallback(s,(i,o)=>{this.sni_context=o?.context||o,this.certCbDone()})}};var ol=new Map;function Nk(e,t){let r=new Map,n,s=!1;return i.initialize=o=>i.ready?i.ready:(o&&(o.secureContexts=r,o.secureContextsListeners=[]),i.ready=new Promise((c,l)=>{async function u(){try{r.clear(),ol.clear();let f=0;for await(let d of yO.system.hdb_certificate.search([])){let _=d.certificate,h=new Ao(_);d.is_authority&&(h.asString=_,ol.set(h.subject,_))}for await(let d of yO.system.hdb_certificate.search([]))try{if(d.is_authority)continue;let _=e==="operations-api",h=d.is_self_signed?1:2;_&&d.uses?.includes?.("operations")&&(h+=1);let m=await wk(d.private_key_name),S=d.certificate,g=new Ao(S);if(ol.has(g.issuer)&&(S+=`
  `+ol.get(g.issuer)),!m||!S)throw new Error("Missing private key or certificate for secure server");let R={ciphers:d.ciphers,ticketKeys:ose(),availableCAs:ol,ca:t&&Array.from(ol.values()),cert:S,key:m,key_file:d.private_key_name,is_self_signed:d.is_self_signed};o&&(R.sessionIdContext=o.sessionIdContext);let E=sg.createSecureContext(R);E.name=d.name,E.options=R,E.quality=h,E.certificateAuthorities=Array.from(ol),E.certStart=S.toString().slice(0,100);let T=d.hostnames??vO(g);Array.isArray(T)||(T=[T]);let N;for(let v of T)if(v){v[0]==="*"&&(s=!0,v=v.slice(1)),v===NO()&&(h+=2),_k.isIP(v)&&(N=!0);let H=r.get(v)?.quality??0;h>H&&r.set(v,E)}else Aa.error("No hostname found for certificate at",sg.certificate);Aa.trace("Adding TLS",E.name,"for",o.ports||"client","cert named",d.name,"hostnames",T,"quality",h,"best quality",f),h>f&&(i.defaultContext=n=E,f=h,o&&(o.defaultContext=E))}catch(_){Aa.error("Error applying TLS for",d.name,_)}o?.secureContextsListeners.forEach(d=>d()),c(n)}catch(f){l(f)}}a(u,"updateTLS"),yO.system.hdb_certificate.subscribe({listener:a(()=>setTimeout(()=>u(),1500).unref(),"listener"),omitCurrent:!0}),u()})),i;function i(o,c){Aa.info("TLS requested for",o||"(no SNI)");let l=o;for(;;){let f=r.get(l);if(f)return Aa.debug("Found certificate for",o,f.certStart),f.updatedContext&&(f=f.updatedContext),c(null,f);if(s&&l){let d=l.indexOf(".",1);d<0?l="":l=l.slice(d)}else break}o?Aa.debug("No certificate found to match",o,"using the default certificate"):Aa.debug("No SNI, using the default certificate",n?.name);let u=n;u?u.updatedContext&&(u=u.updatedContext):Aa.info("No default certificate found"),c(null,u)}a(i,"SNICallback")}a(Nk,"createTLSSelector");async function wk(e){let t=ya.get(e);return!t&&e?await On.readFile(ar.join(Jn.get(al.ROOTPATH),gs.LICENSE_KEY_DIR_NAME,e),"utf8"):t}a(wk,"getPrivateKeyByName");async function Ik(){ba();let e=[];for await(let t of kr.search([]))e.push(t);return e}a(Ik,"listCertificates");async function pse(e){let t=IO(e,Xs.object({name:Xs.string().required(),certificate:Xs.string().required(),is_authority:Xs.boolean().required(),private_key:Xs.string(),hosts:Xs.array(),uses:Xs.array()}));if(t)throw new Ra(t.message);let{name:r,certificate:n,private_key:s,is_authority:i}=e,o=new Ao(n),c=!1,l=!1,u;for(let[h,m]of ya)!s&&!c&&o.checkPrivateKey(hk(m))&&(c=!0,u=h),s&&s===m&&(l=!0,u=h);if(!i&&!s&&!c)throw new Ra("A suitable private key was not found for this certificate");let f;if(!r){try{f=Ck(o)}catch(h){Tt.error(h)}if(f==null)throw new Ra("Error extracting certificate common name, please provide a name parameter")}let d=Ese(r??f);s&&!c&&!l&&(await On.writeFile(ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME,d+".pem"),s),ya.set(d,s));let _={name:r??f,certificate:n,is_authority:i,hosts:e.hosts,uses:e.uses};return(!i||i&&u||i&&s)&&(_.private_key_name=u??d+".pem"),e.ciphers&&(_.ciphers=e.ciphers),await Ku(_),"Successfully added certificate: "+d}a(pse,"addCertificate");function Ese(e){return e.replace(/[^a-z0-9\.]/gi,"-")}a(Ese,"sanitizeName");async function gse(e){let t=IO(e,Xs.object({name:Xs.string().required()}));if(t)throw new Ra(t.message);let{name:r}=e;ba();let n=await kr.get(r);if(!n)throw new Ra(r+" not found");let{private_key_name:s}=n;if(s){let i=Array.from(await kr.search([{attribute:"private_key_name",value:s}]));i.length===1&&i[0].name===r&&(Tt.info("Removing private key named",s),await On.remove(ar.join(Jn.getHdbBasePath(),gs.LICENSE_KEY_DIR_NAME,s)))}return await kr.delete(r),"Successfully removed "+r}a(gse,"removeCertificate");function Ck(e){let 
t=e.subject?.match(/CN=(.*)/)?.[1];return t||vO(e)[0]}a(Ck,"getPrimaryHostName");function vO(e){if(e.subjectAltName)return e.subjectAltName.split(",").map(r=>{let n=r.indexOf(":");if(r=r.slice(n+1),r=r.trim(),r[0]==='"')try{r=JSON.parse(r)}catch{}return r.indexOf("=")>-1?r.match(/CN=([^,]*)/)?.[1]:r}).filter(r=>r);let t=certObj.subject?.match(/CN=(.*)/)?.[1];return t?[t]:[]}a(vO,"hostnamesFromCert");async function Sse(e){if(e.bypass_auth!==!0)throw new Ra("Unauthorized","401");let t=IO(e,Xs.object({name:Xs.string().required()}));if(t)throw new Ra(t.message);let{name:r}=e;if(r===".jwtPrivate")return(await lk()).privateKey;if(r===".jwtPublic")return(await lk()).publicKey;if(ya.get(r))return ya.get(e.name);throw new Ra("Key not found")}a(Sse,"getKey")});var Xk={};Ue(Xk,{CONFIRMATION_STATUS_POSITION:()=>jk,NodeReplicationConnection:()=>Wu,OPERATION_REQUEST:()=>HO,RECEIVED_TIME_POSITION:()=>FO,RECEIVED_VERSION_POSITION:()=>kO,RECEIVING_STATUS_POSITION:()=>GO,RECEIVING_STATUS_RECEIVING:()=>Jk,RECEIVING_STATUS_WAITING:()=>Qk,SENDING_TIME_POSITION:()=>eh,createWebSocket:()=>hg,database_subscriptions:()=>wa,replicateOverWS:()=>th,table_update_listeners:()=>$O});async function hg(e,t){let{authorization:r,rejectUnauthorized:n}=t||{},s=et(),i;if(e==null)throw new TypeError(`Invalid URL: Expected a string URL for node "${s}" but received ${e}`);if(e.includes("wss://")){if(!xO){let l=(0,Kk.createTLSSelector)("operations-api"),u={secureContexts:null};await l.initialize(u),xO=u.secureContexts}if(i=xO.get(s),i&&ce.debug?.("Creating web socket for URL",e,"with certificate named:",i.name),!i&&n!==!1)throw new Error("Unable to find a valid certificate to use for replication to connect to "+e)}let o={};r&&(o.Authorization=r);let c={headers:o,localAddress:s?.startsWith("127.0")?s:void 0,servername:(0,Wk.isIP)(t?.serverName)?void 0:t?.serverName,noDelay:!0,highWaterMark:128*1024,rejectUnauthorized:n!==!1,secureContext:void 0};return i&&(fg?.caCount!==Ro.size&&(fg=Yk.createSecureContext({...i.options,ca:[...Ro,...i.options.availableCAs.values()]}),fg.caCount=Ro.size),c.secureContext=fg),new $k.WebSocket(e,"harperdb-replication-v1",c)}function th(e,t,r){let n=t.port||t.securePort,s=cl.pid%1e3+"-"+Vk.threadId+(n?"s:"+n:"c:"+t.url?.slice(-4))+" "+Math.random().toString().slice(2,3),i=0,o=Buffer.allocUnsafeSlow(1024),c=0,l=new DataView(o.buffer,0,1024),u=t.database,f=t.databaseSubscriptions||wa,d,_,h=!1,m=t.subscription;m?.then&&m.then(A=>m=A);let S=t.tables||u&&Xe()[u];if(!r){ce.error?.("No authorization provided"),cn(1008,"Unauthorized");return}let g=new Map,R=[],E=r.name;E&&t.connection&&(t.connection.nodeName=E);let T,N,v,H,X,W,$,se=6e4,z,fe=0,ue=0,Z=0,Ae=qk.default.get(x.REPLICATION_BLOBTIMEOUT)??12e4,me=new Map,ye=[],Ht=0,dt;if(t.url){let A=a(()=>{X&&ue===e._socket?.bytesRead&&Z===e._socket?.bytesWritten?e.terminate():(X=performance.now(),e.ping(),ue=e._socket?.bytesRead,Z=e._socket?.bytesWritten)},"send_ping");v=setInterval(A,Fk).unref(),A()}else Mn();e._socket?.setMaxListeners(200);function Mn(){clearTimeout(H),ue=e._socket?.bytesRead,Z=e._socket?.bytesWritten,H=setTimeout(()=>{ue===e._socket?.bytesRead&&Z===e._socket?.bytesWritten&&(ce.warn?.(`Timeout waiting for ping from ${E}, terminating connection and reconnecting`),e.terminate())},Fk*2).unref()}a(Mn,"resetPingTimer");function an(){return _||(_=eg(d,u,E)),_}a(an,"getSharedStatus"),u&&Qo(u);let Cr,Qd,ac=[],nA=[],sA,kt=[],Jd=[],Xd=[],iA=150,Cm=25,Zd=0,Ce=0,ef=!1,Ji,Pr,vn,cc;e.on("message",A=>{fe=performance.now();try{let y=A.dataView=new 
ll(A.buffer,A.byteOffset,A.byteLength);if(A[0]>127){let B=(0,Ze.decode)(A),[w,D,G]=B;switch(w){case Lk:{if(D){if(E){if(E!==D){ce.error?.(s,`Node name mismatch, expecting to connect to ${E}, but peer reported name as ${D}, disconnecting`),e.send((0,Ze.encode)([Yu])),cn(1008,"Node name mismatch");return}}else if(E=D,t.connection?.tentativeNode){let oe=t.connection.tentativeNode;oe.name=E,t.connection.tentativeNode=null,To(E,oe)}if(t.connection&&(t.connection.nodeName=E),ce.debug?.(s,"received node name:",E,"db:",u),!u)try{Qo(u=B[2]),u==="system"&&(Cr=Sa(t,(oe,V)=>{jo(V)&&nf(V)}),e.on("close",()=>{Cr?.remove()}))}catch(oe){ce.warn?.(s,"Error setting database",oe),e.send((0,Ze.encode)([Yu])),cn(1008,oe.message);return}zo()}break}case Hk:{ce.debug?.(s,"Received table definitions for",D.map(oe=>oe.table));for(let oe of D){let V=B[2];oe.database=V;let J;jo(V)&&(V==="system"?We[V]?.[oe.table]||(J=L(oe,We[V]?.[oe.table])):J=L(oe,We[V]?.[oe.table]),d||(d=J?.auditStore),S||(S=Xe()?.[V]))}break}case Yu:cn();break;case HO:try{let oe=r?.replicates||r?.subscribers||r?.name;server.operation(D,{user:r},!oe).then(V=>{Array.isArray(V)&&(V={results:V}),V.requestId=D.requestId,e.send((0,Ze.encode)([cg,V]))},V=>{e.send((0,Ze.encode)([cg,{requestId:D.requestId,error:V instanceof Error?V.toString():V}]))})}catch(oe){e.send((0,Ze.encode)([cg,{requestId:D.requestId,error:oe instanceof Error?oe.toString():oe}]))}break;case cg:let{resolve:Q,reject:j}=g.get(D.requestId);D.error?j(new Error(D.error)):Q(D),g.delete(D.requestId);break;case UO:let F=B[3];S||(u?ce.error?.(s,"No tables found for",u):ce.error?.(s,"Database name never received"));let ve=S[F];ve=L({table:F,database:u,attributes:D.attributes,schemaDefined:D.schemaDefined},ve),ac[G]={name:F,decoder:new Ze.Packr({useBigIntExtension:!0,randomAccessStructure:!0,freezeData:!0,typedStructs:D.typedStructs,structures:D.structures}),getEntry(oe){return ve.primaryStore.getEntry(oe)},rootStore:ve.primaryStore.rootStore};break;case Mk:cc=d?ZH(D,d):new Map,sA=B[2],ce.debug?.(s,`Acknowledged subscription request, receiving messages for nodes: ${sA}`);break;case vk:let Ee=G;Xd[Ee]=D;break;case Bk:an()[jk]=D,ce.trace?.(s,"received and broadcasting committed update",D),an().buffer.notify();break;case xk:T=D,m.send({type:"end_txn",localTime:T,remoteNodeIds:R});break;case lg:{let oe=B[1],{fileId:V,size:J,finished:de,error:pe}=oe,ae=me.get(V);ce.debug?.("Received blob",V,"has stream",!!ae,"connectedToBlob",!!ae?.connectedToBlob,"length",B[2].length,"finished",de),ae||(ae=new BO.PassThrough,ae.expectedSize=J,me.set(V,ae)),ae.lastChunk=Date.now();let Ie=B[2];Mt(Ie.byteLength,"bytes-received",`${E}.${u}`,"replication","blob");try{de?(pe?(ae.on("error",()=>{}),ae.destroy(new Error("Blob error: "+pe+" for record "+(ae.recordId??"unknown")+" from "+E))):ae.end(Ie),ae.connectedToBlob&&me.delete(V)):ae.write(Ie)}catch(He){ce.error?.(`Error receiving blob for ${ae.recordId} from ${E} and streaming to storage`,He),me.delete(V)}break}case Uk:{let oe=D,V;try{let J=B[3],de=nA[G]||(nA[G]=S[B[4]]);if(!de)return ce.warn?.("Unknown table id trying to handle record request",G);let pe=de.primaryStore.getBinaryFast(Symbol.for("structures")),ae=pe?.length;if(ae>0&&ae!==Ce){Ce=ae;let He=(0,Ze.decode)(pe);e.send((0,Ze.encode)([UO,{typedStructs:He.typed,structures:He.named},G,de.tableName]))}let Ie=de.primaryStore.getBinaryFast(J);if(Ie){let 
He=de.primaryStore.decoder.decode(Ie,{valueAsBuffer:!0}),De=He.value;He[Pc]&Ur&&(De=Buffer.from(De),Uf(()=>de.primaryStore.decoder.decode(Ie),rt=>lc(rt,J),de.primaryStore.rootStore)),V=(0,Ze.encode)([ag,oe,{value:De,expiresAt:He.expiresAt,version:He.version,residencyId:He.residencyId,nodeId:He.nodeId,user:He.user}])}else V=(0,Ze.encode)([ag,oe])}catch(J){V=(0,Ze.encode)([ag,oe,{error:J.message}])}e.send(V);break}case ag:{let{resolve:oe,reject:V,tableId:J,key:de}=g.get(B[1]),pe=B[2];if(pe?.error)V(new Error(pe.error));else if(pe){let ae;Tp(()=>{let Ie=ac[J].decoder.decode(pe.value);pe.value=Ie,pe.key=de,oe(pe)||ae&&setTimeout(()=>ae.forEach(Ep),6e4).unref()},d?.rootStore,Ie=>{let He=tf(Ie,de);return ae||(ae=[]),ae.push(He),He})}else oe();g.delete(B[1]);break}case Dk:{vn=D;let oe,V,J=!1;if(m){if(u!==m.databaseName&&!m.then){ce.error?.("Subscription request for wrong database",u,m.databaseName);return}}else m=f.get(u);if(ce.debug?.(s,"received subscription request for",u,"at",vn),!m){let Oe;m=new Promise(Ft=>{ce.debug?.("Waiting for subscription to database "+u),Oe=Ft}),m.ready=Oe,wa.set(u,m)}if(r.name)V=or().subscribe(r.name),V.then(async Oe=>{oe=Oe;for await(let Ft of oe){let Nt=Ft.value;if(!(Nt?.replicates===!0||Nt?.replicates?.receives||Nt?.subscriptions?.some(yr=>(yr.database||yr.schema)===u&&yr.publish!==!1))){J=!0,e.send((0,Ze.encode)([Yu])),cn(1008,`Unauthorized database subscription to ${u}`);return}}},Oe=>{ce.error?.(s,"Error subscribing to HDB nodes",Oe)});else if(!(r?.role?.permission?.super_user||r.replicates)){e.send((0,Ze.encode)([Yu])),cn(1008,`Unauthorized database subscription to ${u}`);return}if(Pr&&(ce.debug?.(s,"stopping previous subscription",u),Pr.emit("close")),vn.length===0)return;let de=vn[0],pe=a(Oe=>{if(Oe&&(de.replicateByDefault?!de.tables.includes(Oe.tableName):de.tables.includes(Oe.tableName)))return{table:Oe}},"tableToTableEntry"),ae={txnTime:0},Ie,He,De=1/0,rt,Un=a((Oe,Ft)=>{if(Oe.type==="end_txn"){ae.txnTime&&(o[i]!==66&&ce.error?.("Invalid encoding of message"),Xi(9),Xi(_g),p(rt=Ft),di()),i=c,ae.txnTime=0;return}let Nt=Oe.nodeId,yr=Oe.tableId,Rt=He[yr];if(!Rt&&(Rt=He[yr]=pe(m.tableById[yr]),!Rt))return ce.debug?.("Not subscribed to table",yr);let ls=Rt.table,wt=ls.primaryStore,Us=wt.encoder;(Oe.extendedType&Cp||!Us.typedStructs)&&(Us._mergeStructures(Us.getStructures()),Us.typedStructs&&(Us.lastTypedStructuresLength=Us.typedStructs.length));let Yl=Ie[Nt];if(!(Yl&&Yl.startTime<Ft&&(!Yl.endTime||Yl.endTime>Ft)))return dg&&ce.trace?.(s,"skipping replication update",Oe.recordId,"to:",E,"from:",Nt,"subscribed:",Ie),_P();dg&&ce.trace?.(s,"sending replication update",Oe.recordId,"to:",E,"from:",Nt,"subscribed:",Ie);let oA=Oe.version;ae.txnTime!==oA&&(ae.txnTime&&(dg&&ce.trace?.(s,"new txn time, sending queued txn",ae.txnTime),o[i]!==66&&ce.error?.("Invalid encoding of message"),di()),ae.txnTime=oA,i=c,p(oA));let fc=Oe.residencyId,aA=rf(fc,ls),Dm;if(aA&&!aA.includes(E)){let xs=rf(Oe.previousResidencyId,ls);if(xs&&!xs.includes(E)&&(Oe.type==="put"||Oe.type==="patch")||ls.getResidencyById)return _P();let sf=Oe.recordId;ce.trace?.(s,"sending invalidation",sf,E,"from",Nt);let of=0;fc&&(of|=Mc),Oe.previousResidencyId&&(of|=vc);let uA,Lm=null;for(let hP in ls.indices){if(!Lm){if(uA=Oe.getValue(wt,!0),!uA)break;Lm={}}Lm[hP]=uA[hP]}Dm=Dc(Oe.version,yr,sf,null,Nt,Oe.user,Oe.type==="put"||Oe.type==="patch"?"invalidate":Oe.type,Us.encode(Lm),of,fc,Oe.previousResidencyId,Oe.expiresAt)}function _P(){return ce.trace?.(s,"skipping audit 
record",Oe.recordId),W||(W=setTimeout(()=>{W=null,(rt||0)+kk/2<De&&(dg&&ce.trace?.(s,"sending skipped sequence update",De),e.send((0,Ze.encode)([xk,De])))},kk).unref()),new Promise(setImmediate)}a(_P,"skipAuditRecord");let cA=Us.typedStructs,lA=Us.structures;if((cA?.length!=Rt.typed_length||lA?.length!=Rt.structure_length)&&(Rt.typed_length=cA?.length,Rt.structure_length=lA.length,ce.debug?.(s,"send table struct",Rt.typed_length,Rt.structure_length),Rt.sentName||(Rt.sentName=!0),e.send((0,Ze.encode)([UO,{typedStructs:cA,structures:lA,attributes:ls.attributes,schemaDefined:ls.schemaDefined},yr,Rt.table.tableName]))),fc&&!Jd[fc]&&(e.send((0,Ze.encode)([vk,aA,fc])),Jd[fc]=!0),Dm)Xi(Dm.length),K(Dm);else{let xs=Oe.encoded;Oe.extendedType&Ur&&Uf(()=>Oe.getValue(wt),of=>lc(of,Oe.recordId),wt.rootStore);let sf=xs[0]===66?8:0;Xi(xs.length-sf),K(xs,sf),ce.trace?.("wrote record",Oe.recordId,"length:",xs.length)}return e._socket.writableNeedDrain?new Promise(xs=>{ce.debug?.(`Waiting for remote node ${E} to allow more commits ${e._socket.writableNeedDrain?"due to network backlog":"due to requested flow directive"}`),e._socket.once("drain",xs)}):Ht>Cm?new Promise(xs=>{dt=xs}):new Promise(setImmediate)},"sendAuditRecord"),di=a(()=>{c-i>8?(e.send(o.subarray(i,c)),ce.debug?.(s,"Sent message, size:",c-i),Mt(c-i,"bytes-sent",`${E}.${u}`,"replication","egress")):ce.debug?.(s,"skipping empty transaction")},"sendQueuedData");Pr=new qO.EventEmitter,Pr.once("close",()=>{J=!0,oe?.end()});for(let{startTime:Oe}of vn)Oe<De&&(De=Oe);(V||Promise.resolve()).then(async()=>{m=await m,d=m.auditStore,He=m.tableById.map(pe),Ie=[];for(let{name:Ft,startTime:Nt,endTime:yr}of vn){let Rt=ZE(Ft,d);ce.debug?.("subscription to",Ft,"using local id",Rt,"starting",Nt),Ie[Rt]={startTime:Nt,endTime:yr}}nf(u),Cr||(Cr=ul(Ft=>{Ft.databaseName===u&&nf(u)}),Qd=rh(Ft=>{Ft===u&&(e.send((0,Ze.encode)([Yu])),cn())}),e.on("close",()=>{Cr?.remove(),Qd?.remove()})),e.send((0,Ze.encode)([Mk,Y_(m.auditStore),vn.map(({name:Ft})=>Ft)]));let Oe=!0;do{isFinite(De)||(ce.warn?.("Invalid sequence id "+De),cn(1008,"Invalid sequence id"+De));let Ft;if(Oe&&!J&&(Oe=!1,De===0)){let Nt=De,yr=mg(d);for(let Rt in S){if(!pe(Rt))continue;let ls=S[Rt];ce.warn?.(`Fully copying ${Rt} table to ${E}`);for(let wt of ls.primaryStore.getRange({snapshot:!1,versions:!0})){if(J)return;if(wt.localTime>=De){ce.trace?.(s,"Copying record from",u,Rt,wt.key,wt.localTime),Nt=Math.max(wt.localTime,Nt),Ft=!0,an()[eh]=1;let Us=Dc(wt.version,ls.tableId,wt.key,null,yr,null,"put",Uf(()=>ls.primaryStore.encoder.encode(wt.value),Yl=>lc(Yl,wt.key)),wt.metadataFlags&-256,wt.residencyId,null,wt.expiresAt);await Un({recordId:wt.key,tableId:ls.tableId,type:"put",getValue(){return wt.value},encoded:Us,version:wt.version,residencyId:wt.residencyId,nodeId:yr,extendedType:wt.metadataFlags},wt.localTime)}}}Ft&&Un({type:"end_txn"},De),an()[eh]=0,De=Nt}for(let{key:Nt,value:yr}of d.getRange({start:De||1,exclusiveStart:!0,snapshot:!1})){if(J)return;let Rt=St(yr);ce.debug?.("sending audit record",new Date(Nt)),an()[eh]=Nt,De=Nt,await Un(Rt,Nt),Pr.startTime=Nt,Ft=!0}Ft&&Un({type:"end_txn"},De),an()[eh]=0,await Zk(d)}while(!J)}).catch(Oe=>{ce.error?.(s,"Error handling subscription to node",Oe),cn(1008,"Error handling subscription to node")});break}}return}y.position=8;let I=!0,b,U;do{an();let B=y.readInt();if(B===9&&y.getUint8(y.position)==_g){y.position++,T=U=y.readFloat64(),_[kO]=T,_[FO]=Date.now(),_[GO]=Qk,ce.trace?.("received remote sequence update",T,u);break}let 
w=y.position,D=St(A,w,w+B),G=ac[D.tableId];G||ce.error?.(`No table found with an id of ${D.tableId}`);let Q;D.residencyId&&(Q=Xd[D.residencyId],ce.trace?.(s,"received residency list",Q,D.type,D.recordId));try{let j=D.recordId;Tp(()=>{b={table:G.name,id:j,type:D.type,nodeId:cc.get(D.nodeId),residencyList:Q,timestamp:D.version,value:D.getValue(G),user:D.user,beginTxn:I,expiresAt:D.expiresAt}},d?.rootStore,F=>tf(F,j))}catch(j){throw j.message+="typed structures for current decoder"+JSON.stringify(G.decoder.typedStructs),j}I=!1,ce.trace?.(s,"received replication message",D.type,"id",b.id,"version",new Date(D.version),"nodeId",b.nodeId),_[kO]=D.version,_[FO]=Date.now(),_[GO]=Jk,m.send(b),y.position=w+B}while(y.position<A.byteLength);Zd++,Mt(A.byteLength,"bytes-received",`${E}.${u}.${b?.table||"unknown_table"}`,"replication","ingest"),Zd>iA&&!ef&&(ef=!0,e.pause(),ce.debug?.(`Commit backlog causing replication back-pressure, requesting that ${E} pause replication`)),m.send({type:"end_txn",localTime:T,remoteNodeIds:R,async onCommit(){if(b){let B=Date.now()-b.timestamp;Mt(B,"replication-latency",E+"."+u+"."+b.table,b.type,"ingest")}Zd--,ef&&(ef=!1,e.resume(),ce.debug?.(`Replication resuming ${E}`)),ye.length>0&&await Promise.all(ye),ce.trace?.("All blobs finished"),!N&&U&&(ce.trace?.(s,"queuing confirmation of a commit at",U),setTimeout(()=>{e.send((0,Ze.encode)([Bk,N])),ce.trace?.(s,"sent confirmation of a commit at",N),N=null},Ase)),N=U,ce.debug?.("last sequence committed",new Date(U),u)}})}catch(y){ce.error?.(s,"Error handling incoming replication message",y)}}),e.on("ping",Mn),e.on("pong",()=>{t.connection&&(t.connection.latency=performance.now()-X,t.isSubscriptionConnection&&il({name:E,database:u,url:t.url,latency:t.connection.latency})),X=null}),e.on("close",(A,y)=>{clearInterval(v),clearTimeout(H),clearInterval($),Pr&&Pr.emit("close"),Ji&&Ji.end();for(let[I,{reject:b}]of g)b(new Error(`Connection closed ${y?.toString()} ${A}`));ce.debug?.(s,"closed",A,y?.toString())});function cn(A,y){e.isFinished=!0,e.close(A,y),t.connection?.emit("finished")}a(cn,"close");let Dr=new Set;async function lc(A,y){let I=gp(A);if(Dr.has(I)){ce.debug?.("Blob already being sent",I);return}Dr.add(I);try{let b;Ht++;for await(let U of A.stream())b&&(ce.debug?.("Sending blob chunk",I,"length",b.length),e.send((0,Ze.encode)([lg,{fileId:I,size:A.size},b]))),b=U,e._socket.writableNeedDrain&&(ce.debug?.("draining",I),await new Promise(B=>e._socket.once("drain",B)),ce.debug?.("drained",I)),Mt(U.length,"bytes-sent",`${E}.${u}`,"replication","blob");ce.debug?.("Sending final blob chunk",I,"length",b.length),e.send((0,Ze.encode)([lg,{fileId:I,size:A.size,finished:!0},b]))}catch(b){ce.warn?.("Error sending blob",b,"blob id",I,"for record",y),e.send((0,Ze.encode)([lg,{fileId:I,finished:!0,error:b.toString()},Buffer.alloc(0)]))}finally{Dr.delete(I),Ht--,Ht<Cm&&dt?.()}}a(lc,"sendBlobs");function tf(A,y){let I=gp(A),b=me.get(I);ce.debug?.("Received transaction for record",y,"with blob",I,"has stream",!!b,"ended",!!b?.writableEnded),b?b.writableEnded&&me.delete(I):(b=new BO.PassThrough,me.set(I,b)),b.connectedToBlob=!0,b.lastChunk=Date.now(),b.recordId=y,A.size===void 0&&b.expectedSize&&(A.size=b.expectedSize);let U=b.blob??createBlob(b,A);b.blob=U;let B=no(()=>vf(U).saving,m.auditStore?.rootStore);return B&&(B.blobId=I,ye.push(B),B.finally(()=>{ce.debug?.(`Finished receiving blob stream ${I}`),ye.splice(ye.indexOf(B),1)})),U}a(tf,"receiveBlobs");function 
zo(){if(h||(h=!0,t.connection?.on("subscriptions-updated",zo)),t.connection?.isFinished)throw new Error("Can not make a subscription request on a connection that is already closed");let A=new Map;d||(d=m?.auditStore);try{for(let b of m?.dbisDB?.getRange({start:Symbol.for("seq"),end:[Symbol.for("seq"),Buffer.from([255])]})||[])for(let U of b.value.nodes||[])U.lastTxnTime>(A.get(U.id)??0)&&A.set(U.id,U.lastTxnTime)}catch(b){if(!b.message.includes("Can not re"))throw b}let y=t.connection?.nodeSubscriptions?.[0];R=[];let I=t.connection?.nodeSubscriptions?.map((b,U)=>{let B=[],{replicateByDefault:w}=b;if(b.subscriptions){for(let j of b.subscriptions)if(j.subscribe&&(j.schema||j.database)===u){let F=j.table;S?.[F]?.replicate!==!1&&B.push(F)}w=!1}else for(let j in S)(w?S[j].replicate===!1:S[j].replicate)&&B.push(j);let D=d&&ZE(b.name,d),G=m?.dbisDB?.get([Symbol.for("seq"),D])??1,Q=Math.max(G?.seqId??1,(typeof b.start_time=="string"?new Date(b.start_time).getTime():b.start_time)??1);if(ce.debug?.("Starting time recorded in db",b.name,D,u,G?.seqId,"start time:",Q,new Date(Q)),y!==b){let j=d&&ZE(y.name,d),F=m?.dbisDB?.get([Symbol.for("seq"),j])??1;for(let ve of F?.nodes||[])ve.name===b.name&&(Q=ve.seqId,ce.debug?.("Using sequence id from proxy node",y.name,Q))}if(D===void 0?ce.warn("Starting subscription request from node",b,"but no node id found"):R.push(D),A.get(D)>Q&&(Q=A.get(D),ce.debug?.("Updating start time from more recent txn recorded",y.name,Q)),Q===1&&ug)try{new URL(ug).hostname===b.name&&E===b.name?(ce.warn?.(`Requesting full copy of database ${u} from ${ug}`),Q=0):Q=Date.now()-6e4}catch(j){ce.error?.("Error parsing leader URL",ug,j)}return ce.trace?.(s,"defining subscription request",b.name,u,new Date(Q)),{name:b.name,replicateByDefault:w,tables:B,startTime:Q,endTime:b.end_time}});if(I)if(ce.debug?.(s,"sending subscription request",I,m?.dbisDB?.path),clearTimeout(z),I.length>0)e.send((0,Ze.encode)([Dk,I]));else{let b=a(()=>{let U=performance.now();z=setTimeout(()=>{fe<=U?cn(1008,"Connection has no subscriptions and is no longer used"):b()},se).unref()},"schedule_close");b()}}a(zo,"sendSubscriptionRequestUpdate");function rf(A,y){if(!A)return;let I=kt[A];return I||(I=y.getResidencyRecord(A),kt[A]=I),I}a(rf,"getResidence");function jo(A){return!(Na&&Na!="*"&&!Na[A]&&!Na.includes?.(A)&&!Na.some?.(y=>y.name===A))}a(jo,"checkDatabaseAccess");function Qo(A){if(m=m||f.get(A),!jo(A))throw new Error(`Access to database "${A}" is not permitted`);m||ce.warn?.(`No database named "${A}" was declared and registered`),d=m?.auditStore,S||(S=Xe()?.[A]);let y=et();if(y===E)throw y?new Error("Should not connect to self",y):new Error("Node name not defined");return Pm(y,A),!0}a(Qo,"setDatabase");function Pm(A,y){let I=Xe()?.[y],b=[];for(let U in I){let B=I[U];b.push({table:U,schemaDefined:B.schemaDefined,attributes:B.attributes.map(w=>({name:w.name,type:w.type,isPrimaryKey:w.isPrimaryKey}))})}ce.trace?.("Sending database info for node",A,"database name",y),e.send((0,Ze.encode)([Lk,A,y,b]))}a(Pm,"sendNodeDBName");function nf(A){let y=Xe()?.[A],I=[];for(let b in y){if(vn&&!vn.some(B=>B.replicateByDefault?!B.tables.includes(b):B.tables.includes(b)))continue;let U=y[b];I.push({table:b,schemaDefined:U.schemaDefined,attributes:U.attributes.map(B=>({name:B.name,type:B.type,isPrimaryKey:B.isPrimaryKey}))})}e.send((0,Ze.encode)([Hk,I,A]))}a(nf,"sendDBSchema"),$=setInterval(()=>{for(let[A,y]of me)y.lastChunk+Ae<Date.now()&&(ce.warn?.(`Timeout waiting for blob stream to finish ${A} for record ${y.recordId??"unknown"} 
from ${E}`),me.delete(A),y.end())},Ae).unref();let uc=1,dc=[];return{end(){Ji&&Ji.end(),Pr&&Pr.emit("close")},getRecord(A){let y=uc++;return new Promise((I,b)=>{let U=[Uk,y,A.table.tableId,A.id];dc[A.table.tableId]||(U.push(A.table.tableName),dc[A.table.tableId]=!0),e.send((0,Ze.encode)(U)),fe=performance.now(),g.set(y,{tableId:A.table.tableId,key:A.id,resolve(B){let{table:w,entry:D}=A;if(I(B),B)return w._recordRelocate(D,B)},reject:b})})},sendOperation(A){let y=uc++;return A.requestId=y,e.send((0,Ze.encode)([HO,A])),new Promise((I,b)=>{g.set(y,{resolve:I,reject:b})})}};function Xi(A){O(5),A<128?o[c++]=A:A<16384?(l.setUint16(c,A|32768),c+=2):A<1056964608?(l.setUint32(c,A|3221225472),c+=4):(o[c]=255,l.setUint32(c+1,A),c+=5)}function K(A,y=0,I=A.length){let b=I-y;O(b),A.copy(o,c,y,I),c+=b}function p(A){O(8),l.setFloat64(c,A),c+=8}function O(A){if(A+16>o.length-c){let y=Buffer.allocUnsafeSlow(c+A-i+65536>>10<<11);o.copy(y,0,i,c),c=c-i,i=0,o=y,l=new DataView(o.buffer,0,o.length)}}function L(A,y){let I=A.database??"data";if(I!=="data"&&!We[I]){ce.warn?.("Database not found",A.database);return}y||(y={});let b=y.schemaDefined,U=!1,B=A.schemaDefined,w=y.attributes||[];for(let D=0;D<A.attributes?.length;D++){let G=A.attributes[D],Q=w.find(j=>j.name===G.name);(!Q||Q.type!==G.type)&&(b?ce.error?.(`Schema for '${u}.${A.table}' is defined locally, but attribute '${G.name}: ${G.type}' from '${E}' does not match local attribute ${Q?"'"+Q.name+": "+Q.type+"'":"which does not exist"}`):(U=!0,B||(G.indexed=!0),Q?w[w.indexOf(Q)]=G:w.push(G)))}return U?(ce.debug?.("(Re)creating",A),_t({table:A.table,database:A.database,schemaDefined:A.schemaDefined,attributes:w,...y})):y}}var qk,Ze,$k,Vk,ce,qO,Kk,Yk,cl,Wk,BO,zk,Dk,Lk,Mk,Yu,vk,UO,Uk,ag,HO,cg,xk,Bk,Hk,lg,jk,kO,FO,eh,GO,Qk,Jk,Tse,ug,$O,wa,dg,kk,Ase,Fk,xO,fg,Gk,Wu,VO=Re(()=>{xe();Ai();gO();KO();Es();qk=M(le());k();fu();Ze=require("msgpackr"),$k=require("ws"),Vk=require("worker_threads"),ce=M(Gs());J_();qO=require("events"),Kk=M(ei()),Yk=M(require("node:tls"));sl();cl=M(require("node:process")),Wk=require("node:net");Li();gn();BO=require("node:stream"),zk=M(require("minimist")),Dk=129,Lk=140,Mk=141,Yu=142,vk=130,UO=132,Uk=133,ag=134,HO=136,cg=137,xk=143,Bk=144,Hk=145,lg=146,jk=0,kO=1,FO=2,eh=3,GO=4,Qk=0,Jk=1,Tse=(0,zk.default)(cl.argv),ug=Tse.HDB_LEADER_URL??cl.env.HDB_LEADER_URL,$O=new Map,wa=new Map,dg=!0,kk=300,Ase=2,Fk=3e4;a(hg,"createWebSocket");Gk=500,Wu=class extends qO.EventEmitter{constructor(r,n,s,i,o){super();this.url=r;this.subscription=n;this.databaseName=s;this.nodeName=i;this.authorization=o;this.nodeName=this.nodeName??Js(r)}static{a(this,"NodeReplicationConnection")}socket;startTime;retryTime=Gk;retries=0;isConnected=!0;isFinished=!1;nodeSubscriptions;latency=0;replicateTablesByDefault;session;sessionResolve;sessionReject;async connect(){this.session||this.resetSession();let r=[];this.socket=await hg(this.url,{serverName:this.nodeName,authorization:this.authorization});let n;ce.debug?.(`Connecting to ${this.url}, db: ${this.databaseName}, process ${cl.pid}`),this.socket.on("open",()=>{this.socket._socket.unref(),ce[this.isConnected?"info":"warn"]?.(`Connected to ${this.url}, db: ${this.databaseName}`),this.retries=0,this.retryTime=Gk,this.nodeSubscriptions&&il({name:this.nodeName,database:this.databaseName,url:this.url}),this.isConnected=!0,n=th(this.socket,{database:this.databaseName,subscription:this.subscription,url:this.url,connection:this,isSubscriptionConnection:this.nodeSubscriptions!==void 
0},{replicates:!0}),this.sessionResolve(n)}),this.socket.on("error",s=>{s.code==="SELF_SIGNED_CERT_IN_CHAIN"?(ce.warn?.(`Can not connect to ${this.url}, this server does not have a certificate authority for the certificate provided by ${this.url}`),s.isHandled=!0):s.code!=="ECONNREFUSED"&&(s.code==="UNABLE_TO_VERIFY_LEAF_SIGNATURE"?ce.error?.(`Can not connect to ${this.url}, the certificate provided by ${this.url} is not trusted, this node needs to be added to the cluster, or a certificate authority needs to be added`):ce.error?.(`Error in connection to ${this.url} due to ${s.message}`)),this.sessionReject(s)}),this.socket.on("close",(s,i)=>{if(this.isConnected&&(this.nodeSubscriptions&&$u({name:this.nodeName,database:this.databaseName,url:this.url,finished:this.socket.isFinished}),this.isConnected=!1),this.removeAllListeners("subscriptions-updated"),this.socket.isFinished){this.isFinished=!0,n?.end(),this.emit("finished");return}if(++this.retries%20===1){let o=i?.toString();ce.warn?.(`${n?"Disconnected from":"Failed to connect to"} ${this.url} (db: "${this.databaseName}"), due to ${o?'"'+o+'" ':""}(code: ${s})`)}n=null,this.resetSession(),setTimeout(()=>{this.connect()},this.retryTime).unref(),this.retryTime+=this.retryTime>>8})}resetSession(){this.session=new Promise((r,n)=>{this.sessionResolve=r,this.sessionReject=n})}subscribe(r,n){this.nodeSubscriptions=r,this.replicateTablesByDefault=n,this.emit("subscriptions-updated",r)}unsubscribe(){this.socket.isFinished=!0,this.socket.close(1008,"No longer subscribed")}getRecord(r){return this.session.then(n=>n.getRecord(r))}};a(th,"replicateOverWS")});var Oa={};Ue(Oa,{clearThisNodeName:()=>Pse,disableReplication:()=>Ose,enabled_databases:()=>Na,forEachReplicatedDatabase:()=>Sa,getThisNodeId:()=>mg,getThisNodeName:()=>et,getThisNodeUrl:()=>Ta,hostnameToUrl:()=>Tg,lastTimeInAuditStore:()=>W_,monitorNodeCAs:()=>aF,replicateOperation:()=>Lse,replication_certificate_authorities:()=>Ro,sendOperationToNode:()=>nh,servers:()=>yse,setReplicator:()=>lF,start:()=>bse,startOnMainThread:()=>RO,subscribeToNode:()=>Q_,unsubscribeFromNode:()=>ng,urlToNodeName:()=>Js});function bse(e){if(e.port||(e.port=Ss.default.get(x.OPERATIONSAPI_NETWORK_PORT)),e.securePort||(e.securePort=Ss.default.get(x.OPERATIONSAPI_NETWORK_SECUREPORT)),!et())throw new Error("Can not load replication without a url (see replication.url in the config)");let t=new Map;for(let s of j_(e))t.set(Js(s.url),s);Nse(e),e={mtls:!0,isOperationsServer:!0,maxPayload:10*1024*1024*1024,...e};let r=Ye.ws(async(s,i,o,c)=>{if(i.headers.get("sec-websocket-protocol")!=="harperdb-replication-v1")return c(s,i,o);await o,s._socket.unref(),th(s,e,i?.user),s.on("error",l=>{l.code!=="ECONNREFUSED"&&cr.error("Error in connection to "+this.url,l.message)})},e);e.runFirst=!0,Ye.http((s,i)=>{if(s.isWebSocket&&s.headers.get("Sec-WebSocket-Protocol")==="harperdb-replication-v1"){!s.authorized&&s._nodeRequest.socket.authorizationError&&cr.error(`Incoming client connection from ${s.ip} did not have valid certificate, you may need turn on enableRootCAs in the config if you are using a publicly signed certificate, or add the CA to the server's trusted CAs`,s._nodeRequest.socket.authorizationError);let o=or().primaryStore;if(s.authorized&&s.peerCertificate.subject){let c=s.peerCertificate.subject,l=c&&(o.get(c.CN)||t.get(c.CN));if(l)if(l?.revoked_certificates?.includes(s.peerCertificate.serialNumber)){cr.warn("Revoked certificate used in attempt to connect to node",l.name,"certificate serial 
number",s.peerCertificate.serialNumber);return}else s.user=l;else cr.warn(`No node found for certificate common name ${c.CN}, available nodes are ${Array.from(o.getRange({}).filter(({value:u})=>u).map(({key:u})=>u)).join(", ")} and routes ${Array.from(t.keys()).join(", ")}, connection will require credentials.`)}else{let c=o.get(s.ip)||t.get(s.ip);c?s.user=c:cr.warn(`No node found for IP address ${s.ip}, available nodes are ${Array.from(new Set([...o.getKeys(),...t.keys()])).join(", ")}, connection will require credentials.`)}}return i(s)},e);let n=[];for(let s of r)if(s.secureContexts){let i=a(()=>{let o=new Set(s.secureContexts.values());s.defaultContext&&o.add(s.defaultContext);for(let c of o)try{let l=Array.from(Ro);c.options.availableCAs&&l.push(...c.options.availableCAs.values());let u={...c.options,ca:l};c.updatedContext=Sg.createSecureContext(u)}catch(l){cr.error("Error creating replication TLS config",l)}},"updateContexts");s.secureContextsListeners.push(i),n.push(i),Ss.default.get(x.REPLICATION_ENABLEROOTCAS)!==!1&&i()}aF(()=>{for(let s of n)s()})}function aF(e){let t=0;Gu(r=>{r?.ca&&(Ro.add(r.ca),Ro.size!==t&&(t=Ro.size,e?.()))})}function Ose(e=!0){oF=e}function Nse(e){oF||(Xe(),Na=e.databases,Sa(e,(t,r)=>{if(!t){let n=e.databaseSubscriptions||wa;for(let[s,i]of Eg){let o=i.get(r);o&&(o.subscribe([],!1),i.delete(r))}n.delete(r);return}for(let n in t){let s=t[n];lF(r,s,e),$O.get(s)?.forEach(i=>i(s))}}))}function lF(e,t,r){if(!t)return console.error(`Attempt to replicate non-existent table ${t.name} from database ${e}`);if(t.replicate===!1||t.sources?.some(s=>s.isReplicator))return;let n;t.sourcedFrom(class cF extends Kr{static{a(this,"Replicator")}static connection;static subscription;static async subscribe(){let i=r.databaseSubscriptions||wa,o=i.get(e),c=o?.tableById||[];c[t.tableId]=t;let l=o?.ready;if(cr.trace("Setting up replicator subscription to database",e),!o?.auditStore)return this.subscription=o=new kn,i.set(e,o),o.tableById=c,o.auditStore=t.auditStore,o.dbisDB=t.dbisDB,o.databaseName=e,l&&l(o),o;this.subscription=o}static subscribeOnThisThread(i,o){return!0}static async load(i){if(i){let o=i.residencyId,c=i.residency||t.dbisDB.get([Symbol.for("residency_by_id"),o]);if(c){let l,u=new Set;do{let f;for(let _ of c){if(_===Ye.hostname)continue;let h=Ise(_,cF.subscription,e);h?.isConnected&&!u.has(h)&&(!f||h.latency<f.latency)&&(f=h)}if(!f)throw l||new sF.ServerError("No connection to any other nodes are available",502);let d={requestId:Rse++,table:t,entry:i,id:i.key};u.add(f);try{return await f.getRecord(d)}catch(_){if(f.isConnected)throw _;cr.warn("Error in load from node",gg,_),l||(l=_)}}while(!0)}}}static isReplicator=!0},{intermediateSource:!0})}function wse(e,t,r,n,s){let i=Eg.get(e);i||(i=new Map,Eg.set(e,i));let o=i.get(r);if(o)return o;if(t)return i.set(r,o=new Wu(e,t,r,n,s)),o.connect(),o.once("finished",()=>i.delete(r)),o}function Ise(e,t,r){let n=eF.get(e);n||(n=new Map,eF.set(e,n));let s=n.get(r);if(s)return s;let i=or().primaryStore.get(e);return i?.url&&(s=new Wu(i.url,t,r,e,i.authorization),n.set(r,s),s.connect(),s.once("finished",()=>n.delete(r))),s}async function nh(e,t,r){r||(r={}),r.serverName=e.name;let n=await hg(e.url,r),s=th(n,{},{});return new Promise((i,o)=>{n.on("open",()=>{i(s.sendOperation(t))}),n.on("error",c=>{o(c)}),n.on("close",c=>{cr.info("Sending operation connection to "+e.url+" closed",c)})}).finally(()=>{n.close()})}function Q_(e){try{iF.isMainThread&&cr.trace("Subscribing on main thread (should not happen in multi-threaded 
instance)",e.nodes[0].url,e.database);let t=wa.get(e.database);if(!t){let n;t=new Promise(s=>{cr.info("Waiting for subscription to database "+e.database),n=s}),t.ready=n,wa.set(e.database,t)}let r=wse(e.nodes[0].url,t,e.database,e.nodes[0].name,e.nodes[0].authorization);e.nodes[0].name===void 0?r.tentativeNode=e.nodes[0]:r.nodeName=e.nodes[0].name,r.subscribe(e.nodes.filter(n=>z_(n,e.database)),e.replicateByDefault)}catch(t){cr.error("Error in subscription to node",e.nodes[0]?.url,t)}}async function ng({name:e,url:t,database:r}){cr.trace("Unsubscribing from node",e,t,r,"nodes",Array.from(or().primaryStore.getRange({})));let n=Eg.get(t);if(n){let s=n.get(r);s&&(s.unsubscribe(),n.delete(r))}}function Cse(){if(YO!==void 0)return YO;let e=Ss.default.get(x.OPERATIONSAPI_TLS_CERTIFICATE)||Ss.default.get(x.TLS_CERTIFICATE);if(e)return YO=new rF.X509Certificate((0,nF.readFileSync)(e)).subject?.match(/CN=(.*)/)?.[1]??null}function et(){return gg||(gg=Ss.default.get("replication_hostname")??Js(Ss.default.get("replication_url"))??Cse()??tF("operationsapi_network_secureport")??tF("operationsapi_network_port")??"127.0.0.1")}function Pse(){gg=void 0}function tF(e){let t=Ss.default.get(e),r=t?.lastIndexOf?.(":");if(r>0)return t.slice(0,r)}function pg(e){let t=Ss.default.get(e),r=t?.lastIndexOf?.(":");return r>0?+t.slice(r+1).replace(/[\[\]]/g,""):+t}function mg(e){return Y_(e)?.[et()]}function Ta(){let e=Ss.default.get("replication_url");return e||Tg(et())}function Tg(e){let t=pg("replication_port");if(t)return`ws://${e}:${t}`;if(t=pg("replication_secureport"),t)return`wss://${e}:${t}`;if(t=pg("operationsapi_network_port"),t)return`ws://${e}:${t}`;if(t=pg("operationsapi_network_secureport"),t)return`wss://${e}:${t}`}function Js(e){if(e)return new URL(e).hostname}function Sa(e,t){for(let n of Object.getOwnPropertyNames(We))r(n);return rh(n=>{r(n)}),ul((n,s)=>{r(n.databaseName)});function r(n){let s=We[n];cr.trace("Checking replication status of ",n,e?.databases),e?.databases===void 0||e.databases==="*"||e.databases.includes(n)||e.databases.some?.(i=>i.name===n)||!s?t(s,n,!0):Dse(n)&&t(s,n,!1)}a(r,"forDatabase")}function Dse(e){let t=We[e];for(let r in t)if(t[r].replicate)return!0}function W_(e){for(let t of e.getKeys({limit:1,reverse:!0}))return t}async function Lse(e){let t={message:""};if(e.replicated){e.replicated=!1,cr.trace?.("Replicating operation",e.operation,"to nodes",Ye.nodes.map(n=>n.name));let r=await Promise.allSettled(Ye.nodes.map(n=>nh(n,e)));t.replicated=r.map((n,s)=>{let i=n.status==="rejected"?{status:"failed",reason:n.reason.toString()}:n.value;return i.node=Ye.nodes[s]?.name,i})}return t}var Ss,cr,rF,nF,Sg,sF,iF,oF,Rse,yse,Ro,Na,Eg,eF,YO,gg,Es=Re(()=>{xe();ia();Xl();VO();Mr();Ss=M(le()),cr=M(ee()),rF=require("crypto"),nF=require("fs");J_();sl();k();gO();Sg=M(require("node:tls")),sF=M(he()),iF=require("worker_threads"),Rse=1,yse=[],Ro=Ss.default.get(x.REPLICATION_ENABLEROOTCAS)!==!1?new Set(Sg.rootCertificates):new Set;a(bse,"start");a(aF,"monitorNodeCAs");a(Ose,"disableReplication");a(Nse,"assignReplicationSource");a(lF,"setReplicator");Eg=new Map;a(wse,"getSubscriptionConnection");eF=new Map;a(Ise,"getRetrievalConnectionByName");a(nh,"sendOperationToNode");a(Q_,"subscribeToNode");a(ng,"unsubscribeFromNode");a(Cse,"getCommonNameFromCert");a(et,"getThisNodeName");a(Pse,"clearThisNodeName");Object.defineProperty(Ye,"hostname",{get(){return 
et()}});a(tF,"getHostFromListeningPort");a(pg,"getPortFromListeningPort");a(mg,"getThisNodeId");Ye.replication={getThisNodeId:mg,exportIdMapping:Y_};a(Ta,"getThisNodeUrl");a(Tg,"hostnameToUrl");a(Js,"urlToNodeName");a(Sa,"forEachReplicatedDatabase");a(Dse,"hasExplicitlyReplicatedTable");a(W_,"lastTimeInAuditStore");a(Lse,"replicateOperation")});var ih=C((ZIe,hF)=>{"use strict";var zu=RH(),{validateBySchema:sh}=it(),{common_validators:ju,schema_regex:WO}=Oi(),lr=require("joi"),Mse=ee(),vse=require("uuid").v4,yg=po(),Qu=(k(),P(q)),Use=require("util"),Ia=Kn(),{handleHDBError:yo,hdb_errors:xse,ClientError:dl}=he(),{HDB_ERROR_MSGS:Ag,HTTP_STATUS_CODES:bo}=xse,{SchemaEventMsg:bg}=Ys(),uF=ir(),{getDatabases:Bse}=(xe(),P(ct)),{transformReq:Ju}=ie(),{replicateOperation:dF}=(Es(),P(Oa)),{cleanupOrphans:XIe}=(gn(),P(uu)),Rg=lr.string().min(1).max(ju.schema_length.maximum).pattern(WO).messages({"string.pattern.base":"{:#label} "+ju.schema_format.message}),Hse=lr.string().min(1).max(ju.schema_length.maximum).pattern(WO).messages({"string.pattern.base":"{:#label} "+ju.schema_format.message}).required(),kse=lr.string().min(1).max(ju.schema_length.maximum).pattern(WO).messages({"string.pattern.base":"{:#label} "+ju.schema_format.message,"any.required":"'primary_key' is required","string.base":"'primary_key' must be a string"}).required();hF.exports={createSchema:Fse,createSchemaStructure:fF,createTable:Gse,createTableStructure:_F,createAttribute:Yse,dropSchema:qse,dropTable:$se,dropAttribute:Vse,getBackup:Wse,cleanupOrphanBlobs:zse};async function Fse(e){let t=await fF(e);return yg.signalSchemaChange(new bg(process.pid,e.operation,e.schema)),t}a(Fse,"createSchema");async function fF(e){let t=sh(e,lr.object({database:Rg,schema:Rg}));if(t)throw new dl(t.message);if(Ju(e),!await zu.checkSchemaExists(e.schema))throw yo(new Error,Ag.SCHEMA_EXISTS_ERR(e.schema),bo.BAD_REQUEST,Qu.LOG_LEVELS.ERROR,Ag.SCHEMA_EXISTS_ERR(e.schema),!0);return await Ia.createSchema(e),`database '${e.schema}' successfully created`}a(fF,"createSchemaStructure");async function Gse(e){return Ju(e),e.hash_attribute=e.primary_key??e.hash_attribute,await _F(e)}a(Gse,"createTable");async function _F(e){let t=sh(e,lr.object({database:Rg,schema:Rg,table:Hse,residence:lr.array().items(lr.string().min(1)).optional(),hash_attribute:kse}));if(t)throw new dl(t.message);if(!await zu.checkSchemaTableExists(e.schema,e.table))throw yo(new Error,Ag.TABLE_EXISTS_ERR(e.schema,e.table),bo.BAD_REQUEST,Qu.LOG_LEVELS.ERROR,Ag.TABLE_EXISTS_ERR(e.schema,e.table),!0);let n={name:e.table,schema:e.schema,id:vse(),hash_attribute:e.hash_attribute};try{if(e.residence)if(global.clustering_on)n.residence=e.residence,await Ia.createTable(n,e);else throw yo(new Error,"Clustering does not appear to be enabled. 
Cannot insert table with property 'residence'.",bo.BAD_REQUEST);else await Ia.createTable(n,e);return`table '${e.schema}.${e.table}' successfully created.`}catch(s){throw s}}a(_F,"createTableStructure");async function qse(e){let t=sh(e,lr.object({database:lr.string(),schema:lr.string()}).or("database","schema").messages({"object.missing":"'database' is required"}));if(t)throw new dl(t.message);Ju(e);let r=await zu.checkSchemaExists(e.schema);if(r)throw yo(new Error,r,bo.NOT_FOUND,Qu.LOG_LEVELS.ERROR,r,!0);let n=await zu.schema_describe.describeSchema({schema:e.schema}),s=Object.keys(global.hdb_schema[e.schema]);await Ia.dropSchema(e),yg.signalSchemaChange(new bg(process.pid,e.operation,e.schema)),await uF.purgeSchemaTableStreams(e.schema,s);let i=await dF(e);return i.message=`successfully deleted '${e.schema}'`,i}a(qse,"dropSchema");async function $se(e){let t=sh(e,lr.object({database:lr.string(),schema:lr.string(),table:lr.string().required()}));if(t)throw new dl(t.message);Ju(e);let r=await zu.checkSchemaTableExists(e.schema,e.table);if(r)throw yo(new Error,r,bo.NOT_FOUND,Qu.LOG_LEVELS.ERROR,r,!0);await Ia.dropTable(e),await uF.purgeTableStream(e.schema,e.table);let n=await dF(e);return n.message=`successfully deleted table '${e.schema}.${e.table}'`,n}a($se,"dropTable");async function Vse(e){let t=sh(e,lr.object({database:lr.string(),schema:lr.string(),table:lr.string().required(),attribute:lr.string().required()}));if(t)throw new dl(t.message);Ju(e);let r=await zu.checkSchemaTableExists(e.schema,e.table);if(r)throw yo(new Error,r,bo.NOT_FOUND,Qu.LOG_LEVELS.ERROR,r,!0);if(e.attribute===global.hdb_schema[e.schema][e.table].hash_attribute)throw yo(new Error,"You cannot drop a hash attribute",bo.BAD_REQUEST,void 0,void 0,!0);if(Qu.TIME_STAMP_NAMES.indexOf(e.attribute)>=0)throw yo(new Error,`cannot drop internal timestamp attribute: ${e.attribute}`,bo.BAD_REQUEST,void 0,void 0,!0);try{return await Ia.dropAttribute(e),Kse(e),yg.signalSchemaChange(new bg(process.pid,e.operation,e.schema,e.table,e.attribute)),`successfully deleted attribute '${e.attribute}'`}catch(n){throw Mse.error(`Got an error deleting attribute ${Use.inspect(e)}.`),n}}a(Vse,"dropAttribute");function Kse(e){let t=Object.values(global.hdb_schema[e.schema][e.table].attributes);for(let r=0;r<t.length;r++)t[r].attribute===e.attribute&&global.hdb_schema[e.schema][e.table].attributes.splice(r,1)}a(Kse,"dropAttributeFromGlobal");async function Yse(e){Ju(e);let t=Bse()[e.schema][e.table].attributes;for(let{name:r}of t)if(r===e.attribute)throw yo(new Error,`attribute '${e.attribute}' already exists in ${e.schema}.${e.table}`,bo.BAD_REQUEST,void 0,void 0,!0);return await Ia.createAttribute(e),yg.signalSchemaChange(new bg(process.pid,e.operation,e.schema,e.table,e.attribute)),`attribute '${e.schema}.${e.table}.${e.attribute}' successfully created.`}a(Yse,"createAttribute");function Wse(e){return Ia.getBackup(e)}a(Wse,"getBackup");function zse(e){if(!e.database)throw new dl('Must provide "database" name for search for orphaned blobs');if(!databases[e.database])throw new dl(`Unknown database '${e.database}'`);let{cleanupOrphans:r}=(gn(),P(uu));return r(databases[e.database],e.database),{message:"Orphaned blobs cleanup started, check logs for progress"}}a(zse,"cleanupOrphanBlobs")});var pF=C((tCe,mF)=>{"use strict";var{OPERATIONS_ENUM:jse}=(k(),P(q)),zO=class{static{a(this,"ReadAuditLogObject")}constructor(t,r,n=void 0,s=void 
0){this.operation=jse.READ_AUDIT_LOG,this.schema=t,this.table=r,this.search_type=n,this.search_values=s}};mF.exports=zO});var jO=C((sCe,AF)=>{"use strict";var Qse=Kn(),nCe=pF(),Og=ie(),Ng=(k(),P(q)),Jse=le(),{handleHDBError:EF,hdb_errors:Xse}=he(),{HDB_ERROR_MSGS:gF,HTTP_STATUS_CODES:SF}=Xse,Zse=Object.values(Ng.READ_AUDIT_LOG_SEARCH_TYPES_ENUM),TF="To use this operation audit log must be enabled in harperdb-config.yaml";AF.exports=eie;async function eie(e){if(Og.isEmpty(e.schema))throw new Error(gF.SCHEMA_REQUIRED_ERR);if(Og.isEmpty(e.table))throw new Error(gF.TABLE_REQUIRED_ERR);if(!Jse.get(Ng.CONFIG_PARAMS.LOGGING_AUDITLOG))throw EF(new Error,TF,SF.BAD_REQUEST,Ng.LOG_LEVELS.ERROR,TF,!0);let t=Og.checkSchemaTableExist(e.schema,e.table);if(t)throw EF(new Error,t,SF.NOT_FOUND,Ng.LOG_LEVELS.ERROR,t,!0);if(!Og.isEmpty(e.search_type)&&Zse.indexOf(e.search_type)<0)throw new Error(`Invalid search_type '${e.search_type}'`);return await Qse.readAuditLog(e)}a(eie,"readAuditLog")});var yF=C((oCe,RF)=>{"use strict";var{OPERATIONS_ENUM:tie}=(k(),P(q)),QO=class{static{a(this,"GetBackupObject")}constructor(t,r,n=void 0,s=void 0){this.operation=tie.GET_BACKUP,this.schema=t,this.table=r}};RF.exports=QO});var NF=C((uCe,OF)=>{"use strict";var rie=Kn(),cCe=yF(),JO=ie(),nie=(k(),P(q)),lCe=le(),{handleHDBError:sie,hdb_errors:iie}=he(),{HDB_ERROR_MSGS:bF,HTTP_STATUS_CODES:oie}=iie;OF.exports=aie;async function aie(e){if(JO.isEmpty(e.schema))throw new Error(bF.SCHEMA_REQUIRED_ERR);if(JO.isEmpty(e.table))throw new Error(bF.TABLE_REQUIRED_ERR);let t=JO.checkSchemaTableExist(e.schema,e.table);if(t)throw sie(new Error,t,oie.NOT_FOUND,nie.LOG_LEVELS.ERROR,t,!0);return await rie.getBackup(read_audit_log_object)}a(aie,"getBackup")});var PF=C((fCe,CF)=>{"use strict";var cie=le(),Ca=require("joi"),lie=it(),wF=require("moment"),uie=require("fs-extra"),XO=require("path"),die=require("lodash"),oh=(k(),P(q)),{LOG_LEVELS:fl}=(k(),P(q)),fie="YYYY-MM-DD hh:mm:ss",_ie=XO.resolve(__dirname,"../logs");CF.exports=function(e){return lie.validateBySchema(e,hie)};var hie=Ca.object({from:Ca.custom(IF),until:Ca.custom(IF),level:Ca.valid(fl.NOTIFY,fl.FATAL,fl.ERROR,fl.WARN,fl.INFO,fl.DEBUG,fl.TRACE),order:Ca.valid("asc","desc"),limit:Ca.number().min(1),start:Ca.number().min(0),log_name:Ca.custom(mie)});function IF(e,t){if(wF(e,wF.ISO_8601).format(fie)==="Invalid date")return t.message(`'${t.state.path[0]}' date '${e}' is invalid.`)}a(IF,"validateDatetime");function mie(e,t){if(die.invert(oh.LOG_NAMES)[e]===void 0)return t.message(`'log_name' '${e}' is invalid.`);let n=cie.get(oh.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e===void 0?oh.LOG_NAMES.HDB:e,i=s===oh.LOG_NAMES.INSTALL?XO.join(_ie,oh.LOG_NAMES.INSTALL):XO.join(n,s);return uie.existsSync(i)?null:t.message(`'log_name' '${e}' does not exist.`)}a(mie,"validateReadLogPath")});var eN=C((hCe,LF)=>{"use strict";var wg=(k(),P(q)),pie=ee(),Eie=le(),gie=PF(),ZO=require("path"),DF=require("fs-extra"),{once:Sie}=require("events"),{handleHDBError:Tie,hdb_errors:Aie}=he(),{PACKAGE_ROOT:Rie}=Et(),yie=ZO.join(Rie,"logs"),bie=1e3,Oie=200;LF.exports=Nie;async function Nie(e){let t=gie(e);if(t)throw Tie(t,t.message,Aie.HTTP_STATUS_CODES.BAD_REQUEST,void 0,void 0,!0);let r=Eie.get(wg.HDB_SETTINGS_NAMES.LOG_PATH_KEY),n=e.log_name===void 0?wg.LOG_NAMES.HDB:e.log_name,s=n===wg.LOG_NAMES.INSTALL?ZO.join(yie,wg.LOG_NAMES.INSTALL):ZO.join(r,n),i=e.level!==void 0,o=i?e.level:void 0,c=e.from!==void 0,l=c?new Date(e.from):void 0,u=e.until!==void 0,f=u?new Date(e.until):void 0,d=e.limit===void 
0?bie:e.limit,_=e.order===void 0?void 0:e.order,h=e.start===void 0?0:e.start,m=h+d,S=0;_==="desc"&&!l&&!f&&(S=Math.max(DF.statSync(s).size-(m+5)*Oie,0));let g=DF.createReadStream(s,{start:S});g.on("error",H=>{pie.error(H)});let R=0,E=[],T="",N;g.on("data",H=>{let X=/(?:^|\n)(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:[\d\.]+Z) \[(.+?)]: /g;H=T+H;let W=0,$;for(;($=X.exec(H))&&!g.destroyed;){N&&(N.message=H.slice(W,$.index),v(N));let[se,z,fe]=$,ue=fe.split("] ["),Z=ue[0],Ae=ue[1];ue.splice(0,2),N={timestamp:z,thread:Z,level:Ae,tags:ue,message:""},W=$.index+se.length}T=H.slice(W)}),g.on("end",H=>{g.destroyed||N&&(N.message=T.trim(),v(N))}),g.resume();function v(H){let X,W,$;switch(!0){case(i&&c&&u):X=new Date(H.timestamp),W=new Date(l),$=new Date(f),H.level===o&&X>=W&&X<=$&&R<h?R++:H.level===o&&X>=W&&X<=$&&(Pa(H,_,E),R++,R===m&&g.destroy());break;case(i&&c):X=new Date(H.timestamp),W=new Date(l),H.level===o&&X>=W&&R<h?R++:H.level===o&&X>=W&&(Pa(H,_,E),R++,R===m&&g.destroy());break;case(i&&u):X=new Date(H.timestamp),$=new Date(f),H.level===o&&X<=$&&R<h?R++:H.level===o&&X<=$&&(Pa(H,_,E),R++,R===m&&g.destroy());break;case(c&&u):X=new Date(H.timestamp),W=new Date(l),$=new Date(f),X>=W&&X<=$&&R<h?R++:X>=W&&X<=$&&(Pa(H,_,E),R++,R===m&&g.destroy());break;case i:H.level===o&&R<h?R++:H.level===o&&(Pa(H,_,E),R++,R===m&&g.destroy());break;case c:X=new Date(H.timestamp),W=new Date(l),X>=W&&R<h?R++:X>=W&&R>=h&&(Pa(H,_,E),R++,R===m&&g.destroy());break;case u:X=new Date(H.timestamp),$=new Date(f),X<=$&&R<h?R++:X<=$&&R>=h&&(Pa(H,_,E),R++,R===m&&g.destroy());break;default:R<h?R++:(Pa(H,_,E),R++,R===m&&g.destroy())}}return a(v,"onLogMessage"),await Sie(g,"close"),E}a(Nie,"readLog");function Pa(e,t,r){t==="desc"?wie(e,r):t==="asc"?Iie(e,r):r.push(e)}a(Pa,"pushLineToResult");function wie(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)>r?n=i+1:s=i}t.splice(n,0,e)}a(wie,"insertDescending");function Iie(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)<r?n=i+1:s=i}t.splice(n,0,e)}a(Iie,"insertAscending")});var Ig=C((TCe,xF)=>{"use strict";var tN=require("joi"),{string:Xu,boolean:MF,date:Cie}=tN.types(),Pie=it(),{validateSchemaExists:pCe,validateTableExists:ECe,validateSchemaName:gCe}=Oi(),Die=(k(),P(q)),Lie=Lt(),vF=le();vF.initSync();var SCe=Xu.invalid(vF.get(Die.CONFIG_PARAMS.CLUSTERING_NODENAME)??"node_name").pattern(Lie.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),UF={operation:Xu.valid("add_node","update_node","set_node_replication"),node_name:Xu.optional(),subscriptions:tN.array().items({table:Xu.optional(),schema:Xu.optional(),database:Xu.optional(),subscribe:MF.required(),publish:MF.required().custom(vie),start_time:Cie.iso()})};function Mie(e){return Pie.validateBySchema(e,tN.object(UF))}a(Mie,"addUpdateNodeValidator");function vie(e,t){if(t.state.ancestors[2].operation==="add_node"&&e===!1&&t.state.ancestors[0].subscribe===!1)return t.message(`'subscriptions[${t.state.path[1]}]' subscribe and/or publish must be set to true when adding a node`)}a(vie,"checkForFalsy");xF.exports={addUpdateNodeValidator:Mie,validation_schema:UF}});var Zu=C((RCe,BF)=>{"use strict";var 
rN=class{static{a(this,"Node")}constructor(t,r,n){this.name=t,this.subscriptions=r,this.system_info=n}},nN=class{static{a(this,"NodeSubscription")}constructor(t,r,n,s){this.schema=t,this.table=r,this.publish=n,this.subscribe=s}};BF.exports={Node:rN,NodeSubscription:nN}});var kF=C((bCe,HF)=>{"use strict";var Uie=(k(),P(q)).OPERATIONS_ENUM,sN=class{static{a(this,"UpsertObject")}constructor(t,r,n,s=void 0){this.operation=Uie.UPSERT,this.schema=t,this.table=r,this.records=n,this.__origin=s}};HF.exports=sN});var ah=C((NCe,FF)=>{"use strict";var iN=class{static{a(this,"RemotePayloadObject")}constructor(t,r,n,s){this.operation=t,this.node_name=r,this.subscriptions=n,this.system_info=s}},oN=class{static{a(this,"RemotePayloadSubscription")}constructor(t,r,n,s,i,o,c){this.schema=t,this.table=r,this.hash_attribute=n,this.publish=s,this.subscribe=i,this.start_time=o,c!==void 0&&(this.attributes=c)}};FF.exports={RemotePayloadObject:iN,RemotePayloadSubscription:oN}});var qF=C((ICe,GF)=>{"use strict";var aN=class{static{a(this,"TableSizeObject")}constructor(t,r,n=0,s=0,i=0,o=0){this.schema=t,this.table=r,this.table_size=n,this.record_count=s,this.transaction_log_size=i,this.transaction_log_record_count=o}};GF.exports=aN});var VF=C((vCe,$F)=>{"use strict";var xie=qF(),PCe=Gt(),DCe=ht(),Bie=ee(),{getSchemaPath:LCe,getTransactionAuditStorePath:MCe}=gt(),{getDatabases:Hie}=(xe(),P(ct));$F.exports=kie;async function kie(e){let t=new xie;try{let r=Hie()[e.schema]?.[e.name],n=r.primaryStore.getStats(),s=r.auditStore?.getStats();t.schema=e.schema,t.table=e.name,t.record_count=n.entryCount,t.transaction_log_record_count=s.entryCount}catch(r){Bie.warn(`unable to stat table dbi due to ${r}`)}return t}a(kie,"lmdbGetTableSize")});var YF=C((xCe,KF)=>{"use strict";var cN=class{static{a(this,"SystemInformationObject")}constructor(t,r,n,s,i,o,c){this.system=t,this.time=r,this.cpu=n,this.memory=s,this.disk=i,this.network=o,this.harperdb_processes=c}};KF.exports=cN});var td=C((qCe,QF)=>{"use strict";var Fie=require("fs-extra"),Gie=require("path"),Xr=require("systeminformation"),Da=ee(),WF=ir(),HCe=Lt(),ed=(k(),P(q)),qie=VF(),$ie=ho(),{getThreadInfo:zF}=ot(),ch=le();ch.initSync();var Vie=YF(),{openEnvironment:kCe}=ht(),{getSchemaPath:FCe}=gt(),{database:GCe,databases:lN}=(xe(),P(ct)),Cg;QF.exports={getHDBProcessInfo:_N,getNetworkInfo:mN,getDiskInfo:hN,getMemoryInfo:fN,getCPUInfo:dN,getTimeInfo:uN,getSystemInformation:pN,systemInformation:Kie,getTableSize:EN,getMetrics:gN};function uN(){return Xr.time()}a(uN,"getTimeInfo");async function dN(){try{let{family:e,model:t,stepping:r,revision:n,voltage:s,speedmin:i,speedmax:o,governor:c,socket:l,cache:u,...f}=await Xr.cpu();f.cpu_speed=await Xr.cpuCurrentSpeed();let{raw_currentload:d,raw_currentload_idle:_,raw_currentload_irq:h,raw_currentload_nice:m,raw_currentload_system:S,raw_currentload_user:g,cpus:R,...E}=await Xr.currentLoad();return E.cpus=[],R.forEach(T=>{let{raw_load:N,raw_load_idle:v,raw_load_irq:H,raw_load_nice:X,raw_load_system:W,raw_load_user:$,...se}=T;E.cpus.push(se)}),f.current_load=E,f}catch(e){return Da.error(`error in getCPUInfo: ${e}`),{}}}a(dN,"getCPUInfo");async function fN(){try{let{buffers:e,cached:t,slab:r,buffcache:n,...s}=await Xr.mem();return Object.assign(s,process.memoryUsage())}catch(e){return Da.error(`error in getMemoryInfo: ${e}`),{}}}a(fN,"getMemoryInfo");async function _N(){let e={core:[],clustering:[]};try{let t=await Xr.processes(),r;try{r=Number.parseInt(await 
Fie.readFile(Gie.join(ch.get(ed.CONFIG_PARAMS.ROOTPATH),ed.HDB_PID_FILE),"utf8"))}catch(n){if(n.code===ed.NODE_ERROR_CODES.ENOENT)Da.warn("Unable to locate 'hdb.pid' file, try stopping and starting HarperDB. This could be because HarperDB is not running.");else throw n}t.list.forEach(n=>{n.pid===r?e.core.push(n):n.name==="nats-server"&&e.clustering.push(n)});for(let n of e.core)for(let s of t.list)s.pid===n.parentPid&&(s.name==="PM2"||s.command==="PM2")&&(n.parent="PM2");return e}catch(t){return Da.error(`error in getHDBProcessInfo: ${t}`),e}}a(_N,"getHDBProcessInfo");async function hN(){let e={};try{if(!ch.get(ed.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_DISK))return e;let{rIO_sec:t,wIO_sec:r,tIO_sec:n,ms:s,...i}=await Xr.disksIO();e.io=i;let{rx_sec:o,tx_sec:c,wx_sec:l,...u}=await Xr.fsStats();return e.read_write=u,e.size=await Xr.fsSize(),e}catch(t){return Da.error(`error in getDiskInfo: ${t}`),e}}a(hN,"getDiskInfo");async function mN(){let e={default_interface:null,latency:{},interfaces:[],stats:[],connections:[]};try{return ch.get(ed.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_NETWORK)&&(e.default_interface=await Xr.networkInterfaceDefault(),e.latency=await Xr.inetChecksite("google.com"),(await Xr.networkInterfaces()).forEach(n=>{let{internal:s,virtual:i,mtu:o,dhcp:c,dnsSuffix:l,ieee8021xAuth:u,ieee8021xState:f,carrier_changes:d,..._}=n;e.interfaces.push(_)}),(await Xr.networkStats()).forEach(n=>{let{rx_sec:s,tx_sec:i,ms:o,...c}=n;e.stats.push(c)})),e}catch(t){return Da.error(`error in getNetworkInfo: ${t}`),e}}a(mN,"getNetworkInfo");async function pN(){if(Cg!==void 0)return Cg;let e={};try{let{codepage:t,logofile:r,serial:n,build:s,servicepack:i,uefi:o,...c}=await Xr.osInfo();e=c;let l=await Xr.versions("node, npm");return e.node_version=l.node,e.npm_version=l.npm,Cg=e,Cg}catch(t){return Da.error(`error in getSystemInformation: ${t}`),e}}a(pN,"getSystemInformation");async function EN(){let e=[],t=await $ie.describeAll();for(let r of Object.values(t))for(let n of Object.values(r))e.push(await qie(n));return e}a(EN,"getTableSize");async function gN(){let e={};for(let t in lN){let r=e[t]={},n=r.tables={};for(let s in lN[t])try{let i=lN[t][s];if(!r.readers&&(Object.assign(r,i.primaryStore.rootStore.getStats()),delete r.root,r.readers=i.primaryStore.rootStore.readerList().split(/\n\s+/).slice(1).map(l=>{let[u,f,d]=l.trim().split(" ");return{pid:u,thread:f,txnid:d}}),i.auditStore)){let{treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:f,entryCount:d,overflowPages:_}=i.auditStore.getStats();r.audit={treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:f,entryCount:d,overflowPages:_}}let o=i.primaryStore.getStats(),c={};for(let l of["treeDepth","treeBranchPageCount","treeLeafPageCount","entryCount","overflowPages"])c[l]=o[l];n[s]=c}catch(i){Da.notify(`Error getting stats for table ${s}: ${i}`)}}return e}a(gN,"getMetrics");async function jF(){if(ch.get(ed.CONFIG_PARAMS.CLUSTERING_ENABLED)){let{jsm:e}=await WF.getNATSReferences(),t=await WF.listStreams(),r=[];for(let n of t){let s=[],i=await e.consumers.list(n.config.name);for await(let c of i)s.push({name:c.name,created:c.created,num_ack_pending:c.num_ack_pending,num_redelivered:c.num_redelivered,num_waiting:c.num_waiting,num_pending:c.num_pending});let o={stream_name:n.config.name,database:n.config.subjects[0].split(".")[1],table:n.config.subjects[0].split(".")[2],state:n.state,consumers:s};r.push(o)}return r}}a(jF,"getNatsStreamInfo");async function Kie(e){let t=new Vie;if(!Array.isArray(e.attributes)||e.attributes.length===0)return t.system=await 
pN(),t.time=uN(),t.cpu=await dN(),t.memory=await fN(),t.disk=await hN(),t.network=await mN(),t.harperdb_processes=await _N(),t.table_size=await EN(),t.metrics=await gN(),t.threads=await zF(),t.replication=await jF(),t;for(let r=0;r<e.attributes.length;r++)switch(e.attributes[r]){case"system":t.system=await pN();break;case"time":t.time=uN();break;case"cpu":t.cpu=await dN();break;case"memory":t.memory=await fN();break;case"disk":t.disk=await hN();break;case"network":t.network=await mN();break;case"harperdb_processes":t.harperdb_processes=await _N();break;case"table_size":t.table_size=await EN();break;case"database_metrics":case"metrics":t.metrics=await gN();break;case"threads":t.threads=await zF();break;case"replication":t.replication=await jF();break;default:break}return t}a(Kie,"systemInformation")});var Oo=C((WCe,eG)=>{"use strict";var Yie=Rn(),SN=ie(),Wie=require("util"),_l=(k(),P(q)),JF=le();JF.initSync();var zie=Jb(),XF=Wr(),{Node:VCe,NodeSubscription:KCe}=Zu(),jie=yu(),Qie=kF(),{RemotePayloadObject:Jie,RemotePayloadSubscription:Xie}=ah(),{handleHDBError:Zie,hdb_errors:eoe}=he(),{HTTP_STATUS_CODES:toe,HDB_ERROR_MSGS:roe}=eoe,noe=Ws(),soe=td(),{packageJson:ioe}=Et(),{getDatabases:ooe}=(xe(),P(ct)),YCe=Wie.promisify(zie.authorize),aoe=XF.searchByHash,coe=XF.searchByValue;eG.exports={isEmpty:loe,getNodeRecord:uoe,upsertNodeRecord:doe,buildNodePayloads:foe,checkClusteringEnabled:_oe,getAllNodeRecords:hoe,getSystemInfo:moe,reverseSubscription:ZF};function loe(e){return e==null}a(loe,"isEmpty");async function uoe(e){let t=new jie(_l.SYSTEM_SCHEMA_NAME,_l.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e],["*"]);return aoe(t)}a(uoe,"getNodeRecord");async function doe(e){let t=new Qie(_l.SYSTEM_SCHEMA_NAME,_l.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e]);return Yie.upsert(t)}a(doe,"upsertNodeRecord");function ZF(e){if(SN.isEmpty(e.subscribe)||SN.isEmpty(e.publish))throw new Error("Received invalid subscription object");let{schema:t,table:r,hash_attribute:n}=e,s={schema:t,table:r,hash_attribute:n};return e.subscribe===!0&&e.publish===!1?(s.subscribe=!1,s.publish=!0):e.subscribe===!1&&e.publish===!0?(s.subscribe=!0,s.publish=!1):(s.subscribe=e.subscribe,s.publish=e.publish),s}a(ZF,"reverseSubscription");function foe(e,t,r,n){let s=[];for(let i=0,o=e.length;i<o;i++){let c=e[i],{schema:l,table:u}=c,f=SN.getTableHashAttribute(l,u),{subscribe:d,publish:_}=ZF(c),h=ooe()[l]?.[u],m=new Xie(l,u,f,_,d,c.start_time,h.schemaDefined?h.attributes:void 0);s.push(m)}return new Jie(r,t,s,n)}a(foe,"buildNodePayloads");function _oe(){if(!JF.get(_l.CONFIG_PARAMS.CLUSTERING_ENABLED))throw Zie(new Error,roe.CLUSTERING_NOT_ENABLED,toe.BAD_REQUEST,void 0,void 0,!0)}a(_oe,"checkClusteringEnabled");async function hoe(){let e=new noe(_l.SYSTEM_SCHEMA_NAME,_l.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,"name","*",void 0,["*"]);return Array.from(await coe(e))}a(hoe,"getAllNodeRecords");async function moe(){let e=await soe.getSystemInformation();return{hdb_version:ioe.version,node_version:e.node_version,platform:e.platform}}a(moe,"getSystemInfo")});var TN=C((jCe,cG)=>{"use strict";var Pg=ir(),tG=ie(),rG=Lt(),nG=(k(),P(q)),Dg=ee(),sG=ih(),poe=T_(),{RemotePayloadObject:Eoe}=ah(),{handleHDBError:iG,hdb_errors:goe}=he(),{HTTP_STATUS_CODES:oG}=goe,{NodeSubscription:aG}=Zu();cG.exports=Soe;async function Soe(e,t){let r;try{r=await Pg.request(`${t}.${rG.REQUEST_SUFFIX}`,new Eoe(nG.OPERATIONS_ENUM.DESCRIBE_ALL,t,void 0,void 0)),Dg.trace("Response from remote describe all request:",r)}catch(o){Dg.error(`addNode received error from describe all request to 
remote node: ${o}`);let c=Pg.requestErrorHandler(o,"add_node",t);throw iG(new Error,c,oG.INTERNAL_SERVER_ERROR,"error",c)}if(r.status===rG.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let o=`Error returned from remote node ${t}: ${r.message}`;throw iG(new Error,o,oG.INTERNAL_SERVER_ERROR,"error",o)}let n=r.message,s=[],i=[];for(let o of e){let{table:c}=o,l=o.database??o.schema??"data";if(l===nG.SYSTEM_SCHEMA_NAME){await Pg.createLocalTableStream(l,c);let m=new aG(l,c,o.publish,o.subscribe);m.start_time=o.start_time,i.push(m);continue}let u=tG.doesSchemaExist(l),f=n[l]!==void 0,d=c?tG.doesTableExist(l,c):!0,_=c?n?.[l]?.[c]!==void 0:!0;if(!u&&!f||!d&&!_){s.push(o);continue}if(!u&&f&&(Dg.trace(`addNode creating schema: ${l}`),await sG.createSchema({operation:"create_schema",schema:l})),!d&&_){Dg.trace(`addNode creating table: ${c} in schema: ${l} with attributes ${JSON.stringify(n[l][c].attributes)}`);let m=new poe(l,c,n[l][c].hash_attribute);n[l][c].attributes&&(m.attributes=n[l][c].attributes),await sG.createTable(m)}await Pg.createLocalTableStream(l,c);let h=new aG(l,c,o.publish,o.subscribe);h.start_time=o.start_time,i.push(h)}return{added:i,skipped:s}}a(Soe,"reviewSubscriptions")});var hl={};Ue(hl,{addNodeBack:()=>boe,removeNodeBack:()=>Ooe,setNode:()=>yoe});async function yoe(e){e.node_name&&!e.hostname&&(e.hostname=e.node_name),e.verify_tls!==void 0&&(e.rejectUnauthorized=e.verify_tls);let{url:t,hostname:r}=e;t?r||(r=e.hostname=Js(t)):t=Tg(r);let n=(0,uG.validateBySchema)(e,Roe);if(n)throw(0,No.handleHDBError)(n,n.message,Aoe.BAD_REQUEST,void 0,void 0,!0);if(e.operation==="remove_node"){if(!t&&!r)throw new No.ClientError("url or hostname is required for remove_node operation");let h=r,m=or(),S=await m.get(h);if(!S)throw new No.ClientError(h+" does not exist");try{await nh({url:S.url},{operation:Wt.REMOVE_NODE_BACK,name:S?.subscriptions?.length>0?et():h},void 0)}catch(g){Xn.warn(`Error removing node from target node ${h}, if it is offline and we be online in the future, you may need to clean up this node manually, or retry:`,g)}return await m.delete(h),`Successfully removed '${h}' from cluster`}if(!t)throw new No.ClientError("url required for this operation");let s=Ta();if(s==null)throw new No.ClientError("replication url is missing from harperdb-config.yaml");let i,o,c;if(t?.startsWith("wss:")){i=await(0,Ts.getReplicationCert)();let h=await(0,Ts.getReplicationCertAuth)();if(!i)throw new Error("Unable to find a certificate to use for replication");i.options.is_self_signed?(o=await(0,Ts.createCsr)(),Xn.info("Sending CSR to target node:",t)):h&&(c=h.certificate,Xn.info("Sending CA named",h.name,"to target node",t))}let l={operation:Wt.ADD_NODE_BACK,hostname:(0,Ma.get)(x.REPLICATION_HOSTNAME),target_hostname:r,url:s,csr:o,cert_auth:c,authorization:e.retain_authorization?e.authorization:null};if((0,Ma.get)(x.REPLICATION_SHARD)!==void 0&&(l.shard=(0,Ma.get)(x.REPLICATION_SHARD)),e.subscriptions?l.subscriptions=e.subscriptions.map(lG):l.subscriptions=null,e.hasOwnProperty("subscribe")||e.hasOwnProperty("publish")){let h=lG(e);l.subscribe=h.subscribe,l.publish=h.publish}e?.authorization?.username&&e?.authorization?.password&&(e.authorization="Basic "+Buffer.from(e.authorization.username+":"+e.authorization.password).toString("base64"));let u,f;try{u=await nh({url:t},l,e)}catch(h){h.message=`Error returned from ${t}: `+h.message,Xn.warn("Error adding node:",t,"to cluster:",h),f=h}if(o&&(!u?.certificate||!u?.certificate?.includes?.("BEGIN CERTIFICATE")))throw f?(f.message+=" and connection was required 
to sign certificate",f):new Error(`Unexpected certificate signature response from node ${t} response: ${JSON.stringify(u)}`);o&&(Xn.info("CSR response received from node:",t,"saving certificate and CA in hdb_certificate"),await(0,Ts.setCertTable)({name:Toe.certificateFromPem(u.signingCA).issuer.getField("CN").value,certificate:u.signingCA,is_authority:!0}),u.certificate&&await(0,Ts.setCertTable)({name:et(),uses:["https","operations","wss"],certificate:u.certificate,private_key_name:i?.options?.key_file,is_authority:!1,is_self_signed:!1}),c=u.signingCA);let d={url:t,ca:u?.usingCA};if(e.hostname&&(d.name=e.hostname),e.subscriptions?d.subscriptions=e.subscriptions:d.replicates=!0,e.start_time&&(d.start_time=typeof e.start_time=="string"?new Date(e.start_time).getTime():e.start_time),e.retain_authorization&&(d.authorization=e.authorization),e.revoked_certificates&&(d.revoked_certificates=e.revoked_certificates),u?.shard!==void 0?d.shard=u.shard:e.shard!==void 0&&(d.shard=e.shard),d.replicates){let h={url:s,ca:c,replicates:!0,subscriptions:null};(0,Ma.get)(x.REPLICATION_SHARD)!==void 0&&(h.shard=(0,Ma.get)(x.REPLICATION_SHARD)),e.retain_authorization&&(h.authorization=e.authorization),e.start_time&&(h.start_time=e.start_time),await To(et(),h)}await To(u?u.nodeName:d.name??Js(t),d);let _;return e.operation==="update_node"?_=`Successfully updated '${t}'`:_=`Successfully added '${t}' to cluster`,f&&(_+=" but there was an error updating target node: "+f.message),_}async function boe(e){Xn.trace("addNodeBack received request:",e);let t=await(0,Ts.signCertificate)(e),r;e.csr?(r=t.signingCA,Xn.info("addNodeBack received CSR from node:",e.url,"this node will use and respond with CA that was used to issue CSR")):(r=e?.cert_auth,Xn.info("addNodeBack received CA from node:",e.url));let n={url:e.url,ca:r};e.subscriptions?n.subscriptions=e.subscriptions:(n.replicates=!0,n.subscriptions=null),e.start_time&&(n.start_time=e.start_time),e.authorization&&(n.authorization=e.authorization),e.shard!==void 0&&(n.shard=e.shard);let s=await(0,Ts.getReplicationCertAuth)();if(n.replicates){let i={url:Ta(),ca:s?.certificate,replicates:!0,subscriptions:null};(0,Ma.get)(x.REPLICATION_SHARD)!==void 0&&(i.shard=(0,Ma.get)(x.REPLICATION_SHARD),t.shard=i.shard),e.start_time&&(i.start_time=e.start_time),e.authorization&&(i.authorization=e.authorization),await To(et(),i)}return await To(e.hostname,n),t.nodeName=et(),t.usingCA=s?.certificate,Xn.info("addNodeBack responding to:",e.url,"with CA named:",s?.name),t}async function Ooe(e){Xn.trace("removeNodeBack received request:",e),await or().delete(e.name)}function lG(e){let{subscribe:t,publish:r}=e;return{...e,subscribe:r,publish:t}}var Ts,uG,La,Ma,Xn,No,Toe,Aoe,Roe,ml=Re(()=>{Ts=M(ei()),uG=M(it()),La=M(require("joi")),Ma=M(le());k();J_();sl();Es();Xn=M(ee()),No=M(he()),{pki:Toe}=require("node-forge"),{HTTP_STATUS_CODES:Aoe}=No.hdb_errors,Roe=La.default.object({hostname:La.default.string(),verify_tls:La.default.boolean(),replicates:La.default.boolean(),subscriptions:La.default.array(),revoked_certificates:La.default.array(),shard:La.default.number()});a(yoe,"setNode");a(boe,"addNodeBack");a(Ooe,"removeNodeBack");a(lG,"reverseSubscription")});var xg=C((sPe,fG)=>{"use strict";var{handleHDBError:Lg,hdb_errors:Noe}=he(),{HTTP_STATUS_CODES:Mg}=Noe,{addUpdateNodeValidator:woe}=Ig(),vg=ee(),Ug=(k(),P(q)),dG=Lt(),Ioe=ie(),lh=ir(),uh=Oo(),AN=le(),Coe=TN(),{Node:Poe,NodeSubscription:Doe}=Zu(),{broadcast:Loe}=ot(),{setNode:Moe}=(ml(),P(hl)),rPe=le(),nPe=(k(),P(q)),voe="Unable to create 
subscriptions due to schema and/or tables not existing on the local or remote node",Uoe="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",xoe=AN.get(Ug.CONFIG_PARAMS.CLUSTERING_NODENAME);fG.exports=Boe;async function Boe(e,t=!1){if(vg.trace("addNode called with:",e),AN.get(Ug.CONFIG_PARAMS.REPLICATION_URL)||AN.get(Ug.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Moe(e);uh.checkClusteringEnabled();let r=woe(e);if(r)throw Lg(r,r.message,Mg.BAD_REQUEST,void 0,void 0,!0);let n=e.node_name;if(!t){let d=await uh.getNodeRecord(n);if(!Ioe.isEmptyOrZeroLength(d))throw Lg(new Error,`Node '${n}' has already been added, perform update_node to proceed.`,Mg.BAD_REQUEST,void 0,void 0,!0)}let{added:s,skipped:i}=await Coe(e.subscriptions,n),o={message:void 0,added:s,skipped:i};if(s.length===0)return o.message=voe,o;let c=uh.buildNodePayloads(s,xoe,Ug.OPERATIONS_ENUM.ADD_NODE,await uh.getSystemInfo()),l=[];for(let d=0,_=s.length;d<_;d++){let h=s[d];s[d].start_time===void 0&&delete s[d].start_time,l.push(new Doe(h.schema,h.table,h.publish,h.subscribe))}vg.trace("addNode sending remote payload:",c);let u;try{u=await lh.request(`${n}.${dG.REQUEST_SUFFIX}`,c)}catch(d){vg.error(`addNode received error from request: ${d}`);for(let h=0,m=s.length;h<m;h++){let S=s[h];S.publish=!1,S.subscribe=!1,await lh.updateRemoteConsumer(S,n)}let _=lh.requestErrorHandler(d,"add_node",n);throw Lg(new Error,_,Mg.INTERNAL_SERVER_ERROR,"error",_)}if(u.status===dG.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let d=`Error returned from remote node ${n}: ${u.message}`;throw Lg(new Error,d,Mg.INTERNAL_SERVER_ERROR,"error",d)}vg.trace(u);for(let d=0,_=s.length;d<_;d++){let h=s[d];await lh.updateRemoteConsumer(h,n),h.subscribe===!0&&await lh.updateConsumerIterator(h.schema,h.table,n,"start")}let f=new Poe(n,l,u.system_info);return await uh.upsertNodeRecord(f),Loe({type:"nats_update"}),i.length>0?o.message=Uoe:o.message=`Successfully added '${n}' to manifest`,o}a(Boe,"addNode")});var ON=C((aPe,hG)=>{"use strict";var{handleHDBError:RN,hdb_errors:Hoe}=he(),{HTTP_STATUS_CODES:yN}=Hoe,{addUpdateNodeValidator:koe}=Ig(),dh=ee(),Bg=(k(),P(q)),_G=Lt(),oPe=ie(),fh=ir(),_h=Oo(),bN=le(),{cloneDeep:Foe}=require("lodash"),Goe=TN(),{Node:qoe,NodeSubscription:$oe}=Zu(),{broadcast:Voe}=ot(),{setNode:Koe}=(ml(),P(hl)),Yoe="Unable to update subscriptions due to schema and/or tables not existing on the local or remote node",Woe="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",zoe=bN.get(Bg.CONFIG_PARAMS.CLUSTERING_NODENAME);hG.exports=joe;async function joe(e){if(dh.trace("updateNode called with:",e),bN.get(Bg.CONFIG_PARAMS.REPLICATION_URL)??bN.get(Bg.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Koe(e);_h.checkClusteringEnabled();let t=koe(e);if(t)throw RN(t,t.message,yN.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n,s=await _h.getNodeRecord(r);s.length>0&&(n=Foe(s));let{added:i,skipped:o}=await Goe(e.subscriptions,r),c={message:void 0,updated:i,skipped:o};if(i.length===0)return c.message=Yoe,c;let l=_h.buildNodePayloads(i,zoe,Bg.OPERATIONS_ENUM.UPDATE_NODE,await _h.getSystemInfo());for(let f=0,d=i.length;f<d;f++){let _=i[f];dh.trace(`updateNode updating work stream for node: ${r} subscription:`,_),i[f].start_time===void 0&&delete i[f].start_time}dh.trace("updateNode sending remote payload:",l);let u;try{u=await fh.request(`${r}.${_G.REQUEST_SUFFIX}`,l)}catch(f){dh.error(`updateNode received error from request: ${f}`);let 
d=fh.requestErrorHandler(f,"update_node",r);throw RN(new Error,d,yN.INTERNAL_SERVER_ERROR,"error",d)}if(u.status===_G.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let f=`Error returned from remote node ${r}: ${u.message}`;throw RN(new Error,f,yN.INTERNAL_SERVER_ERROR,"error",f)}dh.trace(u);for(let f=0,d=i.length;f<d;f++){let _=i[f];await fh.updateRemoteConsumer(_,r),_.subscribe===!0?await fh.updateConsumerIterator(_.schema,_.table,r,"start"):await fh.updateConsumerIterator(_.schema,_.table,r,"stop")}return n||(n=[new qoe(r,[],u.system_info)]),await Qoe(n[0],i,u.system_info),o.length>0?c.message=Woe:c.message=`Successfully updated '${r}'`,c}a(joe,"updateNode");async function Qoe(e,t,r){let n=e;for(let s=0,i=t.length;s<i;s++){let o=t[s],c=!1;for(let l=0,u=e.subscriptions.length;l<u;l++){let f=n.subscriptions[l];if(f.schema===o.schema&&f.table===o.table){f.publish=o.publish,f.subscribe=o.subscribe,c=!0;break}}c||n.subscriptions.push(new $oe(o.schema,o.table,o.publish,o.subscribe))}n.system_info=r,await _h.upsertNodeRecord(n),Voe({type:"nats_update"})}a(Qoe,"updateNodeTable")});var SG=C((lPe,gG)=>{"use strict";var EG=require("joi"),{string:mG}=EG.types(),Joe=it(),pG=(k(),P(q)),Xoe=le(),Zoe=Lt();gG.exports=eae;function eae(e){let t=mG.invalid(Xoe.get(pG.CONFIG_PARAMS.CLUSTERING_NODENAME)).pattern(Zoe.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),r=EG.object({operation:mG.valid(pG.OPERATIONS_ENUM.REMOVE_NODE).required(),node_name:t});return Joe.validateBySchema(e,r)}a(eae,"removeNodeValidator")});var Hg=C((dPe,bG)=>{"use strict";var{handleHDBError:TG,hdb_errors:tae}=he(),{HTTP_STATUS_CODES:AG}=tae,rae=SG(),hh=ee(),RG=Oo(),nae=ie(),rd=(k(),P(q)),yG=Lt(),NN=ir(),wN=le(),{RemotePayloadObject:sae}=ah(),{NodeSubscription:iae}=Zu(),oae=S_(),aae=qc(),{broadcast:cae}=ot(),{setNode:lae}=(ml(),P(hl)),uae=wN.get(rd.CONFIG_PARAMS.CLUSTERING_NODENAME);bG.exports=dae;async function dae(e){if(hh.trace("removeNode called with:",e),wN.get(rd.CONFIG_PARAMS.REPLICATION_URL)??wN.get(rd.CONFIG_PARAMS.REPLICATION_HOSTNAME))return lae(e);RG.checkClusteringEnabled();let t=rae(e);if(t)throw TG(t,t.message,AG.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n=await RG.getNodeRecord(r);if(nae.isEmptyOrZeroLength(n))throw TG(new Error,`Node '${r}' was not found.`,AG.BAD_REQUEST,void 0,void 0,!0);n=n[0];let s=new sae(rd.OPERATIONS_ENUM.REMOVE_NODE,uae,[]),i,o=!1;for(let l=0,u=n.subscriptions.length;l<u;l++){let f=n.subscriptions[l];f.subscribe===!0&&await NN.updateConsumerIterator(f.schema,f.table,r,"stop");try{await NN.updateRemoteConsumer(new iae(f.schema,f.table,!1,!1),r)}catch(d){hh.error(d)}}try{i=await NN.request(`${r}.${yG.REQUEST_SUFFIX}`,s),hh.trace("Remove node reply from remote node:",r,i)}catch(l){hh.error("removeNode received error from request:",l),o=!0}let c=new oae(rd.SYSTEM_SCHEMA_NAME,rd.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[r]);return await aae.deleteRecord(c),cae({type:"nats_update"}),i?.status===yG.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR||o?(hh.error("Error returned from remote node:",r,i?.message),`Successfully removed '${r}' from local manifest, however there was an error reaching remote node. 
Check the logs for more details.`):`Successfully removed '${r}' from manifest`}a(dae,"removeNode")});var wG=C((_Pe,NG)=>{"use strict";var OG=require("joi"),{string:fae,array:_ae}=OG.types(),hae=it(),mae=Ig();NG.exports=pae;function pae(e){let t=OG.object({operation:fae.valid("configure_cluster").required(),connections:_ae.items(mae.validation_schema).required()});return hae.validateBySchema(e,t)}a(pae,"configureClusterValidator")});var IN=C((mPe,LG)=>{"use strict";var IG=(k(),P(q)),kg=ee(),Eae=ie(),gae=le(),Sae=Hg(),Tae=xg(),Aae=Oo(),Rae=wG(),{handleHDBError:CG,hdb_errors:yae}=he(),{HTTP_STATUS_CODES:PG}=yae,bae="Configure cluster complete.",Oae="Failed to configure the cluster. Check the logs for more details.",Nae="Configure cluster was partially successful. Errors occurred when attempting to configure the following nodes. Check the logs for more details.";LG.exports=wae;async function wae(e){kg.trace("configure cluster called with:",e);let t=Rae(e);if(t)throw CG(t,t.message,PG.BAD_REQUEST,void 0,void 0,!0);let r=await Aae.getAllNodeRecords(),n=[];if(gae.get(IG.CONFIG_PARAMS.CLUSTERING_ENABLED)){for(let f=0,d=r.length;f<d;f++){let _=await DG(Sae,{operation:IG.OPERATIONS_ENUM.REMOVE_NODE,node_name:r[f].name},r[f].name);n.push(_)}kg.trace("All results from configure_cluster remove node:",n)}let s=[],i=e.connections.length;for(let f=0;f<i;f++){let d=e.connections[f],_=await DG(Tae,d,d.node_name);s.push(_)}kg.trace("All results from configure_cluster add node:",s);let o=[],c=[],l=!1,u=n.concat(s);for(let f=0,d=u.length;f<d;f++){let _=u[f];_.status==="rejected"&&(kg.error(_.node_name,_?.error?.message,_?.error?.stack),o.includes(_.node_name)||o.push(_.node_name)),(_?.result?.message?.includes?.("Successfully")||_?.result?.includes?.("Successfully"))&&(l=!0),!(typeof _.result=="string"&&_.result.includes("Successfully removed")||_.status==="rejected")&&c.push({node_name:_?.node_name,response:_?.result})}if(Eae.isEmptyOrZeroLength(o))return{message:bae,connections:c};if(l)return{message:Nae,failed_nodes:o,connections:c};throw CG(new Error,Oae,PG.INTERNAL_SERVER_ERROR,void 0,void 0,!0)}a(wae,"configureCluster");async function DG(e,t,r){try{return{node_name:r,result:await e(t)}}catch(n){return{node_name:r,error:n,status:"rejected"}}}a(DG,"functionWrapper")});var xG=C((EPe,UG)=>{"use strict";var mh=require("joi"),Iae=it(),{validateSchemaExists:MG,validateTableExists:Cae,validateSchemaName:vG}=Oi(),Pae=mh.object({operation:mh.string().valid("purge_stream"),schema:mh.string().custom(MG).custom(vG).optional(),database:mh.string().custom(MG).custom(vG).optional(),table:mh.string().custom(Cae).required()});function Dae(e){return Iae.validateBySchema(e,Pae)}a(Dae,"purgeStreamValidator");UG.exports=Dae});var CN=C((SPe,BG)=>{"use strict";var{handleHDBError:Lae,hdb_errors:Mae}=he(),{HTTP_STATUS_CODES:vae}=Mae,Uae=xG(),xae=ir(),Bae=Oo();BG.exports=Hae;async function Hae(e){e.schema=e.schema??e.database;let t=Uae(e);if(t)throw Lae(t,t.message,vae.BAD_REQUEST,void 0,void 0,!0);Bae.checkClusteringEnabled();let{schema:r,table:n,options:s}=e;return await xae.purgeTableStream(r,n,s),`Successfully purged table '${r}.${n}'`}a(Hae,"purgeStream")});var LN=C((APe,VG)=>{"use strict";var 
DN=Oo(),kae=ir(),Gg=le(),nd=(k(),P(q)),pl=Lt(),Fae=ie(),PN=ee(),{RemotePayloadObject:Gae}=ah(),{ErrorCode:HG}=require("nats"),{parentPort:kG}=require("worker_threads"),{onMessageByType:qae}=ot(),{getThisNodeName:$ae}=(Es(),P(Oa)),{requestClusterStatus:Vae}=(J_(),P(ak)),{getReplicationSharedStatus:Kae,getHDBNodeTable:Yae}=(sl(),P(SO)),{CONFIRMATION_STATUS_POSITION:Wae,RECEIVED_VERSION_POSITION:zae,RECEIVED_TIME_POSITION:jae,SENDING_TIME_POSITION:Qae,RECEIVING_STATUS_POSITION:Jae,RECEIVING_STATUS_RECEIVING:Xae}=(VO(),P(Xk)),FG=Gg.get(nd.CONFIG_PARAMS.CLUSTERING_ENABLED),GG=Gg.get(nd.CONFIG_PARAMS.CLUSTERING_NODENAME);VG.exports={clusterStatus:Zae,buildNodeStatus:$G};var qG;qae("cluster-status",async e=>{qG(e)});async function Zae(){if(Gg.get(nd.CONFIG_PARAMS.REPLICATION_URL)||Gg.get(nd.CONFIG_PARAMS.REPLICATION_HOSTNAME)){let n;if(kG){kG.postMessage({type:"request-cluster-status"}),n=await new Promise(i=>{qG=i});for(let i of n.connections){let o=i.name;for(let c of i.database_sockets){let l=c.database,u;for(let d of Object.values(databases[l]||{}))if(u=d.auditStore,u)break;if(!u)continue;let f=Kae(u,l,o);c.lastCommitConfirmed=Fg(f[Wae]),c.lastReceivedRemoteTime=Fg(f[zae]),c.lastReceivedLocalTime=Fg(f[jae]),c.sendingMessage=Fg(f[Qae]),c.lastReceivedStatus=f[Jae]===Xae?"Receiving":"Waiting"}}}else n=Vae();n.node_name=$ae();let s=Yae().primaryStore.get(n.node_name);return s?.shard&&(n.shard=s.shard),s?.url&&(n.url=s.url),n.is_enabled=!0,n}let e={node_name:GG,is_enabled:FG,connections:[]};if(!FG)return e;let t=await DN.getAllNodeRecords();if(Fae.isEmptyOrZeroLength(t))return e;let r=[];for(let n=0,s=t.length;n<s;n++)r.push($G(t[n],e.connections));return await Promise.allSettled(r),e}a(Zae,"clusterStatus");function Fg(e){return e?e===1?"Copying":new Date(e).toUTCString():void 0}a(Fg,"asDate");async function $G(e,t){let r=e.name,n=new Gae(nd.OPERATIONS_ENUM.CLUSTER_STATUS,GG,void 0,await DN.getSystemInfo()),s,i,o=pl.CLUSTER_STATUS_STATUSES.OPEN;try{let l=Date.now();s=await kae.request(pl.REQUEST_SUBJECT(r),n),i=Date.now()-l,s.status===pl.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR&&(o=pl.CLUSTER_STATUS_STATUSES.CLOSED,PN.error(`Error getting node status from ${r} `,s))}catch(l){PN.warn(`Error getting node status from ${r}`,l),l.code===HG.NoResponders?o=pl.CLUSTER_STATUS_STATUSES.NO_RESPONDERS:l.code===HG.Timeout?o=pl.CLUSTER_STATUS_STATUSES.TIMEOUT:o=pl.CLUSTER_STATUS_STATUSES.CLOSED}let c=new ece(r,o,s?.message?.ports?.clustering,s?.message?.ports?.operations_api,i,s?.message?.uptime,e.subscriptions,s?.message?.system_info);try{let l={name:r,system_info:s?.message?.system_info};e.system_info?.hdb_version!==nd.PRE_4_0_0_VERSION&&await DN.upsertNodeRecord(l)}catch(l){PN.error("Cluster status encountered an error updating system info for node:",r,l)}t.push(c)}a($G,"buildNodeStatus");function ece(e,t,r,n,s,i,o,c){this.node_name=e,this.status=t,this.ports={clustering:r,operations_api:n},this.latency_ms=s,this.uptime=i,this.subscriptions=o,this.system_info=c}a(ece,"NodeStatusObject")});var vN=C((yPe,KG)=>{"use strict";var{handleHDBError:tce,hdb_errors:rce}=he(),{HTTP_STATUS_CODES:nce}=rce,sce=ir(),ice=Oo(),MN=ie(),qg=require("joi"),oce=it(),ace=2e3,cce=qg.object({timeout:qg.number().min(1),connected_nodes:qg.boolean(),routes:qg.boolean()});KG.exports=lce;async function lce(e){ice.checkClusteringEnabled();let t=oce.validateBySchema(e,cce);if(t)throw tce(t,t.message,nce.BAD_REQUEST,void 0,void 0,!0);let{timeout:r,connected_nodes:n,routes:s}=e,i=n===void 0||MN.autoCastBoolean(n),o=s===void 
0||MN.autoCastBoolean(s),c={nodes:[]},l=await sce.getServerList(r??ace),u={};if(i)for(let f=0,d=l.length;f<d;f++){let _=l[f].statsz;_&&(u[l[f].server.name]=_.routes)}for(let f=0,d=l.length;f<d;f++){if(l[f].statsz)continue;let _=l[f].server,h=l[f].data;if(_.name.endsWith("-hub")){let m={name:_.name.slice(0,-4),response_time:l[f].response_time};i&&(m.connected_nodes=[],u[_.name]&&u[_.name].forEach(S=>{m.connected_nodes.includes(S.name.slice(0,-4))||m.connected_nodes.push(S.name.slice(0,-4))})),o&&(m.routes=h.cluster?.urls?h.cluster?.urls.map(S=>({host:S.split(":")[0],port:MN.autoCast(S.split(":")[1])})):[]),c.nodes.push(m)}}return c}a(lce,"clusterNetwork")});var jG=C((OPe,zG)=>{"use strict";var UN=require("joi"),YG=it(),{route_constraints:WG}=DA();zG.exports={setRoutesValidator:uce,deleteRoutesValidator:dce};function uce(e){let t=UN.object({server:UN.valid("hub","leaf"),routes:WG.required()});return YG.validateBySchema(e,t)}a(uce,"setRoutesValidator");function dce(e){let t=UN.object({routes:WG.required()});return YG.validateBySchema(e,t)}a(dce,"deleteRoutesValidator")});var $g=C((wPe,rq)=>{"use strict";var wo=Ct(),xN=ie(),As=(k(),P(q)),sd=le(),QG=jG(),{handleHDBError:JG,hdb_errors:fce}=he(),{HTTP_STATUS_CODES:XG}=fce,ZG="cluster routes successfully set",eq="cluster routes successfully deleted";rq.exports={setRoutes:hce,getRoutes:mce,deleteRoutes:pce};function _ce(e){let t=wo.getClusteringRoutes(),r=e.server==="hub"?t.hub_routes:t.leaf_routes,n=e.server==="hub"?t.leaf_routes:t.hub_routes,s=[],i=[];for(let o=0,c=e.routes.length;o<c;o++){let l=e.routes[o];l.port=xN.autoCast(l.port);let u=r.some(d=>d.host===l.host&&d.port===l.port),f=n.some(d=>d.host===l.host&&d.port===l.port);u||f?s.push(l):(r.push(l),i.push(l))}return e.server==="hub"?wo.updateConfigValue(As.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r):wo.updateConfigValue(As.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,r),{message:ZG,set:i,skipped:s}}a(_ce,"setRoutesNats");function hce(e){let t=QG.setRoutesValidator(e);if(t)throw JG(t,t.message,XG.BAD_REQUEST,void 0,void 0,!0);if(sd.get(As.CONFIG_PARAMS.CLUSTERING_ENABLED))return _ce(e);let r=[],n=[],s=sd.get(As.CONFIG_PARAMS.REPLICATION_ROUTES)??[];return e.routes.forEach(i=>{tq(s,i)?n.push(i):(s.push(i),r.push(i))}),wo.updateConfigValue(As.CONFIG_PARAMS.REPLICATION_ROUTES,s),{message:ZG,set:r,skipped:n}}a(hce,"setRoutes");function tq(e,t){return typeof t=="string"?e.includes(t):typeof t=="object"&&t!==null?e.some(r=>(r.host===t.host||r.hostname===t.hostname)&&r.port===t.port):!1}a(tq,"existsInArray");function mce(){if(sd.get(As.CONFIG_PARAMS.CLUSTERING_ENABLED)){let e=wo.getClusteringRoutes();return{hub:e.hub_routes,leaf:e.leaf_routes}}else return sd.get(As.CONFIG_PARAMS.REPLICATION_ROUTES)??[]}a(mce,"getRoutes");function pce(e){let t=QG.deleteRoutesValidator(e);if(t)throw JG(t,t.message,XG.BAD_REQUEST,void 0,void 0,!0);if(sd.get(As.CONFIG_PARAMS.CLUSTERING_ENABLED))return Ece(e);let r=[],n=[],s=sd.get(As.CONFIG_PARAMS.REPLICATION_ROUTES)??[],i=[];return s.forEach(o=>{tq(e.routes,o)?r.push(o):(i.push(o),n.push(o))}),wo.updateConfigValue(As.CONFIG_PARAMS.REPLICATION_ROUTES,i),{message:eq,deleted:r,skipped:n}}a(pce,"deleteRoutes");function Ece(e){let t=wo.getClusteringRoutes(),r=t.hub_routes,n=t.leaf_routes,s=[],i=[],o=!1,c=!1;for(let l=0,u=e.routes.length;l<u;l++){let f=e.routes[l],d=!1;for(let _=0,h=r.length;_<h;_++){let m=r[_];if(f.host===m.host&&f.port===m.port){r.splice(_,1),d=!0,o=!0,s.push(f);break}}if(!d){let _=!0;for(let h=0,m=n.length;h<m;h++){let 
S=n[h];if(f.host===S.host&&f.port===S.port){n.splice(h,1),c=!0,_=!1,s.push(f);break}}_&&i.push(f)}}return o&&(r=xN.isEmptyOrZeroLength(r)?null:r,wo.updateConfigValue(As.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r)),c&&(n=xN.isEmptyOrZeroLength(n)?null:n,wo.updateConfigValue(As.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,n)),{message:eq,deleted:s,skipped:i}}a(Ece,"deleteRoutesNats")});var sq=C((CPe,nq)=>{"use strict";var ph=require("alasql"),El=require("recursive-iterator"),ti=ee(),gce=ie(),Eh=(k(),P(q)),BN=class{static{a(this,"sql_statement_bucket")}constructor(t){this.ast=t,this.affected_attributes=new Map,this.table_lookup=new Map,this.schema_lookup=new Map,this.table_to_schema_lookup=new Map,Tce(this.ast,this.affected_attributes,this.table_lookup,this.schema_lookup,this.table_to_schema_lookup)}getAttributesBySchemaTableName(t,r){if(!t||!r||!this.affected_attributes)return[];if(this.affected_attributes.has(t))return!this.affected_attributes.get(t).has(r)&&(r=this.table_lookup.get(r),!r)?[]:this.affected_attributes.get(t).get(r)}getAllTables(){let t=[];if(!this.affected_attributes)return t;for(let r of this.affected_attributes.keys())t.push(Array.from(this.affected_attributes.get(r).keys()));return t}getTablesBySchemaName(t){return!t||!this.affected_attributes?[]:Array.from(this.affected_attributes.get(t).keys())}getSchemas(){return this.affected_attributes?Array.from(this.affected_attributes.keys()):[]}getAst(){return this.ast}updateAttributeWildcardsForRolePerms(t){let r=this.ast.columns.filter(s=>Eh.SEARCH_WILDCARDS.includes(s.columnid));if(r.length===0)return this.ast;let n=this.ast.from[0].databaseid;return this.ast.columns=this.ast.columns.filter(s=>!Eh.SEARCH_WILDCARDS.includes(s.columnid)),r.forEach(s=>{let i=this.table_to_schema_lookup.has(s.tableid)?this.table_to_schema_lookup.get(s.tableid):n,o=this.table_lookup.has(s.tableid)?this.table_lookup.get(s.tableid):this.ast.from[0].tableid;if(t[i]&&t[i].tables[o]&&t[i].tables[o][Eh.PERMS_CRUD_ENUM.READ]){let c;t[i].tables[o].attribute_permissions.length>0?c=Sce(t[i].tables[o].attribute_permissions):c=global.hdb_schema[i][o].attributes.map(u=>({attribute_name:u.attribute}));let l=this.affected_attributes.get(i).get(o).filter(u=>!Eh.SEARCH_WILDCARDS.includes(u));c.forEach(({attribute_name:u})=>{let f=new ph.yy.Column({columnid:u});s.tableid&&(f.tableid=s.tableid),this.ast.columns.push(f),l.includes(u)||l.push(u)}),this.affected_attributes.get(i).set(o,l)}}),this.ast}};function Sce(e){return e.filter(t=>t[Eh.PERMS_CRUD_ENUM.READ])}a(Sce,"filterReadRestrictedAttrs");function Tce(e,t,r,n,s){Ace(e,t,r,n,s)}a(Tce,"interpretAST");function gh(e,t,r,n,s){if(!(!e||!e.databaseid)&&(t.has(e.databaseid)||t.set(e.databaseid,new Map),t.get(e.databaseid).has(e.tableid)||t.get(e.databaseid).set(e.tableid,[]),e.as&&(r.has(e.as)||r.set(e.as,e.tableid),n&&!n.has(e.as)&&n.set(e.as,e.databaseid)),s)){let i=e.databaseid,o=e.tableid;e.as&&(o=e.as),s.set(o,i)}}a(gh,"addSchemaTableToMap");function Ace(e,t,r,n,s){if(!e){ti.info("getRecordAttributesAST: invalid SQL syntax tree");return}e instanceof ph.yy.Insert?Oce(e,t,r):e instanceof ph.yy.Select?Rce(e,t,r,n,s):e instanceof ph.yy.Update?yce(e,t,r):e instanceof ph.yy.Delete?bce(e,t,r):ti.error("AST in getRecordAttributesAST() is not a valid SQL type.")}a(Ace,"getRecordAttributesAST");function Rce(e,t,r,n,s){if(!e){ti.info("getSelectAttributes: invalid SQL syntax tree");return}if(!e.from||e.from[0]===void 0)return;let i=e.from[0].databaseid;if(gce.isEmptyOrZeroLength(i)){ti.error("No schema 
specified");return}e.from.forEach(c=>{gh(c,t,r,n,s)}),e.joins&&e.joins.forEach(c=>{c.as&&(c.table.as=c.as),gh(c.table,t,r,n,s)});let o=new El(e.columns);for(let{node:c}of o)if(c&&c.columnid){let l=c.tableid,u=n.has(l)?n.get(l):i;if(l||(l=e.from[0].tableid),!t.get(u).has(l))if(r.has(l))l=r.get(l);else{ti.info(`table specified as ${l} not found.`);return}t.get(u).get(l).indexOf(c.columnid)<0&&t.get(u).get(l).push(c.columnid)}if(e.where){let c=new El(e.where),l=e.from[0].tableid;for(let{node:u}of c)if(u&&u.columnid){let f=u.tableid?u.tableid:l;if(!t.get(i).has(f))if(r.has(f))f=r.get(f);else{ti.info(`table specified as ${f} not found.`);continue}t.get(i).get(f).indexOf(u.columnid)<0&&t.get(i).get(f).push(u.columnid)}}if(e.joins&&e.joins.forEach(c=>{let l=new El(c.on);for(let{node:u}of l)if(u&&u.columnid){let f=u.tableid,d=s.get(f);if(!t.get(d).has(f))if(r.has(f))f=r.get(f);else{ti.info(`table specified as ${f} not found.`);continue}t.get(d).get(f).indexOf(u.columnid)<0&&t.get(d).get(f).push(u.columnid)}}),e.order){let c=new El(e.order);for(let{node:l}of c)if(l&&l.columnid){let u=l.tableid,f=n.has(u)?n.get(u):i;if(u||(u=e.from[0].tableid),!t.get(f).has(u))if(r.has(u))u=r.get(u);else{ti.info(`table specified as ${u} not found.`);return}t.get(f).get(u).indexOf(l.columnid)<0&&t.get(f).get(u).push(l.columnid)}}}a(Rce,"getSelectAttributes");function yce(e,t,r){if(!e){ti.info("getUpdateAttributes: invalid SQL syntax tree");return}let n=new El(e.columns),s=e.table.databaseid;gh(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&HN(e.table.tableid,s,i.columnid,t,r)}a(yce,"getUpdateAttributes");function bce(e,t,r){if(!e){ti.info("getDeleteAttributes: invalid SQL syntax tree");return}let n=new El(e.where),s=e.table.databaseid;gh(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&HN(e.table.tableid,s,i.columnid,t,r)}a(bce,"getDeleteAttributes");function Oce(e,t,r){if(!e){ti.info("getInsertAttributes: invalid SQL syntax tree");return}let n=new El(e.columns),s=e.into.databaseid;gh(e.into,t,r);for(let{node:i}of n)i&&i.columnid&&HN(e.into.tableid,s,i.columnid,t,r)}a(Oce,"getInsertAttributes");function HN(e,t,r,n,s){if(!n.get(t))return;let i=e;n.get(t).has(i)||(i=s.get(i)),n.get(t).get(i).push(r)}a(HN,"pushAttribute");nq.exports=BN});var oq=C((DPe,iq)=>{"use strict";var Vg=(k(),P(q)),Kg=class{static{a(this,"BaseLicense")}constructor(t=0,r=Vg.RAM_ALLOCATION_ENUM.DEFAULT,n=Vg.LICENSE_VALUES.VERSION_DEFAULT,s){this.exp_date=t,this.ram_allocation=r,this.version=n,this.fingerprint=s}},kN=class extends Kg{static{a(this,"ExtendedLicense")}constructor(t=0,r=Vg.RAM_ALLOCATION_ENUM.DEFAULT,n=Vg.LICENSE_VALUES.VERSION_DEFAULT,s,i=!1){super(t,r,n,s),this.enterprise=i}};iq.exports={BaseLicense:Kg,ExtendedLicense:kN}});var ad=C((MPe,fq)=>{"use strict";var od=require("fs-extra"),Yg=(jp(),P(zp)),cq=require("crypto"),Nce=require("moment"),wce=require("uuid").v4,Zr=ee(),GN=require("path"),Ice=ie(),gl=(k(),P(q)),{totalmem:aq}=require("os"),Cce=oq().ExtendedLicense,id="invalid license key format",Pce="061183",Dce="mofi25",Lce="aes-256-cbc",Mce=16,vce=32,lq=le(),{resolvePath:uq}=Ct();lq.initSync();var FN;fq.exports={validateLicense:dq,generateFingerPrint:xce,licenseSearch:VN,getLicense:kce,checkMemoryLimit:Fce};function qN(){return GN.join(lq.getHdbBasePath(),gl.LICENSE_KEY_DIR_NAME,gl.LICENSE_FILE_NAME)}a(qN,"getLicenseDirPath");function Uce(){let e=qN();return uq(GN.join(e,gl.LICENSE_FILE_NAME))}a(Uce,"getLicenseFilePath");function $N(){let e=qN();return uq(GN.join(e,gl.REG_KEY_FILE_NAME))}a($N,"getFingerPrintFilePath");async function 
xce(){let e=$N();try{return await od.readFile(e,"utf8")}catch(t){if(t.code==="ENOENT")return await Bce();throw Zr.error(`Error writing fingerprint file to ${e}`),Zr.error(t),new Error("There was an error generating the fingerprint")}}a(xce,"generateFingerPrint");async function Bce(){let e=wce(),t=Yg.hash(e,Yg.HASH_FUNCTION.MD5),r=$N();try{await od.mkdirp(qN()),await od.writeFile(r,t)}catch(n){if(n.code==="EEXIST")return t;throw Zr.error(`Error writing fingerprint file to ${r}`),Zr.error(n),new Error("There was an error generating the fingerprint")}return t}a(Bce,"writeFingerprint");function dq(e,t){let r={valid_license:!1,valid_date:!1,valid_machine:!1,exp_date:null,ram_allocation:gl.RAM_ALLOCATION_ENUM.DEFAULT,version:gl.LICENSE_VALUES.VERSION_DEFAULT};if(!e)return Zr.error("empty license key passed to validate."),r;let n=$N(),s=!1;try{s=od.statSync(n)}catch(i){Zr.error(i)}if(s){let i;try{i=od.readFileSync(n,"utf8")}catch{Zr.error("error validating this machine in the license"),r.valid_machine=!1;return}let o=e.split(Dce),c=o[1];c=Buffer.concat([Buffer.from(c)],Mce);let l=Buffer.concat([Buffer.from(i)],vce),u=cq.createDecipheriv(Lce,l,c);r.valid_date=!0,r.valid_license=!0,r.valid_machine=!0;let f=null;try{f=u.update(o[0],"hex","utf8"),f.trim(),f+=u.final("utf8")}catch{let h=Hce(o[0],i);if(h)f=h;else throw r.valid_license=!1,r.valid_machine=!1,console.error(id),Zr.error(id),new Error(id)}let d;if(isNaN(f))try{d=JSON.parse(f),r.version=d.version,r.exp_date=d.exp_date,isNaN(r.exp_date)&&(r.exp_date=new Date(r.exp_date).getTime()),d.ram_allocation&&(r.ram_allocation=d.ram_allocation)}catch{throw console.error(id),Zr.error(id),new Error(id)}else r.exp_date=f;r.exp_date<Nce().valueOf()&&(r.valid_date=!1),Yg.validate(o[1],`${Pce}${i}${t}`,Yg.HASH_FUNCTION.MD5)||(r.valid_license=!1)}else r.valid_license=!1,r.valid_machine=!1;return r.valid_license&&r.valid_machine&&r.valid_date||Zr.error("Invalid licence"),r}a(dq,"validateLicense");function Hce(e,t){try{let r=cq.createDecipher("aes192",t),n=r.update(e,"hex","utf8");return n.trim(),n+=r.final("utf8"),n}catch{Zr.warn("Check old license failed")}}a(Hce,"checkOldLicense");function VN(){let e=new Cce,t=[];try{t=od.readFileSync(Uce(),"utf-8").split(`\r
24
- `)}catch(r){r.code==="ENOENT"?Zr.debug("no license file found"):Zr.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(Ice.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=dq(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){Zr.error("There was an error parsing the license string."),Zr.error(s),e.ram_allocation=gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return FN=e,e}a(VN,"licenseSearch");async function kce(){return FN||await VN(),FN}a(kce,"getLicense");function Fce(){let e=VN().ram_allocation,t=process.constrainedMemory?.()||aq();if(t=Math.round(Math.min(t,aq())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}a(Fce,"checkMemoryLimit")});var WN=C((UPe,pq)=>{var Wg=ad(),_q=require("chalk"),Zn=ee(),hq=require("prompt"),{promisify:Gce}=require("util"),KN=(k(),P(q)),qce=require("fs-extra"),$ce=require("path"),Vce=ie(),{packageJson:Kce}=Et(),mq=le();mq.initSync();var Yce=require("moment"),Wce=Gce(hq.get),zce=$ce.join(mq.getHdbBasePath(),KN.LICENSE_KEY_DIR_NAME,KN.LICENSE_FILE_NAME,KN.LICENSE_FILE_NAME);pq.exports={getFingerprint:Qce,setLicense:jce,parseLicense:YN,register:Jce,getRegistrationInfo:Zce};async function jce(e){if(e&&e.key&&e.company){try{Zn.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await YN(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw Zn.error(r),Zn.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}a(jce,"setLicense");async function Qce(){let e={};try{e=await Wg.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw Zn.error(r),Zn.error(t),new Error(r)}return e}a(Qce,"getFingerprint");async function YN(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");Zn.info("Validating license input...");let r=Wg.validateLicense(e,t);if(Zn.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(Zn.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(Zn.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{Zn.info("writing license to disk"),await qce.writeFile(zce,JSON.stringify({license_key:e,company:t}))}catch(n){throw Zn.error("Failed to write License"),n}return"Registration successful."}a(YN,"parseLicense");async function Jce(){let e=await Xce();return YN(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}a(Jce,"register");async function Xce(){let e=await Wg.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:_q.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:_q.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{hq.start()}catch(n){Zn.error(n)}let r;try{r=await Wce(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}a(Xce,"promptForRegistration");async function Zce(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await Wg.getLicense()}catch(r){throw Zn.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Vce.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Kce.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=Yce.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}a(Zce,"getRegistrationInfo")});var gq=C((BPe,Eq)=>{"use strict";var ele=Lt(),zN=class{static{a(this,"HubConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d,_,h,m){this.port=t,o===null&&(o=void 0),this.server_name=r+ele.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c}},this.cluster={name:f,port:d,routes:_,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:m}},this.system_account="SYS"}};Eq.exports=zN});var Aq=C((kPe,Tq)=>{"use strict";var Sq=Lt(),jN=class{static{a(this,"LeafConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d){this.port=t,d===null&&(d=void 0),this.server_name=r+Sq.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+Sq.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:f,ca_file:d,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:d,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:d,insecure:!0},urls:o,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};Tq.exports=jN});var yq=C((GPe,Rq)=>{"use strict";var QN=class{static{a(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};Rq.exports=QN});var Oq=C(($Pe,bq)=>{"use strict";var tle=Lt(),JN=class{static{a(this,"SysUserObject")}constructor(t,r){this.user=t+tle.SERVER_SUFFIX.ADMIN,this.password=r}};bq.exports=JN});var Jg=C((KPe,Iq)=>{"use strict";var Sl=require("path"),Tl=require("fs-extra"),rle=gq(),nle=Aq(),sle=yq(),ile=Oq(),XN=An(),ld=ie(),Nn=Ct(),jg=(k(),P(q)),Sh=Lt(),{CONFIG_PARAMS:Jt}=jg,ud=ee(),Th=le(),Nq=_o(),ZN=ir(),ole=ei(),cd="clustering",ale=1e4,wq=50;Iq.exports={generateNatsConfig:lle,removeNatsConfig:ule,getHubConfigPath:cle};function cle(){let e=Th.get(Jt.ROOTPATH);return Sl.join(e,cd,Sh.NATS_CONFIG_FILES.HUB_SERVER)}a(cle,"getHubConfigPath");async function lle(e=!1,t=void 0){let r=Th.get(Jt.ROOTPATH);Tl.ensureDirSync(Sl.join(r,"clustering","leaf")),Th.initSync();let n=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_CERT_AUTH),s=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_PRIVATEKEY),i=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_CERTIFICATE);!await Tl.exists(i)&&!await Tl.exists(!n)&&await ole.createNatsCerts();let o=Sl.join(r,cd,Sh.PID_FILES.HUB),c=Sl.join(r,cd,Sh.PID_FILES.LEAF),l=Nn.getConfigFromFile(Jt.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=Sl.join(r,cd,Sh.NATS_CONFIG_FILES.HUB_SERVER),f=Sl.join(r,cd,Sh.NATS_CONFIG_FILES.LEAF_SERVER),d=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_INSECURE),_=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_VERIFY),h=Nn.getConfigFromFile(Jt.CLUSTERING_NODENAME),m=Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await ZN.checkNATSServerInstalled()||Qg("nats-server dependency is either missing or the wrong 
version. Run 'npm install' to fix");let S=await XN.listUsers(),g=Nn.getConfigFromFile(Jt.CLUSTERING_USER),R=await XN.getClusterUser();(ld.isEmpty(R)||R.active!==!0)&&Qg(`Invalid cluster user '${g}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await zg(Jt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await zg(Jt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await zg(Jt.CLUSTERING_HUBSERVER_NETWORK_PORT),await zg(Jt.CLUSTERING_LEAFSERVER_NETWORK_PORT));let E=[],T=[];for(let[se,z]of S.entries())z.role?.role===jg.ROLE_TYPES_ENUM.CLUSTER_USER&&z.active&&(E.push(new ile(z.username,Nq.decrypt(z.hash))),T.push(new sle(z.username,Nq.decrypt(z.hash))));let N=[],{hub_routes:v}=Nn.getClusteringRoutes();if(!ld.isEmptyOrZeroLength(v))for(let se of v)N.push(`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@${se.host}:${se.port}`);let H=new rle(Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_NETWORK_PORT),h,o,i,s,n,d,_,m,Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_CLUSTER_NAME),Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,E,T);n==null&&(delete H.tls.ca_file,delete H.leafnodes.tls.ca_file),t=ld.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===jg.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Tl.writeJson(u,H),ud.trace(`Hub server config written to ${u}`));let X=`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,W=`tls://${R.uri_encoded_name}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,$=new nle(Nn.getConfigFromFile(Jt.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[X],[W],E,T,i,s,n,d);n==null&&delete $.tls.ca_file,(t===void 0||t===jg.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Tl.writeJson(f,$),ud.trace(`Leaf server config written to ${f}`))}a(lle,"generateNatsConfig");async function zg(e){let t=Th.get(e);return ld.isEmpty(t)&&Qg(`port undefined for '${e}'`),await ld.isPortTaken(t)&&Qg(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}a(zg,"isPortAvailable");function Qg(e){let t=`Error generating clustering config: ${e}`;ud.error(t),console.error(t),process.exit(1)}a(Qg,"generateNatsConfigError");async function ule(e){let{port:t,config_file:r}=ZN.getServerConfig(e),{username:n,decrypt_hash:s}=await XN.getClusterUser(),i=0,o=2e3;for(;i<wq;){try{let f=await ZN.createConnection(t,n,s,!1);if(f.protocol.connected===!0){f.close();break}}catch(f){ud.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${f}`)}if(i++,i>=wq)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=o*(i*2);u>3e4&&ud.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await ld.async_set_timeout(u)}let c="0".repeat(ale),l=Sl.join(Th.get(Jt.ROOTPATH),cd,r);await Tl.writeFile(l,c),await Tl.remove(l),ud.notify(e,"started.")}a(ule,"removeNatsConfig")});var vq=C((WPe,Mq)=>{"use strict";var es=le(),dle=ad(),Ve=(k(),P(q)),Ah=Lt(),Io=require("path"),{PACKAGE_ROOT:Zg}=Et(),Cq=le(),Xg=ie(),dd="/dev/null",fle=Io.join(Zg,"launchServiceScripts"),Pq=Io.join(Zg,"utility/scripts"),_le=Io.join(Pq,Ve.HDB_RESTART_SCRIPT),Dq=Io.resolve(Zg,"dependencies",`${process.platform}-${process.arch}`,Ah.NATS_BINARY_NAME);function Lq(){let t=dle.licenseSearch().ram_allocation||Ve.RAM_ALLOCATION_ENUM.DEFAULT,r=Ve.MEM_SETTING_KEY+t,n={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return Xg.noBootFile()&&(n[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=Xg.getEnvCliRootPath()),{name:Ve.PROCESS_DESCRIPTORS.HDB,script:Ve.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:Zg}}a(Lq,"generateMainServerConfig");var hle=9930;function mle(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=Io.join(e,"clustering",Ah.NATS_CONFIG_FILES.HUB_SERVER),r=Io.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=Cq.get(Ve.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=Ah.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==hle?"-"+n:""),script:Dq,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=dd,i.error_file=dd),i}a(mle,"generateNatsHubServerConfig");var ple=9940;function Ele(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=Io.join(e,"clustering",Ah.NATS_CONFIG_FILES.LEAF_SERVER),r=Io.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=Cq.get(Ve.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=Ah.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==ple?"-"+n:""),script:Dq,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=dd,i.error_file=dd),i}a(Ele,"generateNatsLeafServerConfig");function gle(){es.initSync();let e=Io.join(es.get(Ve.CONFIG_PARAMS.LOGGING_ROOT),Ve.LOG_NAMES.HDB),t={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ve.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:fle,autorestart:!1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=dd,t.error_file=dd),t}a(gle,"generateClusteringUpgradeV4ServiceConfig");function Sle(){let e={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.RESTART_HDB};return Xg.noBootFile()&&(e[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=Xg.getEnvCliRootPath()),{...{name:Ve.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:Pq},script:_le}}a(Sle,"generateRestart");function 
Tle(){return{apps:[Lq()]}}a(Tle,"generateAllServiceConfigs");Mq.exports={generateAllServiceConfigs:Tle,generateMainServerConfig:Lq,generateRestart:Sle,generateNatsHubServerConfig:mle,generateNatsLeafServerConfig:Ele,generateClusteringUpgradeV4ServiceConfig:gle}});var yh=C((QPe,zq)=>{"use strict";var tt=(k(),P(q)),Ale=ie(),Po=Jg(),eS=ir(),Co=Lt(),va=vq(),tS=le(),Al=ee(),Rle=Oo(),{startWorker:Uq,onMessageFromWorkers:yle}=ot(),ble=td(),jPe=require("util"),Ole=require("child_process"),Nle=require("fs"),{execFile:wle}=Ole,Qe;zq.exports={enterPM2Mode:Ile,start:Ua,stop:ew,reload:Bq,restart:Hq,list:tw,describe:Gq,connect:Do,kill:Mle,startAllServices:vle,startService:rw,getUniqueServicesList:qq,restartAllServices:Ule,isServiceRegistered:$q,reloadStopStart:Vq,restartHdb:Fq,deleteProcess:Dle,startClusteringProcesses:Yq,startClusteringThreads:Wq,isHdbRestartRunning:Lle,isClusteringRunning:Ble,stopClustering:xle,reloadClustering:Hle,expectedRestartOfChildren:kq};var Rh=!1;yle(e=>{e.type==="restart"&&tS.initSync(!0)});function Ile(){Rh=!0}a(Ile,"enterPM2Mode");function Do(){return Qe||(Qe=require("pm2")),new Promise((e,t)=>{Qe.connect((r,n)=>{r&&t(r),e(n)})})}a(Do,"connect");var en,Cle=10,xq;function Ua(e,t=!1){if(Rh)return Ple(e);let r=wle(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let o=en.indexOf(r);o>-1&&en.splice(o,1),!xq&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<Cle&&(Nle.existsSync(Po.getHubConfigPath())?Ua(e):(await Po.generateNatsConfig(!0),Ua(e),await new Promise(c=>setTimeout(c,3e3)),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let o=tS.get(tt.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,f;for(;l=c.exec(i);){if(l.index&&Co.LOG_LEVEL_HIERARCHY[o]>=Co.LOG_LEVEL_HIERARCHY[f||"info"]){let h=f===Co.LOG_LEVELS.ERR||f===Co.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",h,n,i.slice(u,l.index).trim())}let[d,_]=l;u=l.index+d.length,f=Co.LOG_LEVELS[_]}if(Co.LOG_LEVEL_HIERARCHY[o]>=Co.LOG_LEVEL_HIERARCHY[f||"info"]){let d=f===Co.LOG_LEVELS.ERR||f===Co.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",d,n,i.slice(u).trim())}}if(a(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!en&&(en=[],!t)){let i=a(()=>{xq=!0,en&&(en.map(o=>o.kill()),process.exit(0))},"kill_children");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}en.push(r)}a(Ua,"start");function Ple(e){return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.start(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Ple,"startWithPM2");function ew(e){if(!Rh){for(let t of en||[])t.name===e&&(en.splice(en.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.stop(e,async(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.delete(e,(i,o)=>{i&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(o)})})})}a(ew,"stop");function Bq(e){return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.reload(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Bq,"reload");function Hq(e){if(!Rh){kq();for(let t of en||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.restart(e,(n,s)=>{Qe.disconnect(),t(s)})})}a(Hq,"restart");function kq(){for(let e of en||[])e.config&&(e.config.restarts=0)}a(kq,"expectedRestartOfChildren");function Dle(e){return 
new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.delete(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Dle,"deleteProcess");async function Fq(){await Ua(va.generateRestart())}a(Fq,"restartHdb");async function Lle(){let e=await tw();for(let t in e)if(e[t].name===tt.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}a(Lle,"isHdbRestartRunning");function tw(){return new Promise(async(e,t)=>{try{await Do()}catch(r){t(r)}Qe.list((r,n)=>{r&&(Qe.disconnect(),t(r)),Qe.disconnect(),e(n)})})}a(tw,"list");function Gq(e){return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.describe(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Gq,"describe");function Mle(){if(!Rh){for(let e of en||[])e.kill();en=[];return}return new Promise(async(e,t)=>{try{await Do()}catch(r){t(r)}Qe.killDaemon((r,n)=>{r&&(Qe.disconnect(),t(r)),Qe.disconnect(),e(n)})})}a(Mle,"kill");async function vle(){try{await Yq(),await Wq(),await Ua(va.generateAllServiceConfigs())}catch(e){throw Qe?.disconnect(),e}}a(vle,"startAllServices");async function rw(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case tt.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=va.generateMainServerConfig();break;case tt.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=va.generateNatsIngestServiceConfig();break;case tt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=va.generateNatsReplyServiceConfig();break;case tt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=va.generateNatsHubServerConfig(),await Ua(r,t),await Po.removeNatsConfig(e);return;case tt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=va.generateNatsLeafServerConfig(),await Ua(r,t),await Po.removeNatsConfig(e);return;case tt.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=va.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await Ua(r)}catch(r){throw Qe?.disconnect(),r}}a(rw,"startService");async function qq(){try{let e=await tw(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Qe?.disconnect(),e}}a(qq,"getUniqueServicesList");async function Ule(e=[]){try{let t=!1,r=await qq();for(let n=0,s=Object.values(r).length;n<s;n++){let o=Object.values(r)[n].name;e.includes(o)||(o===tt.PROCESS_DESCRIPTORS.HDB?t=!0:await Hq(o))}t&&await Vq(tt.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Qe?.disconnect(),t}}a(Ule,"restartAllServices");async function $q(e){if(en?.find(r=>r.name===e))return!0;let t=await ble.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}a($q,"isServiceRegistered");async function Vq(e){let t=tS.get(tt.CONFIG_PARAMS.THREADS_COUNT)??tS.get(tt.CONFIG_PARAMS.THREADS),r=await Gq(e),n=Ale.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await ew(e),await rw(e)):e===tt.PROCESS_DESCRIPTORS.HDB?await Fq():await Bq(e)}a(Vq,"reloadStopStart");var Kq;async function Yq(e=!1){for(let t in tt.CLUSTERING_PROCESSES){let r=tt.CLUSTERING_PROCESSES[t];await rw(r,e)}}a(Yq,"startClusteringProcesses");async function Wq(){Kq=Uq(tt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:tt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await eS.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await eS.updateLocalStreams();let e=await Rle.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===tt.PRE_4_0_0_VERSION){Al.info("Starting clustering upgrade 4.0.0 
process"),Uq(tt.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}a(Wq,"startClusteringThreads");async function xle(){for(let e in tt.CLUSTERING_PROCESSES)if(e!==tt.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===tt.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await Kq.terminate();else{let t=tt.CLUSTERING_PROCESSES[e];await ew(t)}}a(xle,"stopClustering");async function Ble(){for(let e in tt.CLUSTERING_PROCESSES){let t=tt.CLUSTERING_PROCESSES[e];if(await $q(t)===!1)return!1}return!0}a(Ble,"isClusteringRunning");async function Hle(){await Po.generateNatsConfig(!0),await eS.reloadNATSHub(),await eS.reloadNATSLeaf(),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}a(Hle,"reloadClustering")});var iw={};Ue(iw,{compactOnStart:()=>kle,copyDb:()=>e$});async function kle(){xa.notify("Running compact on start"),console.log("Running compact on start");let e=(0,nw.get)(x.ROOTPATH),t=new Map,r=Xe();(0,sw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,rS.join)(e,"backup",n+".mdb"),o=(0,rS.join)(e,_c,n+"-copy.mdb"),c=0;try{c=await jq(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){xa.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{db_path:s,copy_dest:o,backup_dest:i,record_count:c}),await e$(n,o),console.log("Backing up",n,"to",i),await(0,Rl.move)(s,i,{overwrite:!0})}try{_d()}catch(n){xa.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}for(let[n,{db_path:s,copy_dest:i}]of t)console.log("Moving copy compacted",n,"to",s),await(0,Rl.move)(i,s,{overwrite:!0}),await(0,Rl.remove)((0,rS.join)(e,_c,`${n}-copy.mdb-lock`));try{_d()}catch(n){xa.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){xa.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,sw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);for(let[s,{db_path:i,backup_dest:o}]of t){console.error("Moving backup database",o,"back to",i);try{await(0,Rl.move)(o,i,{overwrite:!0})}catch(c){console.error(c)}}throw _d(),n}for(let[n,{backup_dest:s,record_count:i}]of t){let o=!0,c=await jq(n);if(console.log("Database",n,"after compact has a total record count of",c),i!==c){o=!1;let l=`There is a discrepancy between pre and post compact record count for database ${n}.
25
- Total record count before compaction: ${i}, total after: ${c}.
26
- Database backup has not been removed and can be found here: ${s}`;xa.error(l),console.error(l)}(0,nw.get)(x.STORAGE_COMPACTONSTARTKEEPBACKUP)===!0||o===!1||(console.log("Removing backup",s),await(0,Rl.remove)(s))}}async function jq(e){let t=await(0,Zq.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function fd(){}async function e$(e,t){console.log(`Copying database ${e} to ${t}`);let r=Xe()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let d in r){let _=r[d];_.primaryStore.put=fd,_.primaryStore.remove=fd;for(let h in _.indices){let m=_.indices[h];m.put=fd,m.remove=fd}_.auditStore&&(_.auditStore.put=fd,_.auditStore.remove=fd),n=_.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,o=(0,Qq.open)(new Jq.default(t)),c=o.openDB(nS.INTERNAL_DBIS_NAME),l,u=0,f=s.useReadTransaction();try{for(let{key:_,value:h}of s.getRange({transaction:f})){let m=h.is_hash_attribute||h.isPrimaryKey,S,g;if(m&&(S=h.compression,g=sS(),g?h.compression=g:delete h.compression,S?.dictionary?.toString()===g?.dictionary?.toString()&&(S=null,g=null)),c.put(_,h),!(m||h.indexed))continue;let R=new Xq.default(!m,m);R.encoding="binary",R.compression=S;let E=n.openDB(_,R);E.decoder=null,E.decoderCopies=!1,E.encoding="binary",R.compression=g;let T=o.openDB(_,R);T.encoder=null,console.log("copying",_,"from",e,"to",t),await d(E,T,m,f)}if(i){let _=n.openDB(nS.AUDIT_STORE_NAME,bh);console.log("copying audit log for",e,"to",t),d(i,_,!1,f)}async function d(_,h,m,S){let g=0,R=0,E=0,T=1e7,N=null;for(;T-- >0;)try{for(let v of _.getKeys({start:N,transaction:S}))try{N=v;let{value:H,version:X}=_.getEntry(v,{transaction:S});if(H?.length<14&&m){E++;continue}l=h.put(v,H,m?X:void 0),g++,S.openTimer&&(S.openTimer=0),R+=(v?.length||10)+H.length,u++>5e3&&(await l,console.log("copied",g,"entries, skipped",E,"delete records,",R,"bytes"),u=0)}catch(H){console.error("Error copying record",typeof v=="symbol"?"symbol":v,"from",e,"to",t,H)}console.log("finish copying, copied",g,"entries, skipped",E,"delete records,",R,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}a(d,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{f.done(),o.close()}}var Qq,rS,Rl,nw,Jq,Xq,nS,Zq,sw,xa,ow=Re(()=>{xe();Qq=require("lmdb"),rS=require("path"),Rl=require("fs-extra"),nw=M(le()),Jq=M(Wf()),Xq=M(Yf()),nS=M(Gt());k();Ai();Zq=M(ho()),sw=M(Ct()),xa=M(ee());a(kle,"compactOnStart");a(jq,"getTotalDBRecordCount");a(fd,"noop");a(e$,"copyDb")});var md=C((iDe,a$)=>{"use strict";var Fle=require("minimist"),{isMainThread:cw,parentPort:Nh,threadId:rDe}=require("worker_threads"),at=(k(),P(q)),Hi=ee(),lw=ie(),oS=Jg(),iS=ir(),nDe=Lt(),s$=Ct(),ri=yh(),t$=td(),{compactOnStart:Gle}=(ow(),P(iw)),qle=hc(),{restartWorkers:aS,onMessageByType:$le}=ot(),{handleHDBError:Vle,hdb_errors:Kle}=he(),{HTTP_STATUS_CODES:Yle}=Kle,wh=le(),{sendOperationToNode:r$,getThisNodeName:Wle,monitorNodeCAs:zle}=(Es(),P(Oa)),{getHDBNodeTable:sDe}=(sl(),P(SO));wh.initSync();var Oh=`Restarting HarperDB. This may take up to ${at.RESTART_TIMEOUT_MS/1e3} seconds.`,jle="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",n$="Clustering is not enabled so cannot be restarted",Qle="Invalid service",hd,Rs;a$.exports={restart:i$,restartService:uw};cw&&$le(at.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await uw({service:e.workerType}):i$({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function i$(e){Rs=Object.keys(e).length===0,hd=await ri.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB);let t=Fle(process.argv);if(t.service){await uw(t);return}if(Rs&&!hd){console.error(jle);return}if(Rs&&console.log(Oh),hd){ri.enterPM2Mode(),Hi.notify(Oh);let r=qle(Object.keys(at.CONFIG_PARAM_MAP),!0);return lw.isEmptyOrZeroLength(Object.keys(r))||s$.updateConfigValue(void 0,void 0,r,!0,!0),Jle(),Oh}return cw?(Hi.notify(Oh),wh.get(at.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Gle(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{aS()},50)):Nh.postMessage({type:at.ITC_EVENT_TYPES.RESTART}),Oh}a(i$,"restart");async function uw(e){let{service:t}=e;if(at.HDB_PROCESS_SERVICES[t]===void 0)throw Vle(new Error,Qle,Yle.BAD_REQUEST,void 0,void 0,!0);if(ri.expectedRestartOfChildren(),hd=await ri.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB),!cw){e.replicated&&zle(),Nh.postMessage({type:at.ITC_EVENT_TYPES.RESTART,workerType:t}),Nh.ref(),await new Promise(s=>{Nh.on("message",i=>{i.type==="restart-complete"&&(s(),Nh.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===Wle())continue;let i;try{({job_id:i}=await r$(s,e))}catch(o){n.push({node:s.name,message:o.message});continue}n.push(await new Promise((o,c)=>{let u=2400,f=setInterval(async()=>{if(u--<=0){clearInterval(f);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let _=(await r$(s,{operation:"get_job",id:i})).results[0];if(_.status==="COMPLETE"&&(clearInterval(f),o({node:s.name,message:_.message})),_.status==="ERROR"){clearInterval(f);let h=new Error(_.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case at.HDB_PROCESS_SERVICES.clustering:if(!wh.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=n$;break}Rs&&console.log("Restarting clustering"),Hi.notify("Restarting clustering"),await o$();break;case at.HDB_PROCESS_SERVICES.clustering_config:case at.HDB_PROCESS_SERVICES["clustering config"]:if(!wh.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=n$;break}Rs&&console.log("Restarting clustering_config"),Hi.notify("Restarting clustering_config"),await ri.reloadClustering();break;case"custom_functions":case"custom functions":case at.HDB_PROCESS_SERVICES.harperdb:case at.HDB_PROCESS_SERVICES.http_workers:case at.HDB_PROCESS_SERVICES.http:if(Rs&&!hd){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Rs&&console.log("Restarting http_workers"),Hi.notify("Restarting http_workers"),Rs?await ri.restart(at.PROCESS_DESCRIPTORS.HDB):await aS("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(Hi.error(r),Rs&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}a(uw,"restartService");async function Jle(){await o$(),await ri.restart(at.PROCESS_DESCRIPTORS.HDB),await lw.async_set_timeout(2e3),wh.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await aw(),Rs&&(await iS.closeConnection(),process.exit(0))}a(Jle,"restartPM2Mode");async function o$(){if(!s$.getConfigFromFile(at.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await t$.getHDBProcessInfo()).clustering.length===0)Hi.trace("Clustering not running, restart will start clustering services"),await oS.generateNatsConfig(!0),await ri.startClusteringProcesses(),await ri.startClusteringThreads(),await aw(),Rs&&await iS.closeConnection();else{await oS.generateNatsConfig(!0),hd?(Hi.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await ri.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await ri.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await t$.getHDBProcessInfo()).clustering.forEach(s=>{Hi.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await lw.async_set_timeout(3e3),await aw(),await iS.updateLocalStreams(),Rs&&await iS.closeConnection(),Hi.trace("Restart clustering restarting ingest and reply service threads");let t=aS(at.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=aS(at.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}a(o$,"restartClustering");async function aw(){await oS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await oS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}a(aw,"removeNatsConfig")});var g$=C((cDe,E$)=>{"use strict";var aDe=require("lodash"),wn=(k(),P(q)),{handleHDBError:c$,hdb_errors:Xle}=he(),{HDB_ERROR_MSGS:Zle,HTTP_STATUS_CODES:eue}=Xle,dw=ee();E$.exports={getRolePermissions:rue};var yl=Object.create(null),tue=a(e=>({key:e,perms:{}}),"perms_template_obj"),f$=a((e=!1)=>({describe:e,tables:{}}),"schema_perms_template"),_$=a((e=!1,t=!1,r=!1,n=!1)=>({[wn.PERMS_CRUD_ENUM.READ]:e,[wn.PERMS_CRUD_ENUM.INSERT]:t,[wn.PERMS_CRUD_ENUM.UPDATE]:r,[wn.PERMS_CRUD_ENUM.DELETE]:n}),"permissions_template"),fw=a((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,..._$(t,r,n,s)}),"table_perms_template"),l$=a((e,t=_$())=>({attribute_name:e,describe:p$(t),[Ih]:t[Ih],[_w]:t[_w],[hw]:t[hw]}),"attr_perms_template"),u$=a((e,t=!1)=>({attribute_name:e,describe:t,[Ih]:t}),"timestamp_attr_perms_template"),{READ:Ih,INSERT:_w,UPDATE:hw}=wn.PERMS_CRUD_ENUM,h$=Object.values(wn.PERMS_CRUD_ENUM),m$=[Ih,_w,hw];function rue(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[wn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(yl[t]&&yl[t].key===n)return yl[t].perms;let s=nue(e,r);return yl[t]?yl[t].key=n:yl[t]=tue(n),yl[t].perms=s,s}catch(r){if(!e[wn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[wn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<wn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${t}' must be updated to align with new structure from the 2.2.0 release.`;throw dw.error(n),dw.debug(r),c$(new Error,Zle.OUTDATED_PERMS_TRANSLATION_ERROR,eue.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
24
+ `)}catch(r){r.code==="ENOENT"?Zr.debug("no license file found"):Zr.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(Ice.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=dq(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){Zr.error("There was an error parsing the license string."),Zr.error(s),e.ram_allocation=gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return FN=e,e}a(VN,"licenseSearch");async function kce(){return FN||await VN(),FN}a(kce,"getLicense");function Fce(){let e=VN().ram_allocation,t=process.constrainedMemory?.()||aq();if(t=Math.round(Math.min(t,aq())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}a(Fce,"checkMemoryLimit")});var WN=C((UPe,pq)=>{var Wg=ad(),_q=require("chalk"),Zn=ee(),hq=require("prompt"),{promisify:Gce}=require("util"),KN=(k(),P(q)),qce=require("fs-extra"),$ce=require("path"),Vce=ie(),{packageJson:Kce}=Et(),mq=le();mq.initSync();var Yce=require("moment"),Wce=Gce(hq.get),zce=$ce.join(mq.getHdbBasePath(),KN.LICENSE_KEY_DIR_NAME,KN.LICENSE_FILE_NAME,KN.LICENSE_FILE_NAME);pq.exports={getFingerprint:Qce,setLicense:jce,parseLicense:YN,register:Jce,getRegistrationInfo:Zce};async function jce(e){if(e&&e.key&&e.company){try{Zn.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await YN(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw Zn.error(r),Zn.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}a(jce,"setLicense");async function Qce(){let e={};try{e=await Wg.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw Zn.error(r),Zn.error(t),new Error(r)}return e}a(Qce,"getFingerprint");async function YN(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");Zn.info("Validating license input...");let r=Wg.validateLicense(e,t);if(Zn.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(Zn.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(Zn.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{Zn.info("writing license to disk"),await qce.writeFile(zce,JSON.stringify({license_key:e,company:t}))}catch(n){throw Zn.error("Failed to write License"),n}return"Registration successful."}a(YN,"parseLicense");async function Jce(){let e=await Xce();return YN(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}a(Jce,"register");async function Xce(){let e=await Wg.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:_q.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:_q.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{hq.start()}catch(n){Zn.error(n)}let r;try{r=await Wce(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}a(Xce,"promptForRegistration");async function Zce(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await Wg.getLicense()}catch(r){throw Zn.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Vce.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Kce.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=Yce.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}a(Zce,"getRegistrationInfo")});var gq=C((BPe,Eq)=>{"use strict";var ele=Lt(),zN=class{static{a(this,"HubConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d,_,h,m){this.port=t,o===null&&(o=void 0),this.server_name=r+ele.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c}},this.cluster={name:f,port:d,routes:_,tls:{cert_file:s,key_file:i,ca_file:o,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:m}},this.system_account="SYS"}};Eq.exports=zN});var Aq=C((kPe,Tq)=>{"use strict";var Sq=Lt(),jN=class{static{a(this,"LeafConfigObject")}constructor(t,r,n,s,i,o,c,l,u,f,d){this.port=t,d===null&&(d=void 0),this.server_name=r+Sq.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+Sq.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:f,ca_file:d,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:d,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:d,insecure:!0},urls:o,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};Tq.exports=jN});var yq=C((GPe,Rq)=>{"use strict";var QN=class{static{a(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};Rq.exports=QN});var Oq=C(($Pe,bq)=>{"use strict";var tle=Lt(),JN=class{static{a(this,"SysUserObject")}constructor(t,r){this.user=t+tle.SERVER_SUFFIX.ADMIN,this.password=r}};bq.exports=JN});var Jg=C((KPe,Iq)=>{"use strict";var Sl=require("path"),Tl=require("fs-extra"),rle=gq(),nle=Aq(),sle=yq(),ile=Oq(),XN=An(),ld=ie(),Nn=Ct(),jg=(k(),P(q)),Sh=Lt(),{CONFIG_PARAMS:Jt}=jg,ud=ee(),Th=le(),Nq=_o(),ZN=ir(),ole=ei(),cd="clustering",ale=1e4,wq=50;Iq.exports={generateNatsConfig:lle,removeNatsConfig:ule,getHubConfigPath:cle};function cle(){let e=Th.get(Jt.ROOTPATH);return Sl.join(e,cd,Sh.NATS_CONFIG_FILES.HUB_SERVER)}a(cle,"getHubConfigPath");async function lle(e=!1,t=void 0){let r=Th.get(Jt.ROOTPATH);Tl.ensureDirSync(Sl.join(r,"clustering","leaf")),Th.initSync();let n=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_CERT_AUTH),s=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_PRIVATEKEY),i=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_CERTIFICATE);!await Tl.exists(i)&&!await Tl.exists(!n)&&await ole.createNatsCerts();let o=Sl.join(r,cd,Sh.PID_FILES.HUB),c=Sl.join(r,cd,Sh.PID_FILES.LEAF),l=Nn.getConfigFromFile(Jt.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=Sl.join(r,cd,Sh.NATS_CONFIG_FILES.HUB_SERVER),f=Sl.join(r,cd,Sh.NATS_CONFIG_FILES.LEAF_SERVER),d=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_INSECURE),_=Nn.getConfigFromFile(Jt.CLUSTERING_TLS_VERIFY),h=Nn.getConfigFromFile(Jt.CLUSTERING_NODENAME),m=Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await ZN.checkNATSServerInstalled()||Qg("nats-server dependency is either missing or the wrong 
version. Run 'npm install' to fix");let S=await XN.listUsers(),g=Nn.getConfigFromFile(Jt.CLUSTERING_USER),R=await XN.getClusterUser();(ld.isEmpty(R)||R.active!==!0)&&Qg(`Invalid cluster user '${g}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await zg(Jt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await zg(Jt.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await zg(Jt.CLUSTERING_HUBSERVER_NETWORK_PORT),await zg(Jt.CLUSTERING_LEAFSERVER_NETWORK_PORT));let E=[],T=[];for(let[se,z]of S.entries())z.role?.role===jg.ROLE_TYPES_ENUM.CLUSTER_USER&&z.active&&(E.push(new ile(z.username,Nq.decrypt(z.hash))),T.push(new sle(z.username,Nq.decrypt(z.hash))));let N=[],{hub_routes:v}=Nn.getClusteringRoutes();if(!ld.isEmptyOrZeroLength(v))for(let se of v)N.push(`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@${se.host}:${se.port}`);let H=new rle(Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_NETWORK_PORT),h,o,i,s,n,d,_,m,Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_CLUSTER_NAME),Nn.getConfigFromFile(Jt.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,E,T);n==null&&(delete H.tls.ca_file,delete H.leafnodes.tls.ca_file),t=ld.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===jg.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Tl.writeJson(u,H),ud.trace(`Hub server config written to ${u}`));let X=`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,W=`tls://${R.uri_encoded_name}:${R.uri_encoded_d_hash}@0.0.0.0:${m}`,$=new nle(Nn.getConfigFromFile(Jt.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[X],[W],E,T,i,s,n,d);n==null&&delete $.tls.ca_file,(t===void 0||t===jg.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Tl.writeJson(f,$),ud.trace(`Leaf server config written to ${f}`))}a(lle,"generateNatsConfig");async function zg(e){let t=Th.get(e);return ld.isEmpty(t)&&Qg(`port undefined for '${e}'`),await ld.isPortTaken(t)&&Qg(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}a(zg,"isPortAvailable");function Qg(e){let t=`Error generating clustering config: ${e}`;ud.error(t),console.error(t),process.exit(1)}a(Qg,"generateNatsConfigError");async function ule(e){let{port:t,config_file:r}=ZN.getServerConfig(e),{username:n,decrypt_hash:s}=await XN.getClusterUser(),i=0,o=2e3;for(;i<wq;){try{let f=await ZN.createConnection(t,n,s,!1);if(f.protocol.connected===!0){f.close();break}}catch(f){ud.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${f}`)}if(i++,i>=wq)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=o*(i*2);u>3e4&&ud.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await ld.async_set_timeout(u)}let c="0".repeat(ale),l=Sl.join(Th.get(Jt.ROOTPATH),cd,r);await Tl.writeFile(l,c),await Tl.remove(l),ud.notify(e,"started.")}a(ule,"removeNatsConfig")});var vq=C((WPe,Mq)=>{"use strict";var es=le(),dle=ad(),Ve=(k(),P(q)),Ah=Lt(),Io=require("path"),{PACKAGE_ROOT:Zg}=Et(),Cq=le(),Xg=ie(),dd="/dev/null",fle=Io.join(Zg,"launchServiceScripts"),Pq=Io.join(Zg,"utility/scripts"),_le=Io.join(Pq,Ve.HDB_RESTART_SCRIPT),Dq=Io.resolve(Zg,"dependencies",`${process.platform}-${process.arch}`,Ah.NATS_BINARY_NAME);function Lq(){let t=dle.licenseSearch().ram_allocation||Ve.RAM_ALLOCATION_ENUM.DEFAULT,r=Ve.MEM_SETTING_KEY+t,n={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return Xg.noBootFile()&&(n[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=Xg.getEnvCliRootPath()),{name:Ve.PROCESS_DESCRIPTORS.HDB,script:Ve.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:Zg}}a(Lq,"generateMainServerConfig");var hle=9930;function mle(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=Io.join(e,"clustering",Ah.NATS_CONFIG_FILES.HUB_SERVER),r=Io.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=Cq.get(Ve.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=Ah.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==hle?"-"+n:""),script:Dq,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=dd,i.error_file=dd),i}a(mle,"generateNatsHubServerConfig");var ple=9940;function Ele(){es.initSync(!0);let e=es.get(Ve.CONFIG_PARAMS.ROOTPATH),t=Io.join(e,"clustering",Ah.NATS_CONFIG_FILES.LEAF_SERVER),r=Io.join(es.get(Ve.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ve.LOG_NAMES.HDB),n=Cq.get(Ve.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=Ah.LOG_LEVEL_FLAGS[es.get(Ve.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==ple?"-"+n:""),script:Dq,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=dd,i.error_file=dd),i}a(Ele,"generateNatsLeafServerConfig");function gle(){es.initSync();let e=Io.join(es.get(Ve.CONFIG_PARAMS.LOGGING_ROOT),Ve.LOG_NAMES.HDB),t={name:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ve.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:fle,autorestart:!1};return es.get(Ve.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=dd,t.error_file=dd),t}a(gle,"generateClusteringUpgradeV4ServiceConfig");function Sle(){let e={[Ve.PROCESS_NAME_ENV_PROP]:Ve.PROCESS_DESCRIPTORS.RESTART_HDB};return Xg.noBootFile()&&(e[Ve.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=Xg.getEnvCliRootPath()),{...{name:Ve.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:Pq},script:_le}}a(Sle,"generateRestart");function 
Tle(){return{apps:[Lq()]}}a(Tle,"generateAllServiceConfigs");Mq.exports={generateAllServiceConfigs:Tle,generateMainServerConfig:Lq,generateRestart:Sle,generateNatsHubServerConfig:mle,generateNatsLeafServerConfig:Ele,generateClusteringUpgradeV4ServiceConfig:gle}});var yh=C((QPe,zq)=>{"use strict";var tt=(k(),P(q)),Ale=ie(),Po=Jg(),eS=ir(),Co=Lt(),va=vq(),tS=le(),Al=ee(),Rle=Oo(),{startWorker:Uq,onMessageFromWorkers:yle}=ot(),ble=td(),jPe=require("util"),Ole=require("child_process"),Nle=require("fs"),{execFile:wle}=Ole,Qe;zq.exports={enterPM2Mode:Ile,start:Ua,stop:ew,reload:Bq,restart:Hq,list:tw,describe:Gq,connect:Do,kill:Mle,startAllServices:vle,startService:rw,getUniqueServicesList:qq,restartAllServices:Ule,isServiceRegistered:$q,reloadStopStart:Vq,restartHdb:Fq,deleteProcess:Dle,startClusteringProcesses:Yq,startClusteringThreads:Wq,isHdbRestartRunning:Lle,isClusteringRunning:Ble,stopClustering:xle,reloadClustering:Hle,expectedRestartOfChildren:kq};var Rh=!1;yle(e=>{e.type==="restart"&&tS.initSync(!0)});function Ile(){Rh=!0}a(Ile,"enterPM2Mode");function Do(){return Qe||(Qe=require("pm2")),new Promise((e,t)=>{Qe.connect((r,n)=>{r&&t(r),e(n)})})}a(Do,"connect");var en,Cle=10,xq;function Ua(e,t=!1){if(Rh)return Ple(e);let r=wle(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let o=en.indexOf(r);o>-1&&en.splice(o,1),!xq&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<Cle&&(Nle.existsSync(Po.getHubConfigPath())?Ua(e):(await Po.generateNatsConfig(!0),Ua(e),await new Promise(c=>setTimeout(c,3e3)),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let o=tS.get(tt.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,f;for(;l=c.exec(i);){if(l.index&&Co.LOG_LEVEL_HIERARCHY[o]>=Co.LOG_LEVEL_HIERARCHY[f||"info"]){let h=f===Co.LOG_LEVELS.ERR||f===Co.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",h,n,i.slice(u,l.index).trim())}let[d,_]=l;u=l.index+d.length,f=Co.LOG_LEVELS[_]}if(Co.LOG_LEVEL_HIERARCHY[o]>=Co.LOG_LEVEL_HIERARCHY[f||"info"]){let d=f===Co.LOG_LEVELS.ERR||f===Co.LOG_LEVELS.WRN?Al.OUTPUTS.STDERR:Al.OUTPUTS.STDOUT;Al.logCustomLevel(f||"info",d,n,i.slice(u).trim())}}if(a(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!en&&(en=[],!t)){let i=a(()=>{xq=!0,en&&(en.map(o=>o.kill()),process.exit(0))},"kill_children");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}en.push(r)}a(Ua,"start");function Ple(e){return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.start(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Ple,"startWithPM2");function ew(e){if(!Rh){for(let t of en||[])t.name===e&&(en.splice(en.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.stop(e,async(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.delete(e,(i,o)=>{i&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(o)})})})}a(ew,"stop");function Bq(e){return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.reload(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Bq,"reload");function Hq(e){if(!Rh){kq();for(let t of en||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.restart(e,(n,s)=>{Qe.disconnect(),t(s)})})}a(Hq,"restart");function kq(){for(let e of en||[])e.config&&(e.config.restarts=0)}a(kq,"expectedRestartOfChildren");function Dle(e){return 
new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.delete(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Dle,"deleteProcess");async function Fq(){await Ua(va.generateRestart())}a(Fq,"restartHdb");async function Lle(){let e=await tw();for(let t in e)if(e[t].name===tt.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}a(Lle,"isHdbRestartRunning");function tw(){return new Promise(async(e,t)=>{try{await Do()}catch(r){t(r)}Qe.list((r,n)=>{r&&(Qe.disconnect(),t(r)),Qe.disconnect(),e(n)})})}a(tw,"list");function Gq(e){return new Promise(async(t,r)=>{try{await Do()}catch(n){r(n)}Qe.describe(e,(n,s)=>{n&&(Qe.disconnect(),r(n)),Qe.disconnect(),t(s)})})}a(Gq,"describe");function Mle(){if(!Rh){for(let e of en||[])e.kill();en=[];return}return new Promise(async(e,t)=>{try{await Do()}catch(r){t(r)}Qe.killDaemon((r,n)=>{r&&(Qe.disconnect(),t(r)),Qe.disconnect(),e(n)})})}a(Mle,"kill");async function vle(){try{await Yq(),await Wq(),await Ua(va.generateAllServiceConfigs())}catch(e){throw Qe?.disconnect(),e}}a(vle,"startAllServices");async function rw(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case tt.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=va.generateMainServerConfig();break;case tt.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=va.generateNatsIngestServiceConfig();break;case tt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=va.generateNatsReplyServiceConfig();break;case tt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=va.generateNatsHubServerConfig(),await Ua(r,t),await Po.removeNatsConfig(e);return;case tt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=va.generateNatsLeafServerConfig(),await Ua(r,t),await Po.removeNatsConfig(e);return;case tt.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=va.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await Ua(r)}catch(r){throw Qe?.disconnect(),r}}a(rw,"startService");async function qq(){try{let e=await tw(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Qe?.disconnect(),e}}a(qq,"getUniqueServicesList");async function Ule(e=[]){try{let t=!1,r=await qq();for(let n=0,s=Object.values(r).length;n<s;n++){let o=Object.values(r)[n].name;e.includes(o)||(o===tt.PROCESS_DESCRIPTORS.HDB?t=!0:await Hq(o))}t&&await Vq(tt.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Qe?.disconnect(),t}}a(Ule,"restartAllServices");async function $q(e){if(en?.find(r=>r.name===e))return!0;let t=await ble.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}a($q,"isServiceRegistered");async function Vq(e){let t=tS.get(tt.CONFIG_PARAMS.THREADS_COUNT)??tS.get(tt.CONFIG_PARAMS.THREADS),r=await Gq(e),n=Ale.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await ew(e),await rw(e)):e===tt.PROCESS_DESCRIPTORS.HDB?await Fq():await Bq(e)}a(Vq,"reloadStopStart");var Kq;async function Yq(e=!1){for(let t in tt.CLUSTERING_PROCESSES){let r=tt.CLUSTERING_PROCESSES[t];await rw(r,e)}}a(Yq,"startClusteringProcesses");async function Wq(){Kq=Uq(tt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:tt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await eS.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await eS.updateLocalStreams();let e=await Rle.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===tt.PRE_4_0_0_VERSION){Al.info("Starting clustering upgrade 4.0.0 
process"),Uq(tt.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}a(Wq,"startClusteringThreads");async function xle(){for(let e in tt.CLUSTERING_PROCESSES)if(e!==tt.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===tt.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await Kq.terminate();else{let t=tt.CLUSTERING_PROCESSES[e];await ew(t)}}a(xle,"stopClustering");async function Ble(){for(let e in tt.CLUSTERING_PROCESSES){let t=tt.CLUSTERING_PROCESSES[e];if(await $q(t)===!1)return!1}return!0}a(Ble,"isClusteringRunning");async function Hle(){await Po.generateNatsConfig(!0),await eS.reloadNATSHub(),await eS.reloadNATSLeaf(),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await Po.removeNatsConfig(tt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}a(Hle,"reloadClustering")});var iw={};Ue(iw,{compactOnStart:()=>kle,copyDb:()=>e$});async function kle(){xa.notify("Running compact on start"),console.log("Running compact on start");let e=(0,nw.get)(x.ROOTPATH),t=new Map,r=Xe();(0,sw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,rS.join)(e,"backup",n+".mdb"),o=(0,rS.join)(e,_c,n+"-copy.mdb"),c=0;try{c=await jq(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){xa.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{db_path:s,copy_dest:o,backup_dest:i,record_count:c}),await e$(n,o),console.log("Backing up",n,"to",i),await(0,Rl.move)(s,i,{overwrite:!0}),console.log("Moving copy compacted",n,"to",s),await(0,Rl.move)(o,s,{overwrite:!0}),await(0,Rl.remove)((0,rS.join)(e,_c,`${n}-copy.mdb-lock`))}try{_d()}catch(n){xa.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}try{_d()}catch(n){xa.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){xa.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,sw.updateConfigValue)(x.STORAGE_COMPACTONSTART,!1);for(let[s,{db_path:i,backup_dest:o}]of t){console.error("Moving backup database",o,"back to",i);try{await(0,Rl.move)(o,i,{overwrite:!0})}catch(c){console.error(c)}}throw _d(),n}for(let[n,{backup_dest:s,record_count:i}]of t){let o=await jq(n);if(console.log("Database",n,"after compact has a total record count of",o),i!==o){let c=`There is a discrepancy between pre and post compact record count for database ${n}.
25
+ Total record count before compaction: ${i}, total after: ${o}.
26
+ Database backup has not been removed and can be found here: ${s}`;xa.error(c),console.error(c)}(0,nw.get)(x.STORAGE_COMPACTONSTARTKEEPBACKUP)!==!0&&(console.log("Removing backup",s),await(0,Rl.remove)(s))}}async function jq(e){let t=await(0,Zq.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function fd(){}async function e$(e,t){console.log(`Copying database ${e} to ${t}`);let r=Xe()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let d in r){let _=r[d];_.primaryStore.put=fd,_.primaryStore.remove=fd;for(let h in _.indices){let m=_.indices[h];m.put=fd,m.remove=fd}_.auditStore&&(_.auditStore.put=fd,_.auditStore.remove=fd),n=_.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,o=(0,Qq.open)(new Jq.default(t)),c=o.openDB(nS.INTERNAL_DBIS_NAME),l,u=0,f=s.useReadTransaction();try{for(let{key:_,value:h}of s.getRange({transaction:f})){let m=h.is_hash_attribute||h.isPrimaryKey,S,g;if(m&&(S=h.compression,g=sS(),g?h.compression=g:delete h.compression,S?.dictionary?.toString()===g?.dictionary?.toString()&&(S=null,g=null)),c.put(_,h),!(m||h.indexed))continue;let R=new Xq.default(!m,m);R.encoding="binary",R.compression=S;let E=n.openDB(_,R);E.decoder=null,E.decoderCopies=!1,E.encoding="binary",R.compression=g;let T=o.openDB(_,R);T.encoder=null,console.log("copying",_,"from",e,"to",t),await d(E,T,m,f)}if(i){let _=n.openDB(nS.AUDIT_STORE_NAME,bh);console.log("copying audit log for",e,"to",t),d(i,_,!1,f)}async function d(_,h,m,S){let g=0,R=0,E=0,T=1e7,N=null;for(;T-- >0;)try{for(let v of _.getKeys({start:N,transaction:S}))try{N=v;let{value:H,version:X}=_.getEntry(v,{transaction:S});if(H?.length<14&&m){E++;continue}l=h.put(v,H,m?X:void 0),g++,S.openTimer&&(S.openTimer=0),R+=(v?.length||10)+H.length,u++>5e3&&(await l,console.log("copied",g,"entries, skipped",E,"delete records,",R,"bytes"),u=0)}catch(H){console.error("Error copying record",typeof v=="symbol"?"symbol":v,"from",e,"to",t,H)}console.log("finish copying, copied",g,"entries, skipped",E,"delete records,",R,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}a(d,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{f.done(),o.close()}}var Qq,rS,Rl,nw,Jq,Xq,nS,Zq,sw,xa,ow=Re(()=>{xe();Qq=require("lmdb"),rS=require("path"),Rl=require("fs-extra"),nw=M(le()),Jq=M(Wf()),Xq=M(Yf()),nS=M(Gt());k();Ai();Zq=M(ho()),sw=M(Ct()),xa=M(ee());a(kle,"compactOnStart");a(jq,"getTotalDBRecordCount");a(fd,"noop");a(e$,"copyDb")});var md=C((iDe,a$)=>{"use strict";var Fle=require("minimist"),{isMainThread:cw,parentPort:Nh,threadId:rDe}=require("worker_threads"),at=(k(),P(q)),Hi=ee(),lw=ie(),oS=Jg(),iS=ir(),nDe=Lt(),s$=Ct(),ri=yh(),t$=td(),{compactOnStart:Gle}=(ow(),P(iw)),qle=hc(),{restartWorkers:aS,onMessageByType:$le}=ot(),{handleHDBError:Vle,hdb_errors:Kle}=he(),{HTTP_STATUS_CODES:Yle}=Kle,wh=le(),{sendOperationToNode:r$,getThisNodeName:Wle,monitorNodeCAs:zle}=(Es(),P(Oa)),{getHDBNodeTable:sDe}=(sl(),P(SO));wh.initSync();var Oh=`Restarting HarperDB. This may take up to ${at.RESTART_TIMEOUT_MS/1e3} seconds.`,jle="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",n$="Clustering is not enabled so cannot be restarted",Qle="Invalid service",hd,Rs;a$.exports={restart:i$,restartService:uw};cw&&$le(at.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await uw({service:e.workerType}):i$({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function i$(e){Rs=Object.keys(e).length===0,hd=await ri.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB);let t=Fle(process.argv);if(t.service){await uw(t);return}if(Rs&&!hd){console.error(jle);return}if(Rs&&console.log(Oh),hd){ri.enterPM2Mode(),Hi.notify(Oh);let r=qle(Object.keys(at.CONFIG_PARAM_MAP),!0);return lw.isEmptyOrZeroLength(Object.keys(r))||s$.updateConfigValue(void 0,void 0,r,!0,!0),Jle(),Oh}return cw?(Hi.notify(Oh),wh.get(at.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Gle(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{aS()},50)):Nh.postMessage({type:at.ITC_EVENT_TYPES.RESTART}),Oh}a(i$,"restart");async function uw(e){let{service:t}=e;if(at.HDB_PROCESS_SERVICES[t]===void 0)throw Vle(new Error,Qle,Yle.BAD_REQUEST,void 0,void 0,!0);if(ri.expectedRestartOfChildren(),hd=await ri.isServiceRegistered(at.PROCESS_DESCRIPTORS.HDB),!cw){e.replicated&&zle(),Nh.postMessage({type:at.ITC_EVENT_TYPES.RESTART,workerType:t}),Nh.ref(),await new Promise(s=>{Nh.on("message",i=>{i.type==="restart-complete"&&(s(),Nh.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===Wle())continue;let i;try{({job_id:i}=await r$(s,e))}catch(o){n.push({node:s.name,message:o.message});continue}n.push(await new Promise((o,c)=>{let u=2400,f=setInterval(async()=>{if(u--<=0){clearInterval(f);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let _=(await r$(s,{operation:"get_job",id:i})).results[0];if(_.status==="COMPLETE"&&(clearInterval(f),o({node:s.name,message:_.message})),_.status==="ERROR"){clearInterval(f);let h=new Error(_.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case at.HDB_PROCESS_SERVICES.clustering:if(!wh.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=n$;break}Rs&&console.log("Restarting clustering"),Hi.notify("Restarting clustering"),await o$();break;case at.HDB_PROCESS_SERVICES.clustering_config:case at.HDB_PROCESS_SERVICES["clustering config"]:if(!wh.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=n$;break}Rs&&console.log("Restarting clustering_config"),Hi.notify("Restarting clustering_config"),await ri.reloadClustering();break;case"custom_functions":case"custom functions":case at.HDB_PROCESS_SERVICES.harperdb:case at.HDB_PROCESS_SERVICES.http_workers:case at.HDB_PROCESS_SERVICES.http:if(Rs&&!hd){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Rs&&console.log("Restarting http_workers"),Hi.notify("Restarting http_workers"),Rs?await ri.restart(at.PROCESS_DESCRIPTORS.HDB):await aS("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(Hi.error(r),Rs&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}a(uw,"restartService");async function Jle(){await o$(),await ri.restart(at.PROCESS_DESCRIPTORS.HDB),await lw.async_set_timeout(2e3),wh.get(at.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await aw(),Rs&&(await iS.closeConnection(),process.exit(0))}a(Jle,"restartPM2Mode");async function o$(){if(!s$.getConfigFromFile(at.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await t$.getHDBProcessInfo()).clustering.length===0)Hi.trace("Clustering not running, restart will start clustering services"),await oS.generateNatsConfig(!0),await ri.startClusteringProcesses(),await ri.startClusteringThreads(),await aw(),Rs&&await iS.closeConnection();else{await oS.generateNatsConfig(!0),hd?(Hi.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await ri.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await ri.restart(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await t$.getHDBProcessInfo()).clustering.forEach(s=>{Hi.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await lw.async_set_timeout(3e3),await aw(),await iS.updateLocalStreams(),Rs&&await iS.closeConnection(),Hi.trace("Restart clustering restarting ingest and reply service threads");let t=aS(at.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=aS(at.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}a(o$,"restartClustering");async function aw(){await oS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await oS.removeNatsConfig(at.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}a(aw,"removeNatsConfig")});var g$=C((cDe,E$)=>{"use strict";var aDe=require("lodash"),wn=(k(),P(q)),{handleHDBError:c$,hdb_errors:Xle}=he(),{HDB_ERROR_MSGS:Zle,HTTP_STATUS_CODES:eue}=Xle,dw=ee();E$.exports={getRolePermissions:rue};var yl=Object.create(null),tue=a(e=>({key:e,perms:{}}),"perms_template_obj"),f$=a((e=!1)=>({describe:e,tables:{}}),"schema_perms_template"),_$=a((e=!1,t=!1,r=!1,n=!1)=>({[wn.PERMS_CRUD_ENUM.READ]:e,[wn.PERMS_CRUD_ENUM.INSERT]:t,[wn.PERMS_CRUD_ENUM.UPDATE]:r,[wn.PERMS_CRUD_ENUM.DELETE]:n}),"permissions_template"),fw=a((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,..._$(t,r,n,s)}),"table_perms_template"),l$=a((e,t=_$())=>({attribute_name:e,describe:p$(t),[Ih]:t[Ih],[_w]:t[_w],[hw]:t[hw]}),"attr_perms_template"),u$=a((e,t=!1)=>({attribute_name:e,describe:t,[Ih]:t}),"timestamp_attr_perms_template"),{READ:Ih,INSERT:_w,UPDATE:hw}=wn.PERMS_CRUD_ENUM,h$=Object.values(wn.PERMS_CRUD_ENUM),m$=[Ih,_w,hw];function rue(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[wn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(yl[t]&&yl[t].key===n)return yl[t].perms;let s=nue(e,r);return yl[t]?yl[t].key=n:yl[t]=tue(n),yl[t].perms=s,s}catch(r){if(!e[wn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[wn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<wn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${t}' must be updated to align with new structure from the 2.2.0 release.`;throw dw.error(n),dw.debug(r),c$(new Error,Zle.OUTDATED_PERMS_TRANSLATION_ERROR,eue.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
27
27
  ${r.stack}`;throw dw.error(n),c$(new Error)}}}a(rue,"getRolePermissions");function nue(e,t){let r=Object.create(null);r.super_user=!1;let n=e.permission;r[wn.SYSTEM_SCHEMA_NAME]=n[wn.SYSTEM_SCHEMA_NAME],r.structure_user=n.structure_user;let s=Array.isArray(e.permission.structure_user)||e.permission.structure_user===!0?e.permission.structure_user:[];return Object.keys(t).forEach(i=>{if(s===!0||s.indexOf(i)>-1){r[i]=sue(t[i]);return}r[i]=f$(),n[i]?(n[i].describe&&(r[i].describe=!0),Object.keys(t[i]).forEach(o=>{if(n[i].tables[o]){let c=n[i].tables[o],l=t[i][o],u=iue(c,l);r[i].describe||h$.forEach(f=>{u[f]&&(r[i].describe=!0)}),r[i].tables[o]=u}else r[i].tables[o]=fw()})):Object.keys(t[i]).forEach(o=>{r[i].tables[o]=fw()})}),r}a(nue,"translateRolePermissions");function sue(e){let t=f$(!0);return Object.keys(e).forEach(r=>{t.tables[r]=fw(!0,!0,!0,!0,!0)}),t}a(sue,"createStructureUserPermissions");function iue(e,t){let{attribute_permissions:r}=e;if(r?.length>0){let s={...e};s.attribute_permissions=[];let i=r.reduce((u,f)=>{let{attribute_name:d}=f,_=f;return wn.TIME_STAMP_NAMES.includes(d)&&(_=u$(d,f[Ih])),u[d]=_,u},{}),o=t.primaryKey||t.hash_attribute,c=!!i[o],l=l$(o);return t.attributes.forEach(({attribute:u})=>{if(i[u]){let f=i[u];f.describe=p$(f),s.attribute_permissions.push(f),c||oue(f,l)}else if(u!==o){let f;wn.TIME_STAMP_NAMES.includes(u)?f=u$(u):f=l$(u),s.attribute_permissions.push(f)}}),c||s.attribute_permissions.push(l),s.describe=d$(s),s}else return e.describe=d$(e),e}a(iue,"getTableAttrPerms");function d$(e){return h$.filter(t=>e[t]).length>0}a(d$,"getSchemaTableDescribePerm");function p$(e){return m$.filter(t=>e[t]).length>0}a(p$,"getAttributeDescribePerm");function oue(e,t){m$.forEach(r=>{e[r]&&!t[r]&&(t[r]=!0,t.describe=!0)})}a(oue,"checkForHashPerms")});var Ch={};Ue(Ch,{authentication:()=>O$,bypassAuth:()=>hue,login:()=>pue,logout:()=>Eue,start:()=>mue});function hue(){b$=!0}async function O$(e,t){let r=e.headers.asObject,n=r.authorization,s=r.cookie,i=r.origin,o=[];try{if(i){let h=e.isOperationsServer?uue?lue:[]:cue?aue:[];if(h.includes(i)||h.includes("*")){if(e.method==="OPTIONS"){let m=tn.get(x.HTTP_CORSACCESSCONTROLALLOWHEADERS)??"Accept, Content-Type, Authorization",S=new So([["Access-Control-Allow-Methods","POST, GET, PUT, DELETE, PATCH, OPTIONS"],["Access-Control-Allow-Headers",m],["Access-Control-Allow-Origin",i]]);return cS&&S.set("Access-Control-Allow-Credentials","true"),{status:200,headers:S}}o.push("Access-Control-Allow-Origin",i),cS&&o.push("Access-Control-Allow-Credentials","true")}}let l,u;if(cS){i||(i=r.host);let h=(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session=",m=s?.split(/;\s+/)||[];for(let S of m)if(S.startsWith(h)){let g=S.indexOf(";");l=S.slice(h.length,g===-1?S.length:g),u=await S$.get(l);break}e.session=u||(u={})}let f=a((h,m,S)=>{let g=new pd.AuthAuditLog(h,m,Jo.AUTHENTICATION,r["x-forwarded-for"]??e.ip,e.method,e.pathname);g.auth_strategy=S,l&&(g.session_id=l),r.referer&&(g.referer=r.referer),r.origin&&(g.origin=r.origin),m===Bs.SUCCESS?mw.notify(g):mw.error(g)},"authAuditLog");if(!e.authorized&&e.mtlsConfig&&e.peerCertificate.subject&&e?._nodeRequest?.socket?.authorizationError&&mw.error("Authorization error:",e._nodeRequest.socket.authorizationError),e.mtlsConfig&&e.authorized&&e.peerCertificate.subject){let h=e.mtlsConfig.user;h!==null?((h===void 0||h==="Common Name"||h==="CN")&&(h=e.peerCertificate.subject.CN),e.user=await Ye.getUser(h,null,e),f(h,Bs.SUCCESS,"mTLS")):(0,pd.debug)("HTTPS/WSS mTLS authorized connection (mTLS 
did not authorize a user)","from",e.ip)}let d;if(!e.user)if(n){if(d=bl.get(n),!d){let h=n.indexOf(" "),m=n.slice(0,h),S=n.slice(h+1),g,R;try{switch(m){case"Basic":let E=atob(S),T=E.indexOf(":");g=E.slice(0,T),R=E.slice(T+1),d=g||R?await Ye.getUser(g,R,e):null;break;case"Bearer":try{d=await Qb(S)}catch(N){if(N.message==="invalid token")try{return await GE(S),c({status:-1})}catch{throw N}}break}}catch(E){return fue&&(bl.get(S)||(bl.set(S,S),f(g,Bs.FAILURE,m))),c({status:401,body:la({error:E.message},e)})}bl.set(n,d),due&&f(d.username,Bs.SUCCESS,m)}e.user=d}else u?.user?e.user=await Ye.getUser(u.user,null,e):(b$&&(e.ip?.includes("127.0.0.")||e.ip=="::1")||e?._nodeRequest?.socket?.server?._pipeName&&e.ip===void 0)&&(e.user=await(0,A$.getSuperUser)());cS&&(e.session.update=function(h){let m=tn.get(x.AUTHENTICATION_COOKIE_EXPIRES);if(!l){l=(0,R$.v4)();let S=tn.get(x.AUTHENTICATION_COOKIE_DOMAINS),g=m?new Date(Date.now()+(0,pw.convertToMS)(m)).toUTCString():_ue,R=S?.find(N=>r.host?.endsWith(N)),T=`${(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session="}${l}; Path=/; Expires=${g}; ${R?"Domain="+R+"; ":""}HttpOnly${e.protocol==="https"?"; SameSite=None; Secure":""}`;o?o.push("Set-Cookie",T):_?.headers?.set&&_.headers.set("Set-Cookie",T)}return e.protocol==="https"&&(o?(i&&o.push("Access-Control-Expose-Headers","X-Hdb-Session"),o.push("X-Hdb-Session","Secure")):_?.headers?.set&&(i&&_.headers.set("Access-Control-Expose-Headers","X-Hdb-Session"),_.headers.set("X-Hdb-Session","Secure"))),h.id=l,S$.put(h,{expiresAt:m?Date.now()+(0,pw.convertToMS)(m):void 0})},e.login=async function(h,m){let S=e.user=await Ye.authenticateUser(h,m,e);e.session.update({user:S&&(S.getId?.()??S.username)})});let _=await t(e);return _&&(_.status===401&&(r["user-agent"]?.startsWith("Mozilla")&&r.accept?.startsWith("text/html")&&_s.loginPath?(_.status=302,_.headers.set("Location",_s.loginPath(e))):_.headers.set("WWW-Authenticate","Basic")),c(_))}catch(l){throw c(l)}function c(l){let u=o.length;if(u>0){let f=l.headers;f||(l.headers=f=new So);for(let d=0;d<u;){let _=o[d++];f.set(_,o[d++])}}return o=null,l}a(c,"applyResponseHeaders")}function mue({server:e,port:t,securePort:r}){e.http(O$,t||r?{port:t,securePort:r}:{port:"all"}),T$||(T$=!0,setInterval(()=>{bl=new Map},tn.get(x.AUTHENTICATION_CACHETTL)).unref(),y$.user.addListener(()=>{bl=new Map}))}async function pue(e){if(!e.baseRequest?.login)throw new Error("No session for login");return e.baseResponse.headers.set=(t,r)=>{e.fastifyResponse.header(t,r)},await e.baseRequest.login(e.username,e.password??""),"Login successful"}async function Eue(e){if(!e.baseRequest.session)throw new Error("No session for logout");return await e.baseRequest.session.update({user:null}),"Logout successful"}var A$,R$,tn,pd,y$,pw,mw,aue,cue,lue,uue,S$,cS,b$,due,fue,_ue,bl,T$,lS=Re(()=>{A$=M(An());Mr();su();vu();xe();R$=require("uuid"),tn=M(le());k();pd=M(ee()),y$=M(u_());v_();pw=M(ie());so();mw=(0,pd.loggerWithTag)("auth-event");tn.initSync();aue=tn.get(x.HTTP_CORSACCESSLIST),cue=tn.get(x.HTTP_CORS),lue=tn.get(x.OPERATIONSAPI_NETWORK_CORSACCESSLIST),uue=tn.get(x.OPERATIONSAPI_NETWORK_CORS),S$=_t({table:"hdb_session",database:"system",attributes:[{name:"id",isPrimaryKey:!0},{name:"user"}]}),cS=tn.get(x.AUTHENTICATION_ENABLESESSIONS)??!0,b$=process.env.AUTHENTICATION_AUTHORIZELOCAL??tn.get(x.AUTHENTICATION_AUTHORIZELOCAL)??process.env.DEV_MODE,due=tn.get(x.LOGGING_AUDITAUTHEVENTS_LOGSUCCESSFUL)??!1,fue=tn.get(x.LOGGING_AUDITAUTHEVENTS_LOGFAILED)??!1,_ue="Tue, 01 Oct 8307 19:33:20 
GMT",bl=new Map;Ye.onInvalidatedUser(()=>{bl=new Map});a(hue,"bypassAuth");a(O$,"authentication");a(mue,"start");a(pue,"login");a(Eue,"logout")});var L$=C((gDe,D$)=>{"use strict";var Ne=require("joi"),N$=require("fs-extra"),w$=require("path"),ts=it(),I$=le(),C$=(k(),P(q)),P$=ee(),{hdb_errors:gue}=he(),{HDB_ERROR_MSGS:rn}=gue,Lo=/^[a-zA-Z0-9-_]+$/,Sue=/^[a-zA-Z0-9-_]+$/;D$.exports={getDropCustomFunctionValidator:Aue,setCustomFunctionValidator:Rue,addComponentValidator:Nue,dropCustomFunctionProjectValidator:wue,packageComponentValidator:Iue,deployComponentValidator:Cue,setComponentFileValidator:yue,getComponentFileValidator:Oue,dropComponentFileValidator:bue,addSSHKeyValidator:Pue,updateSSHKeyValidator:Due,deleteSSHKeyValidator:Lue,setSSHKnownHostsValidator:Mue};function uS(e,t,r){try{let n=I$.get(C$.CONFIG_PARAMS.COMPONENTSROOT),s=w$.join(n,t);return N$.existsSync(s)?e?t:r.message(rn.PROJECT_EXISTS):e?r.message(rn.NO_PROJECT):t}catch(n){return P$.error(n),r.message(rn.VALIDATION_ERR)}}a(uS,"checkProjectExists");function Ph(e,t){return e.includes("..")?t.message("Invalid file path"):e}a(Ph,"checkFilePath");function Tue(e,t,r,n){try{let s=I$.get(C$.CONFIG_PARAMS.COMPONENTSROOT),i=w$.join(s,e,t,r+".js");return N$.existsSync(i)?r:n.message(rn.NO_FILE)}catch(s){return P$.error(s),n.message(rn.VALIDATION_ERR)}}a(Tue,"checkFileExists");function Aue(e){let t=Ne.object({project:Ne.string().pattern(Lo).custom(uS.bind(null,!0)).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME}),type:Ne.string().valid("helpers","routes").required(),file:Ne.string().pattern(Lo).custom(Tue.bind(null,e.project,e.type)).custom(Ph).required().messages({"string.pattern.base":rn.BAD_FILE_NAME})});return ts.validateBySchema(e,t)}a(Aue,"getDropCustomFunctionValidator");function Rue(e){let t=Ne.object({project:Ne.string().pattern(Lo).custom(uS.bind(null,!0)).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME}),type:Ne.string().valid("helpers","routes").required(),file:Ne.string().custom(Ph).required(),function_content:Ne.string().required()});return ts.validateBySchema(e,t)}a(Rue,"setCustomFunctionValidator");function yue(e){let t=Ne.object({project:Ne.string().pattern(Lo).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME}),file:Ne.string().custom(Ph).required(),payload:Ne.string().allow("").optional(),encoding:Ne.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ts.validateBySchema(e,t)}a(yue,"setComponentFileValidator");function bue(e){let t=Ne.object({project:Ne.string().pattern(Lo).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME}),file:Ne.string().custom(Ph).optional()});return ts.validateBySchema(e,t)}a(bue,"dropComponentFileValidator");function Oue(e){let t=Ne.object({project:Ne.string().required(),file:Ne.string().custom(Ph).required(),encoding:Ne.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ts.validateBySchema(e,t)}a(Oue,"getComponentFileValidator");function Nue(e){let t=Ne.object({project:Ne.string().pattern(Lo).custom(uS.bind(null,!1)).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME})});return ts.validateBySchema(e,t)}a(Nue,"addComponentValidator");function wue(e){let t=Ne.object({project:Ne.string().pattern(Lo).custom(uS.bind(null,!0)).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME})});return ts.validateBySchema(e,t)}a(wue,"dropCustomFunctionProjectValidator");function Iue(e){let 
t=Ne.object({project:Ne.string().pattern(Lo).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME}),skip_node_modules:Ne.boolean(),skip_symlinks:Ne.boolean()});return ts.validateBySchema(e,t)}a(Iue,"packageComponentValidator");function Cue(e){let t=Ne.object({project:Ne.string().pattern(Lo).required().messages({"string.pattern.base":rn.BAD_PROJECT_NAME}),package:Ne.string().optional(),restart:Ne.alternatives().try(Ne.boolean(),Ne.string().valid("rolling")).optional()});return ts.validateBySchema(e,t)}a(Cue,"deployComponentValidator");function Pue(e){let t=Ne.object({name:Ne.string().pattern(Sue).required().messages({"string.pattern.base":rn.BAD_SSH_KEY_NAME}),key:Ne.string().required(),host:Ne.string().required(),hostname:Ne.string().required(),known_hosts:Ne.string().optional()});return ts.validateBySchema(e,t)}a(Pue,"addSSHKeyValidator");function Due(e){let t=Ne.object({name:Ne.string().required(),key:Ne.string().required()});return ts.validateBySchema(e,t)}a(Due,"updateSSHKeyValidator");function Lue(e){let t=Ne.object({name:Ne.string().required()});return ts.validateBySchema(e,t)}a(Lue,"deleteSSHKeyValidator");function Mue(e){let t=Ne.object({known_hosts:Ne.string().required()});return ts.validateBySchema(e,t)}a(Mue,"setSSHKnownHostsValidator")});var Mh=C((TDe,B$)=>{"use strict";var dS=require("joi"),Ba=require("path"),Ed=require("fs-extra"),{exec:vue,spawn:Uue}=require("child_process"),xue=require("util"),Bue=xue.promisify(vue),gd=(k(),P(q)),{PACKAGE_ROOT:Hue}=Et(),{handleHDBError:Dh,hdb_errors:kue}=he(),{HTTP_STATUS_CODES:Lh}=kue,Ol=le(),Fue=it(),Ha=ee(),{once:Gue}=require("events");Ol.initSync();var Ew=Ol.get(gd.CONFIG_PARAMS.COMPONENTSROOT),M$="npm install --force --omit=dev --json",que=`${M$} --dry-run`,$ue=Ol.get(gd.CONFIG_PARAMS.ROOTPATH),fS=Ba.join($ue,"ssh");B$.exports={installModules:Wue,auditModules:zue,installAllRootModules:Vue,uninstallRootModule:Kue,linkHarperdb:Yue,runCommand:Sd};async function Vue(e=!1,t=Ol.get(gd.CONFIG_PARAMS.ROOTPATH)){await _S();let r=!1,n=process.env;Ed.pathExistsSync(fS)&&Ed.readdirSync(fS).forEach(s=>{s.includes(".key")&&!r&&(n={GIT_SSH_COMMAND:"ssh -F "+Ba.join(fS,"config")+" -o UserKnownHostsFile="+Ba.join(fS,"known_hosts"),...process.env},r=!0)});try{let s=Ol.get(gd.CONFIG_PARAMS.ROOTPATH),i=Ba.join(s,"node_modules","harperdb");Ed.lstatSync(i).isSymbolicLink()&&Ed.unlinkSync(i)}catch(s){s.code!=="ENOENT"&&Ha.error("Error removing symlink:",s)}await Sd(e?"npm install --force --ignore-scripts":"npm install --force",t,n)}a(Vue,"installAllRootModules");async function Kue(e){await Sd(`npm uninstall ${e}`,Ol.get(gd.CONFIG_PARAMS.ROOTPATH))}a(Kue,"uninstallRootModule");async function Yue(){await _S(),await Sd(`npm link ${Hue}`,Ol.get(gd.CONFIG_PARAMS.ROOTPATH))}a(Yue,"linkHarperdb");async function Sd(e,t=void 0,r=process.env){Ha.debug({tagName:"npm_run_command"},`running command: \`${e}\``);let n=Uue(e,{shell:!0,cwd:t,env:r,stdio:["ignore","pipe","pipe"]}),s="",i="";n.stdout.on("data",c=>{let l=c.toString();Ha.debug({tagName:"npm_run_command:stdout"},l),s+=l}),n.stderr.on("data",c=>{let l=c.toString();Ha.error({tagName:"npm_run_command:stderr"},l),i+=l});let[o]=await Gue(n,"close");if(o!==0)throw new Error(`Command \`${e}\` exited with code ${o}.${i===""?"":` Error: ${i}`}`);return s||void 0}a(Sd,"runCommand");async function Wue(e){let t="install_node_modules is deprecated. 
Dependencies are automatically installed on deploy, and install_node_modules can lead to inconsistent behavior";Ha.warn(t,e.projects);let r=x$(e);if(r)throw Dh(r,r.message,Lh.BAD_REQUEST);let{projects:n,dry_run:s}=e,i=s===!0?que:M$;await _S(),await U$(n);let o={};for(let c=0,l=n.length;c<l;c++){let u=n[c];o[u]={npm_output:null,npm_error:null};let f=Ba.join(Ew,u),d,_=null;try{let{stdout:h,stderr:m}=await Bue(i,{cwd:f});d=h?h.replace(`
28
28
  `,""):null,_=m?m.replace(`
29
29
  `,""):null}catch(h){h.stderr?o[u].npm_error=v$(h.stderr):o[u].npm_error=h.message;continue}try{o[u].npm_output=JSON.parse(d)}catch{o[u].npm_output=d}try{o[u].npm_error=JSON.parse(_)}catch{o[u].npm_error=_}}return Ha.info(`finished installModules with response ${o}`),o.warning=t,o}a(Wue,"installModules");function v$(e){let t='"error": {',r=e.indexOf('"error": {'),n=e.indexOf(`}