harperdb 4.6.16 → 4.6.17

This diff reflects the changes between publicly released versions of the package as they appear in a supported public registry, and is provided for informational purposes only.
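For reference, a comparable diff can be generated locally with npm's built-in diff command (available in recent versions of npm); the output formatting may differ slightly from the registry rendering shown below:

    npm diff --diff=harperdb@4.6.16 --diff=harperdb@4.6.17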
@@ -15,9 +15,9 @@
  `,""));return r.replace(`
  `,"")}o(lq,"runCommand");async function Dae(){try{await cae.access(lw)}catch{return!1}let e=await lq(`${lw} --version`,void 0),t=e.substring(e.lastIndexOf("v")+1,e.length);return lae.eq(t,Pae)}o(Dae,"checkNATSServerInstalled");async function hw(e,t,r,n=!0,s="127.0.0.1"){if(!t&&!r){let a=await aq.getClusterUser();if(Pl(a))throw new Error("Unable to get nats connection. Cluster user is undefined.");t=a.username,r=a.decrypt_hash}hi.trace("create nats connection called");let i=await Rae({name:s,port:e,user:t,pass:r,maxReconnectAttempts:-1,waitOnFirstConnect:n,timeout:2e5,tls:{keyFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_PRIVATEKEY),certFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERTIFICATE),caFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERT_AUTH),rejectUnauthorized:!1}});return i.protocol.transport.socket.unref(),hi.trace("create connection established a nats client connection with id",i?.info?.client_id),i.closed().then(a=>{a&&hi.error("Error with Nats client connection, connection closed",a),i===fn&&uq()}),i}o(hw,"createConnection");function uq(){fn=void 0,wl=void 0,Ol=void 0,Cl=void 0}o(uq,"clearClientCache");async function Mae(){fn&&(await fn.drain(),fn=void 0,wl=void 0,Ol=void 0,Cl=void 0)}o(Mae,"closeConnection");var fn,Cl;async function Uh(){return Cl||(Cl=hw(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),void 0,void 0),fn=await Cl),fn||Cl}o(Uh,"getConnection");async function xh(){if(wl)return wl;Pl(fn)&&await Uh();let{domain:e}=Rd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Pl(e))throw new Error("Error getting JetStream domain. Unable to get JetStream manager.");return wl=await fn.jetstreamManager({domain:e,timeout:6e4}),wl}o(xh,"getJetStreamManager");async function dq(){if(Ol)return Ol;Pl(fn)&&await Uh();let{domain:e}=Rd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Pl(e))throw new Error("Error getting JetStream domain. 
Unable to get JetStream manager.");return Ol=fn.jetstream({domain:e,timeout:6e4}),Ol}o(dq,"getJetStream");async function Ji(){let e=fn||await Uh(),t=wl||await xh(),r=Ol||await dq();return{connection:e,jsm:t,js:r}}o(Ji,"getNATSReferences");async function vae(e){let t=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),{sys_name:r,decrypt_hash:n}=await aq.getClusterUser(),s=await hw(t,r,n),i=mw(),a=s.subscribe(i),c=[],l,u=(async()=>{for await(let d of a){let f=cq.decode(d.data);f.response_time=Date.now()-l,c.push(f)}})();return l=Date.now(),await s.publish("$SYS.REQ.SERVER.PING.VARZ",void 0,{reply:i}),await s.publish("$SYS.REQ.SERVER.PING",void 0,{reply:i}),await s.flush(),await uS.asyncSetTimeout(e),await a.drain(),await s.close(),await u,c}o(vae,"getServerList");async function pw(e,t){let{jsm:r}=await Ji(),n=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);s=s===null?-1:s;let i=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);i=i===null?-1:i,await r.streams.add({name:e,storage:yae.File,retention:Aae.Limits,subjects:t,discard:bae.Old,maxMsgs:s,maxBytes:i,maxAge:n})}o(pw,"createLocalStream");async function fq(){let{jsm:e}=await Ji(),t=await e.streams.list().next(),r=[];return t.forEach(n=>{r.push(n)}),r}o(fq,"listStreams");async function Uae(e){let{jsm:t}=await Ji();await t.streams.delete(e)}o(Uae,"deleteLocalStream");async function xae(e){let{connection:t}=await Ji(),r=[],n=mw(),s=t.subscribe(n),i=(async()=>{for await(let a of s)r.push(cq.decode(a.data))})();return await t.publish(`$JS.${e}.API.STREAM.LIST`,void 0,{reply:n}),await t.flush(),await s.drain(),await i,r}o(xae,"listRemoteStreams");async function Bae(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Ji(),i=nq(),a={durable_name:i,ack_policy:dw.Explicit};t&&(a.deliver_policy=fw.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];let u=[];for await(let d of l){let f=uw(d.data),m={nats_timestamp:d.info.timestampNanos,nats_sequence:d.info.streamSequence,entry:f};if(d.headers&&(m.origin=d.headers.get(Qr.MSG_HEADERS.ORIGIN)),u.push(m),d.ack(),d.info.pending===0)break}return await c.delete(),u}o(Bae,"viewStream");async function*Fae(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Ji(),i=nq(),a={durable_name:i,ack_policy:dw.Explicit};t&&(a.deliver_policy=fw.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];for await(let u of l){let d=uw(u.data);d[0]||(d=[d]);for(let f of d){let m={nats_timestamp:u.info.timestampNanos,nats_sequence:u.info.streamSequence,entry:f};u.headers&&(m.origin=u.headers.get(Qr.MSG_HEADERS.ORIGIN)),yield m}if(u.ack(),u.info.pending===0)break}await c.delete()}o(Fae,"viewStreamIterator");async function Hae(e,t,r,n){hi.trace(`publishToStream called with subject: ${e}, stream: ${t}, entries:`,n.operation),r=mq(n,r);let{js:s}=await Ji(),i=await fS(),a=`${e}.${i}`,c=await Oae(()=>n instanceof Uint8Array?n:oq.encode(n));try{hi.trace(`publishToStream publishing to subject: ${a}`),wae(c.length,"bytes-sent",e,n.operation,"replication"),await s.publish(a,c,{headers:r})}catch(l){if(l.code&&l.code.toString()==="503")return pq(async()=>{try{await 
s.publish(a,c,{headers:r})}catch{if(l.code&&l.code.toString()==="503"){hi.trace(`publishToStream creating stream: ${t}`);let d=a.split(".");d[2]="*",await pw(t,[a]),await s.publish(a,c,{headers:r})}else throw l}});throw l}}o(Hae,"publishToStream");function mq(e,t){t===void 0&&(t=Nae());let r=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME);return!t.has(Qr.MSG_HEADERS.ORIGIN)&&r&&t.append(Qr.MSG_HEADERS.ORIGIN,r),t}o(mq,"addNatsMsgHeader");function Rd(e){e=e.toLowerCase();let t=vh.join(kr.get(Qe.CONFIG_PARAMS.ROOTPATH),Cae);if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())return Pl(cw)&&(cw={port:Dh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),server_name:Dh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.HUB,config_file:Qr.NATS_CONFIG_FILES.HUB_SERVER,pid_file_path:vh.join(t,Qr.PID_FILES.HUB),hdbNatsPath:t}),cw;if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())return Pl(aw)&&(aw={port:Dh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),server_name:Dh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,config_file:Qr.NATS_CONFIG_FILES.LEAF_SERVER,domain:Dh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,pid_file_path:vh.join(t,Qr.PID_FILES.LEAF),hdbNatsPath:t}),aw;hi.error(`Unable to get Nats server config. Unrecognized process: ${e}`)}o(Rd,"getServerConfig");async function hq(e,t,r,n){try{await e.consumers.add(t,{ack_policy:dw.Explicit,durable_name:r,deliver_policy:fw.StartTime,opt_start_time:n})}catch(s){if(s.message!=="consumer already exists")throw s}}o(hq,"createConsumer");async function kae(e,t,r){await e.consumers.delete(t,r)}o(kae,"removeConsumer");function Gae(e){return e.split(".")[1]}o(Gae,"extractServerName");async function qae(e,t,r=6e4,n=mw()){if(!uS.isObject(t))throw new Error("data param must be an object");let s=oq.encode(t),{connection:i}=await Ji(),a={timeout:r};n&&(a.reply=n,a.noMux=!0);let c=await i.request(e,s,a);return uw(c.data)}o(qae,"request");function Ew(e){return new Promise(async(t,r)=>{let n=mae(lw,["--signal",`reload=${e}`],{cwd:__dirname}),s,i;n.on("error",a=>{r(a)}),n.stdout.on("data",a=>{i+=a.toString()}),n.stderr.on("data",a=>{s+=a.toString()}),n.stderr.on("close",a=>{s&&r(s),t(i)})})}o(Ew,"reloadNATS");async function $ae(){let{pid_file_path:e}=Rd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB);await Ew(e)}o($ae,"reloadNATSHub");async function Vae(){let{pid_file_path:e}=Rd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await Ew(e)}o(Vae,"reloadNATSLeaf");function Kae(e,t,r){let n;switch(e.code){case rq.NoResponders:n=`Unable to ${t}, node '${r}' is not listening.`;break;case rq.Timeout:n=`Unable to ${t}, node '${r}' is listening but did not respond.`;break;default:n=e.message;break}return n}o(Kae,"requestErrorHandler");async function Yae(e,t){let r=t+Qr.SERVER_SUFFIX.LEAF,{connection:n}=await Ji(),{jsm:s}=await ece(r),{schema:i,table:a}=e,c=dS.createNatsTableStreamName(i,a),l=e.start_time?e.start_time:new Date(Date.now()).toISOString();await pq(async()=>{if(e.subscribe===!0)await hq(s,c,n.info.server_name,l);else try{await kae(s,c,n.info.server_name)}catch(u){hi.trace(u)}})}o(Yae,"updateRemoteConsumer");async function Wae(e,t,r,n){let s=dS.createNatsTableStreamName(e,t),i=r+Qr.SERVER_SUFFIX.LEAF,a={type:Qe.ITC_EVENT_TYPES.NATS_CONSUMER_UPDATE,status:n,stream_name:s,node_domain_name:i};if(!iq&&Sae()<kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXINGESTTHREADS)){let{updateConsumer:c}=ow();await c(a)}await 
_ae(a),n==="stop"&&await uS.asyncSetTimeout(1e3)}o(Wae,"updateConsumerIterator");function pq(e){return Eae.writeTransaction(Qe.SYSTEM_SCHEMA_NAME,Qe.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,e)}o(pq,"exclusiveLock");async function Eq(e,t){let r=dS.createNatsTableStreamName(e,t),n=await fS(),s=Jae(e,t,n);await pw(r,[s])}o(Eq,"createLocalTableStream");async function zae(e){for(let t=0,r=e.length;t<r;t++){let n=e[t].schema,s=e[t].table;await Eq(n,s)}}o(zae,"createTableStreams");async function _q(e,t,r=void 0){if(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))try{let n=dS.createNatsTableStreamName(e,t),{domain:s}=Rd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await(await(await Uh()).jetstreamManager({domain:s,timeout:24e4})).streams.purge(n,r)}catch(n){if(n.message==="stream not found")hi.warn(n);else throw n}}o(_q,"purgeTableStream");async function jae(e,t){if(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))for(let r=0,n=t.length;r<n;r++)await _q(e,t[r])}o(jae,"purgeSchemaTableStreams");async function Qae(e){return(await xh()).streams.info(e)}o(Qae,"getStreamInfo");function Jae(e,t,r){return`${Qr.SUBJECT_PREFIXES.TXN}.${e}${t?"."+t:""}.${r}`}o(Jae,"createSubjectName");async function fS(){if(Mh)return Mh;if(Mh=(await xh())?.nc?.info?.server_name,Mh===void 0)throw new Error("Unable to get jetstream manager server name");return Mh}o(fS,"getJsmServerName");async function Xae(){let e=await xh(),t=await fS(),r=await fq();for(let n of r){let s=n.config,i=s.subjects[0];if(!i)continue;let a=Zae(n),c=i.split(".");if(c[c.length-1]===t&&!a||s.name==="__HARPERDB_WORK_QUEUE__")continue;let u=i.split(".");u[u.length-1]=t;let d=u.join(".");hi.trace(`Updating stream subject name from: ${i} to: ${d}`),s.subjects[0]=d,await e.streams.update(s.name,s)}}o(Xae,"updateLocalStreams");function Zae(e){let{config:t}=e,r=!1,n=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);s=s===null?-1:s;let i=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);return i=i===null?-1:i,n!==t.max_age&&(t.max_age=n,r=!0),s!==t.max_bytes&&(t.max_bytes=s,r=!0),i!==t.max_msgs&&(t.max_msgs=i,r=!0),r}o(Zae,"updateStreamLimits");async function ece(e){let t,r;try{t=await fn.jetstream({domain:e}),r=await fn.jetstreamManager({domain:e,checkAPI:!1})}catch(n){throw hi.error("Unable to connect to:",e),n}return{js:t,jsm:r}}o(ece,"connectToRemoteJS")});function _w(e){let t=e.get(mS),r=t?(0,yd.unpack)(t):null;r||(r={remoteNameToId:{}});let n=Ze(),s=!1;r.nodeName=Ze();let i=r.remoteNameToId;if(i[n]!==0){let a=0,c;for(let l in i){let u=i[l];u===0?c=l:u>a&&(a=u)}if(c){a++,i[c]=a;let l=[Symbol.for("seq"),a];e.rootStore.dbisDb.transactionSync(()=>{e.rootStore.dbisDb.get(l)||e.rootStore.dbisDb.putSync(l,{seqId:Fh(e)??1,nodes:[]})})}i[n]=0,e.putSync(mS,(0,yd.pack)(r))}return r}function Bh(e){return _w(e).remoteNameToId}function Tq(e,t){let r=_w(t),n=r.remoteNameToId,s=new Map,i=!1;for(let a in e){let c=e[a],l=n[a];if(l==null){let u=0;for(let d in n){let f=n[d];f>u&&(u=f)}l=u+1,n[a]=l,i=!0}s.set(c,l)}return i&&t.putSync(mS,(0,yd.pack)(r)),s}function hS(e,t){let r=_w(t),n=r.remoteNameToId,s=n[e];if(s==null){let i=0;for(let a in n){let c=n[a];c>i&&(i=c)}s=i+1,n[e]=s,t.putSync(mS,(0,yd.pack)(r))}return Sq.trace?.("The remote node name map",e,n,s),s}var Sq,yd,mS,gw=ue(()=>{Sq=w(ti());ss();yd=require("msgpackr"),mS=Symbol.for("remote-ids");o(_w,"getIdMappingRecord");o(Bh,"exportIdMapping");o(Tq,"remoteToLocalNodeId");o(hS,"getIdOfRemoteNode")});var 
Sw={};Oe(Sw,{commitsAwaitingReplication:()=>Ad,getHDBNodeTable:()=>Kt,getReplicationSharedStatus:()=>bd,iterateRoutes:()=>kh,shouldReplicateToNode:()=>Hh,subscribeToNodeUpdates:()=>Id});function Kt(){return Rq||(Rq=je({table:"hdb_nodes",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"subscriptions"},{attribute:"system_info"},{attribute:"url"},{attribute:"routes"},{attribute:"ca"},{attribute:"ca_info"},{attribute:"replicates"},{attribute:"revoked_certificates"},{attribute:"__createdtime__"},{attribute:"__updatedtime__"}]}))}function bd(e,t,r,n){return new Float64Array(e.getUserSharedBuffer(["replicated",t,r],new ArrayBuffer(48),n&&{callback:n}))}function Id(e){Kt().subscribe({}).then(async t=>{for await(let r of t){let n=r?.value?.name;Iq.debug?.("adding node",n,"on node",Ze()," on process",process.pid),server.nodes=server.nodes.filter(i=>i.name!==n),r.type==="put"&&n!==Ze()&&(r.value?server.nodes.push(r.value):console.error("Invalid node update event",r));let s=new Map;for await(let i of Kt().search({}))if(i.shard!=null){let a=s.get(i.shard);a||s.set(i.shard,a=[]),a.push(i)}server.shards=s,(r.type==="put"||r.type==="delete")&&e(r.value,r.id)}})}function Hh(e,t){let r=Ka.default.get(U.REPLICATION_DATABASES);return(e.replicates===!0||e.replicates?.sends)&&databases[t]&&(r==="*"||r?.find?.(n=>n.name===t&&(!n.sharded||e.shard===Ka.default.get(U.REPLICATION_SHARD))))&&Kt().primaryStore.get(Ze())?.replicates||e.subscriptions?.some(n=>(n.database||n.schema)===t&&n.subscribe)}function tce(){Id(e=>{Ya({},(t,r)=>{let n=e.name,s=yq.get(n);if(s||yq.set(n,s=new Map),s.has(r))return;let i;for(let a in t)if(i=t[a].auditStore,i)break;if(i){let a=bd(i,r,n,()=>{let c=a[0],l=a.lastTime;for(let{txnTime:u,onConfirm:d}of Ad.get(r)||[])u>l&&u<=c&&d();a.lastTime=c});a.lastTime=0,s.set(r,a)}})})}function*kh(e){for(let t of e.routes||[]){let r=t.url,n;if(typeof t=="string"?t.includes("://")?r=t:n=t:n=t.hostname??t.host,n&&!r){let s=Ka.default.get(U.REPLICATION_SECUREPORT)??(!Ka.default.get(U.REPLICATION_PORT)&&Ka.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),i;(i=n.match(/:(\d+)$/)?.[1])?n=n.slice(0,-i[0].length-1):t.port?i=t.port:i=s||Ka.default.get(U.REPLICATION_PORT)||Ka.default.get(U.OPERATIONSAPI_NETWORK_PORT);let a=i?.lastIndexOf?.(":");a>0&&(i=+i.slice(a+1).replace(/[\[\]]/g,"")),r=(s?"wss://":"ws://")+n+":"+i}if(!r){Aq.isMainThread&&console.error("Invalid route, must specify a url or host (with port)");continue}yield{replicates:!t.subscriptions,url:r,subscription:t.subscriptions,routes:t.routes,startTime:t.startTime,revoked_certificates:t.revokedCertificates}}}var Aq,bq,Ka,Iq,Rq,yq,Ad,Ll=ue(()=>{De();ss();am();Aq=require("worker_threads"),bq=w(ge()),Ka=w(oe());k();Iq=w(ti());server.nodes=[];o(Kt,"getHDBNodeTable");o(bd,"getReplicationSharedStatus");o(Id,"subscribeToNodeUpdates");o(Hh,"shouldReplicateToNode");yq=new Map;Rv((e,t,r)=>{if(r>server.nodes.length)throw new bq.ClientError(`Cannot confirm replication to more nodes (${r}) than are in the network (${server.nodes.length})`);Ad||(Ad=new Map,tce());let n=Ad.get(e);return n||(n=[],Ad.set(e,n)),new Promise(s=>{let i=0;n.push({txnTime:t,onConfirm:o(()=>{++i===r&&s()},"onConfirm")})})});o(tce,"startSubscriptionToReplications");o(kh,"iterateRoutes")});var Oq={};Oe(Oq,{connectedToNode:()=>Dl,disconnectedFromNode:()=>wd,ensureNode:()=>Bo,requestClusterStatus:()=>wq,startOnMainThread:()=>yw});async function yw(e){let t=0,r=st();for(let i of Object.getOwnPropertyNames(r)){let a=r[i];for(let c in a){let 
l=a[c];if(l.auditStore){pS.set(i,Fh(l.auditStore));break}}}Zi.whenThreadsStarted.then(async()=>{let i=[];for await(let l of r.system.hdb_nodes?.search([])||[])i.push(l);let a=Ze();function c(){let l=Kt().primaryStore.get(a);if(l!==null){let u=e.url??Wa();if(l===void 0||l.url!==u||l.shard!==e.shard)return Bo(a,{name:a,url:u,shard:e.shard,replicates:!0})}}o(c,"ensureThisNode"),Kt().primaryStore.get(a)&&c();for(let l of kh(e))try{let u=!l.subscriptions;if(u&&await c(),u&&l.replicates==null&&(l.replicates=!0),i.find(d=>d.url===l.url))continue;s(l)}catch(u){console.error(u)}Id(s)});let n;function s(i,a=i?.name){let c=Ze()&&a===Ze()||Wa()&&i?.url===Wa();if(c){let f=!!i?.replicates;if(n!==void 0&&n!==f)for(let m of Kt().search([]))m.replicates&&m.name!==a&&s(m,m.name);n=f}if(at.trace("Setting up node replication for",i),!i){for(let[f,m]of Xi){let h;for(let[p,{worker:_,nodes:g}]of m){let y=g[0];if(y&&y.name==a){h=!0;for(let[T,{worker:R}]of m)m.delete(T),at.warn("Node was deleted, unsubscribing from node",a,T,f),R?.postMessage({type:"unsubscribe-from-node",node:a,database:T,url:f});break}}if(h){Xi.get(f).iterator.remove(),Xi.delete(f);return}}return}if(c)return;if(!i.url){at.info(`Node ${i.name} is missing url`);return}let l=Xi.get(i.url);if(l&&l.iterator.remove(),!(i.replicates===!0||i.replicates?.sends)&&!i.subscriptions?.length&&!l)return;if(at.info(`Added node ${i.name} at ${i.url} for process ${Ze()}`),i.replicates&&i.subscriptions&&(i={...i,subscriptions:null}),i.name){for(let[f,m]of Nd)if(i.url===m.url){Nd.delete(f);break}Nd.set(i.name,i)}let u=st();if(l||(l=new Map,Xi.set(i.url,l)),l.iterator=Ya(e,(f,m,h)=>{h?d(m,!0):d(m,!1)}),i.subscriptions)for(let f of i.subscriptions){let m=f.database||f.schema;u[m]||(at.warn(`Database ${m} not found for node ${i.name}, making a subscription anyway`),d(m,!1))}function d(f,m){at.trace("Setting up replication for database",f,"on node",i.name);let h=l.get(f),p,_=[{replicateByDefault:m,...i}];pS.has(f)&&(_.push({replicateByDefault:m,name:Ze(),startTime:pS.get(f),endTime:Date.now(),replicates:!0}),pS.delete(f));let g=Hh(i,f),y=Zi.workers.filter(T=>T.name==="http");if(h?(p=h.worker,h.nodes=_):g&&(t=t%y.length,p=y[t++],l.set(f,{worker:p,nodes:_,url:i.url}),p?.on("exit",()=>{l.get(f)?.worker===p&&(l.delete(f),d(f,m))})),g)setTimeout(()=>{let T={type:"subscribe-to-node",database:f,nodes:_};p?p.postMessage(T):Gh(T)},rce);else{at.info("Node no longer should be used, unsubscribing from node",{replicates:i.replicates,databaseName:f,node:i,subscriptions:i.subscriptions,hasDatabase:!!u[f],thisReplicates:Kt().primaryStore.get(Ze())?.replicates}),Kt().primaryStore.get(Ze())?.replicates||(n=!1,at.info("Disabling replication, this node name",Ze(),Kt().primaryStore.get(Ze()),f));let T={type:"unsubscribe-from-node",database:f,url:i.url,name:i.name};p?p.postMessage(T):_S(T)}}o(d,"onDatabase")}o(s,"onNodeUpdate"),wd=o(function(i){try{at.info("Disconnected from node",i.name,i.url,"finished",!!i.finished);let a=Array.from(Nd.keys()),c=a.sort(),l=c.indexOf(i.name||pi(i.url));if(l===-1){at.warn("Disconnected node not found in node map",i.name,a);return}let u=Xi.get(i.url),d=u?.get(i.database);if(!d){at.warn("Disconnected node not found in replication map",i.database,u);return}if(d.connected=!1,i.finished||!Rw.default.get(U.REPLICATION_FAILOVER))return;let f=d.nodes[0];if(!(f.replicates===!0||f.replicates?.sends||f.subscriptions?.length))return;let m=f.shard,h=(l+1)%c.length;for(;l!==h;){let p=c[h],_=Nd.get(p);u=Xi.get(_.url);let 
g=u?.get(i.database);if(!g||g.connected===!1||g.nodes[0].shard!==m){h=(h+1)%c.length;continue}let{worker:y,nodes:T}=g,R=!1;for(let N of d.nodes){if(T.some(O=>O.name===N.name)){at.info(`Disconnected node is already failing over to ${p} for ${i.database}`);continue}N.endTime<Date.now()||(T.push(N),R=!0)}if(d.nodes=[d.nodes[0]],!R){at.info(`Disconnected node ${i.name} has no nodes to fail over to ${p}`);return}at.info(`Failing over ${i.database} from ${i.name} to ${p}`),y?y.postMessage({type:"subscribe-to-node",database:i.database,nodes:T}):Gh({database:i.database,nodes:T});return}at.warn("Unable to find any other node to fail over to",i.name,i.url)}catch(a){at.error("Error failing over node",a)}},"disconnectedFromNode"),Dl=o(function(i){let a=Xi.get(i.url),c=a?.get(i.database);if(!c){at.warn("Connected node not found in replication map, this may be because the node is being removed",i.database,a);return}c.connected=!0,c.latency=i.latency;let l=c.nodes[0];if(!l){at.warn("Newly connected node has no node subscriptions",i.database,c);return}if(!l.name){at.debug("Connected node is not named yet",i.database,c);return}c.nodes=[l];let u=!1;for(let d of Xi.values()){let f=d.get(i.database);if(!f||f==c)continue;let{worker:m,nodes:h,connected:p}=f;if(h)if(p===!1&&h[0].shard===l.shard)u=!0,c.nodes.push(h[0]);else{let _=h.filter(g=>g&&g.name!==l.name);_.length<h.length&&(f.nodes=_,m.postMessage({type:"subscribe-to-node",database:i.database,nodes:h}))}}u&&c.worker&&c.worker.postMessage({type:"subscribe-to-node",database:i.database,nodes:c.nodes})},"connectedToNode"),(0,Zi.onMessageByType)("disconnected-from-node",wd),(0,Zi.onMessageByType)("connected-to-node",Dl),(0,Zi.onMessageByType)("request-cluster-status",wq)}function wq(e,t){let r=[];for(let[n,s]of Nd)try{let i=Xi.get(s.url);at.info("Getting cluster status for",n,s.url,"has dbs",i?.size);let a=[];if(i){for(let[l,{worker:u,connected:d,nodes:f,latency:m}]of i)a.push({database:l,connected:d,latency:m,threadId:u?.threadId,nodes:f.filter(h=>!(h.endTime<Date.now())).map(h=>h.name)});let c=(0,Tw.cloneDeep)(s);c.database_sockets=a,delete c.ca,delete c.node_name,delete c.__updatedtime__,delete c.__createdtime__,r.push(c)}}catch(i){at.warn("Error getting cluster status for",s?.url,i)}return t?.postMessage({type:"cluster-status",connections:r}),{connections:r}}async function Bo(e,t){let r=Kt();e=e??pi(t.url),t.name=e;try{if(t.ca){let s=new Nq.X509Certificate(t.ca);t.ca_info={issuer:s.issuer.replace(/\n/g," "),subject:s.subject.replace(/\n/g," "),subjectAltName:s.subjectAltName,serialNumber:s.serialNumber,validFrom:s.validFrom,validTo:s.validTo}}}catch(s){at.error("Error parsing replication CA info for hdb_nodes table",s.message)}let n=r.primaryStore.get(e);if(at.debug(`Ensuring node ${e} at ${t.url}, existing record:`,n,"new record:",t),!n)await r.patch(t);else{t.replicates&&!Rw.default.get(U.CLUSTERING_ENABLED)&&(t.subscriptions=null);for(let s in t)if(n[s]!==t[s]&&s==="subscriptions"&&t[s]&&n[s]){let i=[],a=(0,Tw.cloneDeep)(n[s]);for(let c of t[s]){let l=!1;for(let u of a)if((c.database??c.schema)===(u.database??u.schema)&&c.table===u.table){u.publish=c.publish,u.subscribe=c.subscribe,l=!0;break}l||i.push(c)}t.subscriptions=[...a,...i];break}if(Array.isArray(t.revoked_certificates)){let s=n.revoked_certificates||[];t.revoked_certificates=[...new Set([...s,...t.revoked_certificates])]}at.info(`Updating node ${e} at ${t.url}`),await r.patch(t)}}var 
Zi,ES,at,Tw,Rw,Nq,rce,Xi,wd,Dl,Nd,pS,qh=ue(()=>{De();Zi=w(nt());ss();ES=require("worker_threads");Ll();at=w(Q()),Tw=require("lodash"),Rw=w(oe());k();Nq=require("crypto"),rce=200,Xi=new Map,Nd=new Map,pS=new Map;o(yw,"startOnMainThread");o(wq,"requestClusterStatus");ES.parentPort&&(wd=o(e=>{ES.parentPort.postMessage({type:"disconnected-from-node",...e})},"disconnectedFromNode"),Dl=o(e=>{ES.parentPort.postMessage({type:"connected-to-node",...e})},"connectedToNode"),(0,Zi.onMessageByType)("subscribe-to-node",e=>{Gh(e)}),(0,Zi.onMessageByType)("unsubscribe-from-node",e=>{_S(e)}));o(Bo,"ensureNode")});var os=M(Yt=>{"use strict";var hr=require("path"),{watch:nce}=require("chokidar"),Un=require("fs-extra"),Od=require("node-forge"),vq=require("net"),{generateKeyPair:Aw,X509Certificate:Fo,createPrivateKey:Uq}=require("crypto"),sce=require("util");Aw=sce.promisify(Aw);var wt=Od.pki,Ei=require("joi"),{v4:xq}=require("uuid"),{validateBySchema:ww}=lt(),{forComponent:ice}=Q(),is=oe(),Ls=(k(),v(W)),{CONFIG_PARAMS:vl}=Ls,_i=jy(),{ClientError:ja}=ge(),SS=require("node:tls"),{relative:Bq,join:oce}=require("node:path"),{CERT_PREFERENCE_APP:MMe,CERTIFICATE_VALUES:Cq}=_i,ace=Uc(),bw=yt(),{table:cce,getDatabases:lce,databases:gS}=(De(),v(mt)),{getJWTRSAKeys:Pq}=(_d(),v(Ph)),ht=ice("tls");Yt.generateKeys=Pw;Yt.updateConfigCert=Kq;Yt.createCsr=Ece;Yt.signCertificate=_ce;Yt.setCertTable=Cd;Yt.loadCertificates=qq;Yt.reviewSelfSignedCert=Dw;Yt.createTLSSelector=Wq;Yt.listCertificates=jq;Yt.addCertificate=Ace;Yt.removeCertificate=Ice;Yt.createNatsCerts=Tce;Yt.generateCertsKeys=Sce;Yt.getReplicationCert=Vh;Yt.getReplicationCertAuth=pce;Yt.renewSelfSigned=Rce;Yt.hostnamesFromCert=vw;Yt.getKey=Nce;Yt.getHostnamesFromCertificate=wce;Yt.getPrimaryHostName=Mw;var{urlToNodeName:Fq,getThisNodeUrl:uce,getThisNodeName:RS,clearThisNodeName:dce}=(ss(),v(Ho)),{readFileSync:fce,statSync:Hq}=require("node:fs"),vMe=oe(),{getTicketKeys:mce,onMessageFromWorkers:hce}=nt(),za=Q(),{isMainThread:kq}=require("worker_threads"),{TLSSocket:Gq,createSecureContext:UMe}=require("node:tls"),Ow=3650,$h=["127.0.0.1","localhost","::1"],Cw=[{name:"countryName",value:"USA"},{name:"stateOrProvinceName",value:"Colorado"},{name:"localityName",value:"Denver"},{name:"organizationName",value:"HarperDB, Inc."}];hce(async e=>{e.type===Ls.ITC_EVENT_TYPES.RESTART&&(is.initSync(!0),await Dw())});var Jr;function Ja(){return Jr||(Jr=lce().system.hdb_certificate,Jr||(Jr=cce({table:"hdb_certificate",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"uses"},{attribute:"certificate"},{attribute:"is_authority"},{attribute:"private_key_name"},{attribute:"details"},{attribute:"is_self_signed"},{attribute:"__updatedtime__"}]}))),Jr}o(Ja,"getCertTable");async function Vh(){let e=Wq("operations-api"),t={secureContexts:null,setSecureContext:o(s=>{},"setSecureContext")};await e.initialize(t);let r=t.secureContexts.get(RS());if(!r)return;let n=new Fo(r.options.cert);return r.cert_parsed=n,r.issuer=n.issuer,r}o(Vh,"getReplicationCert");async function pce(){Ja();let e=(await Vh()).options.cert,r=new Fo(e).issuer.match(/CN=(.*)/)?.[1];return Jr.get(r)}o(pce,"getReplicationCertAuth");var Lq,Qa=new Map;function qq(){if(Lq)return;Lq=!0;let e=[{configKey:vl.TLS},{configKey:vl.OPERATIONSAPI_TLS}];Ja();let t=hr.dirname(bw.getConfigFilePath()),r;for(let{configKey:n}of e){let s=bw.getConfigFromFile(n);if(s){Array.isArray(s)||(s=[s]);for(let i of s){let a=i.privateKey,c=a&&Bq(oce(t,"keys"),a);c&&Dq(a,l=>{Qa.set(c,l)},"private key");for(let l of[!1,!0]){let 
u=i[l?"certificateAuthority":"certificate"];if(u&&kq){let d;Dq(u,f=>{if(Cq.cert===f)return;let m=i.hostname??i.hostnames??i.host??i.hosts;m&&!Array.isArray(m)&&(m=[m]);let h=Yq(u),p=new Fo(h),_;try{_=Mw(p)}catch(R){ht.error("error extracting host name from certificate",R);return}if(_==null){ht.error("No host name found on certificate");return}if(p.checkIssued(new Fo(Cq.cert)))return;let g=Jr.primaryStore.get(_),y=Hq(u).mtimeMs,T=!g||g.is_self_signed?1:g.file_timestamp??g.__updatedtime__;if(g&&y<=T){y<T&&ht.info(`Certificate ${_} at ${u} is older (${new Date(y)}) than the certificate in the database (${T>1?new Date(T):"only self signed certificate available"})`);return}r=Jr.put({name:_,uses:["https",...n.includes("operations")?["operations"]:[]],ciphers:i.ciphers,certificate:h,private_key_name:c,is_authority:l,hostnames:m,fileTimestamp:y,details:{issuer:p.issuer.replace(/\n/g," "),subject:p.subject?.replace(/\n/g," "),subject_alt_name:p.subjectAltName,serial_number:p.serialNumber,valid_from:p.validFrom,valid_to:p.validTo}})},l?"certificate authority":"certificate")}}}}}return r}o(qq,"loadCertificates");function Dq(e,t,r){let n,s=o((i,a)=>{try{let c=a.mtimeMs;c&&c!==n&&(n&&kq&&ht.warn(`Reloading ${r}:`,i),n=c,t(Yq(i)))}catch(c){ht.error(`Error loading ${r}:`,i,c)}},"loadFile");Un.existsSync(e)?s(e,Hq(e)):ht.error(`${r} file not found:`,e),nce(e,{persistent:!1}).on("change",s)}o(Dq,"loadAndWatch");function Iw(){let e=uce();if(e==null){let t=$h[0];return ht.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return Fq(e)}o(Iw,"getHost");function TS(){let e=RS();if(e==null){let t=$h[0];return ht.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return e}o(TS,"getCommonName");async function Ece(){let e=await Vh(),t=wt.certificateFromPem(e.options.cert),r=wt.privateKeyFromPem(e.options.key);ht.info("Creating CSR with cert named:",e.name);let n=wt.createCertificationRequest();n.publicKey=t.publicKey;let s=[{name:"commonName",value:TS()},...Cw];ht.info("Creating CSR with subject",s),n.setSubject(s);let i=[{name:"unstructuredName",value:"HarperDB, Inc."},{name:"extensionRequest",extensions:$q()}];return ht.info("Creating CSR with attributes",i),n.setAttributes(i),n.sign(r),Od.pki.certificationRequestToPem(n)}o(Ece,"createCsr");function $q(){let e=$h.includes(TS())?$h:[...$h,TS()];return e.includes(Iw())||e.push(Iw()),[{name:"basicConstraints",cA:!1,critical:!0},{name:"keyUsage",digitalSignature:!0,keyEncipherment:!0,critical:!0},{name:"extKeyUsage",serverAuth:!0,clientAuth:!0},{name:"nsCertType",client:!0,server:!0},{name:"subjectAltName",altNames:e.map(t=>vq.isIP(t)?{type:7,ip:t}:{type:2,value:t})}]}o($q,"certExtensions");async function _ce(e){let t={},r=hr.join(is.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME);if(e.csr){let n,s;Ja();for await(let d of Jr.search([]))if(d.is_authority&&!d.details.issuer.includes("HarperDB-Certificate-Authority")){if(Qa.has(d.private_key_name)){n=Qa.get(d.private_key_name),s=d;break}else if(d.private_key_name&&await Un.exists(hr.join(r,d.private_key_name))){n=Un.readFile(hr.join(r,d.private_key_name)),s=d;break}}if(!n){let d=await Nw();s=d.ca,n=d.private_key}n=wt.privateKeyFromPem(n),t.signingCA=s.certificate;let i=wt.certificateFromPem(s.certificate);ht.info("Signing CSR with cert named",s.name);let a=wt.certificationRequestFromPem(e.csr);try{a.verify()}catch(d){return ht.error(d),new Error("Error verifying CSR: "+d.message)}let 
c=Od.pki.createCertificate();c.serialNumber="0"+Math.random().toString().slice(2,9),c.validity.notBefore=new Date;let l=new Date;c.validity.notAfter=l,c.validity.notAfter.setDate(l.getDate()+Ow),ht.info("sign cert setting validity:",c.validity),ht.info("sign cert setting subject from CSR:",a.subject.attributes),c.setSubject(a.subject.attributes),ht.info("sign cert setting issuer:",i.subject.attributes),c.setIssuer(i.subject.attributes);let u=a.getAttribute({name:"extensionRequest"}).extensions;ht.info("sign cert adding extensions from CSR:",u),c.setExtensions(u),c.publicKey=a.publicKey,c.sign(n,Od.md.sha256.create()),t.certificate=wt.certificateToPem(c)}else ht.info("Sign cert did not receive a CSR from:",e.url,"only the CA will be returned");return t}o(_ce,"signCertificate");async function gce(e,t){await Cd({name:RS(),uses:["https","wss"],certificate:e,private_key_name:"privateKey.pem",is_authority:!1,is_self_signed:!0}),await Cd({name:t.subject.getField("CN").value,uses:["https","wss"],certificate:wt.certificateToPem(t),private_key_name:"privateKey.pem",is_authority:!0,is_self_signed:!0})}o(gce,"createCertificateTable");async function Cd(e){let t=new Fo(e.certificate);e.details={issuer:t.issuer.replace(/\n/g," "),subject:t.subject?.replace(/\n/g," "),subject_alt_name:t.subjectAltName,serial_number:t.serialNumber,valid_from:t.validFrom,valid_to:t.validTo},Ja(),await Jr.patch(e)}o(Cd,"setCertTable");async function Pw(){let e=await Aw("rsa",{modulusLength:4096,publicKeyEncoding:{type:"spki",format:"pem"},privateKeyEncoding:{type:"pkcs8",format:"pem"}});return{publicKey:wt.publicKeyFromPem(e.publicKey),privateKey:wt.privateKeyFromPem(e.privateKey)}}o(Pw,"generateKeys");async function Lw(e,t,r){let n=wt.createCertificate();if(!t){let a=await Vh();t=wt.certificateFromPem(a.options.cert).publicKey}n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+Ow);let i=[{name:"commonName",value:TS()},...Cw];return n.setSubject(i),n.setIssuer(r.subject.attributes),n.setExtensions($q()),n.sign(e,Od.md.sha256.create()),wt.certificateToPem(n)}o(Lw,"generateCertificates");async function Nw(){let e=await jq(),t;for(let r of e){if(!r.is_authority)continue;let n=await zq(r.private_key_name);if(r.private_key_name&&n&&new Fo(r.certificate).checkPrivateKey(Uq(n))){ht.trace(`CA named: ${r.name} found with matching private key`),t={ca:r,private_key:n};break}}if(t)return t;ht.trace("No CA found with matching private key")}o(Nw,"getCertAuthority");async function Vq(e,t,r=!0){let n=wt.createCertificate();n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+Ow);let i=[{name:"commonName",value:`HarperDB-Certificate-Authority-${is.get(vl.REPLICATION_HOSTNAME)??Fq(is.get(vl.REPLICATION_URL))??xq().split("-")[0]}`},...Cw];n.setSubject(i),n.setIssuer(i),n.setExtensions([{name:"basicConstraints",cA:!0,critical:!0},{name:"keyUsage",keyCertSign:!0,critical:!0}]),n.sign(e,Od.md.sha256.create());let a=hr.join(is.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),c=hr.join(a,_i.PRIVATEKEY_PEM_NAME);return r&&await Un.writeFile(c,wt.privateKeyToPem(e)),n}o(Vq,"generateCertAuthority");async function Sce(){let{privateKey:e,publicKey:t}=await Pw(),r=await Vq(e,t),n=await Lw(e,t,r);await gce(n,r),Kq()}o(Sce,"generateCertsKeys");async function Tce(){let e=await 
Lw(wt.privateKeyFromPem(_i.CERTIFICATE_VALUES.key),void 0,wt.certificateFromPem(_i.CERTIFICATE_VALUES.cert)),t=hr.join(is.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),r=hr.join(t,_i.NATS_CERTIFICATE_PEM_NAME);await Un.exists(r)||await Un.writeFile(r,e);let n=hr.join(t,_i.NATS_CA_PEM_NAME);await Un.exists(n)||await Un.writeFile(n,_i.CERTIFICATE_VALUES.cert)}o(Tce,"createNatsCerts");async function Rce(){Ja();for await(let e of Jr.search([{attribute:"is_self_signed",value:!0}]))await Jr.delete(e.name);await Dw()}o(Rce,"renewSelfSigned");async function Dw(){dce(),await qq(),Ja();let e=await Nw();if(!e){ht.notify("A matching Certificate Authority and key was not found. A new CA will be created in advance, so it's available if needed.");let r=o(u=>{try{return{key:wt.privateKeyFromPem(Un.readFileSync(u)),keyPath:u}}catch(d){return ht.warn(`Failed to parse private key from ${u}:`,d.message),{key:null,keyPath:u}}},"tryToParseKey"),n=is.get(vl.TLS),s,i;if(Array.isArray(n)){for(let u of n)if(u.privateKey){let d=r(u.privateKey);if(s=d.key,i=d.keyPath,d.key)break}}else{let u=is.get(vl.TLS_PRIVATEKEY),d=r(u);s=d.key,i=d.keyPath}let a=hr.join(is.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),c=Bq(a,i);s||(ht.warn("Unable to parse the TLS key",i,"A new key will be generated and used to create Certificate Authority"),{privateKey:s}=await Pw(),Un.existsSync(hr.join(a,_i.PRIVATEKEY_PEM_NAME))&&(c=`privateKey${xq().split("-")[0]}.pem`),await Un.writeFile(hr.join(a,c),wt.privateKeyToPem(s)));let l=await Vq(s,wt.setRsaPublicKey(s.n,s.e),!1);await Cd({name:l.subject.getField("CN").value,uses:["https"],certificate:wt.certificateToPem(l),private_key_name:c,is_authority:!0,is_self_signed:!0})}if(!await Vh()){let r=RS();ht.notify(`A suitable replication certificate was not found, creating new self singed cert named: ${r}`),e=e??await Nw();let n=wt.certificateFromPem(e.ca.certificate),s=n.publicKey,i=await Lw(wt.privateKeyFromPem(e.private_key),s,n);await Cd({name:r,uses:["https","operations","wss"],certificate:i,is_authority:!1,private_key_name:e.ca.private_key_name,is_self_signed:!0})}}o(Dw,"reviewSelfSignedCert");function Kq(){let e=ace(Object.keys(Ls.CONFIG_PARAM_MAP),!0),t=hr.join(is.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),r=hr.join(t,_i.PRIVATEKEY_PEM_NAME),n=hr.join(t,_i.NATS_CERTIFICATE_PEM_NAME),s=hr.join(t,_i.NATS_CA_PEM_NAME),i=Ls.CONFIG_PARAMS,a={[i.TLS_PRIVATEKEY]:e[i.TLS_PRIVATEKEY.toLowerCase()]?e[i.TLS_PRIVATEKEY.toLowerCase()]:r};e[i.TLS_CERTIFICATE.toLowerCase()]&&(a[i.TLS_CERTIFICATE]=e[i.TLS_CERTIFICATE.toLowerCase()]),e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.TLS_CERTIFICATEAUTHORITY]=e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATE]=e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]),e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_PRIVATEKEY]=e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY]=e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]),(e[i.CLUSTERING_ENABLED.toLowerCase()]||e.clustering)&&(a[i.CLUSTERING_TLS_CERTIFICATE]=e[i.CLUSTERING_TLS_CERTIFICATE.toLowerCase()]??n,a[i.CLUSTERING_TLS_CERT_AUTH]=e[i.CLUSTERING_TLS_CERT_AUTH.toLowerCase()]??s,a[i.CLUSTERING_TLS_PRIVATEKEY]=e[i.CLUSTERING_TLS_PRIVATEKEY.toLowerCase()]??r),bw.updateConfigValue(void 0,void 0,a,!1,!0)}o(Kq,"updateConfigCert");function Yq(e){return e.startsWith("-----BEGIN")?e:fce(e,"utf8")}o(Yq,"readPEM");var 
Mq=SS.createSecureContext;SS.createSecureContext=function(e){if(!e.cert||!e.key)return Mq(e);let t={...e};delete t.key,delete t.cert;let r=Mq(t);return r.context.setCert(e.cert),r.context.setKey(e.key,void 0),r};var yce=Gq.prototype._init;Gq.prototype._init=function(e,t){yce.call(this,e,t);let r=this;this._handle.oncertcb=function(n){let s=n.servername;r._SNICallback(s,(i,a)=>{this.sni_context=a?.context||a,this.certCbDone()})}};var Ml=new Map;function Wq(e,t){let r=new Map,n,s=!1;return i.initialize=a=>i.ready?i.ready:(a&&(a.secureContexts=r,a.secureContextsListeners=[]),i.ready=new Promise((c,l)=>{async function u(){try{r.clear(),Ml.clear();let d=0;if(gS===void 0){c();return}for await(let f of gS.system.hdb_certificate.search([])){let m=f.certificate,h=new Fo(m);f.is_authority&&(h.asString=m,Ml.set(h.subject,m))}for await(let f of gS.system.hdb_certificate.search([]))try{if(f.is_authority)continue;let m=e==="operations-api",h=f.is_self_signed?1:2;m&&f.uses?.includes?.("operations")&&(h+=1);let p=await zq(f.private_key_name),_=f.certificate,g=new Fo(_);if(Ml.has(g.issuer)&&(_+=`
  `+Ml.get(g.issuer)),!p||!_)throw new Error("Missing private key or certificate for secure server");let y={ciphers:f.ciphers,ticketKeys:mce(),availableCAs:Ml,ca:t&&Array.from(Ml.values()),cert:_,key:p,key_file:f.private_key_name,is_self_signed:f.is_self_signed};a&&(y.sessionIdContext=a.sessionIdContext);let T=SS.createSecureContext(y);T.name=f.name,T.options=y,T.quality=h,T.certificateAuthorities=Array.from(Ml),T.certStart=_.toString().slice(0,100);let R=f.hostnames??vw(g);Array.isArray(R)||(R=[R]);let N;for(let O of R)if(O){O[0]==="*"&&(s=!0,O=O.slice(1)),O===Iw()&&(h+=2),vq.isIP(O)&&(N=!0);let F=r.get(O)?.quality??0;h>F&&r.set(O,T)}else za.error("No hostname found for certificate at",SS.certificate);za.trace("Adding TLS",T.name,"for",a.ports||"client","cert named",f.name,"hostnames",R,"quality",h,"best quality",d),h>d&&(i.defaultContext=n=T,d=h,a&&(a.defaultContext=T))}catch(m){za.error("Error applying TLS for",f.name,m)}a?.secureContextsListeners.forEach(f=>f()),c(n)}catch(d){l(d)}}o(u,"updateTLS"),gS?.system.hdb_certificate.subscribe({listener:o(()=>setTimeout(()=>u(),1500).unref(),"listener"),omitCurrent:!0}),u()})),i;function i(a,c){za.info("TLS requested for",a||"(no SNI)");let l=a;for(;;){let d=r.get(l);if(d)return za.debug("Found certificate for",a,d.certStart),d.updatedContext&&(d=d.updatedContext),c(null,d);if(s&&l){let f=l.indexOf(".",1);f<0?l="":l=l.slice(f)}else break}a?za.debug("No certificate found to match",a,"using the default certificate"):za.debug("No SNI, using the default certificate",n?.name);let u=n;u?u.updatedContext&&(u=u.updatedContext):za.info("No default certificate found"),c(null,u)}o(i,"SNICallback")}o(Wq,"createTLSSelector");async function zq(e){let t=Qa.get(e);return!t&&e?await Un.readFile(hr.join(is.get(vl.ROOTPATH),Ls.LICENSE_KEY_DIR_NAME,e),"utf8"):t}o(zq,"getPrivateKeyByName");async function jq(){Ja();let e=[];for await(let t of Jr.search([]))e.push(t);return e}o(jq,"listCertificates");async function Ace(e){let t=ww(e,Ei.object({name:Ei.string().required(),certificate:Ei.string().required(),is_authority:Ei.boolean().required(),private_key:Ei.string(),hosts:Ei.array(),uses:Ei.array()}));if(t)throw new ja(t.message);let{name:r,certificate:n,private_key:s,is_authority:i}=e,a=new Fo(n),c=!1,l=!1,u;for(let[h,p]of Qa)!s&&!c&&a.checkPrivateKey(Uq(p))&&(c=!0,u=h),s&&s===p&&(l=!0,u=h);if(!i&&!s&&!c)throw new ja("A suitable private key was not found for this certificate");let d;if(!r){try{d=Mw(a)}catch(h){ht.error(h)}if(d==null)throw new ja("Error extracting certificate host name, please provide a name parameter")}let f=bce(r??d);s&&!c&&!l&&(await Un.writeFile(hr.join(is.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME,f+".pem"),s),Qa.set(f,s));let m={name:r??d,certificate:n,is_authority:i,hosts:e.hosts,uses:e.uses};return(!i||i&&u||i&&s)&&(m.private_key_name=u??f+".pem"),e.ciphers&&(m.ciphers=e.ciphers),await Cd(m),"Successfully added certificate: "+f}o(Ace,"addCertificate");function bce(e){return e.replace(/[^a-z0-9\.]/gi,"-")}o(bce,"sanitizeName");async function Ice(e){let t=ww(e,Ei.object({name:Ei.string().required()}));if(t)throw new ja(t.message);let{name:r}=e;Ja();let n=await Jr.get(r);if(!n)throw new ja(r+" not found");let{private_key_name:s}=n;if(s){let i=Array.from(await Jr.search([{attribute:"private_key_name",value:s}]));i.length===1&&i[0].name===r&&(ht.info("Removing private key named",s),await Un.remove(hr.join(is.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME,s)))}return await Jr.delete(r),"Successfully removed "+r}o(Ice,"removeCertificate");function Mw(e){let 
t=e.subject?.match(/CN=(.*)/)?.[1];return t||vw(e)[0]}o(Mw,"getPrimaryHostName");function vw(e){if(e.subjectAltName)return e.subjectAltName.split(",").map(r=>{let n=r.indexOf(":");if(r=r.slice(n+1),r=r.trim(),r[0]==='"')try{r=JSON.parse(r)}catch{}return r.indexOf("=")>-1?r.match(/CN=([^,]*)/)?.[1]:r}).filter(r=>r);let t=certObj.subject?.match(/CN=(.*)/)?.[1];return t?[t]:[]}o(vw,"hostnamesFromCert");async function Nce(e){if(e.bypass_auth!==!0)throw new ja("Unauthorized","401");let t=ww(e,Ei.object({name:Ei.string().required()}));if(t)throw new ja(t.message);let{name:r}=e;if(r===".jwtPrivate")return(await Pq()).privateKey;if(r===".jwtPublic")return(await Pq()).publicKey;if(Qa.get(r))return Qa.get(e.name);throw new ja("Key not found")}o(Nce,"getKey");function wce(e){return[e.subject?.CN,...e.subjectaltname.split(",").filter(t=>t.trim().startsWith("DNS:")).map(t=>t.trim().substring(4))]}o(wce,"getHostnamesFromCertificate")});var g$={};Oe(g$,{CONFIRMATION_STATUS_POSITION:()=>p$,LATENCY_POSITION:()=>OS,NodeReplicationConnection:()=>Dd,OPERATION_REQUEST:()=>Fw,RECEIVED_TIME_POSITION:()=>kw,RECEIVED_VERSION_POSITION:()=>Hw,RECEIVING_STATUS_POSITION:()=>Gw,RECEIVING_STATUS_RECEIVING:()=>_$,RECEIVING_STATUS_WAITING:()=>E$,SENDING_TIME_POSITION:()=>Kh,createWebSocket:()=>CS,databaseSubscriptions:()=>Za,replicateOverWS:()=>Yh,tableUpdateListeners:()=>$w});async function CS(e,t){let{authorization:r,rejectUnauthorized:n}=t||{},s=Ze(),i;if(e==null)throw new TypeError(`Invalid URL: Expected a string URL for node "${s}" but received ${e}`);if(e.includes("wss://")){if(!xw){let l=(0,u$.createTLSSelector)("operations-api"),u={secureContexts:null};await l.initialize(u),xw=u.secureContexts}if(i=xw.get(s),i&&ae.debug?.("Creating web socket for URL",e,"with certificate named:",i.name),!i&&n!==!1)throw new Error("Unable to find a valid certificate to use for replication to connect to "+e)}let a={};r&&(a.Authorization=r);let c={headers:a,localAddress:s?.startsWith("127.0")?s:void 0,servername:(0,f$.isIP)(t?.serverName)?void 0:t?.serverName,noDelay:!0,highWaterMark:128*1024,rejectUnauthorized:n!==!1,secureContext:void 0};return i&&(wS?.caCount!==ko.size&&(wS=d$.createSecureContext({...i.options,ca:[...ko,...i.options.availableCAs.values()]}),wS.caCount=ko.size),c.secureContext=wS),new c$.WebSocket(e,"harperdb-replication-v1",c)}function Yh(e,t,r){let n=t.port||t.securePort,s=Ul.pid%1e3+"-"+l$.threadId+(n?"s:"+n:"c:"+t.url?.slice(-4))+" "+Math.random().toString().slice(2,3);ae.debug?.(s,"Initializing replication connection",r);let i=0,a=Buffer.allocUnsafeSlow(1024),c=0,l=new DataView(a.buffer,0,1024),u=t.database,d=t.databaseSubscriptions||Za,f,m,h=!1,p=t.subscription;p?.then&&p.then(E=>{p=E,p.auditStore&&(f=p.auditStore)});let _=t.tables||u&&st()[u],g;if(!r){ae.error?.(s,"No authorization provided"),Ss(1008,"Unauthorized");return}let y=new Map,T=[];g=r.name,g&&t.connection&&(t.connection.nodeName=g);let R,N,O,F,Z,G,Y,q=6e4,K,ce=0,le=0,se=0,pe=a$.default.get(U.REPLICATION_BLOBTIMEOUT)??12e4,Ne=new Map,Ue=[],xe=0,Rr;if(t.url){let E=o(()=>{Z&&le===e._socket?.bytesRead&&se===e._socket?.bytesWritten?e.terminate():(Z=performance.now(),e.ping(),le=e._socket?.bytesRead,se=e._socket?.bytesWritten)},"sendPing");O=setInterval(E,i$).unref(),E()}else Jt();e._socket?.setMaxListeners(200);function Jt(){clearTimeout(F),le=e._socket?.bytesRead,se=e._socket?.bytesWritten,F=setTimeout(()=>{le===e._socket?.bytesRead&&se===e._socket?.bytesWritten&&(ae.warn?.(`Timeout waiting for ping from ${g}, terminating connection and 
reconnecting`),e.terminate())},i$*2).unref()}o(Jt,"resetPingTimer");function kt(){if(!(!g||!u))return m||(m=bd(f,u,g)),m}o(kt,"getSharedStatus"),u&&ga(u);let Xt,vf,Cc=[],Gt=[],Uf,xf=[],bE=[],IE=[],Dy=150,Bf=25,Pe=0,NE=0,Ff=!1,po,Lr,yr,Hf;e.on("message",E=>{ce=performance.now();try{let S=E.dataView=new Yc(E.buffer,E.byteOffset,E.byteLength);if(E[0]>127){let P=(0,et.decode)(E),[L,D,H]=P;switch(L){case Jq:{if(D){if(g){if(g!==D){ae.error?.(s,`Node name mismatch, expecting to connect to ${g}, but peer reported name as ${D}, disconnecting`),e.send((0,et.encode)([Pd])),Ss(1008,"Node name mismatch");return}}else if(g=D,t.connection?.tentativeNode){let B=t.connection.tentativeNode;B.name=g,t.connection.tentativeNode=null,Bo(g,B)}if(t.connection&&(t.connection.nodeName=g),ae.debug?.(s,"received node name:",g,"db:",u??P[2]),!u)try{ga(u=P[2]),u==="system"&&(Xt=Ya(t,(B,de)=>{hu(de)&&Sa(de)}),e.on("close",()=>{Xt?.remove()}))}catch(B){ae.warn?.(s,"Error setting database",B),e.send((0,et.encode)([Pd])),Ss(1008,B.message);return}Dr()}break}case n$:{ae.debug?.(s,"Received table definitions for",D.map(B=>B.table));for(let B of D){let de=P[2];B.database=de;let me;hu(de)&&(de==="system"?ke[de]?.[B.table]||(me=V(B,ke[de]?.[B.table])):me=V(B,ke[de]?.[B.table]),f||(f=me?.auditStore),_||(_=st()?.[de]))}break}case Pd:Ss();break;case Fw:try{let B=r?.replicates||r?.subscribers||r?.name;ae.debug?.("Received operation request",D,"from",g),server.operation(D,{user:r},!B).then(de=>{Array.isArray(de)&&(de={results:de}),de.requestId=D.requestId,e.send((0,et.encode)([AS,de]))},de=>{e.send((0,et.encode)([AS,{requestId:D.requestId,error:(0,Ld.errorToString)(de)}]))})}catch(B){e.send((0,et.encode)([AS,{requestId:D.requestId,error:(0,Ld.errorToString)(B)}]))}break;case AS:let{resolve:C,reject:x}=y.get(D.requestId);D.error?x(new Error(D.error)):C(D),y.delete(D.requestId);break;case Uw:let z=P[3];if(!_){u?ae.error?.(s,"No database found for",u):ae.error?.(s,"Database name never received"),Ss();return}let ne=_[z];ne=V({table:z,database:u,attributes:D.attributes,schemaDefined:D.schemaDefined},ne),Cc[H]={name:z,decoder:new et.Packr({useBigIntExtension:!0,randomAccessStructure:!0,freezeData:!0,typedStructs:D.typedStructs,structures:D.structures}),getEntry(B){return ne.primaryStore.getEntry(B)},rootStore:ne.primaryStore.rootStore};break;case Xq:Hf=f?Tq(D,f):new Map,Uf=P[2],ae.debug?.(s,`Acknowledged subscription request, receiving messages for nodes: ${Uf}`);break;case Zq:let re=H;IE[re]=D;break;case r$:kt()[p$]=D,ae.trace?.(s,"received and broadcasting committed update",D),kt().buffer.notify();break;case t$:R=D,p.send({type:"end_txn",localTime:R,remoteNodeIds:T});break;case bS:{let B=P[1],{fileId:de,size:me,finished:Se,error:ee}=B,j=Ne.get(de);ae.debug?.("Received blob",de,"has stream",!!j,"connectedToBlob",!!j?.connectedToBlob,"length",P[2].length,"finished",Se),j||(j=new Bw.PassThrough,j.expectedSize=me,Ne.set(de,j)),j.lastChunk=Date.now();let he=P[2];it(he.byteLength,"bytes-received",`${g}.${u}`,"replication","blob");try{Se?(ee?(j.on("error",()=>{}),j.destroy(new Error("Blob error: "+ee+" for record "+(j.recordId??"unknown")+" from "+g))):j.end(he),j.connectedToBlob&&Ne.delete(de)):j.write(he)}catch(Te){ae.error?.(`Error receiving blob for ${j.recordId} from ${g} and streaming to storage`,Te),Ne.delete(de)}break}case e$:{let B=D,de;try{let me=P[3],Se=Gt[H]||(Gt[H]=_[P[4]]);if(!Se)return ae.warn?.("Unknown table id trying to handle record request",H);let 
ee=Se.primaryStore.getBinaryFast(Symbol.for("structures")),j=ee?.length??0;if(j>0&&j!==NE){NE=j;let Te=(0,et.decode)(ee);e.send((0,et.encode)([Uw,{typedStructs:Te.typed,structures:Te.named},H,Se.tableName]))}let he=Se.primaryStore.getBinaryFast(me);if(he){let Te=Se.primaryStore.decoder.decode(he,{valueAsBuffer:!0}),fe=ut||{};fe.version=(0,m$.getLastVersion)(),ut&&ut[Cu]&Vr&&(Te=Buffer.from(Te),_m(()=>Se.primaryStore.decoder.decode(he),We=>_a(We,me),Se.primaryStore.rootStore)),de=(0,et.encode)([yS,B,{value:Te,expiresAt:fe.expiresAt,version:fe.version,residencyId:fe.residencyId,nodeId:fe.nodeId,user:fe.user}])}else de=(0,et.encode)([yS,B])}catch(me){de=(0,et.encode)([yS,B,{error:me.message}])}e.send(de);break}case yS:{let{resolve:B,reject:de,tableId:me,key:Se}=y.get(P[1]),ee=P[2];if(ee?.error)de(new Error(ee.error));else if(ee){let j;l_(()=>{let he=Cc[me].decoder.decode(ee.value);ee.value=he,ee.key=Se,B(ee)||j&&setTimeout(()=>j.forEach(o_),6e4).unref()},f?.rootStore,he=>{let Te=Pc(he,Se);return j||(j=[]),j.push(Te),Te})}else B();y.delete(P[1]);break}case Qq:{yr=D;let B,de,me=!1;if(p){if(u!==p.databaseName&&!p.then){ae.error?.("Subscription request for wrong database",u,p.databaseName);return}}else p=d.get(u);if(ae.debug?.(s,"received subscription request for",u,"at",yr),!p){let Ee;p=new Promise(tt=>{ae.debug?.("Waiting for subscription to database "+u),Ee=tt}),p.ready=Ee,Za.set(u,p)}if(r.name)de=Kt().subscribe(r.name),de.then(async Ee=>{B=Ee;for await(let tt of B){let rt=tt.value;if(!(rt?.replicates===!0||rt?.replicates?.receives||rt?.subscriptions?.some(cr=>(cr.database||cr.schema)===u&&cr.publish!==!1))){me=!0,e.send((0,et.encode)([Pd])),Ss(1008,`Unauthorized database subscription to ${u}`);return}}},Ee=>{ae.error?.(s,"Error subscribing to HDB nodes",Ee)});else if(!(r?.role?.permission?.super_user||r.replicates)){e.send((0,et.encode)([Pd])),Ss(1008,`Unauthorized database subscription to ${u}`);return}if(Lr&&(ae.debug?.(s,"stopping previous subscription",u),Lr.emit("close")),yr.length===0)return;let Se=yr[0],ee=o(Ee=>{if(Ee&&(Se.replicateByDefault?!Se.tables.includes(Ee.tableName):Se.tables.includes(Ee.tableName)))return{table:Ee}},"tableToTableEntry"),j={txnTime:0},he,Te,fe=1/0,We,Et=o((Ee,tt)=>{if(Ee.type==="end_txn"){j.txnTime&&(a[i]!==66&&ae.error?.("Invalid encoding of message"),pu(9),pu(T_),Dc(We=tt),dt()),i=c,j.txnTime=0;return}let rt=Ee.nodeId,cr=Ee.tableId,Mt=Te[cr];if(!Mt&&(Mt=Te[cr]=ee(p.tableById[cr]),!Mt))return ae.debug?.("Not subscribed to table",cr);let Ts=Mt.table,vt=Ts.primaryStore,Qs=vt.encoder;(Ee.extendedType&I_||!Qs.typedStructs)&&(Qs._mergeStructures(Qs.getStructures()),Qs.typedStructs&&(Qs.lastTypedStructuresLength=Qs.typedStructs.length));let Eu=he[rt];if(!(Eu&&Eu.startTime<tt&&(!Eu.endTime||Eu.endTime>tt)))return NS&&ae.trace?.(s,"skipping replication update",Ee.recordId,"to:",g,"from:",rt,"subscribed:",he),ZD();NS&&ae.trace?.(s,"sending replication update",Ee.recordId,"to:",g,"from:",rt,"subscribed:",he);let My=Ee.version;j.txnTime!==My&&(j.txnTime&&(NS&&ae.trace?.(s,"new txn time, sending queued txn",j.txnTime),a[i]!==66&&ae.error?.("Invalid encoding of message"),dt()),j.txnTime=My,i=c,Dc(My));let Mc=Ee.residencyId,vy=mu(Mc,Ts),OE;if(vy&&!vy.includes(g)){let Js=mu(Ee.previousResidencyId,Ts);if(Js&&!Js.includes(g)&&(Ee.type==="put"||Ee.type==="patch")||Ts.getResidencyById)return ZD();let Gf=Ee.recordId;ae.trace?.(s,"sending invalidation",Gf,g,"from",rt);let qf=0;Mc&&(qf|=Wc),Ee.previousResidencyId&&(qf|=zc);let By,CE=null;for(let eM in 
Ts.indices){if(!CE){if(By=Ee.getValue(vt,!0),!By)break;CE={}}CE[eM]=By[eM]}OE=jc(Ee.version,cr,Gf,null,rt,Ee.user,Ee.type==="put"||Ee.type==="patch"?"invalidate":Ee.type,Qs.encode(CE),qf,Mc,Ee.previousResidencyId,Ee.expiresAt)}function ZD(){return ae.trace?.(s,"skipping audit record",Ee.recordId),G||(G=setTimeout(()=>{G=null,(We||0)+s$/2<fe&&(NS&&ae.trace?.(s,"sending skipped sequence update",fe),e.send((0,et.encode)([t$,fe])))},s$).unref()),new Promise(setImmediate)}o(ZD,"skipAuditRecord");let Uy=Qs.typedStructs,xy=Qs.structures;if((Uy?.length!=Mt.typed_length||xy?.length!=Mt.structure_length)&&(Mt.typed_length=Uy?.length,Mt.structure_length=xy.length,ae.debug?.(s,"send table struct",Mt.typed_length,Mt.structure_length),Mt.sentName||(Mt.sentName=!0),e.send((0,et.encode)([Uw,{typedStructs:Uy,structures:xy,attributes:Ts.attributes,schemaDefined:Ts.schemaDefined},cr,Mt.table.tableName]))),Mc&&!bE[Mc]&&(e.send((0,et.encode)([Zq,vy,Mc])),bE[Mc]=!0),OE)pu(OE.length),Lc(OE);else{let Js=Ee.encoded;Ee.extendedType&Vr&&_m(()=>Ee.getValue(vt),qf=>_a(qf,Ee.recordId),vt.rootStore);let Gf=Js[0]===66?8:0;pu(Js.length-Gf),Lc(Js,Gf),ae.trace?.("wrote record",Ee.recordId,"length:",Js.length)}return e._socket.writableNeedDrain?new Promise(Js=>{ae.debug?.(`Waiting for remote node ${g} to allow more commits ${e._socket.writableNeedDrain?"due to network backlog":"due to requested flow directive"}`),e._socket.once("drain",Js)}):xe>Bf?new Promise(Js=>{Rr=Js}):new Promise(setImmediate)},"sendAuditRecord"),dt=o(()=>{c-i>8?(e.send(a.subarray(i,c)),ae.debug?.(s,"Sent message, size:",c-i),it(c-i,"bytes-sent",`${g}.${u}`,"replication","egress")):ae.debug?.(s,"skipping empty transaction")},"sendQueuedData");Lr=new qw.EventEmitter,Lr.once("close",()=>{me=!0,B?.end()});for(let{startTime:Ee}of yr)Ee<fe&&(fe=Ee);(de||Promise.resolve()).then(async()=>{p=await p,f=p.auditStore,Te=p.tableById.map(ee),he=[];for(let{name:tt,startTime:rt,endTime:cr}of yr){let Mt=hS(tt,f);ae.debug?.("subscription to",tt,"using local id",Mt,"starting",rt),he[Mt]={startTime:rt,endTime:cr}}Sa(u),Xt||(Xt=Il(tt=>{tt.databaseName===u&&Sa(u)}),vf=yh(tt=>{tt===u&&(e.send((0,et.encode)([Pd])),Ss())}),e.on("close",()=>{Xt?.remove(),vf?.remove()})),e.send((0,et.encode)([Xq,Bh(p.auditStore),yr.map(({name:tt})=>tt)]));let Ee=!0;do{isFinite(fe)||(ae.warn?.("Invalid sequence id "+fe),Ss(1008,"Invalid sequence id"+fe));let tt;if(Ee&&!me&&(Ee=!1,fe===0)){ae.info?.("Replicating all tables to",g);let rt=fe,cr=PS(f);for(let Mt in _){if(!ee(Mt))continue;let Ts=_[Mt];for(let vt of Ts.primaryStore.getRange({snapshot:!1,versions:!0})){if(me)return;if(vt.localTime>=fe){ae.trace?.(s,"Copying record from",u,Mt,vt.key,vt.localTime),rt=Math.max(vt.localTime,rt),tt=!0,kt()[Kh]=1;let Qs=jc(vt.version,Ts.tableId,vt.key,null,cr,null,"put",_m(()=>Ts.primaryStore.encoder.encode(vt.value),Eu=>_a(Eu,vt.key)),vt.metadataFlags&-256,vt.residencyId,null,vt.expiresAt);await Et({recordId:vt.key,tableId:Ts.tableId,type:"put",getValue(){return vt.value},encoded:Qs,version:vt.version,residencyId:vt.residencyId,nodeId:cr,extendedType:vt.metadataFlags},vt.localTime)}}}tt&&Et({type:"end_txn"},fe),kt()[Kh]=0,fe=rt}for(let{key:rt,value:cr}of f.getRange({start:fe||1,exclusiveStart:!0,snapshot:!1})){if(me)return;let Mt=bt(cr);ae.debug?.("sending audit record",new Date(rt)),kt()[Kh]=rt,fe=rt,await Et(Mt,rt),Lr.startTime=rt,tt=!0}tt&&Et({type:"end_txn"},fe),kt()[Kh]=0,await HU(f)}while(!me)}).catch(Ee=>{ae.error?.(s,"Error handling subscription to node",Ee),Ss(1008,"Error handling subscription to 
node")});break}}return}S.position=8;let A=!0,b,I;do{kt();let P=S.readInt();if(P===9&&S.getUint8(S.position)==T_){S.position++,R=I=S.readFloat64(),m[Hw]=R,m[kw]=Date.now(),m[Gw]=E$,ae.trace?.("received remote sequence update",R,u);break}let L=S.position,D=bt(E,L,L+P),H=Cc[D.tableId];H||ae.error?.(`No table found with an id of ${D.tableId}`);let C;D.residencyId&&(C=IE[D.residencyId],ae.trace?.(s,"received residency list",C,D.type,D.recordId));try{let x=D.recordId;l_(()=>{b={table:H.name,id:D.recordId,type:D.type,nodeId:Hf.get(D.nodeId),residencyList:C,timestamp:D.version,value:D.getValue(H),user:D.user,beginTxn:A,expiresAt:D.expiresAt}},f?.rootStore,z=>Pc(z,x))}catch(x){throw x.message+="typed structures for current decoder"+JSON.stringify(H.decoder.typedStructs),x}A=!1,ae.trace?.(s,"received replication message",D.type,"id",b.id,"version",new Date(D.version),"nodeId",b.nodeId),m[Hw]=D.version,m[kw]=Date.now(),m[Gw]=_$,p.send(b),S.position=L+P}while(S.position<E.byteLength);Pe++,it(E.byteLength,"bytes-received",`${g}.${u}.${b?.table||"unknown_table"}`,"replication","ingest"),Pe>Dy&&!Ff&&(Ff=!0,e.pause(),ae.debug?.(`Commit backlog causing replication back-pressure, requesting that ${g} pause replication`)),p.send({type:"end_txn",localTime:R,remoteNodeIds:T,async onCommit(){if(b){let P=Date.now()-b.timestamp;it(P,"replication-latency",g+"."+u+"."+b.table,b.type,"ingest")}Pe--,Ff&&(Ff=!1,e.resume(),ae.debug?.(`Replication resuming ${g}`)),Ue.length>0&&await Promise.all(Ue),ae.trace?.("All blobs finished"),!N&&I&&(ae.trace?.(s,"queuing confirmation of a commit at",I),setTimeout(()=>{e.send((0,et.encode)([r$,N])),ae.trace?.(s,"sent confirmation of a commit at",N),N=null},Cce)),N=I,ae.debug?.("last sequence committed",new Date(I),u)}})}catch(S){ae.error?.(s,"Error handling incoming replication message",S)}}),e.on("ping",Jt),e.on("pong",()=>{if(t.connection){let E=performance.now()-Z;t.connection.latency=E,kt()&&(m[OS]=E),t.isSubscriptionConnection&&Dl({name:g,database:u,url:t.url,latency:E})}Z=null}),e.on("close",(E,S)=>{clearInterval(O),clearTimeout(F),clearInterval(Y),Lr&&Lr.emit("close"),po&&po.end();for(let[A,{reject:b}]of y)b(new Error(`Connection closed ${S?.toString()} ${E}`));ae.debug?.(s,"closed",E,S?.toString())});function Ss(E,S){try{e.isFinished=!0,ae.debug?.(s,"closing",g,u,E,S),e.close(E,S),t.connection?.emit("finished")}catch(A){ae.error?.(s,"Error closing connection",A)}}o(Ss,"close");let Ea=new Set;async function _a(E,S){let A=a_(E);if(Ea.has(A)){ae.debug?.("Blob already being sent",A);return}Ea.add(A);try{let b;xe++;for await(let I of E.stream())b&&(ae.debug?.("Sending blob chunk",A,"length",b.length),e.send((0,et.encode)([bS,{fileId:A,size:E.size},b]))),b=I,e._socket.writableNeedDrain&&(ae.debug?.("draining",A),await new Promise(P=>e._socket.once("drain",P)),ae.debug?.("drained",A)),it(I.length,"bytes-sent",`${g}.${u}`,"replication","blob");ae.debug?.("Sending final blob chunk",A,"length",b.length),e.send((0,et.encode)([bS,{fileId:A,size:E.size,finished:!0},b]))}catch(b){ae.warn?.("Error sending blob",b,"blob id",A,"for record",S),e.send((0,et.encode)([bS,{fileId:A,finished:!0,error:(0,Ld.errorToString)(b)},Buffer.alloc(0)]))}finally{Ea.delete(A),xe--,xe<Bf&&Rr?.()}}o(_a,"sendBlobs");function Pc(E,S){let A=a_(E),b=Ne.get(A);ae.debug?.("Received transaction with blob",A,"has stream",!!b,"ended",!!b?.writableEnded),b?b.writableEnded&&Ne.delete(A):(b=new Bw.PassThrough,Ne.set(A,b)),b.connectedToBlob=!0,b.lastChunk=Date.now(),b.recordId=S,E.size===void 
0&&b.expectedSize&&(E.size=b.expectedSize);let I=b.blob??createBlob(b,E);b.blob=I;let P=bo(()=>Em(I).saving,p.auditStore?.rootStore);return P&&(P.blobId=A,Ue.push(P),P.finally(()=>{ae.debug?.(`Finished receiving blob stream ${A}`),Ue.splice(Ue.indexOf(P),1)})),I}o(Pc,"receiveBlobs");function Dr(){if(h||(h=!0,t.connection?.on("subscriptions-updated",Dr)),!f&&p&&(f=p.auditStore),t.connection?.isFinished)throw new Error("Can not make a subscription request on a connection that is already closed");let E=new Map;f||(f=p?.auditStore);try{for(let b of p?.dbisDB?.getRange({start:Symbol.for("seq"),end:[Symbol.for("seq"),Buffer.from([255])]})||[])for(let I of b.value.nodes||[])I.lastTxnTime>(E.get(I.id)??0)&&E.set(I.id,I.lastTxnTime)}catch(b){if(!b.message.includes("Can not re"))throw b}let S=t.connection?.nodeSubscriptions?.[0];T=[];let A=t.connection?.nodeSubscriptions.map((b,I)=>{let P=[],{replicateByDefault:L}=b;if(b.subscriptions){for(let x of b.subscriptions)if(x.subscribe&&(x.schema||x.database)===u){let z=x.table;_?.[z]?.replicate!==!1&&P.push(z)}L=!1}else for(let x in _)(L?_[x].replicate===!1:_[x].replicate)&&P.push(x);let D=f&&hS(b.name,f),H=p?.dbisDB?.get([Symbol.for("seq"),D])??1,C=Math.max(H?.seqId??1,(typeof b.startTime=="string"?new Date(b.startTime).getTime():b.startTime)??1);if(ae.debug?.("Starting time recorded in db",b.name,D,u,H?.seqId,"start time:",C,new Date(C)),S!==b){let x=f&&hS(S.name,f),z=p?.dbisDB?.get([Symbol.for("seq"),x])??1;for(let ne of z?.nodes||[])ne.name===b.name&&(C=ne.seqId,ae.debug?.("Using sequence id from proxy node",S.name,C))}if(D===void 0?ae.warn("Starting subscription request from node",b,"but no node id found"):T.push(D),E.get(D)>C&&(C=E.get(D),ae.debug?.("Updating start time from more recent txn recorded",S.name,C)),C===1&&IS)try{new URL(IS).hostname===b.name?(ae.warn?.(`Requesting full copy of database ${u} from ${IS}`),C=0):C=Date.now()-6e4}catch(x){ae.error?.("Error parsing leader URL",IS,x)}return ae.trace?.(s,"defining subscription request",b.name,u,new Date(C)),{name:b.name,replicateByDefault:L,tables:P,startTime:C,endTime:b.endTime}});if(A)if(ae.debug?.(s,"sending subscription request",A,p?.dbisDB?.path),clearTimeout(K),A.length>0)e.send((0,et.encode)([Qq,A]));else{let b=o(()=>{let I=performance.now();K=setTimeout(()=>{ce<=I?Ss(1008,"Connection has no subscriptions and is no longer used"):b()},q).unref()},"scheduleClose");b()}}o(Dr,"sendSubscriptionRequestUpdate");function mu(E,S){if(!E)return;let A=xf[E];return A||(A=S.getResidencyRecord(E),xf[E]=A),A}o(mu,"getResidence");function hu(E){return!(Xa&&Xa!="*"&&!Xa[E]&&!Xa.includes?.(E)&&!Xa.some?.(S=>S.name===E))}o(hu,"checkDatabaseAccess");function ga(E){if(p=p||d.get(E),!hu(E))throw new Error(`Access to database "${E}" is not permitted`);p||ae.warn?.(`No database named "${E}" was declared and registered`),f=p?.auditStore,_||(_=st()?.[E]);let S=Ze();if(S===g)throw S?new Error("Should not connect to self",S):new Error("Node name not defined");return wE(S,E),!0}o(ga,"setDatabase");function wE(E,S){let A=st()?.[S],b=[];for(let I in A){let P=A[I];b.push({table:I,schemaDefined:P.schemaDefined,attributes:P.attributes.map(L=>({name:L.name,type:L.type,isPrimaryKey:L.isPrimaryKey}))})}ae.trace?.("Sending database info for node",E,"database name",S),e.send((0,et.encode)([Jq,E,S,b]))}o(wE,"sendNodeDBName");function Sa(E){let S=st()?.[E],A=[];for(let b in S){if(yr&&!yr.some(P=>P.replicateByDefault?!P.tables.includes(b):P.tables.includes(b)))continue;let 
I=S[b];A.push({table:b,schemaDefined:I.schemaDefined,attributes:I.attributes.map(P=>({name:P.name,type:P.type,isPrimaryKey:P.isPrimaryKey}))})}e.send((0,et.encode)([n$,A,E]))}o(Sa,"sendDBSchema"),Y=setInterval(()=>{for(let[E,S]of Ne)S.lastChunk+pe<Date.now()&&(ae.warn?.(`Timeout waiting for blob stream to finish ${E} for record ${S.recordId??"unknown"} from ${g}`),Ne.delete(E),S.end())},pe).unref();let Ta=1,kf=[];return{end(){po&&po.end(),Lr&&Lr.emit("close")},getRecord(E){let S=Ta++;return new Promise((A,b)=>{let I=[e$,S,E.table.tableId,E.id];kf[E.table.tableId]||(I.push(E.table.tableName),kf[E.table.tableId]=!0),e.send((0,et.encode)(I)),ce=performance.now(),y.set(S,{tableId:E.table.tableId,key:E.id,resolve(P){let{table:L,entry:D}=E;if(A(P),P)return L._recordRelocate(D,P)},reject:b})})},sendOperation(E){let S=Ta++;return E.requestId=S,e.send((0,et.encode)([Fw,E])),new Promise((A,b)=>{y.set(S,{resolve:A,reject:b})})}};function pu(E){Ra(5),E<128?a[c++]=E:E<16384?(l.setUint16(c,E|32768),c+=2):E<1056964608?(l.setUint32(c,E|3221225472),c+=4):(a[c]=255,l.setUint32(c+1,E),c+=5)}function Lc(E,S=0,A=E.length){let b=A-S;Ra(b),E.copy(a,c,S,A),c+=b}function Dc(E){Ra(8),l.setFloat64(c,E),c+=8}function Ra(E){if(E+16>a.length-c){let S=Buffer.allocUnsafeSlow(c+E-i+65536>>10<<11);a.copy(S,0,i,c),c=c-i,i=0,a=S,l=new DataView(a.buffer,0,a.length)}}function V(E,S){let A=E.database??"data";if(A!=="data"&&!ke[A]){ae.warn?.("Database not found",E.database);return}S||(S={});let b=S.schemaDefined,I=!1,P=E.schemaDefined,L=S.attributes||[];for(let D=0;D<E.attributes?.length;D++){let H=E.attributes[D],C=L.find(x=>x.name===H.name);(!C||C.type!==H.type)&&(b?ae.error?.(`Schema for '${u}.${E.table}' is defined locally, but attribute '${H.name}: ${H.type}' from '${g}' does not match local attribute ${C?"'"+C.name+": "+C.type+"'":"which does not exist"}`):(I=!0,P||(H.indexed=!0),C?L[L.indexOf(C)]=H:L.push(H)))}return I?(ae.debug?.("(Re)creating",E),je({table:E.table,database:E.database,schemaDefined:E.schemaDefined,attributes:L,...S})):S}}var a$,et,c$,l$,Ld,qw,u$,d$,Ul,f$,Bw,m$,h$,ae,Qq,Jq,Xq,Pd,Zq,Uw,e$,yS,Fw,AS,t$,r$,n$,bS,p$,Hw,kw,Kh,OS,Gw,E$,_$,Oce,IS,$w,Za,NS,s$,Cce,i$,xw,wS,o$,Dd,Vw=ue(()=>{De();Di();gw();nb();ss();a$=w(oe());k();Qc();et=require("msgpackr"),c$=require("ws"),l$=require("worker_threads"),Ld=w(Q());qh();qw=require("events"),u$=w(os()),d$=w(require("node:tls"));Ll();Ul=w(require("node:process")),f$=require("node:net");Wi();Wn();Bw=require("node:stream"),m$=require("lmdb"),h$=w(require("minimist")),ae=(0,Ld.forComponent)("replication").conditional,Qq=129,Jq=140,Xq=141,Pd=142,Zq=130,Uw=132,e$=133,yS=134,Fw=136,AS=137,t$=143,r$=144,n$=145,bS=146,p$=0,Hw=1,kw=2,Kh=3,OS=4,Gw=5,E$=0,_$=1,Oce=(0,h$.default)(Ul.argv),IS=Oce.HDB_LEADER_URL??Ul.env.HDB_LEADER_URL,$w=new Map,Za=new Map,NS=!0,s$=300,Cce=2,i$=3e4;o(CS,"createWebSocket");o$=500,Dd=class extends qw.EventEmitter{static{o(this,"NodeReplicationConnection")}socket;startTime;retryTime=o$;retries=0;isConnected=!0;isFinished=!1;nodeSubscriptions;latency=0;replicateTablesByDefault;session;sessionResolve;sessionReject;url;subscription;databaseName;nodeName;authorization;constructor(t,r,n,s,i){super(),this.url=t,this.subscription=r,this.databaseName=n,this.authorization=i,this.nodeName=this.nodeName??pi(t)}async connect(){this.session||this.resetSession();let t=[];this.socket=await CS(this.url,{serverName:this.nodeName,authorization:this.authorization});let r;ae.debug?.(`Connecting to ${this.url}, db: ${this.databaseName}, process 
${Ul.pid}`),this.socket.on("open",()=>{this.socket._socket.unref(),ae[this.isConnected?"info":"warn"]?.(`Connected to ${this.url}, db: ${this.databaseName}`),this.retries=0,this.retryTime=o$,this.nodeSubscriptions&&Dl({name:this.nodeName,database:this.databaseName,url:this.url}),this.isConnected=!0,r=Yh(this.socket,{database:this.databaseName,subscription:this.subscription,url:this.url,connection:this,isSubscriptionConnection:this.nodeSubscriptions!==void 0},{replicates:!0}),this.sessionResolve(r)}),this.socket.on("error",n=>{n.code==="SELF_SIGNED_CERT_IN_CHAIN"?(ae.warn?.(`Can not connect to ${this.url}, this server does not have a certificate authority for the certificate provided by ${this.url}`),n.isHandled=!0):n.code!=="ECONNREFUSED"&&(n.code==="UNABLE_TO_VERIFY_LEAF_SIGNATURE"?ae.error?.(`Can not connect to ${this.url}, the certificate provided by ${this.url} is not trusted, this node needs to be added to the cluster, or a certificate authority needs to be added`):ae.error?.(`Error in connection to ${this.url} due to ${n.message}`)),this.sessionReject(n)}),this.socket.on("close",(n,s)=>{if(this.isConnected&&(this.nodeSubscriptions&&wd({name:this.nodeName,database:this.databaseName,url:this.url,finished:this.socket.isFinished}),this.isConnected=!1),this.removeAllListeners("subscriptions-updated"),this.socket.isFinished){this.isFinished=!0,r?.end(),this.emit("finished");return}if(++this.retries%20===1){let i=s?.toString();ae.warn?.(`${r?"Disconnected from":"Failed to connect to"} ${this.url} (db: "${this.databaseName}"), due to ${i?'"'+i+'" ':""}(code: ${n})`)}r=null,this.resetSession(),setTimeout(()=>{this.connect()},this.retryTime).unref(),this.retryTime+=this.retryTime>>8})}resetSession(){this.session=new Promise((t,r)=>{this.sessionResolve=t,this.sessionReject=r})}subscribe(t,r){this.nodeSubscriptions=t,this.replicateTablesByDefault=r,this.emit("subscriptions-updated",t)}unsubscribe(){this.socket.isFinished=!0,this.socket.close(1008,"No longer subscribed")}getRecord(t){return this.session.then(r=>r.getRecord(t))}};o(Yh,"replicateOverWS")});var Ho={};Oe(Ho,{clearThisNodeName:()=>Fce,disableReplication:()=>Mce,enabledDatabases:()=>Xa,forEachReplicatedDatabase:()=>Ya,getThisNodeId:()=>PS,getThisNodeName:()=>Ze,getThisNodeUrl:()=>Wa,hostnameToUrl:()=>US,lastTimeInAuditStore:()=>Fh,monitorNodeCAs:()=>w$,replicateOperation:()=>kce,replicationCertificateAuthorities:()=>ko,sendOperationToNode:()=>Wh,servers:()=>Lce,setReplicator:()=>C$,start:()=>Dce,startOnMainThread:()=>yw,subscribeToNode:()=>Gh,unsubscribeFromNode:()=>_S,urlToNodeName:()=>pi});function Dce(e){if(!e.port&&!e.securePort&&(e.port=Ds.default.get(U.OPERATIONSAPI_NETWORK_PORT),e.securePort=Ds.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),!Ze())throw new Error("Can not load replication without a url (see replication.url in the config)");let t=new Map;for(let s of kh(e))t.set(pi(s.url),s);vce(e),e={mtls:!0,isOperationsServer:!0,maxPayload:10*1024*1024*1024,...e};let r=Le.ws(async(s,i,a,c)=>{if(Ot.debug("Incoming WS connection received "+i.url),i.headers.get("sec-websocket-protocol")!=="harperdb-replication-v1")return c(s,i,a);await a,s._socket.unref(),Yh(s,e,i?.user),s.on("error",l=>{l.code!=="ECONNREFUSED"&&Ot.error("Error in connection to "+this.url,l.message)})},e);e.runFirst=!0,Le.http((s,i)=>{if(s.isWebSocket&&s.headers.get("Sec-WebSocket-Protocol")==="harperdb-replication-v1"){Ot.debug("Incoming replication WS connection received, authorized: 
"+s.authorized),!s.authorized&&s._nodeRequest.socket.authorizationError&&Ot.error(`Incoming client connection from ${s.ip} did not have valid certificate, you may need turn on enableRootCAs in the config if you are using a publicly signed certificate, or add the CA to the server's trusted CAs`,s._nodeRequest.socket.authorizationError);let a=Kt().primaryStore;if(s.authorized&&s.peerCertificate.subjectaltname){let c=(0,I$.getHostnamesFromCertificate)(s.peerCertificate),l;for(let u of c)if(l=u&&(a.get(u)||t.get(u)),l)break;if(l)if(l?.revoked_certificates?.includes(s.peerCertificate.serialNumber)){Ot.warn("Revoked certificate used in attempt to connect to node",l.name,"certificate serial number",s.peerCertificate.serialNumber);return}else s.user=l;else Ot.warn(`No node found for certificate common name/SANs: ${c}, available nodes are ${Array.from(a.getRange({}).filter(({value:u})=>u).map(({key:u})=>u)).join(", ")} and routes ${Array.from(t.keys()).join(", ")}, connection will require credentials.`)}else{let c=a.get(s.ip)||t.get(s.ip);c?s.user=c:Ot.warn(`No node found for IP address ${s.ip}, available nodes are ${Array.from(new Set([...a.getKeys(),...t.keys()])).join(", ")}, connection will require credentials.`)}}return i(s)},e);let n=[];for(let s of r)if(s.secureContexts){let i=o(()=>{let a=new Set(s.secureContexts.values());s.defaultContext&&a.add(s.defaultContext);for(let c of a)try{let l=Array.from(ko);c.options.availableCAs&&l.push(...c.options.availableCAs.values());let u={...c.options,ca:l};c.updatedContext=vS.createSecureContext(u)}catch(l){Ot.error("Error creating replication TLS config",l)}},"updateContexts");s.secureContextsListeners.push(i),n.push(i),Ds.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1&&i()}w$(()=>{for(let s of n)s()})}function w$(e){let t=0;Id(r=>{r?.ca&&(ko.add(r.ca),ko.size!==t&&(t=ko.size,e?.()))})}function Mce(e=!0){N$=e}function vce(e){N$||(st(),Xa=e.databases,Ya(e,(t,r)=>{if(!t){let n=e.databaseSubscriptions||Za;for(let[s,i]of DS){let a=i.get(r);a&&(a.subscribe([],!1),i.delete(r))}n.delete(r);return}for(let n in t){let s=t[n];C$(r,s,e),$w.get(s)?.forEach(i=>i(s))}}))}function C$(e,t,r){if(!t)return console.error(`Attempt to replicate non-existent table ${t.name} from database ${e}`);if(t.replicate===!1||t.sources?.some(s=>s.isReplicator))return;let n;t.sourcedFrom(class O$ extends xr{static{o(this,"Replicator")}static connection;static subscription;static async subscribe(){let i=r.databaseSubscriptions||Za,a=i.get(e),c=a?.tableById||[];c[t.tableId]=t;let l=a?.ready;if(Ot.trace("Setting up replicator subscription to database",e),!a?.auditStore)return this.subscription=a=new Yn,i.set(e,a),a.tableById=c,a.auditStore=t.auditStore,a.dbisDB=t.dbisDB,a.databaseName=e,l&&l(a),a;this.subscription=a}static subscribeOnThisThread(i,a){return!0}static async load(i){if(i){let a=i.residencyId,c=i.residency||t.dbisDB.get([Symbol.for("residency_by_id"),a]);if(c){let l,u=new Set;do{let d,f="",m=1/0;for(let p of c){if(u.has(p)||p===Le.hostname)continue;let _=xce(p,O$.subscription,e);if(_?.isConnected){let g=bd(t.auditStore,e,p)[OS];(!d||g<m)&&(d=_,f=p,m=g)}}if(!d)throw l||new A$.ServerError(`No connection to any other nodes are available: ${c}`,502);let h={requestId:Pce++,table:t,entry:i,id:i.key};u.add(f);try{return await d.getRecord(h)}catch(p){if(d.isConnected)throw p;Ot.warn("Error in load from node",MS,p),l||(l=p)}}while(!0)}}}static isReplicator=!0},{intermediateSource:!0})}function Uce(e,t,r,n,s){let i=DS.get(e);i||DS.set(e,i=new Map);let a=i.get(r);if(a)return 
a;if(t)return i.set(r,a=new Dd(e,t,r,n,s)),a.connect(),a.once("finished",()=>i.delete(r)),a}function xce(e,t,r){let n=S$.get(e);n||(n=new Map,S$.set(e,n));let s=n.get(r);if(s)return s;let i=Kt().primaryStore.get(e);return i?.url&&(s=new Dd(i.url,t,r,e,i.authorization),n.set(r,s),s.connect(),s.once("finished",()=>n.delete(r))),s}async function Wh(e,t,r){r||(r={}),r.serverName=e.name;let n=await CS(e.url,r),s=Yh(n,{},{});return new Promise((i,a)=>{n.on("open",()=>{Ot.debug("Sending operation connection to "+e.url+" opened",t),i(s.sendOperation(t))}),n.on("error",c=>{a(c)}),n.on("close",c=>{Ot.info("Sending operation connection to "+e.url+" closed",c)})}).finally(()=>{n.close()})}function Gh(e){try{b$.isMainThread&&Ot.trace("Subscribing on main thread (should not happen in multi-threaded instance)",e.nodes[0].url,e.database);let t=Za.get(e.database);if(!t){let n;t=new Promise(s=>{Ot.info("Waiting for subscription to database "+e.database),n=s}),t.ready=n,Za.set(e.database,t)}let r=Uce(e.nodes[0].url,t,e.database,e.nodes[0].name,e.nodes[0].authorization);e.nodes[0].name===void 0?r.tentativeNode=e.nodes[0]:r.nodeName=e.nodes[0].name,r.subscribe(e.nodes.filter(n=>Hh(n,e.database)),e.replicateByDefault)}catch(t){Ot.error("Error in subscription to node",e.nodes[0]?.url,t)}}async function _S({name:e,url:t,database:r}){Ot.trace("Unsubscribing from node",e,t,r,"nodes",Array.from(Kt().primaryStore.getRange({})));let n=DS.get(t);if(n){let s=n.get(r);s&&(s.unsubscribe(),n.delete(r))}}function Bce(){if(Kw!==void 0)return Kw;let e=Ds.default.get(U.OPERATIONSAPI_TLS_CERTIFICATE)||Ds.default.get(U.TLS_CERTIFICATE);if(e)return Kw=new R$.X509Certificate((0,y$.readFileSync)(e)).subject?.match(/CN=(.*)/)?.[1]??null}function Ze(){return MS||(MS=Ds.default.get("replication_hostname")??pi(Ds.default.get("replication_url"))??Bce()??T$("operationsapi_network_secureport")??T$("operationsapi_network_port")??"127.0.0.1")}function Fce(){MS=void 0}function T$(e){let t=Ds.default.get(e),r=t?.lastIndexOf?.(":");if(r>0)return t.slice(0,r)}function LS(e){let t=Ds.default.get(e),r=t?.lastIndexOf?.(":");return r>0?+t.slice(r+1).replace(/[\[\]]/g,""):+t}function PS(e){return Bh(e)?.[Ze()]}function Wa(){let e=Ds.default.get("replication_url");return e||US(Ze())}function US(e){let t=LS("replication_port");if(t)return`ws://${e}:${t}`;if(t=LS("replication_secureport"),t)return`wss://${e}:${t}`;if(t=LS("operationsapi_network_port"),t)return`ws://${e}:${t}`;if(t=LS("operationsapi_network_secureport"),t)return`wss://${e}:${t}`}function pi(e){if(e)return new URL(e).hostname}function Ya(e,t){for(let n of Object.getOwnPropertyNames(ke))r(n);return yh(n=>{r(n)}),Il((n,s)=>{r(n.databaseName)});function r(n){let s=ke[n];Ot.trace("Checking replication status of ",n,e?.databases),e?.databases===void 0||e.databases==="*"||e.databases.includes(n)||e.databases.some?.(i=>i.name===n)||!s?t(s,n,!0):Hce(n)&&t(s,n,!1)}o(r,"forDatabase")}function Hce(e){let t=ke[e];for(let r in t)if(t[r].replicate)return!0}function Fh(e){for(let t of e.getKeys({limit:1,reverse:!0}))return t}async function kce(e){let t={message:""};if(e.replicated){e.replicated=!1,Ot.trace?.("Replicating operation",e.operation,"to nodes",Le.nodes.map(n=>n.name));let r=await Promise.allSettled(Le.nodes.map(n=>Wh(n,e)));t.replicated=r.map((n,s)=>{let i=n.status==="rejected"?{status:"failed",reason:n.reason.toString()}:n.value;return i.node=Le.nodes[s]?.name,i})}return t}var 
Ds,Ot,R$,y$,vS,A$,b$,I$,N$,Pce,Lce,ko,Xa,DS,S$,Kw,MS,ss=ue(()=>{De();Da();yu();Vw();Mr();Ds=w(oe()),Ot=w(Q()),R$=require("crypto"),y$=require("fs");qh();Ll();k();gw();vS=w(require("node:tls")),A$=w(ge()),b$=require("worker_threads"),I$=w(os()),Pce=1,Lce=[],ko=Ds.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1?new Set(vS.rootCertificates):new Set;o(Dce,"start");o(w$,"monitorNodeCAs");o(Mce,"disableReplication");o(vce,"assignReplicationSource");o(C$,"setReplicator");DS=new Map;o(Uce,"getSubscriptionConnection");S$=new Map;o(xce,"getRetrievalConnectionByName");o(Wh,"sendOperationToNode");o(Gh,"subscribeToNode");o(_S,"unsubscribeFromNode");o(Bce,"getCommonNameFromCert");o(Ze,"getThisNodeName");o(Fce,"clearThisNodeName");Object.defineProperty(Le,"hostname",{get(){return Ze()}});o(T$,"getHostFromListeningPort");o(LS,"getPortFromListeningPort");o(PS,"getThisNodeId");Le.replication={getThisNodeId:PS,exportIdMapping:Bh};o(Wa,"getThisNodeUrl");o(US,"hostnameToUrl");o(pi,"urlToNodeName");o(Ya,"forEachReplicatedDatabase");o(Hce,"hasExplicitlyReplicatedTable");o(Fh,"lastTimeInAuditStore");o(kce,"replicateOperation")});var jh=M((ave,v$)=>{"use strict";var Md=$G(),{validateBySchema:zh}=lt(),{commonValidators:vd,schemaRegex:Yw}=ki(),pr=require("joi"),Gce=Q(),qce=require("uuid").v4,FS=Po(),Ud=(k(),v(W)),$ce=require("util"),ec=Zn(),{handleHDBError:Go,hdbErrors:Vce,ClientError:xl}=ge(),{HDB_ERROR_MSGS:xS,HTTP_STATUS_CODES:qo}=Vce,{SchemaEventMsg:HS}=ai(),P$=mr(),{getDatabases:Kce}=(De(),v(mt)),{transformReq:xd}=ie(),{replicateOperation:L$}=(ss(),v(Ho)),{cleanupOrphans:Yce}=(Wn(),v(u_)),BS=pr.string().min(1).max(vd.schema_length.maximum).pattern(Yw).messages({"string.pattern.base":"{:#label} "+vd.schema_format.message}),Wce=pr.string().min(1).max(vd.schema_length.maximum).pattern(Yw).messages({"string.pattern.base":"{:#label} "+vd.schema_format.message}).required(),zce=pr.string().min(1).max(vd.schema_length.maximum).pattern(Yw).messages({"string.pattern.base":"{:#label} "+vd.schema_format.message,"any.required":"'primary_key' is required","string.base":"'primary_key' must be a string"}).required();v$.exports={createSchema:jce,createSchemaStructure:D$,createTable:Qce,createTableStructure:M$,createAttribute:tle,dropSchema:Jce,dropTable:Xce,dropAttribute:Zce,getBackup:rle,cleanupOrphanBlobs:nle};async function jce(e){let t=await D$(e);return FS.signalSchemaChange(new HS(process.pid,e.operation,e.schema)),t}o(jce,"createSchema");async function D$(e){let t=zh(e,pr.object({database:BS,schema:BS}));if(t)throw new xl(t.message);if(xd(e),!await Md.checkSchemaExists(e.schema))throw Go(new Error,xS.SCHEMA_EXISTS_ERR(e.schema),qo.BAD_REQUEST,Ud.LOG_LEVELS.ERROR,xS.SCHEMA_EXISTS_ERR(e.schema),!0);return await ec.createSchema(e),`database '${e.schema}' successfully created`}o(D$,"createSchemaStructure");async function Qce(e){return xd(e),e.hash_attribute=e.primary_key??e.hash_attribute,await M$(e)}o(Qce,"createTable");async function M$(e){let t=zh(e,pr.object({database:BS,schema:BS,table:Wce,residence:pr.array().items(pr.string().min(1)).optional(),hash_attribute:zce}));if(t)throw new xl(t.message);if(!await Md.checkSchemaTableExists(e.schema,e.table))throw Go(new Error,xS.TABLE_EXISTS_ERR(e.schema,e.table),qo.BAD_REQUEST,Ud.LOG_LEVELS.ERROR,xS.TABLE_EXISTS_ERR(e.schema,e.table),!0);let n={name:e.table,schema:e.schema,id:qce(),hash_attribute:e.hash_attribute};try{if(e.residence)if(global.clustering_on)n.residence=e.residence,await ec.createTable(n,e);else throw Go(new Error,"Clustering does not appear to be enabled. 
Cannot insert table with property 'residence'.",qo.BAD_REQUEST);else await ec.createTable(n,e);return`table '${e.schema}.${e.table}' successfully created.`}catch(s){throw s}}o(M$,"createTableStructure");async function Jce(e){let t=zh(e,pr.object({database:pr.string(),schema:pr.string()}).or("database","schema").messages({"object.missing":"'database' is required"}));if(t)throw new xl(t.message);xd(e);let r=await Md.checkSchemaExists(e.schema);if(r)throw Go(new Error,r,qo.NOT_FOUND,Ud.LOG_LEVELS.ERROR,r,!0);let n=await Md.schemaDescribe.describeSchema({schema:e.schema}),s=Object.keys(global.hdb_schema[e.schema]);await ec.dropSchema(e),FS.signalSchemaChange(new HS(process.pid,e.operation,e.schema)),await P$.purgeSchemaTableStreams(e.schema,s);let i=await L$(e);return i.message=`successfully deleted '${e.schema}'`,i}o(Jce,"dropSchema");async function Xce(e){let t=zh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required()}));if(t)throw new xl(t.message);xd(e);let r=await Md.checkSchemaTableExists(e.schema,e.table);if(r)throw Go(new Error,r,qo.NOT_FOUND,Ud.LOG_LEVELS.ERROR,r,!0);await ec.dropTable(e),await P$.purgeTableStream(e.schema,e.table);let n=await L$(e);return n.message=`successfully deleted table '${e.schema}.${e.table}'`,n}o(Xce,"dropTable");async function Zce(e){let t=zh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required(),attribute:pr.string().required()}));if(t)throw new xl(t.message);xd(e);let r=await Md.checkSchemaTableExists(e.schema,e.table);if(r)throw Go(new Error,r,qo.NOT_FOUND,Ud.LOG_LEVELS.ERROR,r,!0);if(e.attribute===global.hdb_schema[e.schema][e.table].hash_attribute)throw Go(new Error,"You cannot drop a hash attribute",qo.BAD_REQUEST,void 0,void 0,!0);if(Ud.TIME_STAMP_NAMES.indexOf(e.attribute)>=0)throw Go(new Error,`cannot drop internal timestamp attribute: ${e.attribute}`,qo.BAD_REQUEST,void 0,void 0,!0);try{return await ec.dropAttribute(e),ele(e),FS.signalSchemaChange(new HS(process.pid,e.operation,e.schema,e.table,e.attribute)),`successfully deleted attribute '${e.attribute}'`}catch(n){throw Gce.error(`Got an error deleting attribute ${$ce.inspect(e)}.`),n}}o(Zce,"dropAttribute");function ele(e){let t=Object.values(global.hdb_schema[e.schema][e.table].attributes);for(let r=0;r<t.length;r++)t[r].attribute===e.attribute&&global.hdb_schema[e.schema][e.table].attributes.splice(r,1)}o(ele,"dropAttributeFromGlobal");async function tle(e){xd(e);let t=Kce()[e.schema][e.table].attributes;for(let{name:r}of t)if(r===e.attribute)throw Go(new Error,`attribute '${e.attribute}' already exists in ${e.schema}.${e.table}`,qo.BAD_REQUEST,void 0,void 0,!0);return await ec.createAttribute(e),FS.signalSchemaChange(new HS(process.pid,e.operation,e.schema,e.table,e.attribute)),`attribute '${e.schema}.${e.table}.${e.attribute}' successfully created.`}o(tle,"createAttribute");function rle(e){return ec.getBackup(e)}o(rle,"getBackup");function nle(e){if(!e.database)throw new xl('Must provide "database" name for search for orphaned blobs');if(!databases[e.database])throw new xl(`Unknown database '${e.database}'`);return Yce(databases[e.database],e.database),{message:"Orphaned blobs cleanup started, check logs for progress"}}o(nle,"cleanupOrphanBlobs")});var x$=M((lve,U$)=>{"use strict";var{OPERATIONS_ENUM:sle}=(k(),v(W)),Ww=class{static{o(this,"ReadAuditLogObject")}constructor(t,r,n=void 0,s=void 0){this.operation=sle.READ_AUDIT_LOG,this.schema=t,this.table=r,this.search_type=n,this.search_values=s}};U$.exports=Ww});var zw=M((fve,G$)=>{"use 
strict";var ile=Zn(),dve=x$(),kS=ie(),GS=(k(),v(W)),ole=oe(),{handleHDBError:B$,hdbErrors:ale}=ge(),{HDB_ERROR_MSGS:F$,HTTP_STATUS_CODES:H$}=ale,cle=Object.values(GS.READ_AUDIT_LOG_SEARCH_TYPES_ENUM),k$="To use this operation audit log must be enabled in harperdb-config.yaml";G$.exports=lle;async function lle(e){if(kS.isEmpty(e.schema))throw new Error(F$.SCHEMA_REQUIRED_ERR);if(kS.isEmpty(e.table))throw new Error(F$.TABLE_REQUIRED_ERR);if(!ole.get(GS.CONFIG_PARAMS.LOGGING_AUDITLOG))throw B$(new Error,k$,H$.BAD_REQUEST,GS.LOG_LEVELS.ERROR,k$,!0);let t=kS.checkSchemaTableExist(e.schema,e.table);if(t)throw B$(new Error,t,H$.NOT_FOUND,GS.LOG_LEVELS.ERROR,t,!0);if(!kS.isEmpty(e.search_type)&&cle.indexOf(e.search_type)<0)throw new Error(`Invalid searchType '${read_audit_log_object.search_type}'`);return await ile.readAuditLog(e)}o(lle,"readAuditLog")});var $$=M((hve,q$)=>{"use strict";var{OPERATIONS_ENUM:ule}=(k(),v(W)),jw=class{static{o(this,"GetBackupObject")}constructor(t,r,n=void 0,s=void 0){this.operation=ule.GET_BACKUP,this.schema=t,this.table=r}};q$.exports=jw});var Y$=M((gve,K$)=>{"use strict";var dle=Zn(),Eve=$$(),Qw=ie(),fle=(k(),v(W)),_ve=oe(),{handleHDBError:mle,hdbErrors:hle}=ge(),{HDB_ERROR_MSGS:V$,HTTP_STATUS_CODES:ple}=hle;K$.exports=Ele;async function Ele(e){if(Qw.isEmpty(e.schema))throw new Error(V$.SCHEMA_REQUIRED_ERR);if(Qw.isEmpty(e.table))throw new Error(V$.TABLE_REQUIRED_ERR);let t=Qw.checkSchemaTableExist(e.schema,e.table);if(t)throw mle(new Error,t,ple.NOT_FOUND,fle.LOG_LEVELS.ERROR,t,!0);return await dle.getBackup(readAuditLogObject)}o(Ele,"getBackup")});var Q$=M((Tve,j$)=>{"use strict";var _le=oe(),tc=require("joi"),gle=lt(),W$=require("moment"),Sle=require("fs-extra"),Jw=require("path"),Tle=require("lodash"),Qh=(k(),v(W)),{LOG_LEVELS:Bl}=(k(),v(W)),Rle="YYYY-MM-DD hh:mm:ss",yle=Jw.resolve(__dirname,"../logs");j$.exports=function(e){return gle.validateBySchema(e,Ale)};var Ale=tc.object({from:tc.custom(z$),until:tc.custom(z$),level:tc.valid(Bl.NOTIFY,Bl.FATAL,Bl.ERROR,Bl.WARN,Bl.INFO,Bl.DEBUG,Bl.TRACE),order:tc.valid("asc","desc"),limit:tc.number().min(1),start:tc.number().min(0),log_name:tc.custom(ble)});function z$(e,t){if(W$(e,W$.ISO_8601).format(Rle)==="Invalid date")return t.message(`'${t.state.path[0]}' date '${e}' is invalid.`)}o(z$,"validateDatetime");function ble(e,t){if(Tle.invert(Qh.LOG_NAMES)[e]===void 0)return t.message(`'log_name' '${e}' is invalid.`);let n=_le.get(Qh.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e===void 0?Qh.LOG_NAMES.HDB:e,i=s===Qh.LOG_NAMES.INSTALL?Jw.join(yle,Qh.LOG_NAMES.INSTALL):Jw.join(n,s);return Sle.existsSync(i)?null:t.message(`'log_name' '${e}' does not exist.`)}o(ble,"validateReadLogPath")});var Zw=M((yve,X$)=>{"use strict";var qS=(k(),v(W)),Ile=Q(),Nle=oe(),wle=Q$(),Xw=require("path"),J$=require("fs-extra"),{once:Ole}=require("events"),{handleHDBError:Cle,hdbErrors:Ple}=ge(),{PACKAGE_ROOT:Lle}=Rt(),{replicateOperation:Dle}=(ss(),v(Ho)),Mle=Xw.join(Lle,"logs"),vle=1e3,Ule=200;X$.exports=xle;async function xle(e){let t=wle(e);if(t)throw Cle(t,t.message,Ple.HTTP_STATUS_CODES.BAD_REQUEST,void 0,void 0,!0);let r=Dle(e),n=Nle.get(qS.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e.log_name===void 0?qS.LOG_NAMES.HDB:e.log_name,i=s===qS.LOG_NAMES.INSTALL?Xw.join(Mle,qS.LOG_NAMES.INSTALL):Xw.join(n,s),a=e.level!==void 0,c=a?e.level:void 0,l=e.from!==void 0,u=l?new Date(e.from):void 0,d=e.until!==void 0,f=d?new Date(e.until):void 0,m=e.limit===void 0?vle:e.limit,h=e.order===void 0?void 0:e.order,p=e.start===void 
0?0:e.start,_=p+m,g=0;h==="desc"&&!u&&!f&&(g=Math.max(J$.statSync(i).size-(_+5)*Ule,0));let y=J$.createReadStream(i,{start:g});y.on("error",G=>{Ile.error(G)});let T=0,R=[],N="",O;y.on("data",G=>{let Y=/(?:^|\n)(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:[\d\.]+Z) \[(.+?)]: /g;G=N+G;let q=0,K;for(;(K=Y.exec(G))&&!y.destroyed;){O&&(O.message=G.slice(q,K.index),F(O));let[ce,le,se]=K,pe=se.split("] ["),Ne=pe[0],Ue=pe[1];pe.splice(0,2),O={timestamp:le,thread:Ne,level:Ue,tags:pe,message:""},q=K.index+ce.length}N=G.slice(q)}),y.on("end",G=>{y.destroyed||O&&(O.message=N.trim(),F(O))}),y.resume();function F(G){let Y,q,K;switch(!0){case(a&&l&&d):Y=new Date(G.timestamp),q=new Date(u),K=new Date(f),G.level===c&&Y>=q&&Y<=K&&T<p?T++:G.level===c&&Y>=q&&Y<=K&&(eo(G,h,R),T++,T===_&&y.destroy());break;case(a&&l):Y=new Date(G.timestamp),q=new Date(u),G.level===c&&Y>=q&&T<p?T++:G.level===c&&Y>=q&&(eo(G,h,R),T++,T===_&&y.destroy());break;case(a&&d):Y=new Date(G.timestamp),K=new Date(f),G.level===c&&Y<=K&&T<p?T++:G.level===c&&Y<=K&&(eo(G,h,R),T++,T===_&&y.destroy());break;case(l&&d):Y=new Date(G.timestamp),q=new Date(u),K=new Date(f),Y>=q&&Y<=K&&T<p?T++:Y>=q&&Y<=K&&(eo(G,h,R),T++,T===_&&y.destroy());break;case a:G.level===c&&T<p?T++:G.level===c&&(eo(G,h,R),T++,T===_&&y.destroy());break;case l:Y=new Date(G.timestamp),q=new Date(u),Y>=q&&T<p?T++:Y>=q&&T>=p&&(eo(G,h,R),T++,T===_&&y.destroy());break;case d:Y=new Date(G.timestamp),K=new Date(f),Y<=K&&T<p?T++:Y<=K&&T>=p&&(eo(G,h,R),T++,T===_&&y.destroy());break;default:T<p?T++:(eo(G,h,R),T++,T===_&&y.destroy())}}o(F,"onLogMessage"),await Ole(y,"close");let Z=await r;if(Z.replicated){for(let G of R)G.node=server.hostname;for(let G of Z.replicated){let Y=G.node;if(G.status==="failed")eo({timestamp:new Date().toISOString(),level:"error",node:Y,message:`Error retrieving logs: ${G.reason}`},h,R);else for(let q of G.results)q.node=Y,eo(q,h,R)}}return R}o(xle,"readLog");function eo(e,t,r){t==="desc"?Ble(e,r):t==="asc"?Fle(e,r):r.push(e)}o(eo,"pushLineToResult");function Ble(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)>r?n=i+1:s=i}t.splice(n,0,e)}o(Ble,"insertDescending");function Fle(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)<r?n=i+1:s=i}t.splice(n,0,e)}o(Fle,"insertAscending")});var $S=M((Ove,rV)=>{"use strict";var eO=require("joi"),{string:Bd,boolean:Z$,date:Hle}=eO.types(),kle=lt(),{validateSchemaExists:bve,validateTableExists:Ive,validateSchemaName:Nve}=ki(),Gle=(k(),v(W)),qle=Nt(),eV=oe();eV.initSync();var wve=Bd.invalid(eV.get(Gle.CONFIG_PARAMS.CLUSTERING_NODENAME)??"node_name").pattern(qle.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),tV={operation:Bd.valid("add_node","update_node","set_node_replication"),node_name:Bd.optional(),subscriptions:eO.array().items({table:Bd.optional(),schema:Bd.optional(),database:Bd.optional(),subscribe:Z$.required(),publish:Z$.required().custom(Vle),start_time:Hle.iso()})};function $le(e){return kle.validateBySchema(e,eO.object(tV))}o($le,"addUpdateNodeValidator");function Vle(e,t){if(t.state.ancestors[2].operation==="add_node"&&e===!1&&t.state.ancestors[0].subscribe===!1)return t.message(`'subscriptions[${t.state.path[1]}]' subscribe and/or publish must be set to true when adding a node`)}o(Vle,"checkForFalsy");rV.exports={addUpdateNodeValidator:$le,validationSchema:tV}});var Fd=M((Pve,nV)=>{"use strict";var 
tO=class{static{o(this,"Node")}constructor(t,r,n){this.name=t,this.subscriptions=r,this.system_info=n}},rO=class{static{o(this,"NodeSubscription")}constructor(t,r,n,s){this.schema=t,this.table=r,this.publish=n,this.subscribe=s}};nV.exports={Node:tO,NodeSubscription:rO}});var iV=M((Dve,sV)=>{"use strict";var Kle=(k(),v(W)).OPERATIONS_ENUM,nO=class{static{o(this,"UpsertObject")}constructor(t,r,n,s=void 0){this.operation=Kle.UPSERT,this.schema=t,this.table=r,this.records=n,this.__origin=s}};sV.exports=nO});var Jh=M((vve,oV)=>{"use strict";var sO=class{static{o(this,"RemotePayloadObject")}constructor(t,r,n,s){this.operation=t,this.node_name=r,this.subscriptions=n,this.system_info=s}},iO=class{static{o(this,"RemotePayloadSubscription")}constructor(t,r,n,s,i,a,c){this.schema=t,this.table=r,this.hash_attribute=n,this.publish=s,this.subscribe=i,this.start_time=a,c!==void 0&&(this.attributes=c)}};oV.exports={RemotePayloadObject:sO,RemotePayloadSubscription:iO}});var cV=M((xve,aV)=>{"use strict";var oO=class{static{o(this,"TableSizeObject")}constructor(t,r,n=0,s=0,i=0,a=0){this.schema=t,this.table=r,this.table_size=n,this.record_count=s,this.transaction_log_size=i,this.transaction_log_record_count=a}};aV.exports=oO});var uV=M((qve,lV)=>{"use strict";var Yle=cV(),Fve=qt(),Hve=gt(),Wle=Q(),{getSchemaPath:kve,getTransactionAuditStorePath:Gve}=At(),{getDatabases:zle}=(De(),v(mt));lV.exports=jle;async function jle(e){let t=new Yle;try{let r=zle()[e.schema]?.[e.name],n=r.primaryStore.getStats(),s=r.auditStore?.getStats();t.schema=e.schema,t.table=e.name,t.record_count=n.entryCount,t.transaction_log_record_count=s.entryCount}catch(r){Wle.warn(`unable to stat table dbi due to ${r}`)}return t}o(jle,"lmdbGetTableSize")});var fV=M((Vve,dV)=>{"use strict";var aO=class{static{o(this,"SystemInformationObject")}constructor(t,r,n,s,i,a,c){this.system=t,this.time=r,this.cpu=n,this.memory=s,this.disk=i,this.network=a,this.harperdb_processes=c}};dV.exports=aO});var kd=M((Qve,EV)=>{"use strict";var Qle=require("fs-extra"),Jle=require("path"),mn=require("systeminformation"),rc=Q(),mV=mr(),Yve=Nt(),Hd=(k(),v(W)),Xle=uV(),Zle=Ba(),{getThreadInfo:hV}=nt(),Xh=oe();Xh.initSync();var eue=fV(),{openEnvironment:Wve}=gt(),{getSchemaPath:zve}=At(),{database:jve,databases:cO}=(De(),v(mt)),VS;EV.exports={getHDBProcessInfo:fO,getNetworkInfo:hO,getDiskInfo:mO,getMemoryInfo:dO,getCPUInfo:uO,getTimeInfo:lO,getSystemInformation:pO,systemInformation:tue,getTableSize:EO,getMetrics:_O};function lO(){return mn.time()}o(lO,"getTimeInfo");async function uO(){try{let{family:e,model:t,stepping:r,revision:n,voltage:s,speedmin:i,speedmax:a,governor:c,socket:l,cache:u,...d}=await mn.cpu();d.cpu_speed=await mn.cpuCurrentSpeed();let{rawCurrentload:f,rawCurrentloadIdle:m,rawCurrentloadIrq:h,rawCurrentloadNice:p,rawCurrentloadSystem:_,rawCurrentloadUser:g,cpus:y,...T}=await mn.currentLoad();return T.cpus=[],y.forEach(R=>{let{rawLoad:N,rawLoadIdle:O,rawLoadIrq:F,rawLoadNice:Z,rawLoadSystem:G,rawLoadUser:Y,...q}=R;T.cpus.push(q)}),d.current_load=T,d}catch(e){return rc.error(`error in getCPUInfo: ${e}`),{}}}o(uO,"getCPUInfo");async function dO(){try{let{buffers:e,cached:t,slab:r,buffcache:n,...s}=await mn.mem();return Object.assign(s,process.memoryUsage())}catch(e){return rc.error(`error in getMemoryInfo: ${e}`),{}}}o(dO,"getMemoryInfo");async function fO(){let e={core:[],clustering:[]};try{let t=await mn.processes(),r;try{r=Number.parseInt(await 
Qle.readFile(Jle.join(Xh.get(Hd.CONFIG_PARAMS.ROOTPATH),Hd.HDB_PID_FILE),"utf8"))}catch(n){if(n.code===Hd.NODE_ERROR_CODES.ENOENT)rc.warn("Unable to locate 'hdb.pid' file, try stopping and starting HarperDB. This could be because HarperDB is not running.");else throw n}t.list.forEach(n=>{n.pid===r?e.core.push(n):n.name==="nats-server"&&e.clustering.push(n)});for(let n of e.core)for(let s of t.list)s.pid===n.parentPid&&(s.name==="PM2"||s.command==="PM2")&&(n.parent="PM2");return e}catch(t){return rc.error(`error in getHDBProcessInfo: ${t}`),e}}o(fO,"getHDBProcessInfo");async function mO(){let e={};try{if(!Xh.get(Hd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_DISK))return e;let{rIO_sec:t,wIO_sec:r,tIO_sec:n,ms:s,...i}=await mn.disksIO();e.io=i;let{rxSec:a,txSec:c,wxSec:l,...u}=await mn.fsStats();return e.read_write=u,e.size=await mn.fsSize(),e}catch(t){return rc.error(`error in getDiskInfo: ${t}`),e}}o(mO,"getDiskInfo");async function hO(){let e={default_interface:null,latency:{},interfaces:[],stats:[],connections:[]};try{return Xh.get(Hd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_NETWORK)&&(e.default_interface=await mn.networkInterfaceDefault(),e.latency=await mn.inetChecksite("google.com"),(await mn.networkInterfaces()).forEach(n=>{let{internal:s,virtual:i,mtu:a,dhcp:c,dnsSuffix:l,ieee8021xAuth:u,ieee8021xState:d,carrierChanges:f,...m}=n;e.interfaces.push(m)}),(await mn.networkStats()).forEach(n=>{let{rxSec:s,txSec:i,ms:a,...c}=n;e.stats.push(c)})),e}catch(t){return rc.error(`error in getNetworkInfo: ${t}`),e}}o(hO,"getNetworkInfo");async function pO(){if(VS!==void 0)return VS;let e={};try{let{codepage:t,logofile:r,serial:n,build:s,servicepack:i,uefi:a,...c}=await mn.osInfo();e=c;let l=await mn.versions("node, npm");return e.node_version=l.node,e.npm_version=l.npm,VS=e,VS}catch(t){return rc.error(`error in getSystemInformation: ${t}`),e}}o(pO,"getSystemInformation");async function EO(){let e=[],t=await Zle.describeAll();for(let r of Object.values(t))for(let n of Object.values(r))e.push(await Xle(n));return e}o(EO,"getTableSize");async function _O(){let e={};for(let t in cO){let r=e[t]={},n=r.tables={};for(let s in cO[t])try{let i=cO[t][s];if(!r.readers&&(Object.assign(r,i.primaryStore.rootStore.getStats()),delete r.root,r.readers=i.primaryStore.rootStore.readerList().split(/\n\s+/).slice(1).map(l=>{let[u,d,f]=l.trim().split(" ");return{pid:u,thread:d,txnid:f}}),i.auditStore)){let{treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}=i.auditStore.getStats();r.audit={treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}}let a=i.primaryStore.getStats(),c={};for(let l of["treeDepth","treeBranchPageCount","treeLeafPageCount","entryCount","overflowPages"])c[l]=a[l];n[s]=c}catch(i){rc.notify(`Error getting stats for table ${s}: ${i}`)}}return e}o(_O,"getMetrics");async function pV(){if(Xh.get(Hd.CONFIG_PARAMS.CLUSTERING_ENABLED)){let{jsm:e}=await mV.getNATSReferences(),t=await mV.listStreams(),r=[];for(let n of t){let s=[],i=await e.consumers.list(n.config.name);for await(let c of i)s.push({name:c.name,created:c.created,num_ack_pending:c.num_ack_pending,num_redelivered:c.num_redelivered,num_waiting:c.num_waiting,num_pending:c.num_pending});let a={stream_name:n.config.name,database:n.config.subjects[0].split(".")[1],table:n.config.subjects[0].split(".")[2],state:n.state,consumers:s};r.push(a)}return r}}o(pV,"getNatsStreamInfo");async function tue(e){let t=new eue;if(!Array.isArray(e.attributes)||e.attributes.length===0)return t.system=await 
pO(),t.time=lO(),t.cpu=await uO(),t.memory=await dO(),t.disk=await mO(),t.network=await hO(),t.harperdb_processes=await fO(),t.table_size=await EO(),t.metrics=await _O(),t.threads=await hV(),t.replication=await pV(),t;for(let r=0;r<e.attributes.length;r++)switch(e.attributes[r]){case"system":t.system=await pO();break;case"time":t.time=lO();break;case"cpu":t.cpu=await uO();break;case"memory":t.memory=await dO();break;case"disk":t.disk=await mO();break;case"network":t.network=await hO();break;case"harperdb_processes":t.harperdb_processes=await fO();break;case"table_size":t.table_size=await EO();break;case"database_metrics":case"metrics":t.metrics=await _O();break;case"threads":t.threads=await hV();break;case"replication":t.replication=await pV();break;default:break}return t}o(tue,"systemInformation")});var $o=M((tUe,TV)=>{"use strict";var rue=vn(),gO=ie(),nue=require("util"),Fl=(k(),v(W)),_V=oe();_V.initSync();var sue=JN(),gV=cn(),{Node:Xve,NodeSubscription:Zve}=Fd(),iue=Yu(),oue=iV(),{RemotePayloadObject:aue,RemotePayloadSubscription:cue}=Jh(),{handleHDBError:lue,hdbErrors:uue}=ge(),{HTTP_STATUS_CODES:due,HDB_ERROR_MSGS:fue}=uue,mue=ci(),hue=kd(),{packageJson:pue}=Rt(),{getDatabases:Eue}=(De(),v(mt)),eUe=nue.promisify(sue.authorize),_ue=gV.searchByHash,gue=gV.searchByValue;TV.exports={isEmpty:Sue,getNodeRecord:Tue,upsertNodeRecord:Rue,buildNodePayloads:yue,checkClusteringEnabled:Aue,getAllNodeRecords:bue,getSystemInfo:Iue,reverseSubscription:SV};function Sue(e){return e==null}o(Sue,"isEmpty");async function Tue(e){let t=new iue(Fl.SYSTEM_SCHEMA_NAME,Fl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e],["*"]);return _ue(t)}o(Tue,"getNodeRecord");async function Rue(e){let t=new oue(Fl.SYSTEM_SCHEMA_NAME,Fl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e]);return rue.upsert(t)}o(Rue,"upsertNodeRecord");function SV(e){if(gO.isEmpty(e.subscribe)||gO.isEmpty(e.publish))throw new Error("Received invalid subscription object");let{schema:t,table:r,hash_attribute:n}=e,s={schema:t,table:r,hash_attribute:n};return e.subscribe===!0&&e.publish===!1?(s.subscribe=!1,s.publish=!0):e.subscribe===!1&&e.publish===!0?(s.subscribe=!0,s.publish=!1):(s.subscribe=e.subscribe,s.publish=e.publish),s}o(SV,"reverseSubscription");function yue(e,t,r,n){let s=[];for(let i=0,a=e.length;i<a;i++){let c=e[i],{schema:l,table:u}=c,d=gO.getTableHashAttribute(l,u),{subscribe:f,publish:m}=SV(c),h=Eue()[l]?.[u],p=new cue(l,u,d,m,f,c.start_time,h.schemaDefined?h.attributes:void 0);s.push(p)}return new aue(r,t,s,n)}o(yue,"buildNodePayloads");function Aue(){if(!_V.get(Fl.CONFIG_PARAMS.CLUSTERING_ENABLED))throw lue(new Error,fue.CLUSTERING_NOT_ENABLED,due.BAD_REQUEST,void 0,void 0,!0)}o(Aue,"checkClusteringEnabled");async function bue(){let e=new mue(Fl.SYSTEM_SCHEMA_NAME,Fl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,"name","*",void 0,["*"]);return Array.from(await gue(e))}o(bue,"getAllNodeRecords");async function Iue(){let e=await hue.getSystemInformation();return{hdb_version:pue.version,node_version:e.node_version,platform:e.platform}}o(Iue,"getSystemInfo")});var SO=M((nUe,OV)=>{"use strict";var KS=mr(),RV=ie(),yV=Nt(),AV=(k(),v(W)),YS=Q(),bV=jh(),Nue=Km(),{RemotePayloadObject:wue}=Jh(),{handleHDBError:IV,hdbErrors:Oue}=ge(),{HTTP_STATUS_CODES:NV}=Oue,{NodeSubscription:wV}=Fd();OV.exports=Cue;async function Cue(e,t){let r;try{r=await KS.request(`${t}.${yV.REQUEST_SUFFIX}`,new wue(AV.OPERATIONS_ENUM.DESCRIBE_ALL,t,void 0,void 0)),YS.trace("Response from remote describe all request:",r)}catch(a){YS.error(`addNode received error from describe all request to remote 
node: ${a}`);let c=KS.requestErrorHandler(a,"add_node",t);throw IV(new Error,c,NV.INTERNAL_SERVER_ERROR,"error",c)}if(r.status===yV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let a=`Error returned from remote node ${t}: ${r.message}`;throw IV(new Error,a,NV.INTERNAL_SERVER_ERROR,"error",a)}let n=r.message,s=[],i=[];for(let a of e){let{table:c}=a,l=a.database??a.schema??"data";if(l===AV.SYSTEM_SCHEMA_NAME){await KS.createLocalTableStream(l,c);let p=new wV(l,c,a.publish,a.subscribe);p.start_time=a.start_time,i.push(p);continue}let u=RV.doesSchemaExist(l),d=n[l]!==void 0,f=c?RV.doesTableExist(l,c):!0,m=c?n?.[l]?.[c]!==void 0:!0;if(!u&&!d||!f&&!m){s.push(a);continue}if(!u&&d&&(YS.trace(`addNode creating schema: ${l}`),await bV.createSchema({operation:"create_schema",schema:l})),!f&&m){YS.trace(`addNode creating table: ${c} in schema: ${l} with attributes ${JSON.stringify(n[l][c].attributes)}`);let p=new Nue(l,c,n[l][c].hash_attribute);n[l][c].attributes&&(p.attributes=n[l][c].attributes),await bV.createTable(p)}await KS.createLocalTableStream(l,c);let h=new wV(l,c,a.publish,a.subscribe);h.start_time=a.start_time,i.push(h)}return{added:i,skipped:s}}o(Cue,"reviewSubscriptions")});var Gd={};Oe(Gd,{addNodeBack:()=>TO,removeNodeBack:()=>RO,setNode:()=>Mue});async function Mue(e){e.node_name&&!e.hostname&&(e.hostname=e.node_name),e.verify_tls!==void 0&&(e.rejectUnauthorized=e.verify_tls);let{url:t,hostname:r}=e;t?r||(r=e.hostname=pi(t)):t=US(r);let n=(0,PV.validateBySchema)(e,Due);if(n)throw(0,Vo.handleHDBError)(n,n.message,Lue.BAD_REQUEST,void 0,void 0,!0);if(e.operation==="remove_node"){if(!t&&!r)throw new Vo.ClientError("url or hostname is required for remove_node operation");let h=r,p=Kt(),_=await p.get(h);if(!_)throw new Vo.ClientError(h+" does not exist");try{await Wh({url:_.url},{operation:$.REMOVE_NODE_BACK,name:_?.subscriptions?.length>0?Ze():h},void 0)}catch(g){as.warn(`Error removing node from target node ${h}, if it is offline and we be online in the future, you may need to clean up this node manually, or retry:`,g)}return await p.delete(h),`Successfully removed '${h}' from cluster`}if(!t)throw new Vo.ClientError("url required for this operation");let s=Wa();if(s==null)throw new Vo.ClientError("replication url is missing from harperdb-config.yaml");let i,a,c;if(t?.startsWith("wss:")){i=await(0,Ms.getReplicationCert)();let h=await(0,Ms.getReplicationCertAuth)();if(!i)throw new Error("Unable to find a certificate to use for replication");i.options.is_self_signed?(a=await(0,Ms.createCsr)(),as.info("Sending CSR to target node:",t)):h&&(c=h.certificate,as.info("Sending CA named",h.name,"to target node",t))}let l={operation:$.ADD_NODE_BACK,hostname:(0,sc.get)(U.REPLICATION_HOSTNAME),target_hostname:r,url:s,csr:a,cert_auth:c,authorization:e.retain_authorization?e.authorization:null};if((0,sc.get)(U.REPLICATION_SHARD)!==void 0&&(l.shard=(0,sc.get)(U.REPLICATION_SHARD)),e.subscriptions?l.subscriptions=e.subscriptions.map(CV):l.subscriptions=null,e.hasOwnProperty("subscribe")||e.hasOwnProperty("publish")){let h=CV(e);l.subscribe=h.subscribe,l.publish=h.publish}e?.authorization?.username&&e?.authorization?.password&&(e.authorization="Basic "+Buffer.from(e.authorization.username+":"+e.authorization.password).toString("base64"));let u,d;try{u=await Wh({url:t},l,e)}catch(h){h.message=`Error returned from ${t}: `+h.message,as.warn("Error adding node:",t,"to cluster:",h),d=h}if(a&&(!u?.certificate||!u?.certificate?.includes?.("BEGIN CERTIFICATE")))throw d?(d.message+=" and connection was required to sign 
certificate",d):new Error(`Unexpected certificate signature response from node ${t} response: ${JSON.stringify(u)}`);a&&(as.info("CSR response received from node:",t,"saving certificate and CA in hdb_certificate"),await(0,Ms.setCertTable)({name:Pue.certificateFromPem(u.signingCA).issuer.getField("CN").value,certificate:u.signingCA,is_authority:!0}),u.certificate&&await(0,Ms.setCertTable)({name:Ze(),uses:["https","operations","wss"],certificate:u.certificate,private_key_name:i?.options?.key_file,is_authority:!1,is_self_signed:!1}),c=u.signingCA);let f={url:t,ca:u?.usingCA};if(e.hostname&&(f.name=e.hostname),e.subscriptions?f.subscriptions=e.subscriptions:f.replicates=!0,e.start_time&&(f.start_time=typeof e.start_time=="string"?new Date(e.start_time).getTime():e.start_time),e.retain_authorization&&(f.authorization=e.authorization),e.revoked_certificates&&(f.revoked_certificates=e.revoked_certificates),u?.shard!==void 0?f.shard=u.shard:e.shard!==void 0&&(f.shard=e.shard),f.replicates){let h={url:s,ca:c,replicates:!0,subscriptions:null};(0,sc.get)(U.REPLICATION_SHARD)!==void 0&&(h.shard=(0,sc.get)(U.REPLICATION_SHARD)),e.retain_authorization&&(h.authorization=e.authorization),e.start_time&&(h.start_time=e.start_time),await Bo(Ze(),h)}await Bo(u?u.nodeName:f.name??pi(t),f);let m;return e.operation==="update_node"?m=`Successfully updated '${t}'`:m=`Successfully added '${t}' to cluster`,d&&(m+=" but there was an error updating target node: "+d.message),m}async function TO(e){as.trace("addNodeBack received request:",e);let t=await(0,Ms.signCertificate)(e),r;e.csr?(r=t.signingCA,as.info("addNodeBack received CSR from node:",e.url,"this node will use and respond with CA that was used to issue CSR")):(r=e?.cert_auth,as.info("addNodeBack received CA from node:",e.url));let n={url:e.url,ca:r};e.subscriptions?n.subscriptions=e.subscriptions:(n.replicates=!0,n.subscriptions=null),e.start_time&&(n.start_time=e.start_time),e.authorization&&(n.authorization=e.authorization),e.shard!==void 0&&(n.shard=e.shard);let s=await(0,Ms.getReplicationCertAuth)();if(n.replicates){let i={url:Wa(),ca:s?.certificate,replicates:!0,subscriptions:null};(0,sc.get)(U.REPLICATION_SHARD)!==void 0&&(i.shard=(0,sc.get)(U.REPLICATION_SHARD),t.shard=i.shard),e.start_time&&(i.start_time=e.start_time),e.authorization&&(i.authorization=e.authorization),await Bo(Ze(),i)}return await Bo(e.hostname,n),t.nodeName=Ze(),t.usingCA=s?.certificate,as.info("addNodeBack responding to:",e.url,"with CA named:",s?.name),t}async function RO(e){as.trace("removeNodeBack received request:",e),await Kt().delete(e.name)}function CV(e){let{subscribe:t,publish:r}=e;return{...e,subscribe:r,publish:t}}var Ms,PV,nc,sc,as,Vo,Pue,Lue,Due,qd=ue(()=>{Ms=w(os()),PV=w(lt()),nc=w(require("joi")),sc=w(oe());k();qh();Ll();ss();as=w(Q()),Vo=w(ge()),{pki:Pue}=require("node-forge"),{HTTP_STATUS_CODES:Lue}=Vo.hdbErrors,Due=nc.default.object({hostname:nc.default.string(),verify_tls:nc.default.boolean(),replicates:nc.default.boolean(),subscriptions:nc.default.array(),revoked_certificates:nc.default.array(),shard:nc.default.number()});o(Mue,"setNode");o(TO,"addNodeBack");o(RO,"removeNodeBack");o(CV,"reverseSubscription")});var JS=M((fUe,DV)=>{"use strict";var{handleHDBError:WS,hdbErrors:vue}=ge(),{HTTP_STATUS_CODES:zS}=vue,{addUpdateNodeValidator:Uue}=$S(),jS=Q(),QS=(k(),v(W)),LV=Nt(),xue=ie(),Zh=mr(),ep=$o(),yO=oe(),Bue=SO(),{Node:Fue,NodeSubscription:Hue}=Fd(),{broadcast:kue}=nt(),{setNode:Gue}=(qd(),v(Gd)),uUe=oe(),dUe=(k(),v(W)),que="Unable to create subscriptions due to 
schema and/or tables not existing on the local or remote node",$ue="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",Vue=yO.get(QS.CONFIG_PARAMS.CLUSTERING_NODENAME);DV.exports=Kue;async function Kue(e,t=!1){if(jS.trace("addNode called with:",e),yO.get(QS.CONFIG_PARAMS.REPLICATION_URL)||yO.get(QS.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Gue(e);ep.checkClusteringEnabled();let r=Uue(e);if(r)throw WS(r,r.message,zS.BAD_REQUEST,void 0,void 0,!0);let n=e.node_name;if(!t){let f=await ep.getNodeRecord(n);if(!xue.isEmptyOrZeroLength(f))throw WS(new Error,`Node '${n}' has already been added, perform update_node to proceed.`,zS.BAD_REQUEST,void 0,void 0,!0)}let{added:s,skipped:i}=await Bue(e.subscriptions,n),a={message:void 0,added:s,skipped:i};if(s.length===0)return a.message=que,a;let c=ep.buildNodePayloads(s,Vue,QS.OPERATIONS_ENUM.ADD_NODE,await ep.getSystemInfo()),l=[];for(let f=0,m=s.length;f<m;f++){let h=s[f];s[f].start_time===void 0&&delete s[f].start_time,l.push(new Hue(h.schema,h.table,h.publish,h.subscribe))}jS.trace("addNode sending remote payload:",c);let u;try{u=await Zh.request(`${n}.${LV.REQUEST_SUFFIX}`,c)}catch(f){jS.error(`addNode received error from request: ${f}`);for(let h=0,p=s.length;h<p;h++){let _=s[h];_.publish=!1,_.subscribe=!1,await Zh.updateRemoteConsumer(_,n)}let m=Zh.requestErrorHandler(f,"add_node",n);throw WS(new Error,m,zS.INTERNAL_SERVER_ERROR,"error",m)}if(u.status===LV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let f=`Error returned from remote node ${n}: ${u.message}`;throw WS(new Error,f,zS.INTERNAL_SERVER_ERROR,"error",f)}jS.trace(u);for(let f=0,m=s.length;f<m;f++){let h=s[f];await Zh.updateRemoteConsumer(h,n),h.subscribe===!0&&await Zh.updateConsumerIterator(h.schema,h.table,n,"start")}let d=new Fue(n,l,u.system_info);return await ep.upsertNodeRecord(d),kue({type:"nats_update"}),i.length>0?a.message=$ue:a.message=`Successfully added '${n}' to manifest`,a}o(Kue,"addNode")});var NO=M((pUe,vV)=>{"use strict";var{handleHDBError:AO,hdbErrors:Yue}=ge(),{HTTP_STATUS_CODES:bO}=Yue,{addUpdateNodeValidator:Wue}=$S(),tp=Q(),XS=(k(),v(W)),MV=Nt(),hUe=ie(),rp=mr(),np=$o(),IO=oe(),{cloneDeep:zue}=require("lodash"),jue=SO(),{Node:Que,NodeSubscription:Jue}=Fd(),{broadcast:Xue}=nt(),{setNode:Zue}=(qd(),v(Gd)),ede="Unable to update subscriptions due to schema and/or tables not existing on the local or remote node",tde="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",rde=IO.get(XS.CONFIG_PARAMS.CLUSTERING_NODENAME);vV.exports=nde;async function nde(e){if(tp.trace("updateNode called with:",e),IO.get(XS.CONFIG_PARAMS.REPLICATION_URL)??IO.get(XS.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Zue(e);np.checkClusteringEnabled();let t=Wue(e);if(t)throw AO(t,t.message,bO.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n,s=await np.getNodeRecord(r);s.length>0&&(n=zue(s));let{added:i,skipped:a}=await jue(e.subscriptions,r),c={message:void 0,updated:i,skipped:a};if(i.length===0)return c.message=ede,c;let l=np.buildNodePayloads(i,rde,XS.OPERATIONS_ENUM.UPDATE_NODE,await np.getSystemInfo());for(let d=0,f=i.length;d<f;d++){let m=i[d];tp.trace(`updateNode updating work stream for node: ${r} subscription:`,m),i[d].start_time===void 0&&delete i[d].start_time}tp.trace("updateNode sending remote payload:",l);let u;try{u=await rp.request(`${r}.${MV.REQUEST_SUFFIX}`,l)}catch(d){tp.error(`updateNode received error from request: ${d}`);let f=rp.requestErrorHandler(d,"update_node",r);throw 
AO(new Error,f,bO.INTERNAL_SERVER_ERROR,"error",f)}if(u.status===MV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let d=`Error returned from remote node ${r}: ${u.message}`;throw AO(new Error,d,bO.INTERNAL_SERVER_ERROR,"error",d)}tp.trace(u);for(let d=0,f=i.length;d<f;d++){let m=i[d];await rp.updateRemoteConsumer(m,r),m.subscribe===!0?await rp.updateConsumerIterator(m.schema,m.table,r,"start"):await rp.updateConsumerIterator(m.schema,m.table,r,"stop")}return n||(n=[new Que(r,[],u.system_info)]),await sde(n[0],i,u.system_info),a.length>0?c.message=tde:c.message=`Successfully updated '${r}'`,c}o(nde,"updateNode");async function sde(e,t,r){let n=e;for(let s=0,i=t.length;s<i;s++){let a=t[s],c=!1;for(let l=0,u=e.subscriptions.length;l<u;l++){let d=n.subscriptions[l];if(d.schema===a.schema&&d.table===a.table){d.publish=a.publish,d.subscribe=a.subscribe,c=!0;break}}c||n.subscriptions.push(new Jue(a.schema,a.table,a.publish,a.subscribe))}n.system_info=r,await np.upsertNodeRecord(n),Xue({type:"nats_update"})}o(sde,"updateNodeTable")});var HV=M((_Ue,FV)=>{"use strict";var BV=require("joi"),{string:UV}=BV.types(),ide=lt(),xV=(k(),v(W)),ode=oe(),ade=Nt();FV.exports=cde;function cde(e){let t=UV.invalid(ode.get(xV.CONFIG_PARAMS.CLUSTERING_NODENAME)).pattern(ade.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),r=BV.object({operation:UV.valid(xV.OPERATIONS_ENUM.REMOVE_NODE).required(),node_name:t});return ide.validateBySchema(e,r)}o(cde,"removeNodeValidator")});var ZS=M((SUe,VV)=>{"use strict";var{handleHDBError:kV,hdbErrors:lde}=ge(),{HTTP_STATUS_CODES:GV}=lde,ude=HV(),sp=Q(),qV=$o(),dde=ie(),$d=(k(),v(W)),$V=Nt(),wO=mr(),OO=oe(),{RemotePayloadObject:fde}=Jh(),{NodeSubscription:mde}=Fd(),hde=Vm(),pde=gl(),{broadcast:Ede}=nt(),{setNode:_de}=(qd(),v(Gd)),gde=OO.get($d.CONFIG_PARAMS.CLUSTERING_NODENAME);VV.exports=Sde;async function Sde(e){if(sp.trace("removeNode called with:",e),OO.get($d.CONFIG_PARAMS.REPLICATION_URL)??OO.get($d.CONFIG_PARAMS.REPLICATION_HOSTNAME))return _de(e);qV.checkClusteringEnabled();let t=ude(e);if(t)throw kV(t,t.message,GV.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n=await qV.getNodeRecord(r);if(dde.isEmptyOrZeroLength(n))throw kV(new Error,`Node '${r}' was not found.`,GV.BAD_REQUEST,void 0,void 0,!0);n=n[0];let s=new fde($d.OPERATIONS_ENUM.REMOVE_NODE,gde,[]),i,a=!1;for(let l=0,u=n.subscriptions.length;l<u;l++){let d=n.subscriptions[l];d.subscribe===!0&&await wO.updateConsumerIterator(d.schema,d.table,r,"stop");try{await wO.updateRemoteConsumer(new mde(d.schema,d.table,!1,!1),r)}catch(f){sp.error(f)}}try{i=await wO.request(`${r}.${$V.REQUEST_SUFFIX}`,s),sp.trace("Remove node reply from remote node:",r,i)}catch(l){sp.error("removeNode received error from request:",l),a=!0}let c=new hde($d.SYSTEM_SCHEMA_NAME,$d.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[r]);return await pde.deleteRecord(c),Ede({type:"nats_update"}),i?.status===$V.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR||a?(sp.error("Error returned from remote node:",r,i?.message),`Successfully removed '${r}' from local manifest, however there was an error reaching remote node. 
Check the logs for more details.`):`Successfully removed '${r}' from manifest`}o(Sde,"removeNode")});var WV=M((RUe,YV)=>{"use strict";var KV=require("joi"),{string:Tde,array:Rde}=KV.types(),yde=lt(),Ade=$S();YV.exports=bde;function bde(e){let t=KV.object({operation:Tde.valid("configure_cluster").required(),connections:Rde.items(Ade.validationSchema).required()});return yde.validateBySchema(e,t)}o(bde,"configureClusterValidator")});var CO=M((AUe,XV)=>{"use strict";var zV=(k(),v(W)),eT=Q(),Ide=ie(),Nde=oe(),wde=ZS(),Ode=JS(),Cde=$o(),Pde=WV(),{handleHDBError:jV,hdbErrors:Lde}=ge(),{HTTP_STATUS_CODES:QV}=Lde,Dde="Configure cluster complete.",Mde="Failed to configure the cluster. Check the logs for more details.",vde="Configure cluster was partially successful. Errors occurred when attempting to configure the following nodes. Check the logs for more details.";XV.exports=Ude;async function Ude(e){eT.trace("configure cluster called with:",e);let t=Pde(e);if(t)throw jV(t,t.message,QV.BAD_REQUEST,void 0,void 0,!0);let r=await Cde.getAllNodeRecords(),n=[];if(Nde.get(zV.CONFIG_PARAMS.CLUSTERING_ENABLED)){for(let d=0,f=r.length;d<f;d++){let m=await JV(wde,{operation:zV.OPERATIONS_ENUM.REMOVE_NODE,node_name:r[d].name},r[d].name);n.push(m)}eT.trace("All results from configure_cluster remove node:",n)}let s=[],i=e.connections.length;for(let d=0;d<i;d++){let f=e.connections[d],m=await JV(Ode,f,f.node_name);s.push(m)}eT.trace("All results from configure_cluster add node:",s);let a=[],c=[],l=!1,u=n.concat(s);for(let d=0,f=u.length;d<f;d++){let m=u[d];m.status==="rejected"&&(eT.error(m.node_name,m?.error?.message,m?.error?.stack),a.includes(m.node_name)||a.push(m.node_name)),(m?.result?.message?.includes?.("Successfully")||m?.result?.includes?.("Successfully"))&&(l=!0),!(typeof m.result=="string"&&m.result.includes("Successfully removed")||m.status==="rejected")&&c.push({node_name:m?.node_name,response:m?.result})}if(Ide.isEmptyOrZeroLength(a))return{message:Dde,connections:c};if(l)return{message:vde,failed_nodes:a,connections:c};throw jV(new Error,Mde,QV.INTERNAL_SERVER_ERROR,void 0,void 0,!0)}o(Ude,"configureCluster");async function JV(e,t,r){try{return{node_name:r,result:await e(t)}}catch(n){return{node_name:r,error:n,status:"rejected"}}}o(JV,"functionWrapper")});var r1=M((IUe,t1)=>{"use strict";var ip=require("joi"),xde=lt(),{validateSchemaExists:ZV,validateTableExists:Bde,validateSchemaName:e1}=ki(),Fde=ip.object({operation:ip.string().valid("purge_stream"),schema:ip.string().custom(ZV).custom(e1).optional(),database:ip.string().custom(ZV).custom(e1).optional(),table:ip.string().custom(Bde).required()});function Hde(e){return xde.validateBySchema(e,Fde)}o(Hde,"purgeStreamValidator");t1.exports=Hde});var PO=M((wUe,n1)=>{"use strict";var{handleHDBError:kde,hdbErrors:Gde}=ge(),{HTTP_STATUS_CODES:qde}=Gde,$de=r1(),Vde=mr(),Kde=$o();n1.exports=Yde;async function Yde(e){e.schema=e.schema??e.database;let t=$de(e);if(t)throw kde(t,t.message,qde.BAD_REQUEST,void 0,void 0,!0);Kde.checkClusteringEnabled();let{schema:r,table:n,options:s}=e;return await Vde.purgeTableStream(r,n,s),`Successfully purged table '${r}.${n}'`}o(Yde,"purgeStream")});var MO=M((CUe,u1)=>{"use strict";var 
DO=$o(),Wde=mr(),rT=oe(),Vd=(k(),v(W)),Hl=Nt(),zde=ie(),LO=Q(),{RemotePayloadObject:jde}=Jh(),{ErrorCode:s1}=require("nats"),{parentPort:i1}=require("worker_threads"),{onMessageByType:Qde}=nt(),{getThisNodeName:Jde}=(ss(),v(Ho)),{requestClusterStatus:Xde}=(qh(),v(Oq)),{getReplicationSharedStatus:Zde,getHDBNodeTable:efe}=(Ll(),v(Sw)),{CONFIRMATION_STATUS_POSITION:tfe,RECEIVED_VERSION_POSITION:rfe,RECEIVED_TIME_POSITION:nfe,SENDING_TIME_POSITION:sfe,RECEIVING_STATUS_POSITION:ife,RECEIVING_STATUS_RECEIVING:ofe}=(Vw(),v(g$)),o1=rT.get(Vd.CONFIG_PARAMS.CLUSTERING_ENABLED),a1=rT.get(Vd.CONFIG_PARAMS.CLUSTERING_NODENAME);u1.exports={clusterStatus:afe,buildNodeStatus:l1};var c1;Qde("cluster-status",async e=>{c1(e)});async function afe(){if(rT.get(Vd.CONFIG_PARAMS.REPLICATION_URL)||rT.get(Vd.CONFIG_PARAMS.REPLICATION_HOSTNAME)){let n;if(i1){i1.postMessage({type:"request-cluster-status"}),n=await new Promise(i=>{c1=i});for(let i of n.connections){let a=i.name;for(let c of i.database_sockets){let l=c.database,u;for(let f of Object.values(databases[l]||{}))if(u=f.auditStore,u)break;if(!u)continue;let d=Zde(u,l,a);c.lastCommitConfirmed=tT(d[tfe]),c.lastReceivedRemoteTime=tT(d[rfe]),c.lastReceivedLocalTime=tT(d[nfe]),c.sendingMessage=tT(d[sfe]),c.lastReceivedStatus=d[ife]===ofe?"Receiving":"Waiting"}}}else n=Xde();n.node_name=Jde();let s=efe().primaryStore.get(n.node_name);return s?.shard&&(n.shard=s.shard),s?.url&&(n.url=s.url),n.is_enabled=!0,n}let e={node_name:a1,is_enabled:o1,connections:[]};if(!o1)return e;let t=await DO.getAllNodeRecords();if(zde.isEmptyOrZeroLength(t))return e;let r=[];for(let n=0,s=t.length;n<s;n++)r.push(l1(t[n],e.connections));return await Promise.allSettled(r),e}o(afe,"clusterStatus");function tT(e){return e?e===1?"Copying":new Date(e).toUTCString():void 0}o(tT,"asDate");async function l1(e,t){let r=e.name,n=new jde(Vd.OPERATIONS_ENUM.CLUSTER_STATUS,a1,void 0,await DO.getSystemInfo()),s,i,a=Hl.CLUSTER_STATUS_STATUSES.OPEN;try{let l=Date.now();s=await Wde.request(Hl.REQUEST_SUBJECT(r),n),i=Date.now()-l,s.status===Hl.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR&&(a=Hl.CLUSTER_STATUS_STATUSES.CLOSED,LO.error(`Error getting node status from ${r} `,s))}catch(l){LO.warn(`Error getting node status from ${r}`,l),l.code===s1.NoResponders?a=Hl.CLUSTER_STATUS_STATUSES.NO_RESPONDERS:l.code===s1.Timeout?a=Hl.CLUSTER_STATUS_STATUSES.TIMEOUT:a=Hl.CLUSTER_STATUS_STATUSES.CLOSED}let c=new cfe(r,a,s?.message?.ports?.clustering,s?.message?.ports?.operations_api,i,s?.message?.uptime,e.subscriptions,s?.message?.system_info);try{let l={name:r,system_info:s?.message?.system_info};e.system_info?.hdb_version!==Vd.PRE_4_0_0_VERSION&&await DO.upsertNodeRecord(l)}catch(l){LO.error("Cluster status encountered an error updating system info for node:",r,l)}t.push(c)}o(l1,"buildNodeStatus");function cfe(e,t,r,n,s,i,a,c){this.node_name=e,this.status=t,this.ports={clustering:r,operations_api:n},this.latency_ms=s,this.uptime=i,this.subscriptions=a,this.system_info=c}o(cfe,"NodeStatusObject")});var UO=M((LUe,d1)=>{"use strict";var{handleHDBError:lfe,hdbErrors:ufe}=ge(),{HTTP_STATUS_CODES:dfe}=ufe,ffe=mr(),mfe=$o(),vO=ie(),nT=require("joi"),hfe=lt(),pfe=2e3,Efe=nT.object({timeout:nT.number().min(1),connected_nodes:nT.boolean(),routes:nT.boolean()});d1.exports=_fe;async function _fe(e){mfe.checkClusteringEnabled();let t=hfe.validateBySchema(e,Efe);if(t)throw lfe(t,t.message,dfe.BAD_REQUEST,void 0,void 0,!0);let{timeout:r,connected_nodes:n,routes:s}=e,i=n===void 0||vO.autoCastBoolean(n),a=s===void 
0||vO.autoCastBoolean(s),c={nodes:[]},l=await ffe.getServerList(r??pfe),u={};if(i)for(let d=0,f=l.length;d<f;d++){let m=l[d].statsz;m&&(u[l[d].server.name]=m.routes)}for(let d=0,f=l.length;d<f;d++){if(l[d].statsz)continue;let m=l[d].server,h=l[d].data;if(m.name.endsWith("-hub")){let p={name:m.name.slice(0,-4),response_time:l[d].response_time};i&&(p.connected_nodes=[],u[m.name]&&u[m.name].forEach(_=>{p.connected_nodes.includes(_.name.slice(0,-4))||p.connected_nodes.push(_.name.slice(0,-4))})),a&&(p.routes=h.cluster?.urls?h.cluster?.urls.map(_=>({host:_.split(":")[0],port:vO.autoCast(_.split(":")[1])})):[]),c.nodes.push(p)}}return c}o(_fe,"clusterNetwork")});var p1=M((MUe,h1)=>{"use strict";var xO=require("joi"),f1=lt(),{routeConstraints:m1}=Jy();h1.exports={setRoutesValidator:gfe,deleteRoutesValidator:Sfe};function gfe(e){let t=xO.object({server:xO.valid("hub","leaf"),routes:m1.required()});return f1.validateBySchema(e,t)}o(gfe,"setRoutesValidator");function Sfe(e){let t=xO.object({routes:m1.required()});return f1.validateBySchema(e,t)}o(Sfe,"deleteRoutesValidator")});var sT=M((UUe,y1)=>{"use strict";var Ko=yt(),BO=ie(),vs=(k(),v(W)),Kd=oe(),E1=p1(),{handleHDBError:_1,hdbErrors:Tfe}=ge(),{HTTP_STATUS_CODES:g1}=Tfe,S1="cluster routes successfully set",T1="cluster routes successfully deleted";y1.exports={setRoutes:yfe,getRoutes:Afe,deleteRoutes:bfe};function Rfe(e){let t=Ko.getClusteringRoutes(),r=e.server==="hub"?t.hub_routes:t.leaf_routes,n=e.server==="hub"?t.leaf_routes:t.hub_routes,s=[],i=[];for(let a=0,c=e.routes.length;a<c;a++){let l=e.routes[a];l.port=BO.autoCast(l.port);let u=r.some(f=>f.host===l.host&&f.port===l.port),d=n.some(f=>f.host===l.host&&f.port===l.port);u||d?s.push(l):(r.push(l),i.push(l))}return e.server==="hub"?Ko.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r):Ko.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,r),{message:S1,set:i,skipped:s}}o(Rfe,"setRoutesNats");function yfe(e){let t=E1.setRoutesValidator(e);if(t)throw _1(t,t.message,g1.BAD_REQUEST,void 0,void 0,!0);if(Kd.get(vs.CONFIG_PARAMS.CLUSTERING_ENABLED))return Rfe(e);let r=[],n=[],s=Kd.get(vs.CONFIG_PARAMS.REPLICATION_ROUTES)??[];return e.routes.forEach(i=>{R1(s,i)?n.push(i):(s.push(i),r.push(i))}),Ko.updateConfigValue(vs.CONFIG_PARAMS.REPLICATION_ROUTES,s),{message:S1,set:r,skipped:n}}o(yfe,"setRoutes");function R1(e,t){return typeof t=="string"?e.includes(t):typeof t=="object"&&t!==null?e.some(r=>(r.host===t.host||r.hostname===t.hostname)&&r.port===t.port):!1}o(R1,"existsInArray");function Afe(){if(Kd.get(vs.CONFIG_PARAMS.CLUSTERING_ENABLED)){let e=Ko.getClusteringRoutes();return{hub:e.hub_routes,leaf:e.leaf_routes}}else return Kd.get(vs.CONFIG_PARAMS.REPLICATION_ROUTES)??[]}o(Afe,"getRoutes");function bfe(e){let t=E1.deleteRoutesValidator(e);if(t)throw _1(t,t.message,g1.BAD_REQUEST,void 0,void 0,!0);if(Kd.get(vs.CONFIG_PARAMS.CLUSTERING_ENABLED))return Ife(e);let r=[],n=[],s=Kd.get(vs.CONFIG_PARAMS.REPLICATION_ROUTES)??[],i=[];return s.forEach(a=>{R1(e.routes,a)?r.push(a):(i.push(a),n.push(a))}),Ko.updateConfigValue(vs.CONFIG_PARAMS.REPLICATION_ROUTES,i),{message:T1,deleted:r,skipped:n}}o(bfe,"deleteRoutes");function Ife(e){let t=Ko.getClusteringRoutes(),r=t.hub_routes,n=t.leaf_routes,s=[],i=[],a=!1,c=!1;for(let l=0,u=e.routes.length;l<u;l++){let d=e.routes[l],f=!1;for(let m=0,h=r.length;m<h;m++){let p=r[m];if(d.host===p.host&&d.port===p.port){r.splice(m,1),f=!0,a=!0,s.push(d);break}}if(!f){let m=!0;for(let h=0,p=n.length;h<p;h++){let 
_=n[h];if(d.host===_.host&&d.port===_.port){n.splice(h,1),c=!0,m=!1,s.push(d);break}}m&&i.push(d)}}return a&&(r=BO.isEmptyOrZeroLength(r)?null:r,Ko.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r)),c&&(n=BO.isEmptyOrZeroLength(n)?null:n,Ko.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,n)),{message:T1,deleted:s,skipped:i}}o(Ife,"deleteRoutesNats")});var b1=M((BUe,A1)=>{"use strict";var op=require("alasql"),kl=require("recursive-iterator"),gi=Q(),Nfe=ie(),ap=(k(),v(W)),FO=class{static{o(this,"sqlStatementBucket")}constructor(t){this.ast=t,this.affected_attributes=new Map,this.table_lookup=new Map,this.schema_lookup=new Map,this.table_to_schema_lookup=new Map,Ofe(this.ast,this.affected_attributes,this.table_lookup,this.schema_lookup,this.table_to_schema_lookup)}getAttributesBySchemaTableName(t,r){if(!t||!r||!this.affected_attributes)return[];if(this.affected_attributes.has(t))return!this.affected_attributes.get(t).has(r)&&(r=this.table_lookup.get(r),!r)?[]:this.affected_attributes.get(t).get(r)}getAllTables(){let t=[];if(!this.affected_attributes)return t;for(let r of this.affected_attributes.keys())t.push(Array.from(this.affected_attributes.get(r).keys()));return t}getTablesBySchemaName(t){return!t||!this.affected_attributes?[]:Array.from(this.affected_attributes.get(t).keys())}getSchemas(){return this.affected_attributes?Array.from(this.affected_attributes.keys()):[]}getAst(){return this.ast}updateAttributeWildcardsForRolePerms(t){let r=this.ast.columns.filter(s=>ap.SEARCH_WILDCARDS.includes(s.columnid));if(r.length===0)return this.ast;let n=this.ast.from[0].databaseid;return this.ast.columns=this.ast.columns.filter(s=>!ap.SEARCH_WILDCARDS.includes(s.columnid)),r.forEach(s=>{let i=this.table_to_schema_lookup.has(s.tableid)?this.table_to_schema_lookup.get(s.tableid):n,a=this.table_lookup.has(s.tableid)?this.table_lookup.get(s.tableid):this.ast.from[0].tableid;if(t[i]&&t[i].tables[a]&&t[i].tables[a][ap.PERMS_CRUD_ENUM.READ]){let c;t[i].tables[a].attribute_permissions.length>0?c=wfe(t[i].tables[a].attribute_permissions):c=global.hdb_schema[i][a].attributes.map(u=>({attribute_name:u.attribute}));let l=this.affected_attributes.get(i).get(a).filter(u=>!ap.SEARCH_WILDCARDS.includes(u));c.forEach(({attribute_name:u})=>{let d=new op.yy.Column({columnid:u});s.tableid&&(d.tableid=s.tableid),this.ast.columns.push(d),l.includes(u)||l.push(u)}),this.affected_attributes.get(i).set(a,l)}}),this.ast}};function wfe(e){return e.filter(t=>t[ap.PERMS_CRUD_ENUM.READ])}o(wfe,"filterReadRestrictedAttrs");function Ofe(e,t,r,n,s){Cfe(e,t,r,n,s)}o(Ofe,"interpretAST");function cp(e,t,r,n,s){if(!(!e||!e.databaseid)&&(t.has(e.databaseid)||t.set(e.databaseid,new Map),t.get(e.databaseid).has(e.tableid)||t.get(e.databaseid).set(e.tableid,[]),e.as&&(r.has(e.as)||r.set(e.as,e.tableid),n&&!n.has(e.as)&&n.set(e.as,e.databaseid)),s)){let i=e.databaseid,a=e.tableid;e.as&&(a=e.as),s.set(a,i)}}o(cp,"addSchemaTableToMap");function Cfe(e,t,r,n,s){if(!e){gi.info("getRecordAttributesAST: invalid SQL syntax tree");return}e instanceof op.yy.Insert?Mfe(e,t,r):e instanceof op.yy.Select?Pfe(e,t,r,n,s):e instanceof op.yy.Update?Lfe(e,t,r):e instanceof op.yy.Delete?Dfe(e,t,r):gi.error("AST in getRecordAttributesAST() is not a valid SQL type.")}o(Cfe,"getRecordAttributesAST");function Pfe(e,t,r,n,s){if(!e){gi.info("getSelectAttributes: invalid SQL syntax tree");return}if(!e.from||e.from[0]===void 0)return;let i=e.from[0].databaseid;if(Nfe.isEmptyOrZeroLength(i)){gi.error("No schema 
specified");return}e.from.forEach(c=>{cp(c,t,r,n,s)}),e.joins&&e.joins.forEach(c=>{c.as&&(c.table.as=c.as),cp(c.table,t,r,n,s)});let a=new kl(e.columns);for(let{node:c}of a)if(c&&c.columnid){let l=c.tableid,u=n.has(l)?n.get(l):i;if(l||(l=e.from[0].tableid),!t.get(u).has(l))if(r.has(l))l=r.get(l);else{gi.info(`table specified as ${l} not found.`);return}t.get(u).get(l).indexOf(c.columnid)<0&&t.get(u).get(l).push(c.columnid)}if(e.where){let c=new kl(e.where),l=e.from[0].tableid;for(let{node:u}of c)if(u&&u.columnid){let d=u.tableid?u.tableid:l;if(!t.get(i).has(d))if(r.has(d))d=r.get(d);else{gi.info(`table specified as ${d} not found.`);continue}t.get(i).get(d).indexOf(u.columnid)<0&&t.get(i).get(d).push(u.columnid)}}if(e.joins&&e.joins.forEach(c=>{let l=new kl(c.on);for(let{node:u}of l)if(u&&u.columnid){let d=u.tableid,f=s.get(d);if(!t.get(f).has(d))if(r.has(d))d=r.get(d);else{gi.info(`table specified as ${d} not found.`);continue}t.get(f).get(d).indexOf(u.columnid)<0&&t.get(f).get(d).push(u.columnid)}}),e.order){let c=new kl(e.order);for(let{node:l}of c)if(l&&l.columnid){let u=l.tableid,d=n.has(u)?n.get(u):i;if(u||(u=e.from[0].tableid),!t.get(d).has(u))if(r.has(u))u=r.get(u);else{gi.info(`table specified as ${u} not found.`);return}t.get(d).get(u).indexOf(l.columnid)<0&&t.get(d).get(u).push(l.columnid)}}}o(Pfe,"getSelectAttributes");function Lfe(e,t,r){if(!e){gi.info("getUpdateAttributes: invalid SQL syntax tree");return}let n=new kl(e.columns),s=e.table.databaseid;cp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&HO(e.table.tableid,s,i.columnid,t,r)}o(Lfe,"getUpdateAttributes");function Dfe(e,t,r){if(!e){gi.info("getDeleteAttributes: invalid SQL syntax tree");return}let n=new kl(e.where),s=e.table.databaseid;cp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&HO(e.table.tableid,s,i.columnid,t,r)}o(Dfe,"getDeleteAttributes");function Mfe(e,t,r){if(!e){gi.info("getInsertAttributes: invalid SQL syntax tree");return}let n=new kl(e.columns),s=e.into.databaseid;cp(e.into,t,r);for(let{node:i}of n)i&&i.columnid&&HO(e.into.tableid,s,i.columnid,t,r)}o(Mfe,"getInsertAttributes");function HO(e,t,r,n,s){if(!n.get(t))return;let i=e;n.get(t).has(i)||(i=s.get(i)),n.get(t).get(i).push(r)}o(HO,"pushAttribute");A1.exports=FO});var N1=M((HUe,I1)=>{"use strict";var iT=(k(),v(W)),oT=class{static{o(this,"BaseLicense")}constructor(t=0,r=iT.RAM_ALLOCATION_ENUM.DEFAULT,n=iT.LICENSE_VALUES.VERSION_DEFAULT,s){this.exp_date=t,this.ram_allocation=r,this.version=n,this.fingerprint=s}},kO=class extends oT{static{o(this,"ExtendedLicense")}constructor(t=0,r=iT.RAM_ALLOCATION_ENUM.DEFAULT,n=iT.LICENSE_VALUES.VERSION_DEFAULT,s,i=!1){super(t,r,n,s),this.enterprise=i}};I1.exports={BaseLicense:oT,ExtendedLicense:kO}});var zd=M((GUe,D1)=>{"use strict";var Wd=require("fs-extra"),aT=(_g(),v(Eg)),O1=require("crypto"),vfe=require("moment"),Ufe=require("uuid").v4,hn=Q(),qO=require("path"),xfe=ie(),Gl=(k(),v(W)),{totalmem:w1}=require("os"),Bfe=N1().ExtendedLicense,Yd="invalid license key format",Ffe="061183",Hfe="mofi25",kfe="aes-256-cbc",Gfe=16,qfe=32,C1=oe(),{resolvePath:P1}=yt();C1.initSync();var GO;D1.exports={validateLicense:L1,generateFingerPrint:Vfe,licenseSearch:KO,getLicense:Wfe,checkMemoryLimit:zfe};function $O(){return qO.join(C1.getHdbBasePath(),Gl.LICENSE_KEY_DIR_NAME,Gl.LICENSE_FILE_NAME)}o($O,"getLicenseDirPath");function $fe(){let e=$O();return P1(qO.join(e,Gl.LICENSE_FILE_NAME))}o($fe,"getLicenseFilePath");function VO(){let e=$O();return P1(qO.join(e,Gl.REG_KEY_FILE_NAME))}o(VO,"getFingerPrintFilePath");async function 
Vfe(){let e=VO();try{return await Wd.readFile(e,"utf8")}catch(t){if(t.code==="ENOENT")return await Kfe();throw hn.error(`Error writing fingerprint file to ${e}`),hn.error(t),new Error("There was an error generating the fingerprint")}}o(Vfe,"generateFingerPrint");async function Kfe(){let e=Ufe(),t=aT.hash(e,aT.HASH_FUNCTION.MD5),r=VO();try{await Wd.mkdirp($O()),await Wd.writeFile(r,t)}catch(n){if(n.code==="EEXIST")return t;throw hn.error(`Error writing fingerprint file to ${r}`),hn.error(n),new Error("There was an error generating the fingerprint")}return t}o(Kfe,"writeFingerprint");function L1(e,t){let r={valid_license:!1,valid_date:!1,valid_machine:!1,exp_date:null,ram_allocation:Gl.RAM_ALLOCATION_ENUM.DEFAULT,version:Gl.LICENSE_VALUES.VERSION_DEFAULT};if(!e)return hn.error("empty license key passed to validate."),r;let n=VO(),s=!1;try{s=Wd.statSync(n)}catch(i){hn.error(i)}if(s){let i;try{i=Wd.readFileSync(n,"utf8")}catch{hn.error("error validating this machine in the license"),r.valid_machine=!1;return}let a=e.split(Hfe),c=a[1];c=Buffer.concat([Buffer.from(c)],Gfe);let l=Buffer.concat([Buffer.from(i)],qfe),u=O1.createDecipheriv(kfe,l,c);r.valid_date=!0,r.valid_license=!0,r.valid_machine=!0;let d=null;try{d=u.update(a[0],"hex","utf8"),d.trim(),d+=u.final("utf8")}catch{let h=Yfe(a[0],i);if(h)d=h;else throw r.valid_license=!1,r.valid_machine=!1,console.error(Yd),hn.error(Yd),new Error(Yd)}let f;if(isNaN(d))try{f=JSON.parse(d),r.version=f.version,r.exp_date=f.exp_date,isNaN(r.exp_date)&&(r.exp_date=new Date(r.exp_date).getTime()),f.ram_allocation&&(r.ram_allocation=f.ram_allocation)}catch{throw console.error(Yd),hn.error(Yd),new Error(Yd)}else r.exp_date=d;r.exp_date<vfe().valueOf()&&(r.valid_date=!1),aT.validate(a[1],`${Ffe}${i}${t}`,aT.HASH_FUNCTION.MD5)||(r.valid_license=!1)}else r.valid_license=!1,r.valid_machine=!1;return r.valid_license&&r.valid_machine&&r.valid_date||hn.error("Invalid licence"),r}o(L1,"validateLicense");function Yfe(e,t){try{let r=O1.createDecipher("aes192",t),n=r.update(e,"hex","utf8");return n.trim(),n+=r.final("utf8"),n}catch{hn.warn("Check old license failed")}}o(Yfe,"checkOldLicense");function KO(){let e=new Bfe,t=[];try{t=Wd.readFileSync($fe(),"utf-8").split(`\r
18
- `)}catch(r){r.code==="ENOENT"?hn.debug("no license file found"):hn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(xfe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=L1(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){hn.error("There was an error parsing the license string."),hn.error(s),e.ram_allocation=Gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return GO=e,e}o(KO,"licenseSearch");async function Wfe(){return GO||await KO(),GO}o(Wfe,"getLicense");function zfe(){let e=KO().ram_allocation,t=process.constrainedMemory?.()||w1();if(t=Math.round(Math.min(t,w1())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(zfe,"checkMemoryLimit")});var zO=M(($Ue,x1)=>{var cT=zd(),M1=require("chalk"),cs=Q(),v1=require("prompt"),{promisify:jfe}=require("util"),YO=(k(),v(W)),Qfe=require("fs-extra"),Jfe=require("path"),Xfe=ie(),{packageJson:Zfe}=Rt(),U1=oe();U1.initSync();var eme=require("moment"),tme=jfe(v1.get),rme=Jfe.join(U1.getHdbBasePath(),YO.LICENSE_KEY_DIR_NAME,YO.LICENSE_FILE_NAME,YO.LICENSE_FILE_NAME);x1.exports={getFingerprint:sme,setLicense:nme,parseLicense:WO,register:ime,getRegistrationInfo:ame};async function nme(e){if(e&&e.key&&e.company){try{cs.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await WO(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw cs.error(r),cs.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(nme,"setLicense");async function sme(){let e={};try{e=await cT.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw cs.error(r),cs.error(t),new Error(r)}return e}o(sme,"getFingerprint");async function WO(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");cs.info("Validating license input...");let r=cT.validateLicense(e,t);if(cs.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(cs.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(cs.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{cs.info("writing license to disk"),await Qfe.writeFile(rme,JSON.stringify({license_key:e,company:t}))}catch(n){throw cs.error("Failed to write License"),n}return"Registration successful."}o(WO,"parseLicense");async function ime(){let e=await ome();return WO(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(ime,"register");async function ome(){let e=await cT.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:M1.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:M1.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{v1.start()}catch(n){cs.error(n)}let r;try{r=await tme(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(ome,"promptForRegistration");async function ame(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await cT.getLicense()}catch(r){throw cs.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Xfe.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Zfe.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=eme.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(ame,"getRegistrationInfo")});var F1=M((KUe,B1)=>{"use strict";var cme=Nt(),jO=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+cme.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};B1.exports=jO});var G1=M((WUe,k1)=>{"use strict";var H1=Nt(),QO=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+H1.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+H1.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};k1.exports=QO});var $1=M((jUe,q1)=>{"use strict";var JO=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};q1.exports=JO});var K1=M((JUe,V1)=>{"use strict";var lme=Nt(),XO=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+lme.SERVER_SUFFIX.ADMIN,this.password=r}};V1.exports=XO});var fT=M((ZUe,z1)=>{"use strict";var ql=require("path"),$l=require("fs-extra"),ume=F1(),dme=G1(),fme=$1(),mme=K1(),ZO=ts(),Qd=ie(),xn=yt(),uT=(k(),v(W)),lp=Nt(),{CONFIG_PARAMS:rr}=uT,Jd=Q(),up=oe(),Y1=Vi(),eC=mr(),hme=os(),jd="clustering",pme=1e4,W1=50;z1.exports={generateNatsConfig:_me,removeNatsConfig:gme,getHubConfigPath:Eme};function Eme(){let e=up.get(rr.ROOTPATH);return ql.join(e,jd,lp.NATS_CONFIG_FILES.HUB_SERVER)}o(Eme,"getHubConfigPath");async function _me(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=up.get(rr.ROOTPATH);$l.ensureDirSync(ql.join(r,"clustering","leaf")),up.initSync();let n=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERT_AUTH),s=xn.getConfigFromFile(rr.CLUSTERING_TLS_PRIVATEKEY),i=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERTIFICATE);!await $l.exists(i)&&!await $l.exists(!n)&&await hme.createNatsCerts();let 
a=ql.join(r,jd,lp.PID_FILES.HUB),c=ql.join(r,jd,lp.PID_FILES.LEAF),l=xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=ql.join(r,jd,lp.NATS_CONFIG_FILES.HUB_SERVER),d=ql.join(r,jd,lp.NATS_CONFIG_FILES.LEAF_SERVER),f=xn.getConfigFromFile(rr.CLUSTERING_TLS_INSECURE),m=xn.getConfigFromFile(rr.CLUSTERING_TLS_VERIFY),h=xn.getConfigFromFile(rr.CLUSTERING_NODENAME),p=xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await eC.checkNATSServerInstalled()||dT("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await ZO.listUsers(),g=xn.getConfigFromFile(rr.CLUSTERING_USER),y=await ZO.getClusterUser();(Qd.isEmpty(y)||y.active!==!0)&&dT(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await lT(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await lT(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await lT(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),await lT(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],R=[];for(let[q,K]of _.entries())K.role?.role===uT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new mme(K.username,Y1.decrypt(K.hash))),R.push(new fme(K.username,Y1.decrypt(K.hash))));let N=[],{hub_routes:O}=xn.getClusteringRoutes();if(!Qd.isEmptyOrZeroLength(O))for(let q of O)N.push(`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@${q.host}:${q.port}`);let F=new ume(xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NAME),xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,R);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=Qd.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===uT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await $l.writeJson(u,F),Jd.trace(`Hub server config written to ${u}`));let Z=`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,G=`tls://${y.uri_encoded_name}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,Y=new dme(xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[G],T,R,i,s,n,f);n==null&&delete Y.tls.ca_file,(t===void 0||t===uT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await $l.writeJson(d,Y),Jd.trace(`Leaf server config written to ${d}`))}o(_me,"generateNatsConfig");async function lT(e){let t=up.get(e);return Qd.isEmpty(t)&&dT(`port undefined for '${e}'`),await Qd.isPortTaken(t)&&dT(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(lT,"isPortAvailable");function dT(e){let t=`Error generating clustering config: ${e}`;Jd.error(t),console.error(t),process.exit(1)}o(dT,"generateNatsConfigError");async function gme(e){let{port:t,config_file:r}=eC.getServerConfig(e),{username:n,decrypt_hash:s}=await ZO.getClusterUser(),i=0,a=2e3;for(;i<W1;){try{let d=await eC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){Jd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=W1)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&Jd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await Qd.asyncSetTimeout(u)}let c="0".repeat(pme),l=ql.join(up.get(rr.ROOTPATH),jd,r);await $l.writeFile(l,c),await $l.remove(l),Jd.notify(e,"started.")}o(gme,"removeNatsConfig")});var nC={};Oe(nC,{compactOnStart:()=>Sme,copyDb:()=>eK});async function Sme(){ic.notify("Running compact on start"),console.log("Running compact on start");let e=(0,tC.get)(U.ROOTPATH),t=new Map,r=st();(0,rC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,mT.join)(e,"backup",n+".mdb"),a=(0,mT.join)(e,vc,n+"-copy.mdb"),c=0;try{c=await j1(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){ic.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await eK(n,a),console.log("Backing up",n,"to",i);try{await(0,Vl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}}try{cd()}catch(n){ic.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}for(let[n,{dbPath:s,copyDest:i}]of t)console.log("Moving copy compacted",n,"to",s),await(0,Vl.move)(i,s,{overwrite:!0}),await(0,Vl.remove)((0,mT.join)(e,vc,`${n}-copy.mdb-lock`));try{cd()}catch(n){ic.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){ic.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,rC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Vl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw cd(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=!0,c=await j1(n);if(console.log("Database",n,"after compact has a total record count of",c),i!==c){a=!1;let l=`There is a discrepancy between pre and post compact record count for database ${n}.
19
- Total record count before compaction: ${i}, total after: ${c}.
20
- Database backup has not been removed and can be found here: ${s}`;ic.error(l),console.error(l)}(0,tC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)===!0||a===!1||(console.log("Removing backup",s),await(0,Vl.remove)(s))}}async function j1(e){let t=await(0,Z1.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function Xd(){}async function eK(e,t){console.log(`Copying database ${e} to ${t}`);let r=st()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=Xd,m.primaryStore.remove=Xd;for(let h in m.indices){let p=m.indices[h];p.put=Xd,p.remove=Xd}m.auditStore&&(m.auditStore.put=Xd,m.auditStore.remove=Xd),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,Q1.open)(new J1.default(t)),c=a.openDB(hT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=Gg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let y=new X1.default(!p,p);y.encoding="binary",y.compression=_;let T=n.openDB(m,y);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",y.compression=g;let R=a.openDB(m,y);R.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,R,p,d)}if(i){let m=n.openDB(hT.AUDIT_STORE_NAME,Rm);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,y=0,T=0,R=1e7,N=null;for(;R-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),y+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",y,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",y,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var Q1,mT,Vl,tC,J1,X1,hT,Z1,rC,ic,sC=ue(()=>{De();Q1=require("lmdb"),mT=require("path"),Vl=require("fs-extra"),tC=w(oe()),J1=w(Lm()),X1=w(Pm()),hT=w(qt());k();Di();Z1=w(Ba()),rC=w(yt()),ic=w(Q());o(Sme,"compactOnStart");o(j1,"getTotalDBRecordCount");o(Xd,"noop");o(eK,"copyDb")});var ef=M((c0e,aK)=>{"use strict";var Tme=require("minimist"),{isMainThread:oC,parentPort:fp,threadId:i0e}=require("worker_threads"),ft=(k(),v(W)),to=Q(),aC=ie(),ET=fT(),pT=mr(),o0e=Nt(),sK=yt(),Si=tf(),tK=kd(),{compactOnStart:Rme}=(sC(),v(nC)),yme=Uc(),{restartWorkers:_T,onMessageByType:Ame}=nt(),{handleHDBError:bme,hdbErrors:Ime}=ge(),{HTTP_STATUS_CODES:Nme}=Ime,mp=oe(),{sendOperationToNode:rK,getThisNodeName:wme,monitorNodeCAs:Ome}=(ss(),v(Ho)),{getHDBNodeTable:a0e}=(Ll(),v(Sw));mp.initSync();var dp=`Restarting HarperDB. This may take up to ${ft.RESTART_TIMEOUT_MS/1e3} seconds.`,Cme="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",nK="Clustering is not enabled so cannot be restarted",Pme="Invalid service",Zd,Us;aK.exports={restart:iK,restartService:cC};oC&&Ame(ft.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await cC({service:e.workerType}):iK({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function iK(e){Us=Object.keys(e).length===0,Zd=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB);let t=Tme(process.argv);if(t.service){await cC(t);return}if(Us&&!Zd){console.error(Cme);return}if(Us&&console.log(dp),Zd){Si.enterPM2Mode(),to.notify(dp);let r=yme(Object.keys(ft.CONFIG_PARAM_MAP),!0);return aC.isEmptyOrZeroLength(Object.keys(r))||sK.updateConfigValue(void 0,void 0,r,!0,!0),Lme(),dp}return oC?(to.notify(dp),mp.get(ft.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Rme(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{_T()},50)):fp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART}),dp}o(iK,"restart");async function cC(e){let{service:t}=e;if(ft.HDB_PROCESS_SERVICES[t]===void 0)throw bme(new Error,Pme,Nme.BAD_REQUEST,void 0,void 0,!0);if(Si.expectedRestartOfChildren(),Zd=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB),!oC){e.replicated&&Ome(),fp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART,workerType:t}),fp.ref(),await new Promise(s=>{fp.on("message",i=>{i.type==="restart-complete"&&(s(),fp.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===wme())continue;let i;try{({job_id:i}=await rK(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await rK(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case ft.HDB_PROCESS_SERVICES.clustering:if(!mp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=nK;break}Us&&console.log("Restarting clustering"),to.notify("Restarting clustering"),await oK();break;case ft.HDB_PROCESS_SERVICES.clustering_config:case ft.HDB_PROCESS_SERVICES["clustering config"]:if(!mp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=nK;break}Us&&console.log("Restarting clusteringConfig"),to.notify("Restarting clustering_config"),await Si.reloadClustering();break;case"custom_functions":case"custom functions":case ft.HDB_PROCESS_SERVICES.harperdb:case ft.HDB_PROCESS_SERVICES.http_workers:case ft.HDB_PROCESS_SERVICES.http:if(Us&&!Zd){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Us&&console.log("Restarting httpWorkers"),to.notify("Restarting http_workers"),Us?await Si.restart(ft.PROCESS_DESCRIPTORS.HDB):await _T("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(to.error(r),Us&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(cC,"restartService");async function Lme(){await oK(),await Si.restart(ft.PROCESS_DESCRIPTORS.HDB),await aC.asyncSetTimeout(2e3),mp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await iC(),Us&&(await pT.closeConnection(),process.exit(0))}o(Lme,"restartPM2Mode");async function oK(){if(!sK.getConfigFromFile(ft.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await tK.getHDBProcessInfo()).clustering.length===0)to.trace("Clustering not running, restart will start clustering services"),await ET.generateNatsConfig(!0),await Si.startClusteringProcesses(),await Si.startClusteringThreads(),await iC(),Us&&await pT.closeConnection();else{await ET.generateNatsConfig(!0),Zd?(to.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await tK.getHDBProcessInfo()).clustering.forEach(s=>{to.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await aC.asyncSetTimeout(3e3),await iC(),await pT.updateLocalStreams(),Us&&await pT.closeConnection(),to.trace("Restart clustering restarting ingest and reply service threads");let t=_T(ft.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=_T(ft.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(oK,"restartClustering");async function iC(){await ET.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await ET.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(iC,"removeNatsConfig")});var gK=M((d0e,_K)=>{"use strict";var u0e=require("lodash"),Bn=(k(),v(W)),{handleHDBError:cK,hdbErrors:Dme}=ge(),{HDB_ERROR_MSGS:Mme,HTTP_STATUS_CODES:vme}=Dme,lC=Q();_K.exports={getRolePermissions:xme};var Kl=Object.create(null),Ume=o(e=>({key:e,perms:{}}),"permsTemplateObj"),fK=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),mK=o((e=!1,t=!1,r=!1,n=!1)=>({[Bn.PERMS_CRUD_ENUM.READ]:e,[Bn.PERMS_CRUD_ENUM.INSERT]:t,[Bn.PERMS_CRUD_ENUM.UPDATE]:r,[Bn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),uC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...mK(t,r,n,s)}),"tablePermsTemplate"),lK=o((e,t=mK())=>({attribute_name:e,describe:EK(t),[hp]:t[hp],[dC]:t[dC],[fC]:t[fC]}),"attrPermsTemplate"),uK=o((e,t=!1)=>({attribute_name:e,describe:t,[hp]:t}),"timestampAttrPermsTemplate"),{READ:hp,INSERT:dC,UPDATE:fC}=Bn.PERMS_CRUD_ENUM,hK=Object.values(Bn.PERMS_CRUD_ENUM),pK=[hp,dC,fC];function xme(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Bn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Kl[t]&&Kl[t].key===n)return Kl[t].perms;let s=Bme(e,r);return Kl[t]?Kl[t].key=n:Kl[t]=Ume(n),Kl[t].perms=s,s}catch(r){if(!e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Bn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${role_name}' must be updated to align with new structure from the 2.2.0 release.`;throw lC.error(n),lC.debug(r),cK(new Error,Mme.OUTDATED_PERMS_TRANSLATION_ERROR,vme.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
18
+ `)}catch(r){r.code==="ENOENT"?hn.debug("no license file found"):hn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(xfe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=L1(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){hn.error("There was an error parsing the license string."),hn.error(s),e.ram_allocation=Gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return GO=e,e}o(KO,"licenseSearch");async function Wfe(){return GO||await KO(),GO}o(Wfe,"getLicense");function zfe(){let e=KO().ram_allocation,t=process.constrainedMemory?.()||w1();if(t=Math.round(Math.min(t,w1())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(zfe,"checkMemoryLimit")});var zO=M(($Ue,x1)=>{var cT=zd(),M1=require("chalk"),cs=Q(),v1=require("prompt"),{promisify:jfe}=require("util"),YO=(k(),v(W)),Qfe=require("fs-extra"),Jfe=require("path"),Xfe=ie(),{packageJson:Zfe}=Rt(),U1=oe();U1.initSync();var eme=require("moment"),tme=jfe(v1.get),rme=Jfe.join(U1.getHdbBasePath(),YO.LICENSE_KEY_DIR_NAME,YO.LICENSE_FILE_NAME,YO.LICENSE_FILE_NAME);x1.exports={getFingerprint:sme,setLicense:nme,parseLicense:WO,register:ime,getRegistrationInfo:ame};async function nme(e){if(e&&e.key&&e.company){try{cs.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await WO(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw cs.error(r),cs.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(nme,"setLicense");async function sme(){let e={};try{e=await cT.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw cs.error(r),cs.error(t),new Error(r)}return e}o(sme,"getFingerprint");async function WO(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");cs.info("Validating license input...");let r=cT.validateLicense(e,t);if(cs.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(cs.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(cs.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{cs.info("writing license to disk"),await Qfe.writeFile(rme,JSON.stringify({license_key:e,company:t}))}catch(n){throw cs.error("Failed to write License"),n}return"Registration successful."}o(WO,"parseLicense");async function ime(){let e=await ome();return WO(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(ime,"register");async function ome(){let e=await cT.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:M1.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:M1.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{v1.start()}catch(n){cs.error(n)}let r;try{r=await tme(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(ome,"promptForRegistration");async function ame(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await cT.getLicense()}catch(r){throw cs.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Xfe.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Zfe.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=eme.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(ame,"getRegistrationInfo")});var F1=M((KUe,B1)=>{"use strict";var cme=Nt(),jO=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+cme.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};B1.exports=jO});var G1=M((WUe,k1)=>{"use strict";var H1=Nt(),QO=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+H1.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+H1.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};k1.exports=QO});var $1=M((jUe,q1)=>{"use strict";var JO=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};q1.exports=JO});var K1=M((JUe,V1)=>{"use strict";var lme=Nt(),XO=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+lme.SERVER_SUFFIX.ADMIN,this.password=r}};V1.exports=XO});var fT=M((ZUe,z1)=>{"use strict";var ql=require("path"),$l=require("fs-extra"),ume=F1(),dme=G1(),fme=$1(),mme=K1(),ZO=ts(),Qd=ie(),xn=yt(),uT=(k(),v(W)),lp=Nt(),{CONFIG_PARAMS:rr}=uT,Jd=Q(),up=oe(),Y1=Vi(),eC=mr(),hme=os(),jd="clustering",pme=1e4,W1=50;z1.exports={generateNatsConfig:_me,removeNatsConfig:gme,getHubConfigPath:Eme};function Eme(){let e=up.get(rr.ROOTPATH);return ql.join(e,jd,lp.NATS_CONFIG_FILES.HUB_SERVER)}o(Eme,"getHubConfigPath");async function _me(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=up.get(rr.ROOTPATH);$l.ensureDirSync(ql.join(r,"clustering","leaf")),up.initSync();let n=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERT_AUTH),s=xn.getConfigFromFile(rr.CLUSTERING_TLS_PRIVATEKEY),i=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERTIFICATE);!await $l.exists(i)&&!await $l.exists(!n)&&await hme.createNatsCerts();let 
a=ql.join(r,jd,lp.PID_FILES.HUB),c=ql.join(r,jd,lp.PID_FILES.LEAF),l=xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=ql.join(r,jd,lp.NATS_CONFIG_FILES.HUB_SERVER),d=ql.join(r,jd,lp.NATS_CONFIG_FILES.LEAF_SERVER),f=xn.getConfigFromFile(rr.CLUSTERING_TLS_INSECURE),m=xn.getConfigFromFile(rr.CLUSTERING_TLS_VERIFY),h=xn.getConfigFromFile(rr.CLUSTERING_NODENAME),p=xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await eC.checkNATSServerInstalled()||dT("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await ZO.listUsers(),g=xn.getConfigFromFile(rr.CLUSTERING_USER),y=await ZO.getClusterUser();(Qd.isEmpty(y)||y.active!==!0)&&dT(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await lT(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await lT(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await lT(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),await lT(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],R=[];for(let[q,K]of _.entries())K.role?.role===uT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new mme(K.username,Y1.decrypt(K.hash))),R.push(new fme(K.username,Y1.decrypt(K.hash))));let N=[],{hub_routes:O}=xn.getClusteringRoutes();if(!Qd.isEmptyOrZeroLength(O))for(let q of O)N.push(`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@${q.host}:${q.port}`);let F=new ume(xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NAME),xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,R);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=Qd.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===uT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await $l.writeJson(u,F),Jd.trace(`Hub server config written to ${u}`));let Z=`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,G=`tls://${y.uri_encoded_name}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,Y=new dme(xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[G],T,R,i,s,n,f);n==null&&delete Y.tls.ca_file,(t===void 0||t===uT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await $l.writeJson(d,Y),Jd.trace(`Leaf server config written to ${d}`))}o(_me,"generateNatsConfig");async function lT(e){let t=up.get(e);return Qd.isEmpty(t)&&dT(`port undefined for '${e}'`),await Qd.isPortTaken(t)&&dT(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(lT,"isPortAvailable");function dT(e){let t=`Error generating clustering config: ${e}`;Jd.error(t),console.error(t),process.exit(1)}o(dT,"generateNatsConfigError");async function gme(e){let{port:t,config_file:r}=eC.getServerConfig(e),{username:n,decrypt_hash:s}=await ZO.getClusterUser(),i=0,a=2e3;for(;i<W1;){try{let d=await eC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){Jd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=W1)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&Jd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await Qd.asyncSetTimeout(u)}let c="0".repeat(pme),l=ql.join(up.get(rr.ROOTPATH),jd,r);await $l.writeFile(l,c),await $l.remove(l),Jd.notify(e,"started.")}o(gme,"removeNatsConfig")});var nC={};Oe(nC,{compactOnStart:()=>Sme,copyDb:()=>eK});async function Sme(){ic.notify("Running compact on start"),console.log("Running compact on start");let e=(0,tC.get)(U.ROOTPATH),t=new Map,r=st();(0,rC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,mT.join)(e,"backup",n+".mdb"),a=(0,mT.join)(e,vc,n+"-copy.mdb"),c=0;try{c=await j1(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){ic.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await eK(n,a),console.log("Backing up",n,"to",i);try{await(0,Vl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}console.log("Moving copy compacted",n,"to",s),await(0,Vl.move)(a,s,{overwrite:!0}),await(0,Vl.remove)((0,mT.join)(e,vc,`${n}-copy.mdb-lock`))}try{cd()}catch(n){ic.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}try{cd()}catch(n){ic.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){ic.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,rC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Vl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw cd(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=await j1(n);if(console.log("Database",n,"after compact has a total record count of",a),i!==a){let c=`There is a discrepancy between pre and post compact record count for database ${n}.
19
+ Total record count before compaction: ${i}, total after: ${a}.
20
+ Database backup has not been removed and can be found here: ${s}`;ic.error(c),console.error(c)}(0,tC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)!==!0&&(console.log("Removing backup",s),await(0,Vl.remove)(s))}}async function j1(e){let t=await(0,Z1.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function Xd(){}async function eK(e,t){console.log(`Copying database ${e} to ${t}`);let r=st()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=Xd,m.primaryStore.remove=Xd;for(let h in m.indices){let p=m.indices[h];p.put=Xd,p.remove=Xd}m.auditStore&&(m.auditStore.put=Xd,m.auditStore.remove=Xd),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,Q1.open)(new J1.default(t)),c=a.openDB(hT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=Gg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let y=new X1.default(!p,p);y.encoding="binary",y.compression=_;let T=n.openDB(m,y);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",y.compression=g;let R=a.openDB(m,y);R.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,R,p,d)}if(i){let m=n.openDB(hT.AUDIT_STORE_NAME,Rm);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,y=0,T=0,R=1e7,N=null;for(;R-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),y+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",y,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",y,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var Q1,mT,Vl,tC,J1,X1,hT,Z1,rC,ic,sC=ue(()=>{De();Q1=require("lmdb"),mT=require("path"),Vl=require("fs-extra"),tC=w(oe()),J1=w(Lm()),X1=w(Pm()),hT=w(qt());k();Di();Z1=w(Ba()),rC=w(yt()),ic=w(Q());o(Sme,"compactOnStart");o(j1,"getTotalDBRecordCount");o(Xd,"noop");o(eK,"copyDb")});var ef=M((c0e,aK)=>{"use strict";var Tme=require("minimist"),{isMainThread:oC,parentPort:fp,threadId:i0e}=require("worker_threads"),ft=(k(),v(W)),to=Q(),aC=ie(),ET=fT(),pT=mr(),o0e=Nt(),sK=yt(),Si=tf(),tK=kd(),{compactOnStart:Rme}=(sC(),v(nC)),yme=Uc(),{restartWorkers:_T,onMessageByType:Ame}=nt(),{handleHDBError:bme,hdbErrors:Ime}=ge(),{HTTP_STATUS_CODES:Nme}=Ime,mp=oe(),{sendOperationToNode:rK,getThisNodeName:wme,monitorNodeCAs:Ome}=(ss(),v(Ho)),{getHDBNodeTable:a0e}=(Ll(),v(Sw));mp.initSync();var dp=`Restarting HarperDB. This may take up to ${ft.RESTART_TIMEOUT_MS/1e3} seconds.`,Cme="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",nK="Clustering is not enabled so cannot be restarted",Pme="Invalid service",Zd,Us;aK.exports={restart:iK,restartService:cC};oC&&Ame(ft.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await cC({service:e.workerType}):iK({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function iK(e){Us=Object.keys(e).length===0,Zd=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB);let t=Tme(process.argv);if(t.service){await cC(t);return}if(Us&&!Zd){console.error(Cme);return}if(Us&&console.log(dp),Zd){Si.enterPM2Mode(),to.notify(dp);let r=yme(Object.keys(ft.CONFIG_PARAM_MAP),!0);return aC.isEmptyOrZeroLength(Object.keys(r))||sK.updateConfigValue(void 0,void 0,r,!0,!0),Lme(),dp}return oC?(to.notify(dp),mp.get(ft.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Rme(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{_T()},50)):fp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART}),dp}o(iK,"restart");async function cC(e){let{service:t}=e;if(ft.HDB_PROCESS_SERVICES[t]===void 0)throw bme(new Error,Pme,Nme.BAD_REQUEST,void 0,void 0,!0);if(Si.expectedRestartOfChildren(),Zd=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB),!oC){e.replicated&&Ome(),fp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART,workerType:t}),fp.ref(),await new Promise(s=>{fp.on("message",i=>{i.type==="restart-complete"&&(s(),fp.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===wme())continue;let i;try{({job_id:i}=await rK(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await rK(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case ft.HDB_PROCESS_SERVICES.clustering:if(!mp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=nK;break}Us&&console.log("Restarting clustering"),to.notify("Restarting clustering"),await oK();break;case ft.HDB_PROCESS_SERVICES.clustering_config:case ft.HDB_PROCESS_SERVICES["clustering config"]:if(!mp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=nK;break}Us&&console.log("Restarting clusteringConfig"),to.notify("Restarting clustering_config"),await Si.reloadClustering();break;case"custom_functions":case"custom functions":case ft.HDB_PROCESS_SERVICES.harperdb:case ft.HDB_PROCESS_SERVICES.http_workers:case ft.HDB_PROCESS_SERVICES.http:if(Us&&!Zd){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Us&&console.log("Restarting httpWorkers"),to.notify("Restarting http_workers"),Us?await Si.restart(ft.PROCESS_DESCRIPTORS.HDB):await _T("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(to.error(r),Us&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(cC,"restartService");async function Lme(){await oK(),await Si.restart(ft.PROCESS_DESCRIPTORS.HDB),await aC.asyncSetTimeout(2e3),mp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await iC(),Us&&(await pT.closeConnection(),process.exit(0))}o(Lme,"restartPM2Mode");async function oK(){if(!sK.getConfigFromFile(ft.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await tK.getHDBProcessInfo()).clustering.length===0)to.trace("Clustering not running, restart will start clustering services"),await ET.generateNatsConfig(!0),await Si.startClusteringProcesses(),await Si.startClusteringThreads(),await iC(),Us&&await pT.closeConnection();else{await ET.generateNatsConfig(!0),Zd?(to.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await tK.getHDBProcessInfo()).clustering.forEach(s=>{to.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await aC.asyncSetTimeout(3e3),await iC(),await pT.updateLocalStreams(),Us&&await pT.closeConnection(),to.trace("Restart clustering restarting ingest and reply service threads");let t=_T(ft.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=_T(ft.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(oK,"restartClustering");async function iC(){await ET.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await ET.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(iC,"removeNatsConfig")});var gK=M((d0e,_K)=>{"use strict";var u0e=require("lodash"),Bn=(k(),v(W)),{handleHDBError:cK,hdbErrors:Dme}=ge(),{HDB_ERROR_MSGS:Mme,HTTP_STATUS_CODES:vme}=Dme,lC=Q();_K.exports={getRolePermissions:xme};var Kl=Object.create(null),Ume=o(e=>({key:e,perms:{}}),"permsTemplateObj"),fK=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),mK=o((e=!1,t=!1,r=!1,n=!1)=>({[Bn.PERMS_CRUD_ENUM.READ]:e,[Bn.PERMS_CRUD_ENUM.INSERT]:t,[Bn.PERMS_CRUD_ENUM.UPDATE]:r,[Bn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),uC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...mK(t,r,n,s)}),"tablePermsTemplate"),lK=o((e,t=mK())=>({attribute_name:e,describe:EK(t),[hp]:t[hp],[dC]:t[dC],[fC]:t[fC]}),"attrPermsTemplate"),uK=o((e,t=!1)=>({attribute_name:e,describe:t,[hp]:t}),"timestampAttrPermsTemplate"),{READ:hp,INSERT:dC,UPDATE:fC}=Bn.PERMS_CRUD_ENUM,hK=Object.values(Bn.PERMS_CRUD_ENUM),pK=[hp,dC,fC];function xme(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Bn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Kl[t]&&Kl[t].key===n)return Kl[t].perms;let s=Bme(e,r);return Kl[t]?Kl[t].key=n:Kl[t]=Ume(n),Kl[t].perms=s,s}catch(r){if(!e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Bn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${role_name}' must be updated to align with new structure from the 2.2.0 release.`;throw lC.error(n),lC.debug(r),cK(new Error,Mme.OUTDATED_PERMS_TRANSLATION_ERROR,vme.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
  ${r.stack}`;throw lC.error(n),cK(new Error)}}}o(xme,"getRolePermissions");function Bme(e,t){let r=Object.create(null);r.super_user=!1;let n=e.permission;r[Bn.SYSTEM_SCHEMA_NAME]=n[Bn.SYSTEM_SCHEMA_NAME],r.structure_user=n.structure_user;let s=Array.isArray(e.permission.structure_user)||e.permission.structure_user===!0?e.permission.structure_user:[];return Object.keys(t).forEach(i=>{if(s===!0||s.indexOf(i)>-1){r[i]=Fme(t[i]);return}r[i]=fK(),n[i]?(n[i].describe&&(r[i].describe=!0),Object.keys(t[i]).forEach(a=>{if(n[i].tables[a]){let c=n[i].tables[a],l=t[i][a],u=Hme(c,l);r[i].describe||hK.forEach(d=>{u[d]&&(r[i].describe=!0)}),r[i].tables[a]=u}else r[i].tables[a]=uC()})):Object.keys(t[i]).forEach(a=>{r[i].tables[a]=uC()})}),r}o(Bme,"translateRolePermissions");function Fme(e){let t=fK(!0);return Object.keys(e).forEach(r=>{t.tables[r]=uC(!0,!0,!0,!0,!0)}),t}o(Fme,"createStructureUserPermissions");function Hme(e,t){let{attribute_permissions:r}=e;if(r?.length>0){let s={...e};s.attribute_permissions=[];let i=r.reduce((u,d)=>{let{attribute_name:f}=d,m=d;return Bn.TIME_STAMP_NAMES.includes(f)&&(m=uK(f,d[hp])),u[f]=m,u},{}),a=t.primaryKey||t.hash_attribute,c=!!i[a],l=lK(a);return t.attributes.forEach(({attribute:u})=>{if(i[u]){let d=i[u];d.describe=EK(d),s.attribute_permissions.push(d),c||kme(d,l)}else if(u!==a){let d;Bn.TIME_STAMP_NAMES.includes(u)?d=uK(u):d=lK(u),s.attribute_permissions.push(d)}}),c||s.attribute_permissions.push(l),s.describe=dK(s),s}else return e.describe=dK(e),e}o(Hme,"getTableAttrPerms");function dK(e){return hK.filter(t=>e[t]).length>0}o(dK,"getSchemaTableDescribePerm");function EK(e){return pK.filter(t=>e[t]).length>0}o(EK,"getAttributeDescribePerm");function kme(e,t){pK.forEach(r=>{e[r]&&!t[r]&&(t[r]=!0,t.describe=!0)})}o(kme,"checkForHashPerms")});var pp={};Oe(pp,{authentication:()=>NK,bypassAuth:()=>jme,login:()=>pC,logout:()=>EC,start:()=>Qme});function jme(){IK=!0}async function NK(e,t){let r=e.headers.asObject,n=r.authorization,s=r.cookie,i=r.origin,a=[];try{if(i){let h=e.isOperationsServer?Kme?Vme:[]:$me?qme:[];if(h.includes(i)||h.includes("*")){if(e.method==="OPTIONS"){let p=pn.get(U.HTTP_CORSACCESSCONTROLALLOWHEADERS)??"Accept, Content-Type, Authorization",_=new Os([["Access-Control-Allow-Methods","POST, GET, PUT, DELETE, PATCH, OPTIONS"],["Access-Control-Allow-Headers",p],["Access-Control-Allow-Origin",i]]);return gT&&_.set("Access-Control-Allow-Credentials","true"),{status:200,headers:_}}a.push("Access-Control-Allow-Origin",i),gT&&a.push("Access-Control-Allow-Credentials","true")}}let l,u;if(gT){i||(i=r.host);let h=(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session=",p=s?.split(/;\s+/)||[];for(let _ of p)if(_.startsWith(h)){let g=_.indexOf(";");l=_.slice(h.length,g===-1?_.length:g),u=await SK.get(l);break}e.session=u||(u={})}let d=o((h,p,_)=>{let g=new ST.AuthAuditLog(h,p,ya.AUTHENTICATION,r["x-forwarded-for"]??e.ip,e.method,e.pathname);g.auth_strategy=_,l&&(g.session_id=l),r.referer&&(g.referer=r.referer),r.origin&&(g.origin=r.origin),p===Xs.SUCCESS?mC.notify(g):mC.error(g)},"authAuditLog");if(!e.authorized&&e.mtlsConfig&&e.peerCertificate.subject&&e?._nodeRequest?.socket?.authorizationError&&mC.error("Authorization error:",e._nodeRequest.socket.authorizationError),e.mtlsConfig&&e.authorized&&e.peerCertificate.subject){let h=e.mtlsConfig.user;h!==null?((h===void 0||h==="Common Name"||h==="CN")&&(h=e.peerCertificate.subject.CN),e.user=await Le.getUser(h,null,e),d(h,Xs.SUCCESS,"mTLS")):Gme("HTTPS/WSS mTLS authorized connection (mTLS did not 
authorize a user)","from",e.ip)}let f;if(!e.user)if(n){if(f=Yl.get(n),!f){let h=n.indexOf(" "),p=n.slice(0,h),_=n.slice(h+1),g,y;try{switch(p){case"Basic":let T=atob(_),R=T.indexOf(":");g=T.slice(0,R),y=T.slice(R+1),f=g||y?await Le.getUser(g,y,e):null;break;case"Bearer":try{f=await QN(_)}catch(N){if(N.message==="invalid token")try{return await rS(_),c({status:-1})}catch{throw N}}break}}catch(T){return Wme&&(Yl.get(_)||(Yl.set(_,_),d(g,Xs.FAILURE,p))),c({status:401,body:Na({error:T.message},e)})}Yl.set(n,f),Yme&&d(f.username,Xs.SUCCESS,p)}e.user=f}else u?.user?e.user=await Le.getUser(u.user,null,e):(IK&&(e.ip?.includes("127.0.0.")||e.ip=="::1")||e?._nodeRequest?.socket?.server?._pipeName&&e.ip===void 0)&&(e.user=await(0,RK.getSuperUser)());gT&&(e.session.update=function(h){let p=pn.get(U.AUTHENTICATION_COOKIE_EXPIRES),_=e.protocol==="https"||r.host?.startsWith("localhost:")||r.host?.startsWith("127.0.0.1:")||r.host?.startsWith("::1");if(!l){l=(0,yK.v4)();let g=pn.get(U.AUTHENTICATION_COOKIE_DOMAINS),y=p?new Date(Date.now()+(0,hC.convertToMS)(p)).toUTCString():zme,T=g?.find(O=>r.host?.endsWith(O)),N=`${(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session="}${l}; Path=/; Expires=${y}; HttpOnly`;T&&(N+=`; Domain=${T}`),_&&(N+="; SameSite=None; Secure"),a?a.push("Set-Cookie",N):m?.headers?.set&&m.headers.set("Set-Cookie",N)}return _&&(a?(i&&a.push("Access-Control-Expose-Headers","X-Hdb-Session"),a.push("X-Hdb-Session","Secure")):m?.headers?.set&&(i&&m.headers.set("Access-Control-Expose-Headers","X-Hdb-Session"),m.headers.set("X-Hdb-Session","Secure"))),h.id=l,SK.put(h,{expiresAt:p?Date.now()+(0,hC.convertToMS)(p):void 0})},e.login=async function(h,p){let _=e.user=await Le.authenticateUser(h,p,e);e.session.update({user:_&&(_.getId?.()??_.username)})});let m=await t(e);return m&&(m.status===401&&(r["user-agent"]?.startsWith("Mozilla")&&r.accept?.startsWith("text/html")&&Is.loginPath?(m.status=302,m.headers.set("Location",Is.loginPath(e))):m.headers.set("WWW-Authenticate","Basic")),c(m))}catch(l){throw c(l)}function c(l){let u=a.length;if(u>0){let d=l.headers;d||(l.headers=d=new Os);for(let f=0;f<u;){let m=a[f++];d.set(m,a[f++])}}return a=null,l}o(c,"applyResponseHeaders")}function Qme({server:e,port:t,securePort:r}){e.http(NK,t||r?{port:t,securePort:r}:{port:"all"}),TK||(TK=!0,setInterval(()=>{Yl=new Map},pn.get(U.AUTHENTICATION_CACHETTL)).unref(),AK.user.addListener(()=>{Yl=new Map}))}async function pC(e){if(!e.baseRequest?.login)throw new Error("No session for login");return e.baseResponse.headers.set=(t,r)=>{e.fastifyResponse.header(t,r)},await e.baseRequest.login(e.username,e.password??""),"Login successful"}async function EC(e){if(!e.baseRequest.session)throw new Error("No session for logout");return await e.baseRequest.session.update({user:null}),"Logout successful"}var 
RK,yK,pn,ST,AK,hC,bK,Gme,mC,qme,$me,Vme,Kme,SK,gT,IK,Yme,Wme,zme,Yl,TK,TT=ue(()=>{RK=w(ts());Mr();Hu();_d();De();yK=require("uuid"),pn=w(oe());k();ST=w(Q()),AK=w(ah());mh();hC=w(ie());yo();bK=(0,ST.forComponent)("authentication"),{debug:Gme}=bK,mC=bK.withTag("auth-event");pn.initSync();qme=pn.get(U.HTTP_CORSACCESSLIST),$me=pn.get(U.HTTP_CORS),Vme=pn.get(U.OPERATIONSAPI_NETWORK_CORSACCESSLIST),Kme=pn.get(U.OPERATIONSAPI_NETWORK_CORS),SK=je({table:"hdb_session",database:"system",attributes:[{name:"id",isPrimaryKey:!0},{name:"user"}]}),gT=pn.get(U.AUTHENTICATION_ENABLESESSIONS)??!0,IK=process.env.AUTHENTICATION_AUTHORIZELOCAL??pn.get(U.AUTHENTICATION_AUTHORIZELOCAL)??process.env.DEV_MODE,Yme=pn.get(U.LOGGING_AUDITAUTHEVENTS_LOGSUCCESSFUL)??!1,Wme=pn.get(U.LOGGING_AUDITAUTHEVENTS_LOGFAILED)??!1,zme="Tue, 01 Oct 8307 19:33:20 GMT",Yl=new Map;Le.onInvalidatedUser(()=>{Yl=new Map});o(jme,"bypassAuth");o(NK,"authentication");o(Qme,"start");o(pC,"login");o(EC,"logout")});var MK=M((R0e,DK)=>{"use strict";var we=require("joi"),wK=require("fs-extra"),OK=require("path"),ls=lt(),CK=oe(),PK=(k(),v(W)),LK=Q(),{hdbErrors:Jme}=ge(),{HDB_ERROR_MSGS:En}=Jme,Yo=/^[a-zA-Z0-9-_]+$/,Xme=/^[a-zA-Z0-9-_]+$/;DK.exports={getDropCustomFunctionValidator:ehe,setCustomFunctionValidator:the,addComponentValidator:ihe,dropCustomFunctionProjectValidator:ohe,packageComponentValidator:ahe,deployComponentValidator:che,setComponentFileValidator:rhe,getComponentFileValidator:she,dropComponentFileValidator:nhe,addSSHKeyValidator:lhe,updateSSHKeyValidator:uhe,deleteSSHKeyValidator:dhe,setSSHKnownHostsValidator:fhe};function RT(e,t,r){try{let n=CK.get(PK.CONFIG_PARAMS.COMPONENTSROOT),s=OK.join(n,t);return wK.existsSync(s)?e?t:r.message(En.PROJECT_EXISTS):e?r.message(En.NO_PROJECT):t}catch(n){return LK.error(n),r.message(En.VALIDATION_ERR)}}o(RT,"checkProjectExists");function Ep(e,t){return e.includes("..")?t.message("Invalid file path"):e}o(Ep,"checkFilePath");function Zme(e,t,r,n){try{let s=CK.get(PK.CONFIG_PARAMS.COMPONENTSROOT),i=OK.join(s,e,t,r+".js");return wK.existsSync(i)?r:n.message(En.NO_FILE)}catch(s){return LK.error(s),n.message(En.VALIDATION_ERR)}}o(Zme,"checkFileExists");function ehe(e){let t=we.object({project:we.string().pattern(Yo).custom(RT.bind(null,!0)).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().pattern(Yo).custom(Zme.bind(null,e.project,e.type)).custom(Ep).required().messages({"string.pattern.base":En.BAD_FILE_NAME})});return ls.validateBySchema(e,t)}o(ehe,"getDropCustomFunctionValidator");function the(e){let t=we.object({project:we.string().pattern(Yo).custom(RT.bind(null,!0)).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().custom(Ep).required(),function_content:we.string().required()});return ls.validateBySchema(e,t)}o(the,"setCustomFunctionValidator");function rhe(e){let t=we.object({project:we.string().pattern(Yo).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME}),file:we.string().custom(Ep).required(),payload:we.string().allow("").optional(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ls.validateBySchema(e,t)}o(rhe,"setComponentFileValidator");function nhe(e){let t=we.object({project:we.string().pattern(Yo).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME}),file:we.string().custom(Ep).optional()});return 
ls.validateBySchema(e,t)}o(nhe,"dropComponentFileValidator");function she(e){let t=we.object({project:we.string().required(),file:we.string().custom(Ep).required(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ls.validateBySchema(e,t)}o(she,"getComponentFileValidator");function ihe(e){let t=we.object({project:we.string().pattern(Yo).custom(RT.bind(null,!1)).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME})});return ls.validateBySchema(e,t)}o(ihe,"addComponentValidator");function ohe(e){let t=we.object({project:we.string().pattern(Yo).custom(RT.bind(null,!0)).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME})});return ls.validateBySchema(e,t)}o(ohe,"dropCustomFunctionProjectValidator");function ahe(e){let t=we.object({project:we.string().pattern(Yo).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME}),skip_node_modules:we.boolean(),skip_symlinks:we.boolean()});return ls.validateBySchema(e,t)}o(ahe,"packageComponentValidator");function che(e){let t=we.object({project:we.string().pattern(Yo).required().messages({"string.pattern.base":En.BAD_PROJECT_NAME}),package:we.string().optional(),restart:we.alternatives().try(we.boolean(),we.string().valid("rolling")).optional()});return ls.validateBySchema(e,t)}o(che,"deployComponentValidator");function lhe(e){let t=we.object({name:we.string().pattern(Xme).required().messages({"string.pattern.base":En.BAD_SSH_KEY_NAME}),key:we.string().required(),host:we.string().required(),hostname:we.string().required(),known_hosts:we.string().optional()});return ls.validateBySchema(e,t)}o(lhe,"addSSHKeyValidator");function uhe(e){let t=we.object({name:we.string().required(),key:we.string().required()});return ls.validateBySchema(e,t)}o(uhe,"updateSSHKeyValidator");function dhe(e){let t=we.object({name:we.string().required()});return ls.validateBySchema(e,t)}o(dhe,"deleteSSHKeyValidator");function fhe(e){let t=we.object({known_hosts:we.string().required()});return ls.validateBySchema(e,t)}o(fhe,"setSSHKnownHostsValidator")});var Sp=M((A0e,FK)=>{"use strict";var yT=require("joi"),oc=require("path"),rf=require("fs-extra"),{exec:mhe,spawn:hhe}=require("child_process"),phe=require("util"),Ehe=phe.promisify(mhe),nf=(k(),v(W)),{PACKAGE_ROOT:_he}=Rt(),{handleHDBError:_p,hdbErrors:ghe}=ge(),{HTTP_STATUS_CODES:gp}=ghe,Wl=oe(),She=lt(),ac=Q(),{once:The}=require("events");Wl.initSync();var _C=Wl.get(nf.CONFIG_PARAMS.COMPONENTSROOT),vK="npm install --force --omit=dev --json",Rhe=`${vK} --dry-run`,yhe=Wl.get(nf.CONFIG_PARAMS.ROOTPATH),AT=oc.join(yhe,"ssh");FK.exports={installModules:Nhe,auditModules:whe,installAllRootModules:Ahe,uninstallRootModule:bhe,linkHarperdb:Ihe,runCommand:sf};async function Ahe(e=!1,t=Wl.get(nf.CONFIG_PARAMS.ROOTPATH)){await bT();let r=!1,n=process.env;rf.pathExistsSync(AT)&&rf.readdirSync(AT).forEach(s=>{s.includes(".key")&&!r&&(n={GIT_SSH_COMMAND:"ssh -F "+oc.join(AT,"config")+" -o UserKnownHostsFile="+oc.join(AT,"known_hosts"),...process.env},r=!0)});try{let s=Wl.get(nf.CONFIG_PARAMS.ROOTPATH),i=oc.join(s,"node_modules","harperdb");rf.lstatSync(i).isSymbolicLink()&&rf.unlinkSync(i)}catch(s){s.code!=="ENOENT"&&ac.error("Error removing symlink:",s)}await sf(e?"npm install --force --ignore-scripts":"npm install --force",t,n)}o(Ahe,"installAllRootModules");async function bhe(e){await sf(`npm uninstall ${e}`,Wl.get(nf.CONFIG_PARAMS.ROOTPATH))}o(bhe,"uninstallRootModule");async function Ihe(){await bT(),await sf(`npm link 
${_he}`,Wl.get(nf.CONFIG_PARAMS.ROOTPATH))}o(Ihe,"linkHarperdb");async function sf(e,t=void 0,r=process.env){ac.debug({tagName:"npm_run_command"},`running command: \`${e}\``);let n=hhe(e,{shell:!0,cwd:t,env:r,stdio:["ignore","pipe","pipe"]}),s="",i="";n.stdout.on("data",c=>{let l=c.toString();ac.debug({tagName:"npm_run_command:stdout"},l),s+=l}),n.stderr.on("data",c=>{let l=c.toString();ac.error({tagName:"npm_run_command:stderr"},l),i+=l});let[a]=await The(n,"close");if(a!==0)throw new Error(`Command \`${e}\` exited with code ${a}.${i===""?"":` Error: ${i}`}`);return s||void 0}o(sf,"runCommand");async function Nhe(e){let t="install_node_modules is deprecated. Dependencies are automatically installed on deploy, and install_node_modules can lead to inconsistent behavior";ac.warn(t,e.projects);let r=BK(e);if(r)throw _p(r,r.message,gp.BAD_REQUEST);let{projects:n,dryRun:s}=e,i=s===!0?Rhe:vK;await bT(),await xK(n);let a={};for(let c=0,l=n.length;c<l;c++){let u=n[c];a[u]={npm_output:null,npm_error:null};let d=oc.join(_C,u),f,m=null;try{let{stdout:h,stderr:p}=await Ehe(i,{cwd:d});f=h?h.replace(`
  `,""):null,m=p?p.replace(`
  `,""):null}catch(h){h.stderr?a[u].npm_error=UK(h.stderr):a[u].npm_error=h.message;continue}try{a[u].npm_output=JSON.parse(f)}catch{a[u].npm_output=f}try{a[u].npm_error=JSON.parse(m)}catch{a[u].npm_error=m}}return ac.info(`finished installModules with response ${a}`),a.warning=t,a}o(Nhe,"installModules");function UK(e){let t='"error": {',r=e.indexOf('"error": {'),n=e.indexOf(`}