harperdb 4.6.16 → 4.6.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/lite.js CHANGED
@@ -21,9 +21,9 @@ var K4=Object.create;var qf=Object.defineProperty;var Y4=Object.getOwnPropertyDe
  `,""));return r.replace(`
  `,"")}o(zq,"runCommand");async function Tle(){try{await Qce.access(bw)}catch{return!1}let e=await zq(`${bw} --version`,void 0),t=e.substring(e.lastIndexOf("v")+1,e.length);return Jce.eq(t,gle)}o(Tle,"checkNATSServerInstalled");async function Cw(e,t,r,n=!0,s="127.0.0.1"){if(!t&&!r){let a=await Yq.getClusterUser();if(Pl(a))throw new Error("Unable to get nats connection. Cluster user is undefined.");t=a.username,r=a.decrypt_hash}hi.trace("create nats connection called");let i=await lle({name:s,port:e,user:t,pass:r,maxReconnectAttempts:-1,waitOnFirstConnect:n,timeout:2e5,tls:{keyFile:Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_PRIVATEKEY),certFile:Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERTIFICATE),caFile:Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERT_AUTH),rejectUnauthorized:!1}});return i.protocol.transport.socket.unref(),hi.trace("create connection established a nats client connection with id",i?.info?.client_id),i.closed().then(a=>{a&&hi.error("Error with Nats client connection, connection closed",a),i===mn&&jq()}),i}o(Cw,"createConnection");function jq(){mn=void 0,wl=void 0,Ol=void 0,Cl=void 0}o(jq,"clearClientCache");async function Rle(){mn&&(await mn.drain(),mn=void 0,wl=void 0,Ol=void 0,Cl=void 0)}o(Rle,"closeConnection");var mn,Cl;async function Hh(){return Cl||(Cl=Cw(Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),void 0,void 0),mn=await Cl),mn||Cl}o(Hh,"getConnection");async function kh(){if(wl)return wl;Pl(mn)&&await Hh();let{domain:e}=Ad(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Pl(e))throw new Error("Error getting JetStream domain. Unable to get JetStream manager.");return wl=await mn.jetstreamManager({domain:e,timeout:6e4}),wl}o(kh,"getJetStreamManager");async function Qq(){if(Ol)return Ol;Pl(mn)&&await Hh();let{domain:e}=Ad(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Pl(e))throw new Error("Error getting JetStream domain. 
Unable to get JetStream manager.");return Ol=mn.jetstream({domain:e,timeout:6e4}),Ol}o(Qq,"getJetStream");async function Zi(){let e=mn||await Hh(),t=wl||await kh(),r=Ol||await Qq();return{connection:e,jsm:t,js:r}}o(Zi,"getNATSReferences");async function yle(e){let t=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),{sys_name:r,decrypt_hash:n}=await Yq.getClusterUser(),s=await Cw(t,r,n),i=Ow(),a=s.subscribe(i),c=[],l,u=(async()=>{for await(let d of a){let f=Wq.decode(d.data);f.response_time=Date.now()-l,c.push(f)}})();return l=Date.now(),await s.publish("$SYS.REQ.SERVER.PING.VARZ",void 0,{reply:i}),await s.publish("$SYS.REQ.SERVER.PING",void 0,{reply:i}),await s.flush(),await bS.asyncSetTimeout(e),await a.drain(),await s.close(),await u,c}o(yle,"getServerList");async function Pw(e,t){let{jsm:r}=await Zi(),n=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);s=s===null?-1:s;let i=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);i=i===null?-1:i,await r.streams.add({name:e,storage:ule.File,retention:dle.Limits,subjects:t,discard:fle.Old,maxMsgs:s,maxBytes:i,maxAge:n})}o(Pw,"createLocalStream");async function Jq(){let{jsm:e}=await Zi(),t=await e.streams.list().next(),r=[];return t.forEach(n=>{r.push(n)}),r}o(Jq,"listStreams");async function Ale(e){let{jsm:t}=await Zi();await t.streams.delete(e)}o(Ale,"deleteLocalStream");async function ble(e){let{connection:t}=await Zi(),r=[],n=Ow(),s=t.subscribe(n),i=(async()=>{for await(let a of s)r.push(Wq.decode(a.data))})();return await t.publish(`$JS.${e}.API.STREAM.LIST`,void 0,{reply:n}),await t.flush(),await s.drain(),await i,r}o(ble,"listRemoteStreams");async function Ile(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Zi(),i=qq(),a={durable_name:i,ack_policy:Nw.Explicit};t&&(a.deliver_policy=ww.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];let u=[];for await(let d of l){let f=Iw(d.data),m={nats_timestamp:d.info.timestampNanos,nats_sequence:d.info.streamSequence,entry:f};if(d.headers&&(m.origin=d.headers.get(Qr.MSG_HEADERS.ORIGIN)),u.push(m),d.ack(),d.info.pending===0)break}return await c.delete(),u}o(Ile,"viewStream");async function*Nle(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Zi(),i=qq(),a={durable_name:i,ack_policy:Nw.Explicit};t&&(a.deliver_policy=ww.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];for await(let u of l){let d=Iw(u.data);d[0]||(d=[d]);for(let f of d){let m={nats_timestamp:u.info.timestampNanos,nats_sequence:u.info.streamSequence,entry:f};u.headers&&(m.origin=u.headers.get(Qr.MSG_HEADERS.ORIGIN)),yield m}if(u.ack(),u.info.pending===0)break}await c.delete()}o(Nle,"viewStreamIterator");async function wle(e,t,r,n){hi.trace(`publishToStream called with subject: ${e}, stream: ${t}, entries:`,n.operation),r=Xq(n,r);let{js:s}=await Zi(),i=await NS(),a=`${e}.${i}`,c=await Ele(()=>n instanceof Uint8Array?n:Kq.encode(n));try{hi.trace(`publishToStream publishing to subject: ${a}`),ple(c.length,"bytes-sent",e,n.operation,"replication"),await s.publish(a,c,{headers:r})}catch(l){if(l.code&&l.code.toString()==="503")return e$(async()=>{try{await 
s.publish(a,c,{headers:r})}catch{if(l.code&&l.code.toString()==="503"){hi.trace(`publishToStream creating stream: ${t}`);let d=a.split(".");d[2]="*",await Pw(t,[a]),await s.publish(a,c,{headers:r})}else throw l}});throw l}}o(wle,"publishToStream");function Xq(e,t){t===void 0&&(t=hle());let r=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME);return!t.has(Qr.MSG_HEADERS.ORIGIN)&&r&&t.append(Qr.MSG_HEADERS.ORIGIN,r),t}o(Xq,"addNatsMsgHeader");function Ad(e){e=e.toLowerCase();let t=Fh.join(Gr.get(Qe.CONFIG_PARAMS.ROOTPATH),_le);if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())return Pl(Aw)&&(Aw={port:xh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),server_name:xh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.HUB,config_file:Qr.NATS_CONFIG_FILES.HUB_SERVER,pid_file_path:Fh.join(t,Qr.PID_FILES.HUB),hdbNatsPath:t}),Aw;if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())return Pl(yw)&&(yw={port:xh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),server_name:xh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,config_file:Qr.NATS_CONFIG_FILES.LEAF_SERVER,domain:xh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,pid_file_path:Fh.join(t,Qr.PID_FILES.LEAF),hdbNatsPath:t}),yw;hi.error(`Unable to get Nats server config. Unrecognized process: ${e}`)}o(Ad,"getServerConfig");async function Zq(e,t,r,n){try{await e.consumers.add(t,{ack_policy:Nw.Explicit,durable_name:r,deliver_policy:ww.StartTime,opt_start_time:n})}catch(s){if(s.message!=="consumer already exists")throw s}}o(Zq,"createConsumer");async function Ole(e,t,r){await e.consumers.delete(t,r)}o(Ole,"removeConsumer");function Cle(e){return e.split(".")[1]}o(Cle,"extractServerName");async function Ple(e,t,r=6e4,n=Ow()){if(!bS.isObject(t))throw new Error("data param must be an object");let s=Kq.encode(t),{connection:i}=await Zi(),a={timeout:r};n&&(a.reply=n,a.noMux=!0);let c=await i.request(e,s,a);return Iw(c.data)}o(Ple,"request");function Lw(e){return new Promise(async(t,r)=>{let n=tle(bw,["--signal",`reload=${e}`],{cwd:__dirname}),s,i;n.on("error",a=>{r(a)}),n.stdout.on("data",a=>{i+=a.toString()}),n.stderr.on("data",a=>{s+=a.toString()}),n.stderr.on("close",a=>{s&&r(s),t(i)})})}o(Lw,"reloadNATS");async function Lle(){let{pid_file_path:e}=Ad(Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB);await Lw(e)}o(Lle,"reloadNATSHub");async function Dle(){let{pid_file_path:e}=Ad(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await Lw(e)}o(Dle,"reloadNATSLeaf");function Mle(e,t,r){let n;switch(e.code){case Gq.NoResponders:n=`Unable to ${t}, node '${r}' is not listening.`;break;case Gq.Timeout:n=`Unable to ${t}, node '${r}' is listening but did not respond.`;break;default:n=e.message;break}return n}o(Mle,"requestErrorHandler");async function vle(e,t){let r=t+Qr.SERVER_SUFFIX.LEAF,{connection:n}=await Zi(),{jsm:s}=await qle(r),{schema:i,table:a}=e,c=IS.createNatsTableStreamName(i,a),l=e.start_time?e.start_time:new Date(Date.now()).toISOString();await e$(async()=>{if(e.subscribe===!0)await Zq(s,c,n.info.server_name,l);else try{await Ole(s,c,n.info.server_name)}catch(u){hi.trace(u)}})}o(vle,"updateRemoteConsumer");async function Ule(e,t,r,n){let s=IS.createNatsTableStreamName(e,t),i=r+Qr.SERVER_SUFFIX.LEAF,a={type:Qe.ITC_EVENT_TYPES.NATS_CONSUMER_UPDATE,status:n,stream_name:s,node_domain_name:i};if(!Vq&&ale()<Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXINGESTTHREADS)){let{updateConsumer:c}=Rw();await c(a)}await 
ile(a),n==="stop"&&await bS.asyncSetTimeout(1e3)}o(Ule,"updateConsumerIterator");function e$(e){return sle.writeTransaction(Qe.SYSTEM_SCHEMA_NAME,Qe.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,e)}o(e$,"exclusiveLock");async function t$(e,t){let r=IS.createNatsTableStreamName(e,t),n=await NS(),s=Hle(e,t,n);await Pw(r,[s])}o(t$,"createLocalTableStream");async function xle(e){for(let t=0,r=e.length;t<r;t++){let n=e[t].schema,s=e[t].table;await t$(n,s)}}o(xle,"createTableStreams");async function r$(e,t,r=void 0){if(Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))try{let n=IS.createNatsTableStreamName(e,t),{domain:s}=Ad(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await(await(await Hh()).jetstreamManager({domain:s,timeout:24e4})).streams.purge(n,r)}catch(n){if(n.message==="stream not found")hi.warn(n);else throw n}}o(r$,"purgeTableStream");async function Ble(e,t){if(Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))for(let r=0,n=t.length;r<n;r++)await r$(e,t[r])}o(Ble,"purgeSchemaTableStreams");async function Fle(e){return(await kh()).streams.info(e)}o(Fle,"getStreamInfo");function Hle(e,t,r){return`${Qr.SUBJECT_PREFIXES.TXN}.${e}${t?"."+t:""}.${r}`}o(Hle,"createSubjectName");async function NS(){if(Bh)return Bh;if(Bh=(await kh())?.nc?.info?.server_name,Bh===void 0)throw new Error("Unable to get jetstream manager server name");return Bh}o(NS,"getJsmServerName");async function kle(){let e=await kh(),t=await NS(),r=await Jq();for(let n of r){let s=n.config,i=s.subjects[0];if(!i)continue;let a=Gle(n),c=i.split(".");if(c[c.length-1]===t&&!a||s.name==="__HARPERDB_WORK_QUEUE__")continue;let u=i.split(".");u[u.length-1]=t;let d=u.join(".");hi.trace(`Updating stream subject name from: ${i} to: ${d}`),s.subjects[0]=d,await e.streams.update(s.name,s)}}o(kle,"updateLocalStreams");function Gle(e){let{config:t}=e,r=!1,n=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);s=s===null?-1:s;let i=Gr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);return i=i===null?-1:i,n!==t.max_age&&(t.max_age=n,r=!0),s!==t.max_bytes&&(t.max_bytes=s,r=!0),i!==t.max_msgs&&(t.max_msgs=i,r=!0),r}o(Gle,"updateStreamLimits");async function qle(e){let t,r;try{t=await mn.jetstream({domain:e}),r=await mn.jetstreamManager({domain:e,checkAPI:!1})}catch(n){throw hi.error("Unable to connect to:",e),n}return{js:t,jsm:r}}o(qle,"connectToRemoteJS")});function Dw(e){let t=e.get(wS),r=t?(0,bd.unpack)(t):null;r||(r={remoteNameToId:{}});let n=Ze(),s=!1;r.nodeName=Ze();let i=r.remoteNameToId;if(i[n]!==0){let a=0,c;for(let l in i){let u=i[l];u===0?c=l:u>a&&(a=u)}if(c){a++,i[c]=a;let l=[Symbol.for("seq"),a];e.rootStore.dbisDb.transactionSync(()=>{e.rootStore.dbisDb.get(l)||e.rootStore.dbisDb.putSync(l,{seqId:qh(e)??1,nodes:[]})})}i[n]=0,e.putSync(wS,(0,bd.pack)(r))}return r}function Gh(e){return Dw(e).remoteNameToId}function i$(e,t){let r=Dw(t),n=r.remoteNameToId,s=new Map,i=!1;for(let a in e){let c=e[a],l=n[a];if(l==null){let u=0;for(let d in n){let f=n[d];f>u&&(u=f)}l=u+1,n[a]=l,i=!0}s.set(c,l)}return i&&t.putSync(wS,(0,bd.pack)(r)),s}function OS(e,t){let r=Dw(t),n=r.remoteNameToId,s=n[e];if(s==null){let i=0;for(let a in n){let c=n[a];c>i&&(i=c)}s=i+1,n[e]=s,t.putSync(wS,(0,bd.pack)(r))}return s$.trace?.("The remote node name map",e,n,s),s}var s$,bd,wS,Mw=ue(()=>{s$=w(ei());is();bd=require("msgpackr"),wS=Symbol.for("remote-ids");o(Dw,"getIdMappingRecord");o(Gh,"exportIdMapping");o(i$,"remoteToLocalNodeId");o(OS,"getIdOfRemoteNode")});var 
vw={};Oe(vw,{commitsAwaitingReplication:()=>Id,getHDBNodeTable:()=>Kt,getReplicationSharedStatus:()=>Nd,iterateRoutes:()=>Vh,shouldReplicateToNode:()=>$h,subscribeToNodeUpdates:()=>wd});function Kt(){return o$||(o$=ze({table:"hdb_nodes",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"subscriptions"},{attribute:"system_info"},{attribute:"url"},{attribute:"routes"},{attribute:"ca"},{attribute:"ca_info"},{attribute:"replicates"},{attribute:"revoked_certificates"},{attribute:"__createdtime__"},{attribute:"__updatedtime__"}]}))}function Nd(e,t,r,n){return new Float64Array(e.getUserSharedBuffer(["replicated",t,r],new ArrayBuffer(48),n&&{callback:n}))}function wd(e){Kt().subscribe({}).then(async t=>{for await(let r of t){let n=r?.value?.name;u$.debug?.("adding node",n,"on node",Ze()," on process",process.pid),server.nodes=server.nodes.filter(i=>i.name!==n),r.type==="put"&&n!==Ze()&&(r.value?server.nodes.push(r.value):console.error("Invalid node update event",r));let s=new Map;for await(let i of Kt().search({}))if(i.shard!=null){let a=s.get(i.shard);a||s.set(i.shard,a=[]),a.push(i)}server.shards=s,(r.type==="put"||r.type==="delete")&&e(r.value,r.id)}})}function $h(e,t){let r=Wa.default.get(U.REPLICATION_DATABASES);return(e.replicates===!0||e.replicates?.sends)&&databases[t]&&(r==="*"||r?.find?.(n=>n.name===t&&(!n.sharded||e.shard===Wa.default.get(U.REPLICATION_SHARD))))&&Kt().primaryStore.get(Ze())?.replicates||e.subscriptions?.some(n=>(n.database||n.schema)===t&&n.subscribe)}function $le(){wd(e=>{za({},(t,r)=>{let n=e.name,s=a$.get(n);if(s||a$.set(n,s=new Map),s.has(r))return;let i;for(let a in t)if(i=t[a].auditStore,i)break;if(i){let a=Nd(i,r,n,()=>{let c=a[0],l=a.lastTime;for(let{txnTime:u,onConfirm:d}of Id.get(r)||[])u>l&&u<=c&&d();a.lastTime=c});a.lastTime=0,s.set(r,a)}})})}function*Vh(e){for(let t of e.routes||[]){let r=t.url,n;if(typeof t=="string"?t.includes("://")?r=t:n=t:n=t.hostname??t.host,n&&!r){let s=Wa.default.get(U.REPLICATION_SECUREPORT)??(!Wa.default.get(U.REPLICATION_PORT)&&Wa.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),i;(i=n.match(/:(\d+)$/)?.[1])?n=n.slice(0,-i[0].length-1):t.port?i=t.port:i=s||Wa.default.get(U.REPLICATION_PORT)||Wa.default.get(U.OPERATIONSAPI_NETWORK_PORT);let a=i?.lastIndexOf?.(":");a>0&&(i=+i.slice(a+1).replace(/[\[\]]/g,"")),r=(s?"wss://":"ws://")+n+":"+i}if(!r){c$.isMainThread&&console.error("Invalid route, must specify a url or host (with port)");continue}yield{replicates:!t.subscriptions,url:r,subscription:t.subscriptions,routes:t.routes,startTime:t.startTime,revoked_certificates:t.revokedCertificates}}}var c$,l$,Wa,u$,o$,a$,Id,Ll=ue(()=>{De();is();rm();c$=require("worker_threads"),l$=w(ge()),Wa=w(oe());k();u$=w(ei());server.nodes=[];o(Kt,"getHDBNodeTable");o(Nd,"getReplicationSharedStatus");o(wd,"subscribeToNodeUpdates");o($h,"shouldReplicateToNode");a$=new Map;ev((e,t,r)=>{if(r>server.nodes.length)throw new l$.ClientError(`Cannot confirm replication to more nodes (${r}) than are in the network (${server.nodes.length})`);Id||(Id=new Map,$le());let n=Id.get(e);return n||(n=[],Id.set(e,n)),new Promise(s=>{let i=0;n.push({txnTime:t,onConfirm:o(()=>{++i===r&&s()},"onConfirm")})})});o($le,"startSubscriptionToReplications");o(Vh,"iterateRoutes")});var m$={};Oe(m$,{connectedToNode:()=>Dl,disconnectedFromNode:()=>Cd,ensureNode:()=>Ho,requestClusterStatus:()=>f$,startOnMainThread:()=>Bw});async function Bw(e){let t=0,r=st();for(let i of Object.getOwnPropertyNames(r)){let a=r[i];for(let c in a){let 
l=a[c];if(l.auditStore){CS.set(i,qh(l.auditStore));break}}}to.whenThreadsStarted.then(async()=>{let i=[];for await(let l of r.system.hdb_nodes?.search([])||[])i.push(l);let a=Ze();function c(){let l=Kt().primaryStore.get(a);if(l!==null){let u=e.url??ja();if(l===void 0||l.url!==u||l.shard!==e.shard)return Ho(a,{name:a,url:u,shard:e.shard,replicates:!0})}}o(c,"ensureThisNode"),Kt().primaryStore.get(a)&&c();for(let l of Vh(e))try{let u=!l.subscriptions;if(u&&await c(),u&&l.replicates==null&&(l.replicates=!0),i.find(d=>d.url===l.url))continue;s(l)}catch(u){console.error(u)}wd(s)});let n;function s(i,a=i?.name){let c=Ze()&&a===Ze()||ja()&&i?.url===ja();if(c){let f=!!i?.replicates;if(n!==void 0&&n!==f)for(let m of Kt().search([]))m.replicates&&m.name!==a&&s(m,m.name);n=f}if(at.trace("Setting up node replication for",i),!i){for(let[f,m]of eo){let h;for(let[p,{worker:_,nodes:g}]of m){let y=g[0];if(y&&y.name==a){h=!0;for(let[T,{worker:R}]of m)m.delete(T),at.warn("Node was deleted, unsubscribing from node",a,T,f),R?.postMessage({type:"unsubscribe-from-node",node:a,database:T,url:f});break}}if(h){eo.get(f).iterator.remove(),eo.delete(f);return}}return}if(c)return;if(!i.url){at.info(`Node ${i.name} is missing url`);return}let l=eo.get(i.url);if(l&&l.iterator.remove(),!(i.replicates===!0||i.replicates?.sends)&&!i.subscriptions?.length&&!l)return;if(at.info(`Added node ${i.name} at ${i.url} for process ${Ze()}`),i.replicates&&i.subscriptions&&(i={...i,subscriptions:null}),i.name){for(let[f,m]of Od)if(i.url===m.url){Od.delete(f);break}Od.set(i.name,i)}let u=st();if(l||(l=new Map,eo.set(i.url,l)),l.iterator=za(e,(f,m,h)=>{h?d(m,!0):d(m,!1)}),i.subscriptions)for(let f of i.subscriptions){let m=f.database||f.schema;u[m]||(at.warn(`Database ${m} not found for node ${i.name}, making a subscription anyway`),d(m,!1))}function d(f,m){at.trace("Setting up replication for database",f,"on node",i.name);let h=l.get(f),p,_=[{replicateByDefault:m,...i}];CS.has(f)&&(_.push({replicateByDefault:m,name:Ze(),startTime:CS.get(f),endTime:Date.now(),replicates:!0}),CS.delete(f));let g=$h(i,f),y=to.workers.filter(T=>T.name==="http");if(h?(p=h.worker,h.nodes=_):g&&(t=t%y.length,p=y[t++],l.set(f,{worker:p,nodes:_,url:i.url}),p?.on("exit",()=>{l.get(f)?.worker===p&&(l.delete(f),d(f,m))})),g)setTimeout(()=>{let T={type:"subscribe-to-node",database:f,nodes:_};p?p.postMessage(T):Kh(T)},Vle);else{at.info("Node no longer should be used, unsubscribing from node",{replicates:i.replicates,databaseName:f,node:i,subscriptions:i.subscriptions,hasDatabase:!!u[f],thisReplicates:Kt().primaryStore.get(Ze())?.replicates}),Kt().primaryStore.get(Ze())?.replicates||(n=!1,at.info("Disabling replication, this node name",Ze(),Kt().primaryStore.get(Ze()),f));let T={type:"unsubscribe-from-node",database:f,url:i.url,name:i.name};p?p.postMessage(T):LS(T)}}o(d,"onDatabase")}o(s,"onNodeUpdate"),Cd=o(function(i){try{at.info("Disconnected from node",i.name,i.url,"finished",!!i.finished);let a=Array.from(Od.keys()),c=a.sort(),l=c.indexOf(i.name||pi(i.url));if(l===-1){at.warn("Disconnected node not found in node map",i.name,a);return}let u=eo.get(i.url),d=u?.get(i.database);if(!d){at.warn("Disconnected node not found in replication map",i.database,u);return}if(d.connected=!1,i.finished||!xw.default.get(U.REPLICATION_FAILOVER))return;let f=d.nodes[0];if(!(f.replicates===!0||f.replicates?.sends||f.subscriptions?.length))return;let m=f.shard,h=(l+1)%c.length;for(;l!==h;){let p=c[h],_=Od.get(p);u=eo.get(_.url);let 
g=u?.get(i.database);if(!g||g.connected===!1||g.nodes[0].shard!==m){h=(h+1)%c.length;continue}let{worker:y,nodes:T}=g,R=!1;for(let N of d.nodes){if(T.some(O=>O.name===N.name)){at.info(`Disconnected node is already failing over to ${p} for ${i.database}`);continue}N.endTime<Date.now()||(T.push(N),R=!0)}if(d.nodes=[d.nodes[0]],!R){at.info(`Disconnected node ${i.name} has no nodes to fail over to ${p}`);return}at.info(`Failing over ${i.database} from ${i.name} to ${p}`),y?y.postMessage({type:"subscribe-to-node",database:i.database,nodes:T}):Kh({database:i.database,nodes:T});return}at.warn("Unable to find any other node to fail over to",i.name,i.url)}catch(a){at.error("Error failing over node",a)}},"disconnectedFromNode"),Dl=o(function(i){let a=eo.get(i.url),c=a?.get(i.database);if(!c){at.warn("Connected node not found in replication map, this may be because the node is being removed",i.database,a);return}c.connected=!0,c.latency=i.latency;let l=c.nodes[0];if(!l){at.warn("Newly connected node has no node subscriptions",i.database,c);return}if(!l.name){at.debug("Connected node is not named yet",i.database,c);return}c.nodes=[l];let u=!1;for(let d of eo.values()){let f=d.get(i.database);if(!f||f==c)continue;let{worker:m,nodes:h,connected:p}=f;if(h)if(p===!1&&h[0].shard===l.shard)u=!0,c.nodes.push(h[0]);else{let _=h.filter(g=>g&&g.name!==l.name);_.length<h.length&&(f.nodes=_,m.postMessage({type:"subscribe-to-node",database:i.database,nodes:h}))}}u&&c.worker&&c.worker.postMessage({type:"subscribe-to-node",database:i.database,nodes:c.nodes})},"connectedToNode"),(0,to.onMessageByType)("disconnected-from-node",Cd),(0,to.onMessageByType)("connected-to-node",Dl),(0,to.onMessageByType)("request-cluster-status",f$)}function f$(e,t){let r=[];for(let[n,s]of Od)try{let i=eo.get(s.url);at.info("Getting cluster status for",n,s.url,"has dbs",i?.size);let a=[];if(i){for(let[l,{worker:u,connected:d,nodes:f,latency:m}]of i)a.push({database:l,connected:d,latency:m,threadId:u?.threadId,nodes:f.filter(h=>!(h.endTime<Date.now())).map(h=>h.name)});let c=(0,Uw.cloneDeep)(s);c.database_sockets=a,delete c.ca,delete c.node_name,delete c.__updatedtime__,delete c.__createdtime__,r.push(c)}}catch(i){at.warn("Error getting cluster status for",s?.url,i)}return t?.postMessage({type:"cluster-status",connections:r}),{connections:r}}async function Ho(e,t){let r=Kt();e=e??pi(t.url),t.name=e;try{if(t.ca){let s=new d$.X509Certificate(t.ca);t.ca_info={issuer:s.issuer.replace(/\n/g," "),subject:s.subject.replace(/\n/g," "),subjectAltName:s.subjectAltName,serialNumber:s.serialNumber,validFrom:s.validFrom,validTo:s.validTo}}}catch(s){at.error("Error parsing replication CA info for hdb_nodes table",s.message)}let n=r.primaryStore.get(e);if(at.debug(`Ensuring node ${e} at ${t.url}, existing record:`,n,"new record:",t),!n)await r.patch(t);else{t.replicates&&!xw.default.get(U.CLUSTERING_ENABLED)&&(t.subscriptions=null);for(let s in t)if(n[s]!==t[s]&&s==="subscriptions"&&t[s]&&n[s]){let i=[],a=(0,Uw.cloneDeep)(n[s]);for(let c of t[s]){let l=!1;for(let u of a)if((c.database??c.schema)===(u.database??u.schema)&&c.table===u.table){u.publish=c.publish,u.subscribe=c.subscribe,l=!0;break}l||i.push(c)}t.subscriptions=[...a,...i];break}if(Array.isArray(t.revoked_certificates)){let s=n.revoked_certificates||[];t.revoked_certificates=[...new Set([...s,...t.revoked_certificates])]}at.info(`Updating node ${e} at ${t.url}`),await r.patch(t)}}var 
to,PS,at,Uw,xw,d$,Vle,eo,Cd,Dl,Od,CS,Yh=ue(()=>{De();to=w(nt());is();PS=require("worker_threads");Ll();at=w(Q()),Uw=require("lodash"),xw=w(oe());k();d$=require("crypto"),Vle=200,eo=new Map,Od=new Map,CS=new Map;o(Bw,"startOnMainThread");o(f$,"requestClusterStatus");PS.parentPort&&(Cd=o(e=>{PS.parentPort.postMessage({type:"disconnected-from-node",...e})},"disconnectedFromNode"),Dl=o(e=>{PS.parentPort.postMessage({type:"connected-to-node",...e})},"connectedToNode"),(0,to.onMessageByType)("subscribe-to-node",e=>{Kh(e)}),(0,to.onMessageByType)("unsubscribe-from-node",e=>{LS(e)}));o(Ho,"ensureNode")});var as=M(Yt=>{"use strict";var hr=require("path"),{watch:Kle}=require("chokidar"),Bn=require("fs-extra"),Pd=require("node-forge"),S$=require("net"),{generateKeyPair:Fw,X509Certificate:ko,createPrivateKey:T$}=require("crypto"),Yle=require("util");Fw=Yle.promisify(Fw);var Ot=Pd.pki,Ei=require("joi"),{v4:R$}=require("uuid"),{validateBySchema:qw}=ut(),{forComponent:Wle}=Q(),os=oe(),Ls=(k(),v(W)),{CONFIG_PARAMS:vl}=Ls,_i=FI(),{ClientError:Ja}=ge(),MS=require("node:tls"),{relative:y$,join:zle}=require("node:path"),{CERT_PREFERENCE_APP:YMe,CERTIFICATE_VALUES:h$}=_i,jle=Uc(),Hw=bt(),{table:Qle,getDatabases:Jle,databases:DS}=(De(),v(mt)),{getJWTRSAKeys:p$}=(Sd(),v(vh)),ht=Wle("tls");Yt.generateKeys=Kw;Yt.updateConfigCert=P$;Yt.createCsr=sue;Yt.signCertificate=iue;Yt.setCertTable=Ld;Yt.loadCertificates=w$;Yt.reviewSelfSignedCert=Ww;Yt.createTLSSelector=D$;Yt.listCertificates=v$;Yt.addCertificate=due;Yt.removeCertificate=mue;Yt.createNatsCerts=cue;Yt.generateCertsKeys=aue;Yt.getReplicationCert=zh;Yt.getReplicationCertAuth=nue;Yt.renewSelfSigned=lue;Yt.hostnamesFromCert=jw;Yt.getKey=hue;Yt.getHostnamesFromCertificate=pue;Yt.getPrimaryHostName=zw;var{urlToNodeName:A$,getThisNodeUrl:Xle,getThisNodeName:US,clearThisNodeName:Zle}=(is(),v(Go)),{readFileSync:eue,statSync:b$}=require("node:fs"),WMe=oe(),{getTicketKeys:tue,onMessageFromWorkers:rue}=nt(),Qa=Q(),{isMainThread:I$}=require("worker_threads"),{TLSSocket:N$,createSecureContext:zMe}=require("node:tls"),$w=3650,Wh=["127.0.0.1","localhost","::1"],Vw=[{name:"countryName",value:"USA"},{name:"stateOrProvinceName",value:"Colorado"},{name:"localityName",value:"Denver"},{name:"organizationName",value:"HarperDB, Inc."}];rue(async e=>{e.type===Ls.ITC_EVENT_TYPES.RESTART&&(os.initSync(!0),await Ww())});var Jr;function Za(){return Jr||(Jr=Jle().system.hdb_certificate,Jr||(Jr=Qle({table:"hdb_certificate",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"uses"},{attribute:"certificate"},{attribute:"is_authority"},{attribute:"private_key_name"},{attribute:"details"},{attribute:"is_self_signed"},{attribute:"__updatedtime__"}]}))),Jr}o(Za,"getCertTable");async function zh(){let e=D$("operations-api"),t={secureContexts:null,setSecureContext:o(s=>{},"setSecureContext")};await e.initialize(t);let r=t.secureContexts.get(US());if(!r)return;let n=new ko(r.options.cert);return r.cert_parsed=n,r.issuer=n.issuer,r}o(zh,"getReplicationCert");async function nue(){Za();let e=(await zh()).options.cert,r=new ko(e).issuer.match(/CN=(.*)/)?.[1];return Jr.get(r)}o(nue,"getReplicationCertAuth");var E$,Xa=new Map;function w$(){if(E$)return;E$=!0;let e=[{configKey:vl.TLS},{configKey:vl.OPERATIONSAPI_TLS}];Za();let t=hr.dirname(Hw.getConfigFilePath()),r;for(let{configKey:n}of e){let s=Hw.getConfigFromFile(n);if(s){Array.isArray(s)||(s=[s]);for(let i of s){let a=i.privateKey,c=a&&y$(zle(t,"keys"),a);c&&_$(a,l=>{Xa.set(c,l)},"private key");for(let l of[!1,!0]){let 
u=i[l?"certificateAuthority":"certificate"];if(u&&I$){let d;_$(u,f=>{if(h$.cert===f)return;let m=i.hostname??i.hostnames??i.host??i.hosts;m&&!Array.isArray(m)&&(m=[m]);let h=L$(u),p=new ko(h),_;try{_=zw(p)}catch(R){ht.error("error extracting host name from certificate",R);return}if(_==null){ht.error("No host name found on certificate");return}if(p.checkIssued(new ko(h$.cert)))return;let g=Jr.primaryStore.get(_),y=b$(u).mtimeMs,T=!g||g.is_self_signed?1:g.file_timestamp??g.__updatedtime__;if(g&&y<=T){y<T&&ht.info(`Certificate ${_} at ${u} is older (${new Date(y)}) than the certificate in the database (${T>1?new Date(T):"only self signed certificate available"})`);return}r=Jr.put({name:_,uses:["https",...n.includes("operations")?["operations"]:[]],ciphers:i.ciphers,certificate:h,private_key_name:c,is_authority:l,hostnames:m,fileTimestamp:y,details:{issuer:p.issuer.replace(/\n/g," "),subject:p.subject?.replace(/\n/g," "),subject_alt_name:p.subjectAltName,serial_number:p.serialNumber,valid_from:p.validFrom,valid_to:p.validTo}})},l?"certificate authority":"certificate")}}}}}return r}o(w$,"loadCertificates");function _$(e,t,r){let n,s=o((i,a)=>{try{let c=a.mtimeMs;c&&c!==n&&(n&&I$&&ht.warn(`Reloading ${r}:`,i),n=c,t(L$(i)))}catch(c){ht.error(`Error loading ${r}:`,i,c)}},"loadFile");Bn.existsSync(e)?s(e,b$(e)):ht.error(`${r} file not found:`,e),Kle(e,{persistent:!1}).on("change",s)}o(_$,"loadAndWatch");function kw(){let e=Xle();if(e==null){let t=Wh[0];return ht.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return A$(e)}o(kw,"getHost");function vS(){let e=US();if(e==null){let t=Wh[0];return ht.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return e}o(vS,"getCommonName");async function sue(){let e=await zh(),t=Ot.certificateFromPem(e.options.cert),r=Ot.privateKeyFromPem(e.options.key);ht.info("Creating CSR with cert named:",e.name);let n=Ot.createCertificationRequest();n.publicKey=t.publicKey;let s=[{name:"commonName",value:vS()},...Vw];ht.info("Creating CSR with subject",s),n.setSubject(s);let i=[{name:"unstructuredName",value:"HarperDB, Inc."},{name:"extensionRequest",extensions:O$()}];return ht.info("Creating CSR with attributes",i),n.setAttributes(i),n.sign(r),Pd.pki.certificationRequestToPem(n)}o(sue,"createCsr");function O$(){let e=Wh.includes(vS())?Wh:[...Wh,vS()];return e.includes(kw())||e.push(kw()),[{name:"basicConstraints",cA:!1,critical:!0},{name:"keyUsage",digitalSignature:!0,keyEncipherment:!0,critical:!0},{name:"extKeyUsage",serverAuth:!0,clientAuth:!0},{name:"nsCertType",client:!0,server:!0},{name:"subjectAltName",altNames:e.map(t=>S$.isIP(t)?{type:7,ip:t}:{type:2,value:t})}]}o(O$,"certExtensions");async function iue(e){let t={},r=hr.join(os.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME);if(e.csr){let n,s;Za();for await(let d of Jr.search([]))if(d.is_authority&&!d.details.issuer.includes("HarperDB-Certificate-Authority")){if(Xa.has(d.private_key_name)){n=Xa.get(d.private_key_name),s=d;break}else if(d.private_key_name&&await Bn.exists(hr.join(r,d.private_key_name))){n=Bn.readFile(hr.join(r,d.private_key_name)),s=d;break}}if(!n){let d=await Gw();s=d.ca,n=d.private_key}n=Ot.privateKeyFromPem(n),t.signingCA=s.certificate;let i=Ot.certificateFromPem(s.certificate);ht.info("Signing CSR with cert named",s.name);let a=Ot.certificationRequestFromPem(e.csr);try{a.verify()}catch(d){return ht.error(d),new Error("Error verifying CSR: "+d.message)}let 
c=Pd.pki.createCertificate();c.serialNumber="0"+Math.random().toString().slice(2,9),c.validity.notBefore=new Date;let l=new Date;c.validity.notAfter=l,c.validity.notAfter.setDate(l.getDate()+$w),ht.info("sign cert setting validity:",c.validity),ht.info("sign cert setting subject from CSR:",a.subject.attributes),c.setSubject(a.subject.attributes),ht.info("sign cert setting issuer:",i.subject.attributes),c.setIssuer(i.subject.attributes);let u=a.getAttribute({name:"extensionRequest"}).extensions;ht.info("sign cert adding extensions from CSR:",u),c.setExtensions(u),c.publicKey=a.publicKey,c.sign(n,Pd.md.sha256.create()),t.certificate=Ot.certificateToPem(c)}else ht.info("Sign cert did not receive a CSR from:",e.url,"only the CA will be returned");return t}o(iue,"signCertificate");async function oue(e,t){await Ld({name:US(),uses:["https","wss"],certificate:e,private_key_name:"privateKey.pem",is_authority:!1,is_self_signed:!0}),await Ld({name:t.subject.getField("CN").value,uses:["https","wss"],certificate:Ot.certificateToPem(t),private_key_name:"privateKey.pem",is_authority:!0,is_self_signed:!0})}o(oue,"createCertificateTable");async function Ld(e){let t=new ko(e.certificate);e.details={issuer:t.issuer.replace(/\n/g," "),subject:t.subject?.replace(/\n/g," "),subject_alt_name:t.subjectAltName,serial_number:t.serialNumber,valid_from:t.validFrom,valid_to:t.validTo},Za(),await Jr.patch(e)}o(Ld,"setCertTable");async function Kw(){let e=await Fw("rsa",{modulusLength:4096,publicKeyEncoding:{type:"spki",format:"pem"},privateKeyEncoding:{type:"pkcs8",format:"pem"}});return{publicKey:Ot.publicKeyFromPem(e.publicKey),privateKey:Ot.privateKeyFromPem(e.privateKey)}}o(Kw,"generateKeys");async function Yw(e,t,r){let n=Ot.createCertificate();if(!t){let a=await zh();t=Ot.certificateFromPem(a.options.cert).publicKey}n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+$w);let i=[{name:"commonName",value:vS()},...Vw];return n.setSubject(i),n.setIssuer(r.subject.attributes),n.setExtensions(O$()),n.sign(e,Pd.md.sha256.create()),Ot.certificateToPem(n)}o(Yw,"generateCertificates");async function Gw(){let e=await v$(),t;for(let r of e){if(!r.is_authority)continue;let n=await M$(r.private_key_name);if(r.private_key_name&&n&&new ko(r.certificate).checkPrivateKey(T$(n))){ht.trace(`CA named: ${r.name} found with matching private key`),t={ca:r,private_key:n};break}}if(t)return t;ht.trace("No CA found with matching private key")}o(Gw,"getCertAuthority");async function C$(e,t,r=!0){let n=Ot.createCertificate();n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+$w);let i=[{name:"commonName",value:`HarperDB-Certificate-Authority-${os.get(vl.REPLICATION_HOSTNAME)??A$(os.get(vl.REPLICATION_URL))??R$().split("-")[0]}`},...Vw];n.setSubject(i),n.setIssuer(i),n.setExtensions([{name:"basicConstraints",cA:!0,critical:!0},{name:"keyUsage",keyCertSign:!0,critical:!0}]),n.sign(e,Pd.md.sha256.create());let a=hr.join(os.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),c=hr.join(a,_i.PRIVATEKEY_PEM_NAME);return r&&await Bn.writeFile(c,Ot.privateKeyToPem(e)),n}o(C$,"generateCertAuthority");async function aue(){let{privateKey:e,publicKey:t}=await Kw(),r=await C$(e,t),n=await Yw(e,t,r);await oue(n,r),P$()}o(aue,"generateCertsKeys");async function cue(){let e=await 
Yw(Ot.privateKeyFromPem(_i.CERTIFICATE_VALUES.key),void 0,Ot.certificateFromPem(_i.CERTIFICATE_VALUES.cert)),t=hr.join(os.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),r=hr.join(t,_i.NATS_CERTIFICATE_PEM_NAME);await Bn.exists(r)||await Bn.writeFile(r,e);let n=hr.join(t,_i.NATS_CA_PEM_NAME);await Bn.exists(n)||await Bn.writeFile(n,_i.CERTIFICATE_VALUES.cert)}o(cue,"createNatsCerts");async function lue(){Za();for await(let e of Jr.search([{attribute:"is_self_signed",value:!0}]))await Jr.delete(e.name);await Ww()}o(lue,"renewSelfSigned");async function Ww(){Zle(),await w$(),Za();let e=await Gw();if(!e){ht.notify("A matching Certificate Authority and key was not found. A new CA will be created in advance, so it's available if needed.");let r=o(u=>{try{return{key:Ot.privateKeyFromPem(Bn.readFileSync(u)),keyPath:u}}catch(d){return ht.warn(`Failed to parse private key from ${u}:`,d.message),{key:null,keyPath:u}}},"tryToParseKey"),n=os.get(vl.TLS),s,i;if(Array.isArray(n)){for(let u of n)if(u.privateKey){let d=r(u.privateKey);if(s=d.key,i=d.keyPath,d.key)break}}else{let u=os.get(vl.TLS_PRIVATEKEY),d=r(u);s=d.key,i=d.keyPath}let a=hr.join(os.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),c=y$(a,i);s||(ht.warn("Unable to parse the TLS key",i,"A new key will be generated and used to create Certificate Authority"),{privateKey:s}=await Kw(),Bn.existsSync(hr.join(a,_i.PRIVATEKEY_PEM_NAME))&&(c=`privateKey${R$().split("-")[0]}.pem`),await Bn.writeFile(hr.join(a,c),Ot.privateKeyToPem(s)));let l=await C$(s,Ot.setRsaPublicKey(s.n,s.e),!1);await Ld({name:l.subject.getField("CN").value,uses:["https"],certificate:Ot.certificateToPem(l),private_key_name:c,is_authority:!0,is_self_signed:!0})}if(!await zh()){let r=US();ht.notify(`A suitable replication certificate was not found, creating new self singed cert named: ${r}`),e=e??await Gw();let n=Ot.certificateFromPem(e.ca.certificate),s=n.publicKey,i=await Yw(Ot.privateKeyFromPem(e.private_key),s,n);await Ld({name:r,uses:["https","operations","wss"],certificate:i,is_authority:!1,private_key_name:e.ca.private_key_name,is_self_signed:!0})}}o(Ww,"reviewSelfSignedCert");function P$(){let e=jle(Object.keys(Ls.CONFIG_PARAM_MAP),!0),t=hr.join(os.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME),r=hr.join(t,_i.PRIVATEKEY_PEM_NAME),n=hr.join(t,_i.NATS_CERTIFICATE_PEM_NAME),s=hr.join(t,_i.NATS_CA_PEM_NAME),i=Ls.CONFIG_PARAMS,a={[i.TLS_PRIVATEKEY]:e[i.TLS_PRIVATEKEY.toLowerCase()]?e[i.TLS_PRIVATEKEY.toLowerCase()]:r};e[i.TLS_CERTIFICATE.toLowerCase()]&&(a[i.TLS_CERTIFICATE]=e[i.TLS_CERTIFICATE.toLowerCase()]),e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.TLS_CERTIFICATEAUTHORITY]=e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATE]=e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]),e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_PRIVATEKEY]=e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY]=e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]),(e[i.CLUSTERING_ENABLED.toLowerCase()]||e.clustering)&&(a[i.CLUSTERING_TLS_CERTIFICATE]=e[i.CLUSTERING_TLS_CERTIFICATE.toLowerCase()]??n,a[i.CLUSTERING_TLS_CERT_AUTH]=e[i.CLUSTERING_TLS_CERT_AUTH.toLowerCase()]??s,a[i.CLUSTERING_TLS_PRIVATEKEY]=e[i.CLUSTERING_TLS_PRIVATEKEY.toLowerCase()]??r),Hw.updateConfigValue(void 0,void 0,a,!1,!0)}o(P$,"updateConfigCert");function L$(e){return e.startsWith("-----BEGIN")?e:eue(e,"utf8")}o(L$,"readPEM");var 
g$=MS.createSecureContext;MS.createSecureContext=function(e){if(!e.cert||!e.key)return g$(e);let t={...e};delete t.key,delete t.cert;let r=g$(t);return r.context.setCert(e.cert),r.context.setKey(e.key,void 0),r};var uue=N$.prototype._init;N$.prototype._init=function(e,t){uue.call(this,e,t);let r=this;this._handle.oncertcb=function(n){let s=n.servername;r._SNICallback(s,(i,a)=>{this.sni_context=a?.context||a,this.certCbDone()})}};var Ml=new Map;function D$(e,t){let r=new Map,n,s=!1;return i.initialize=a=>i.ready?i.ready:(a&&(a.secureContexts=r,a.secureContextsListeners=[]),i.ready=new Promise((c,l)=>{async function u(){try{r.clear(),Ml.clear();let d=0;if(DS===void 0){c();return}for await(let f of DS.system.hdb_certificate.search([])){let m=f.certificate,h=new ko(m);f.is_authority&&(h.asString=m,Ml.set(h.subject,m))}for await(let f of DS.system.hdb_certificate.search([]))try{if(f.is_authority)continue;let m=e==="operations-api",h=f.is_self_signed?1:2;m&&f.uses?.includes?.("operations")&&(h+=1);let p=await M$(f.private_key_name),_=f.certificate,g=new ko(_);if(Ml.has(g.issuer)&&(_+=`
  `+Ml.get(g.issuer)),!p||!_)throw new Error("Missing private key or certificate for secure server");let y={ciphers:f.ciphers,ticketKeys:tue(),availableCAs:Ml,ca:t&&Array.from(Ml.values()),cert:_,key:p,key_file:f.private_key_name,is_self_signed:f.is_self_signed};a&&(y.sessionIdContext=a.sessionIdContext);let T=MS.createSecureContext(y);T.name=f.name,T.options=y,T.quality=h,T.certificateAuthorities=Array.from(Ml),T.certStart=_.toString().slice(0,100);let R=f.hostnames??jw(g);Array.isArray(R)||(R=[R]);let N;for(let O of R)if(O){O[0]==="*"&&(s=!0,O=O.slice(1)),O===kw()&&(h+=2),S$.isIP(O)&&(N=!0);let F=r.get(O)?.quality??0;h>F&&r.set(O,T)}else Qa.error("No hostname found for certificate at",MS.certificate);Qa.trace("Adding TLS",T.name,"for",a.ports||"client","cert named",f.name,"hostnames",R,"quality",h,"best quality",d),h>d&&(i.defaultContext=n=T,d=h,a&&(a.defaultContext=T))}catch(m){Qa.error("Error applying TLS for",f.name,m)}a?.secureContextsListeners.forEach(f=>f()),c(n)}catch(d){l(d)}}o(u,"updateTLS"),DS?.system.hdb_certificate.subscribe({listener:o(()=>setTimeout(()=>u(),1500).unref(),"listener"),omitCurrent:!0}),u()})),i;function i(a,c){Qa.info("TLS requested for",a||"(no SNI)");let l=a;for(;;){let d=r.get(l);if(d)return Qa.debug("Found certificate for",a,d.certStart),d.updatedContext&&(d=d.updatedContext),c(null,d);if(s&&l){let f=l.indexOf(".",1);f<0?l="":l=l.slice(f)}else break}a?Qa.debug("No certificate found to match",a,"using the default certificate"):Qa.debug("No SNI, using the default certificate",n?.name);let u=n;u?u.updatedContext&&(u=u.updatedContext):Qa.info("No default certificate found"),c(null,u)}o(i,"SNICallback")}o(D$,"createTLSSelector");async function M$(e){let t=Xa.get(e);return!t&&e?await Bn.readFile(hr.join(os.get(vl.ROOTPATH),Ls.LICENSE_KEY_DIR_NAME,e),"utf8"):t}o(M$,"getPrivateKeyByName");async function v$(){Za();let e=[];for await(let t of Jr.search([]))e.push(t);return e}o(v$,"listCertificates");async function due(e){let t=qw(e,Ei.object({name:Ei.string().required(),certificate:Ei.string().required(),is_authority:Ei.boolean().required(),private_key:Ei.string(),hosts:Ei.array(),uses:Ei.array()}));if(t)throw new Ja(t.message);let{name:r,certificate:n,private_key:s,is_authority:i}=e,a=new ko(n),c=!1,l=!1,u;for(let[h,p]of Xa)!s&&!c&&a.checkPrivateKey(T$(p))&&(c=!0,u=h),s&&s===p&&(l=!0,u=h);if(!i&&!s&&!c)throw new Ja("A suitable private key was not found for this certificate");let d;if(!r){try{d=zw(a)}catch(h){ht.error(h)}if(d==null)throw new Ja("Error extracting certificate host name, please provide a name parameter")}let f=fue(r??d);s&&!c&&!l&&(await Bn.writeFile(hr.join(os.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME,f+".pem"),s),Xa.set(f,s));let m={name:r??d,certificate:n,is_authority:i,hosts:e.hosts,uses:e.uses};return(!i||i&&u||i&&s)&&(m.private_key_name=u??f+".pem"),e.ciphers&&(m.ciphers=e.ciphers),await Ld(m),"Successfully added certificate: "+f}o(due,"addCertificate");function fue(e){return e.replace(/[^a-z0-9\.]/gi,"-")}o(fue,"sanitizeName");async function mue(e){let t=qw(e,Ei.object({name:Ei.string().required()}));if(t)throw new Ja(t.message);let{name:r}=e;Za();let n=await Jr.get(r);if(!n)throw new Ja(r+" not found");let{private_key_name:s}=n;if(s){let i=Array.from(await Jr.search([{attribute:"private_key_name",value:s}]));i.length===1&&i[0].name===r&&(ht.info("Removing private key named",s),await Bn.remove(hr.join(os.getHdbBasePath(),Ls.LICENSE_KEY_DIR_NAME,s)))}return await Jr.delete(r),"Successfully removed "+r}o(mue,"removeCertificate");function zw(e){let 
t=e.subject?.match(/CN=(.*)/)?.[1];return t||jw(e)[0]}o(zw,"getPrimaryHostName");function jw(e){if(e.subjectAltName)return e.subjectAltName.split(",").map(r=>{let n=r.indexOf(":");if(r=r.slice(n+1),r=r.trim(),r[0]==='"')try{r=JSON.parse(r)}catch{}return r.indexOf("=")>-1?r.match(/CN=([^,]*)/)?.[1]:r}).filter(r=>r);let t=certObj.subject?.match(/CN=(.*)/)?.[1];return t?[t]:[]}o(jw,"hostnamesFromCert");async function hue(e){if(e.bypass_auth!==!0)throw new Ja("Unauthorized","401");let t=qw(e,Ei.object({name:Ei.string().required()}));if(t)throw new Ja(t.message);let{name:r}=e;if(r===".jwtPrivate")return(await p$()).privateKey;if(r===".jwtPublic")return(await p$()).publicKey;if(Xa.get(r))return Xa.get(e.name);throw new Ja("Key not found")}o(hue,"getKey");function pue(e){return[e.subject?.CN,...e.subjectaltname.split(",").filter(t=>t.trim().startsWith("DNS:")).map(t=>t.trim().substring(4))]}o(pue,"getHostnamesFromCertificate")});var nV={};Oe(nV,{CONFIRMATION_STATUS_POSITION:()=>eV,LATENCY_POSITION:()=>qS,NodeReplicationConnection:()=>vd,OPERATION_REQUEST:()=>Zw,RECEIVED_TIME_POSITION:()=>tO,RECEIVED_VERSION_POSITION:()=>eO,RECEIVING_STATUS_POSITION:()=>rO,RECEIVING_STATUS_RECEIVING:()=>rV,RECEIVING_STATUS_WAITING:()=>tV,SENDING_TIME_POSITION:()=>jh,createWebSocket:()=>$S,databaseSubscriptions:()=>tc,replicateOverWS:()=>Qh,tableUpdateListeners:()=>sO});async function $S(e,t){let{authorization:r,rejectUnauthorized:n}=t||{},s=Ze(),i;if(e==null)throw new TypeError(`Invalid URL: Expected a string URL for node "${s}" but received ${e}`);if(e.includes("wss://")){if(!Jw){let l=(0,j$.createTLSSelector)("operations-api"),u={secureContexts:null};await l.initialize(u),Jw=u.secureContexts}if(i=Jw.get(s),i&&ae.debug?.("Creating web socket for URL",e,"with certificate named:",i.name),!i&&n!==!1)throw new Error("Unable to find a valid certificate to use for replication to connect to "+e)}let a={};r&&(a.Authorization=r);let c={headers:a,localAddress:s?.startsWith("127.0")?s:void 0,servername:(0,J$.isIP)(t?.serverName)?void 0:t?.serverName,noDelay:!0,highWaterMark:128*1024,rejectUnauthorized:n!==!1,secureContext:void 0};return i&&(GS?.caCount!==qo.size&&(GS=Q$.createSecureContext({...i.options,ca:[...qo,...i.options.availableCAs.values()]}),GS.caCount=qo.size),c.secureContext=GS),new W$.WebSocket(e,"harperdb-replication-v1",c)}function Qh(e,t,r){let n=t.port||t.securePort,s=Ul.pid%1e3+"-"+z$.threadId+(n?"s:"+n:"c:"+t.url?.slice(-4))+" "+Math.random().toString().slice(2,3);ae.debug?.(s,"Initializing replication connection",r);let i=0,a=Buffer.allocUnsafeSlow(1024),c=0,l=new DataView(a.buffer,0,1024),u=t.database,d=t.databaseSubscriptions||tc,f,m,h=!1,p=t.subscription;p?.then&&p.then(E=>{p=E,p.auditStore&&(f=p.auditStore)});let _=t.tables||u&&st()[u],g;if(!r){ae.error?.(s,"No authorization provided"),Ss(1008,"Unauthorized");return}let y=new Map,T=[];g=r.name,g&&t.connection&&(t.connection.nodeName=g);let R,N,O,F,Z,G,Y,q=6e4,K,ce=0,le=0,se=0,pe=Y$.default.get(U.REPLICATION_BLOBTIMEOUT)??12e4,Ne=new Map,Ue=[],xe=0,Rr;if(t.url){let E=o(()=>{Z&&le===e._socket?.bytesRead&&se===e._socket?.bytesWritten?e.terminate():(Z=performance.now(),e.ping(),le=e._socket?.bytesRead,se=e._socket?.bytesWritten)},"sendPing");O=setInterval(E,V$).unref(),E()}else Jt();e._socket?.setMaxListeners(200);function Jt(){clearTimeout(F),le=e._socket?.bytesRead,se=e._socket?.bytesWritten,F=setTimeout(()=>{le===e._socket?.bytesRead&&se===e._socket?.bytesWritten&&(ae.warn?.(`Timeout waiting for ping from ${g}, terminating connection and 
reconnecting`),e.terminate())},V$*2).unref()}o(Jt,"resetPingTimer");function kt(){if(!(!g||!u))return m||(m=Nd(f,u,g)),m}o(kt,"getSharedStatus"),u&&ga(u);let Xt,Mf,Cc=[],Gt=[],vf,Uf=[],bE=[],IE=[],vy=150,xf=25,Pe=0,NE=0,Bf=!1,po,Lr,yr,Ff;e.on("message",E=>{ce=performance.now();try{let S=E.dataView=new $c(E.buffer,E.byteOffset,E.byteLength);if(E[0]>127){let P=(0,et.decode)(E),[L,D,H]=P;switch(L){case x$:{if(D){if(g){if(g!==D){ae.error?.(s,`Node name mismatch, expecting to connect to ${g}, but peer reported name as ${D}, disconnecting`),e.send((0,et.encode)([Dd])),Ss(1008,"Node name mismatch");return}}else if(g=D,t.connection?.tentativeNode){let B=t.connection.tentativeNode;B.name=g,t.connection.tentativeNode=null,Ho(g,B)}if(t.connection&&(t.connection.nodeName=g),ae.debug?.(s,"received node name:",g,"db:",u??P[2]),!u)try{ga(u=P[2]),u==="system"&&(Xt=za(t,(B,de)=>{hu(de)&&Sa(de)}),e.on("close",()=>{Xt?.remove()}))}catch(B){ae.warn?.(s,"Error setting database",B),e.send((0,et.encode)([Dd])),Ss(1008,B.message);return}Dr()}break}case q$:{ae.debug?.(s,"Received table definitions for",D.map(B=>B.table));for(let B of D){let de=P[2];B.database=de;let me;hu(de)&&(de==="system"?ke[de]?.[B.table]||(me=V(B,ke[de]?.[B.table])):me=V(B,ke[de]?.[B.table]),f||(f=me?.auditStore),_||(_=st()?.[de]))}break}case Dd:Ss();break;case Zw:try{let B=r?.replicates||r?.subscribers||r?.name;ae.debug?.("Received operation request",D,"from",g),server.operation(D,{user:r},!B).then(de=>{Array.isArray(de)&&(de={results:de}),de.requestId=D.requestId,e.send((0,et.encode)([BS,de]))},de=>{e.send((0,et.encode)([BS,{requestId:D.requestId,error:(0,Md.errorToString)(de)}]))})}catch(B){e.send((0,et.encode)([BS,{requestId:D.requestId,error:(0,Md.errorToString)(B)}]))}break;case BS:let{resolve:C,reject:x}=y.get(D.requestId);D.error?x(new Error(D.error)):C(D),y.delete(D.requestId);break;case Qw:let z=P[3];if(!_){u?ae.error?.(s,"No database found for",u):ae.error?.(s,"Database name never received"),Ss();return}let ne=_[z];ne=V({table:z,database:u,attributes:D.attributes,schemaDefined:D.schemaDefined},ne),Cc[H]={name:z,decoder:new et.Packr({useBigIntExtension:!0,randomAccessStructure:!0,freezeData:!0,typedStructs:D.typedStructs,structures:D.structures}),getEntry(B){return ne.primaryStore.getEntry(B)},rootStore:ne.primaryStore.rootStore};break;case B$:Ff=f?i$(D,f):new Map,vf=P[2],ae.debug?.(s,`Acknowledged subscription request, receiving messages for nodes: ${vf}`);break;case F$:let re=H;IE[re]=D;break;case G$:kt()[eV]=D,ae.trace?.(s,"received and broadcasting committed update",D),kt().buffer.notify();break;case k$:R=D,p.send({type:"end_txn",localTime:R,remoteNodeIds:T});break;case FS:{let B=P[1],{fileId:de,size:me,finished:Se,error:ee}=B,j=Ne.get(de);ae.debug?.("Received blob",de,"has stream",!!j,"connectedToBlob",!!j?.connectedToBlob,"length",P[2].length,"finished",Se),j||(j=new Xw.PassThrough,j.expectedSize=me,Ne.set(de,j)),j.lastChunk=Date.now();let he=P[2];it(he.byteLength,"bytes-received",`${g}.${u}`,"replication","blob");try{Se?(ee?(j.on("error",()=>{}),j.destroy(new Error("Blob error: "+ee+" for record "+(j.recordId??"unknown")+" from "+g))):j.end(he),j.connectedToBlob&&Ne.delete(de)):j.write(he)}catch(Te){ae.error?.(`Error receiving blob for ${j.recordId} from ${g} and streaming to storage`,Te),Ne.delete(de)}break}case H$:{let B=D,de;try{let me=P[3],Se=Gt[H]||(Gt[H]=_[P[4]]);if(!Se)return ae.warn?.("Unknown table id trying to handle record request",H);let 
ee=Se.primaryStore.getBinaryFast(Symbol.for("structures")),j=ee?.length??0;if(j>0&&j!==NE){NE=j;let Te=(0,et.decode)(ee);e.send((0,et.encode)([Qw,{typedStructs:Te.typed,structures:Te.named},H,Se.tableName]))}let he=Se.primaryStore.getBinaryFast(me);if(he){let Te=Se.primaryStore.decoder.decode(he,{valueAsBuffer:!0}),fe=lt||{};fe.version=(0,X$.getLastVersion)(),lt&&lt[wu]&Vr&&(Te=Buffer.from(Te),fm(()=>Se.primaryStore.decoder.decode(he),We=>_a(We,me),Se.primaryStore.rootStore)),de=(0,et.encode)([xS,B,{value:Te,expiresAt:fe.expiresAt,version:fe.version,residencyId:fe.residencyId,nodeId:fe.nodeId,user:fe.user}])}else de=(0,et.encode)([xS,B])}catch(me){de=(0,et.encode)([xS,B,{error:me.message}])}e.send(de);break}case xS:{let{resolve:B,reject:de,tableId:me,key:Se}=y.get(P[1]),ee=P[2];if(ee?.error)de(new Error(ee.error));else if(ee){let j;s_(()=>{let he=Cc[me].decoder.decode(ee.value);ee.value=he,ee.key=Se,B(ee)||j&&setTimeout(()=>j.forEach(t_),6e4).unref()},f?.rootStore,he=>{let Te=Pc(he,Se);return j||(j=[]),j.push(Te),Te})}else B();y.delete(P[1]);break}case U$:{yr=D;let B,de,me=!1;if(p){if(u!==p.databaseName&&!p.then){ae.error?.("Subscription request for wrong database",u,p.databaseName);return}}else p=d.get(u);if(ae.debug?.(s,"received subscription request for",u,"at",yr),!p){let Ee;p=new Promise(tt=>{ae.debug?.("Waiting for subscription to database "+u),Ee=tt}),p.ready=Ee,tc.set(u,p)}if(r.name)de=Kt().subscribe(r.name),de.then(async Ee=>{B=Ee;for await(let tt of B){let rt=tt.value;if(!(rt?.replicates===!0||rt?.replicates?.receives||rt?.subscriptions?.some(cr=>(cr.database||cr.schema)===u&&cr.publish!==!1))){me=!0,e.send((0,et.encode)([Dd])),Ss(1008,`Unauthorized database subscription to ${u}`);return}}},Ee=>{ae.error?.(s,"Error subscribing to HDB nodes",Ee)});else if(!(r?.role?.permission?.super_user||r.replicates)){e.send((0,et.encode)([Dd])),Ss(1008,`Unauthorized database subscription to ${u}`);return}if(Lr&&(ae.debug?.(s,"stopping previous subscription",u),Lr.emit("close")),yr.length===0)return;let Se=yr[0],ee=o(Ee=>{if(Ee&&(Se.replicateByDefault?!Se.tables.includes(Ee.tableName):Se.tables.includes(Ee.tableName)))return{table:Ee}},"tableToTableEntry"),j={txnTime:0},he,Te,fe=1/0,We,Et=o((Ee,tt)=>{if(Ee.type==="end_txn"){j.txnTime&&(a[i]!==66&&ae.error?.("Invalid encoding of message"),pu(9),pu(p_),Dc(We=tt),dt()),i=c,j.txnTime=0;return}let rt=Ee.nodeId,cr=Ee.tableId,Mt=Te[cr];if(!Mt&&(Mt=Te[cr]=ee(p.tableById[cr]),!Mt))return ae.debug?.("Not subscribed to table",cr);let Ts=Mt.table,vt=Ts.primaryStore,Qs=vt.encoder;(Ee.extendedType&T_||!Qs.typedStructs)&&(Qs._mergeStructures(Qs.getStructures()),Qs.typedStructs&&(Qs.lastTypedStructuresLength=Qs.typedStructs.length));let Eu=he[rt];if(!(Eu&&Eu.startTime<tt&&(!Eu.endTime||Eu.endTime>tt)))return kS&&ae.trace?.(s,"skipping replication update",Ee.recordId,"to:",g,"from:",rt,"subscribed:",he),ZD();kS&&ae.trace?.(s,"sending replication update",Ee.recordId,"to:",g,"from:",rt,"subscribed:",he);let Uy=Ee.version;j.txnTime!==Uy&&(j.txnTime&&(kS&&ae.trace?.(s,"new txn time, sending queued txn",j.txnTime),a[i]!==66&&ae.error?.("Invalid encoding of message"),dt()),j.txnTime=Uy,i=c,Dc(Uy));let Mc=Ee.residencyId,xy=mu(Mc,Ts),OE;if(xy&&!xy.includes(g)){let Js=mu(Ee.previousResidencyId,Ts);if(Js&&!Js.includes(g)&&(Ee.type==="put"||Ee.type==="patch")||Ts.getResidencyById)return ZD();let kf=Ee.recordId;ae.trace?.(s,"sending invalidation",kf,g,"from",rt);let Gf=0;Mc&&(Gf|=Vc),Ee.previousResidencyId&&(Gf|=Kc);let Hy,CE=null;for(let eM in 
Ts.indices){if(!CE){if(Hy=Ee.getValue(vt,!0),!Hy)break;CE={}}CE[eM]=Hy[eM]}OE=Yc(Ee.version,cr,kf,null,rt,Ee.user,Ee.type==="put"||Ee.type==="patch"?"invalidate":Ee.type,Qs.encode(CE),Gf,Mc,Ee.previousResidencyId,Ee.expiresAt)}function ZD(){return ae.trace?.(s,"skipping audit record",Ee.recordId),G||(G=setTimeout(()=>{G=null,(We||0)+$$/2<fe&&(kS&&ae.trace?.(s,"sending skipped sequence update",fe),e.send((0,et.encode)([k$,fe])))},$$).unref()),new Promise(setImmediate)}o(ZD,"skipAuditRecord");let By=Qs.typedStructs,Fy=Qs.structures;if((By?.length!=Mt.typed_length||Fy?.length!=Mt.structure_length)&&(Mt.typed_length=By?.length,Mt.structure_length=Fy.length,ae.debug?.(s,"send table struct",Mt.typed_length,Mt.structure_length),Mt.sentName||(Mt.sentName=!0),e.send((0,et.encode)([Qw,{typedStructs:By,structures:Fy,attributes:Ts.attributes,schemaDefined:Ts.schemaDefined},cr,Mt.table.tableName]))),Mc&&!bE[Mc]&&(e.send((0,et.encode)([F$,xy,Mc])),bE[Mc]=!0),OE)pu(OE.length),Lc(OE);else{let Js=Ee.encoded;Ee.extendedType&Vr&&fm(()=>Ee.getValue(vt),Gf=>_a(Gf,Ee.recordId),vt.rootStore);let kf=Js[0]===66?8:0;pu(Js.length-kf),Lc(Js,kf),ae.trace?.("wrote record",Ee.recordId,"length:",Js.length)}return e._socket.writableNeedDrain?new Promise(Js=>{ae.debug?.(`Waiting for remote node ${g} to allow more commits ${e._socket.writableNeedDrain?"due to network backlog":"due to requested flow directive"}`),e._socket.once("drain",Js)}):xe>xf?new Promise(Js=>{Rr=Js}):new Promise(setImmediate)},"sendAuditRecord"),dt=o(()=>{c-i>8?(e.send(a.subarray(i,c)),ae.debug?.(s,"Sent message, size:",c-i),it(c-i,"bytes-sent",`${g}.${u}`,"replication","egress")):ae.debug?.(s,"skipping empty transaction")},"sendQueuedData");Lr=new nO.EventEmitter,Lr.once("close",()=>{me=!0,B?.end()});for(let{startTime:Ee}of yr)Ee<fe&&(fe=Ee);(de||Promise.resolve()).then(async()=>{p=await p,f=p.auditStore,Te=p.tableById.map(ee),he=[];for(let{name:tt,startTime:rt,endTime:cr}of yr){let Mt=OS(tt,f);ae.debug?.("subscription to",tt,"using local id",Mt,"starting",rt),he[Mt]={startTime:rt,endTime:cr}}Sa(u),Xt||(Xt=bl(tt=>{tt.databaseName===u&&Sa(u)}),Mf=Sh(tt=>{tt===u&&(e.send((0,et.encode)([Dd])),Ss())}),e.on("close",()=>{Xt?.remove(),Mf?.remove()})),e.send((0,et.encode)([B$,Gh(p.auditStore),yr.map(({name:tt})=>tt)]));let Ee=!0;do{isFinite(fe)||(ae.warn?.("Invalid sequence id "+fe),Ss(1008,"Invalid sequence id"+fe));let tt;if(Ee&&!me&&(Ee=!1,fe===0)){ae.info?.("Replicating all tables to",g);let rt=fe,cr=VS(f);for(let Mt in _){if(!ee(Mt))continue;let Ts=_[Mt];for(let vt of Ts.primaryStore.getRange({snapshot:!1,versions:!0})){if(me)return;if(vt.localTime>=fe){ae.trace?.(s,"Copying record from",u,Mt,vt.key,vt.localTime),rt=Math.max(vt.localTime,rt),tt=!0,kt()[jh]=1;let Qs=Yc(vt.version,Ts.tableId,vt.key,null,cr,null,"put",fm(()=>Ts.primaryStore.encoder.encode(vt.value),Eu=>_a(Eu,vt.key)),vt.metadataFlags&-256,vt.residencyId,null,vt.expiresAt);await Et({recordId:vt.key,tableId:Ts.tableId,type:"put",getValue(){return vt.value},encoded:Qs,version:vt.version,residencyId:vt.residencyId,nodeId:cr,extendedType:vt.metadataFlags},vt.localTime)}}}tt&&Et({type:"end_txn"},fe),kt()[jh]=0,fe=rt}for(let{key:rt,value:cr}of f.getRange({start:fe||1,exclusiveStart:!0,snapshot:!1})){if(me)return;let Mt=At(cr);ae.debug?.("sending audit record",new Date(rt)),kt()[jh]=rt,fe=rt,await Et(Mt,rt),Lr.startTime=rt,tt=!0}tt&&Et({type:"end_txn"},fe),kt()[jh]=0,await gU(f)}while(!me)}).catch(Ee=>{ae.error?.(s,"Error handling subscription to node",Ee),Ss(1008,"Error handling subscription to 
node")});break}}return}S.position=8;let A=!0,b,I;do{kt();let P=S.readInt();if(P===9&&S.getUint8(S.position)==p_){S.position++,R=I=S.readFloat64(),m[eO]=R,m[tO]=Date.now(),m[rO]=tV,ae.trace?.("received remote sequence update",R,u);break}let L=S.position,D=At(E,L,L+P),H=Cc[D.tableId];H||ae.error?.(`No table found with an id of ${D.tableId}`);let C;D.residencyId&&(C=IE[D.residencyId],ae.trace?.(s,"received residency list",C,D.type,D.recordId));try{let x=D.recordId;s_(()=>{b={table:H.name,id:D.recordId,type:D.type,nodeId:Ff.get(D.nodeId),residencyList:C,timestamp:D.version,value:D.getValue(H),user:D.user,beginTxn:A,expiresAt:D.expiresAt}},f?.rootStore,z=>Pc(z,x))}catch(x){throw x.message+="typed structures for current decoder"+JSON.stringify(H.decoder.typedStructs),x}A=!1,ae.trace?.(s,"received replication message",D.type,"id",b.id,"version",new Date(D.version),"nodeId",b.nodeId),m[eO]=D.version,m[tO]=Date.now(),m[rO]=rV,p.send(b),S.position=L+P}while(S.position<E.byteLength);Pe++,it(E.byteLength,"bytes-received",`${g}.${u}.${b?.table||"unknown_table"}`,"replication","ingest"),Pe>vy&&!Bf&&(Bf=!0,e.pause(),ae.debug?.(`Commit backlog causing replication back-pressure, requesting that ${g} pause replication`)),p.send({type:"end_txn",localTime:R,remoteNodeIds:T,async onCommit(){if(b){let P=Date.now()-b.timestamp;it(P,"replication-latency",g+"."+u+"."+b.table,b.type,"ingest")}Pe--,Bf&&(Bf=!1,e.resume(),ae.debug?.(`Replication resuming ${g}`)),Ue.length>0&&await Promise.all(Ue),ae.trace?.("All blobs finished"),!N&&I&&(ae.trace?.(s,"queuing confirmation of a commit at",I),setTimeout(()=>{e.send((0,et.encode)([G$,N])),ae.trace?.(s,"sent confirmation of a commit at",N),N=null},_ue)),N=I,ae.debug?.("last sequence committed",new Date(I),u)}})}catch(S){ae.error?.(s,"Error handling incoming replication message",S)}}),e.on("ping",Jt),e.on("pong",()=>{if(t.connection){let E=performance.now()-Z;t.connection.latency=E,kt()&&(m[qS]=E),t.isSubscriptionConnection&&Dl({name:g,database:u,url:t.url,latency:E})}Z=null}),e.on("close",(E,S)=>{clearInterval(O),clearTimeout(F),clearInterval(Y),Lr&&Lr.emit("close"),po&&po.end();for(let[A,{reject:b}]of y)b(new Error(`Connection closed ${S?.toString()} ${E}`));ae.debug?.(s,"closed",E,S?.toString())});function Ss(E,S){try{e.isFinished=!0,ae.debug?.(s,"closing",g,u,E,S),e.close(E,S),t.connection?.emit("finished")}catch(A){ae.error?.(s,"Error closing connection",A)}}o(Ss,"close");let Ea=new Set;async function _a(E,S){let A=r_(E);if(Ea.has(A)){ae.debug?.("Blob already being sent",A);return}Ea.add(A);try{let b;xe++;for await(let I of E.stream())b&&(ae.debug?.("Sending blob chunk",A,"length",b.length),e.send((0,et.encode)([FS,{fileId:A,size:E.size},b]))),b=I,e._socket.writableNeedDrain&&(ae.debug?.("draining",A),await new Promise(P=>e._socket.once("drain",P)),ae.debug?.("drained",A)),it(I.length,"bytes-sent",`${g}.${u}`,"replication","blob");ae.debug?.("Sending final blob chunk",A,"length",b.length),e.send((0,et.encode)([FS,{fileId:A,size:E.size,finished:!0},b]))}catch(b){ae.warn?.("Error sending blob",b,"blob id",A,"for record",S),e.send((0,et.encode)([FS,{fileId:A,finished:!0,error:(0,Md.errorToString)(b)},Buffer.alloc(0)]))}finally{Ea.delete(A),xe--,xe<xf&&Rr?.()}}o(_a,"sendBlobs");function Pc(E,S){let A=r_(E),b=Ne.get(A);ae.debug?.("Received transaction with blob",A,"has stream",!!b,"ended",!!b?.writableEnded),b?b.writableEnded&&Ne.delete(A):(b=new Xw.PassThrough,Ne.set(A,b)),b.connectedToBlob=!0,b.lastChunk=Date.now(),b.recordId=S,E.size===void 
0&&b.expectedSize&&(E.size=b.expectedSize);let I=b.blob??createBlob(b,E);b.blob=I;let P=Ao(()=>dm(I).saving,p.auditStore?.rootStore);return P&&(P.blobId=A,Ue.push(P),P.finally(()=>{ae.debug?.(`Finished receiving blob stream ${A}`),Ue.splice(Ue.indexOf(P),1)})),I}o(Pc,"receiveBlobs");function Dr(){if(h||(h=!0,t.connection?.on("subscriptions-updated",Dr)),!f&&p&&(f=p.auditStore),t.connection?.isFinished)throw new Error("Can not make a subscription request on a connection that is already closed");let E=new Map;f||(f=p?.auditStore);try{for(let b of p?.dbisDB?.getRange({start:Symbol.for("seq"),end:[Symbol.for("seq"),Buffer.from([255])]})||[])for(let I of b.value.nodes||[])I.lastTxnTime>(E.get(I.id)??0)&&E.set(I.id,I.lastTxnTime)}catch(b){if(!b.message.includes("Can not re"))throw b}let S=t.connection?.nodeSubscriptions?.[0];T=[];let A=t.connection?.nodeSubscriptions.map((b,I)=>{let P=[],{replicateByDefault:L}=b;if(b.subscriptions){for(let x of b.subscriptions)if(x.subscribe&&(x.schema||x.database)===u){let z=x.table;_?.[z]?.replicate!==!1&&P.push(z)}L=!1}else for(let x in _)(L?_[x].replicate===!1:_[x].replicate)&&P.push(x);let D=f&&OS(b.name,f),H=p?.dbisDB?.get([Symbol.for("seq"),D])??1,C=Math.max(H?.seqId??1,(typeof b.startTime=="string"?new Date(b.startTime).getTime():b.startTime)??1);if(ae.debug?.("Starting time recorded in db",b.name,D,u,H?.seqId,"start time:",C,new Date(C)),S!==b){let x=f&&OS(S.name,f),z=p?.dbisDB?.get([Symbol.for("seq"),x])??1;for(let ne of z?.nodes||[])ne.name===b.name&&(C=ne.seqId,ae.debug?.("Using sequence id from proxy node",S.name,C))}if(D===void 0?ae.warn("Starting subscription request from node",b,"but no node id found"):T.push(D),E.get(D)>C&&(C=E.get(D),ae.debug?.("Updating start time from more recent txn recorded",S.name,C)),C===1&&HS)try{new URL(HS).hostname===b.name?(ae.warn?.(`Requesting full copy of database ${u} from ${HS}`),C=0):C=Date.now()-6e4}catch(x){ae.error?.("Error parsing leader URL",HS,x)}return ae.trace?.(s,"defining subscription request",b.name,u,new Date(C)),{name:b.name,replicateByDefault:L,tables:P,startTime:C,endTime:b.endTime}});if(A)if(ae.debug?.(s,"sending subscription request",A,p?.dbisDB?.path),clearTimeout(K),A.length>0)e.send((0,et.encode)([U$,A]));else{let b=o(()=>{let I=performance.now();K=setTimeout(()=>{ce<=I?Ss(1008,"Connection has no subscriptions and is no longer used"):b()},q).unref()},"scheduleClose");b()}}o(Dr,"sendSubscriptionRequestUpdate");function mu(E,S){if(!E)return;let A=Uf[E];return A||(A=S.getResidencyRecord(E),Uf[E]=A),A}o(mu,"getResidence");function hu(E){return!(ec&&ec!="*"&&!ec[E]&&!ec.includes?.(E)&&!ec.some?.(S=>S.name===E))}o(hu,"checkDatabaseAccess");function ga(E){if(p=p||d.get(E),!hu(E))throw new Error(`Access to database "${E}" is not permitted`);p||ae.warn?.(`No database named "${E}" was declared and registered`),f=p?.auditStore,_||(_=st()?.[E]);let S=Ze();if(S===g)throw S?new Error("Should not connect to self",S):new Error("Node name not defined");return wE(S,E),!0}o(ga,"setDatabase");function wE(E,S){let A=st()?.[S],b=[];for(let I in A){let P=A[I];b.push({table:I,schemaDefined:P.schemaDefined,attributes:P.attributes.map(L=>({name:L.name,type:L.type,isPrimaryKey:L.isPrimaryKey}))})}ae.trace?.("Sending database info for node",E,"database name",S),e.send((0,et.encode)([x$,E,S,b]))}o(wE,"sendNodeDBName");function Sa(E){let S=st()?.[E],A=[];for(let b in S){if(yr&&!yr.some(P=>P.replicateByDefault?!P.tables.includes(b):P.tables.includes(b)))continue;let 
I=S[b];A.push({table:b,schemaDefined:I.schemaDefined,attributes:I.attributes.map(P=>({name:P.name,type:P.type,isPrimaryKey:P.isPrimaryKey}))})}e.send((0,et.encode)([q$,A,E]))}o(Sa,"sendDBSchema"),Y=setInterval(()=>{for(let[E,S]of Ne)S.lastChunk+pe<Date.now()&&(ae.warn?.(`Timeout waiting for blob stream to finish ${E} for record ${S.recordId??"unknown"} from ${g}`),Ne.delete(E),S.end())},pe).unref();let Ta=1,Hf=[];return{end(){po&&po.end(),Lr&&Lr.emit("close")},getRecord(E){let S=Ta++;return new Promise((A,b)=>{let I=[H$,S,E.table.tableId,E.id];Hf[E.table.tableId]||(I.push(E.table.tableName),Hf[E.table.tableId]=!0),e.send((0,et.encode)(I)),ce=performance.now(),y.set(S,{tableId:E.table.tableId,key:E.id,resolve(P){let{table:L,entry:D}=E;if(A(P),P)return L._recordRelocate(D,P)},reject:b})})},sendOperation(E){let S=Ta++;return E.requestId=S,e.send((0,et.encode)([Zw,E])),new Promise((A,b)=>{y.set(S,{resolve:A,reject:b})})}};function pu(E){Ra(5),E<128?a[c++]=E:E<16384?(l.setUint16(c,E|32768),c+=2):E<1056964608?(l.setUint32(c,E|3221225472),c+=4):(a[c]=255,l.setUint32(c+1,E),c+=5)}function Lc(E,S=0,A=E.length){let b=A-S;Ra(b),E.copy(a,c,S,A),c+=b}function Dc(E){Ra(8),l.setFloat64(c,E),c+=8}function Ra(E){if(E+16>a.length-c){let S=Buffer.allocUnsafeSlow(c+E-i+65536>>10<<11);a.copy(S,0,i,c),c=c-i,i=0,a=S,l=new DataView(a.buffer,0,a.length)}}function V(E,S){let A=E.database??"data";if(A!=="data"&&!ke[A]){ae.warn?.("Database not found",E.database);return}S||(S={});let b=S.schemaDefined,I=!1,P=E.schemaDefined,L=S.attributes||[];for(let D=0;D<E.attributes?.length;D++){let H=E.attributes[D],C=L.find(x=>x.name===H.name);(!C||C.type!==H.type)&&(b?ae.error?.(`Schema for '${u}.${E.table}' is defined locally, but attribute '${H.name}: ${H.type}' from '${g}' does not match local attribute ${C?"'"+C.name+": "+C.type+"'":"which does not exist"}`):(I=!0,P||(H.indexed=!0),C?L[L.indexOf(C)]=H:L.push(H)))}return I?(ae.debug?.("(Re)creating",E),ze({table:E.table,database:E.database,schemaDefined:E.schemaDefined,attributes:L,...S})):S}}var Y$,et,W$,z$,Md,nO,j$,Q$,Ul,J$,Xw,X$,Z$,ae,U$,x$,B$,Dd,F$,Qw,H$,xS,Zw,BS,k$,G$,q$,FS,eV,eO,tO,jh,qS,rO,tV,rV,Eue,HS,sO,tc,kS,$$,_ue,V$,Jw,GS,K$,vd,iO=ue(()=>{De();Li();Mw();JA();is();Y$=w(oe());k();Wc();et=require("msgpackr"),W$=require("ws"),z$=require("worker_threads"),Md=w(Q());Yh();nO=require("events"),j$=w(as()),Q$=w(require("node:tls"));Ll();Ul=w(require("node:process")),J$=require("node:net");Wi();Kn();Xw=require("node:stream"),X$=require("lmdb"),Z$=w(require("minimist")),ae=(0,Md.forComponent)("replication").conditional,U$=129,x$=140,B$=141,Dd=142,F$=130,Qw=132,H$=133,xS=134,Zw=136,BS=137,k$=143,G$=144,q$=145,FS=146,eV=0,eO=1,tO=2,jh=3,qS=4,rO=5,tV=0,rV=1,Eue=(0,Z$.default)(Ul.argv),HS=Eue.HDB_LEADER_URL??Ul.env.HDB_LEADER_URL,sO=new Map,tc=new Map,kS=!0,$$=300,_ue=2,V$=3e4;o($S,"createWebSocket");K$=500,vd=class extends nO.EventEmitter{static{o(this,"NodeReplicationConnection")}socket;startTime;retryTime=K$;retries=0;isConnected=!0;isFinished=!1;nodeSubscriptions;latency=0;replicateTablesByDefault;session;sessionResolve;sessionReject;url;subscription;databaseName;nodeName;authorization;constructor(t,r,n,s,i){super(),this.url=t,this.subscription=r,this.databaseName=n,this.authorization=i,this.nodeName=this.nodeName??pi(t)}async connect(){this.session||this.resetSession();let t=[];this.socket=await $S(this.url,{serverName:this.nodeName,authorization:this.authorization});let r;ae.debug?.(`Connecting to ${this.url}, db: ${this.databaseName}, process 
${Ul.pid}`),this.socket.on("open",()=>{this.socket._socket.unref(),ae[this.isConnected?"info":"warn"]?.(`Connected to ${this.url}, db: ${this.databaseName}`),this.retries=0,this.retryTime=K$,this.nodeSubscriptions&&Dl({name:this.nodeName,database:this.databaseName,url:this.url}),this.isConnected=!0,r=Qh(this.socket,{database:this.databaseName,subscription:this.subscription,url:this.url,connection:this,isSubscriptionConnection:this.nodeSubscriptions!==void 0},{replicates:!0}),this.sessionResolve(r)}),this.socket.on("error",n=>{n.code==="SELF_SIGNED_CERT_IN_CHAIN"?(ae.warn?.(`Can not connect to ${this.url}, this server does not have a certificate authority for the certificate provided by ${this.url}`),n.isHandled=!0):n.code!=="ECONNREFUSED"&&(n.code==="UNABLE_TO_VERIFY_LEAF_SIGNATURE"?ae.error?.(`Can not connect to ${this.url}, the certificate provided by ${this.url} is not trusted, this node needs to be added to the cluster, or a certificate authority needs to be added`):ae.error?.(`Error in connection to ${this.url} due to ${n.message}`)),this.sessionReject(n)}),this.socket.on("close",(n,s)=>{if(this.isConnected&&(this.nodeSubscriptions&&Cd({name:this.nodeName,database:this.databaseName,url:this.url,finished:this.socket.isFinished}),this.isConnected=!1),this.removeAllListeners("subscriptions-updated"),this.socket.isFinished){this.isFinished=!0,r?.end(),this.emit("finished");return}if(++this.retries%20===1){let i=s?.toString();ae.warn?.(`${r?"Disconnected from":"Failed to connect to"} ${this.url} (db: "${this.databaseName}"), due to ${i?'"'+i+'" ':""}(code: ${n})`)}r=null,this.resetSession(),setTimeout(()=>{this.connect()},this.retryTime).unref(),this.retryTime+=this.retryTime>>8})}resetSession(){this.session=new Promise((t,r)=>{this.sessionResolve=t,this.sessionReject=r})}subscribe(t,r){this.nodeSubscriptions=t,this.replicateTablesByDefault=r,this.emit("subscriptions-updated",t)}unsubscribe(){this.socket.isFinished=!0,this.socket.close(1008,"No longer subscribed")}getRecord(t){return this.session.then(r=>r.getRecord(t))}};o(Qh,"replicateOverWS")});var Go={};Oe(Go,{clearThisNodeName:()=>Nue,disableReplication:()=>Rue,enabledDatabases:()=>ec,forEachReplicatedDatabase:()=>za,getThisNodeId:()=>VS,getThisNodeName:()=>Ze,getThisNodeUrl:()=>ja,hostnameToUrl:()=>jS,lastTimeInAuditStore:()=>qh,monitorNodeCAs:()=>fV,replicateOperation:()=>Oue,replicationCertificateAuthorities:()=>qo,sendOperationToNode:()=>Jh,servers:()=>Sue,setReplicator:()=>hV,start:()=>Tue,startOnMainThread:()=>Bw,subscribeToNode:()=>Kh,unsubscribeFromNode:()=>LS,urlToNodeName:()=>pi});function Tue(e){if(!e.port&&!e.securePort&&(e.port=Ds.default.get(U.OPERATIONSAPI_NETWORK_PORT),e.securePort=Ds.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),!Ze())throw new Error("Can not load replication without a url (see replication.url in the config)");let t=new Map;for(let s of Vh(e))t.set(pi(s.url),s);yue(e),e={mtls:!0,isOperationsServer:!0,maxPayload:10*1024*1024*1024,...e};let r=Le.ws(async(s,i,a,c)=>{if(Ct.debug("Incoming WS connection received "+i.url),i.headers.get("sec-websocket-protocol")!=="harperdb-replication-v1")return c(s,i,a);await a,s._socket.unref(),Qh(s,e,i?.user),s.on("error",l=>{l.code!=="ECONNREFUSED"&&Ct.error("Error in connection to "+this.url,l.message)})},e);e.runFirst=!0,Le.http((s,i)=>{if(s.isWebSocket&&s.headers.get("Sec-WebSocket-Protocol")==="harperdb-replication-v1"){Ct.debug("Incoming replication WS connection received, authorized: 
"+s.authorized),!s.authorized&&s._nodeRequest.socket.authorizationError&&Ct.error(`Incoming client connection from ${s.ip} did not have valid certificate, you may need turn on enableRootCAs in the config if you are using a publicly signed certificate, or add the CA to the server's trusted CAs`,s._nodeRequest.socket.authorizationError);let a=Kt().primaryStore;if(s.authorized&&s.peerCertificate.subjectaltname){let c=(0,uV.getHostnamesFromCertificate)(s.peerCertificate),l;for(let u of c)if(l=u&&(a.get(u)||t.get(u)),l)break;if(l)if(l?.revoked_certificates?.includes(s.peerCertificate.serialNumber)){Ct.warn("Revoked certificate used in attempt to connect to node",l.name,"certificate serial number",s.peerCertificate.serialNumber);return}else s.user=l;else Ct.warn(`No node found for certificate common name/SANs: ${c}, available nodes are ${Array.from(a.getRange({}).filter(({value:u})=>u).map(({key:u})=>u)).join(", ")} and routes ${Array.from(t.keys()).join(", ")}, connection will require credentials.`)}else{let c=a.get(s.ip)||t.get(s.ip);c?s.user=c:Ct.warn(`No node found for IP address ${s.ip}, available nodes are ${Array.from(new Set([...a.getKeys(),...t.keys()])).join(", ")}, connection will require credentials.`)}}return i(s)},e);let n=[];for(let s of r)if(s.secureContexts){let i=o(()=>{let a=new Set(s.secureContexts.values());s.defaultContext&&a.add(s.defaultContext);for(let c of a)try{let l=Array.from(qo);c.options.availableCAs&&l.push(...c.options.availableCAs.values());let u={...c.options,ca:l};c.updatedContext=zS.createSecureContext(u)}catch(l){Ct.error("Error creating replication TLS config",l)}},"updateContexts");s.secureContextsListeners.push(i),n.push(i),Ds.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1&&i()}fV(()=>{for(let s of n)s()})}function fV(e){let t=0;wd(r=>{r?.ca&&(qo.add(r.ca),qo.size!==t&&(t=qo.size,e?.()))})}function Rue(e=!0){dV=e}function yue(e){dV||(st(),ec=e.databases,za(e,(t,r)=>{if(!t){let n=e.databaseSubscriptions||tc;for(let[s,i]of YS){let a=i.get(r);a&&(a.subscribe([],!1),i.delete(r))}n.delete(r);return}for(let n in t){let s=t[n];hV(r,s,e),sO.get(s)?.forEach(i=>i(s))}}))}function hV(e,t,r){if(!t)return console.error(`Attempt to replicate non-existent table ${t.name} from database ${e}`);if(t.replicate===!1||t.sources?.some(s=>s.isReplicator))return;let n;t.sourcedFrom(class mV extends xr{static{o(this,"Replicator")}static connection;static subscription;static async subscribe(){let i=r.databaseSubscriptions||tc,a=i.get(e),c=a?.tableById||[];c[t.tableId]=t;let l=a?.ready;if(Ct.trace("Setting up replicator subscription to database",e),!a?.auditStore)return this.subscription=a=new Vn,i.set(e,a),a.tableById=c,a.auditStore=t.auditStore,a.dbisDB=t.dbisDB,a.databaseName=e,l&&l(a),a;this.subscription=a}static subscribeOnThisThread(i,a){return!0}static async load(i){if(i){let a=i.residencyId,c=i.residency||t.dbisDB.get([Symbol.for("residency_by_id"),a]);if(c){let l,u=new Set;do{let d,f="",m=1/0;for(let p of c){if(u.has(p)||p===Le.hostname)continue;let _=bue(p,mV.subscription,e);if(_?.isConnected){let g=Nd(t.auditStore,e,p)[qS];(!d||g<m)&&(d=_,f=p,m=g)}}if(!d)throw l||new cV.ServerError(`No connection to any other nodes are available: ${c}`,502);let h={requestId:gue++,table:t,entry:i,id:i.key};u.add(f);try{return await d.getRecord(h)}catch(p){if(d.isConnected)throw p;Ct.warn("Error in load from node",WS,p),l||(l=p)}}while(!0)}}}static isReplicator=!0},{intermediateSource:!0})}function Aue(e,t,r,n,s){let i=YS.get(e);i||YS.set(e,i=new Map);let a=i.get(r);if(a)return 
a;if(t)return i.set(r,a=new vd(e,t,r,n,s)),a.connect(),a.once("finished",()=>i.delete(r)),a}function bue(e,t,r){let n=sV.get(e);n||(n=new Map,sV.set(e,n));let s=n.get(r);if(s)return s;let i=Kt().primaryStore.get(e);return i?.url&&(s=new vd(i.url,t,r,e,i.authorization),n.set(r,s),s.connect(),s.once("finished",()=>n.delete(r))),s}async function Jh(e,t,r){r||(r={}),r.serverName=e.name;let n=await $S(e.url,r),s=Qh(n,{},{});return new Promise((i,a)=>{n.on("open",()=>{Ct.debug("Sending operation connection to "+e.url+" opened",t),i(s.sendOperation(t))}),n.on("error",c=>{a(c)}),n.on("close",c=>{Ct.info("Sending operation connection to "+e.url+" closed",c)})}).finally(()=>{n.close()})}function Kh(e){try{lV.isMainThread&&Ct.trace("Subscribing on main thread (should not happen in multi-threaded instance)",e.nodes[0].url,e.database);let t=tc.get(e.database);if(!t){let n;t=new Promise(s=>{Ct.info("Waiting for subscription to database "+e.database),n=s}),t.ready=n,tc.set(e.database,t)}let r=Aue(e.nodes[0].url,t,e.database,e.nodes[0].name,e.nodes[0].authorization);e.nodes[0].name===void 0?r.tentativeNode=e.nodes[0]:r.nodeName=e.nodes[0].name,r.subscribe(e.nodes.filter(n=>$h(n,e.database)),e.replicateByDefault)}catch(t){Ct.error("Error in subscription to node",e.nodes[0]?.url,t)}}async function LS({name:e,url:t,database:r}){Ct.trace("Unsubscribing from node",e,t,r,"nodes",Array.from(Kt().primaryStore.getRange({})));let n=YS.get(t);if(n){let s=n.get(r);s&&(s.unsubscribe(),n.delete(r))}}function Iue(){if(oO!==void 0)return oO;let e=Ds.default.get(U.OPERATIONSAPI_TLS_CERTIFICATE)||Ds.default.get(U.TLS_CERTIFICATE);if(e)return oO=new oV.X509Certificate((0,aV.readFileSync)(e)).subject?.match(/CN=(.*)/)?.[1]??null}function Ze(){return WS||(WS=Ds.default.get("replication_hostname")??pi(Ds.default.get("replication_url"))??Iue()??iV("operationsapi_network_secureport")??iV("operationsapi_network_port")??"127.0.0.1")}function Nue(){WS=void 0}function iV(e){let t=Ds.default.get(e),r=t?.lastIndexOf?.(":");if(r>0)return t.slice(0,r)}function KS(e){let t=Ds.default.get(e),r=t?.lastIndexOf?.(":");return r>0?+t.slice(r+1).replace(/[\[\]]/g,""):+t}function VS(e){return Gh(e)?.[Ze()]}function ja(){let e=Ds.default.get("replication_url");return e||jS(Ze())}function jS(e){let t=KS("replication_port");if(t)return`ws://${e}:${t}`;if(t=KS("replication_secureport"),t)return`wss://${e}:${t}`;if(t=KS("operationsapi_network_port"),t)return`ws://${e}:${t}`;if(t=KS("operationsapi_network_secureport"),t)return`wss://${e}:${t}`}function pi(e){if(e)return new URL(e).hostname}function za(e,t){for(let n of Object.getOwnPropertyNames(ke))r(n);return Sh(n=>{r(n)}),bl((n,s)=>{r(n.databaseName)});function r(n){let s=ke[n];Ct.trace("Checking replication status of ",n,e?.databases),e?.databases===void 0||e.databases==="*"||e.databases.includes(n)||e.databases.some?.(i=>i.name===n)||!s?t(s,n,!0):wue(n)&&t(s,n,!1)}o(r,"forDatabase")}function wue(e){let t=ke[e];for(let r in t)if(t[r].replicate)return!0}function qh(e){for(let t of e.getKeys({limit:1,reverse:!0}))return t}async function Oue(e){let t={message:""};if(e.replicated){e.replicated=!1,Ct.trace?.("Replicating operation",e.operation,"to nodes",Le.nodes.map(n=>n.name));let r=await Promise.allSettled(Le.nodes.map(n=>Jh(n,e)));t.replicated=r.map((n,s)=>{let i=n.status==="rejected"?{status:"failed",reason:n.reason.toString()}:n.value;return i.node=Le.nodes[s]?.name,i})}return t}var 
Ds,Ct,oV,aV,zS,cV,lV,uV,dV,gue,Sue,qo,ec,YS,sV,oO,WS,is=ue(()=>{De();Pa();Tu();iO();Mr();Ds=w(oe()),Ct=w(Q()),oV=require("crypto"),aV=require("fs");Yh();Ll();k();Mw();zS=w(require("node:tls")),cV=w(ge()),lV=require("worker_threads"),uV=w(as()),gue=1,Sue=[],qo=Ds.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1?new Set(zS.rootCertificates):new Set;o(Tue,"start");o(fV,"monitorNodeCAs");o(Rue,"disableReplication");o(yue,"assignReplicationSource");o(hV,"setReplicator");YS=new Map;o(Aue,"getSubscriptionConnection");sV=new Map;o(bue,"getRetrievalConnectionByName");o(Jh,"sendOperationToNode");o(Kh,"subscribeToNode");o(LS,"unsubscribeFromNode");o(Iue,"getCommonNameFromCert");o(Ze,"getThisNodeName");o(Nue,"clearThisNodeName");Object.defineProperty(Le,"hostname",{get(){return Ze()}});o(iV,"getHostFromListeningPort");o(KS,"getPortFromListeningPort");o(VS,"getThisNodeId");Le.replication={getThisNodeId:VS,exportIdMapping:Gh};o(ja,"getThisNodeUrl");o(jS,"hostnameToUrl");o(pi,"urlToNodeName");o(za,"forEachReplicatedDatabase");o(wue,"hasExplicitlyReplicatedTable");o(qh,"lastTimeInAuditStore");o(Oue,"replicateOperation")});var Zh=M((Tve,SV)=>{"use strict";var Ud=Oq(),{validateBySchema:Xh}=ut(),{commonValidators:xd,schemaRegex:aO}=Hi(),pr=require("joi"),Cue=Q(),Pue=require("uuid").v4,XS=Co(),Bd=(k(),v(W)),Lue=require("util"),rc=Jn(),{handleHDBError:$o,hdbErrors:Due,ClientError:xl}=ge(),{HDB_ERROR_MSGS:QS,HTTP_STATUS_CODES:Vo}=Due,{SchemaEventMsg:ZS}=oi(),pV=mr(),{getDatabases:Mue}=(De(),v(mt)),{transformReq:Fd}=ie(),{replicateOperation:EV}=(is(),v(Go)),{cleanupOrphans:vue}=(Kn(),v(i_)),JS=pr.string().min(1).max(xd.schema_length.maximum).pattern(aO).messages({"string.pattern.base":"{:#label} "+xd.schema_format.message}),Uue=pr.string().min(1).max(xd.schema_length.maximum).pattern(aO).messages({"string.pattern.base":"{:#label} "+xd.schema_format.message}).required(),xue=pr.string().min(1).max(xd.schema_length.maximum).pattern(aO).messages({"string.pattern.base":"{:#label} "+xd.schema_format.message,"any.required":"'primary_key' is required","string.base":"'primary_key' must be a string"}).required();SV.exports={createSchema:Bue,createSchemaStructure:_V,createTable:Fue,createTableStructure:gV,createAttribute:$ue,dropSchema:Hue,dropTable:kue,dropAttribute:Gue,getBackup:Vue,cleanupOrphanBlobs:Kue};async function Bue(e){let t=await _V(e);return XS.signalSchemaChange(new ZS(process.pid,e.operation,e.schema)),t}o(Bue,"createSchema");async function _V(e){let t=Xh(e,pr.object({database:JS,schema:JS}));if(t)throw new xl(t.message);if(Fd(e),!await Ud.checkSchemaExists(e.schema))throw $o(new Error,QS.SCHEMA_EXISTS_ERR(e.schema),Vo.BAD_REQUEST,Bd.LOG_LEVELS.ERROR,QS.SCHEMA_EXISTS_ERR(e.schema),!0);return await rc.createSchema(e),`database '${e.schema}' successfully created`}o(_V,"createSchemaStructure");async function Fue(e){return Fd(e),e.hash_attribute=e.primary_key??e.hash_attribute,await gV(e)}o(Fue,"createTable");async function gV(e){let t=Xh(e,pr.object({database:JS,schema:JS,table:Uue,residence:pr.array().items(pr.string().min(1)).optional(),hash_attribute:xue}));if(t)throw new xl(t.message);if(!await Ud.checkSchemaTableExists(e.schema,e.table))throw $o(new Error,QS.TABLE_EXISTS_ERR(e.schema,e.table),Vo.BAD_REQUEST,Bd.LOG_LEVELS.ERROR,QS.TABLE_EXISTS_ERR(e.schema,e.table),!0);let n={name:e.table,schema:e.schema,id:Pue(),hash_attribute:e.hash_attribute};try{if(e.residence)if(global.clustering_on)n.residence=e.residence,await rc.createTable(n,e);else throw $o(new Error,"Clustering does not appear to be enabled. 
Cannot insert table with property 'residence'.",Vo.BAD_REQUEST);else await rc.createTable(n,e);return`table '${e.schema}.${e.table}' successfully created.`}catch(s){throw s}}o(gV,"createTableStructure");async function Hue(e){let t=Xh(e,pr.object({database:pr.string(),schema:pr.string()}).or("database","schema").messages({"object.missing":"'database' is required"}));if(t)throw new xl(t.message);Fd(e);let r=await Ud.checkSchemaExists(e.schema);if(r)throw $o(new Error,r,Vo.NOT_FOUND,Bd.LOG_LEVELS.ERROR,r,!0);let n=await Ud.schemaDescribe.describeSchema({schema:e.schema}),s=Object.keys(global.hdb_schema[e.schema]);await rc.dropSchema(e),XS.signalSchemaChange(new ZS(process.pid,e.operation,e.schema)),await pV.purgeSchemaTableStreams(e.schema,s);let i=await EV(e);return i.message=`successfully deleted '${e.schema}'`,i}o(Hue,"dropSchema");async function kue(e){let t=Xh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required()}));if(t)throw new xl(t.message);Fd(e);let r=await Ud.checkSchemaTableExists(e.schema,e.table);if(r)throw $o(new Error,r,Vo.NOT_FOUND,Bd.LOG_LEVELS.ERROR,r,!0);await rc.dropTable(e),await pV.purgeTableStream(e.schema,e.table);let n=await EV(e);return n.message=`successfully deleted table '${e.schema}.${e.table}'`,n}o(kue,"dropTable");async function Gue(e){let t=Xh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required(),attribute:pr.string().required()}));if(t)throw new xl(t.message);Fd(e);let r=await Ud.checkSchemaTableExists(e.schema,e.table);if(r)throw $o(new Error,r,Vo.NOT_FOUND,Bd.LOG_LEVELS.ERROR,r,!0);if(e.attribute===global.hdb_schema[e.schema][e.table].hash_attribute)throw $o(new Error,"You cannot drop a hash attribute",Vo.BAD_REQUEST,void 0,void 0,!0);if(Bd.TIME_STAMP_NAMES.indexOf(e.attribute)>=0)throw $o(new Error,`cannot drop internal timestamp attribute: ${e.attribute}`,Vo.BAD_REQUEST,void 0,void 0,!0);try{return await rc.dropAttribute(e),que(e),XS.signalSchemaChange(new ZS(process.pid,e.operation,e.schema,e.table,e.attribute)),`successfully deleted attribute '${e.attribute}'`}catch(n){throw Cue.error(`Got an error deleting attribute ${Lue.inspect(e)}.`),n}}o(Gue,"dropAttribute");function que(e){let t=Object.values(global.hdb_schema[e.schema][e.table].attributes);for(let r=0;r<t.length;r++)t[r].attribute===e.attribute&&global.hdb_schema[e.schema][e.table].attributes.splice(r,1)}o(que,"dropAttributeFromGlobal");async function $ue(e){Fd(e);let t=Mue()[e.schema][e.table].attributes;for(let{name:r}of t)if(r===e.attribute)throw $o(new Error,`attribute '${e.attribute}' already exists in ${e.schema}.${e.table}`,Vo.BAD_REQUEST,void 0,void 0,!0);return await rc.createAttribute(e),XS.signalSchemaChange(new ZS(process.pid,e.operation,e.schema,e.table,e.attribute)),`attribute '${e.schema}.${e.table}.${e.attribute}' successfully created.`}o($ue,"createAttribute");function Vue(e){return rc.getBackup(e)}o(Vue,"getBackup");function Kue(e){if(!e.database)throw new xl('Must provide "database" name for search for orphaned blobs');if(!databases[e.database])throw new xl(`Unknown database '${e.database}'`);return vue(databases[e.database],e.database),{message:"Orphaned blobs cleanup started, check logs for progress"}}o(Kue,"cleanupOrphanBlobs")});var RV=M((yve,TV)=>{"use strict";var{OPERATIONS_ENUM:Yue}=(k(),v(W)),cO=class{static{o(this,"ReadAuditLogObject")}constructor(t,r,n=void 0,s=void 0){this.operation=Yue.READ_AUDIT_LOG,this.schema=t,this.table=r,this.search_type=n,this.search_values=s}};TV.exports=cO});var lO=M((Ive,NV)=>{"use 
strict";var Wue=Jn(),bve=RV(),eT=ie(),tT=(k(),v(W)),zue=oe(),{handleHDBError:yV,hdbErrors:jue}=ge(),{HDB_ERROR_MSGS:AV,HTTP_STATUS_CODES:bV}=jue,Que=Object.values(tT.READ_AUDIT_LOG_SEARCH_TYPES_ENUM),IV="To use this operation audit log must be enabled in harperdb-config.yaml";NV.exports=Jue;async function Jue(e){if(eT.isEmpty(e.schema))throw new Error(AV.SCHEMA_REQUIRED_ERR);if(eT.isEmpty(e.table))throw new Error(AV.TABLE_REQUIRED_ERR);if(!zue.get(tT.CONFIG_PARAMS.LOGGING_AUDITLOG))throw yV(new Error,IV,bV.BAD_REQUEST,tT.LOG_LEVELS.ERROR,IV,!0);let t=eT.checkSchemaTableExist(e.schema,e.table);if(t)throw yV(new Error,t,bV.NOT_FOUND,tT.LOG_LEVELS.ERROR,t,!0);if(!eT.isEmpty(e.search_type)&&Que.indexOf(e.search_type)<0)throw new Error(`Invalid searchType '${read_audit_log_object.search_type}'`);return await Wue.readAuditLog(e)}o(Jue,"readAuditLog")});var OV=M((wve,wV)=>{"use strict";var{OPERATIONS_ENUM:Xue}=(k(),v(W)),uO=class{static{o(this,"GetBackupObject")}constructor(t,r,n=void 0,s=void 0){this.operation=Xue.GET_BACKUP,this.schema=t,this.table=r}};wV.exports=uO});var LV=M((Lve,PV)=>{"use strict";var Zue=Jn(),Cve=OV(),dO=ie(),ede=(k(),v(W)),Pve=oe(),{handleHDBError:tde,hdbErrors:rde}=ge(),{HDB_ERROR_MSGS:CV,HTTP_STATUS_CODES:nde}=rde;PV.exports=sde;async function sde(e){if(dO.isEmpty(e.schema))throw new Error(CV.SCHEMA_REQUIRED_ERR);if(dO.isEmpty(e.table))throw new Error(CV.TABLE_REQUIRED_ERR);let t=dO.checkSchemaTableExist(e.schema,e.table);if(t)throw tde(new Error,t,nde.NOT_FOUND,ede.LOG_LEVELS.ERROR,t,!0);return await Zue.getBackup(readAuditLogObject)}o(sde,"getBackup")});var UV=M((Mve,vV)=>{"use strict";var ide=oe(),nc=require("joi"),ode=ut(),DV=require("moment"),ade=require("fs-extra"),fO=require("path"),cde=require("lodash"),ep=(k(),v(W)),{LOG_LEVELS:Bl}=(k(),v(W)),lde="YYYY-MM-DD hh:mm:ss",ude=fO.resolve(__dirname,"../logs");vV.exports=function(e){return ode.validateBySchema(e,dde)};var dde=nc.object({from:nc.custom(MV),until:nc.custom(MV),level:nc.valid(Bl.NOTIFY,Bl.FATAL,Bl.ERROR,Bl.WARN,Bl.INFO,Bl.DEBUG,Bl.TRACE),order:nc.valid("asc","desc"),limit:nc.number().min(1),start:nc.number().min(0),log_name:nc.custom(fde)});function MV(e,t){if(DV(e,DV.ISO_8601).format(lde)==="Invalid date")return t.message(`'${t.state.path[0]}' date '${e}' is invalid.`)}o(MV,"validateDatetime");function fde(e,t){if(cde.invert(ep.LOG_NAMES)[e]===void 0)return t.message(`'log_name' '${e}' is invalid.`);let n=ide.get(ep.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e===void 0?ep.LOG_NAMES.HDB:e,i=s===ep.LOG_NAMES.INSTALL?fO.join(ude,ep.LOG_NAMES.INSTALL):fO.join(n,s);return ade.existsSync(i)?null:t.message(`'log_name' '${e}' does not exist.`)}o(fde,"validateReadLogPath")});var hO=M((Uve,BV)=>{"use strict";var rT=(k(),v(W)),mde=Q(),hde=oe(),pde=UV(),mO=require("path"),xV=require("fs-extra"),{once:Ede}=require("events"),{handleHDBError:_de,hdbErrors:gde}=ge(),{PACKAGE_ROOT:Sde}=Rt(),{replicateOperation:Tde}=(is(),v(Go)),Rde=mO.join(Sde,"logs"),yde=1e3,Ade=200;BV.exports=bde;async function bde(e){let t=pde(e);if(t)throw _de(t,t.message,gde.HTTP_STATUS_CODES.BAD_REQUEST,void 0,void 0,!0);let r=Tde(e),n=hde.get(rT.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e.log_name===void 0?rT.LOG_NAMES.HDB:e.log_name,i=s===rT.LOG_NAMES.INSTALL?mO.join(Rde,rT.LOG_NAMES.INSTALL):mO.join(n,s),a=e.level!==void 0,c=a?e.level:void 0,l=e.from!==void 0,u=l?new Date(e.from):void 0,d=e.until!==void 0,f=d?new Date(e.until):void 0,m=e.limit===void 0?yde:e.limit,h=e.order===void 0?void 0:e.order,p=e.start===void 
0?0:e.start,_=p+m,g=0;h==="desc"&&!u&&!f&&(g=Math.max(xV.statSync(i).size-(_+5)*Ade,0));let y=xV.createReadStream(i,{start:g});y.on("error",G=>{mde.error(G)});let T=0,R=[],N="",O;y.on("data",G=>{let Y=/(?:^|\n)(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:[\d\.]+Z) \[(.+?)]: /g;G=N+G;let q=0,K;for(;(K=Y.exec(G))&&!y.destroyed;){O&&(O.message=G.slice(q,K.index),F(O));let[ce,le,se]=K,pe=se.split("] ["),Ne=pe[0],Ue=pe[1];pe.splice(0,2),O={timestamp:le,thread:Ne,level:Ue,tags:pe,message:""},q=K.index+ce.length}N=G.slice(q)}),y.on("end",G=>{y.destroyed||O&&(O.message=N.trim(),F(O))}),y.resume();function F(G){let Y,q,K;switch(!0){case(a&&l&&d):Y=new Date(G.timestamp),q=new Date(u),K=new Date(f),G.level===c&&Y>=q&&Y<=K&&T<p?T++:G.level===c&&Y>=q&&Y<=K&&(ro(G,h,R),T++,T===_&&y.destroy());break;case(a&&l):Y=new Date(G.timestamp),q=new Date(u),G.level===c&&Y>=q&&T<p?T++:G.level===c&&Y>=q&&(ro(G,h,R),T++,T===_&&y.destroy());break;case(a&&d):Y=new Date(G.timestamp),K=new Date(f),G.level===c&&Y<=K&&T<p?T++:G.level===c&&Y<=K&&(ro(G,h,R),T++,T===_&&y.destroy());break;case(l&&d):Y=new Date(G.timestamp),q=new Date(u),K=new Date(f),Y>=q&&Y<=K&&T<p?T++:Y>=q&&Y<=K&&(ro(G,h,R),T++,T===_&&y.destroy());break;case a:G.level===c&&T<p?T++:G.level===c&&(ro(G,h,R),T++,T===_&&y.destroy());break;case l:Y=new Date(G.timestamp),q=new Date(u),Y>=q&&T<p?T++:Y>=q&&T>=p&&(ro(G,h,R),T++,T===_&&y.destroy());break;case d:Y=new Date(G.timestamp),K=new Date(f),Y<=K&&T<p?T++:Y<=K&&T>=p&&(ro(G,h,R),T++,T===_&&y.destroy());break;default:T<p?T++:(ro(G,h,R),T++,T===_&&y.destroy())}}o(F,"onLogMessage"),await Ede(y,"close");let Z=await r;if(Z.replicated){for(let G of R)G.node=server.hostname;for(let G of Z.replicated){let Y=G.node;if(G.status==="failed")ro({timestamp:new Date().toISOString(),level:"error",node:Y,message:`Error retrieving logs: ${G.reason}`},h,R);else for(let q of G.results)q.node=Y,ro(q,h,R)}}return R}o(bde,"readLog");function ro(e,t,r){t==="desc"?Ide(e,r):t==="asc"?Nde(e,r):r.push(e)}o(ro,"pushLineToResult");function Ide(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)>r?n=i+1:s=i}t.splice(n,0,e)}o(Ide,"insertDescending");function Nde(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)<r?n=i+1:s=i}t.splice(n,0,e)}o(Nde,"insertAscending")});var nT=M((Gve,GV)=>{"use strict";var pO=require("joi"),{string:Hd,boolean:FV,date:wde}=pO.types(),Ode=ut(),{validateSchemaExists:Bve,validateTableExists:Fve,validateSchemaName:Hve}=Hi(),Cde=(k(),v(W)),Pde=It(),HV=oe();HV.initSync();var kve=Hd.invalid(HV.get(Cde.CONFIG_PARAMS.CLUSTERING_NODENAME)??"node_name").pattern(Pde.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),kV={operation:Hd.valid("add_node","update_node","set_node_replication"),node_name:Hd.optional(),subscriptions:pO.array().items({table:Hd.optional(),schema:Hd.optional(),database:Hd.optional(),subscribe:FV.required(),publish:FV.required().custom(Dde),start_time:wde.iso()})};function Lde(e){return Ode.validateBySchema(e,pO.object(kV))}o(Lde,"addUpdateNodeValidator");function Dde(e,t){if(t.state.ancestors[2].operation==="add_node"&&e===!1&&t.state.ancestors[0].subscribe===!1)return t.message(`'subscriptions[${t.state.path[1]}]' subscribe and/or publish must be set to true when adding a node`)}o(Dde,"checkForFalsy");GV.exports={addUpdateNodeValidator:Lde,validationSchema:kV}});var kd=M(($ve,qV)=>{"use strict";var 
EO=class{static{o(this,"Node")}constructor(t,r,n){this.name=t,this.subscriptions=r,this.system_info=n}},_O=class{static{o(this,"NodeSubscription")}constructor(t,r,n,s){this.schema=t,this.table=r,this.publish=n,this.subscribe=s}};qV.exports={Node:EO,NodeSubscription:_O}});var VV=M((Kve,$V)=>{"use strict";var Mde=(k(),v(W)).OPERATIONS_ENUM,gO=class{static{o(this,"UpsertObject")}constructor(t,r,n,s=void 0){this.operation=Mde.UPSERT,this.schema=t,this.table=r,this.records=n,this.__origin=s}};$V.exports=gO});var tp=M((Wve,KV)=>{"use strict";var SO=class{static{o(this,"RemotePayloadObject")}constructor(t,r,n,s){this.operation=t,this.node_name=r,this.subscriptions=n,this.system_info=s}},TO=class{static{o(this,"RemotePayloadSubscription")}constructor(t,r,n,s,i,a,c){this.schema=t,this.table=r,this.hash_attribute=n,this.publish=s,this.subscribe=i,this.start_time=a,c!==void 0&&(this.attributes=c)}};KV.exports={RemotePayloadObject:SO,RemotePayloadSubscription:TO}});var WV=M((jve,YV)=>{"use strict";var RO=class{static{o(this,"TableSizeObject")}constructor(t,r,n=0,s=0,i=0,a=0){this.schema=t,this.table=r,this.table_size=n,this.record_count=s,this.transaction_log_size=i,this.transaction_log_record_count=a}};YV.exports=RO});var jV=M((tUe,zV)=>{"use strict";var vde=WV(),Jve=qt(),Xve=_t(),Ude=Q(),{getSchemaPath:Zve,getTransactionAuditStorePath:eUe}=yt(),{getDatabases:xde}=(De(),v(mt));zV.exports=Bde;async function Bde(e){let t=new vde;try{let r=xde()[e.schema]?.[e.name],n=r.primaryStore.getStats(),s=r.auditStore?.getStats();t.schema=e.schema,t.table=e.name,t.record_count=n.entryCount,t.transaction_log_record_count=s.entryCount}catch(r){Ude.warn(`unable to stat table dbi due to ${r}`)}return t}o(Bde,"lmdbGetTableSize")});var JV=M((nUe,QV)=>{"use strict";var yO=class{static{o(this,"SystemInformationObject")}constructor(t,r,n,s,i,a,c){this.system=t,this.time=r,this.cpu=n,this.memory=s,this.disk=i,this.network=a,this.harperdb_processes=c}};QV.exports=yO});var qd=M((lUe,t1)=>{"use strict";var Fde=require("fs-extra"),Hde=require("path"),hn=require("systeminformation"),sc=Q(),XV=mr(),iUe=It(),Gd=(k(),v(W)),kde=jV(),Gde=Ua(),{getThreadInfo:ZV}=nt(),rp=oe();rp.initSync();var qde=JV(),{openEnvironment:oUe}=_t(),{getSchemaPath:aUe}=yt(),{database:cUe,databases:AO}=(De(),v(mt)),sT;t1.exports={getHDBProcessInfo:wO,getNetworkInfo:CO,getDiskInfo:OO,getMemoryInfo:NO,getCPUInfo:IO,getTimeInfo:bO,getSystemInformation:PO,systemInformation:$de,getTableSize:LO,getMetrics:DO};function bO(){return hn.time()}o(bO,"getTimeInfo");async function IO(){try{let{family:e,model:t,stepping:r,revision:n,voltage:s,speedmin:i,speedmax:a,governor:c,socket:l,cache:u,...d}=await hn.cpu();d.cpu_speed=await hn.cpuCurrentSpeed();let{rawCurrentload:f,rawCurrentloadIdle:m,rawCurrentloadIrq:h,rawCurrentloadNice:p,rawCurrentloadSystem:_,rawCurrentloadUser:g,cpus:y,...T}=await hn.currentLoad();return T.cpus=[],y.forEach(R=>{let{rawLoad:N,rawLoadIdle:O,rawLoadIrq:F,rawLoadNice:Z,rawLoadSystem:G,rawLoadUser:Y,...q}=R;T.cpus.push(q)}),d.current_load=T,d}catch(e){return sc.error(`error in getCPUInfo: ${e}`),{}}}o(IO,"getCPUInfo");async function NO(){try{let{buffers:e,cached:t,slab:r,buffcache:n,...s}=await hn.mem();return Object.assign(s,process.memoryUsage())}catch(e){return sc.error(`error in getMemoryInfo: ${e}`),{}}}o(NO,"getMemoryInfo");async function wO(){let e={core:[],clustering:[]};try{let t=await hn.processes(),r;try{r=Number.parseInt(await 
Fde.readFile(Hde.join(rp.get(Gd.CONFIG_PARAMS.ROOTPATH),Gd.HDB_PID_FILE),"utf8"))}catch(n){if(n.code===Gd.NODE_ERROR_CODES.ENOENT)sc.warn("Unable to locate 'hdb.pid' file, try stopping and starting HarperDB. This could be because HarperDB is not running.");else throw n}t.list.forEach(n=>{n.pid===r?e.core.push(n):n.name==="nats-server"&&e.clustering.push(n)});for(let n of e.core)for(let s of t.list)s.pid===n.parentPid&&(s.name==="PM2"||s.command==="PM2")&&(n.parent="PM2");return e}catch(t){return sc.error(`error in getHDBProcessInfo: ${t}`),e}}o(wO,"getHDBProcessInfo");async function OO(){let e={};try{if(!rp.get(Gd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_DISK))return e;let{rIO_sec:t,wIO_sec:r,tIO_sec:n,ms:s,...i}=await hn.disksIO();e.io=i;let{rxSec:a,txSec:c,wxSec:l,...u}=await hn.fsStats();return e.read_write=u,e.size=await hn.fsSize(),e}catch(t){return sc.error(`error in getDiskInfo: ${t}`),e}}o(OO,"getDiskInfo");async function CO(){let e={default_interface:null,latency:{},interfaces:[],stats:[],connections:[]};try{return rp.get(Gd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_NETWORK)&&(e.default_interface=await hn.networkInterfaceDefault(),e.latency=await hn.inetChecksite("google.com"),(await hn.networkInterfaces()).forEach(n=>{let{internal:s,virtual:i,mtu:a,dhcp:c,dnsSuffix:l,ieee8021xAuth:u,ieee8021xState:d,carrierChanges:f,...m}=n;e.interfaces.push(m)}),(await hn.networkStats()).forEach(n=>{let{rxSec:s,txSec:i,ms:a,...c}=n;e.stats.push(c)})),e}catch(t){return sc.error(`error in getNetworkInfo: ${t}`),e}}o(CO,"getNetworkInfo");async function PO(){if(sT!==void 0)return sT;let e={};try{let{codepage:t,logofile:r,serial:n,build:s,servicepack:i,uefi:a,...c}=await hn.osInfo();e=c;let l=await hn.versions("node, npm");return e.node_version=l.node,e.npm_version=l.npm,sT=e,sT}catch(t){return sc.error(`error in getSystemInformation: ${t}`),e}}o(PO,"getSystemInformation");async function LO(){let e=[],t=await Gde.describeAll();for(let r of Object.values(t))for(let n of Object.values(r))e.push(await kde(n));return e}o(LO,"getTableSize");async function DO(){let e={};for(let t in AO){let r=e[t]={},n=r.tables={};for(let s in AO[t])try{let i=AO[t][s];if(!r.readers&&(Object.assign(r,i.primaryStore.rootStore.getStats()),delete r.root,r.readers=i.primaryStore.rootStore.readerList().split(/\n\s+/).slice(1).map(l=>{let[u,d,f]=l.trim().split(" ");return{pid:u,thread:d,txnid:f}}),i.auditStore)){let{treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}=i.auditStore.getStats();r.audit={treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}}let a=i.primaryStore.getStats(),c={};for(let l of["treeDepth","treeBranchPageCount","treeLeafPageCount","entryCount","overflowPages"])c[l]=a[l];n[s]=c}catch(i){sc.notify(`Error getting stats for table ${s}: ${i}`)}}return e}o(DO,"getMetrics");async function e1(){if(rp.get(Gd.CONFIG_PARAMS.CLUSTERING_ENABLED)){let{jsm:e}=await XV.getNATSReferences(),t=await XV.listStreams(),r=[];for(let n of t){let s=[],i=await e.consumers.list(n.config.name);for await(let c of i)s.push({name:c.name,created:c.created,num_ack_pending:c.num_ack_pending,num_redelivered:c.num_redelivered,num_waiting:c.num_waiting,num_pending:c.num_pending});let a={stream_name:n.config.name,database:n.config.subjects[0].split(".")[1],table:n.config.subjects[0].split(".")[2],state:n.state,consumers:s};r.push(a)}return r}}o(e1,"getNatsStreamInfo");async function $de(e){let t=new qde;if(!Array.isArray(e.attributes)||e.attributes.length===0)return t.system=await 
PO(),t.time=bO(),t.cpu=await IO(),t.memory=await NO(),t.disk=await OO(),t.network=await CO(),t.harperdb_processes=await wO(),t.table_size=await LO(),t.metrics=await DO(),t.threads=await ZV(),t.replication=await e1(),t;for(let r=0;r<e.attributes.length;r++)switch(e.attributes[r]){case"system":t.system=await PO();break;case"time":t.time=bO();break;case"cpu":t.cpu=await IO();break;case"memory":t.memory=await NO();break;case"disk":t.disk=await OO();break;case"network":t.network=await CO();break;case"harperdb_processes":t.harperdb_processes=await wO();break;case"table_size":t.table_size=await LO();break;case"database_metrics":case"metrics":t.metrics=await DO();break;case"threads":t.threads=await ZV();break;case"replication":t.replication=await e1();break;default:break}return t}o($de,"systemInformation")});var Ko=M((hUe,i1)=>{"use strict";var Vde=Mn(),MO=ie(),Kde=require("util"),Fl=(k(),v(W)),r1=oe();r1.initSync();var Yde=fw(),n1=on(),{Node:dUe,NodeSubscription:fUe}=kd(),Wde=Vu(),zde=VV(),{RemotePayloadObject:jde,RemotePayloadSubscription:Qde}=tp(),{handleHDBError:Jde,hdbErrors:Xde}=ge(),{HTTP_STATUS_CODES:Zde,HDB_ERROR_MSGS:efe}=Xde,tfe=ai(),rfe=qd(),{packageJson:nfe}=Rt(),{getDatabases:sfe}=(De(),v(mt)),mUe=Kde.promisify(Yde.authorize),ife=n1.searchByHash,ofe=n1.searchByValue;i1.exports={isEmpty:afe,getNodeRecord:cfe,upsertNodeRecord:lfe,buildNodePayloads:ufe,checkClusteringEnabled:dfe,getAllNodeRecords:ffe,getSystemInfo:mfe,reverseSubscription:s1};function afe(e){return e==null}o(afe,"isEmpty");async function cfe(e){let t=new Wde(Fl.SYSTEM_SCHEMA_NAME,Fl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e],["*"]);return ife(t)}o(cfe,"getNodeRecord");async function lfe(e){let t=new zde(Fl.SYSTEM_SCHEMA_NAME,Fl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e]);return Vde.upsert(t)}o(lfe,"upsertNodeRecord");function s1(e){if(MO.isEmpty(e.subscribe)||MO.isEmpty(e.publish))throw new Error("Received invalid subscription object");let{schema:t,table:r,hash_attribute:n}=e,s={schema:t,table:r,hash_attribute:n};return e.subscribe===!0&&e.publish===!1?(s.subscribe=!1,s.publish=!0):e.subscribe===!1&&e.publish===!0?(s.subscribe=!0,s.publish=!1):(s.subscribe=e.subscribe,s.publish=e.publish),s}o(s1,"reverseSubscription");function ufe(e,t,r,n){let s=[];for(let i=0,a=e.length;i<a;i++){let c=e[i],{schema:l,table:u}=c,d=MO.getTableHashAttribute(l,u),{subscribe:f,publish:m}=s1(c),h=sfe()[l]?.[u],p=new Qde(l,u,d,m,f,c.start_time,h.schemaDefined?h.attributes:void 0);s.push(p)}return new jde(r,t,s,n)}o(ufe,"buildNodePayloads");function dfe(){if(!r1.get(Fl.CONFIG_PARAMS.CLUSTERING_ENABLED))throw Jde(new Error,efe.CLUSTERING_NOT_ENABLED,Zde.BAD_REQUEST,void 0,void 0,!0)}o(dfe,"checkClusteringEnabled");async function ffe(){let e=new tfe(Fl.SYSTEM_SCHEMA_NAME,Fl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,"name","*",void 0,["*"]);return Array.from(await ofe(e))}o(ffe,"getAllNodeRecords");async function mfe(){let e=await rfe.getSystemInformation();return{hdb_version:nfe.version,node_version:e.node_version,platform:e.platform}}o(mfe,"getSystemInfo")});var vO=M((EUe,m1)=>{"use strict";var iT=mr(),o1=ie(),a1=It(),c1=(k(),v(W)),oT=Q(),l1=Zh(),hfe=km(),{RemotePayloadObject:pfe}=tp(),{handleHDBError:u1,hdbErrors:Efe}=ge(),{HTTP_STATUS_CODES:d1}=Efe,{NodeSubscription:f1}=kd();m1.exports=_fe;async function _fe(e,t){let r;try{r=await iT.request(`${t}.${a1.REQUEST_SUFFIX}`,new pfe(c1.OPERATIONS_ENUM.DESCRIBE_ALL,t,void 0,void 0)),oT.trace("Response from remote describe all request:",r)}catch(a){oT.error(`addNode received error from describe all request to remote 
node: ${a}`);let c=iT.requestErrorHandler(a,"add_node",t);throw u1(new Error,c,d1.INTERNAL_SERVER_ERROR,"error",c)}if(r.status===a1.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let a=`Error returned from remote node ${t}: ${r.message}`;throw u1(new Error,a,d1.INTERNAL_SERVER_ERROR,"error",a)}let n=r.message,s=[],i=[];for(let a of e){let{table:c}=a,l=a.database??a.schema??"data";if(l===c1.SYSTEM_SCHEMA_NAME){await iT.createLocalTableStream(l,c);let p=new f1(l,c,a.publish,a.subscribe);p.start_time=a.start_time,i.push(p);continue}let u=o1.doesSchemaExist(l),d=n[l]!==void 0,f=c?o1.doesTableExist(l,c):!0,m=c?n?.[l]?.[c]!==void 0:!0;if(!u&&!d||!f&&!m){s.push(a);continue}if(!u&&d&&(oT.trace(`addNode creating schema: ${l}`),await l1.createSchema({operation:"create_schema",schema:l})),!f&&m){oT.trace(`addNode creating table: ${c} in schema: ${l} with attributes ${JSON.stringify(n[l][c].attributes)}`);let p=new hfe(l,c,n[l][c].hash_attribute);n[l][c].attributes&&(p.attributes=n[l][c].attributes),await l1.createTable(p)}await iT.createLocalTableStream(l,c);let h=new f1(l,c,a.publish,a.subscribe);h.start_time=a.start_time,i.push(h)}return{added:i,skipped:s}}o(_fe,"reviewSubscriptions")});var $d={};Oe($d,{addNodeBack:()=>UO,removeNodeBack:()=>xO,setNode:()=>Rfe});async function Rfe(e){e.node_name&&!e.hostname&&(e.hostname=e.node_name),e.verify_tls!==void 0&&(e.rejectUnauthorized=e.verify_tls);let{url:t,hostname:r}=e;t?r||(r=e.hostname=pi(t)):t=jS(r);let n=(0,p1.validateBySchema)(e,Tfe);if(n)throw(0,Yo.handleHDBError)(n,n.message,Sfe.BAD_REQUEST,void 0,void 0,!0);if(e.operation==="remove_node"){if(!t&&!r)throw new Yo.ClientError("url or hostname is required for remove_node operation");let h=r,p=Kt(),_=await p.get(h);if(!_)throw new Yo.ClientError(h+" does not exist");try{await Jh({url:_.url},{operation:$.REMOVE_NODE_BACK,name:_?.subscriptions?.length>0?Ze():h},void 0)}catch(g){cs.warn(`Error removing node from target node ${h}, if it is offline and we be online in the future, you may need to clean up this node manually, or retry:`,g)}return await p.delete(h),`Successfully removed '${h}' from cluster`}if(!t)throw new Yo.ClientError("url required for this operation");let s=ja();if(s==null)throw new Yo.ClientError("replication url is missing from harperdb-config.yaml");let i,a,c;if(t?.startsWith("wss:")){i=await(0,Ms.getReplicationCert)();let h=await(0,Ms.getReplicationCertAuth)();if(!i)throw new Error("Unable to find a certificate to use for replication");i.options.is_self_signed?(a=await(0,Ms.createCsr)(),cs.info("Sending CSR to target node:",t)):h&&(c=h.certificate,cs.info("Sending CA named",h.name,"to target node",t))}let l={operation:$.ADD_NODE_BACK,hostname:(0,oc.get)(U.REPLICATION_HOSTNAME),target_hostname:r,url:s,csr:a,cert_auth:c,authorization:e.retain_authorization?e.authorization:null};if((0,oc.get)(U.REPLICATION_SHARD)!==void 0&&(l.shard=(0,oc.get)(U.REPLICATION_SHARD)),e.subscriptions?l.subscriptions=e.subscriptions.map(h1):l.subscriptions=null,e.hasOwnProperty("subscribe")||e.hasOwnProperty("publish")){let h=h1(e);l.subscribe=h.subscribe,l.publish=h.publish}e?.authorization?.username&&e?.authorization?.password&&(e.authorization="Basic "+Buffer.from(e.authorization.username+":"+e.authorization.password).toString("base64"));let u,d;try{u=await Jh({url:t},l,e)}catch(h){h.message=`Error returned from ${t}: `+h.message,cs.warn("Error adding node:",t,"to cluster:",h),d=h}if(a&&(!u?.certificate||!u?.certificate?.includes?.("BEGIN CERTIFICATE")))throw d?(d.message+=" and connection was required to sign 
certificate",d):new Error(`Unexpected certificate signature response from node ${t} response: ${JSON.stringify(u)}`);a&&(cs.info("CSR response received from node:",t,"saving certificate and CA in hdb_certificate"),await(0,Ms.setCertTable)({name:gfe.certificateFromPem(u.signingCA).issuer.getField("CN").value,certificate:u.signingCA,is_authority:!0}),u.certificate&&await(0,Ms.setCertTable)({name:Ze(),uses:["https","operations","wss"],certificate:u.certificate,private_key_name:i?.options?.key_file,is_authority:!1,is_self_signed:!1}),c=u.signingCA);let f={url:t,ca:u?.usingCA};if(e.hostname&&(f.name=e.hostname),e.subscriptions?f.subscriptions=e.subscriptions:f.replicates=!0,e.start_time&&(f.start_time=typeof e.start_time=="string"?new Date(e.start_time).getTime():e.start_time),e.retain_authorization&&(f.authorization=e.authorization),e.revoked_certificates&&(f.revoked_certificates=e.revoked_certificates),u?.shard!==void 0?f.shard=u.shard:e.shard!==void 0&&(f.shard=e.shard),f.replicates){let h={url:s,ca:c,replicates:!0,subscriptions:null};(0,oc.get)(U.REPLICATION_SHARD)!==void 0&&(h.shard=(0,oc.get)(U.REPLICATION_SHARD)),e.retain_authorization&&(h.authorization=e.authorization),e.start_time&&(h.start_time=e.start_time),await Ho(Ze(),h)}await Ho(u?u.nodeName:f.name??pi(t),f);let m;return e.operation==="update_node"?m=`Successfully updated '${t}'`:m=`Successfully added '${t}' to cluster`,d&&(m+=" but there was an error updating target node: "+d.message),m}async function UO(e){cs.trace("addNodeBack received request:",e);let t=await(0,Ms.signCertificate)(e),r;e.csr?(r=t.signingCA,cs.info("addNodeBack received CSR from node:",e.url,"this node will use and respond with CA that was used to issue CSR")):(r=e?.cert_auth,cs.info("addNodeBack received CA from node:",e.url));let n={url:e.url,ca:r};e.subscriptions?n.subscriptions=e.subscriptions:(n.replicates=!0,n.subscriptions=null),e.start_time&&(n.start_time=e.start_time),e.authorization&&(n.authorization=e.authorization),e.shard!==void 0&&(n.shard=e.shard);let s=await(0,Ms.getReplicationCertAuth)();if(n.replicates){let i={url:ja(),ca:s?.certificate,replicates:!0,subscriptions:null};(0,oc.get)(U.REPLICATION_SHARD)!==void 0&&(i.shard=(0,oc.get)(U.REPLICATION_SHARD),t.shard=i.shard),e.start_time&&(i.start_time=e.start_time),e.authorization&&(i.authorization=e.authorization),await Ho(Ze(),i)}return await Ho(e.hostname,n),t.nodeName=Ze(),t.usingCA=s?.certificate,cs.info("addNodeBack responding to:",e.url,"with CA named:",s?.name),t}async function xO(e){cs.trace("removeNodeBack received request:",e),await Kt().delete(e.name)}function h1(e){let{subscribe:t,publish:r}=e;return{...e,subscribe:r,publish:t}}var Ms,p1,ic,oc,cs,Yo,gfe,Sfe,Tfe,Vd=ue(()=>{Ms=w(as()),p1=w(ut()),ic=w(require("joi")),oc=w(oe());k();Yh();Ll();is();cs=w(Q()),Yo=w(ge()),{pki:gfe}=require("node-forge"),{HTTP_STATUS_CODES:Sfe}=Yo.hdbErrors,Tfe=ic.default.object({hostname:ic.default.string(),verify_tls:ic.default.boolean(),replicates:ic.default.boolean(),subscriptions:ic.default.array(),revoked_certificates:ic.default.array(),shard:ic.default.number()});o(Rfe,"setNode");o(UO,"addNodeBack");o(xO,"removeNodeBack");o(h1,"reverseSubscription")});var dT=M((IUe,_1)=>{"use strict";var{handleHDBError:aT,hdbErrors:yfe}=ge(),{HTTP_STATUS_CODES:cT}=yfe,{addUpdateNodeValidator:Afe}=nT(),lT=Q(),uT=(k(),v(W)),E1=It(),bfe=ie(),np=mr(),sp=Ko(),BO=oe(),Ife=vO(),{Node:Nfe,NodeSubscription:wfe}=kd(),{broadcast:Ofe}=nt(),{setNode:Cfe}=(Vd(),v($d)),AUe=oe(),bUe=(k(),v(W)),Pfe="Unable to create subscriptions due to 
schema and/or tables not existing on the local or remote node",Lfe="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",Dfe=BO.get(uT.CONFIG_PARAMS.CLUSTERING_NODENAME);_1.exports=Mfe;async function Mfe(e,t=!1){if(lT.trace("addNode called with:",e),BO.get(uT.CONFIG_PARAMS.REPLICATION_URL)||BO.get(uT.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Cfe(e);sp.checkClusteringEnabled();let r=Afe(e);if(r)throw aT(r,r.message,cT.BAD_REQUEST,void 0,void 0,!0);let n=e.node_name;if(!t){let f=await sp.getNodeRecord(n);if(!bfe.isEmptyOrZeroLength(f))throw aT(new Error,`Node '${n}' has already been added, perform update_node to proceed.`,cT.BAD_REQUEST,void 0,void 0,!0)}let{added:s,skipped:i}=await Ife(e.subscriptions,n),a={message:void 0,added:s,skipped:i};if(s.length===0)return a.message=Pfe,a;let c=sp.buildNodePayloads(s,Dfe,uT.OPERATIONS_ENUM.ADD_NODE,await sp.getSystemInfo()),l=[];for(let f=0,m=s.length;f<m;f++){let h=s[f];s[f].start_time===void 0&&delete s[f].start_time,l.push(new wfe(h.schema,h.table,h.publish,h.subscribe))}lT.trace("addNode sending remote payload:",c);let u;try{u=await np.request(`${n}.${E1.REQUEST_SUFFIX}`,c)}catch(f){lT.error(`addNode received error from request: ${f}`);for(let h=0,p=s.length;h<p;h++){let _=s[h];_.publish=!1,_.subscribe=!1,await np.updateRemoteConsumer(_,n)}let m=np.requestErrorHandler(f,"add_node",n);throw aT(new Error,m,cT.INTERNAL_SERVER_ERROR,"error",m)}if(u.status===E1.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let f=`Error returned from remote node ${n}: ${u.message}`;throw aT(new Error,f,cT.INTERNAL_SERVER_ERROR,"error",f)}lT.trace(u);for(let f=0,m=s.length;f<m;f++){let h=s[f];await np.updateRemoteConsumer(h,n),h.subscribe===!0&&await np.updateConsumerIterator(h.schema,h.table,n,"start")}let d=new Nfe(n,l,u.system_info);return await sp.upsertNodeRecord(d),Ofe({type:"nats_update"}),i.length>0?a.message=Lfe:a.message=`Successfully added '${n}' to manifest`,a}o(Mfe,"addNode")});var GO=M((OUe,S1)=>{"use strict";var{handleHDBError:FO,hdbErrors:vfe}=ge(),{HTTP_STATUS_CODES:HO}=vfe,{addUpdateNodeValidator:Ufe}=nT(),ip=Q(),fT=(k(),v(W)),g1=It(),wUe=ie(),op=mr(),ap=Ko(),kO=oe(),{cloneDeep:xfe}=require("lodash"),Bfe=vO(),{Node:Ffe,NodeSubscription:Hfe}=kd(),{broadcast:kfe}=nt(),{setNode:Gfe}=(Vd(),v($d)),qfe="Unable to update subscriptions due to schema and/or tables not existing on the local or remote node",$fe="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",Vfe=kO.get(fT.CONFIG_PARAMS.CLUSTERING_NODENAME);S1.exports=Kfe;async function Kfe(e){if(ip.trace("updateNode called with:",e),kO.get(fT.CONFIG_PARAMS.REPLICATION_URL)??kO.get(fT.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Gfe(e);ap.checkClusteringEnabled();let t=Ufe(e);if(t)throw FO(t,t.message,HO.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n,s=await ap.getNodeRecord(r);s.length>0&&(n=xfe(s));let{added:i,skipped:a}=await Bfe(e.subscriptions,r),c={message:void 0,updated:i,skipped:a};if(i.length===0)return c.message=qfe,c;let l=ap.buildNodePayloads(i,Vfe,fT.OPERATIONS_ENUM.UPDATE_NODE,await ap.getSystemInfo());for(let d=0,f=i.length;d<f;d++){let m=i[d];ip.trace(`updateNode updating work stream for node: ${r} subscription:`,m),i[d].start_time===void 0&&delete i[d].start_time}ip.trace("updateNode sending remote payload:",l);let u;try{u=await op.request(`${r}.${g1.REQUEST_SUFFIX}`,l)}catch(d){ip.error(`updateNode received error from request: ${d}`);let f=op.requestErrorHandler(d,"update_node",r);throw 
FO(new Error,f,HO.INTERNAL_SERVER_ERROR,"error",f)}if(u.status===g1.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let d=`Error returned from remote node ${r}: ${u.message}`;throw FO(new Error,d,HO.INTERNAL_SERVER_ERROR,"error",d)}ip.trace(u);for(let d=0,f=i.length;d<f;d++){let m=i[d];await op.updateRemoteConsumer(m,r),m.subscribe===!0?await op.updateConsumerIterator(m.schema,m.table,r,"start"):await op.updateConsumerIterator(m.schema,m.table,r,"stop")}return n||(n=[new Ffe(r,[],u.system_info)]),await Yfe(n[0],i,u.system_info),a.length>0?c.message=$fe:c.message=`Successfully updated '${r}'`,c}o(Kfe,"updateNode");async function Yfe(e,t,r){let n=e;for(let s=0,i=t.length;s<i;s++){let a=t[s],c=!1;for(let l=0,u=e.subscriptions.length;l<u;l++){let d=n.subscriptions[l];if(d.schema===a.schema&&d.table===a.table){d.publish=a.publish,d.subscribe=a.subscribe,c=!0;break}}c||n.subscriptions.push(new Hfe(a.schema,a.table,a.publish,a.subscribe))}n.system_info=r,await ap.upsertNodeRecord(n),kfe({type:"nats_update"})}o(Yfe,"updateNodeTable")});var b1=M((PUe,A1)=>{"use strict";var y1=require("joi"),{string:T1}=y1.types(),Wfe=ut(),R1=(k(),v(W)),zfe=oe(),jfe=It();A1.exports=Qfe;function Qfe(e){let t=T1.invalid(zfe.get(R1.CONFIG_PARAMS.CLUSTERING_NODENAME)).pattern(jfe.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),r=y1.object({operation:T1.valid(R1.OPERATIONS_ENUM.REMOVE_NODE).required(),node_name:t});return Wfe.validateBySchema(e,r)}o(Qfe,"removeNodeValidator")});var mT=M((DUe,C1)=>{"use strict";var{handleHDBError:I1,hdbErrors:Jfe}=ge(),{HTTP_STATUS_CODES:N1}=Jfe,Xfe=b1(),cp=Q(),w1=Ko(),Zfe=ie(),Kd=(k(),v(W)),O1=It(),qO=mr(),$O=oe(),{RemotePayloadObject:eme}=tp(),{NodeSubscription:tme}=kd(),rme=Hm(),nme=pl(),{broadcast:sme}=nt(),{setNode:ime}=(Vd(),v($d)),ome=$O.get(Kd.CONFIG_PARAMS.CLUSTERING_NODENAME);C1.exports=ame;async function ame(e){if(cp.trace("removeNode called with:",e),$O.get(Kd.CONFIG_PARAMS.REPLICATION_URL)??$O.get(Kd.CONFIG_PARAMS.REPLICATION_HOSTNAME))return ime(e);w1.checkClusteringEnabled();let t=Xfe(e);if(t)throw I1(t,t.message,N1.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n=await w1.getNodeRecord(r);if(Zfe.isEmptyOrZeroLength(n))throw I1(new Error,`Node '${r}' was not found.`,N1.BAD_REQUEST,void 0,void 0,!0);n=n[0];let s=new eme(Kd.OPERATIONS_ENUM.REMOVE_NODE,ome,[]),i,a=!1;for(let l=0,u=n.subscriptions.length;l<u;l++){let d=n.subscriptions[l];d.subscribe===!0&&await qO.updateConsumerIterator(d.schema,d.table,r,"stop");try{await qO.updateRemoteConsumer(new tme(d.schema,d.table,!1,!1),r)}catch(f){cp.error(f)}}try{i=await qO.request(`${r}.${O1.REQUEST_SUFFIX}`,s),cp.trace("Remove node reply from remote node:",r,i)}catch(l){cp.error("removeNode received error from request:",l),a=!0}let c=new rme(Kd.SYSTEM_SCHEMA_NAME,Kd.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[r]);return await nme.deleteRecord(c),sme({type:"nats_update"}),i?.status===O1.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR||a?(cp.error("Error returned from remote node:",r,i?.message),`Successfully removed '${r}' from local manifest, however there was an error reaching remote node. 
Check the logs for more details.`):`Successfully removed '${r}' from manifest`}o(ame,"removeNode")});var D1=M((vUe,L1)=>{"use strict";var P1=require("joi"),{string:cme,array:lme}=P1.types(),ume=ut(),dme=nT();L1.exports=fme;function fme(e){let t=P1.object({operation:cme.valid("configure_cluster").required(),connections:lme.items(dme.validationSchema).required()});return ume.validateBySchema(e,t)}o(fme,"configureClusterValidator")});var VO=M((xUe,B1)=>{"use strict";var M1=(k(),v(W)),hT=Q(),mme=ie(),hme=oe(),pme=mT(),Eme=dT(),_me=Ko(),gme=D1(),{handleHDBError:v1,hdbErrors:Sme}=ge(),{HTTP_STATUS_CODES:U1}=Sme,Tme="Configure cluster complete.",Rme="Failed to configure the cluster. Check the logs for more details.",yme="Configure cluster was partially successful. Errors occurred when attempting to configure the following nodes. Check the logs for more details.";B1.exports=Ame;async function Ame(e){hT.trace("configure cluster called with:",e);let t=gme(e);if(t)throw v1(t,t.message,U1.BAD_REQUEST,void 0,void 0,!0);let r=await _me.getAllNodeRecords(),n=[];if(hme.get(M1.CONFIG_PARAMS.CLUSTERING_ENABLED)){for(let d=0,f=r.length;d<f;d++){let m=await x1(pme,{operation:M1.OPERATIONS_ENUM.REMOVE_NODE,node_name:r[d].name},r[d].name);n.push(m)}hT.trace("All results from configure_cluster remove node:",n)}let s=[],i=e.connections.length;for(let d=0;d<i;d++){let f=e.connections[d],m=await x1(Eme,f,f.node_name);s.push(m)}hT.trace("All results from configure_cluster add node:",s);let a=[],c=[],l=!1,u=n.concat(s);for(let d=0,f=u.length;d<f;d++){let m=u[d];m.status==="rejected"&&(hT.error(m.node_name,m?.error?.message,m?.error?.stack),a.includes(m.node_name)||a.push(m.node_name)),(m?.result?.message?.includes?.("Successfully")||m?.result?.includes?.("Successfully"))&&(l=!0),!(typeof m.result=="string"&&m.result.includes("Successfully removed")||m.status==="rejected")&&c.push({node_name:m?.node_name,response:m?.result})}if(mme.isEmptyOrZeroLength(a))return{message:Tme,connections:c};if(l)return{message:yme,failed_nodes:a,connections:c};throw v1(new Error,Rme,U1.INTERNAL_SERVER_ERROR,void 0,void 0,!0)}o(Ame,"configureCluster");async function x1(e,t,r){try{return{node_name:r,result:await e(t)}}catch(n){return{node_name:r,error:n,status:"rejected"}}}o(x1,"functionWrapper")});var G1=M((FUe,k1)=>{"use strict";var lp=require("joi"),bme=ut(),{validateSchemaExists:F1,validateTableExists:Ime,validateSchemaName:H1}=Hi(),Nme=lp.object({operation:lp.string().valid("purge_stream"),schema:lp.string().custom(F1).custom(H1).optional(),database:lp.string().custom(F1).custom(H1).optional(),table:lp.string().custom(Ime).required()});function wme(e){return bme.validateBySchema(e,Nme)}o(wme,"purgeStreamValidator");k1.exports=wme});var KO=M((kUe,q1)=>{"use strict";var{handleHDBError:Ome,hdbErrors:Cme}=ge(),{HTTP_STATUS_CODES:Pme}=Cme,Lme=G1(),Dme=mr(),Mme=Ko();q1.exports=vme;async function vme(e){e.schema=e.schema??e.database;let t=Lme(e);if(t)throw Ome(t,t.message,Pme.BAD_REQUEST,void 0,void 0,!0);Mme.checkClusteringEnabled();let{schema:r,table:n,options:s}=e;return await Dme.purgeTableStream(r,n,s),`Successfully purged table '${r}.${n}'`}o(vme,"purgeStream")});var zO=M((qUe,j1)=>{"use strict";var 
WO=Ko(),Ume=mr(),ET=oe(),Yd=(k(),v(W)),Hl=It(),xme=ie(),YO=Q(),{RemotePayloadObject:Bme}=tp(),{ErrorCode:$1}=require("nats"),{parentPort:V1}=require("worker_threads"),{onMessageByType:Fme}=nt(),{getThisNodeName:Hme}=(is(),v(Go)),{requestClusterStatus:kme}=(Yh(),v(m$)),{getReplicationSharedStatus:Gme,getHDBNodeTable:qme}=(Ll(),v(vw)),{CONFIRMATION_STATUS_POSITION:$me,RECEIVED_VERSION_POSITION:Vme,RECEIVED_TIME_POSITION:Kme,SENDING_TIME_POSITION:Yme,RECEIVING_STATUS_POSITION:Wme,RECEIVING_STATUS_RECEIVING:zme}=(iO(),v(nV)),K1=ET.get(Yd.CONFIG_PARAMS.CLUSTERING_ENABLED),Y1=ET.get(Yd.CONFIG_PARAMS.CLUSTERING_NODENAME);j1.exports={clusterStatus:jme,buildNodeStatus:z1};var W1;Fme("cluster-status",async e=>{W1(e)});async function jme(){if(ET.get(Yd.CONFIG_PARAMS.REPLICATION_URL)||ET.get(Yd.CONFIG_PARAMS.REPLICATION_HOSTNAME)){let n;if(V1){V1.postMessage({type:"request-cluster-status"}),n=await new Promise(i=>{W1=i});for(let i of n.connections){let a=i.name;for(let c of i.database_sockets){let l=c.database,u;for(let f of Object.values(databases[l]||{}))if(u=f.auditStore,u)break;if(!u)continue;let d=Gme(u,l,a);c.lastCommitConfirmed=pT(d[$me]),c.lastReceivedRemoteTime=pT(d[Vme]),c.lastReceivedLocalTime=pT(d[Kme]),c.sendingMessage=pT(d[Yme]),c.lastReceivedStatus=d[Wme]===zme?"Receiving":"Waiting"}}}else n=kme();n.node_name=Hme();let s=qme().primaryStore.get(n.node_name);return s?.shard&&(n.shard=s.shard),s?.url&&(n.url=s.url),n.is_enabled=!0,n}let e={node_name:Y1,is_enabled:K1,connections:[]};if(!K1)return e;let t=await WO.getAllNodeRecords();if(xme.isEmptyOrZeroLength(t))return e;let r=[];for(let n=0,s=t.length;n<s;n++)r.push(z1(t[n],e.connections));return await Promise.allSettled(r),e}o(jme,"clusterStatus");function pT(e){return e?e===1?"Copying":new Date(e).toUTCString():void 0}o(pT,"asDate");async function z1(e,t){let r=e.name,n=new Bme(Yd.OPERATIONS_ENUM.CLUSTER_STATUS,Y1,void 0,await WO.getSystemInfo()),s,i,a=Hl.CLUSTER_STATUS_STATUSES.OPEN;try{let l=Date.now();s=await Ume.request(Hl.REQUEST_SUBJECT(r),n),i=Date.now()-l,s.status===Hl.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR&&(a=Hl.CLUSTER_STATUS_STATUSES.CLOSED,YO.error(`Error getting node status from ${r} `,s))}catch(l){YO.warn(`Error getting node status from ${r}`,l),l.code===$1.NoResponders?a=Hl.CLUSTER_STATUS_STATUSES.NO_RESPONDERS:l.code===$1.Timeout?a=Hl.CLUSTER_STATUS_STATUSES.TIMEOUT:a=Hl.CLUSTER_STATUS_STATUSES.CLOSED}let c=new Qme(r,a,s?.message?.ports?.clustering,s?.message?.ports?.operations_api,i,s?.message?.uptime,e.subscriptions,s?.message?.system_info);try{let l={name:r,system_info:s?.message?.system_info};e.system_info?.hdb_version!==Yd.PRE_4_0_0_VERSION&&await WO.upsertNodeRecord(l)}catch(l){YO.error("Cluster status encountered an error updating system info for node:",r,l)}t.push(c)}o(z1,"buildNodeStatus");function Qme(e,t,r,n,s,i,a,c){this.node_name=e,this.status=t,this.ports={clustering:r,operations_api:n},this.latency_ms=s,this.uptime=i,this.subscriptions=a,this.system_info=c}o(Qme,"NodeStatusObject")});var QO=M((VUe,Q1)=>{"use strict";var{handleHDBError:Jme,hdbErrors:Xme}=ge(),{HTTP_STATUS_CODES:Zme}=Xme,ehe=mr(),the=Ko(),jO=ie(),_T=require("joi"),rhe=ut(),nhe=2e3,she=_T.object({timeout:_T.number().min(1),connected_nodes:_T.boolean(),routes:_T.boolean()});Q1.exports=ihe;async function ihe(e){the.checkClusteringEnabled();let t=rhe.validateBySchema(e,she);if(t)throw Jme(t,t.message,Zme.BAD_REQUEST,void 0,void 0,!0);let{timeout:r,connected_nodes:n,routes:s}=e,i=n===void 0||jO.autoCastBoolean(n),a=s===void 
0||jO.autoCastBoolean(s),c={nodes:[]},l=await ehe.getServerList(r??nhe),u={};if(i)for(let d=0,f=l.length;d<f;d++){let m=l[d].statsz;m&&(u[l[d].server.name]=m.routes)}for(let d=0,f=l.length;d<f;d++){if(l[d].statsz)continue;let m=l[d].server,h=l[d].data;if(m.name.endsWith("-hub")){let p={name:m.name.slice(0,-4),response_time:l[d].response_time};i&&(p.connected_nodes=[],u[m.name]&&u[m.name].forEach(_=>{p.connected_nodes.includes(_.name.slice(0,-4))||p.connected_nodes.push(_.name.slice(0,-4))})),a&&(p.routes=h.cluster?.urls?h.cluster?.urls.map(_=>({host:_.split(":")[0],port:jO.autoCast(_.split(":")[1])})):[]),c.nodes.push(p)}}return c}o(ihe,"clusterNetwork")});var eK=M((YUe,Z1)=>{"use strict";var JO=require("joi"),J1=ut(),{routeConstraints:X1}=kI();Z1.exports={setRoutesValidator:ohe,deleteRoutesValidator:ahe};function ohe(e){let t=JO.object({server:JO.valid("hub","leaf"),routes:X1.required()});return J1.validateBySchema(e,t)}o(ohe,"setRoutesValidator");function ahe(e){let t=JO.object({routes:X1.required()});return J1.validateBySchema(e,t)}o(ahe,"deleteRoutesValidator")});var gT=M((zUe,aK)=>{"use strict";var Wo=bt(),XO=ie(),vs=(k(),v(W)),Wd=oe(),tK=eK(),{handleHDBError:rK,hdbErrors:che}=ge(),{HTTP_STATUS_CODES:nK}=che,sK="cluster routes successfully set",iK="cluster routes successfully deleted";aK.exports={setRoutes:uhe,getRoutes:dhe,deleteRoutes:fhe};function lhe(e){let t=Wo.getClusteringRoutes(),r=e.server==="hub"?t.hub_routes:t.leaf_routes,n=e.server==="hub"?t.leaf_routes:t.hub_routes,s=[],i=[];for(let a=0,c=e.routes.length;a<c;a++){let l=e.routes[a];l.port=XO.autoCast(l.port);let u=r.some(f=>f.host===l.host&&f.port===l.port),d=n.some(f=>f.host===l.host&&f.port===l.port);u||d?s.push(l):(r.push(l),i.push(l))}return e.server==="hub"?Wo.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r):Wo.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,r),{message:sK,set:i,skipped:s}}o(lhe,"setRoutesNats");function uhe(e){let t=tK.setRoutesValidator(e);if(t)throw rK(t,t.message,nK.BAD_REQUEST,void 0,void 0,!0);if(Wd.get(vs.CONFIG_PARAMS.CLUSTERING_ENABLED))return lhe(e);let r=[],n=[],s=Wd.get(vs.CONFIG_PARAMS.REPLICATION_ROUTES)??[];return e.routes.forEach(i=>{oK(s,i)?n.push(i):(s.push(i),r.push(i))}),Wo.updateConfigValue(vs.CONFIG_PARAMS.REPLICATION_ROUTES,s),{message:sK,set:r,skipped:n}}o(uhe,"setRoutes");function oK(e,t){return typeof t=="string"?e.includes(t):typeof t=="object"&&t!==null?e.some(r=>(r.host===t.host||r.hostname===t.hostname)&&r.port===t.port):!1}o(oK,"existsInArray");function dhe(){if(Wd.get(vs.CONFIG_PARAMS.CLUSTERING_ENABLED)){let e=Wo.getClusteringRoutes();return{hub:e.hub_routes,leaf:e.leaf_routes}}else return Wd.get(vs.CONFIG_PARAMS.REPLICATION_ROUTES)??[]}o(dhe,"getRoutes");function fhe(e){let t=tK.deleteRoutesValidator(e);if(t)throw rK(t,t.message,nK.BAD_REQUEST,void 0,void 0,!0);if(Wd.get(vs.CONFIG_PARAMS.CLUSTERING_ENABLED))return mhe(e);let r=[],n=[],s=Wd.get(vs.CONFIG_PARAMS.REPLICATION_ROUTES)??[],i=[];return s.forEach(a=>{oK(e.routes,a)?r.push(a):(i.push(a),n.push(a))}),Wo.updateConfigValue(vs.CONFIG_PARAMS.REPLICATION_ROUTES,i),{message:iK,deleted:r,skipped:n}}o(fhe,"deleteRoutes");function mhe(e){let t=Wo.getClusteringRoutes(),r=t.hub_routes,n=t.leaf_routes,s=[],i=[],a=!1,c=!1;for(let l=0,u=e.routes.length;l<u;l++){let d=e.routes[l],f=!1;for(let m=0,h=r.length;m<h;m++){let p=r[m];if(d.host===p.host&&d.port===p.port){r.splice(m,1),f=!0,a=!0,s.push(d);break}}if(!f){let m=!0;for(let h=0,p=n.length;h<p;h++){let 
_=n[h];if(d.host===_.host&&d.port===_.port){n.splice(h,1),c=!0,m=!1,s.push(d);break}}m&&i.push(d)}}return a&&(r=XO.isEmptyOrZeroLength(r)?null:r,Wo.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r)),c&&(n=XO.isEmptyOrZeroLength(n)?null:n,Wo.updateConfigValue(vs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,n)),{message:iK,deleted:s,skipped:i}}o(mhe,"deleteRoutesNats")});var lK=M((QUe,cK)=>{"use strict";var up=require("alasql"),kl=require("recursive-iterator"),gi=Q(),hhe=ie(),dp=(k(),v(W)),ZO=class{static{o(this,"sqlStatementBucket")}constructor(t){this.ast=t,this.affected_attributes=new Map,this.table_lookup=new Map,this.schema_lookup=new Map,this.table_to_schema_lookup=new Map,Ehe(this.ast,this.affected_attributes,this.table_lookup,this.schema_lookup,this.table_to_schema_lookup)}getAttributesBySchemaTableName(t,r){if(!t||!r||!this.affected_attributes)return[];if(this.affected_attributes.has(t))return!this.affected_attributes.get(t).has(r)&&(r=this.table_lookup.get(r),!r)?[]:this.affected_attributes.get(t).get(r)}getAllTables(){let t=[];if(!this.affected_attributes)return t;for(let r of this.affected_attributes.keys())t.push(Array.from(this.affected_attributes.get(r).keys()));return t}getTablesBySchemaName(t){return!t||!this.affected_attributes?[]:Array.from(this.affected_attributes.get(t).keys())}getSchemas(){return this.affected_attributes?Array.from(this.affected_attributes.keys()):[]}getAst(){return this.ast}updateAttributeWildcardsForRolePerms(t){let r=this.ast.columns.filter(s=>dp.SEARCH_WILDCARDS.includes(s.columnid));if(r.length===0)return this.ast;let n=this.ast.from[0].databaseid;return this.ast.columns=this.ast.columns.filter(s=>!dp.SEARCH_WILDCARDS.includes(s.columnid)),r.forEach(s=>{let i=this.table_to_schema_lookup.has(s.tableid)?this.table_to_schema_lookup.get(s.tableid):n,a=this.table_lookup.has(s.tableid)?this.table_lookup.get(s.tableid):this.ast.from[0].tableid;if(t[i]&&t[i].tables[a]&&t[i].tables[a][dp.PERMS_CRUD_ENUM.READ]){let c;t[i].tables[a].attribute_permissions.length>0?c=phe(t[i].tables[a].attribute_permissions):c=global.hdb_schema[i][a].attributes.map(u=>({attribute_name:u.attribute}));let l=this.affected_attributes.get(i).get(a).filter(u=>!dp.SEARCH_WILDCARDS.includes(u));c.forEach(({attribute_name:u})=>{let d=new up.yy.Column({columnid:u});s.tableid&&(d.tableid=s.tableid),this.ast.columns.push(d),l.includes(u)||l.push(u)}),this.affected_attributes.get(i).set(a,l)}}),this.ast}};function phe(e){return e.filter(t=>t[dp.PERMS_CRUD_ENUM.READ])}o(phe,"filterReadRestrictedAttrs");function Ehe(e,t,r,n,s){_he(e,t,r,n,s)}o(Ehe,"interpretAST");function fp(e,t,r,n,s){if(!(!e||!e.databaseid)&&(t.has(e.databaseid)||t.set(e.databaseid,new Map),t.get(e.databaseid).has(e.tableid)||t.get(e.databaseid).set(e.tableid,[]),e.as&&(r.has(e.as)||r.set(e.as,e.tableid),n&&!n.has(e.as)&&n.set(e.as,e.databaseid)),s)){let i=e.databaseid,a=e.tableid;e.as&&(a=e.as),s.set(a,i)}}o(fp,"addSchemaTableToMap");function _he(e,t,r,n,s){if(!e){gi.info("getRecordAttributesAST: invalid SQL syntax tree");return}e instanceof up.yy.Insert?Rhe(e,t,r):e instanceof up.yy.Select?ghe(e,t,r,n,s):e instanceof up.yy.Update?She(e,t,r):e instanceof up.yy.Delete?The(e,t,r):gi.error("AST in getRecordAttributesAST() is not a valid SQL type.")}o(_he,"getRecordAttributesAST");function ghe(e,t,r,n,s){if(!e){gi.info("getSelectAttributes: invalid SQL syntax tree");return}if(!e.from||e.from[0]===void 0)return;let i=e.from[0].databaseid;if(hhe.isEmptyOrZeroLength(i)){gi.error("No schema 
specified");return}e.from.forEach(c=>{fp(c,t,r,n,s)}),e.joins&&e.joins.forEach(c=>{c.as&&(c.table.as=c.as),fp(c.table,t,r,n,s)});let a=new kl(e.columns);for(let{node:c}of a)if(c&&c.columnid){let l=c.tableid,u=n.has(l)?n.get(l):i;if(l||(l=e.from[0].tableid),!t.get(u).has(l))if(r.has(l))l=r.get(l);else{gi.info(`table specified as ${l} not found.`);return}t.get(u).get(l).indexOf(c.columnid)<0&&t.get(u).get(l).push(c.columnid)}if(e.where){let c=new kl(e.where),l=e.from[0].tableid;for(let{node:u}of c)if(u&&u.columnid){let d=u.tableid?u.tableid:l;if(!t.get(i).has(d))if(r.has(d))d=r.get(d);else{gi.info(`table specified as ${d} not found.`);continue}t.get(i).get(d).indexOf(u.columnid)<0&&t.get(i).get(d).push(u.columnid)}}if(e.joins&&e.joins.forEach(c=>{let l=new kl(c.on);for(let{node:u}of l)if(u&&u.columnid){let d=u.tableid,f=s.get(d);if(!t.get(f).has(d))if(r.has(d))d=r.get(d);else{gi.info(`table specified as ${d} not found.`);continue}t.get(f).get(d).indexOf(u.columnid)<0&&t.get(f).get(d).push(u.columnid)}}),e.order){let c=new kl(e.order);for(let{node:l}of c)if(l&&l.columnid){let u=l.tableid,d=n.has(u)?n.get(u):i;if(u||(u=e.from[0].tableid),!t.get(d).has(u))if(r.has(u))u=r.get(u);else{gi.info(`table specified as ${u} not found.`);return}t.get(d).get(u).indexOf(l.columnid)<0&&t.get(d).get(u).push(l.columnid)}}}o(ghe,"getSelectAttributes");function She(e,t,r){if(!e){gi.info("getUpdateAttributes: invalid SQL syntax tree");return}let n=new kl(e.columns),s=e.table.databaseid;fp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&eC(e.table.tableid,s,i.columnid,t,r)}o(She,"getUpdateAttributes");function The(e,t,r){if(!e){gi.info("getDeleteAttributes: invalid SQL syntax tree");return}let n=new kl(e.where),s=e.table.databaseid;fp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&eC(e.table.tableid,s,i.columnid,t,r)}o(The,"getDeleteAttributes");function Rhe(e,t,r){if(!e){gi.info("getInsertAttributes: invalid SQL syntax tree");return}let n=new kl(e.columns),s=e.into.databaseid;fp(e.into,t,r);for(let{node:i}of n)i&&i.columnid&&eC(e.into.tableid,s,i.columnid,t,r)}o(Rhe,"getInsertAttributes");function eC(e,t,r,n,s){if(!n.get(t))return;let i=e;n.get(t).has(i)||(i=s.get(i)),n.get(t).get(i).push(r)}o(eC,"pushAttribute");cK.exports=ZO});var dK=M((XUe,uK)=>{"use strict";var ST=(k(),v(W)),TT=class{static{o(this,"BaseLicense")}constructor(t=0,r=ST.RAM_ALLOCATION_ENUM.DEFAULT,n=ST.LICENSE_VALUES.VERSION_DEFAULT,s){this.exp_date=t,this.ram_allocation=r,this.version=n,this.fingerprint=s}},tC=class extends TT{static{o(this,"ExtendedLicense")}constructor(t=0,r=ST.RAM_ALLOCATION_ENUM.DEFAULT,n=ST.LICENSE_VALUES.VERSION_DEFAULT,s,i=!1){super(t,r,n,s),this.enterprise=i}};uK.exports={BaseLicense:TT,ExtendedLicense:tC}});var Qd=M((e0e,_K)=>{"use strict";var jd=require("fs-extra"),RT=(fg(),v(dg)),mK=require("crypto"),yhe=require("moment"),Ahe=require("uuid").v4,pn=Q(),nC=require("path"),bhe=ie(),Gl=(k(),v(W)),{totalmem:fK}=require("os"),Ihe=dK().ExtendedLicense,zd="invalid license key format",Nhe="061183",whe="mofi25",Ohe="aes-256-cbc",Che=16,Phe=32,hK=oe(),{resolvePath:pK}=bt();hK.initSync();var rC;_K.exports={validateLicense:EK,generateFingerPrint:Dhe,licenseSearch:oC,getLicense:Uhe,checkMemoryLimit:xhe};function sC(){return nC.join(hK.getHdbBasePath(),Gl.LICENSE_KEY_DIR_NAME,Gl.LICENSE_FILE_NAME)}o(sC,"getLicenseDirPath");function Lhe(){let e=sC();return pK(nC.join(e,Gl.LICENSE_FILE_NAME))}o(Lhe,"getLicenseFilePath");function iC(){let e=sC();return pK(nC.join(e,Gl.REG_KEY_FILE_NAME))}o(iC,"getFingerPrintFilePath");async function 
Dhe(){let e=iC();try{return await jd.readFile(e,"utf8")}catch(t){if(t.code==="ENOENT")return await Mhe();throw pn.error(`Error writing fingerprint file to ${e}`),pn.error(t),new Error("There was an error generating the fingerprint")}}o(Dhe,"generateFingerPrint");async function Mhe(){let e=Ahe(),t=RT.hash(e,RT.HASH_FUNCTION.MD5),r=iC();try{await jd.mkdirp(sC()),await jd.writeFile(r,t)}catch(n){if(n.code==="EEXIST")return t;throw pn.error(`Error writing fingerprint file to ${r}`),pn.error(n),new Error("There was an error generating the fingerprint")}return t}o(Mhe,"writeFingerprint");function EK(e,t){let r={valid_license:!1,valid_date:!1,valid_machine:!1,exp_date:null,ram_allocation:Gl.RAM_ALLOCATION_ENUM.DEFAULT,version:Gl.LICENSE_VALUES.VERSION_DEFAULT};if(!e)return pn.error("empty license key passed to validate."),r;let n=iC(),s=!1;try{s=jd.statSync(n)}catch(i){pn.error(i)}if(s){let i;try{i=jd.readFileSync(n,"utf8")}catch{pn.error("error validating this machine in the license"),r.valid_machine=!1;return}let a=e.split(whe),c=a[1];c=Buffer.concat([Buffer.from(c)],Che);let l=Buffer.concat([Buffer.from(i)],Phe),u=mK.createDecipheriv(Ohe,l,c);r.valid_date=!0,r.valid_license=!0,r.valid_machine=!0;let d=null;try{d=u.update(a[0],"hex","utf8"),d.trim(),d+=u.final("utf8")}catch{let h=vhe(a[0],i);if(h)d=h;else throw r.valid_license=!1,r.valid_machine=!1,console.error(zd),pn.error(zd),new Error(zd)}let f;if(isNaN(d))try{f=JSON.parse(d),r.version=f.version,r.exp_date=f.exp_date,isNaN(r.exp_date)&&(r.exp_date=new Date(r.exp_date).getTime()),f.ram_allocation&&(r.ram_allocation=f.ram_allocation)}catch{throw console.error(zd),pn.error(zd),new Error(zd)}else r.exp_date=d;r.exp_date<yhe().valueOf()&&(r.valid_date=!1),RT.validate(a[1],`${Nhe}${i}${t}`,RT.HASH_FUNCTION.MD5)||(r.valid_license=!1)}else r.valid_license=!1,r.valid_machine=!1;return r.valid_license&&r.valid_machine&&r.valid_date||pn.error("Invalid licence"),r}o(EK,"validateLicense");function vhe(e,t){try{let r=mK.createDecipher("aes192",t),n=r.update(e,"hex","utf8");return n.trim(),n+=r.final("utf8"),n}catch{pn.warn("Check old license failed")}}o(vhe,"checkOldLicense");function oC(){let e=new Ihe,t=[];try{t=jd.readFileSync(Lhe(),"utf-8").split(`\r
24
- `)}catch(r){r.code==="ENOENT"?pn.debug("no license file found"):pn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(bhe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=EK(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){pn.error("There was an error parsing the license string."),pn.error(s),e.ram_allocation=Gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return rC=e,e}o(oC,"licenseSearch");async function Uhe(){return rC||await oC(),rC}o(Uhe,"getLicense");function xhe(){let e=oC().ram_allocation,t=process.constrainedMemory?.()||fK();if(t=Math.round(Math.min(t,fK())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(xhe,"checkMemoryLimit")});var lC=M((r0e,RK)=>{var yT=Qd(),gK=require("chalk"),ls=Q(),SK=require("prompt"),{promisify:Bhe}=require("util"),aC=(k(),v(W)),Fhe=require("fs-extra"),Hhe=require("path"),khe=ie(),{packageJson:Ghe}=Rt(),TK=oe();TK.initSync();var qhe=require("moment"),$he=Bhe(SK.get),Vhe=Hhe.join(TK.getHdbBasePath(),aC.LICENSE_KEY_DIR_NAME,aC.LICENSE_FILE_NAME,aC.LICENSE_FILE_NAME);RK.exports={getFingerprint:Yhe,setLicense:Khe,parseLicense:cC,register:Whe,getRegistrationInfo:jhe};async function Khe(e){if(e&&e.key&&e.company){try{ls.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await cC(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw ls.error(r),ls.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(Khe,"setLicense");async function Yhe(){let e={};try{e=await yT.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw ls.error(r),ls.error(t),new Error(r)}return e}o(Yhe,"getFingerprint");async function cC(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");ls.info("Validating license input...");let r=yT.validateLicense(e,t);if(ls.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(ls.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(ls.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{ls.info("writing license to disk"),await Fhe.writeFile(Vhe,JSON.stringify({license_key:e,company:t}))}catch(n){throw ls.error("Failed to write License"),n}return"Registration successful."}o(cC,"parseLicense");async function Whe(){let e=await zhe();return cC(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(Whe,"register");async function zhe(){let e=await yT.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:gK.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:gK.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{SK.start()}catch(n){ls.error(n)}let r;try{r=await $he(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(zhe,"promptForRegistration");async function jhe(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await yT.getLicense()}catch(r){throw ls.error(`There was an error when searching licenses due to: ${r.message}`),r}if(khe.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Ghe.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=qhe.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(jhe,"getRegistrationInfo")});var AK=M((s0e,yK)=>{"use strict";var Qhe=It(),uC=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+Qhe.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};yK.exports=uC});var NK=M((o0e,IK)=>{"use strict";var bK=It(),dC=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+bK.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+bK.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};IK.exports=dC});var OK=M((c0e,wK)=>{"use strict";var fC=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};wK.exports=fC});var PK=M((u0e,CK)=>{"use strict";var Jhe=It(),mC=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+Jhe.SERVER_SUFFIX.ADMIN,this.password=r}};CK.exports=mC});var NT=M((f0e,MK)=>{"use strict";var ql=require("path"),$l=require("fs-extra"),Xhe=AK(),Zhe=NK(),epe=OK(),tpe=PK(),hC=ts(),Xd=ie(),Fn=bt(),bT=(k(),v(W)),mp=It(),{CONFIG_PARAMS:nr}=bT,Zd=Q(),hp=oe(),LK=$i(),pC=mr(),rpe=as(),Jd="clustering",npe=1e4,DK=50;MK.exports={generateNatsConfig:ipe,removeNatsConfig:ope,getHubConfigPath:spe};function spe(){let e=hp.get(nr.ROOTPATH);return ql.join(e,Jd,mp.NATS_CONFIG_FILES.HUB_SERVER)}o(spe,"getHubConfigPath");async function ipe(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=hp.get(nr.ROOTPATH);$l.ensureDirSync(ql.join(r,"clustering","leaf")),hp.initSync();let n=Fn.getConfigFromFile(nr.CLUSTERING_TLS_CERT_AUTH),s=Fn.getConfigFromFile(nr.CLUSTERING_TLS_PRIVATEKEY),i=Fn.getConfigFromFile(nr.CLUSTERING_TLS_CERTIFICATE);!await $l.exists(i)&&!await $l.exists(!n)&&await rpe.createNatsCerts();let 
a=ql.join(r,Jd,mp.PID_FILES.HUB),c=ql.join(r,Jd,mp.PID_FILES.LEAF),l=Fn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=ql.join(r,Jd,mp.NATS_CONFIG_FILES.HUB_SERVER),d=ql.join(r,Jd,mp.NATS_CONFIG_FILES.LEAF_SERVER),f=Fn.getConfigFromFile(nr.CLUSTERING_TLS_INSECURE),m=Fn.getConfigFromFile(nr.CLUSTERING_TLS_VERIFY),h=Fn.getConfigFromFile(nr.CLUSTERING_NODENAME),p=Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await pC.checkNATSServerInstalled()||IT("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await hC.listUsers(),g=Fn.getConfigFromFile(nr.CLUSTERING_USER),y=await hC.getClusterUser();(Xd.isEmpty(y)||y.active!==!0)&&IT(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await AT(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await AT(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await AT(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),await AT(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],R=[];for(let[q,K]of _.entries())K.role?.role===bT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new tpe(K.username,LK.decrypt(K.hash))),R.push(new epe(K.username,LK.decrypt(K.hash))));let N=[],{hub_routes:O}=Fn.getClusteringRoutes();if(!Xd.isEmptyOrZeroLength(O))for(let q of O)N.push(`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@${q.host}:${q.port}`);let F=new Xhe(Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NAME),Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,R);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=Xd.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===bT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await $l.writeJson(u,F),Zd.trace(`Hub server config written to ${u}`));let Z=`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,G=`tls://${y.uri_encoded_name}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,Y=new Zhe(Fn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[G],T,R,i,s,n,f);n==null&&delete Y.tls.ca_file,(t===void 0||t===bT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await $l.writeJson(d,Y),Zd.trace(`Leaf server config written to ${d}`))}o(ipe,"generateNatsConfig");async function AT(e){let t=hp.get(e);return Xd.isEmpty(t)&&IT(`port undefined for '${e}'`),await Xd.isPortTaken(t)&&IT(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(AT,"isPortAvailable");function IT(e){let t=`Error generating clustering config: ${e}`;Zd.error(t),console.error(t),process.exit(1)}o(IT,"generateNatsConfigError");async function ope(e){let{port:t,config_file:r}=pC.getServerConfig(e),{username:n,decrypt_hash:s}=await hC.getClusterUser(),i=0,a=2e3;for(;i<DK;){try{let d=await pC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){Zd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=DK)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&Zd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await Xd.asyncSetTimeout(u)}let c="0".repeat(npe),l=ql.join(hp.get(nr.ROOTPATH),Jd,r);await $l.writeFile(l,c),await $l.remove(l),Zd.notify(e,"started.")}o(ope,"removeNatsConfig")});var HK=M((h0e,FK)=>{"use strict";var us=oe(),ape=Qd(),Ke=(k(),v(W)),pp=It(),zo=require("path"),{PACKAGE_ROOT:OT}=Rt(),vK=oe(),wT=ie(),ef="/dev/null",cpe=zo.join(OT,"launchServiceScripts"),UK=zo.join(OT,"utility/scripts"),lpe=zo.join(UK,Ke.HDB_RESTART_SCRIPT),xK=zo.resolve(OT,"dependencies",`${process.platform}-${process.arch}`,pp.NATS_BINARY_NAME);function BK(){let t=ape.licenseSearch().ram_allocation||Ke.RAM_ALLOCATION_ENUM.DEFAULT,r=Ke.MEM_SETTING_KEY+t,n={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return wT.noBootFile()&&(n[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=wT.getEnvCliRootPath()),{name:Ke.PROCESS_DESCRIPTORS.HDB,script:Ke.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:OT}}o(BK,"generateMainServerConfig");var upe=9930;function dpe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=zo.join(e,"clustering",pp.NATS_CONFIG_FILES.HUB_SERVER),r=zo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=vK.get(Ke.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=pp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==upe?"-"+n:""),script:xK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=ef,i.error_file=ef),i}o(dpe,"generateNatsHubServerConfig");var fpe=9940;function mpe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=zo.join(e,"clustering",pp.NATS_CONFIG_FILES.LEAF_SERVER),r=zo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=vK.get(Ke.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=pp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==fpe?"-"+n:""),script:xK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=ef,i.error_file=ef),i}o(mpe,"generateNatsLeafServerConfig");function hpe(){us.initSync();let e=zo.join(us.get(Ke.CONFIG_PARAMS.LOGGING_ROOT),Ke.LOG_NAMES.HDB),t={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ke.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:cpe,autorestart:!1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=ef,t.error_file=ef),t}o(hpe,"generateClusteringUpgradeV4ServiceConfig");function ppe(){let e={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.RESTART_HDB};return wT.noBootFile()&&(e[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=wT.getEnvCliRootPath()),{...{name:Ke.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:UK},script:lpe}}o(ppe,"generateRestart");function 
Epe(){return{apps:[BK()]}}o(Epe,"generateAllServiceConfigs");FK.exports={generateAllServiceConfigs:Epe,generateMainServerConfig:BK,generateRestart:ppe,generateNatsHubServerConfig:dpe,generateNatsLeafServerConfig:mpe,generateClusteringUpgradeV4ServiceConfig:hpe}});var _p=M((_0e,ZK)=>{"use strict";var ct=(k(),v(W)),_pe=ie(),Qo=NT(),CT=mr(),jo=It(),ac=HK(),PT=oe(),Vl=Q(),gpe=Ko(),{startWorker:kK,onMessageFromWorkers:Spe}=nt(),Tpe=qd(),E0e=require("util"),Rpe=require("child_process"),ype=require("fs"),{execFile:Ape}=Rpe,Je;ZK.exports={enterPM2Mode:bpe,start:cc,stop:EC,reload:qK,restart:$K,list:_C,describe:YK,connect:Jo,kill:Cpe,startAllServices:Ppe,startService:gC,getUniqueServicesList:WK,restartAllServices:Lpe,isServiceRegistered:zK,reloadStopStart:jK,restartHdb:KK,deleteProcess:wpe,startClusteringProcesses:JK,startClusteringThreads:XK,isHdbRestartRunning:Ope,isClusteringRunning:Mpe,stopClustering:Dpe,reloadClustering:vpe,expectedRestartOfChildren:VK};var Ep=!1;Spe(e=>{e.type==="restart"&&PT.initSync(!0)});function bpe(){Ep=!0}o(bpe,"enterPM2Mode");function Jo(){return Je||(Je=require("pm2")),new Promise((e,t)=>{Je.connect((r,n)=>{r&&t(r),e(n)})})}o(Jo,"connect");var En,Ipe=10,GK;function cc(e,t=!1){if(Ep)return Npe(e);let r=Ape(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let a=En.indexOf(r);a>-1&&En.splice(a,1),!GK&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<Ipe&&(ype.existsSync(Qo.getHubConfigPath())?cc(e):(await Qo.generateNatsConfig(!0),cc(e),await new Promise(c=>setTimeout(c,3e3)),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let a=PT.get(ct.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,d;for(;l=c.exec(i);){if(l.index&&jo.LOG_LEVEL_HIERARCHY[a]>=jo.LOG_LEVEL_HIERARCHY[d||"info"]){let h=d===jo.LOG_LEVELS.ERR||d===jo.LOG_LEVELS.WRN?Vl.OUTPUTS.STDERR:Vl.OUTPUTS.STDOUT;Vl.logCustomLevel(d||"info",h,n,i.slice(u,l.index).trim())}let[f,m]=l;u=l.index+f.length,d=jo.LOG_LEVELS[m]}if(jo.LOG_LEVEL_HIERARCHY[a]>=jo.LOG_LEVEL_HIERARCHY[d||"info"]){let f=d===jo.LOG_LEVELS.ERR||d===jo.LOG_LEVELS.WRN?Vl.OUTPUTS.STDERR:Vl.OUTPUTS.STDOUT;Vl.logCustomLevel(d||"info",f,n,i.slice(u).trim())}}if(o(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!En&&(En=[],!t)){let i=o(()=>{GK=!0,En&&(En.map(a=>a.kill()),process.exit(0))},"killChildren");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}En.push(r)}o(cc,"start");function Npe(e){return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.start(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(Npe,"startWithPM2");function EC(e){if(!Ep){for(let t of En||[])t.name===e&&(En.splice(En.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.stop(e,async(n,s)=>{n&&(Je.disconnect(),r(n)),Je.delete(e,(i,a)=>{i&&(Je.disconnect(),r(n)),Je.disconnect(),t(a)})})})}o(EC,"stop");function qK(e){return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.reload(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(qK,"reload");function $K(e){if(!Ep){VK();for(let t of En||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.restart(e,(n,s)=>{Je.disconnect(),t(s)})})}o($K,"restart");function VK(){for(let e of En||[])e.config&&(e.config.restarts=0)}o(VK,"expectedRestartOfChildren");function wpe(e){return 
new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.delete(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(wpe,"deleteProcess");async function KK(){await cc(ac.generateRestart())}o(KK,"restartHdb");async function Ope(){let e=await _C();for(let t in e)if(e[t].name===ct.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}o(Ope,"isHdbRestartRunning");function _C(){return new Promise(async(e,t)=>{try{await Jo()}catch(r){t(r)}Je.list((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(_C,"list");function YK(e){return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.describe(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(YK,"describe");function Cpe(){if(!Ep){for(let e of En||[])e.kill();En=[];return}return new Promise(async(e,t)=>{try{await Jo()}catch(r){t(r)}Je.killDaemon((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(Cpe,"kill");async function Ppe(){try{await JK(),await XK(),await cc(ac.generateAllServiceConfigs())}catch(e){throw Je?.disconnect(),e}}o(Ppe,"startAllServices");async function gC(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case ct.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=ac.generateMainServerConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=ac.generateNatsIngestServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=ac.generateNatsReplyServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=ac.generateNatsHubServerConfig(),await cc(r,t),await Qo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=ac.generateNatsLeafServerConfig(),await cc(r,t),await Qo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=ac.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await cc(r)}catch(r){throw Je?.disconnect(),r}}o(gC,"startService");async function WK(){try{let e=await _C(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Je?.disconnect(),e}}o(WK,"getUniqueServicesList");async function Lpe(e=[]){try{let t=!1,r=await WK();for(let n=0,s=Object.values(r).length;n<s;n++){let a=Object.values(r)[n].name;e.includes(a)||(a===ct.PROCESS_DESCRIPTORS.HDB?t=!0:await $K(a))}t&&await jK(ct.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Je?.disconnect(),t}}o(Lpe,"restartAllServices");async function zK(e){if(En?.find(r=>r.name===e))return!0;let t=await Tpe.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}o(zK,"isServiceRegistered");async function jK(e){let t=PT.get(ct.CONFIG_PARAMS.THREADS_COUNT)??PT.get(ct.CONFIG_PARAMS.THREADS),r=await YK(e),n=_pe.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await EC(e),await gC(e)):e===ct.PROCESS_DESCRIPTORS.HDB?await KK():await qK(e)}o(jK,"reloadStopStart");var QK;async function JK(e=!1){for(let t in ct.CLUSTERING_PROCESSES){let r=ct.CLUSTERING_PROCESSES[t];await gC(r,e)}}o(JK,"startClusteringProcesses");async function XK(){QK=kK(ct.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await CT.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await CT.updateLocalStreams();let e=await gpe.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===ct.PRE_4_0_0_VERSION){Vl.info("Starting clustering upgrade 4.0.0 
process"),kK(ct.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}o(XK,"startClusteringThreads");async function Dpe(){for(let e in ct.CLUSTERING_PROCESSES)if(e!==ct.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===ct.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await QK.terminate();else{let t=ct.CLUSTERING_PROCESSES[e];await EC(t)}}o(Dpe,"stopClustering");async function Mpe(){for(let e in ct.CLUSTERING_PROCESSES){let t=ct.CLUSTERING_PROCESSES[e];if(await zK(t)===!1)return!1}return!0}o(Mpe,"isClusteringRunning");async function vpe(){await Qo.generateNatsConfig(!0),await CT.reloadNATSHub(),await CT.reloadNATSLeaf(),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}o(vpe,"reloadClustering")});var RC={};Oe(RC,{compactOnStart:()=>Upe,copyDb:()=>iY});async function Upe(){lc.notify("Running compact on start"),console.log("Running compact on start");let e=(0,SC.get)(U.ROOTPATH),t=new Map,r=st();(0,TC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,LT.join)(e,"backup",n+".mdb"),a=(0,LT.join)(e,vc,n+"-copy.mdb"),c=0;try{c=await eY(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){lc.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await iY(n,a),console.log("Backing up",n,"to",i);try{await(0,Kl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}}try{cd()}catch(n){lc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}for(let[n,{dbPath:s,copyDest:i}]of t)console.log("Moving copy compacted",n,"to",s),await(0,Kl.move)(i,s,{overwrite:!0}),await(0,Kl.remove)((0,LT.join)(e,vc,`${n}-copy.mdb-lock`));try{cd()}catch(n){lc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){lc.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,TC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Kl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw cd(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=!0,c=await eY(n);if(console.log("Database",n,"after compact has a total record count of",c),i!==c){a=!1;let l=`There is a discrepancy between pre and post compact record count for database ${n}.
25
- Total record count before compaction: ${i}, total after: ${c}.
26
- Database backup has not been removed and can be found here: ${s}`;lc.error(l),console.error(l)}(0,SC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)===!0||a===!1||(console.log("Removing backup",s),await(0,Kl.remove)(s))}}async function eY(e){let t=await(0,sY.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function tf(){}async function iY(e,t){console.log(`Copying database ${e} to ${t}`);let r=st()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=tf,m.primaryStore.remove=tf;for(let h in m.indices){let p=m.indices[h];p.put=tf,p.remove=tf}m.auditStore&&(m.auditStore.put=tf,m.auditStore.remove=tf),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,tY.open)(new rY.default(t)),c=a.openDB(DT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=kg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let y=new nY.default(!p,p);y.encoding="binary",y.compression=_;let T=n.openDB(m,y);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",y.compression=g;let R=a.openDB(m,y);R.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,R,p,d)}if(i){let m=n.openDB(DT.AUDIT_STORE_NAME,Em);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,y=0,T=0,R=1e7,N=null;for(;R-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),y+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",y,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",y,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var tY,LT,Kl,SC,rY,nY,DT,sY,TC,lc,yC=ue(()=>{De();tY=require("lmdb"),LT=require("path"),Kl=require("fs-extra"),SC=w(oe()),rY=w(Nm()),nY=w(Im()),DT=w(qt());k();Li();sY=w(Ua()),TC=w(bt()),lc=w(Q());o(Upe,"compactOnStart");o(eY,"getTotalDBRecordCount");o(tf,"noop");o(iY,"copyDb")});var nf=M((N0e,fY)=>{"use strict";var xpe=require("minimist"),{isMainThread:bC,parentPort:Sp,threadId:A0e}=require("worker_threads"),ft=(k(),v(W)),no=Q(),IC=ie(),vT=NT(),MT=mr(),b0e=It(),lY=bt(),Si=_p(),oY=qd(),{compactOnStart:Bpe}=(yC(),v(RC)),Fpe=Uc(),{restartWorkers:UT,onMessageByType:Hpe}=nt(),{handleHDBError:kpe,hdbErrors:Gpe}=ge(),{HTTP_STATUS_CODES:qpe}=Gpe,Tp=oe(),{sendOperationToNode:aY,getThisNodeName:$pe,monitorNodeCAs:Vpe}=(is(),v(Go)),{getHDBNodeTable:I0e}=(Ll(),v(vw));Tp.initSync();var gp=`Restarting HarperDB. This may take up to ${ft.RESTART_TIMEOUT_MS/1e3} seconds.`,Kpe="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",cY="Clustering is not enabled so cannot be restarted",Ype="Invalid service",rf,Us;fY.exports={restart:uY,restartService:NC};bC&&Hpe(ft.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await NC({service:e.workerType}):uY({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function uY(e){Us=Object.keys(e).length===0,rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB);let t=xpe(process.argv);if(t.service){await NC(t);return}if(Us&&!rf){console.error(Kpe);return}if(Us&&console.log(gp),rf){Si.enterPM2Mode(),no.notify(gp);let r=Fpe(Object.keys(ft.CONFIG_PARAM_MAP),!0);return IC.isEmptyOrZeroLength(Object.keys(r))||lY.updateConfigValue(void 0,void 0,r,!0,!0),Wpe(),gp}return bC?(no.notify(gp),Tp.get(ft.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Bpe(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{UT()},50)):Sp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART}),gp}o(uY,"restart");async function NC(e){let{service:t}=e;if(ft.HDB_PROCESS_SERVICES[t]===void 0)throw kpe(new Error,Ype,qpe.BAD_REQUEST,void 0,void 0,!0);if(Si.expectedRestartOfChildren(),rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB),!bC){e.replicated&&Vpe(),Sp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART,workerType:t}),Sp.ref(),await new Promise(s=>{Sp.on("message",i=>{i.type==="restart-complete"&&(s(),Sp.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===$pe())continue;let i;try{({job_id:i}=await aY(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await aY(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case ft.HDB_PROCESS_SERVICES.clustering:if(!Tp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=cY;break}Us&&console.log("Restarting clustering"),no.notify("Restarting clustering"),await dY();break;case ft.HDB_PROCESS_SERVICES.clustering_config:case ft.HDB_PROCESS_SERVICES["clustering config"]:if(!Tp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=cY;break}Us&&console.log("Restarting clusteringConfig"),no.notify("Restarting clustering_config"),await Si.reloadClustering();break;case"custom_functions":case"custom functions":case ft.HDB_PROCESS_SERVICES.harperdb:case ft.HDB_PROCESS_SERVICES.http_workers:case ft.HDB_PROCESS_SERVICES.http:if(Us&&!rf){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Us&&console.log("Restarting httpWorkers"),no.notify("Restarting http_workers"),Us?await Si.restart(ft.PROCESS_DESCRIPTORS.HDB):await UT("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(no.error(r),Us&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(NC,"restartService");async function Wpe(){await dY(),await Si.restart(ft.PROCESS_DESCRIPTORS.HDB),await IC.asyncSetTimeout(2e3),Tp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await AC(),Us&&(await MT.closeConnection(),process.exit(0))}o(Wpe,"restartPM2Mode");async function dY(){if(!lY.getConfigFromFile(ft.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await oY.getHDBProcessInfo()).clustering.length===0)no.trace("Clustering not running, restart will start clustering services"),await vT.generateNatsConfig(!0),await Si.startClusteringProcesses(),await Si.startClusteringThreads(),await AC(),Us&&await MT.closeConnection();else{await vT.generateNatsConfig(!0),rf?(no.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await oY.getHDBProcessInfo()).clustering.forEach(s=>{no.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await IC.asyncSetTimeout(3e3),await AC(),await MT.updateLocalStreams(),Us&&await MT.closeConnection(),no.trace("Restart clustering restarting ingest and reply service threads");let t=UT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=UT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(dY,"restartClustering");async function AC(){await vT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await vT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(AC,"removeNatsConfig")});var AY=M((C0e,yY)=>{"use strict";var O0e=require("lodash"),Hn=(k(),v(W)),{handleHDBError:mY,hdbErrors:zpe}=ge(),{HDB_ERROR_MSGS:jpe,HTTP_STATUS_CODES:Qpe}=zpe,wC=Q();yY.exports={getRolePermissions:Xpe};var Yl=Object.create(null),Jpe=o(e=>({key:e,perms:{}}),"permsTemplateObj"),_Y=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),gY=o((e=!1,t=!1,r=!1,n=!1)=>({[Hn.PERMS_CRUD_ENUM.READ]:e,[Hn.PERMS_CRUD_ENUM.INSERT]:t,[Hn.PERMS_CRUD_ENUM.UPDATE]:r,[Hn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),OC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...gY(t,r,n,s)}),"tablePermsTemplate"),hY=o((e,t=gY())=>({attribute_name:e,describe:RY(t),[Rp]:t[Rp],[CC]:t[CC],[PC]:t[PC]}),"attrPermsTemplate"),pY=o((e,t=!1)=>({attribute_name:e,describe:t,[Rp]:t}),"timestampAttrPermsTemplate"),{READ:Rp,INSERT:CC,UPDATE:PC}=Hn.PERMS_CRUD_ENUM,SY=Object.values(Hn.PERMS_CRUD_ENUM),TY=[Rp,CC,PC];function Xpe(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Hn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Yl[t]&&Yl[t].key===n)return Yl[t].perms;let s=Zpe(e,r);return Yl[t]?Yl[t].key=n:Yl[t]=Jpe(n),Yl[t].perms=s,s}catch(r){if(!e[Hn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Hn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Hn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${role_name}' must be updated to align with new structure from the 2.2.0 release.`;throw wC.error(n),wC.debug(r),mY(new Error,jpe.OUTDATED_PERMS_TRANSLATION_ERROR,Qpe.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
24
+ `)}catch(r){r.code==="ENOENT"?pn.debug("no license file found"):pn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(bhe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=EK(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){pn.error("There was an error parsing the license string."),pn.error(s),e.ram_allocation=Gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return rC=e,e}o(oC,"licenseSearch");async function Uhe(){return rC||await oC(),rC}o(Uhe,"getLicense");function xhe(){let e=oC().ram_allocation,t=process.constrainedMemory?.()||fK();if(t=Math.round(Math.min(t,fK())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(xhe,"checkMemoryLimit")});var lC=M((r0e,RK)=>{var yT=Qd(),gK=require("chalk"),ls=Q(),SK=require("prompt"),{promisify:Bhe}=require("util"),aC=(k(),v(W)),Fhe=require("fs-extra"),Hhe=require("path"),khe=ie(),{packageJson:Ghe}=Rt(),TK=oe();TK.initSync();var qhe=require("moment"),$he=Bhe(SK.get),Vhe=Hhe.join(TK.getHdbBasePath(),aC.LICENSE_KEY_DIR_NAME,aC.LICENSE_FILE_NAME,aC.LICENSE_FILE_NAME);RK.exports={getFingerprint:Yhe,setLicense:Khe,parseLicense:cC,register:Whe,getRegistrationInfo:jhe};async function Khe(e){if(e&&e.key&&e.company){try{ls.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await cC(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw ls.error(r),ls.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(Khe,"setLicense");async function Yhe(){let e={};try{e=await yT.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw ls.error(r),ls.error(t),new Error(r)}return e}o(Yhe,"getFingerprint");async function cC(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");ls.info("Validating license input...");let r=yT.validateLicense(e,t);if(ls.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(ls.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(ls.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{ls.info("writing license to disk"),await Fhe.writeFile(Vhe,JSON.stringify({license_key:e,company:t}))}catch(n){throw ls.error("Failed to write License"),n}return"Registration successful."}o(cC,"parseLicense");async function Whe(){let e=await zhe();return cC(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(Whe,"register");async function zhe(){let e=await yT.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:gK.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:gK.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{SK.start()}catch(n){ls.error(n)}let r;try{r=await $he(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(zhe,"promptForRegistration");async function jhe(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await yT.getLicense()}catch(r){throw ls.error(`There was an error when searching licenses due to: ${r.message}`),r}if(khe.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Ghe.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=qhe.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(jhe,"getRegistrationInfo")});var AK=M((s0e,yK)=>{"use strict";var Qhe=It(),uC=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+Qhe.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};yK.exports=uC});var NK=M((o0e,IK)=>{"use strict";var bK=It(),dC=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+bK.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+bK.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};IK.exports=dC});var OK=M((c0e,wK)=>{"use strict";var fC=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};wK.exports=fC});var PK=M((u0e,CK)=>{"use strict";var Jhe=It(),mC=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+Jhe.SERVER_SUFFIX.ADMIN,this.password=r}};CK.exports=mC});var NT=M((f0e,MK)=>{"use strict";var ql=require("path"),$l=require("fs-extra"),Xhe=AK(),Zhe=NK(),epe=OK(),tpe=PK(),hC=ts(),Xd=ie(),Fn=bt(),bT=(k(),v(W)),mp=It(),{CONFIG_PARAMS:nr}=bT,Zd=Q(),hp=oe(),LK=$i(),pC=mr(),rpe=as(),Jd="clustering",npe=1e4,DK=50;MK.exports={generateNatsConfig:ipe,removeNatsConfig:ope,getHubConfigPath:spe};function spe(){let e=hp.get(nr.ROOTPATH);return ql.join(e,Jd,mp.NATS_CONFIG_FILES.HUB_SERVER)}o(spe,"getHubConfigPath");async function ipe(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=hp.get(nr.ROOTPATH);$l.ensureDirSync(ql.join(r,"clustering","leaf")),hp.initSync();let n=Fn.getConfigFromFile(nr.CLUSTERING_TLS_CERT_AUTH),s=Fn.getConfigFromFile(nr.CLUSTERING_TLS_PRIVATEKEY),i=Fn.getConfigFromFile(nr.CLUSTERING_TLS_CERTIFICATE);!await $l.exists(i)&&!await $l.exists(!n)&&await rpe.createNatsCerts();let 
a=ql.join(r,Jd,mp.PID_FILES.HUB),c=ql.join(r,Jd,mp.PID_FILES.LEAF),l=Fn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=ql.join(r,Jd,mp.NATS_CONFIG_FILES.HUB_SERVER),d=ql.join(r,Jd,mp.NATS_CONFIG_FILES.LEAF_SERVER),f=Fn.getConfigFromFile(nr.CLUSTERING_TLS_INSECURE),m=Fn.getConfigFromFile(nr.CLUSTERING_TLS_VERIFY),h=Fn.getConfigFromFile(nr.CLUSTERING_NODENAME),p=Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await pC.checkNATSServerInstalled()||IT("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await hC.listUsers(),g=Fn.getConfigFromFile(nr.CLUSTERING_USER),y=await hC.getClusterUser();(Xd.isEmpty(y)||y.active!==!0)&&IT(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await AT(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await AT(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await AT(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),await AT(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],R=[];for(let[q,K]of _.entries())K.role?.role===bT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new tpe(K.username,LK.decrypt(K.hash))),R.push(new epe(K.username,LK.decrypt(K.hash))));let N=[],{hub_routes:O}=Fn.getClusteringRoutes();if(!Xd.isEmptyOrZeroLength(O))for(let q of O)N.push(`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@${q.host}:${q.port}`);let F=new Xhe(Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NAME),Fn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,R);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=Xd.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===bT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await $l.writeJson(u,F),Zd.trace(`Hub server config written to ${u}`));let Z=`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,G=`tls://${y.uri_encoded_name}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,Y=new Zhe(Fn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[G],T,R,i,s,n,f);n==null&&delete Y.tls.ca_file,(t===void 0||t===bT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await $l.writeJson(d,Y),Zd.trace(`Leaf server config written to ${d}`))}o(ipe,"generateNatsConfig");async function AT(e){let t=hp.get(e);return Xd.isEmpty(t)&&IT(`port undefined for '${e}'`),await Xd.isPortTaken(t)&&IT(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(AT,"isPortAvailable");function IT(e){let t=`Error generating clustering config: ${e}`;Zd.error(t),console.error(t),process.exit(1)}o(IT,"generateNatsConfigError");async function ope(e){let{port:t,config_file:r}=pC.getServerConfig(e),{username:n,decrypt_hash:s}=await hC.getClusterUser(),i=0,a=2e3;for(;i<DK;){try{let d=await pC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){Zd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=DK)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&Zd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await Xd.asyncSetTimeout(u)}let c="0".repeat(npe),l=ql.join(hp.get(nr.ROOTPATH),Jd,r);await $l.writeFile(l,c),await $l.remove(l),Zd.notify(e,"started.")}o(ope,"removeNatsConfig")});var HK=M((h0e,FK)=>{"use strict";var us=oe(),ape=Qd(),Ke=(k(),v(W)),pp=It(),zo=require("path"),{PACKAGE_ROOT:OT}=Rt(),vK=oe(),wT=ie(),ef="/dev/null",cpe=zo.join(OT,"launchServiceScripts"),UK=zo.join(OT,"utility/scripts"),lpe=zo.join(UK,Ke.HDB_RESTART_SCRIPT),xK=zo.resolve(OT,"dependencies",`${process.platform}-${process.arch}`,pp.NATS_BINARY_NAME);function BK(){let t=ape.licenseSearch().ram_allocation||Ke.RAM_ALLOCATION_ENUM.DEFAULT,r=Ke.MEM_SETTING_KEY+t,n={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return wT.noBootFile()&&(n[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=wT.getEnvCliRootPath()),{name:Ke.PROCESS_DESCRIPTORS.HDB,script:Ke.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:OT}}o(BK,"generateMainServerConfig");var upe=9930;function dpe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=zo.join(e,"clustering",pp.NATS_CONFIG_FILES.HUB_SERVER),r=zo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=vK.get(Ke.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=pp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==upe?"-"+n:""),script:xK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=ef,i.error_file=ef),i}o(dpe,"generateNatsHubServerConfig");var fpe=9940;function mpe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=zo.join(e,"clustering",pp.NATS_CONFIG_FILES.LEAF_SERVER),r=zo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=vK.get(Ke.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=pp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==fpe?"-"+n:""),script:xK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=ef,i.error_file=ef),i}o(mpe,"generateNatsLeafServerConfig");function hpe(){us.initSync();let e=zo.join(us.get(Ke.CONFIG_PARAMS.LOGGING_ROOT),Ke.LOG_NAMES.HDB),t={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ke.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:cpe,autorestart:!1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=ef,t.error_file=ef),t}o(hpe,"generateClusteringUpgradeV4ServiceConfig");function ppe(){let e={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.RESTART_HDB};return wT.noBootFile()&&(e[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=wT.getEnvCliRootPath()),{...{name:Ke.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:UK},script:lpe}}o(ppe,"generateRestart");function 
Epe(){return{apps:[BK()]}}o(Epe,"generateAllServiceConfigs");FK.exports={generateAllServiceConfigs:Epe,generateMainServerConfig:BK,generateRestart:ppe,generateNatsHubServerConfig:dpe,generateNatsLeafServerConfig:mpe,generateClusteringUpgradeV4ServiceConfig:hpe}});var _p=M((_0e,ZK)=>{"use strict";var ct=(k(),v(W)),_pe=ie(),Qo=NT(),CT=mr(),jo=It(),ac=HK(),PT=oe(),Vl=Q(),gpe=Ko(),{startWorker:kK,onMessageFromWorkers:Spe}=nt(),Tpe=qd(),E0e=require("util"),Rpe=require("child_process"),ype=require("fs"),{execFile:Ape}=Rpe,Je;ZK.exports={enterPM2Mode:bpe,start:cc,stop:EC,reload:qK,restart:$K,list:_C,describe:YK,connect:Jo,kill:Cpe,startAllServices:Ppe,startService:gC,getUniqueServicesList:WK,restartAllServices:Lpe,isServiceRegistered:zK,reloadStopStart:jK,restartHdb:KK,deleteProcess:wpe,startClusteringProcesses:JK,startClusteringThreads:XK,isHdbRestartRunning:Ope,isClusteringRunning:Mpe,stopClustering:Dpe,reloadClustering:vpe,expectedRestartOfChildren:VK};var Ep=!1;Spe(e=>{e.type==="restart"&&PT.initSync(!0)});function bpe(){Ep=!0}o(bpe,"enterPM2Mode");function Jo(){return Je||(Je=require("pm2")),new Promise((e,t)=>{Je.connect((r,n)=>{r&&t(r),e(n)})})}o(Jo,"connect");var En,Ipe=10,GK;function cc(e,t=!1){if(Ep)return Npe(e);let r=Ape(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let a=En.indexOf(r);a>-1&&En.splice(a,1),!GK&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<Ipe&&(ype.existsSync(Qo.getHubConfigPath())?cc(e):(await Qo.generateNatsConfig(!0),cc(e),await new Promise(c=>setTimeout(c,3e3)),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let a=PT.get(ct.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,d;for(;l=c.exec(i);){if(l.index&&jo.LOG_LEVEL_HIERARCHY[a]>=jo.LOG_LEVEL_HIERARCHY[d||"info"]){let h=d===jo.LOG_LEVELS.ERR||d===jo.LOG_LEVELS.WRN?Vl.OUTPUTS.STDERR:Vl.OUTPUTS.STDOUT;Vl.logCustomLevel(d||"info",h,n,i.slice(u,l.index).trim())}let[f,m]=l;u=l.index+f.length,d=jo.LOG_LEVELS[m]}if(jo.LOG_LEVEL_HIERARCHY[a]>=jo.LOG_LEVEL_HIERARCHY[d||"info"]){let f=d===jo.LOG_LEVELS.ERR||d===jo.LOG_LEVELS.WRN?Vl.OUTPUTS.STDERR:Vl.OUTPUTS.STDOUT;Vl.logCustomLevel(d||"info",f,n,i.slice(u).trim())}}if(o(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!En&&(En=[],!t)){let i=o(()=>{GK=!0,En&&(En.map(a=>a.kill()),process.exit(0))},"killChildren");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}En.push(r)}o(cc,"start");function Npe(e){return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.start(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(Npe,"startWithPM2");function EC(e){if(!Ep){for(let t of En||[])t.name===e&&(En.splice(En.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.stop(e,async(n,s)=>{n&&(Je.disconnect(),r(n)),Je.delete(e,(i,a)=>{i&&(Je.disconnect(),r(n)),Je.disconnect(),t(a)})})})}o(EC,"stop");function qK(e){return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.reload(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(qK,"reload");function $K(e){if(!Ep){VK();for(let t of En||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.restart(e,(n,s)=>{Je.disconnect(),t(s)})})}o($K,"restart");function VK(){for(let e of En||[])e.config&&(e.config.restarts=0)}o(VK,"expectedRestartOfChildren");function wpe(e){return 
new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.delete(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(wpe,"deleteProcess");async function KK(){await cc(ac.generateRestart())}o(KK,"restartHdb");async function Ope(){let e=await _C();for(let t in e)if(e[t].name===ct.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}o(Ope,"isHdbRestartRunning");function _C(){return new Promise(async(e,t)=>{try{await Jo()}catch(r){t(r)}Je.list((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(_C,"list");function YK(e){return new Promise(async(t,r)=>{try{await Jo()}catch(n){r(n)}Je.describe(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(YK,"describe");function Cpe(){if(!Ep){for(let e of En||[])e.kill();En=[];return}return new Promise(async(e,t)=>{try{await Jo()}catch(r){t(r)}Je.killDaemon((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(Cpe,"kill");async function Ppe(){try{await JK(),await XK(),await cc(ac.generateAllServiceConfigs())}catch(e){throw Je?.disconnect(),e}}o(Ppe,"startAllServices");async function gC(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case ct.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=ac.generateMainServerConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=ac.generateNatsIngestServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=ac.generateNatsReplyServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=ac.generateNatsHubServerConfig(),await cc(r,t),await Qo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=ac.generateNatsLeafServerConfig(),await cc(r,t),await Qo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=ac.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await cc(r)}catch(r){throw Je?.disconnect(),r}}o(gC,"startService");async function WK(){try{let e=await _C(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Je?.disconnect(),e}}o(WK,"getUniqueServicesList");async function Lpe(e=[]){try{let t=!1,r=await WK();for(let n=0,s=Object.values(r).length;n<s;n++){let a=Object.values(r)[n].name;e.includes(a)||(a===ct.PROCESS_DESCRIPTORS.HDB?t=!0:await $K(a))}t&&await jK(ct.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Je?.disconnect(),t}}o(Lpe,"restartAllServices");async function zK(e){if(En?.find(r=>r.name===e))return!0;let t=await Tpe.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}o(zK,"isServiceRegistered");async function jK(e){let t=PT.get(ct.CONFIG_PARAMS.THREADS_COUNT)??PT.get(ct.CONFIG_PARAMS.THREADS),r=await YK(e),n=_pe.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await EC(e),await gC(e)):e===ct.PROCESS_DESCRIPTORS.HDB?await KK():await qK(e)}o(jK,"reloadStopStart");var QK;async function JK(e=!1){for(let t in ct.CLUSTERING_PROCESSES){let r=ct.CLUSTERING_PROCESSES[t];await gC(r,e)}}o(JK,"startClusteringProcesses");async function XK(){QK=kK(ct.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await CT.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await CT.updateLocalStreams();let e=await gpe.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===ct.PRE_4_0_0_VERSION){Vl.info("Starting clustering upgrade 4.0.0 
process"),kK(ct.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}o(XK,"startClusteringThreads");async function Dpe(){for(let e in ct.CLUSTERING_PROCESSES)if(e!==ct.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===ct.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await QK.terminate();else{let t=ct.CLUSTERING_PROCESSES[e];await EC(t)}}o(Dpe,"stopClustering");async function Mpe(){for(let e in ct.CLUSTERING_PROCESSES){let t=ct.CLUSTERING_PROCESSES[e];if(await zK(t)===!1)return!1}return!0}o(Mpe,"isClusteringRunning");async function vpe(){await Qo.generateNatsConfig(!0),await CT.reloadNATSHub(),await CT.reloadNATSLeaf(),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await Qo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}o(vpe,"reloadClustering")});var RC={};Oe(RC,{compactOnStart:()=>Upe,copyDb:()=>iY});async function Upe(){lc.notify("Running compact on start"),console.log("Running compact on start");let e=(0,SC.get)(U.ROOTPATH),t=new Map,r=st();(0,TC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,LT.join)(e,"backup",n+".mdb"),a=(0,LT.join)(e,vc,n+"-copy.mdb"),c=0;try{c=await eY(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){lc.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await iY(n,a),console.log("Backing up",n,"to",i);try{await(0,Kl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}console.log("Moving copy compacted",n,"to",s),await(0,Kl.move)(a,s,{overwrite:!0}),await(0,Kl.remove)((0,LT.join)(e,vc,`${n}-copy.mdb-lock`))}try{cd()}catch(n){lc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}try{cd()}catch(n){lc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){lc.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,TC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Kl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw cd(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=await eY(n);if(console.log("Database",n,"after compact has a total record count of",a),i!==a){let c=`There is a discrepancy between pre and post compact record count for database ${n}.
+ Total record count before compaction: ${i}, total after: ${a}.
+ Database backup has not been removed and can be found here: ${s}`;lc.error(c),console.error(c)}(0,SC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)!==!0&&(console.log("Removing backup",s),await(0,Kl.remove)(s))}}async function eY(e){let t=await(0,sY.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function tf(){}async function iY(e,t){console.log(`Copying database ${e} to ${t}`);let r=st()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=tf,m.primaryStore.remove=tf;for(let h in m.indices){let p=m.indices[h];p.put=tf,p.remove=tf}m.auditStore&&(m.auditStore.put=tf,m.auditStore.remove=tf),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,tY.open)(new rY.default(t)),c=a.openDB(DT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=kg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let y=new nY.default(!p,p);y.encoding="binary",y.compression=_;let T=n.openDB(m,y);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",y.compression=g;let R=a.openDB(m,y);R.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,R,p,d)}if(i){let m=n.openDB(DT.AUDIT_STORE_NAME,Em);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,y=0,T=0,R=1e7,N=null;for(;R-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),y+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",y,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",y,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var tY,LT,Kl,SC,rY,nY,DT,sY,TC,lc,yC=ue(()=>{De();tY=require("lmdb"),LT=require("path"),Kl=require("fs-extra"),SC=w(oe()),rY=w(Nm()),nY=w(Im()),DT=w(qt());k();Li();sY=w(Ua()),TC=w(bt()),lc=w(Q());o(Upe,"compactOnStart");o(eY,"getTotalDBRecordCount");o(tf,"noop");o(iY,"copyDb")});var nf=M((N0e,fY)=>{"use strict";var xpe=require("minimist"),{isMainThread:bC,parentPort:Sp,threadId:A0e}=require("worker_threads"),ft=(k(),v(W)),no=Q(),IC=ie(),vT=NT(),MT=mr(),b0e=It(),lY=bt(),Si=_p(),oY=qd(),{compactOnStart:Bpe}=(yC(),v(RC)),Fpe=Uc(),{restartWorkers:UT,onMessageByType:Hpe}=nt(),{handleHDBError:kpe,hdbErrors:Gpe}=ge(),{HTTP_STATUS_CODES:qpe}=Gpe,Tp=oe(),{sendOperationToNode:aY,getThisNodeName:$pe,monitorNodeCAs:Vpe}=(is(),v(Go)),{getHDBNodeTable:I0e}=(Ll(),v(vw));Tp.initSync();var gp=`Restarting HarperDB. This may take up to ${ft.RESTART_TIMEOUT_MS/1e3} seconds.`,Kpe="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",cY="Clustering is not enabled so cannot be restarted",Ype="Invalid service",rf,Us;fY.exports={restart:uY,restartService:NC};bC&&Hpe(ft.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await NC({service:e.workerType}):uY({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function uY(e){Us=Object.keys(e).length===0,rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB);let t=xpe(process.argv);if(t.service){await NC(t);return}if(Us&&!rf){console.error(Kpe);return}if(Us&&console.log(gp),rf){Si.enterPM2Mode(),no.notify(gp);let r=Fpe(Object.keys(ft.CONFIG_PARAM_MAP),!0);return IC.isEmptyOrZeroLength(Object.keys(r))||lY.updateConfigValue(void 0,void 0,r,!0,!0),Wpe(),gp}return bC?(no.notify(gp),Tp.get(ft.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Bpe(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{UT()},50)):Sp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART}),gp}o(uY,"restart");async function NC(e){let{service:t}=e;if(ft.HDB_PROCESS_SERVICES[t]===void 0)throw kpe(new Error,Ype,qpe.BAD_REQUEST,void 0,void 0,!0);if(Si.expectedRestartOfChildren(),rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB),!bC){e.replicated&&Vpe(),Sp.postMessage({type:ft.ITC_EVENT_TYPES.RESTART,workerType:t}),Sp.ref(),await new Promise(s=>{Sp.on("message",i=>{i.type==="restart-complete"&&(s(),Sp.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===$pe())continue;let i;try{({job_id:i}=await aY(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await aY(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case ft.HDB_PROCESS_SERVICES.clustering:if(!Tp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=cY;break}Us&&console.log("Restarting clustering"),no.notify("Restarting clustering"),await dY();break;case ft.HDB_PROCESS_SERVICES.clustering_config:case ft.HDB_PROCESS_SERVICES["clustering config"]:if(!Tp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=cY;break}Us&&console.log("Restarting clusteringConfig"),no.notify("Restarting clustering_config"),await Si.reloadClustering();break;case"custom_functions":case"custom functions":case ft.HDB_PROCESS_SERVICES.harperdb:case ft.HDB_PROCESS_SERVICES.http_workers:case ft.HDB_PROCESS_SERVICES.http:if(Us&&!rf){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Us&&console.log("Restarting httpWorkers"),no.notify("Restarting http_workers"),Us?await Si.restart(ft.PROCESS_DESCRIPTORS.HDB):await UT("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(no.error(r),Us&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(NC,"restartService");async function Wpe(){await dY(),await Si.restart(ft.PROCESS_DESCRIPTORS.HDB),await IC.asyncSetTimeout(2e3),Tp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await AC(),Us&&(await MT.closeConnection(),process.exit(0))}o(Wpe,"restartPM2Mode");async function dY(){if(!lY.getConfigFromFile(ft.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await oY.getHDBProcessInfo()).clustering.length===0)no.trace("Clustering not running, restart will start clustering services"),await vT.generateNatsConfig(!0),await Si.startClusteringProcesses(),await Si.startClusteringThreads(),await AC(),Us&&await MT.closeConnection();else{await vT.generateNatsConfig(!0),rf?(no.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await oY.getHDBProcessInfo()).clustering.forEach(s=>{no.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await IC.asyncSetTimeout(3e3),await AC(),await MT.updateLocalStreams(),Us&&await MT.closeConnection(),no.trace("Restart clustering restarting ingest and reply service threads");let t=UT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=UT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(dY,"restartClustering");async function AC(){await vT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await vT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(AC,"removeNatsConfig")});var AY=M((C0e,yY)=>{"use strict";var O0e=require("lodash"),Hn=(k(),v(W)),{handleHDBError:mY,hdbErrors:zpe}=ge(),{HDB_ERROR_MSGS:jpe,HTTP_STATUS_CODES:Qpe}=zpe,wC=Q();yY.exports={getRolePermissions:Xpe};var Yl=Object.create(null),Jpe=o(e=>({key:e,perms:{}}),"permsTemplateObj"),_Y=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),gY=o((e=!1,t=!1,r=!1,n=!1)=>({[Hn.PERMS_CRUD_ENUM.READ]:e,[Hn.PERMS_CRUD_ENUM.INSERT]:t,[Hn.PERMS_CRUD_ENUM.UPDATE]:r,[Hn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),OC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...gY(t,r,n,s)}),"tablePermsTemplate"),hY=o((e,t=gY())=>({attribute_name:e,describe:RY(t),[Rp]:t[Rp],[CC]:t[CC],[PC]:t[PC]}),"attrPermsTemplate"),pY=o((e,t=!1)=>({attribute_name:e,describe:t,[Rp]:t}),"timestampAttrPermsTemplate"),{READ:Rp,INSERT:CC,UPDATE:PC}=Hn.PERMS_CRUD_ENUM,SY=Object.values(Hn.PERMS_CRUD_ENUM),TY=[Rp,CC,PC];function Xpe(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Hn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Yl[t]&&Yl[t].key===n)return Yl[t].perms;let s=Zpe(e,r);return Yl[t]?Yl[t].key=n:Yl[t]=Jpe(n),Yl[t].perms=s,s}catch(r){if(!e[Hn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Hn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Hn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${role_name}' must be updated to align with new structure from the 2.2.0 release.`;throw wC.error(n),wC.debug(r),mY(new Error,jpe.OUTDATED_PERMS_TRANSLATION_ERROR,Qpe.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
  ${r.stack}`;throw wC.error(n),mY(new Error)}}}o(Xpe,"getRolePermissions");function Zpe(e,t){let r=Object.create(null);r.super_user=!1;let n=e.permission;r[Hn.SYSTEM_SCHEMA_NAME]=n[Hn.SYSTEM_SCHEMA_NAME],r.structure_user=n.structure_user;let s=Array.isArray(e.permission.structure_user)||e.permission.structure_user===!0?e.permission.structure_user:[];return Object.keys(t).forEach(i=>{if(s===!0||s.indexOf(i)>-1){r[i]=eEe(t[i]);return}r[i]=_Y(),n[i]?(n[i].describe&&(r[i].describe=!0),Object.keys(t[i]).forEach(a=>{if(n[i].tables[a]){let c=n[i].tables[a],l=t[i][a],u=tEe(c,l);r[i].describe||SY.forEach(d=>{u[d]&&(r[i].describe=!0)}),r[i].tables[a]=u}else r[i].tables[a]=OC()})):Object.keys(t[i]).forEach(a=>{r[i].tables[a]=OC()})}),r}o(Zpe,"translateRolePermissions");function eEe(e){let t=_Y(!0);return Object.keys(e).forEach(r=>{t.tables[r]=OC(!0,!0,!0,!0,!0)}),t}o(eEe,"createStructureUserPermissions");function tEe(e,t){let{attribute_permissions:r}=e;if(r?.length>0){let s={...e};s.attribute_permissions=[];let i=r.reduce((u,d)=>{let{attribute_name:f}=d,m=d;return Hn.TIME_STAMP_NAMES.includes(f)&&(m=pY(f,d[Rp])),u[f]=m,u},{}),a=t.primaryKey||t.hash_attribute,c=!!i[a],l=hY(a);return t.attributes.forEach(({attribute:u})=>{if(i[u]){let d=i[u];d.describe=RY(d),s.attribute_permissions.push(d),c||rEe(d,l)}else if(u!==a){let d;Hn.TIME_STAMP_NAMES.includes(u)?d=pY(u):d=hY(u),s.attribute_permissions.push(d)}}),c||s.attribute_permissions.push(l),s.describe=EY(s),s}else return e.describe=EY(e),e}o(tEe,"getTableAttrPerms");function EY(e){return SY.filter(t=>e[t]).length>0}o(EY,"getSchemaTableDescribePerm");function RY(e){return TY.filter(t=>e[t]).length>0}o(RY,"getAttributeDescribePerm");function rEe(e,t){TY.forEach(r=>{e[r]&&!t[r]&&(t[r]=!0,t.describe=!0)})}o(rEe,"checkForHashPerms")});var yp={};Oe(yp,{authentication:()=>LY,bypassAuth:()=>dEe,login:()=>MC,logout:()=>vC,start:()=>fEe});function dEe(){PY=!0}async function LY(e,t){let r=e.headers.asObject,n=r.authorization,s=r.cookie,i=r.origin,a=[];try{if(i){let h=e.isOperationsServer?aEe?oEe:[]:iEe?sEe:[];if(h.includes(i)||h.includes("*")){if(e.method==="OPTIONS"){let p=_n.get(U.HTTP_CORSACCESSCONTROLALLOWHEADERS)??"Accept, Content-Type, Authorization",_=new Os([["Access-Control-Allow-Methods","POST, GET, PUT, DELETE, PATCH, OPTIONS"],["Access-Control-Allow-Headers",p],["Access-Control-Allow-Origin",i]]);return xT&&_.set("Access-Control-Allow-Credentials","true"),{status:200,headers:_}}a.push("Access-Control-Allow-Origin",i),xT&&a.push("Access-Control-Allow-Credentials","true")}}let l,u;if(xT){i||(i=r.host);let h=(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session=",p=s?.split(/;\s+/)||[];for(let _ of p)if(_.startsWith(h)){let g=_.indexOf(";");l=_.slice(h.length,g===-1?_.length:g),u=await bY.get(l);break}e.session=u||(u={})}let d=o((h,p,_)=>{let g=new BT.AuthAuditLog(h,p,ya.AUTHENTICATION,r["x-forwarded-for"]??e.ip,e.method,e.pathname);g.auth_strategy=_,l&&(g.session_id=l),r.referer&&(g.referer=r.referer),r.origin&&(g.origin=r.origin),p===Zs.SUCCESS?LC.notify(g):LC.error(g)},"authAuditLog");if(!e.authorized&&e.mtlsConfig&&e.peerCertificate.subject&&e?._nodeRequest?.socket?.authorizationError&&LC.error("Authorization error:",e._nodeRequest.socket.authorizationError),e.mtlsConfig&&e.authorized&&e.peerCertificate.subject){let h=e.mtlsConfig.user;h!==null?((h===void 0||h==="Common Name"||h==="CN")&&(h=e.peerCertificate.subject.CN),e.user=await Le.getUser(h,null,e),d(h,Zs.SUCCESS,"mTLS")):nEe("HTTPS/WSS mTLS authorized connection (mTLS did not 
authorize a user)","from",e.ip)}let f;if(!e.user)if(n){if(f=Wl.get(n),!f){let h=n.indexOf(" "),p=n.slice(0,h),_=n.slice(h+1),g,y;try{switch(p){case"Basic":let T=atob(_),R=T.indexOf(":");g=T.slice(0,R),y=T.slice(R+1),f=g||y?await Le.getUser(g,y,e):null;break;case"Bearer":try{f=await dw(_)}catch(N){if(N.message==="invalid token")try{return await ES(_),c({status:-1})}catch{throw N}}break}}catch(T){return lEe&&(Wl.get(_)||(Wl.set(_,_),d(g,Zs.FAILURE,p))),c({status:401,body:ba({error:T.message},e)})}Wl.set(n,f),cEe&&d(f.username,Zs.SUCCESS,p)}e.user=f}else u?.user?e.user=await Le.getUser(u.user,null,e):(PY&&(e.ip?.includes("127.0.0.")||e.ip=="::1")||e?._nodeRequest?.socket?.server?._pipeName&&e.ip===void 0)&&(e.user=await(0,NY.getSuperUser)());xT&&(e.session.update=function(h){let p=_n.get(U.AUTHENTICATION_COOKIE_EXPIRES),_=e.protocol==="https"||r.host?.startsWith("localhost:")||r.host?.startsWith("127.0.0.1:")||r.host?.startsWith("::1");if(!l){l=(0,wY.v4)();let g=_n.get(U.AUTHENTICATION_COOKIE_DOMAINS),y=p?new Date(Date.now()+(0,DC.convertToMS)(p)).toUTCString():uEe,T=g?.find(O=>r.host?.endsWith(O)),N=`${(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session="}${l}; Path=/; Expires=${y}; HttpOnly`;T&&(N+=`; Domain=${T}`),_&&(N+="; SameSite=None; Secure"),a?a.push("Set-Cookie",N):m?.headers?.set&&m.headers.set("Set-Cookie",N)}return _&&(a?(i&&a.push("Access-Control-Expose-Headers","X-Hdb-Session"),a.push("X-Hdb-Session","Secure")):m?.headers?.set&&(i&&m.headers.set("Access-Control-Expose-Headers","X-Hdb-Session"),m.headers.set("X-Hdb-Session","Secure"))),h.id=l,bY.put(h,{expiresAt:p?Date.now()+(0,DC.convertToMS)(p):void 0})},e.login=async function(h,p){let _=e.user=await Le.authenticateUser(h,p,e);e.session.update({user:_&&(_.getId?.()??_.username)})});let m=await t(e);return m&&(m.status===401&&(r["user-agent"]?.startsWith("Mozilla")&&r.accept?.startsWith("text/html")&&bs.loginPath?(m.status=302,m.headers.set("Location",bs.loginPath(e))):m.headers.set("WWW-Authenticate","Basic")),c(m))}catch(l){throw c(l)}function c(l){let u=a.length;if(u>0){let d=l.headers;d||(l.headers=d=new Os);for(let f=0;f<u;){let m=a[f++];d.set(m,a[f++])}}return a=null,l}o(c,"applyResponseHeaders")}function fEe({server:e,port:t,securePort:r}){e.http(LY,t||r?{port:t,securePort:r}:{port:"all"}),IY||(IY=!0,setInterval(()=>{Wl=new Map},_n.get(U.AUTHENTICATION_CACHETTL)).unref(),OY.user.addListener(()=>{Wl=new Map}))}async function MC(e){if(!e.baseRequest?.login)throw new Error("No session for login");return e.baseResponse.headers.set=(t,r)=>{e.fastifyResponse.header(t,r)},await e.baseRequest.login(e.username,e.password??""),"Login successful"}async function vC(e){if(!e.baseRequest.session)throw new Error("No session for logout");return await e.baseRequest.session.update({user:null}),"Logout successful"}var 
NY,wY,_n,BT,OY,DC,CY,nEe,LC,sEe,iEe,oEe,aEe,bY,xT,PY,cEe,lEe,uEe,Wl,IY,FT=ue(()=>{NY=w(ts());Mr();Bu();Sd();De();wY=require("uuid"),_n=w(oe());k();BT=w(Q()),OY=w(sh());uh();DC=w(ie());Ro();CY=(0,BT.forComponent)("authentication"),{debug:nEe}=CY,LC=CY.withTag("auth-event");_n.initSync();sEe=_n.get(U.HTTP_CORSACCESSLIST),iEe=_n.get(U.HTTP_CORS),oEe=_n.get(U.OPERATIONSAPI_NETWORK_CORSACCESSLIST),aEe=_n.get(U.OPERATIONSAPI_NETWORK_CORS),bY=ze({table:"hdb_session",database:"system",attributes:[{name:"id",isPrimaryKey:!0},{name:"user"}]}),xT=_n.get(U.AUTHENTICATION_ENABLESESSIONS)??!0,PY=process.env.AUTHENTICATION_AUTHORIZELOCAL??_n.get(U.AUTHENTICATION_AUTHORIZELOCAL)??process.env.DEV_MODE,cEe=_n.get(U.LOGGING_AUDITAUTHEVENTS_LOGSUCCESSFUL)??!1,lEe=_n.get(U.LOGGING_AUDITAUTHEVENTS_LOGFAILED)??!1,uEe="Tue, 01 Oct 8307 19:33:20 GMT",Wl=new Map;Le.onInvalidatedUser(()=>{Wl=new Map});o(dEe,"bypassAuth");o(LY,"authentication");o(fEe,"start");o(MC,"login");o(vC,"logout")});var FY=M((H0e,BY)=>{"use strict";var we=require("joi"),DY=require("fs-extra"),MY=require("path"),ds=ut(),vY=oe(),UY=(k(),v(W)),xY=Q(),{hdbErrors:mEe}=ge(),{HDB_ERROR_MSGS:gn}=mEe,Xo=/^[a-zA-Z0-9-_]+$/,hEe=/^[a-zA-Z0-9-_]+$/;BY.exports={getDropCustomFunctionValidator:EEe,setCustomFunctionValidator:_Ee,addComponentValidator:REe,dropCustomFunctionProjectValidator:yEe,packageComponentValidator:AEe,deployComponentValidator:bEe,setComponentFileValidator:gEe,getComponentFileValidator:TEe,dropComponentFileValidator:SEe,addSSHKeyValidator:IEe,updateSSHKeyValidator:NEe,deleteSSHKeyValidator:wEe,setSSHKnownHostsValidator:OEe};function HT(e,t,r){try{let n=vY.get(UY.CONFIG_PARAMS.COMPONENTSROOT),s=MY.join(n,t);return DY.existsSync(s)?e?t:r.message(gn.PROJECT_EXISTS):e?r.message(gn.NO_PROJECT):t}catch(n){return xY.error(n),r.message(gn.VALIDATION_ERR)}}o(HT,"checkProjectExists");function Ap(e,t){return e.includes("..")?t.message("Invalid file path"):e}o(Ap,"checkFilePath");function pEe(e,t,r,n){try{let s=vY.get(UY.CONFIG_PARAMS.COMPONENTSROOT),i=MY.join(s,e,t,r+".js");return DY.existsSync(i)?r:n.message(gn.NO_FILE)}catch(s){return xY.error(s),n.message(gn.VALIDATION_ERR)}}o(pEe,"checkFileExists");function EEe(e){let t=we.object({project:we.string().pattern(Xo).custom(HT.bind(null,!0)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().pattern(Xo).custom(pEe.bind(null,e.project,e.type)).custom(Ap).required().messages({"string.pattern.base":gn.BAD_FILE_NAME})});return ds.validateBySchema(e,t)}o(EEe,"getDropCustomFunctionValidator");function _Ee(e){let t=we.object({project:we.string().pattern(Xo).custom(HT.bind(null,!0)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().custom(Ap).required(),function_content:we.string().required()});return ds.validateBySchema(e,t)}o(_Ee,"setCustomFunctionValidator");function gEe(e){let t=we.object({project:we.string().pattern(Xo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),file:we.string().custom(Ap).required(),payload:we.string().allow("").optional(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ds.validateBySchema(e,t)}o(gEe,"setComponentFileValidator");function SEe(e){let t=we.object({project:we.string().pattern(Xo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),file:we.string().custom(Ap).optional()});return 
ds.validateBySchema(e,t)}o(SEe,"dropComponentFileValidator");function TEe(e){let t=we.object({project:we.string().required(),file:we.string().custom(Ap).required(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ds.validateBySchema(e,t)}o(TEe,"getComponentFileValidator");function REe(e){let t=we.object({project:we.string().pattern(Xo).custom(HT.bind(null,!1)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME})});return ds.validateBySchema(e,t)}o(REe,"addComponentValidator");function yEe(e){let t=we.object({project:we.string().pattern(Xo).custom(HT.bind(null,!0)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME})});return ds.validateBySchema(e,t)}o(yEe,"dropCustomFunctionProjectValidator");function AEe(e){let t=we.object({project:we.string().pattern(Xo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),skip_node_modules:we.boolean(),skip_symlinks:we.boolean()});return ds.validateBySchema(e,t)}o(AEe,"packageComponentValidator");function bEe(e){let t=we.object({project:we.string().pattern(Xo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),package:we.string().optional(),restart:we.alternatives().try(we.boolean(),we.string().valid("rolling")).optional()});return ds.validateBySchema(e,t)}o(bEe,"deployComponentValidator");function IEe(e){let t=we.object({name:we.string().pattern(hEe).required().messages({"string.pattern.base":gn.BAD_SSH_KEY_NAME}),key:we.string().required(),host:we.string().required(),hostname:we.string().required(),known_hosts:we.string().optional()});return ds.validateBySchema(e,t)}o(IEe,"addSSHKeyValidator");function NEe(e){let t=we.object({name:we.string().required(),key:we.string().required()});return ds.validateBySchema(e,t)}o(NEe,"updateSSHKeyValidator");function wEe(e){let t=we.object({name:we.string().required()});return ds.validateBySchema(e,t)}o(wEe,"deleteSSHKeyValidator");function OEe(e){let t=we.object({known_hosts:we.string().required()});return ds.validateBySchema(e,t)}o(OEe,"setSSHKnownHostsValidator")});var Np=M((G0e,$Y)=>{"use strict";var kT=require("joi"),uc=require("path"),sf=require("fs-extra"),{exec:CEe,spawn:PEe}=require("child_process"),LEe=require("util"),DEe=LEe.promisify(CEe),of=(k(),v(W)),{PACKAGE_ROOT:MEe}=Rt(),{handleHDBError:bp,hdbErrors:vEe}=ge(),{HTTP_STATUS_CODES:Ip}=vEe,zl=oe(),UEe=ut(),dc=Q(),{once:xEe}=require("events");zl.initSync();var UC=zl.get(of.CONFIG_PARAMS.COMPONENTSROOT),HY="npm install --force --omit=dev --json",BEe=`${HY} --dry-run`,FEe=zl.get(of.CONFIG_PARAMS.ROOTPATH),GT=uc.join(FEe,"ssh");$Y.exports={installModules:qEe,auditModules:$Ee,installAllRootModules:HEe,uninstallRootModule:kEe,linkHarperdb:GEe,runCommand:af};async function HEe(e=!1,t=zl.get(of.CONFIG_PARAMS.ROOTPATH)){await qT();let r=!1,n=process.env;sf.pathExistsSync(GT)&&sf.readdirSync(GT).forEach(s=>{s.includes(".key")&&!r&&(n={GIT_SSH_COMMAND:"ssh -F "+uc.join(GT,"config")+" -o UserKnownHostsFile="+uc.join(GT,"known_hosts"),...process.env},r=!0)});try{let s=zl.get(of.CONFIG_PARAMS.ROOTPATH),i=uc.join(s,"node_modules","harperdb");sf.lstatSync(i).isSymbolicLink()&&sf.unlinkSync(i)}catch(s){s.code!=="ENOENT"&&dc.error("Error removing symlink:",s)}await af(e?"npm install --force --ignore-scripts":"npm install --force",t,n)}o(HEe,"installAllRootModules");async function kEe(e){await af(`npm uninstall ${e}`,zl.get(of.CONFIG_PARAMS.ROOTPATH))}o(kEe,"uninstallRootModule");async function GEe(){await qT(),await af(`npm link 
${MEe}`,zl.get(of.CONFIG_PARAMS.ROOTPATH))}o(GEe,"linkHarperdb");async function af(e,t=void 0,r=process.env){dc.debug({tagName:"npm_run_command"},`running command: \`${e}\``);let n=PEe(e,{shell:!0,cwd:t,env:r,stdio:["ignore","pipe","pipe"]}),s="",i="";n.stdout.on("data",c=>{let l=c.toString();dc.debug({tagName:"npm_run_command:stdout"},l),s+=l}),n.stderr.on("data",c=>{let l=c.toString();dc.error({tagName:"npm_run_command:stderr"},l),i+=l});let[a]=await xEe(n,"close");if(a!==0)throw new Error(`Command \`${e}\` exited with code ${a}.${i===""?"":` Error: ${i}`}`);return s||void 0}o(af,"runCommand");async function qEe(e){let t="install_node_modules is deprecated. Dependencies are automatically installed on deploy, and install_node_modules can lead to inconsistent behavior";dc.warn(t,e.projects);let r=qY(e);if(r)throw bp(r,r.message,Ip.BAD_REQUEST);let{projects:n,dryRun:s}=e,i=s===!0?BEe:HY;await qT(),await GY(n);let a={};for(let c=0,l=n.length;c<l;c++){let u=n[c];a[u]={npm_output:null,npm_error:null};let d=uc.join(UC,u),f,m=null;try{let{stdout:h,stderr:p}=await DEe(i,{cwd:d});f=h?h.replace(`
  `,""):null,m=p?p.replace(`
  `,""):null}catch(h){h.stderr?a[u].npm_error=kY(h.stderr):a[u].npm_error=h.message;continue}try{a[u].npm_output=JSON.parse(f)}catch{a[u].npm_output=f}try{a[u].npm_error=JSON.parse(m)}catch{a[u].npm_error=m}}return dc.info(`finished installModules with response ${a}`),a.warning=t,a}o(qEe,"installModules");function kY(e){let t='"error": {',r=e.indexOf('"error": {'),n=e.indexOf(`}