harperdb 4.6.15 → 4.6.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,9 +16,9 @@
  `,""));return r.replace(`
  `,"")}o(BG,"runCommand");async function _le(){try{await Wce.access(Aw)}catch{return!1}let e=await BG(`${Aw} --version`,void 0),t=e.substring(e.lastIndexOf("v")+1,e.length);return zce.eq(t,ple)}o(_le,"checkNATSServerInstalled");async function Ow(e,t,r,n=!0,s="127.0.0.1"){if(!t&&!r){let a=await UG.getClusterUser();if(Ll(a))throw new Error("Unable to get nats connection. Cluster user is undefined.");t=a.username,r=a.decrypt_hash}pi.trace("create nats connection called");let i=await ole({name:s,port:e,user:t,pass:r,maxReconnectAttempts:-1,waitOnFirstConnect:n,timeout:2e5,tls:{keyFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_PRIVATEKEY),certFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERTIFICATE),caFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERT_AUTH),rejectUnauthorized:!1}});return i.protocol.transport.socket.unref(),pi.trace("create connection established a nats client connection with id",i?.info?.client_id),i.closed().then(a=>{a&&pi.error("Error with Nats client connection, connection closed",a),i===mn&&FG()}),i}o(Ow,"createConnection");function FG(){mn=void 0,Ol=void 0,Cl=void 0,Pl=void 0}o(FG,"clearClientCache");async function gle(){mn&&(await mn.drain(),mn=void 0,Ol=void 0,Cl=void 0,Pl=void 0)}o(gle,"closeConnection");var mn,Pl;async function Bh(){return Pl||(Pl=Ow(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),void 0,void 0),mn=await Pl),mn||Pl}o(Bh,"getConnection");async function Fh(){if(Ol)return Ol;Ll(mn)&&await Bh();let{domain:e}=Nd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Ll(e))throw new Error("Error getting JetStream domain. Unable to get JetStream manager.");return Ol=await mn.jetstreamManager({domain:e,timeout:6e4}),Ol}o(Fh,"getJetStreamManager");async function HG(){if(Cl)return Cl;Ll(mn)&&await Bh();let{domain:e}=Nd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Ll(e))throw new Error("Error getting JetStream domain. 
Unable to get JetStream manager.");return Cl=mn.jetstream({domain:e,timeout:6e4}),Cl}o(HG,"getJetStream");async function Zi(){let e=mn||await Bh(),t=Ol||await Fh(),r=Cl||await HG();return{connection:e,jsm:t,js:r}}o(Zi,"getNATSReferences");async function Sle(e){let t=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),{sys_name:r,decrypt_hash:n}=await UG.getClusterUser(),s=await Ow(t,r,n),i=ww(),a=s.subscribe(i),c=[],l,u=(async()=>{for await(let d of a){let f=xG.decode(d.data);f.response_time=Date.now()-l,c.push(f)}})();return l=Date.now(),await s.publish("$SYS.REQ.SERVER.PING.VARZ",void 0,{reply:i}),await s.publish("$SYS.REQ.SERVER.PING",void 0,{reply:i}),await s.flush(),await gS.asyncSetTimeout(e),await a.drain(),await s.close(),await u,c}o(Sle,"getServerList");async function Cw(e,t){let{jsm:r}=await Zi(),n=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);s=s===null?-1:s;let i=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);i=i===null?-1:i,await r.streams.add({name:e,storage:ale.File,retention:cle.Limits,subjects:t,discard:lle.Old,maxMsgs:s,maxBytes:i,maxAge:n})}o(Cw,"createLocalStream");async function kG(){let{jsm:e}=await Zi(),t=await e.streams.list().next(),r=[];return t.forEach(n=>{r.push(n)}),r}o(kG,"listStreams");async function Tle(e){let{jsm:t}=await Zi();await t.streams.delete(e)}o(Tle,"deleteLocalStream");async function Rle(e){let{connection:t}=await Zi(),r=[],n=ww(),s=t.subscribe(n),i=(async()=>{for await(let a of s)r.push(xG.decode(a.data))})();return await t.publish(`$JS.${e}.API.STREAM.LIST`,void 0,{reply:n}),await t.flush(),await s.drain(),await i,r}o(Rle,"listRemoteStreams");async function yle(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Zi(),i=LG(),a={durable_name:i,ack_policy:Iw.Explicit};t&&(a.deliver_policy=Nw.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];let u=[];for await(let d of l){let f=bw(d.data),m={nats_timestamp:d.info.timestampNanos,nats_sequence:d.info.streamSequence,entry:f};if(d.headers&&(m.origin=d.headers.get(Qr.MSG_HEADERS.ORIGIN)),u.push(m),d.ack(),d.info.pending===0)break}return await c.delete(),u}o(yle,"viewStream");async function*Ale(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Zi(),i=LG(),a={durable_name:i,ack_policy:Iw.Explicit};t&&(a.deliver_policy=Nw.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];for await(let u of l){let d=bw(u.data);d[0]||(d=[d]);for(let f of d){let m={nats_timestamp:u.info.timestampNanos,nats_sequence:u.info.streamSequence,entry:f};u.headers&&(m.origin=u.headers.get(Qr.MSG_HEADERS.ORIGIN)),yield m}if(u.ack(),u.info.pending===0)break}await c.delete()}o(Ale,"viewStreamIterator");async function ble(e,t,r,n){pi.trace(`publishToStream called with subject: ${e}, stream: ${t}, entries:`,n.operation),r=qG(n,r);let{js:s}=await Zi(),i=await TS(),a=`${e}.${i}`,c=await mle(()=>n instanceof Uint8Array?n:vG.encode(n));try{pi.trace(`publishToStream publishing to subject: ${a}`),fle(c.length,"bytes-sent",e,n.operation,"replication"),await s.publish(a,c,{headers:r})}catch(l){if(l.code&&l.code.toString()==="503")return $G(async()=>{try{await 
s.publish(a,c,{headers:r})}catch{if(l.code&&l.code.toString()==="503"){pi.trace(`publishToStream creating stream: ${t}`);let d=a.split(".");d[2]="*",await Cw(t,[a]),await s.publish(a,c,{headers:r})}else throw l}});throw l}}o(ble,"publishToStream");function qG(e,t){t===void 0&&(t=dle());let r=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME);return!t.has(Qr.MSG_HEADERS.ORIGIN)&&r&&t.append(Qr.MSG_HEADERS.ORIGIN,r),t}o(qG,"addNatsMsgHeader");function Nd(e){e=e.toLowerCase();let t=xh.join(kr.get(Qe.CONFIG_PARAMS.ROOTPATH),hle);if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())return Ll(yw)&&(yw={port:vh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),server_name:vh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.HUB,config_file:Qr.NATS_CONFIG_FILES.HUB_SERVER,pid_file_path:xh.join(t,Qr.PID_FILES.HUB),hdbNatsPath:t}),yw;if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())return Ll(Rw)&&(Rw={port:vh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),server_name:vh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,config_file:Qr.NATS_CONFIG_FILES.LEAF_SERVER,domain:vh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,pid_file_path:xh.join(t,Qr.PID_FILES.LEAF),hdbNatsPath:t}),Rw;pi.error(`Unable to get Nats server config. Unrecognized process: ${e}`)}o(Nd,"getServerConfig");async function GG(e,t,r,n){try{await e.consumers.add(t,{ack_policy:Iw.Explicit,durable_name:r,deliver_policy:Nw.StartTime,opt_start_time:n})}catch(s){if(s.message!=="consumer already exists")throw s}}o(GG,"createConsumer");async function Ile(e,t,r){await e.consumers.delete(t,r)}o(Ile,"removeConsumer");function Nle(e){return e.split(".")[1]}o(Nle,"extractServerName");async function wle(e,t,r=6e4,n=ww()){if(!gS.isObject(t))throw new Error("data param must be an object");let s=vG.encode(t),{connection:i}=await Zi(),a={timeout:r};n&&(a.reply=n,a.noMux=!0);let c=await i.request(e,s,a);return bw(c.data)}o(wle,"request");function Pw(e){return new Promise(async(t,r)=>{let n=Xce(Aw,["--signal",`reload=${e}`],{cwd:__dirname}),s,i;n.on("error",a=>{r(a)}),n.stdout.on("data",a=>{i+=a.toString()}),n.stderr.on("data",a=>{s+=a.toString()}),n.stderr.on("close",a=>{s&&r(s),t(i)})})}o(Pw,"reloadNATS");async function Ole(){let{pid_file_path:e}=Nd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB);await Pw(e)}o(Ole,"reloadNATSHub");async function Cle(){let{pid_file_path:e}=Nd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await Pw(e)}o(Cle,"reloadNATSLeaf");function Ple(e,t,r){let n;switch(e.code){case PG.NoResponders:n=`Unable to ${t}, node '${r}' is not listening.`;break;case PG.Timeout:n=`Unable to ${t}, node '${r}' is listening but did not respond.`;break;default:n=e.message;break}return n}o(Ple,"requestErrorHandler");async function Lle(e,t){let r=t+Qr.SERVER_SUFFIX.LEAF,{connection:n}=await Zi(),{jsm:s}=await Hle(r),{schema:i,table:a}=e,c=SS.createNatsTableStreamName(i,a),l=e.start_time?e.start_time:new Date(Date.now()).toISOString();await $G(async()=>{if(e.subscribe===!0)await GG(s,c,n.info.server_name,l);else try{await Ile(s,c,n.info.server_name)}catch(u){pi.trace(u)}})}o(Lle,"updateRemoteConsumer");async function Dle(e,t,r,n){let s=SS.createNatsTableStreamName(e,t),i=r+Qr.SERVER_SUFFIX.LEAF,a={type:Qe.ITC_EVENT_TYPES.NATS_CONSUMER_UPDATE,status:n,stream_name:s,node_domain_name:i};if(!MG&&sle()<kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXINGESTTHREADS)){let{updateConsumer:c}=Tw();await c(a)}await 
rle(a),n==="stop"&&await gS.asyncSetTimeout(1e3)}o(Dle,"updateConsumerIterator");function $G(e){return tle.writeTransaction(Qe.SYSTEM_SCHEMA_NAME,Qe.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,e)}o($G,"exclusiveLock");async function VG(e,t){let r=SS.createNatsTableStreamName(e,t),n=await TS(),s=xle(e,t,n);await Cw(r,[s])}o(VG,"createLocalTableStream");async function Mle(e){for(let t=0,r=e.length;t<r;t++){let n=e[t].schema,s=e[t].table;await VG(n,s)}}o(Mle,"createTableStreams");async function KG(e,t,r=void 0){if(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))try{let n=SS.createNatsTableStreamName(e,t),{domain:s}=Nd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await(await(await Bh()).jetstreamManager({domain:s,timeout:24e4})).streams.purge(n,r)}catch(n){if(n.message==="stream not found")pi.warn(n);else throw n}}o(KG,"purgeTableStream");async function vle(e,t){if(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))for(let r=0,n=t.length;r<n;r++)await KG(e,t[r])}o(vle,"purgeSchemaTableStreams");async function Ule(e){return(await Fh()).streams.info(e)}o(Ule,"getStreamInfo");function xle(e,t,r){return`${Qr.SUBJECT_PREFIXES.TXN}.${e}${t?"."+t:""}.${r}`}o(xle,"createSubjectName");async function TS(){if(Uh)return Uh;if(Uh=(await Fh())?.nc?.info?.server_name,Uh===void 0)throw new Error("Unable to get jetstream manager server name");return Uh}o(TS,"getJsmServerName");async function Ble(){let e=await Fh(),t=await TS(),r=await kG();for(let n of r){let s=n.config,i=s.subjects[0];if(!i)continue;let a=Fle(n),c=i.split(".");if(c[c.length-1]===t&&!a||s.name==="__HARPERDB_WORK_QUEUE__")continue;let u=i.split(".");u[u.length-1]=t;let d=u.join(".");pi.trace(`Updating stream subject name from: ${i} to: ${d}`),s.subjects[0]=d,await e.streams.update(s.name,s)}}o(Ble,"updateLocalStreams");function Fle(e){let{config:t}=e,r=!1,n=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);s=s===null?-1:s;let i=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);return i=i===null?-1:i,n!==t.max_age&&(t.max_age=n,r=!0),s!==t.max_bytes&&(t.max_bytes=s,r=!0),i!==t.max_msgs&&(t.max_msgs=i,r=!0),r}o(Fle,"updateStreamLimits");async function Hle(e){let t,r;try{t=await mn.jetstream({domain:e}),r=await mn.jetstreamManager({domain:e,checkAPI:!1})}catch(n){throw pi.error("Unable to connect to:",e),n}return{js:t,jsm:r}}o(Hle,"connectToRemoteJS")});function Lw(e){let t=e.get(RS),r=t?(0,wd.unpack)(t):null;r||(r={remoteNameToId:{}});let n=et(),s=!1;r.nodeName=et();let i=r.remoteNameToId;if(i[n]!==0){let a=0,c;for(let l in i){let u=i[l];u===0?c=l:u>a&&(a=u)}if(c){a++,i[c]=a;let l=[Symbol.for("seq"),a];e.rootStore.dbisDb.transactionSync(()=>{e.rootStore.dbisDb.get(l)||e.rootStore.dbisDb.putSync(l,{seqId:kh(e)??1,nodes:[]})})}i[n]=0,e.putSync(RS,(0,wd.pack)(r))}return r}function Hh(e){return Lw(e).remoteNameToId}function zG(e,t){let r=Lw(t),n=r.remoteNameToId,s=new Map,i=!1;for(let a in e){let c=e[a],l=n[a];if(l==null){let u=0;for(let d in n){let f=n[d];f>u&&(u=f)}l=u+1,n[a]=l,i=!0}s.set(c,l)}return i&&t.putSync(RS,(0,wd.pack)(r)),s}function yS(e,t){let r=Lw(t),n=r.remoteNameToId,s=n[e];if(s==null){let i=0;for(let a in n){let c=n[a];c>i&&(i=c)}s=i+1,n[e]=s,t.putSync(RS,(0,wd.pack)(r))}return WG.trace?.("The remote node name map",e,n,s),s}var WG,wd,RS,Dw=ue(()=>{WG=w(ri());is();wd=require("msgpackr"),RS=Symbol.for("remote-ids");o(Lw,"getIdMappingRecord");o(Hh,"exportIdMapping");o(zG,"remoteToLocalNodeId");o(yS,"getIdOfRemoteNode")});var 
Mw={};Oe(Mw,{commitsAwaitingReplication:()=>Od,getHDBNodeTable:()=>Yt,getReplicationSharedStatus:()=>Cd,iterateRoutes:()=>Gh,shouldReplicateToNode:()=>qh,subscribeToNodeUpdates:()=>Pd});function Yt(){return jG||(jG=je({table:"hdb_nodes",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"subscriptions"},{attribute:"system_info"},{attribute:"url"},{attribute:"routes"},{attribute:"ca"},{attribute:"ca_info"},{attribute:"replicates"},{attribute:"revoked_certificates"},{attribute:"__createdtime__"},{attribute:"__updatedtime__"}]}))}function Cd(e,t,r,n){return new Float64Array(e.getUserSharedBuffer(["replicated",t,r],new ArrayBuffer(48),n&&{callback:n}))}function Pd(e){Yt().subscribe({}).then(async t=>{for await(let r of t){let n=r?.value?.name;ZG.debug?.("adding node",n,"on node",et()," on process",process.pid),server.nodes=server.nodes.filter(i=>i.name!==n),r.type==="put"&&n!==et()&&(r.value?server.nodes.push(r.value):console.error("Invalid node update event",r));let s=new Map;for await(let i of Yt().search({}))if(i.shard!=null){let a=s.get(i.shard);a||s.set(i.shard,a=[]),a.push(i)}server.shards=s,(r.type==="put"||r.type==="delete")&&e(r.value,r.id)}})}function qh(e,t){let r=za.default.get(U.REPLICATION_DATABASES);return(e.replicates===!0||e.replicates?.sends)&&databases[t]&&(r==="*"||r?.find?.(n=>n.name===t&&(!n.sharded||e.shard===za.default.get(U.REPLICATION_SHARD))))&&Yt().primaryStore.get(et())?.replicates||e.subscriptions?.some(n=>(n.database||n.schema)===t&&n.subscribe)}function kle(){Pd(e=>{ja({},(t,r)=>{let n=e.name,s=QG.get(n);if(s||QG.set(n,s=new Map),s.has(r))return;let i;for(let a in t)if(i=t[a].auditStore,i)break;if(i){let a=Cd(i,r,n,()=>{let c=a[0],l=a.lastTime;for(let{txnTime:u,onConfirm:d}of Od.get(r)||[])u>l&&u<=c&&d();a.lastTime=c});a.lastTime=0,s.set(r,a)}})})}function*Gh(e){for(let t of e.routes||[]){let r=t.url,n;if(typeof t=="string"?t.includes("://")?r=t:n=t:n=t.hostname??t.host,n&&!r){let s=za.default.get(U.REPLICATION_SECUREPORT)??(!za.default.get(U.REPLICATION_PORT)&&za.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),i;(i=n.match(/:(\d+)$/)?.[1])?n=n.slice(0,-i[0].length-1):t.port?i=t.port:i=s||za.default.get(U.REPLICATION_PORT)||za.default.get(U.OPERATIONSAPI_NETWORK_PORT);let a=i?.lastIndexOf?.(":");a>0&&(i=+i.slice(a+1).replace(/[\[\]]/g,"")),r=(s?"wss://":"ws://")+n+":"+i}if(!r){JG.isMainThread&&console.error("Invalid route, must specify a url or host (with port)");continue}yield{replicates:!t.subscriptions,url:r,subscription:t.subscriptions,routes:t.routes,startTime:t.startTime,revoked_certificates:t.revokedCertificates}}}var JG,XG,za,ZG,jG,QG,Od,Dl=ue(()=>{De();is();cm();JG=require("worker_threads"),XG=w(ge()),za=w(oe());H();ZG=w(ri());server.nodes=[];o(Yt,"getHDBNodeTable");o(Cd,"getReplicationSharedStatus");o(Pd,"subscribeToNodeUpdates");o(qh,"shouldReplicateToNode");QG=new Map;Dv((e,t,r)=>{if(r>server.nodes.length)throw new XG.ClientError(`Cannot confirm replication to more nodes (${r}) than are in the network (${server.nodes.length})`);Od||(Od=new Map,kle());let n=Od.get(e);return n||(n=[],Od.set(e,n)),new Promise(s=>{let i=0;n.push({txnTime:t,onConfirm:o(()=>{++i===r&&s()},"onConfirm")})})});o(kle,"startSubscriptionToReplications");o(Gh,"iterateRoutes")});var r$={};Oe(r$,{connectedToNode:()=>Ml,disconnectedFromNode:()=>Dd,ensureNode:()=>Fo,requestClusterStatus:()=>t$,startOnMainThread:()=>xw});async function xw(e){let t=0,r=it();for(let i of Object.getOwnPropertyNames(r)){let a=r[i];for(let c in a){let 
l=a[c];if(l.auditStore){AS.set(i,kh(l.auditStore));break}}}to.whenThreadsStarted.then(async()=>{let i=[];for await(let l of r.system.hdb_nodes?.search([])||[])i.push(l);let a=et();function c(){let l=Yt().primaryStore.get(a);if(l!==null){let u=e.url??Qa();if(l===void 0||l.url!==u||l.shard!==e.shard)return Fo(a,{name:a,url:u,shard:e.shard,replicates:!0})}}o(c,"ensureThisNode"),Yt().primaryStore.get(a)&&c();for(let l of Gh(e))try{let u=!l.subscriptions;if(u&&await c(),u&&l.replicates==null&&(l.replicates=!0),i.find(d=>d.url===l.url))continue;s(l)}catch(u){console.error(u)}Pd(s)});let n;function s(i,a=i?.name){let c=et()&&a===et()||Qa()&&i?.url===Qa();if(c){let f=!!i?.replicates;if(n!==void 0&&n!==f)for(let m of Yt().search([]))m.replicates&&m.name!==a&&s(m,m.name);n=f}if(ct.trace("Setting up node replication for",i),!i){for(let[f,m]of eo){let h;for(let[p,{worker:_,nodes:g}]of m){let R=g[0];if(R&&R.name==a){h=!0;for(let[T,{worker:y}]of m)m.delete(T),ct.warn("Node was deleted, unsubscribing from node",a,T,f),y?.postMessage({type:"unsubscribe-from-node",node:a,database:T,url:f});break}}if(h){eo.get(f).iterator.remove(),eo.delete(f);return}}return}if(c)return;if(!i.url){ct.info(`Node ${i.name} is missing url`);return}let l=eo.get(i.url);if(l&&l.iterator.remove(),!(i.replicates===!0||i.replicates?.sends)&&!i.subscriptions?.length&&!l)return;if(ct.info(`Added node ${i.name} at ${i.url} for process ${et()}`),i.replicates&&i.subscriptions&&(i={...i,subscriptions:null}),i.name){for(let[f,m]of Ld)if(i.url===m.url){Ld.delete(f);break}Ld.set(i.name,i)}let u=it();if(l||(l=new Map,eo.set(i.url,l)),l.iterator=ja(e,(f,m,h)=>{h?d(m,!0):d(m,!1)}),i.subscriptions)for(let f of i.subscriptions){let m=f.database||f.schema;u[m]||(ct.warn(`Database ${m} not found for node ${i.name}, making a subscription anyway`),d(m,!1))}function d(f,m){ct.trace("Setting up replication for database",f,"on node",i.name);let h=l.get(f),p,_=[{replicateByDefault:m,...i}];AS.has(f)&&(_.push({replicateByDefault:m,name:et(),startTime:AS.get(f),endTime:Date.now(),replicates:!0}),AS.delete(f));let g=qh(i,f),R=to.workers.filter(T=>T.name==="http");if(h?(p=h.worker,h.nodes=_):g&&(t=t%R.length,p=R[t++],l.set(f,{worker:p,nodes:_,url:i.url}),p?.on("exit",()=>{l.get(f)?.worker===p&&(l.delete(f),d(f,m))})),g)setTimeout(()=>{let T={type:"subscribe-to-node",database:f,nodes:_};p?p.postMessage(T):$h(T)},qle);else{ct.info("Node no longer should be used, unsubscribing from node",{replicates:i.replicates,databaseName:f,node:i,subscriptions:i.subscriptions,hasDatabase:!!u[f],thisReplicates:Yt().primaryStore.get(et())?.replicates}),Yt().primaryStore.get(et())?.replicates||(n=!1,ct.info("Disabling replication, this node name",et(),Yt().primaryStore.get(et()),f));let T={type:"unsubscribe-from-node",database:f,url:i.url,name:i.name};p?p.postMessage(T):IS(T)}}o(d,"onDatabase")}o(s,"onNodeUpdate"),Dd=o(function(i){try{ct.info("Disconnected from node",i.name,i.url,"finished",!!i.finished);let a=Array.from(Ld.keys()),c=a.sort(),l=c.indexOf(i.name||Ei(i.url));if(l===-1){ct.warn("Disconnected node not found in node map",i.name,a);return}let u=eo.get(i.url),d=u?.get(i.database);if(!d){ct.warn("Disconnected node not found in replication map",i.database,u);return}if(d.connected=!1,i.finished||!Uw.default.get(U.REPLICATION_FAILOVER))return;let f=d.nodes[0];if(!(f.replicates===!0||f.replicates?.sends||f.subscriptions?.length))return;let m=f.shard,h=(l+1)%c.length;for(;l!==h;){let p=c[h],_=Ld.get(p);u=eo.get(_.url);let 
g=u?.get(i.database);if(!g||g.connected===!1||g.nodes[0].shard!==m){h=(h+1)%c.length;continue}let{worker:R,nodes:T}=g,y=!1;for(let N of d.nodes){if(T.some(O=>O.name===N.name)){ct.info(`Disconnected node is already failing over to ${p} for ${i.database}`);continue}N.endTime<Date.now()||(T.push(N),y=!0)}if(d.nodes=[d.nodes[0]],!y){ct.info(`Disconnected node ${i.name} has no nodes to fail over to ${p}`);return}ct.info(`Failing over ${i.database} from ${i.name} to ${p}`),R?R.postMessage({type:"subscribe-to-node",database:i.database,nodes:T}):$h({database:i.database,nodes:T});return}ct.warn("Unable to find any other node to fail over to",i.name,i.url)}catch(a){ct.error("Error failing over node",a)}},"disconnectedFromNode"),Ml=o(function(i){let a=eo.get(i.url),c=a?.get(i.database);if(!c){ct.warn("Connected node not found in replication map, this may be because the node is being removed",i.database,a);return}c.connected=!0,c.latency=i.latency;let l=c.nodes[0];if(!l){ct.warn("Newly connected node has no node subscriptions",i.database,c);return}if(!l.name){ct.debug("Connected node is not named yet",i.database,c);return}c.nodes=[l];let u=!1;for(let d of eo.values()){let f=d.get(i.database);if(!f||f==c)continue;let{worker:m,nodes:h,connected:p}=f;if(h)if(p===!1&&h[0].shard===l.shard)u=!0,c.nodes.push(h[0]);else{let _=h.filter(g=>g&&g.name!==l.name);_.length<h.length&&(f.nodes=_,m.postMessage({type:"subscribe-to-node",database:i.database,nodes:h}))}}u&&c.worker&&c.worker.postMessage({type:"subscribe-to-node",database:i.database,nodes:c.nodes})},"connectedToNode"),(0,to.onMessageByType)("disconnected-from-node",Dd),(0,to.onMessageByType)("connected-to-node",Ml),(0,to.onMessageByType)("request-cluster-status",t$)}function t$(e,t){let r=[];for(let[n,s]of Ld)try{let i=eo.get(s.url);ct.info("Getting cluster status for",n,s.url,"has dbs",i?.size);let a=[];if(i){for(let[l,{worker:u,connected:d,nodes:f,latency:m}]of i)a.push({database:l,connected:d,latency:m,threadId:u?.threadId,nodes:f.filter(h=>!(h.endTime<Date.now())).map(h=>h.name)});let c=(0,vw.cloneDeep)(s);c.database_sockets=a,delete c.ca,delete c.node_name,delete c.__updatedtime__,delete c.__createdtime__,r.push(c)}}catch(i){ct.warn("Error getting cluster status for",s?.url,i)}return t?.postMessage({type:"cluster-status",connections:r}),{connections:r}}async function Fo(e,t){let r=Yt();e=e??Ei(t.url),t.name=e;try{if(t.ca){let s=new e$.X509Certificate(t.ca);t.ca_info={issuer:s.issuer.replace(/\n/g," "),subject:s.subject.replace(/\n/g," "),subjectAltName:s.subjectAltName,serialNumber:s.serialNumber,validFrom:s.validFrom,validTo:s.validTo}}}catch(s){ct.error("Error parsing replication CA info for hdb_nodes table",s.message)}let n=r.primaryStore.get(e);if(ct.debug(`Ensuring node ${e} at ${t.url}, existing record:`,n,"new record:",t),!n)await r.patch(t);else{t.replicates&&!Uw.default.get(U.CLUSTERING_ENABLED)&&(t.subscriptions=null);for(let s in t)if(n[s]!==t[s]&&s==="subscriptions"&&t[s]&&n[s]){let i=[],a=(0,vw.cloneDeep)(n[s]);for(let c of t[s]){let l=!1;for(let u of a)if((c.database??c.schema)===(u.database??u.schema)&&c.table===u.table){u.publish=c.publish,u.subscribe=c.subscribe,l=!0;break}l||i.push(c)}t.subscriptions=[...a,...i];break}if(Array.isArray(t.revoked_certificates)){let s=n.revoked_certificates||[];t.revoked_certificates=[...new Set([...s,...t.revoked_certificates])]}ct.info(`Updating node ${e} at ${t.url}`),await r.patch(t)}}var 
to,bS,ct,vw,Uw,e$,qle,eo,Dd,Ml,Ld,AS,Vh=ue(()=>{De();to=w(ze());is();bS=require("worker_threads");Dl();ct=w(j()),vw=require("lodash"),Uw=w(oe());H();e$=require("crypto"),qle=200,eo=new Map,Ld=new Map,AS=new Map;o(xw,"startOnMainThread");o(t$,"requestClusterStatus");bS.parentPort&&(Dd=o(e=>{bS.parentPort.postMessage({type:"disconnected-from-node",...e})},"disconnectedFromNode"),Ml=o(e=>{bS.parentPort.postMessage({type:"connected-to-node",...e})},"connectedToNode"),(0,to.onMessageByType)("subscribe-to-node",e=>{$h(e)}),(0,to.onMessageByType)("unsubscribe-from-node",e=>{IS(e)}));o(Fo,"ensureNode")});var as=M(Wt=>{"use strict";var hr=require("path"),{watch:Gle}=require("chokidar"),xn=require("fs-extra"),Md=require("node-forge"),c$=require("net"),{generateKeyPair:Bw,X509Certificate:Ho,createPrivateKey:l$}=require("crypto"),$le=require("util");Bw=$le.promisify(Bw);var wt=Md.pki,_i=require("joi"),{v4:u$}=require("uuid"),{validateBySchema:qw}=st(),{forComponent:Vle}=j(),os=oe(),Ds=(H(),v(Y)),{CONFIG_PARAMS:Ul}=Ds,gi=JI(),{ClientError:Xa}=ge(),wS=require("node:tls"),{relative:d$,join:Kle}=require("node:path"),{CERT_PREFERENCE_APP:gUe,CERTIFICATE_VALUES:n$}=gi,Yle=Fc(),Fw=It(),{table:Wle,getDatabases:zle,databases:NS}=(De(),v(ht)),{getJWTRSAKeys:s$}=(yd(),v(Dh)),pt=Vle("tls");Wt.generateKeys=Vw;Wt.updateConfigCert=S$;Wt.createCsr=tue;Wt.signCertificate=rue;Wt.setCertTable=vd;Wt.loadCertificates=E$;Wt.reviewSelfSignedCert=Yw;Wt.createTLSSelector=R$;Wt.listCertificates=A$;Wt.addCertificate=cue;Wt.removeCertificate=uue;Wt.createNatsCerts=iue;Wt.generateCertsKeys=sue;Wt.getReplicationCert=Yh;Wt.getReplicationCertAuth=eue;Wt.renewSelfSigned=oue;Wt.hostnamesFromCert=zw;Wt.getKey=due;Wt.getHostnamesFromCertificate=fue;Wt.getPrimaryHostName=Ww;var{urlToNodeName:f$,getThisNodeUrl:jle,getThisNodeName:CS,clearThisNodeName:Qle}=(is(),v(ko)),{readFileSync:Jle,statSync:m$}=require("node:fs"),SUe=oe(),{getTicketKeys:Xle,onMessageFromWorkers:Zle}=ze(),Ja=j(),{isMainThread:h$}=require("worker_threads"),{TLSSocket:p$,createSecureContext:TUe}=require("node:tls"),Gw=3650,Kh=["127.0.0.1","localhost","::1"],$w=[{name:"countryName",value:"USA"},{name:"stateOrProvinceName",value:"Colorado"},{name:"localityName",value:"Denver"},{name:"organizationName",value:"HarperDB, Inc."}];Zle(async e=>{e.type===Ds.ITC_EVENT_TYPES.RESTART&&(os.initSync(!0),await Yw())});var Jr;function ec(){return Jr||(Jr=zle().system.hdb_certificate,Jr||(Jr=Wle({table:"hdb_certificate",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"uses"},{attribute:"certificate"},{attribute:"is_authority"},{attribute:"private_key_name"},{attribute:"details"},{attribute:"is_self_signed"},{attribute:"__updatedtime__"}]}))),Jr}o(ec,"getCertTable");async function Yh(){let e=R$("operations-api"),t={secureContexts:null,setSecureContext:o(s=>{},"setSecureContext")};await e.initialize(t);let r=t.secureContexts.get(CS());if(!r)return;let n=new Ho(r.options.cert);return r.cert_parsed=n,r.issuer=n.issuer,r}o(Yh,"getReplicationCert");async function eue(){ec();let e=(await Yh()).options.cert,r=new Ho(e).issuer.match(/CN=(.*)/)?.[1];return Jr.get(r)}o(eue,"getReplicationCertAuth");var i$,Za=new Map;function E$(){if(i$)return;i$=!0;let e=[{configKey:Ul.TLS},{configKey:Ul.OPERATIONSAPI_TLS}];ec();let t=hr.dirname(Fw.getConfigFilePath()),r;for(let{configKey:n}of e){let s=Fw.getConfigFromFile(n);if(s){Array.isArray(s)||(s=[s]);for(let i of s){let a=i.privateKey,c=a&&d$(Kle(t,"keys"),a);c&&o$(a,l=>{Za.set(c,l)},"private key");for(let l of[!1,!0]){let 
u=i[l?"certificateAuthority":"certificate"];if(u&&h$){let d;o$(u,f=>{if(n$.cert===f)return;let m=i.hostname??i.hostnames??i.host??i.hosts;m&&!Array.isArray(m)&&(m=[m]);let h=T$(u),p=new Ho(h),_;try{_=Ww(p)}catch(y){pt.error("error extracting host name from certificate",y);return}if(_==null){pt.error("No host name found on certificate");return}if(p.checkIssued(new Ho(n$.cert)))return;let g=Jr.primaryStore.get(_),R=m$(u).mtimeMs,T=!g||g.is_self_signed?1:g.file_timestamp??g.__updatedtime__;if(g&&R<=T){R<T&&pt.info(`Certificate ${_} at ${u} is older (${new Date(R)}) than the certificate in the database (${T>1?new Date(T):"only self signed certificate available"})`);return}r=Jr.put({name:_,uses:["https",...n.includes("operations")?["operations"]:[]],ciphers:i.ciphers,certificate:h,private_key_name:c,is_authority:l,hostnames:m,fileTimestamp:R,details:{issuer:p.issuer.replace(/\n/g," "),subject:p.subject?.replace(/\n/g," "),subject_alt_name:p.subjectAltName,serial_number:p.serialNumber,valid_from:p.validFrom,valid_to:p.validTo}})},l?"certificate authority":"certificate")}}}}}return r}o(E$,"loadCertificates");function o$(e,t,r){let n,s=o((i,a)=>{try{let c=a.mtimeMs;c&&c!==n&&(n&&h$&&pt.warn(`Reloading ${r}:`,i),n=c,t(T$(i)))}catch(c){pt.error(`Error loading ${r}:`,i,c)}},"loadFile");xn.existsSync(e)?s(e,m$(e)):pt.error(`${r} file not found:`,e),Gle(e,{persistent:!1}).on("change",s)}o(o$,"loadAndWatch");function Hw(){let e=jle();if(e==null){let t=Kh[0];return pt.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return f$(e)}o(Hw,"getHost");function OS(){let e=CS();if(e==null){let t=Kh[0];return pt.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return e}o(OS,"getCommonName");async function tue(){let e=await Yh(),t=wt.certificateFromPem(e.options.cert),r=wt.privateKeyFromPem(e.options.key);pt.info("Creating CSR with cert named:",e.name);let n=wt.createCertificationRequest();n.publicKey=t.publicKey;let s=[{name:"commonName",value:OS()},...$w];pt.info("Creating CSR with subject",s),n.setSubject(s);let i=[{name:"unstructuredName",value:"HarperDB, Inc."},{name:"extensionRequest",extensions:_$()}];return pt.info("Creating CSR with attributes",i),n.setAttributes(i),n.sign(r),Md.pki.certificationRequestToPem(n)}o(tue,"createCsr");function _$(){let e=Kh.includes(OS())?Kh:[...Kh,OS()];return e.includes(Hw())||e.push(Hw()),[{name:"basicConstraints",cA:!1,critical:!0},{name:"keyUsage",digitalSignature:!0,keyEncipherment:!0,critical:!0},{name:"extKeyUsage",serverAuth:!0,clientAuth:!0},{name:"nsCertType",client:!0,server:!0},{name:"subjectAltName",altNames:e.map(t=>c$.isIP(t)?{type:7,ip:t}:{type:2,value:t})}]}o(_$,"certExtensions");async function rue(e){let t={},r=hr.join(os.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME);if(e.csr){let n,s;ec();for await(let d of Jr.search([]))if(d.is_authority&&!d.details.issuer.includes("HarperDB-Certificate-Authority")){if(Za.has(d.private_key_name)){n=Za.get(d.private_key_name),s=d;break}else if(d.private_key_name&&await xn.exists(hr.join(r,d.private_key_name))){n=xn.readFile(hr.join(r,d.private_key_name)),s=d;break}}if(!n){let d=await kw();s=d.ca,n=d.private_key}n=wt.privateKeyFromPem(n),t.signingCA=s.certificate;let i=wt.certificateFromPem(s.certificate);pt.info("Signing CSR with cert named",s.name);let a=wt.certificationRequestFromPem(e.csr);try{a.verify()}catch(d){return pt.error(d),new Error("Error verifying CSR: "+d.message)}let 
c=Md.pki.createCertificate();c.serialNumber="0"+Math.random().toString().slice(2,9),c.validity.notBefore=new Date;let l=new Date;c.validity.notAfter=l,c.validity.notAfter.setDate(l.getDate()+Gw),pt.info("sign cert setting validity:",c.validity),pt.info("sign cert setting subject from CSR:",a.subject.attributes),c.setSubject(a.subject.attributes),pt.info("sign cert setting issuer:",i.subject.attributes),c.setIssuer(i.subject.attributes);let u=a.getAttribute({name:"extensionRequest"}).extensions;pt.info("sign cert adding extensions from CSR:",u),c.setExtensions(u),c.publicKey=a.publicKey,c.sign(n,Md.md.sha256.create()),t.certificate=wt.certificateToPem(c)}else pt.info("Sign cert did not receive a CSR from:",e.url,"only the CA will be returned");return t}o(rue,"signCertificate");async function nue(e,t){await vd({name:CS(),uses:["https","wss"],certificate:e,private_key_name:"privateKey.pem",is_authority:!1,is_self_signed:!0}),await vd({name:t.subject.getField("CN").value,uses:["https","wss"],certificate:wt.certificateToPem(t),private_key_name:"privateKey.pem",is_authority:!0,is_self_signed:!0})}o(nue,"createCertificateTable");async function vd(e){let t=new Ho(e.certificate);e.details={issuer:t.issuer.replace(/\n/g," "),subject:t.subject?.replace(/\n/g," "),subject_alt_name:t.subjectAltName,serial_number:t.serialNumber,valid_from:t.validFrom,valid_to:t.validTo},ec(),await Jr.patch(e)}o(vd,"setCertTable");async function Vw(){let e=await Bw("rsa",{modulusLength:4096,publicKeyEncoding:{type:"spki",format:"pem"},privateKeyEncoding:{type:"pkcs8",format:"pem"}});return{publicKey:wt.publicKeyFromPem(e.publicKey),privateKey:wt.privateKeyFromPem(e.privateKey)}}o(Vw,"generateKeys");async function Kw(e,t,r){let n=wt.createCertificate();if(!t){let a=await Yh();t=wt.certificateFromPem(a.options.cert).publicKey}n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+Gw);let i=[{name:"commonName",value:OS()},...$w];return n.setSubject(i),n.setIssuer(r.subject.attributes),n.setExtensions(_$()),n.sign(e,Md.md.sha256.create()),wt.certificateToPem(n)}o(Kw,"generateCertificates");async function kw(){let e=await A$(),t;for(let r of e){if(!r.is_authority)continue;let n=await y$(r.private_key_name);if(r.private_key_name&&n&&new Ho(r.certificate).checkPrivateKey(l$(n))){pt.trace(`CA named: ${r.name} found with matching private key`),t={ca:r,private_key:n};break}}if(t)return t;pt.trace("No CA found with matching private key")}o(kw,"getCertAuthority");async function g$(e,t,r=!0){let n=wt.createCertificate();n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+Gw);let i=[{name:"commonName",value:`HarperDB-Certificate-Authority-${os.get(Ul.REPLICATION_HOSTNAME)??f$(os.get(Ul.REPLICATION_URL))??u$().split("-")[0]}`},...$w];n.setSubject(i),n.setIssuer(i),n.setExtensions([{name:"basicConstraints",cA:!0,critical:!0},{name:"keyUsage",keyCertSign:!0,critical:!0}]),n.sign(e,Md.md.sha256.create());let a=hr.join(os.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),c=hr.join(a,gi.PRIVATEKEY_PEM_NAME);return r&&await xn.writeFile(c,wt.privateKeyToPem(e)),n}o(g$,"generateCertAuthority");async function sue(){let{privateKey:e,publicKey:t}=await Vw(),r=await g$(e,t),n=await Kw(e,t,r);await nue(n,r),S$()}o(sue,"generateCertsKeys");async function iue(){let e=await 
Kw(wt.privateKeyFromPem(gi.CERTIFICATE_VALUES.key),void 0,wt.certificateFromPem(gi.CERTIFICATE_VALUES.cert)),t=hr.join(os.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),r=hr.join(t,gi.NATS_CERTIFICATE_PEM_NAME);await xn.exists(r)||await xn.writeFile(r,e);let n=hr.join(t,gi.NATS_CA_PEM_NAME);await xn.exists(n)||await xn.writeFile(n,gi.CERTIFICATE_VALUES.cert)}o(iue,"createNatsCerts");async function oue(){ec();for await(let e of Jr.search([{attribute:"is_self_signed",value:!0}]))await Jr.delete(e.name);await Yw()}o(oue,"renewSelfSigned");async function Yw(){Qle(),await E$(),ec();let e=await kw();if(!e){pt.notify("A matching Certificate Authority and key was not found. A new CA will be created in advance, so it's available if needed.");let r=o(u=>{try{return{key:wt.privateKeyFromPem(xn.readFileSync(u)),keyPath:u}}catch(d){return pt.warn(`Failed to parse private key from ${u}:`,d.message),{key:null,keyPath:u}}},"tryToParseKey"),n=os.get(Ul.TLS),s,i;if(Array.isArray(n)){for(let u of n)if(u.privateKey){let d=r(u.privateKey);if(s=d.key,i=d.keyPath,d.key)break}}else{let u=os.get(Ul.TLS_PRIVATEKEY),d=r(u);s=d.key,i=d.keyPath}let a=hr.join(os.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),c=d$(a,i);s||(pt.warn("Unable to parse the TLS key",i,"A new key will be generated and used to create Certificate Authority"),{privateKey:s}=await Vw(),xn.existsSync(hr.join(a,gi.PRIVATEKEY_PEM_NAME))&&(c=`privateKey${u$().split("-")[0]}.pem`),await xn.writeFile(hr.join(a,c),wt.privateKeyToPem(s)));let l=await g$(s,wt.setRsaPublicKey(s.n,s.e),!1);await vd({name:l.subject.getField("CN").value,uses:["https"],certificate:wt.certificateToPem(l),private_key_name:c,is_authority:!0,is_self_signed:!0})}if(!await Yh()){let r=CS();pt.notify(`A suitable replication certificate was not found, creating new self singed cert named: ${r}`),e=e??await kw();let n=wt.certificateFromPem(e.ca.certificate),s=n.publicKey,i=await Kw(wt.privateKeyFromPem(e.private_key),s,n);await vd({name:r,uses:["https","operations","wss"],certificate:i,is_authority:!1,private_key_name:e.ca.private_key_name,is_self_signed:!0})}}o(Yw,"reviewSelfSignedCert");function S$(){let e=Yle(Object.keys(Ds.CONFIG_PARAM_MAP),!0),t=hr.join(os.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),r=hr.join(t,gi.PRIVATEKEY_PEM_NAME),n=hr.join(t,gi.NATS_CERTIFICATE_PEM_NAME),s=hr.join(t,gi.NATS_CA_PEM_NAME),i=Ds.CONFIG_PARAMS,a={[i.TLS_PRIVATEKEY]:e[i.TLS_PRIVATEKEY.toLowerCase()]?e[i.TLS_PRIVATEKEY.toLowerCase()]:r};e[i.TLS_CERTIFICATE.toLowerCase()]&&(a[i.TLS_CERTIFICATE]=e[i.TLS_CERTIFICATE.toLowerCase()]),e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.TLS_CERTIFICATEAUTHORITY]=e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATE]=e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]),e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_PRIVATEKEY]=e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY]=e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]),(e[i.CLUSTERING_ENABLED.toLowerCase()]||e.clustering)&&(a[i.CLUSTERING_TLS_CERTIFICATE]=e[i.CLUSTERING_TLS_CERTIFICATE.toLowerCase()]??n,a[i.CLUSTERING_TLS_CERT_AUTH]=e[i.CLUSTERING_TLS_CERT_AUTH.toLowerCase()]??s,a[i.CLUSTERING_TLS_PRIVATEKEY]=e[i.CLUSTERING_TLS_PRIVATEKEY.toLowerCase()]??r),Fw.updateConfigValue(void 0,void 0,a,!1,!0)}o(S$,"updateConfigCert");function T$(e){return e.startsWith("-----BEGIN")?e:Jle(e,"utf8")}o(T$,"readPEM");var 
a$=wS.createSecureContext;wS.createSecureContext=function(e){if(!e.cert||!e.key)return a$(e);let t={...e};delete t.key,delete t.cert;let r=a$(t);return r.context.setCert(e.cert),r.context.setKey(e.key,void 0),r};var aue=p$.prototype._init;p$.prototype._init=function(e,t){aue.call(this,e,t);let r=this;this._handle.oncertcb=function(n){let s=n.servername;r._SNICallback(s,(i,a)=>{this.sni_context=a?.context||a,this.certCbDone()})}};var vl=new Map;function R$(e,t){let r=new Map,n,s=!1;return i.initialize=a=>i.ready?i.ready:(a&&(a.secureContexts=r,a.secureContextsListeners=[]),i.ready=new Promise((c,l)=>{async function u(){try{r.clear(),vl.clear();let d=0;if(NS===void 0){c();return}for await(let f of NS.system.hdb_certificate.search([])){let m=f.certificate,h=new Ho(m);f.is_authority&&(h.asString=m,vl.set(h.subject,m))}for await(let f of NS.system.hdb_certificate.search([]))try{if(f.is_authority)continue;let m=e==="operations-api",h=f.is_self_signed?1:2;m&&f.uses?.includes?.("operations")&&(h+=1);let p=await y$(f.private_key_name),_=f.certificate,g=new Ho(_);if(vl.has(g.issuer)&&(_+=`
  `+vl.get(g.issuer)),!p||!_)throw new Error("Missing private key or certificate for secure server");let R={ciphers:f.ciphers,ticketKeys:Xle(),availableCAs:vl,ca:t&&Array.from(vl.values()),cert:_,key:p,key_file:f.private_key_name,is_self_signed:f.is_self_signed};a&&(R.sessionIdContext=a.sessionIdContext);let T=wS.createSecureContext(R);T.name=f.name,T.options=R,T.quality=h,T.certificateAuthorities=Array.from(vl),T.certStart=_.toString().slice(0,100);let y=f.hostnames??zw(g);Array.isArray(y)||(y=[y]);let N;for(let O of y)if(O){O[0]==="*"&&(s=!0,O=O.slice(1)),O===Hw()&&(h+=2),c$.isIP(O)&&(N=!0);let F=r.get(O)?.quality??0;h>F&&r.set(O,T)}else Ja.error("No hostname found for certificate at",wS.certificate);Ja.trace("Adding TLS",T.name,"for",a.ports||"client","cert named",f.name,"hostnames",y,"quality",h,"best quality",d),h>d&&(i.defaultContext=n=T,d=h,a&&(a.defaultContext=T))}catch(m){Ja.error("Error applying TLS for",f.name,m)}a?.secureContextsListeners.forEach(f=>f()),c(n)}catch(d){l(d)}}o(u,"updateTLS"),NS?.system.hdb_certificate.subscribe({listener:o(()=>setTimeout(()=>u(),1500).unref(),"listener"),omitCurrent:!0}),u()})),i;function i(a,c){Ja.info("TLS requested for",a||"(no SNI)");let l=a;for(;;){let d=r.get(l);if(d)return Ja.debug("Found certificate for",a,d.certStart),d.updatedContext&&(d=d.updatedContext),c(null,d);if(s&&l){let f=l.indexOf(".",1);f<0?l="":l=l.slice(f)}else break}a?Ja.debug("No certificate found to match",a,"using the default certificate"):Ja.debug("No SNI, using the default certificate",n?.name);let u=n;u?u.updatedContext&&(u=u.updatedContext):Ja.info("No default certificate found"),c(null,u)}o(i,"SNICallback")}o(R$,"createTLSSelector");async function y$(e){let t=Za.get(e);return!t&&e?await xn.readFile(hr.join(os.get(Ul.ROOTPATH),Ds.LICENSE_KEY_DIR_NAME,e),"utf8"):t}o(y$,"getPrivateKeyByName");async function A$(){ec();let e=[];for await(let t of Jr.search([]))e.push(t);return e}o(A$,"listCertificates");async function cue(e){let t=qw(e,_i.object({name:_i.string().required(),certificate:_i.string().required(),is_authority:_i.boolean().required(),private_key:_i.string(),hosts:_i.array(),uses:_i.array()}));if(t)throw new Xa(t.message);let{name:r,certificate:n,private_key:s,is_authority:i}=e,a=new Ho(n),c=!1,l=!1,u;for(let[h,p]of Za)!s&&!c&&a.checkPrivateKey(l$(p))&&(c=!0,u=h),s&&s===p&&(l=!0,u=h);if(!i&&!s&&!c)throw new Xa("A suitable private key was not found for this certificate");let d;if(!r){try{d=Ww(a)}catch(h){pt.error(h)}if(d==null)throw new Xa("Error extracting certificate host name, please provide a name parameter")}let f=lue(r??d);s&&!c&&!l&&(await xn.writeFile(hr.join(os.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME,f+".pem"),s),Za.set(f,s));let m={name:r??d,certificate:n,is_authority:i,hosts:e.hosts,uses:e.uses};return(!i||i&&u||i&&s)&&(m.private_key_name=u??f+".pem"),e.ciphers&&(m.ciphers=e.ciphers),await vd(m),"Successfully added certificate: "+f}o(cue,"addCertificate");function lue(e){return e.replace(/[^a-z0-9\.]/gi,"-")}o(lue,"sanitizeName");async function uue(e){let t=qw(e,_i.object({name:_i.string().required()}));if(t)throw new Xa(t.message);let{name:r}=e;ec();let n=await Jr.get(r);if(!n)throw new Xa(r+" not found");let{private_key_name:s}=n;if(s){let i=Array.from(await Jr.search([{attribute:"private_key_name",value:s}]));i.length===1&&i[0].name===r&&(pt.info("Removing private key named",s),await xn.remove(hr.join(os.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME,s)))}return await Jr.delete(r),"Successfully removed "+r}o(uue,"removeCertificate");function Ww(e){let 
t=e.subject?.match(/CN=(.*)/)?.[1];return t||zw(e)[0]}o(Ww,"getPrimaryHostName");function zw(e){if(e.subjectAltName)return e.subjectAltName.split(",").map(r=>{let n=r.indexOf(":");if(r=r.slice(n+1),r=r.trim(),r[0]==='"')try{r=JSON.parse(r)}catch{}return r.indexOf("=")>-1?r.match(/CN=([^,]*)/)?.[1]:r}).filter(r=>r);let t=certObj.subject?.match(/CN=(.*)/)?.[1];return t?[t]:[]}o(zw,"hostnamesFromCert");async function due(e){if(e.bypass_auth!==!0)throw new Xa("Unauthorized","401");let t=qw(e,_i.object({name:_i.string().required()}));if(t)throw new Xa(t.message);let{name:r}=e;if(r===".jwtPrivate")return(await s$()).privateKey;if(r===".jwtPublic")return(await s$()).publicKey;if(Za.get(r))return Za.get(e.name);throw new Xa("Key not found")}o(due,"getKey");function fue(e){return[e.subject?.CN,...e.subjectaltname.split(",").filter(t=>t.trim().startsWith("DNS:")).map(t=>t.trim().substring(4))]}o(fue,"getHostnamesFromCertificate")});var Y$={};Oe(Y$,{CONFIRMATION_STATUS_POSITION:()=>$$,LATENCY_POSITION:()=>xS,NodeReplicationConnection:()=>Bd,OPERATION_REQUEST:()=>Xw,RECEIVED_TIME_POSITION:()=>eO,RECEIVED_VERSION_POSITION:()=>Zw,RECEIVING_STATUS_POSITION:()=>tO,RECEIVING_STATUS_RECEIVING:()=>K$,RECEIVING_STATUS_WAITING:()=>V$,SENDING_TIME_POSITION:()=>Wh,createWebSocket:()=>BS,databaseSubscriptions:()=>rc,replicateOverWS:()=>zh,tableUpdateListeners:()=>nO});async function BS(e,t){let{authorization:r,rejectUnauthorized:n}=t||{},s=et(),i;if(e==null)throw new TypeError(`Invalid URL: Expected a string URL for node "${s}" but received ${e}`);if(e.includes("wss://")){if(!Qw){let l=(0,F$.createTLSSelector)("operations-api"),u={secureContexts:null};await l.initialize(u),Qw=u.secureContexts}if(i=Qw.get(s),i&&ae.debug?.("Creating web socket for URL",e,"with certificate named:",i.name),!i&&n!==!1)throw new Error("Unable to find a valid certificate to use for replication to connect to "+e)}let a={};r&&(a.Authorization=r);let c={headers:a,localAddress:s?.startsWith("127.0")?s:void 0,servername:(0,k$.isIP)(t?.serverName)?void 0:t?.serverName,noDelay:!0,highWaterMark:128*1024,rejectUnauthorized:n!==!1,secureContext:void 0};return i&&(US?.caCount!==qo.size&&(US=H$.createSecureContext({...i.options,ca:[...qo,...i.options.availableCAs.values()]}),US.caCount=qo.size),c.secureContext=US),new x$.WebSocket(e,"harperdb-replication-v1",c)}function zh(e,t,r){let n=t.port||t.securePort,s=xl.pid%1e3+"-"+B$.threadId+(n?"s:"+n:"c:"+t.url?.slice(-4))+" "+Math.random().toString().slice(2,3);ae.debug?.(s,"Initializing replication connection",r);let i=0,a=Buffer.allocUnsafeSlow(1024),c=0,l=new DataView(a.buffer,0,1024),u=t.database,d=t.databaseSubscriptions||rc,f,m,h=!1,p=t.subscription;p?.then&&p.then(E=>{p=E,p.auditStore&&(f=p.auditStore)});let _=t.tables||u&&it()[u],g;if(!r){ae.error?.(s,"No authorization provided"),Ss(1008,"Unauthorized");return}let R=new Map,T=[];g=r.name,g&&t.connection&&(t.connection.nodeName=g);let y,N,O,F,Z,q,W,G=6e4,K,ce=0,le=0,ie=0,pe=U$.default.get(U.REPLICATION_BLOBTIMEOUT)??12e4,Ne=new Map,Ue=[],xe=0,Rr;if(t.url){let E=o(()=>{Z&&le===e._socket?.bytesRead&&ie===e._socket?.bytesWritten?e.terminate():(Z=performance.now(),e.ping(),le=e._socket?.bytesRead,ie=e._socket?.bytesWritten)},"sendPing");O=setInterval(E,M$).unref(),E()}else Xt();e._socket?.setMaxListeners(200);function Xt(){clearTimeout(F),le=e._socket?.bytesRead,ie=e._socket?.bytesWritten,F=setTimeout(()=>{le===e._socket?.bytesRead&&ie===e._socket?.bytesWritten&&(ae.warn?.(`Timeout waiting for ping from ${g}, terminating connection and 
reconnecting`),e.terminate())},M$*2).unref()}o(Xt,"resetPingTimer");function qt(){if(!(!g||!u))return m||(m=Cd(f,u,g)),m}o(qt,"getSharedStatus"),u&&Ta(u);let Zt,Hf,Dc=[],Gt=[],kf,qf=[],DE=[],ME=[],$y=150,Gf=25,Pe=0,vE=0,$f=!1,Eo,Lr,yr,Vf;e.on("message",E=>{ce=performance.now();try{let S=E.dataView=new Yc(E.buffer,E.byteOffset,E.byteLength);if(E[0]>127){let P=(0,tt.decode)(E),[L,D,k]=P;switch(L){case I$:{if(D){if(g){if(g!==D){ae.error?.(s,`Node name mismatch, expecting to connect to ${g}, but peer reported name as ${D}, disconnecting`),e.send((0,tt.encode)([Ud])),Ss(1008,"Node name mismatch");return}}else if(g=D,t.connection?.tentativeNode){let B=t.connection.tentativeNode;B.name=g,t.connection.tentativeNode=null,Fo(g,B)}if(t.connection&&(t.connection.nodeName=g),ae.debug?.(s,"received node name:",g,"db:",u??P[2]),!u)try{Ta(u=P[2]),u==="system"&&(Zt=ja(t,(B,de)=>{_u(de)&&Ra(de)}),e.on("close",()=>{Zt?.remove()}))}catch(B){ae.warn?.(s,"Error setting database",B),e.send((0,tt.encode)([Ud])),Ss(1008,B.message);return}Dr()}break}case L$:{ae.debug?.(s,"Received table definitions for",D.map(B=>B.table));for(let B of D){let de=P[2];B.database=de;let me;_u(de)&&(de==="system"?ke[de]?.[B.table]||(me=V(B,ke[de]?.[B.table])):me=V(B,ke[de]?.[B.table]),f||(f=me?.auditStore),_||(_=it()?.[de]))}break}case Ud:Ss();break;case Xw:try{let B=r?.replicates||r?.subscribers||r?.name;ae.debug?.("Received operation request",D,"from",g),server.operation(D,{user:r},!B).then(de=>{Array.isArray(de)&&(de={results:de}),de.requestId=D.requestId,e.send((0,tt.encode)([LS,de]))},de=>{e.send((0,tt.encode)([LS,{requestId:D.requestId,error:(0,xd.errorToString)(de)}]))})}catch(B){e.send((0,tt.encode)([LS,{requestId:D.requestId,error:(0,xd.errorToString)(B)}]))}break;case LS:let{resolve:C,reject:x}=R.get(D.requestId);D.error?x(new Error(D.error)):C(D),R.delete(D.requestId);break;case jw:let z=P[3];if(!_){u?ae.error?.(s,"No database found for",u):ae.error?.(s,"Database name never received"),Ss();return}let ne=_[z];ne=V({table:z,database:u,attributes:D.attributes,schemaDefined:D.schemaDefined},ne),Dc[k]={name:z,decoder:new tt.Packr({useBigIntExtension:!0,randomAccessStructure:!0,freezeData:!0,typedStructs:D.typedStructs,structures:D.structures}),getEntry(B){return ne.primaryStore.getEntry(B)},rootStore:ne.primaryStore.rootStore};break;case N$:Vf=f?zG(D,f):new Map,kf=P[2],ae.debug?.(s,`Acknowledged subscription request, receiving messages for nodes: ${kf}`);break;case w$:let re=k;ME[re]=D;break;case P$:qt()[$$]=D,ae.trace?.(s,"received and broadcasting committed update",D),qt().buffer.notify();break;case C$:y=D,p.send({type:"end_txn",localTime:y,remoteNodeIds:T});break;case DS:{let B=P[1],{fileId:de,size:me,finished:Se,error:ee}=B,Q=Ne.get(de);ae.debug?.("Received blob",de,"has stream",!!Q,"connectedToBlob",!!Q?.connectedToBlob,"length",P[2].length,"finished",Se),Q||(Q=new Jw.PassThrough,Q.expectedSize=me,Ne.set(de,Q)),Q.lastChunk=Date.now();let he=P[2];ot(he.byteLength,"bytes-received",`${g}.${u}`,"replication","blob");try{Se?(ee?(Q.on("error",()=>{}),Q.destroy(new Error("Blob error: "+ee+" for record "+(Q.recordId??"unknown")+" from "+g))):Q.end(he),Q.connectedToBlob&&Ne.delete(de)):Q.write(he)}catch(Te){ae.error?.(`Error receiving blob for ${Q.recordId} from ${g} and streaming to storage`,Te),Ne.delete(de)}break}case O$:{let B=D,de;try{let me=P[3],Se=Gt[k]||(Gt[k]=_[P[4]]);if(!Se)return ae.warn?.("Unknown table id trying to handle record request",k);let 
ee=Se.primaryStore.getBinaryFast(Symbol.for("structures")),Q=ee?.length??0;if(Q>0&&Q!==vE){vE=Q;let Te=(0,tt.decode)(ee);e.send((0,tt.encode)([jw,{typedStructs:Te.typed,structures:Te.named},k,Se.tableName]))}let he=Se.primaryStore.getBinaryFast(me);if(he){let Te=Se.primaryStore.decoder.decode(he,{valueAsBuffer:!0}),fe=ut||{};fe.version=(0,q$.getLastVersion)(),ut&&ut[Pu]&Vr&&(Te=Buffer.from(Te),gm(()=>Se.primaryStore.decoder.decode(he),We=>Sa(We,me),Se.primaryStore.rootStore)),de=(0,tt.encode)([PS,B,{value:Te,expiresAt:fe.expiresAt,version:fe.version,residencyId:fe.residencyId,nodeId:fe.nodeId,user:fe.user}])}else de=(0,tt.encode)([PS,B])}catch(me){de=(0,tt.encode)([PS,B,{error:me.message}])}e.send(de);break}case PS:{let{resolve:B,reject:de,tableId:me,key:Se}=R.get(P[1]),ee=P[2];if(ee?.error)de(new Error(ee.error));else if(ee){let Q;h_(()=>{let he=Dc[me].decoder.decode(ee.value);ee.value=he,ee.key=Se,B(ee)||Q&&setTimeout(()=>Q.forEach(d_),6e4).unref()},f?.rootStore,he=>{let Te=Mc(he,Se);return Q||(Q=[]),Q.push(Te),Te})}else B();R.delete(P[1]);break}case b$:{yr=D;let B,de,me=!1;if(p){if(u!==p.databaseName&&!p.then){ae.error?.("Subscription request for wrong database",u,p.databaseName);return}}else p=d.get(u);if(ae.debug?.(s,"received subscription request for",u,"at",yr),!p){let Ee;p=new Promise(rt=>{ae.debug?.("Waiting for subscription to database "+u),Ee=rt}),p.ready=Ee,rc.set(u,p)}if(r.name)de=Yt().subscribe(r.name),de.then(async Ee=>{B=Ee;for await(let rt of B){let nt=rt.value;if(!(nt?.replicates===!0||nt?.replicates?.receives||nt?.subscriptions?.some(lr=>(lr.database||lr.schema)===u&&lr.publish!==!1))){me=!0,e.send((0,tt.encode)([Ud])),Ss(1008,`Unauthorized database subscription to ${u}`);return}}},Ee=>{ae.error?.(s,"Error subscribing to HDB nodes",Ee)});else if(!(r?.role?.permission?.super_user||r.replicates)){e.send((0,tt.encode)([Ud])),Ss(1008,`Unauthorized database subscription to ${u}`);return}if(Lr&&(ae.debug?.(s,"stopping previous subscription",u),Lr.emit("close")),yr.length===0)return;let Se=yr[0],ee=o(Ee=>{if(Ee&&(Se.replicateByDefault?!Se.tables.includes(Ee.tableName):Se.tables.includes(Ee.tableName)))return{table:Ee}},"tableToTableEntry"),Q={txnTime:0},he,Te,fe=1/0,We,_t=o((Ee,rt)=>{if(Ee.type==="end_txn"){Q.txnTime&&(a[i]!==66&&ae.error?.("Invalid encoding of message"),gu(9),gu(I_),Uc(We=rt),dt()),i=c,Q.txnTime=0;return}let nt=Ee.nodeId,lr=Ee.tableId,Mt=Te[lr];if(!Mt&&(Mt=Te[lr]=ee(p.tableById[lr]),!Mt))return ae.debug?.("Not subscribed to table",lr);let Ts=Mt.table,vt=Ts.primaryStore,Xs=vt.encoder;(Ee.extendedType&P_||!Xs.typedStructs)&&(Xs._mergeStructures(Xs.getStructures()),Xs.typedStructs&&(Xs.lastTypedStructuresLength=Xs.typedStructs.length));let Su=he[nt];if(!(Su&&Su.startTime<rt&&(!Su.endTime||Su.endTime>rt)))return vS&&ae.trace?.(s,"skipping replication update",Ee.recordId,"to:",g,"from:",nt,"subscribed:",he),gM();vS&&ae.trace?.(s,"sending replication update",Ee.recordId,"to:",g,"from:",nt,"subscribed:",he);let Vy=Ee.version;Q.txnTime!==Vy&&(Q.txnTime&&(vS&&ae.trace?.(s,"new txn time, sending queued txn",Q.txnTime),a[i]!==66&&ae.error?.("Invalid encoding of message"),dt()),Q.txnTime=Vy,i=c,Uc(Vy));let xc=Ee.residencyId,Ky=Eu(xc,Ts),xE;if(Ky&&!Ky.includes(g)){let Zs=Eu(Ee.previousResidencyId,Ts);if(Zs&&!Zs.includes(g)&&(Ee.type==="put"||Ee.type==="patch")||Ts.getResidencyById)return gM();let Yf=Ee.recordId;ae.trace?.(s,"sending invalidation",Yf,g,"from",nt);let Wf=0;xc&&(Wf|=Wc),Ee.previousResidencyId&&(Wf|=zc);let zy,BE=null;for(let SM in 
Ts.indices){if(!BE){if(zy=Ee.getValue(vt,!0),!zy)break;BE={}}BE[SM]=zy[SM]}xE=jc(Ee.version,lr,Yf,null,nt,Ee.user,Ee.type==="put"||Ee.type==="patch"?"invalidate":Ee.type,Xs.encode(BE),Wf,xc,Ee.previousResidencyId,Ee.expiresAt)}function gM(){return ae.trace?.(s,"skipping audit record",Ee.recordId),q||(q=setTimeout(()=>{q=null,(We||0)+D$/2<fe&&(vS&&ae.trace?.(s,"sending skipped sequence update",fe),e.send((0,tt.encode)([C$,fe])))},D$).unref()),new Promise(setImmediate)}o(gM,"skipAuditRecord");let Yy=Xs.typedStructs,Wy=Xs.structures;if((Yy?.length!=Mt.typed_length||Wy?.length!=Mt.structure_length)&&(Mt.typed_length=Yy?.length,Mt.structure_length=Wy.length,ae.debug?.(s,"send table struct",Mt.typed_length,Mt.structure_length),Mt.sentName||(Mt.sentName=!0),e.send((0,tt.encode)([jw,{typedStructs:Yy,structures:Wy,attributes:Ts.attributes,schemaDefined:Ts.schemaDefined},lr,Mt.table.tableName]))),xc&&!DE[xc]&&(e.send((0,tt.encode)([w$,Ky,xc])),DE[xc]=!0),xE)gu(xE.length),vc(xE);else{let Zs=Ee.encoded;Ee.extendedType&Vr&&gm(()=>Ee.getValue(vt),Wf=>Sa(Wf,Ee.recordId),vt.rootStore);let Yf=Zs[0]===66?8:0;gu(Zs.length-Yf),vc(Zs,Yf),ae.trace?.("wrote record",Ee.recordId,"length:",Zs.length)}return e._socket.writableNeedDrain?new Promise(Zs=>{ae.debug?.(`Waiting for remote node ${g} to allow more commits ${e._socket.writableNeedDrain?"due to network backlog":"due to requested flow directive"}`),e._socket.once("drain",Zs)}):xe>Gf?new Promise(Zs=>{Rr=Zs}):new Promise(setImmediate)},"sendAuditRecord"),dt=o(()=>{c-i>8?(e.send(a.subarray(i,c)),ae.debug?.(s,"Sent message, size:",c-i),ot(c-i,"bytes-sent",`${g}.${u}`,"replication","egress")):ae.debug?.(s,"skipping empty transaction")},"sendQueuedData");Lr=new rO.EventEmitter,Lr.once("close",()=>{me=!0,B?.end()});for(let{startTime:Ee}of yr)Ee<fe&&(fe=Ee);(de||Promise.resolve()).then(async()=>{p=await p,f=p.auditStore,Te=p.tableById.map(ee),he=[];for(let{name:rt,startTime:nt,endTime:lr}of yr){let Mt=yS(rt,f);ae.debug?.("subscription to",rt,"using local id",Mt,"starting",nt),he[Mt]={startTime:nt,endTime:lr}}Ra(u),Zt||(Zt=Nl(rt=>{rt.databaseName===u&&Ra(u)}),Hf=Ah(rt=>{rt===u&&(e.send((0,tt.encode)([Ud])),Ss())}),e.on("close",()=>{Zt?.remove(),Hf?.remove()})),e.send((0,tt.encode)([N$,Hh(p.auditStore),yr.map(({name:rt})=>rt)]));let Ee=!0;do{isFinite(fe)||(ae.warn?.("Invalid sequence id "+fe),Ss(1008,"Invalid sequence id"+fe));let rt;if(Ee&&!me&&(Ee=!1,fe===0)){ae.info?.("Replicating all tables to",g);let nt=fe,lr=FS(f);for(let Mt in _){if(!ee(Mt))continue;let Ts=_[Mt];for(let vt of Ts.primaryStore.getRange({snapshot:!1,versions:!0})){if(me)return;if(vt.localTime>=fe){ae.trace?.(s,"Copying record from",u,Mt,vt.key,vt.localTime),nt=Math.max(vt.localTime,nt),rt=!0,qt()[Wh]=1;let Xs=jc(vt.version,Ts.tableId,vt.key,null,lr,null,"put",gm(()=>Ts.primaryStore.encoder.encode(vt.value),Su=>Sa(Su,vt.key)),vt.metadataFlags&-256,vt.residencyId,null,vt.expiresAt);await _t({recordId:vt.key,tableId:Ts.tableId,type:"put",getValue(){return vt.value},encoded:Xs,version:vt.version,residencyId:vt.residencyId,nodeId:lr,extendedType:vt.metadataFlags},vt.localTime)}}}rt&&_t({type:"end_txn"},fe),qt()[Wh]=0,fe=nt}for(let{key:nt,value:lr}of f.getRange({start:fe||1,exclusiveStart:!0,snapshot:!1})){if(me)return;let Mt=bt(lr);ae.debug?.("sending audit record",new Date(nt)),qt()[Wh]=nt,fe=nt,await _t(Mt,nt),Lr.startTime=nt,rt=!0}rt&&_t({type:"end_txn"},fe),qt()[Wh]=0,await QU(f)}while(!me)}).catch(Ee=>{ae.error?.(s,"Error handling subscription to node",Ee),Ss(1008,"Error handling subscription to 
node")});break}}return}S.position=8;let A=!0,b,I;do{qt();let P=S.readInt();if(P===9&&S.getUint8(S.position)==I_){S.position++,y=I=S.readFloat64(),m[Zw]=y,m[eO]=Date.now(),m[tO]=V$,ae.trace?.("received remote sequence update",y,u);break}let L=S.position,D=bt(E,L,L+P),k=Dc[D.tableId];k||ae.error?.(`No table found with an id of ${D.tableId}`);let C;D.residencyId&&(C=ME[D.residencyId],ae.trace?.(s,"received residency list",C,D.type,D.recordId));try{let x=D.recordId;h_(()=>{b={table:k.name,id:D.recordId,type:D.type,nodeId:Vf.get(D.nodeId),residencyList:C,timestamp:D.version,value:D.getValue(k),user:D.user,beginTxn:A,expiresAt:D.expiresAt}},f?.rootStore,z=>Mc(z,x))}catch(x){throw x.message+="typed structures for current decoder"+JSON.stringify(k.decoder.typedStructs),x}A=!1,ae.trace?.(s,"received replication message",D.type,"id",b.id,"version",new Date(D.version),"nodeId",b.nodeId),m[Zw]=D.version,m[eO]=Date.now(),m[tO]=K$,p.send(b),S.position=L+P}while(S.position<E.byteLength);Pe++,ot(E.byteLength,"bytes-received",`${g}.${u}.${b?.table||"unknown_table"}`,"replication","ingest"),Pe>$y&&!$f&&($f=!0,e.pause(),ae.debug?.(`Commit backlog causing replication back-pressure, requesting that ${g} pause replication`)),p.send({type:"end_txn",localTime:y,remoteNodeIds:T,async onCommit(){if(b){let P=Date.now()-b.timestamp;ot(P,"replication-latency",g+"."+u+"."+b.table,b.type,"ingest")}Pe--,$f&&($f=!1,e.resume(),ae.debug?.(`Replication resuming ${g}`)),Ue.length>0&&await Promise.all(Ue),ae.trace?.("All blobs finished"),!N&&I&&(ae.trace?.(s,"queuing confirmation of a commit at",I),setTimeout(()=>{e.send((0,tt.encode)([P$,N])),ae.trace?.(s,"sent confirmation of a commit at",N),N=null},hue)),N=I,ae.debug?.("last sequence committed",new Date(I),u)}})}catch(S){ae.error?.(s,"Error handling incoming replication message",S)}}),e.on("ping",Xt),e.on("pong",()=>{if(t.connection){let E=performance.now()-Z;t.connection.latency=E,qt()&&(m[xS]=E),t.isSubscriptionConnection&&Ml({name:g,database:u,url:t.url,latency:E})}Z=null}),e.on("close",(E,S)=>{clearInterval(O),clearTimeout(F),clearInterval(W),Lr&&Lr.emit("close"),Eo&&Eo.end();for(let[A,{reject:b}]of R)b(new Error(`Connection closed ${S?.toString()} ${E}`));ae.debug?.(s,"closed",E,S?.toString())});function Ss(E,S){try{e.isFinished=!0,ae.debug?.(s,"closing",g,u,E,S),e.close(E,S),t.connection?.emit("finished")}catch(A){ae.error?.(s,"Error closing connection",A)}}o(Ss,"close");let ga=new Set;async function Sa(E,S){let A=f_(E);if(ga.has(A)){ae.debug?.("Blob already being sent",A);return}ga.add(A);try{let b;xe++;for await(let I of E.stream())b&&(ae.debug?.("Sending blob chunk",A,"length",b.length),e.send((0,tt.encode)([DS,{fileId:A,size:E.size},b]))),b=I,e._socket.writableNeedDrain&&(ae.debug?.("draining",A),await new Promise(P=>e._socket.once("drain",P)),ae.debug?.("drained",A)),ot(I.length,"bytes-sent",`${g}.${u}`,"replication","blob");ae.debug?.("Sending final blob chunk",A,"length",b.length),e.send((0,tt.encode)([DS,{fileId:A,size:E.size,finished:!0},b]))}catch(b){ae.warn?.("Error sending blob",b,"blob id",A,"for record",S),e.send((0,tt.encode)([DS,{fileId:A,finished:!0,error:(0,xd.errorToString)(b)},Buffer.alloc(0)]))}finally{ga.delete(A),xe--,xe<Gf&&Rr?.()}}o(Sa,"sendBlobs");function Mc(E,S){let A=f_(E),b=Ne.get(A);ae.debug?.("Received transaction with blob",A,"has stream",!!b,"ended",!!b?.writableEnded),b?b.writableEnded&&Ne.delete(A):(b=new Jw.PassThrough,Ne.set(A,b)),b.connectedToBlob=!0,b.lastChunk=Date.now(),b.recordId=S,E.size===void 
0&&b.expectedSize&&(E.size=b.expectedSize);let I=b.blob??createBlob(b,E);b.blob=I;let P=bo(()=>_m(I).saving,p.auditStore?.rootStore);return P&&(P.blobId=A,Ue.push(P),P.finally(()=>{ae.debug?.(`Finished receiving blob stream ${A}`),Ue.splice(Ue.indexOf(P),1)})),I}o(Mc,"receiveBlobs");function Dr(){if(h||(h=!0,t.connection?.on("subscriptions-updated",Dr)),!f&&p&&(f=p.auditStore),t.connection?.isFinished)throw new Error("Can not make a subscription request on a connection that is already closed");let E=new Map;f||(f=p?.auditStore);try{for(let b of p?.dbisDB?.getRange({start:Symbol.for("seq"),end:[Symbol.for("seq"),Buffer.from([255])]})||[])for(let I of b.value.nodes||[])I.lastTxnTime>(E.get(I.id)??0)&&E.set(I.id,I.lastTxnTime)}catch(b){if(!b.message.includes("Can not re"))throw b}let S=t.connection?.nodeSubscriptions?.[0];T=[];let A=t.connection?.nodeSubscriptions.map((b,I)=>{let P=[],{replicateByDefault:L}=b;if(b.subscriptions){for(let x of b.subscriptions)if(x.subscribe&&(x.schema||x.database)===u){let z=x.table;_?.[z]?.replicate!==!1&&P.push(z)}L=!1}else for(let x in _)(L?_[x].replicate===!1:_[x].replicate)&&P.push(x);let D=f&&yS(b.name,f),k=p?.dbisDB?.get([Symbol.for("seq"),D])??1,C=Math.max(k?.seqId??1,(typeof b.startTime=="string"?new Date(b.startTime).getTime():b.startTime)??1);if(ae.debug?.("Starting time recorded in db",b.name,D,u,k?.seqId,"start time:",C,new Date(C)),S!==b){let x=f&&yS(S.name,f),z=p?.dbisDB?.get([Symbol.for("seq"),x])??1;for(let ne of z?.nodes||[])ne.name===b.name&&(C=ne.seqId,ae.debug?.("Using sequence id from proxy node",S.name,C))}if(D===void 0?ae.warn("Starting subscription request from node",b,"but no node id found"):T.push(D),E.get(D)>C&&(C=E.get(D),ae.debug?.("Updating start time from more recent txn recorded",S.name,C)),C===1&&MS)try{new URL(MS).hostname===b.name?(ae.warn?.(`Requesting full copy of database ${u} from ${MS}`),C=0):C=Date.now()-6e4}catch(x){ae.error?.("Error parsing leader URL",MS,x)}return ae.trace?.(s,"defining subscription request",b.name,u,new Date(C)),{name:b.name,replicateByDefault:L,tables:P,startTime:C,endTime:b.endTime}});if(A)if(ae.debug?.(s,"sending subscription request",A,p?.dbisDB?.path),clearTimeout(K),A.length>0)e.send((0,tt.encode)([b$,A]));else{let b=o(()=>{let I=performance.now();K=setTimeout(()=>{ce<=I?Ss(1008,"Connection has no subscriptions and is no longer used"):b()},G).unref()},"scheduleClose");b()}}o(Dr,"sendSubscriptionRequestUpdate");function Eu(E,S){if(!E)return;let A=qf[E];return A||(A=S.getResidencyRecord(E),qf[E]=A),A}o(Eu,"getResidence");function _u(E){return!(tc&&tc!="*"&&!tc[E]&&!tc.includes?.(E)&&!tc.some?.(S=>S.name===E))}o(_u,"checkDatabaseAccess");function Ta(E){if(p=p||d.get(E),!_u(E))throw new Error(`Access to database "${E}" is not permitted`);p||ae.warn?.(`No database named "${E}" was declared and registered`),f=p?.auditStore,_||(_=it()?.[E]);let S=et();if(S===g)throw S?new Error("Should not connect to self",S):new Error("Node name not defined");return UE(S,E),!0}o(Ta,"setDatabase");function UE(E,S){let A=it()?.[S],b=[];for(let I in A){let P=A[I];b.push({table:I,schemaDefined:P.schemaDefined,attributes:P.attributes.map(L=>({name:L.name,type:L.type,isPrimaryKey:L.isPrimaryKey}))})}ae.trace?.("Sending database info for node",E,"database name",S),e.send((0,tt.encode)([I$,E,S,b]))}o(UE,"sendNodeDBName");function Ra(E){let S=it()?.[E],A=[];for(let b in S){if(yr&&!yr.some(P=>P.replicateByDefault?!P.tables.includes(b):P.tables.includes(b)))continue;let 
I=S[b];A.push({table:b,schemaDefined:I.schemaDefined,attributes:I.attributes.map(P=>({name:P.name,type:P.type,isPrimaryKey:P.isPrimaryKey}))})}e.send((0,tt.encode)([L$,A,E]))}o(Ra,"sendDBSchema"),W=setInterval(()=>{for(let[E,S]of Ne)S.lastChunk+pe<Date.now()&&(ae.warn?.(`Timeout waiting for blob stream to finish ${E} for record ${S.recordId??"unknown"} from ${g}`),Ne.delete(E),S.end())},pe).unref();let ya=1,Kf=[];return{end(){Eo&&Eo.end(),Lr&&Lr.emit("close")},getRecord(E){let S=ya++;return new Promise((A,b)=>{let I=[O$,S,E.table.tableId,E.id];Kf[E.table.tableId]||(I.push(E.table.tableName),Kf[E.table.tableId]=!0),e.send((0,tt.encode)(I)),ce=performance.now(),R.set(S,{tableId:E.table.tableId,key:E.id,resolve(P){let{table:L,entry:D}=E;if(A(P),P)return L._recordRelocate(D,P)},reject:b})})},sendOperation(E){let S=ya++;return E.requestId=S,e.send((0,tt.encode)([Xw,E])),new Promise((A,b)=>{R.set(S,{resolve:A,reject:b})})}};function gu(E){Aa(5),E<128?a[c++]=E:E<16384?(l.setUint16(c,E|32768),c+=2):E<1056964608?(l.setUint32(c,E|3221225472),c+=4):(a[c]=255,l.setUint32(c+1,E),c+=5)}function vc(E,S=0,A=E.length){let b=A-S;Aa(b),E.copy(a,c,S,A),c+=b}function Uc(E){Aa(8),l.setFloat64(c,E),c+=8}function Aa(E){if(E+16>a.length-c){let S=Buffer.allocUnsafeSlow(c+E-i+65536>>10<<11);a.copy(S,0,i,c),c=c-i,i=0,a=S,l=new DataView(a.buffer,0,a.length)}}function V(E,S){let A=E.database??"data";if(A!=="data"&&!ke[A]){ae.warn?.("Database not found",E.database);return}S||(S={});let b=S.schemaDefined,I=!1,P=E.schemaDefined,L=S.attributes||[];for(let D=0;D<E.attributes?.length;D++){let k=E.attributes[D],C=L.find(x=>x.name===k.name);(!C||C.type!==k.type)&&(b?ae.error?.(`Schema for '${u}.${E.table}' is defined locally, but attribute '${k.name}: ${k.type}' from '${g}' does not match local attribute ${C?"'"+C.name+": "+C.type+"'":"which does not exist"}`):(I=!0,P||(k.indexed=!0),C?L[L.indexOf(C)]=k:L.push(k)))}return I?(ae.debug?.("(Re)creating",E),je({table:E.table,database:E.database,schemaDefined:E.schemaDefined,attributes:L,...S})):S}}var U$,tt,x$,B$,xd,rO,F$,H$,xl,k$,Jw,q$,G$,ae,b$,I$,N$,Ud,w$,jw,O$,PS,Xw,LS,C$,P$,L$,DS,$$,Zw,eO,Wh,xS,tO,V$,K$,mue,MS,nO,rc,vS,D$,hue,M$,Qw,US,v$,Bd,sO=ue(()=>{De();Mi();Dw();ub();is();U$=w(oe());H();Qc();tt=require("msgpackr"),x$=require("ws"),B$=require("worker_threads"),xd=w(j());Vh();rO=require("events"),F$=w(as()),H$=w(require("node:tls"));Dl();xl=w(require("node:process")),k$=require("node:net");ji();Yn();Jw=require("node:stream"),q$=require("lmdb"),G$=w(require("minimist")),ae=(0,xd.forComponent)("replication").conditional,b$=129,I$=140,N$=141,Ud=142,w$=130,jw=132,O$=133,PS=134,Xw=136,LS=137,C$=143,P$=144,L$=145,DS=146,$$=0,Zw=1,eO=2,Wh=3,xS=4,tO=5,V$=0,K$=1,mue=(0,G$.default)(xl.argv),MS=mue.HDB_LEADER_URL??xl.env.HDB_LEADER_URL,nO=new Map,rc=new Map,vS=!0,D$=300,hue=2,M$=3e4;o(BS,"createWebSocket");v$=500,Bd=class extends rO.EventEmitter{static{o(this,"NodeReplicationConnection")}socket;startTime;retryTime=v$;retries=0;isConnected=!0;isFinished=!1;nodeSubscriptions;latency=0;replicateTablesByDefault;session;sessionResolve;sessionReject;url;subscription;databaseName;nodeName;authorization;constructor(t,r,n,s,i){super(),this.url=t,this.subscription=r,this.databaseName=n,this.authorization=i,this.nodeName=this.nodeName??Ei(t)}async connect(){this.session||this.resetSession();let t=[];this.socket=await BS(this.url,{serverName:this.nodeName,authorization:this.authorization});let r;ae.debug?.(`Connecting to ${this.url}, db: ${this.databaseName}, process 
${xl.pid}`),this.socket.on("open",()=>{this.socket._socket.unref(),ae[this.isConnected?"info":"warn"]?.(`Connected to ${this.url}, db: ${this.databaseName}`),this.retries=0,this.retryTime=v$,this.nodeSubscriptions&&Ml({name:this.nodeName,database:this.databaseName,url:this.url}),this.isConnected=!0,r=zh(this.socket,{database:this.databaseName,subscription:this.subscription,url:this.url,connection:this,isSubscriptionConnection:this.nodeSubscriptions!==void 0},{replicates:!0}),this.sessionResolve(r)}),this.socket.on("error",n=>{n.code==="SELF_SIGNED_CERT_IN_CHAIN"?(ae.warn?.(`Can not connect to ${this.url}, this server does not have a certificate authority for the certificate provided by ${this.url}`),n.isHandled=!0):n.code!=="ECONNREFUSED"&&(n.code==="UNABLE_TO_VERIFY_LEAF_SIGNATURE"?ae.error?.(`Can not connect to ${this.url}, the certificate provided by ${this.url} is not trusted, this node needs to be added to the cluster, or a certificate authority needs to be added`):ae.error?.(`Error in connection to ${this.url} due to ${n.message}`)),this.sessionReject(n)}),this.socket.on("close",(n,s)=>{if(this.isConnected&&(this.nodeSubscriptions&&Dd({name:this.nodeName,database:this.databaseName,url:this.url,finished:this.socket.isFinished}),this.isConnected=!1),this.removeAllListeners("subscriptions-updated"),this.socket.isFinished){this.isFinished=!0,r?.end(),this.emit("finished");return}if(++this.retries%20===1){let i=s?.toString();ae.warn?.(`${r?"Disconnected from":"Failed to connect to"} ${this.url} (db: "${this.databaseName}"), due to ${i?'"'+i+'" ':""}(code: ${n})`)}r=null,this.resetSession(),setTimeout(()=>{this.connect()},this.retryTime).unref(),this.retryTime+=this.retryTime>>8})}resetSession(){this.session=new Promise((t,r)=>{this.sessionResolve=t,this.sessionReject=r})}subscribe(t,r){this.nodeSubscriptions=t,this.replicateTablesByDefault=r,this.emit("subscriptions-updated",t)}unsubscribe(){this.socket.isFinished=!0,this.socket.close(1008,"No longer subscribed")}getRecord(t){return this.session.then(r=>r.getRecord(t))}};o(zh,"replicateOverWS")});var ko={};Oe(ko,{clearThisNodeName:()=>Aue,disableReplication:()=>gue,enabledDatabases:()=>tc,forEachReplicatedDatabase:()=>ja,getThisNodeId:()=>FS,getThisNodeName:()=>et,getThisNodeUrl:()=>Qa,hostnameToUrl:()=>$S,lastTimeInAuditStore:()=>kh,monitorNodeCAs:()=>tV,replicateOperation:()=>Iue,replicationCertificateAuthorities:()=>qo,sendOperationToNode:()=>jh,servers:()=>Eue,setReplicator:()=>nV,start:()=>_ue,startOnMainThread:()=>xw,subscribeToNode:()=>$h,unsubscribeFromNode:()=>IS,urlToNodeName:()=>Ei});function _ue(e){if(!e.port&&!e.securePort&&(e.port=Ms.default.get(U.OPERATIONSAPI_NETWORK_PORT),e.securePort=Ms.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),!et())throw new Error("Can not load replication without a url (see replication.url in the config)");let t=new Map;for(let s of Gh(e))t.set(Ei(s.url),s);Sue(e),e={mtls:!0,isOperationsServer:!0,maxPayload:10*1024*1024*1024,...e};let r=Le.ws(async(s,i,a,c)=>{if(Ot.debug("Incoming WS connection received "+i.url),i.headers.get("sec-websocket-protocol")!=="harperdb-replication-v1")return c(s,i,a);await a,s._socket.unref(),zh(s,e,i?.user),s.on("error",l=>{l.code!=="ECONNREFUSED"&&Ot.error("Error in connection to "+this.url,l.message)})},e);e.runFirst=!0,Le.http((s,i)=>{if(s.isWebSocket&&s.headers.get("Sec-WebSocket-Protocol")==="harperdb-replication-v1"){Ot.debug("Incoming replication WS connection received, authorized: 
"+s.authorized),!s.authorized&&s._nodeRequest.socket.authorizationError&&Ot.error(`Incoming client connection from ${s.ip} did not have valid certificate, you may need turn on enableRootCAs in the config if you are using a publicly signed certificate, or add the CA to the server's trusted CAs`,s._nodeRequest.socket.authorizationError);let a=Yt().primaryStore;if(s.authorized&&s.peerCertificate.subjectaltname){let c=(0,Z$.getHostnamesFromCertificate)(s.peerCertificate),l;for(let u of c)if(l=u&&(a.get(u)||t.get(u)),l)break;if(l)if(l?.revoked_certificates?.includes(s.peerCertificate.serialNumber)){Ot.warn("Revoked certificate used in attempt to connect to node",l.name,"certificate serial number",s.peerCertificate.serialNumber);return}else s.user=l;else Ot.warn(`No node found for certificate common name/SANs: ${c}, available nodes are ${Array.from(a.getRange({}).filter(({value:u})=>u).map(({key:u})=>u)).join(", ")} and routes ${Array.from(t.keys()).join(", ")}, connection will require credentials.`)}else{let c=a.get(s.ip)||t.get(s.ip);c?s.user=c:Ot.warn(`No node found for IP address ${s.ip}, available nodes are ${Array.from(new Set([...a.getKeys(),...t.keys()])).join(", ")}, connection will require credentials.`)}}return i(s)},e);let n=[];for(let s of r)if(s.secureContexts){let i=o(()=>{let a=new Set(s.secureContexts.values());s.defaultContext&&a.add(s.defaultContext);for(let c of a)try{let l=Array.from(qo);c.options.availableCAs&&l.push(...c.options.availableCAs.values());let u={...c.options,ca:l};c.updatedContext=GS.createSecureContext(u)}catch(l){Ot.error("Error creating replication TLS config",l)}},"updateContexts");s.secureContextsListeners.push(i),n.push(i),Ms.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1&&i()}tV(()=>{for(let s of n)s()})}function tV(e){let t=0;Pd(r=>{r?.ca&&(qo.add(r.ca),qo.size!==t&&(t=qo.size,e?.()))})}function gue(e=!0){eV=e}function Sue(e){eV||(it(),tc=e.databases,ja(e,(t,r)=>{if(!t){let n=e.databaseSubscriptions||rc;for(let[s,i]of kS){let a=i.get(r);a&&(a.subscribe([],!1),i.delete(r))}n.delete(r);return}for(let n in t){let s=t[n];nV(r,s,e),nO.get(s)?.forEach(i=>i(s))}}))}function nV(e,t,r){if(!t)return console.error(`Attempt to replicate non-existent table ${t.name} from database ${e}`);if(t.replicate===!1||t.sources?.some(s=>s.isReplicator))return;let n;t.sourcedFrom(class rV extends xr{static{o(this,"Replicator")}static connection;static subscription;static async subscribe(){let i=r.databaseSubscriptions||rc,a=i.get(e),c=a?.tableById||[];c[t.tableId]=t;let l=a?.ready;if(Ot.trace("Setting up replicator subscription to database",e),!a?.auditStore)return this.subscription=a=new Kn,i.set(e,a),a.tableById=c,a.auditStore=t.auditStore,a.dbisDB=t.dbisDB,a.databaseName=e,l&&l(a),a;this.subscription=a}static subscribeOnThisThread(i,a){return!0}static async load(i){if(i){let a=i.residencyId,c=i.residency||t.dbisDB.get([Symbol.for("residency_by_id"),a]);if(c){let l,u=new Set;do{let d,f="",m=1/0;for(let p of c){if(u.has(p)||p===Le.hostname)continue;let _=Rue(p,rV.subscription,e);if(_?.isConnected){let g=Cd(t.auditStore,e,p)[xS];(!d||g<m)&&(d=_,f=p,m=g)}}if(!d)throw l||new J$.ServerError(`No connection to any other nodes are available: ${c}`,502);let h={requestId:pue++,table:t,entry:i,id:i.key};u.add(f);try{return await d.getRecord(h)}catch(p){if(d.isConnected)throw p;Ot.warn("Error in load from node",qS,p),l||(l=p)}}while(!0)}}}static isReplicator=!0},{intermediateSource:!0})}function Tue(e,t,r,n,s){let i=kS.get(e);i||kS.set(e,i=new Map);let a=i.get(r);if(a)return 
a;if(t)return i.set(r,a=new Bd(e,t,r,n,s)),a.connect(),a.once("finished",()=>i.delete(r)),a}function Rue(e,t,r){let n=W$.get(e);n||(n=new Map,W$.set(e,n));let s=n.get(r);if(s)return s;let i=Yt().primaryStore.get(e);return i?.url&&(s=new Bd(i.url,t,r,e,i.authorization),n.set(r,s),s.connect(),s.once("finished",()=>n.delete(r))),s}async function jh(e,t,r){r||(r={}),r.serverName=e.name;let n=await BS(e.url,r),s=zh(n,{},{});return new Promise((i,a)=>{n.on("open",()=>{Ot.debug("Sending operation connection to "+e.url+" opened",t),i(s.sendOperation(t))}),n.on("error",c=>{a(c)}),n.on("close",c=>{Ot.info("Sending operation connection to "+e.url+" closed",c)})}).finally(()=>{n.close()})}function $h(e){try{X$.isMainThread&&Ot.trace("Subscribing on main thread (should not happen in multi-threaded instance)",e.nodes[0].url,e.database);let t=rc.get(e.database);if(!t){let n;t=new Promise(s=>{Ot.info("Waiting for subscription to database "+e.database),n=s}),t.ready=n,rc.set(e.database,t)}let r=Tue(e.nodes[0].url,t,e.database,e.nodes[0].name,e.nodes[0].authorization);e.nodes[0].name===void 0?r.tentativeNode=e.nodes[0]:r.nodeName=e.nodes[0].name,r.subscribe(e.nodes.filter(n=>qh(n,e.database)),e.replicateByDefault)}catch(t){Ot.error("Error in subscription to node",e.nodes[0]?.url,t)}}async function IS({name:e,url:t,database:r}){Ot.trace("Unsubscribing from node",e,t,r,"nodes",Array.from(Yt().primaryStore.getRange({})));let n=kS.get(t);if(n){let s=n.get(r);s&&(s.unsubscribe(),n.delete(r))}}function yue(){if(iO!==void 0)return iO;let e=Ms.default.get(U.OPERATIONSAPI_TLS_CERTIFICATE)||Ms.default.get(U.TLS_CERTIFICATE);if(e)return iO=new j$.X509Certificate((0,Q$.readFileSync)(e)).subject?.match(/CN=(.*)/)?.[1]??null}function et(){return qS||(qS=Ms.default.get("replication_hostname")??Ei(Ms.default.get("replication_url"))??yue()??z$("operationsapi_network_secureport")??z$("operationsapi_network_port")??"127.0.0.1")}function Aue(){qS=void 0}function z$(e){let t=Ms.default.get(e),r=t?.lastIndexOf?.(":");if(r>0)return t.slice(0,r)}function HS(e){let t=Ms.default.get(e),r=t?.lastIndexOf?.(":");return r>0?+t.slice(r+1).replace(/[\[\]]/g,""):+t}function FS(e){return Hh(e)?.[et()]}function Qa(){let e=Ms.default.get("replication_url");return e||$S(et())}function $S(e){let t=HS("replication_port");if(t)return`ws://${e}:${t}`;if(t=HS("replication_secureport"),t)return`wss://${e}:${t}`;if(t=HS("operationsapi_network_port"),t)return`ws://${e}:${t}`;if(t=HS("operationsapi_network_secureport"),t)return`wss://${e}:${t}`}function Ei(e){if(e)return new URL(e).hostname}function ja(e,t){for(let n of Object.getOwnPropertyNames(ke))r(n);return Ah(n=>{r(n)}),Nl((n,s)=>{r(n.databaseName)});function r(n){let s=ke[n];Ot.trace("Checking replication status of ",n,e?.databases),e?.databases===void 0||e.databases==="*"||e.databases.includes(n)||e.databases.some?.(i=>i.name===n)||!s?t(s,n,!0):bue(n)&&t(s,n,!1)}o(r,"forDatabase")}function bue(e){let t=ke[e];for(let r in t)if(t[r].replicate)return!0}function kh(e){for(let t of e.getKeys({limit:1,reverse:!0}))return t}async function Iue(e){let t={message:""};if(e.replicated){e.replicated=!1,Ot.trace?.("Replicating operation",e.operation,"to nodes",Le.nodes.map(n=>n.name));let r=await Promise.allSettled(Le.nodes.map(n=>jh(n,e)));t.replicated=r.map((n,s)=>{let i=n.status==="rejected"?{status:"failed",reason:n.reason.toString()}:n.value;return i.node=Le.nodes[s]?.name,i})}return t}var 
Ms,Ot,j$,Q$,GS,J$,X$,Z$,eV,pue,Eue,qo,tc,kS,W$,iO,qS,is=ue(()=>{De();Ma();Au();sO();Mr();Ms=w(oe()),Ot=w(j()),j$=require("crypto"),Q$=require("fs");Vh();Dl();H();Dw();GS=w(require("node:tls")),J$=w(ge()),X$=require("worker_threads"),Z$=w(as()),pue=1,Eue=[],qo=Ms.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1?new Set(GS.rootCertificates):new Set;o(_ue,"start");o(tV,"monitorNodeCAs");o(gue,"disableReplication");o(Sue,"assignReplicationSource");o(nV,"setReplicator");kS=new Map;o(Tue,"getSubscriptionConnection");W$=new Map;o(Rue,"getRetrievalConnectionByName");o(jh,"sendOperationToNode");o($h,"subscribeToNode");o(IS,"unsubscribeFromNode");o(yue,"getCommonNameFromCert");o(et,"getThisNodeName");o(Aue,"clearThisNodeName");Object.defineProperty(Le,"hostname",{get(){return et()}});o(z$,"getHostFromListeningPort");o(HS,"getPortFromListeningPort");o(FS,"getThisNodeId");Le.replication={getThisNodeId:FS,exportIdMapping:Hh};o(Qa,"getThisNodeUrl");o($S,"hostnameToUrl");o(Ei,"urlToNodeName");o(ja,"forEachReplicatedDatabase");o(bue,"hasExplicitlyReplicatedTable");o(kh,"lastTimeInAuditStore");o(Iue,"replicateOperation")});var Gd=M((YUe,cV)=>{"use strict";var Fd=_G(),{validateBySchema:Qh}=st(),{commonValidators:Hd,schemaRegex:oO}=qi(),pr=require("joi"),Nue=j(),wue=require("uuid").v4,YS=Po(),kd=(H(),v(Y)),Oue=require("util"),nc=Xn(),{handleHDBError:Go,hdbErrors:Cue,ClientError:Bl}=ge(),{HDB_ERROR_MSGS:VS,HTTP_STATUS_CODES:$o}=Cue,{SchemaEventMsg:WS}=ai(),sV=Ht(),{getDatabases:Pue}=(De(),v(ht)),{transformReq:qd}=se(),{replicateOperation:iV}=(is(),v(ko)),{cleanupOrphans:Lue}=(Yn(),v(p_)),KS=pr.string().min(1).max(Hd.schema_length.maximum).pattern(oO).messages({"string.pattern.base":"{:#label} "+Hd.schema_format.message}),Due=pr.string().min(1).max(Hd.schema_length.maximum).pattern(oO).messages({"string.pattern.base":"{:#label} "+Hd.schema_format.message}).required(),Mue=pr.string().min(1).max(Hd.schema_length.maximum).pattern(oO).messages({"string.pattern.base":"{:#label} "+Hd.schema_format.message,"any.required":"'primary_key' is required","string.base":"'primary_key' must be a string"}).required();cV.exports={createSchema:vue,createSchemaStructure:oV,createTable:Uue,createTableStructure:aV,createAttribute:kue,dropSchema:xue,dropTable:Bue,dropAttribute:Fue,getBackup:que,cleanupOrphanBlobs:Gue};async function vue(e){let t=await oV(e);return YS.signalSchemaChange(new WS(process.pid,e.operation,e.schema)),t}o(vue,"createSchema");async function oV(e){let t=Qh(e,pr.object({database:KS,schema:KS}));if(t)throw new Bl(t.message);if(qd(e),!await Fd.checkSchemaExists(e.schema))throw Go(new Error,VS.SCHEMA_EXISTS_ERR(e.schema),$o.BAD_REQUEST,kd.LOG_LEVELS.ERROR,VS.SCHEMA_EXISTS_ERR(e.schema),!0);return await nc.createSchema(e),`database '${e.schema}' successfully created`}o(oV,"createSchemaStructure");async function Uue(e){return qd(e),e.hash_attribute=e.primary_key??e.hash_attribute,await aV(e)}o(Uue,"createTable");async function aV(e){let t=Qh(e,pr.object({database:KS,schema:KS,table:Due,residence:pr.array().items(pr.string().min(1)).optional(),hash_attribute:Mue}));if(t)throw new Bl(t.message);if(!await Fd.checkSchemaTableExists(e.schema,e.table))throw Go(new Error,VS.TABLE_EXISTS_ERR(e.schema,e.table),$o.BAD_REQUEST,kd.LOG_LEVELS.ERROR,VS.TABLE_EXISTS_ERR(e.schema,e.table),!0);let n={name:e.table,schema:e.schema,id:wue(),hash_attribute:e.hash_attribute};try{if(e.residence)if(global.clustering_on)n.residence=e.residence,await nc.createTable(n,e);else throw Go(new Error,"Clustering does not appear to be enabled. 
Cannot insert table with property 'residence'.",$o.BAD_REQUEST);else await nc.createTable(n,e);return`table '${e.schema}.${e.table}' successfully created.`}catch(s){throw s}}o(aV,"createTableStructure");async function xue(e){let t=Qh(e,pr.object({database:pr.string(),schema:pr.string()}).or("database","schema").messages({"object.missing":"'database' is required"}));if(t)throw new Bl(t.message);qd(e);let r=await Fd.checkSchemaExists(e.schema);if(r)throw Go(new Error,r,$o.NOT_FOUND,kd.LOG_LEVELS.ERROR,r,!0);let n=await Fd.schemaDescribe.describeSchema({schema:e.schema}),s=Object.keys(global.hdb_schema[e.schema]);await nc.dropSchema(e),YS.signalSchemaChange(new WS(process.pid,e.operation,e.schema)),await sV.purgeSchemaTableStreams(e.schema,s);let i=await iV(e);return i.message=`successfully deleted '${e.schema}'`,i}o(xue,"dropSchema");async function Bue(e){let t=Qh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required()}));if(t)throw new Bl(t.message);qd(e);let r=await Fd.checkSchemaTableExists(e.schema,e.table);if(r)throw Go(new Error,r,$o.NOT_FOUND,kd.LOG_LEVELS.ERROR,r,!0);await nc.dropTable(e),await sV.purgeTableStream(e.schema,e.table);let n=await iV(e);return n.message=`successfully deleted table '${e.schema}.${e.table}'`,n}o(Bue,"dropTable");async function Fue(e){let t=Qh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required(),attribute:pr.string().required()}));if(t)throw new Bl(t.message);qd(e);let r=await Fd.checkSchemaTableExists(e.schema,e.table);if(r)throw Go(new Error,r,$o.NOT_FOUND,kd.LOG_LEVELS.ERROR,r,!0);if(e.attribute===global.hdb_schema[e.schema][e.table].hash_attribute)throw Go(new Error,"You cannot drop a hash attribute",$o.BAD_REQUEST,void 0,void 0,!0);if(kd.TIME_STAMP_NAMES.indexOf(e.attribute)>=0)throw Go(new Error,`cannot drop internal timestamp attribute: ${e.attribute}`,$o.BAD_REQUEST,void 0,void 0,!0);try{return await nc.dropAttribute(e),Hue(e),YS.signalSchemaChange(new WS(process.pid,e.operation,e.schema,e.table,e.attribute)),`successfully deleted attribute '${e.attribute}'`}catch(n){throw Nue.error(`Got an error deleting attribute ${Oue.inspect(e)}.`),n}}o(Fue,"dropAttribute");function Hue(e){let t=Object.values(global.hdb_schema[e.schema][e.table].attributes);for(let r=0;r<t.length;r++)t[r].attribute===e.attribute&&global.hdb_schema[e.schema][e.table].attributes.splice(r,1)}o(Hue,"dropAttributeFromGlobal");async function kue(e){qd(e);let t=Pue()[e.schema][e.table].attributes;for(let{name:r}of t)if(r===e.attribute)throw Go(new Error,`attribute '${e.attribute}' already exists in ${e.schema}.${e.table}`,$o.BAD_REQUEST,void 0,void 0,!0);return await nc.createAttribute(e),YS.signalSchemaChange(new WS(process.pid,e.operation,e.schema,e.table,e.attribute)),`attribute '${e.schema}.${e.table}.${e.attribute}' successfully created.`}o(kue,"createAttribute");function que(e){return nc.getBackup(e)}o(que,"getBackup");function Gue(e){if(!e.database)throw new Bl('Must provide "database" name for search for orphaned blobs');if(!databases[e.database])throw new Bl(`Unknown database '${e.database}'`);return Lue(databases[e.database],e.database),{message:"Orphaned blobs cleanup started, check logs for progress"}}o(Gue,"cleanupOrphanBlobs")});var uV=M((zUe,lV)=>{"use strict";var{OPERATIONS_ENUM:$ue}=(H(),v(Y)),aO=class{static{o(this,"ReadAuditLogObject")}constructor(t,r,n=void 0,s=void 0){this.operation=$ue.READ_AUDIT_LOG,this.schema=t,this.table=r,this.search_type=n,this.search_values=s}};lV.exports=aO});var cO=M((JUe,pV)=>{"use 
strict";var Vue=Xn(),QUe=uV(),zS=se(),jS=(H(),v(Y)),Kue=oe(),{handleHDBError:dV,hdbErrors:Yue}=ge(),{HDB_ERROR_MSGS:fV,HTTP_STATUS_CODES:mV}=Yue,Wue=Object.values(jS.READ_AUDIT_LOG_SEARCH_TYPES_ENUM),hV="To use this operation audit log must be enabled in harperdb-config.yaml";pV.exports=zue;async function zue(e){if(zS.isEmpty(e.schema))throw new Error(fV.SCHEMA_REQUIRED_ERR);if(zS.isEmpty(e.table))throw new Error(fV.TABLE_REQUIRED_ERR);if(!Kue.get(jS.CONFIG_PARAMS.LOGGING_AUDITLOG))throw dV(new Error,hV,mV.BAD_REQUEST,jS.LOG_LEVELS.ERROR,hV,!0);let t=zS.checkSchemaTableExist(e.schema,e.table);if(t)throw dV(new Error,t,mV.NOT_FOUND,jS.LOG_LEVELS.ERROR,t,!0);if(!zS.isEmpty(e.search_type)&&Wue.indexOf(e.search_type)<0)throw new Error(`Invalid searchType '${read_audit_log_object.search_type}'`);return await Vue.readAuditLog(e)}o(zue,"readAuditLog")});var _V=M((ZUe,EV)=>{"use strict";var{OPERATIONS_ENUM:jue}=(H(),v(Y)),lO=class{static{o(this,"GetBackupObject")}constructor(t,r,n=void 0,s=void 0){this.operation=jue.GET_BACKUP,this.schema=t,this.table=r}};EV.exports=lO});var TV=M((n0e,SV)=>{"use strict";var Que=Xn(),t0e=_V(),uO=se(),Jue=(H(),v(Y)),r0e=oe(),{handleHDBError:Xue,hdbErrors:Zue}=ge(),{HDB_ERROR_MSGS:gV,HTTP_STATUS_CODES:ede}=Zue;SV.exports=tde;async function tde(e){if(uO.isEmpty(e.schema))throw new Error(gV.SCHEMA_REQUIRED_ERR);if(uO.isEmpty(e.table))throw new Error(gV.TABLE_REQUIRED_ERR);let t=uO.checkSchemaTableExist(e.schema,e.table);if(t)throw Xue(new Error,t,ede.NOT_FOUND,Jue.LOG_LEVELS.ERROR,t,!0);return await Que.getBackup(readAuditLogObject)}o(tde,"getBackup")});var bV=M((i0e,AV)=>{"use strict";var rde=oe(),sc=require("joi"),nde=st(),RV=require("moment"),sde=require("fs-extra"),dO=require("path"),ide=require("lodash"),Jh=(H(),v(Y)),{LOG_LEVELS:Fl}=(H(),v(Y)),ode="YYYY-MM-DD hh:mm:ss",ade=dO.resolve(__dirname,"../logs");AV.exports=function(e){return nde.validateBySchema(e,cde)};var cde=sc.object({from:sc.custom(yV),until:sc.custom(yV),level:sc.valid(Fl.NOTIFY,Fl.FATAL,Fl.ERROR,Fl.WARN,Fl.INFO,Fl.DEBUG,Fl.TRACE),order:sc.valid("asc","desc"),limit:sc.number().min(1),start:sc.number().min(0),log_name:sc.custom(lde)});function yV(e,t){if(RV(e,RV.ISO_8601).format(ode)==="Invalid date")return t.message(`'${t.state.path[0]}' date '${e}' is invalid.`)}o(yV,"validateDatetime");function lde(e,t){if(ide.invert(Jh.LOG_NAMES)[e]===void 0)return t.message(`'log_name' '${e}' is invalid.`);let n=rde.get(Jh.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e===void 0?Jh.LOG_NAMES.HDB:e,i=s===Jh.LOG_NAMES.INSTALL?dO.join(ade,Jh.LOG_NAMES.INSTALL):dO.join(n,s);return sde.existsSync(i)?null:t.message(`'log_name' '${e}' does not exist.`)}o(lde,"validateReadLogPath")});var mO=M((a0e,NV)=>{"use strict";var QS=(H(),v(Y)),ude=j(),dde=oe(),fde=bV(),fO=require("path"),IV=require("fs-extra"),{once:mde}=require("events"),{handleHDBError:hde,hdbErrors:pde}=ge(),{PACKAGE_ROOT:Ede}=yt(),{replicateOperation:_de}=(is(),v(ko)),gde=fO.join(Ede,"logs"),Sde=1e3,Tde=200;NV.exports=Rde;async function Rde(e){let t=fde(e);if(t)throw hde(t,t.message,pde.HTTP_STATUS_CODES.BAD_REQUEST,void 0,void 0,!0);let r=_de(e),n=dde.get(QS.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e.log_name===void 0?QS.LOG_NAMES.HDB:e.log_name,i=s===QS.LOG_NAMES.INSTALL?fO.join(gde,QS.LOG_NAMES.INSTALL):fO.join(n,s),a=e.level!==void 0,c=a?e.level:void 0,l=e.from!==void 0,u=l?new Date(e.from):void 0,d=e.until!==void 0,f=d?new Date(e.until):void 0,m=e.limit===void 0?Sde:e.limit,h=e.order===void 0?void 0:e.order,p=e.start===void 
0?0:e.start,_=p+m,g=0;h==="desc"&&!u&&!f&&(g=Math.max(IV.statSync(i).size-(_+5)*Tde,0));let R=IV.createReadStream(i,{start:g});R.on("error",q=>{ude.error(q)});let T=0,y=[],N="",O;R.on("data",q=>{let W=/(?:^|\n)(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:[\d\.]+Z) \[(.+?)]: /g;q=N+q;let G=0,K;for(;(K=W.exec(q))&&!R.destroyed;){O&&(O.message=q.slice(G,K.index),F(O));let[ce,le,ie]=K,pe=ie.split("] ["),Ne=pe[0],Ue=pe[1];pe.splice(0,2),O={timestamp:le,thread:Ne,level:Ue,tags:pe,message:""},G=K.index+ce.length}N=q.slice(G)}),R.on("end",q=>{R.destroyed||O&&(O.message=N.trim(),F(O))}),R.resume();function F(q){let W,G,K;switch(!0){case(a&&l&&d):W=new Date(q.timestamp),G=new Date(u),K=new Date(f),q.level===c&&W>=G&&W<=K&&T<p?T++:q.level===c&&W>=G&&W<=K&&(ro(q,h,y),T++,T===_&&R.destroy());break;case(a&&l):W=new Date(q.timestamp),G=new Date(u),q.level===c&&W>=G&&T<p?T++:q.level===c&&W>=G&&(ro(q,h,y),T++,T===_&&R.destroy());break;case(a&&d):W=new Date(q.timestamp),K=new Date(f),q.level===c&&W<=K&&T<p?T++:q.level===c&&W<=K&&(ro(q,h,y),T++,T===_&&R.destroy());break;case(l&&d):W=new Date(q.timestamp),G=new Date(u),K=new Date(f),W>=G&&W<=K&&T<p?T++:W>=G&&W<=K&&(ro(q,h,y),T++,T===_&&R.destroy());break;case a:q.level===c&&T<p?T++:q.level===c&&(ro(q,h,y),T++,T===_&&R.destroy());break;case l:W=new Date(q.timestamp),G=new Date(u),W>=G&&T<p?T++:W>=G&&T>=p&&(ro(q,h,y),T++,T===_&&R.destroy());break;case d:W=new Date(q.timestamp),K=new Date(f),W<=K&&T<p?T++:W<=K&&T>=p&&(ro(q,h,y),T++,T===_&&R.destroy());break;default:T<p?T++:(ro(q,h,y),T++,T===_&&R.destroy())}}o(F,"onLogMessage"),await mde(R,"close");let Z=await r;if(Z.replicated){for(let q of y)q.node=server.hostname;for(let q of Z.replicated){let W=q.node;if(q.status==="failed")ro({timestamp:new Date().toISOString(),level:"error",node:W,message:`Error retrieving logs: ${q.reason}`},h,y);else for(let G of q.results)G.node=W,ro(G,h,y)}}return y}o(Rde,"readLog");function ro(e,t,r){t==="desc"?yde(e,r):t==="asc"?Ade(e,r):r.push(e)}o(ro,"pushLineToResult");function yde(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)>r?n=i+1:s=i}t.splice(n,0,e)}o(yde,"insertDescending");function Ade(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)<r?n=i+1:s=i}t.splice(n,0,e)}o(Ade,"insertAscending")});var JS=M((m0e,PV)=>{"use strict";var hO=require("joi"),{string:$d,boolean:wV,date:bde}=hO.types(),Ide=st(),{validateSchemaExists:l0e,validateTableExists:u0e,validateSchemaName:d0e}=qi(),Nde=(H(),v(Y)),wde=ft(),OV=oe();OV.initSync();var f0e=$d.invalid(OV.get(Nde.CONFIG_PARAMS.CLUSTERING_NODENAME)??"node_name").pattern(wde.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),CV={operation:$d.valid("add_node","update_node","set_node_replication"),node_name:$d.optional(),subscriptions:hO.array().items({table:$d.optional(),schema:$d.optional(),database:$d.optional(),subscribe:wV.required(),publish:wV.required().custom(Cde),start_time:bde.iso()})};function Ode(e){return Ide.validateBySchema(e,hO.object(CV))}o(Ode,"addUpdateNodeValidator");function Cde(e,t){if(t.state.ancestors[2].operation==="add_node"&&e===!1&&t.state.ancestors[0].subscribe===!1)return t.message(`'subscriptions[${t.state.path[1]}]' subscribe and/or publish must be set to true when adding a node`)}o(Cde,"checkForFalsy");PV.exports={addUpdateNodeValidator:Ode,validationSchema:CV}});var ic=M((p0e,LV)=>{"use strict";var 
pO=class{static{o(this,"Node")}constructor(t,r,n){this.name=t,this.subscriptions=r,this.system_info=n}},EO=class{static{o(this,"NodeSubscription")}constructor(t,r,n,s){this.schema=t,this.table=r,this.publish=n,this.subscribe=s}};LV.exports={Node:pO,NodeSubscription:EO}});var MV=M((_0e,DV)=>{"use strict";var Pde=(H(),v(Y)).OPERATIONS_ENUM,_O=class{static{o(this,"UpsertObject")}constructor(t,r,n,s=void 0){this.operation=Pde.UPSERT,this.schema=t,this.table=r,this.records=n,this.__origin=s}};DV.exports=_O});var Xh=M((S0e,vV)=>{"use strict";var gO=class{static{o(this,"RemotePayloadObject")}constructor(t,r,n,s){this.operation=t,this.node_name=r,this.subscriptions=n,this.system_info=s}},SO=class{static{o(this,"RemotePayloadSubscription")}constructor(t,r,n,s,i,a,c){this.schema=t,this.table=r,this.hash_attribute=n,this.publish=s,this.subscribe=i,this.start_time=a,c!==void 0&&(this.attributes=c)}};vV.exports={RemotePayloadObject:gO,RemotePayloadSubscription:SO}});var xV=M((R0e,UV)=>{"use strict";var TO=class{static{o(this,"TableSizeObject")}constructor(t,r,n=0,s=0,i=0,a=0){this.schema=t,this.table=r,this.table_size=n,this.record_count=s,this.transaction_log_size=i,this.transaction_log_record_count=a}};UV.exports=TO});var FV=M((w0e,BV)=>{"use strict";var Lde=xV(),A0e=$t(),b0e=gt(),Dde=j(),{getSchemaPath:I0e,getTransactionAuditStorePath:N0e}=At(),{getDatabases:Mde}=(De(),v(ht));BV.exports=vde;async function vde(e){let t=new Lde;try{let r=Mde()[e.schema]?.[e.name],n=r.primaryStore.getStats(),s=r.auditStore?.getStats();t.schema=e.schema,t.table=e.name,t.record_count=n.entryCount,t.transaction_log_record_count=s.entryCount}catch(r){Dde.warn(`unable to stat table dbi due to ${r}`)}return t}o(vde,"lmdbGetTableSize")});var kV=M((C0e,HV)=>{"use strict";var RO=class{static{o(this,"SystemInformationObject")}constructor(t,r,n,s,i,a,c){this.system=t,this.time=r,this.cpu=n,this.memory=s,this.disk=i,this.network=a,this.harperdb_processes=c}};HV.exports=RO});var Kd=M((U0e,VV)=>{"use strict";var Ude=require("fs-extra"),xde=require("path"),hn=require("systeminformation"),oc=j(),qV=Ht(),L0e=ft(),Vd=(H(),v(Y)),Bde=FV(),Fde=vo(),{getThreadInfo:GV}=ze(),Zh=oe();Zh.initSync();var Hde=kV(),{openEnvironment:D0e}=gt(),{getSchemaPath:M0e}=At(),{database:v0e,databases:yO}=(De(),v(ht)),XS;VV.exports={getHDBProcessInfo:NO,getNetworkInfo:OO,getDiskInfo:wO,getMemoryInfo:IO,getCPUInfo:bO,getTimeInfo:AO,getSystemInformation:CO,systemInformation:kde,getTableSize:PO,getMetrics:LO};function AO(){return hn.time()}o(AO,"getTimeInfo");async function bO(){try{let{family:e,model:t,stepping:r,revision:n,voltage:s,speedmin:i,speedmax:a,governor:c,socket:l,cache:u,...d}=await hn.cpu();d.cpu_speed=await hn.cpuCurrentSpeed();let{rawCurrentload:f,rawCurrentloadIdle:m,rawCurrentloadIrq:h,rawCurrentloadNice:p,rawCurrentloadSystem:_,rawCurrentloadUser:g,cpus:R,...T}=await hn.currentLoad();return T.cpus=[],R.forEach(y=>{let{rawLoad:N,rawLoadIdle:O,rawLoadIrq:F,rawLoadNice:Z,rawLoadSystem:q,rawLoadUser:W,...G}=y;T.cpus.push(G)}),d.current_load=T,d}catch(e){return oc.error(`error in getCPUInfo: ${e}`),{}}}o(bO,"getCPUInfo");async function IO(){try{let{buffers:e,cached:t,slab:r,buffcache:n,...s}=await hn.mem();return Object.assign(s,process.memoryUsage())}catch(e){return oc.error(`error in getMemoryInfo: ${e}`),{}}}o(IO,"getMemoryInfo");async function NO(){let e={core:[],clustering:[]};try{let t=await hn.processes(),r;try{r=Number.parseInt(await 
Ude.readFile(xde.join(Zh.get(Vd.CONFIG_PARAMS.ROOTPATH),Vd.HDB_PID_FILE),"utf8"))}catch(n){if(n.code===Vd.NODE_ERROR_CODES.ENOENT)oc.warn("Unable to locate 'hdb.pid' file, try stopping and starting HarperDB. This could be because HarperDB is not running.");else throw n}t.list.forEach(n=>{n.pid===r?e.core.push(n):n.name==="nats-server"&&e.clustering.push(n)});for(let n of e.core)for(let s of t.list)s.pid===n.parentPid&&(s.name==="PM2"||s.command==="PM2")&&(n.parent="PM2");return e}catch(t){return oc.error(`error in getHDBProcessInfo: ${t}`),e}}o(NO,"getHDBProcessInfo");async function wO(){let e={};try{if(!Zh.get(Vd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_DISK))return e;let{rIO_sec:t,wIO_sec:r,tIO_sec:n,ms:s,...i}=await hn.disksIO();e.io=i;let{rxSec:a,txSec:c,wxSec:l,...u}=await hn.fsStats();return e.read_write=u,e.size=await hn.fsSize(),e}catch(t){return oc.error(`error in getDiskInfo: ${t}`),e}}o(wO,"getDiskInfo");async function OO(){let e={default_interface:null,latency:{},interfaces:[],stats:[],connections:[]};try{return Zh.get(Vd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_NETWORK)&&(e.default_interface=await hn.networkInterfaceDefault(),e.latency=await hn.inetChecksite("google.com"),(await hn.networkInterfaces()).forEach(n=>{let{internal:s,virtual:i,mtu:a,dhcp:c,dnsSuffix:l,ieee8021xAuth:u,ieee8021xState:d,carrierChanges:f,...m}=n;e.interfaces.push(m)}),(await hn.networkStats()).forEach(n=>{let{rxSec:s,txSec:i,ms:a,...c}=n;e.stats.push(c)})),e}catch(t){return oc.error(`error in getNetworkInfo: ${t}`),e}}o(OO,"getNetworkInfo");async function CO(){if(XS!==void 0)return XS;let e={};try{let{codepage:t,logofile:r,serial:n,build:s,servicepack:i,uefi:a,...c}=await hn.osInfo();e=c;let l=await hn.versions("node, npm");return e.node_version=l.node,e.npm_version=l.npm,XS=e,XS}catch(t){return oc.error(`error in getSystemInformation: ${t}`),e}}o(CO,"getSystemInformation");async function PO(){let e=[],t=await Fde.describeAll();for(let r of Object.values(t))for(let n of Object.values(r))e.push(await Bde(n));return e}o(PO,"getTableSize");async function LO(){let e={};for(let t in yO){let r=e[t]={},n=r.tables={};for(let s in yO[t])try{let i=yO[t][s];if(!r.readers&&(Object.assign(r,i.primaryStore.rootStore.getStats()),delete r.root,r.readers=i.primaryStore.rootStore.readerList().split(/\n\s+/).slice(1).map(l=>{let[u,d,f]=l.trim().split(" ");return{pid:u,thread:d,txnid:f}}),i.auditStore)){let{treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}=i.auditStore.getStats();r.audit={treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}}let a=i.primaryStore.getStats(),c={};for(let l of["treeDepth","treeBranchPageCount","treeLeafPageCount","entryCount","overflowPages"])c[l]=a[l];n[s]=c}catch(i){oc.notify(`Error getting stats for table ${s}: ${i}`)}}return e}o(LO,"getMetrics");async function $V(){if(Zh.get(Vd.CONFIG_PARAMS.CLUSTERING_ENABLED)){let{jsm:e}=await qV.getNATSReferences(),t=await qV.listStreams(),r=[];for(let n of t){let s=[],i=await e.consumers.list(n.config.name);for await(let c of i)s.push({name:c.name,created:c.created,num_ack_pending:c.num_ack_pending,num_redelivered:c.num_redelivered,num_waiting:c.num_waiting,num_pending:c.num_pending});let a={stream_name:n.config.name,database:n.config.subjects[0].split(".")[1],table:n.config.subjects[0].split(".")[2],state:n.state,consumers:s};r.push(a)}return r}}o($V,"getNatsStreamInfo");async function kde(e){let t=new Hde;if(!Array.isArray(e.attributes)||e.attributes.length===0)return t.system=await 
CO(),t.time=AO(),t.cpu=await bO(),t.memory=await IO(),t.disk=await wO(),t.network=await OO(),t.harperdb_processes=await NO(),t.table_size=await PO(),t.metrics=await LO(),t.threads=await GV(),t.replication=await $V(),t;for(let r=0;r<e.attributes.length;r++)switch(e.attributes[r]){case"system":t.system=await CO();break;case"time":t.time=AO();break;case"cpu":t.cpu=await bO();break;case"memory":t.memory=await IO();break;case"disk":t.disk=await wO();break;case"network":t.network=await OO();break;case"harperdb_processes":t.harperdb_processes=await NO();break;case"table_size":t.table_size=await PO();break;case"database_metrics":case"metrics":t.metrics=await LO();break;case"threads":t.threads=await GV();break;case"replication":t.replication=await $V();break;default:break}return t}o(kde,"systemInformation")});var vs=M((k0e,zV)=>{"use strict";var qde=an(),DO=se(),Gde=require("util"),Hl=(H(),v(Y)),KV=oe();KV.initSync();var $de=dw(),YV=on(),{Node:B0e,NodeSubscription:F0e}=ic(),Vde=Wu(),Kde=MV(),{RemotePayloadObject:Yde,RemotePayloadSubscription:Wde}=Xh(),{handleHDBError:zde,hdbErrors:jde}=ge(),{HTTP_STATUS_CODES:Qde,HDB_ERROR_MSGS:Jde}=jde,Xde=ci(),Zde=Kd(),{packageJson:efe}=yt(),{getDatabases:tfe}=(De(),v(ht)),H0e=Gde.promisify($de.authorize),rfe=YV.searchByHash,nfe=YV.searchByValue;zV.exports={isEmpty:sfe,getNodeRecord:ife,upsertNodeRecord:ofe,buildNodePayloads:afe,checkClusteringEnabled:cfe,getAllNodeRecords:lfe,getSystemInfo:ufe,reverseSubscription:WV};function sfe(e){return e==null}o(sfe,"isEmpty");async function ife(e){let t=new Vde(Hl.SYSTEM_SCHEMA_NAME,Hl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e],["*"]);return rfe(t)}o(ife,"getNodeRecord");async function ofe(e){let t=new Kde(Hl.SYSTEM_SCHEMA_NAME,Hl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e]);return qde.upsert(t)}o(ofe,"upsertNodeRecord");function WV(e){if(DO.isEmpty(e.subscribe)||DO.isEmpty(e.publish))throw new Error("Received invalid subscription object");let{schema:t,table:r,hash_attribute:n}=e,s={schema:t,table:r,hash_attribute:n};return e.subscribe===!0&&e.publish===!1?(s.subscribe=!1,s.publish=!0):e.subscribe===!1&&e.publish===!0?(s.subscribe=!0,s.publish=!1):(s.subscribe=e.subscribe,s.publish=e.publish),s}o(WV,"reverseSubscription");function afe(e,t,r,n){let s=[];for(let i=0,a=e.length;i<a;i++){let c=e[i],{schema:l,table:u}=c,d=DO.getTableHashAttribute(l,u),{subscribe:f,publish:m}=WV(c),h=tfe()[l]?.[u],p=new Wde(l,u,d,m,f,c.start_time,h.schemaDefined?h.attributes:void 0);s.push(p)}return new Yde(r,t,s,n)}o(afe,"buildNodePayloads");function cfe(){if(!KV.get(Hl.CONFIG_PARAMS.CLUSTERING_ENABLED))throw zde(new Error,Jde.CLUSTERING_NOT_ENABLED,Qde.BAD_REQUEST,void 0,void 0,!0)}o(cfe,"checkClusteringEnabled");async function lfe(){let e=new Xde(Hl.SYSTEM_SCHEMA_NAME,Hl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,"name","*",void 0,["*"]);return Array.from(await nfe(e))}o(lfe,"getAllNodeRecords");async function ufe(){let e=await Zde.getSystemInformation();return{hdb_version:efe.version,node_version:e.node_version,platform:e.platform}}o(ufe,"getSystemInfo")});var MO=M((G0e,r1)=>{"use strict";var ZS=Ht(),jV=se(),QV=ft(),JV=(H(),v(Y)),eT=j(),XV=Gd(),dfe=ju(),{RemotePayloadObject:ffe}=Xh(),{handleHDBError:ZV,hdbErrors:mfe}=ge(),{HTTP_STATUS_CODES:e1}=mfe,{NodeSubscription:t1}=ic();r1.exports=hfe;async function hfe(e,t){let r;try{r=await ZS.request(`${t}.${QV.REQUEST_SUFFIX}`,new ffe(JV.OPERATIONS_ENUM.DESCRIBE_ALL,t,void 0,void 0)),eT.trace("Response from remote describe all request:",r)}catch(a){eT.error(`addNode received error from describe all request to remote 
node: ${a}`);let c=ZS.requestErrorHandler(a,"add_node",t);throw ZV(new Error,c,e1.INTERNAL_SERVER_ERROR,"error",c)}if(r.status===QV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let a=`Error returned from remote node ${t}: ${r.message}`;throw ZV(new Error,a,e1.INTERNAL_SERVER_ERROR,"error",a)}let n=r.message,s=[],i=[];for(let a of e){let{table:c}=a,l=a.database??a.schema??"data";if(l===JV.SYSTEM_SCHEMA_NAME){await ZS.createLocalTableStream(l,c);let p=new t1(l,c,a.publish,a.subscribe);p.start_time=a.start_time,i.push(p);continue}let u=jV.doesSchemaExist(l),d=n[l]!==void 0,f=c?jV.doesTableExist(l,c):!0,m=c?n?.[l]?.[c]!==void 0:!0;if(!u&&!d||!f&&!m){s.push(a);continue}if(!u&&d&&(eT.trace(`addNode creating schema: ${l}`),await XV.createSchema({operation:"create_schema",schema:l})),!f&&m){eT.trace(`addNode creating table: ${c} in schema: ${l} with attributes ${JSON.stringify(n[l][c].attributes)}`);let p=new dfe(l,c,n[l][c].hash_attribute);n[l][c].attributes&&(p.attributes=n[l][c].attributes),await XV.createTable(p)}await ZS.createLocalTableStream(l,c);let h=new t1(l,c,a.publish,a.subscribe);h.start_time=a.start_time,i.push(h)}return{added:i,skipped:s}}o(hfe,"reviewSubscriptions")});var Yd={};Oe(Yd,{addNodeBack:()=>vO,removeNodeBack:()=>UO,setNode:()=>gfe});async function gfe(e){e.node_name&&!e.hostname&&(e.hostname=e.node_name),e.verify_tls!==void 0&&(e.rejectUnauthorized=e.verify_tls);let{url:t,hostname:r}=e;t?r||(r=e.hostname=Ei(t)):t=$S(r);let n=(0,s1.validateBySchema)(e,_fe);if(n)throw(0,Vo.handleHDBError)(n,n.message,Efe.BAD_REQUEST,void 0,void 0,!0);if(e.operation==="remove_node"){if(!t&&!r)throw new Vo.ClientError("url or hostname is required for remove_node operation");let h=r,p=Yt(),_=await p.get(h);if(!_)throw new Vo.ClientError(h+" does not exist");try{await jh({url:_.url},{operation:$.REMOVE_NODE_BACK,name:_?.subscriptions?.length>0?et():h},void 0)}catch(g){cs.warn(`Error removing node from target node ${h}, if it is offline and we be online in the future, you may need to clean up this node manually, or retry:`,g)}return await p.delete(h),`Successfully removed '${h}' from cluster`}if(!t)throw new Vo.ClientError("url required for this operation");let s=Qa();if(s==null)throw new Vo.ClientError("replication url is missing from harperdb-config.yaml");let i,a,c;if(t?.startsWith("wss:")){i=await(0,Us.getReplicationCert)();let h=await(0,Us.getReplicationCertAuth)();if(!i)throw new Error("Unable to find a certificate to use for replication");i.options.is_self_signed?(a=await(0,Us.createCsr)(),cs.info("Sending CSR to target node:",t)):h&&(c=h.certificate,cs.info("Sending CA named",h.name,"to target node",t))}let l={operation:$.ADD_NODE_BACK,hostname:(0,cc.get)(U.REPLICATION_HOSTNAME),target_hostname:r,url:s,csr:a,cert_auth:c,authorization:e.retain_authorization?e.authorization:null};if((0,cc.get)(U.REPLICATION_SHARD)!==void 0&&(l.shard=(0,cc.get)(U.REPLICATION_SHARD)),e.subscriptions?l.subscriptions=e.subscriptions.map(n1):l.subscriptions=null,e.hasOwnProperty("subscribe")||e.hasOwnProperty("publish")){let h=n1(e);l.subscribe=h.subscribe,l.publish=h.publish}e?.authorization?.username&&e?.authorization?.password&&(e.authorization="Basic "+Buffer.from(e.authorization.username+":"+e.authorization.password).toString("base64"));let u,d;try{u=await jh({url:t},l,e)}catch(h){h.message=`Error returned from ${t}: `+h.message,cs.warn("Error adding node:",t,"to cluster:",h),d=h}if(a&&(!u?.certificate||!u?.certificate?.includes?.("BEGIN CERTIFICATE")))throw d?(d.message+=" and connection was required to sign 
certificate",d):new Error(`Unexpected certificate signature response from node ${t} response: ${JSON.stringify(u)}`);a&&(cs.info("CSR response received from node:",t,"saving certificate and CA in hdb_certificate"),await(0,Us.setCertTable)({name:pfe.certificateFromPem(u.signingCA).issuer.getField("CN").value,certificate:u.signingCA,is_authority:!0}),u.certificate&&await(0,Us.setCertTable)({name:et(),uses:["https","operations","wss"],certificate:u.certificate,private_key_name:i?.options?.key_file,is_authority:!1,is_self_signed:!1}),c=u.signingCA);let f={url:t,ca:u?.usingCA};if(e.hostname&&(f.name=e.hostname),e.subscriptions?f.subscriptions=e.subscriptions:f.replicates=!0,e.start_time&&(f.start_time=typeof e.start_time=="string"?new Date(e.start_time).getTime():e.start_time),e.retain_authorization&&(f.authorization=e.authorization),e.revoked_certificates&&(f.revoked_certificates=e.revoked_certificates),u?.shard!==void 0?f.shard=u.shard:e.shard!==void 0&&(f.shard=e.shard),f.replicates){let h={url:s,ca:c,replicates:!0,subscriptions:null};(0,cc.get)(U.REPLICATION_SHARD)!==void 0&&(h.shard=(0,cc.get)(U.REPLICATION_SHARD)),e.retain_authorization&&(h.authorization=e.authorization),e.start_time&&(h.start_time=e.start_time),await Fo(et(),h)}await Fo(u?u.nodeName:f.name??Ei(t),f);let m;return e.operation==="update_node"?m=`Successfully updated '${t}'`:m=`Successfully added '${t}' to cluster`,d&&(m+=" but there was an error updating target node: "+d.message),m}async function vO(e){cs.trace("addNodeBack received request:",e);let t=await(0,Us.signCertificate)(e),r;e.csr?(r=t.signingCA,cs.info("addNodeBack received CSR from node:",e.url,"this node will use and respond with CA that was used to issue CSR")):(r=e?.cert_auth,cs.info("addNodeBack received CA from node:",e.url));let n={url:e.url,ca:r};e.subscriptions?n.subscriptions=e.subscriptions:(n.replicates=!0,n.subscriptions=null),e.start_time&&(n.start_time=e.start_time),e.authorization&&(n.authorization=e.authorization),e.shard!==void 0&&(n.shard=e.shard);let s=await(0,Us.getReplicationCertAuth)();if(n.replicates){let i={url:Qa(),ca:s?.certificate,replicates:!0,subscriptions:null};(0,cc.get)(U.REPLICATION_SHARD)!==void 0&&(i.shard=(0,cc.get)(U.REPLICATION_SHARD),t.shard=i.shard),e.start_time&&(i.start_time=e.start_time),e.authorization&&(i.authorization=e.authorization),await Fo(et(),i)}return await Fo(e.hostname,n),t.nodeName=et(),t.usingCA=s?.certificate,cs.info("addNodeBack responding to:",e.url,"with CA named:",s?.name),t}async function UO(e){cs.trace("removeNodeBack received request:",e),await Yt().delete(e.name)}function n1(e){let{subscribe:t,publish:r}=e;return{...e,subscribe:r,publish:t}}var Us,s1,ac,cc,cs,Vo,pfe,Efe,_fe,Wd=ue(()=>{Us=w(as()),s1=w(st()),ac=w(require("joi")),cc=w(oe());H();Vh();Dl();is();cs=w(j()),Vo=w(ge()),{pki:pfe}=require("node-forge"),{HTTP_STATUS_CODES:Efe}=Vo.hdbErrors,_fe=ac.default.object({hostname:ac.default.string(),verify_tls:ac.default.boolean(),replicates:ac.default.boolean(),subscriptions:ac.default.array(),revoked_certificates:ac.default.array(),shard:ac.default.number()});o(gfe,"setNode");o(vO,"addNodeBack");o(UO,"removeNodeBack");o(n1,"reverseSubscription")});var iT=M((J0e,o1)=>{"use strict";var{handleHDBError:tT,hdbErrors:Sfe}=ge(),{HTTP_STATUS_CODES:rT}=Sfe,{addUpdateNodeValidator:Tfe}=JS(),nT=j(),sT=(H(),v(Y)),i1=ft(),Rfe=se(),ep=Ht(),tp=vs(),xO=oe(),yfe=MO(),{Node:Afe,NodeSubscription:bfe}=ic(),{broadcast:Ife}=ze(),{setNode:Nfe}=(Wd(),v(Yd)),j0e=oe(),Q0e=(H(),v(Y)),wfe="Unable to create subscriptions due to 
schema and/or tables not existing on the local or remote node",Ofe="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",Cfe=xO.get(sT.CONFIG_PARAMS.CLUSTERING_NODENAME);o1.exports=Pfe;async function Pfe(e,t=!1){if(nT.trace("addNode called with:",e),xO.get(sT.CONFIG_PARAMS.REPLICATION_URL)||xO.get(sT.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Nfe(e);tp.checkClusteringEnabled();let r=Tfe(e);if(r)throw tT(r,r.message,rT.BAD_REQUEST,void 0,void 0,!0);let n=e.node_name;if(!t){let f=await tp.getNodeRecord(n);if(!Rfe.isEmptyOrZeroLength(f))throw tT(new Error,`Node '${n}' has already been added, perform update_node to proceed.`,rT.BAD_REQUEST,void 0,void 0,!0)}let{added:s,skipped:i}=await yfe(e.subscriptions,n),a={message:void 0,added:s,skipped:i};if(s.length===0)return a.message=wfe,a;let c=tp.buildNodePayloads(s,Cfe,sT.OPERATIONS_ENUM.ADD_NODE,await tp.getSystemInfo()),l=[];for(let f=0,m=s.length;f<m;f++){let h=s[f];s[f].start_time===void 0&&delete s[f].start_time,l.push(new bfe(h.schema,h.table,h.publish,h.subscribe))}nT.trace("addNode sending remote payload:",c);let u;try{u=await ep.request(`${n}.${i1.REQUEST_SUFFIX}`,c)}catch(f){nT.error(`addNode received error from request: ${f}`);for(let h=0,p=s.length;h<p;h++){let _=s[h];_.publish=!1,_.subscribe=!1,await ep.updateRemoteConsumer(_,n)}let m=ep.requestErrorHandler(f,"add_node",n);throw tT(new Error,m,rT.INTERNAL_SERVER_ERROR,"error",m)}if(u.status===i1.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let f=`Error returned from remote node ${n}: ${u.message}`;throw tT(new Error,f,rT.INTERNAL_SERVER_ERROR,"error",f)}nT.trace(u);for(let f=0,m=s.length;f<m;f++){let h=s[f];await ep.updateRemoteConsumer(h,n),h.subscribe===!0&&await ep.updateConsumerIterator(h.schema,h.table,n,"start")}let d=new Afe(n,l,u.system_info);return await tp.upsertNodeRecord(d),Ife({type:"nats_update"}),i.length>0?a.message=Ofe:a.message=`Successfully added '${n}' to manifest`,a}o(Pfe,"addNode")});var kO=M((exe,c1)=>{"use strict";var{handleHDBError:BO,hdbErrors:Lfe}=ge(),{HTTP_STATUS_CODES:FO}=Lfe,{addUpdateNodeValidator:Dfe}=JS(),rp=j(),oT=(H(),v(Y)),a1=ft(),Z0e=se(),np=Ht(),sp=vs(),HO=oe(),{cloneDeep:Mfe}=require("lodash"),vfe=MO(),{Node:Ufe,NodeSubscription:xfe}=ic(),{broadcast:Bfe}=ze(),{setNode:Ffe}=(Wd(),v(Yd)),Hfe="Unable to update subscriptions due to schema and/or tables not existing on the local or remote node",kfe="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",qfe=HO.get(oT.CONFIG_PARAMS.CLUSTERING_NODENAME);c1.exports=Gfe;async function Gfe(e){if(rp.trace("updateNode called with:",e),HO.get(oT.CONFIG_PARAMS.REPLICATION_URL)??HO.get(oT.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Ffe(e);sp.checkClusteringEnabled();let t=Dfe(e);if(t)throw BO(t,t.message,FO.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n,s=await sp.getNodeRecord(r);s.length>0&&(n=Mfe(s));let{added:i,skipped:a}=await vfe(e.subscriptions,r),c={message:void 0,updated:i,skipped:a};if(i.length===0)return c.message=Hfe,c;let l=sp.buildNodePayloads(i,qfe,oT.OPERATIONS_ENUM.UPDATE_NODE,await sp.getSystemInfo());for(let d=0,f=i.length;d<f;d++){let m=i[d];rp.trace(`updateNode updating work stream for node: ${r} subscription:`,m),i[d].start_time===void 0&&delete i[d].start_time}rp.trace("updateNode sending remote payload:",l);let u;try{u=await np.request(`${r}.${a1.REQUEST_SUFFIX}`,l)}catch(d){rp.error(`updateNode received error from request: ${d}`);let f=np.requestErrorHandler(d,"update_node",r);throw 
BO(new Error,f,FO.INTERNAL_SERVER_ERROR,"error",f)}if(u.status===a1.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let d=`Error returned from remote node ${r}: ${u.message}`;throw BO(new Error,d,FO.INTERNAL_SERVER_ERROR,"error",d)}rp.trace(u);for(let d=0,f=i.length;d<f;d++){let m=i[d];await np.updateRemoteConsumer(m,r),m.subscribe===!0?await np.updateConsumerIterator(m.schema,m.table,r,"start"):await np.updateConsumerIterator(m.schema,m.table,r,"stop")}return n||(n=[new Ufe(r,[],u.system_info)]),await $fe(n[0],i,u.system_info),a.length>0?c.message=kfe:c.message=`Successfully updated '${r}'`,c}o(Gfe,"updateNode");async function $fe(e,t,r){let n=e;for(let s=0,i=t.length;s<i;s++){let a=t[s],c=!1;for(let l=0,u=e.subscriptions.length;l<u;l++){let d=n.subscriptions[l];if(d.schema===a.schema&&d.table===a.table){d.publish=a.publish,d.subscribe=a.subscribe,c=!0;break}}c||n.subscriptions.push(new xfe(a.schema,a.table,a.publish,a.subscribe))}n.system_info=r,await sp.upsertNodeRecord(n),Bfe({type:"nats_update"})}o($fe,"updateNodeTable")});var m1=M((rxe,f1)=>{"use strict";var d1=require("joi"),{string:l1}=d1.types(),Vfe=st(),u1=(H(),v(Y)),Kfe=oe(),Yfe=ft();f1.exports=Wfe;function Wfe(e){let t=l1.invalid(Kfe.get(u1.CONFIG_PARAMS.CLUSTERING_NODENAME)).pattern(Yfe.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),r=d1.object({operation:l1.valid(u1.OPERATIONS_ENUM.REMOVE_NODE).required(),node_name:t});return Vfe.validateBySchema(e,r)}o(Wfe,"removeNodeValidator")});var aT=M((sxe,g1)=>{"use strict";var{handleHDBError:h1,hdbErrors:zfe}=ge(),{HTTP_STATUS_CODES:p1}=zfe,jfe=m1(),ip=j(),E1=vs(),Qfe=se(),zd=(H(),v(Y)),_1=ft(),qO=Ht(),GO=oe(),{RemotePayloadObject:Jfe}=Xh(),{NodeSubscription:Xfe}=ic(),Zfe=zu(),eme=ka(),{broadcast:tme}=ze(),{setNode:rme}=(Wd(),v(Yd)),nme=GO.get(zd.CONFIG_PARAMS.CLUSTERING_NODENAME);g1.exports=sme;async function sme(e){if(ip.trace("removeNode called with:",e),GO.get(zd.CONFIG_PARAMS.REPLICATION_URL)??GO.get(zd.CONFIG_PARAMS.REPLICATION_HOSTNAME))return rme(e);E1.checkClusteringEnabled();let t=jfe(e);if(t)throw h1(t,t.message,p1.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n=await E1.getNodeRecord(r);if(Qfe.isEmptyOrZeroLength(n))throw h1(new Error,`Node '${r}' was not found.`,p1.BAD_REQUEST,void 0,void 0,!0);n=n[0];let s=new Jfe(zd.OPERATIONS_ENUM.REMOVE_NODE,nme,[]),i,a=!1;for(let l=0,u=n.subscriptions.length;l<u;l++){let d=n.subscriptions[l];d.subscribe===!0&&await qO.updateConsumerIterator(d.schema,d.table,r,"stop");try{await qO.updateRemoteConsumer(new Xfe(d.schema,d.table,!1,!1),r)}catch(f){ip.error(f)}}try{i=await qO.request(`${r}.${_1.REQUEST_SUFFIX}`,s),ip.trace("Remove node reply from remote node:",r,i)}catch(l){ip.error("removeNode received error from request:",l),a=!0}let c=new Zfe(zd.SYSTEM_SCHEMA_NAME,zd.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[r]);return await eme.deleteRecord(c),tme({type:"nats_update"}),i?.status===_1.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR||a?(ip.error("Error returned from remote node:",r,i?.message),`Successfully removed '${r}' from local manifest, however there was an error reaching remote node. 
Check the logs for more details.`):`Successfully removed '${r}' from manifest`}o(sme,"removeNode")});var R1=M((oxe,T1)=>{"use strict";var S1=require("joi"),{string:ime,array:ome}=S1.types(),ame=st(),cme=JS();T1.exports=lme;function lme(e){let t=S1.object({operation:ime.valid("configure_cluster").required(),connections:ome.items(cme.validationSchema).required()});return ame.validateBySchema(e,t)}o(lme,"configureClusterValidator")});var $O=M((cxe,N1)=>{"use strict";var y1=(H(),v(Y)),cT=j(),ume=se(),dme=oe(),fme=aT(),mme=iT(),hme=vs(),pme=R1(),{handleHDBError:A1,hdbErrors:Eme}=ge(),{HTTP_STATUS_CODES:b1}=Eme,_me="Configure cluster complete.",gme="Failed to configure the cluster. Check the logs for more details.",Sme="Configure cluster was partially successful. Errors occurred when attempting to configure the following nodes. Check the logs for more details.";N1.exports=Tme;async function Tme(e){cT.trace("configure cluster called with:",e);let t=pme(e);if(t)throw A1(t,t.message,b1.BAD_REQUEST,void 0,void 0,!0);let r=await hme.getAllNodeRecords(),n=[];if(dme.get(y1.CONFIG_PARAMS.CLUSTERING_ENABLED)){for(let d=0,f=r.length;d<f;d++){let m=await I1(fme,{operation:y1.OPERATIONS_ENUM.REMOVE_NODE,node_name:r[d].name},r[d].name);n.push(m)}cT.trace("All results from configure_cluster remove node:",n)}let s=[],i=e.connections.length;for(let d=0;d<i;d++){let f=e.connections[d],m=await I1(mme,f,f.node_name);s.push(m)}cT.trace("All results from configure_cluster add node:",s);let a=[],c=[],l=!1,u=n.concat(s);for(let d=0,f=u.length;d<f;d++){let m=u[d];m.status==="rejected"&&(cT.error(m.node_name,m?.error?.message,m?.error?.stack),a.includes(m.node_name)||a.push(m.node_name)),(m?.result?.message?.includes?.("Successfully")||m?.result?.includes?.("Successfully"))&&(l=!0),!(typeof m.result=="string"&&m.result.includes("Successfully removed")||m.status==="rejected")&&c.push({node_name:m?.node_name,response:m?.result})}if(ume.isEmptyOrZeroLength(a))return{message:_me,connections:c};if(l)return{message:Sme,failed_nodes:a,connections:c};throw A1(new Error,gme,b1.INTERNAL_SERVER_ERROR,void 0,void 0,!0)}o(Tme,"configureCluster");async function I1(e,t,r){try{return{node_name:r,result:await e(t)}}catch(n){return{node_name:r,error:n,status:"rejected"}}}o(I1,"functionWrapper")});var P1=M((uxe,C1)=>{"use strict";var op=require("joi"),Rme=st(),{validateSchemaExists:w1,validateTableExists:yme,validateSchemaName:O1}=qi(),Ame=op.object({operation:op.string().valid("purge_stream"),schema:op.string().custom(w1).custom(O1).optional(),database:op.string().custom(w1).custom(O1).optional(),table:op.string().custom(yme).required()});function bme(e){return Rme.validateBySchema(e,Ame)}o(bme,"purgeStreamValidator");C1.exports=bme});var VO=M((fxe,L1)=>{"use strict";var{handleHDBError:Ime,hdbErrors:Nme}=ge(),{HTTP_STATUS_CODES:wme}=Nme,Ome=P1(),Cme=Ht(),Pme=vs();L1.exports=Lme;async function Lme(e){e.schema=e.schema??e.database;let t=Ome(e);if(t)throw Ime(t,t.message,wme.BAD_REQUEST,void 0,void 0,!0);Pme.checkClusteringEnabled();let{schema:r,table:n,options:s}=e;return await Cme.purgeTableStream(r,n,s),`Successfully purged table '${r}.${n}'`}o(Lme,"purgeStream")});var WO=M((hxe,F1)=>{"use strict";var 
YO=vs(),Dme=Ht(),uT=oe(),jd=(H(),v(Y)),kl=ft(),Mme=se(),KO=j(),{RemotePayloadObject:vme}=Xh(),{ErrorCode:D1}=require("nats"),{parentPort:M1}=require("worker_threads"),{onMessageByType:Ume}=ze(),{getThisNodeName:xme}=(is(),v(ko)),{requestClusterStatus:Bme}=(Vh(),v(r$)),{getReplicationSharedStatus:Fme,getHDBNodeTable:Hme}=(Dl(),v(Mw)),{CONFIRMATION_STATUS_POSITION:kme,RECEIVED_VERSION_POSITION:qme,RECEIVED_TIME_POSITION:Gme,SENDING_TIME_POSITION:$me,RECEIVING_STATUS_POSITION:Vme,RECEIVING_STATUS_RECEIVING:Kme}=(sO(),v(Y$)),v1=uT.get(jd.CONFIG_PARAMS.CLUSTERING_ENABLED),U1=uT.get(jd.CONFIG_PARAMS.CLUSTERING_NODENAME);F1.exports={clusterStatus:Yme,buildNodeStatus:B1};var x1;Ume("cluster-status",async e=>{x1(e)});async function Yme(){if(uT.get(jd.CONFIG_PARAMS.REPLICATION_URL)||uT.get(jd.CONFIG_PARAMS.REPLICATION_HOSTNAME)){let n;if(M1){M1.postMessage({type:"request-cluster-status"}),n=await new Promise(i=>{x1=i});for(let i of n.connections){let a=i.name;for(let c of i.database_sockets){let l=c.database,u;for(let f of Object.values(databases[l]||{}))if(u=f.auditStore,u)break;if(!u)continue;let d=Fme(u,l,a);c.lastCommitConfirmed=lT(d[kme]),c.lastReceivedRemoteTime=lT(d[qme]),c.lastReceivedLocalTime=lT(d[Gme]),c.sendingMessage=lT(d[$me]),c.lastReceivedStatus=d[Vme]===Kme?"Receiving":"Waiting"}}}else n=Bme();n.node_name=xme();let s=Hme().primaryStore.get(n.node_name);return s?.shard&&(n.shard=s.shard),s?.url&&(n.url=s.url),n.is_enabled=!0,n}let e={node_name:U1,is_enabled:v1,connections:[]};if(!v1)return e;let t=await YO.getAllNodeRecords();if(Mme.isEmptyOrZeroLength(t))return e;let r=[];for(let n=0,s=t.length;n<s;n++)r.push(B1(t[n],e.connections));return await Promise.allSettled(r),e}o(Yme,"clusterStatus");function lT(e){return e?e===1?"Copying":new Date(e).toUTCString():void 0}o(lT,"asDate");async function B1(e,t){let r=e.name,n=new vme(jd.OPERATIONS_ENUM.CLUSTER_STATUS,U1,void 0,await YO.getSystemInfo()),s,i,a=kl.CLUSTER_STATUS_STATUSES.OPEN;try{let l=Date.now();s=await Dme.request(kl.REQUEST_SUBJECT(r),n),i=Date.now()-l,s.status===kl.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR&&(a=kl.CLUSTER_STATUS_STATUSES.CLOSED,KO.error(`Error getting node status from ${r} `,s))}catch(l){KO.warn(`Error getting node status from ${r}`,l),l.code===D1.NoResponders?a=kl.CLUSTER_STATUS_STATUSES.NO_RESPONDERS:l.code===D1.Timeout?a=kl.CLUSTER_STATUS_STATUSES.TIMEOUT:a=kl.CLUSTER_STATUS_STATUSES.CLOSED}let c=new Wme(r,a,s?.message?.ports?.clustering,s?.message?.ports?.operations_api,i,s?.message?.uptime,e.subscriptions,s?.message?.system_info);try{let l={name:r,system_info:s?.message?.system_info};e.system_info?.hdb_version!==jd.PRE_4_0_0_VERSION&&await YO.upsertNodeRecord(l)}catch(l){KO.error("Cluster status encountered an error updating system info for node:",r,l)}t.push(c)}o(B1,"buildNodeStatus");function Wme(e,t,r,n,s,i,a,c){this.node_name=e,this.status=t,this.ports={clustering:r,operations_api:n},this.latency_ms=s,this.uptime=i,this.subscriptions=a,this.system_info=c}o(Wme,"NodeStatusObject")});var jO=M((Exe,H1)=>{"use strict";var{handleHDBError:zme,hdbErrors:jme}=ge(),{HTTP_STATUS_CODES:Qme}=jme,Jme=Ht(),Xme=vs(),zO=se(),dT=require("joi"),Zme=st(),ehe=2e3,the=dT.object({timeout:dT.number().min(1),connected_nodes:dT.boolean(),routes:dT.boolean()});H1.exports=rhe;async function rhe(e){Xme.checkClusteringEnabled();let t=Zme.validateBySchema(e,the);if(t)throw zme(t,t.message,Qme.BAD_REQUEST,void 0,void 0,!0);let{timeout:r,connected_nodes:n,routes:s}=e,i=n===void 0||zO.autoCastBoolean(n),a=s===void 
0||zO.autoCastBoolean(s),c={nodes:[]},l=await Jme.getServerList(r??ehe),u={};if(i)for(let d=0,f=l.length;d<f;d++){let m=l[d].statsz;m&&(u[l[d].server.name]=m.routes)}for(let d=0,f=l.length;d<f;d++){if(l[d].statsz)continue;let m=l[d].server,h=l[d].data;if(m.name.endsWith("-hub")){let p={name:m.name.slice(0,-4),response_time:l[d].response_time};i&&(p.connected_nodes=[],u[m.name]&&u[m.name].forEach(_=>{p.connected_nodes.includes(_.name.slice(0,-4))||p.connected_nodes.push(_.name.slice(0,-4))})),a&&(p.routes=h.cluster?.urls?h.cluster?.urls.map(_=>({host:_.split(":")[0],port:zO.autoCast(_.split(":")[1])})):[]),c.nodes.push(p)}}return c}o(rhe,"clusterNetwork")});var $1=M((gxe,G1)=>{"use strict";var QO=require("joi"),k1=st(),{routeConstraints:q1}=ZI();G1.exports={setRoutesValidator:nhe,deleteRoutesValidator:she};function nhe(e){let t=QO.object({server:QO.valid("hub","leaf"),routes:q1.required()});return k1.validateBySchema(e,t)}o(nhe,"setRoutesValidator");function she(e){let t=QO.object({routes:q1.required()});return k1.validateBySchema(e,t)}o(she,"deleteRoutesValidator")});var fT=M((Txe,Q1)=>{"use strict";var Ko=It(),JO=se(),xs=(H(),v(Y)),Qd=oe(),V1=$1(),{handleHDBError:K1,hdbErrors:ihe}=ge(),{HTTP_STATUS_CODES:Y1}=ihe,W1="cluster routes successfully set",z1="cluster routes successfully deleted";Q1.exports={setRoutes:ahe,getRoutes:che,deleteRoutes:lhe};function ohe(e){let t=Ko.getClusteringRoutes(),r=e.server==="hub"?t.hub_routes:t.leaf_routes,n=e.server==="hub"?t.leaf_routes:t.hub_routes,s=[],i=[];for(let a=0,c=e.routes.length;a<c;a++){let l=e.routes[a];l.port=JO.autoCast(l.port);let u=r.some(f=>f.host===l.host&&f.port===l.port),d=n.some(f=>f.host===l.host&&f.port===l.port);u||d?s.push(l):(r.push(l),i.push(l))}return e.server==="hub"?Ko.updateConfigValue(xs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r):Ko.updateConfigValue(xs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,r),{message:W1,set:i,skipped:s}}o(ohe,"setRoutesNats");function ahe(e){let t=V1.setRoutesValidator(e);if(t)throw K1(t,t.message,Y1.BAD_REQUEST,void 0,void 0,!0);if(Qd.get(xs.CONFIG_PARAMS.CLUSTERING_ENABLED))return ohe(e);let r=[],n=[],s=Qd.get(xs.CONFIG_PARAMS.REPLICATION_ROUTES)??[];return e.routes.forEach(i=>{j1(s,i)?n.push(i):(s.push(i),r.push(i))}),Ko.updateConfigValue(xs.CONFIG_PARAMS.REPLICATION_ROUTES,s),{message:W1,set:r,skipped:n}}o(ahe,"setRoutes");function j1(e,t){return typeof t=="string"?e.includes(t):typeof t=="object"&&t!==null?e.some(r=>(r.host===t.host||r.hostname===t.hostname)&&r.port===t.port):!1}o(j1,"existsInArray");function che(){if(Qd.get(xs.CONFIG_PARAMS.CLUSTERING_ENABLED)){let e=Ko.getClusteringRoutes();return{hub:e.hub_routes,leaf:e.leaf_routes}}else return Qd.get(xs.CONFIG_PARAMS.REPLICATION_ROUTES)??[]}o(che,"getRoutes");function lhe(e){let t=V1.deleteRoutesValidator(e);if(t)throw K1(t,t.message,Y1.BAD_REQUEST,void 0,void 0,!0);if(Qd.get(xs.CONFIG_PARAMS.CLUSTERING_ENABLED))return uhe(e);let r=[],n=[],s=Qd.get(xs.CONFIG_PARAMS.REPLICATION_ROUTES)??[],i=[];return s.forEach(a=>{j1(e.routes,a)?r.push(a):(i.push(a),n.push(a))}),Ko.updateConfigValue(xs.CONFIG_PARAMS.REPLICATION_ROUTES,i),{message:z1,deleted:r,skipped:n}}o(lhe,"deleteRoutes");function uhe(e){let t=Ko.getClusteringRoutes(),r=t.hub_routes,n=t.leaf_routes,s=[],i=[],a=!1,c=!1;for(let l=0,u=e.routes.length;l<u;l++){let d=e.routes[l],f=!1;for(let m=0,h=r.length;m<h;m++){let p=r[m];if(d.host===p.host&&d.port===p.port){r.splice(m,1),f=!0,a=!0,s.push(d);break}}if(!f){let m=!0;for(let h=0,p=n.length;h<p;h++){let 
_=n[h];if(d.host===_.host&&d.port===_.port){n.splice(h,1),c=!0,m=!1,s.push(d);break}}m&&i.push(d)}}return a&&(r=JO.isEmptyOrZeroLength(r)?null:r,Ko.updateConfigValue(xs.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r)),c&&(n=JO.isEmptyOrZeroLength(n)?null:n,Ko.updateConfigValue(xs.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,n)),{message:z1,deleted:s,skipped:i}}o(uhe,"deleteRoutesNats")});var X1=M((yxe,J1)=>{"use strict";var ap=require("alasql"),ql=require("recursive-iterator"),Si=j(),dhe=se(),cp=(H(),v(Y)),XO=class{static{o(this,"sqlStatementBucket")}constructor(t){this.ast=t,this.affected_attributes=new Map,this.table_lookup=new Map,this.schema_lookup=new Map,this.table_to_schema_lookup=new Map,mhe(this.ast,this.affected_attributes,this.table_lookup,this.schema_lookup,this.table_to_schema_lookup)}getAttributesBySchemaTableName(t,r){if(!t||!r||!this.affected_attributes)return[];if(this.affected_attributes.has(t))return!this.affected_attributes.get(t).has(r)&&(r=this.table_lookup.get(r),!r)?[]:this.affected_attributes.get(t).get(r)}getAllTables(){let t=[];if(!this.affected_attributes)return t;for(let r of this.affected_attributes.keys())t.push(Array.from(this.affected_attributes.get(r).keys()));return t}getTablesBySchemaName(t){return!t||!this.affected_attributes?[]:Array.from(this.affected_attributes.get(t).keys())}getSchemas(){return this.affected_attributes?Array.from(this.affected_attributes.keys()):[]}getAst(){return this.ast}updateAttributeWildcardsForRolePerms(t){let r=this.ast.columns.filter(s=>cp.SEARCH_WILDCARDS.includes(s.columnid));if(r.length===0)return this.ast;let n=this.ast.from[0].databaseid;return this.ast.columns=this.ast.columns.filter(s=>!cp.SEARCH_WILDCARDS.includes(s.columnid)),r.forEach(s=>{let i=this.table_to_schema_lookup.has(s.tableid)?this.table_to_schema_lookup.get(s.tableid):n,a=this.table_lookup.has(s.tableid)?this.table_lookup.get(s.tableid):this.ast.from[0].tableid;if(t[i]&&t[i].tables[a]&&t[i].tables[a][cp.PERMS_CRUD_ENUM.READ]){let c;t[i].tables[a].attribute_permissions.length>0?c=fhe(t[i].tables[a].attribute_permissions):c=global.hdb_schema[i][a].attributes.map(u=>({attribute_name:u.attribute}));let l=this.affected_attributes.get(i).get(a).filter(u=>!cp.SEARCH_WILDCARDS.includes(u));c.forEach(({attribute_name:u})=>{let d=new ap.yy.Column({columnid:u});s.tableid&&(d.tableid=s.tableid),this.ast.columns.push(d),l.includes(u)||l.push(u)}),this.affected_attributes.get(i).set(a,l)}}),this.ast}};function fhe(e){return e.filter(t=>t[cp.PERMS_CRUD_ENUM.READ])}o(fhe,"filterReadRestrictedAttrs");function mhe(e,t,r,n,s){hhe(e,t,r,n,s)}o(mhe,"interpretAST");function lp(e,t,r,n,s){if(!(!e||!e.databaseid)&&(t.has(e.databaseid)||t.set(e.databaseid,new Map),t.get(e.databaseid).has(e.tableid)||t.get(e.databaseid).set(e.tableid,[]),e.as&&(r.has(e.as)||r.set(e.as,e.tableid),n&&!n.has(e.as)&&n.set(e.as,e.databaseid)),s)){let i=e.databaseid,a=e.tableid;e.as&&(a=e.as),s.set(a,i)}}o(lp,"addSchemaTableToMap");function hhe(e,t,r,n,s){if(!e){Si.info("getRecordAttributesAST: invalid SQL syntax tree");return}e instanceof ap.yy.Insert?ghe(e,t,r):e instanceof ap.yy.Select?phe(e,t,r,n,s):e instanceof ap.yy.Update?Ehe(e,t,r):e instanceof ap.yy.Delete?_he(e,t,r):Si.error("AST in getRecordAttributesAST() is not a valid SQL type.")}o(hhe,"getRecordAttributesAST");function phe(e,t,r,n,s){if(!e){Si.info("getSelectAttributes: invalid SQL syntax tree");return}if(!e.from||e.from[0]===void 0)return;let i=e.from[0].databaseid;if(dhe.isEmptyOrZeroLength(i)){Si.error("No schema 
specified");return}e.from.forEach(c=>{lp(c,t,r,n,s)}),e.joins&&e.joins.forEach(c=>{c.as&&(c.table.as=c.as),lp(c.table,t,r,n,s)});let a=new ql(e.columns);for(let{node:c}of a)if(c&&c.columnid){let l=c.tableid,u=n.has(l)?n.get(l):i;if(l||(l=e.from[0].tableid),!t.get(u).has(l))if(r.has(l))l=r.get(l);else{Si.info(`table specified as ${l} not found.`);return}t.get(u).get(l).indexOf(c.columnid)<0&&t.get(u).get(l).push(c.columnid)}if(e.where){let c=new ql(e.where),l=e.from[0].tableid;for(let{node:u}of c)if(u&&u.columnid){let d=u.tableid?u.tableid:l;if(!t.get(i).has(d))if(r.has(d))d=r.get(d);else{Si.info(`table specified as ${d} not found.`);continue}t.get(i).get(d).indexOf(u.columnid)<0&&t.get(i).get(d).push(u.columnid)}}if(e.joins&&e.joins.forEach(c=>{let l=new ql(c.on);for(let{node:u}of l)if(u&&u.columnid){let d=u.tableid,f=s.get(d);if(!t.get(f).has(d))if(r.has(d))d=r.get(d);else{Si.info(`table specified as ${d} not found.`);continue}t.get(f).get(d).indexOf(u.columnid)<0&&t.get(f).get(d).push(u.columnid)}}),e.order){let c=new ql(e.order);for(let{node:l}of c)if(l&&l.columnid){let u=l.tableid,d=n.has(u)?n.get(u):i;if(u||(u=e.from[0].tableid),!t.get(d).has(u))if(r.has(u))u=r.get(u);else{Si.info(`table specified as ${u} not found.`);return}t.get(d).get(u).indexOf(l.columnid)<0&&t.get(d).get(u).push(l.columnid)}}}o(phe,"getSelectAttributes");function Ehe(e,t,r){if(!e){Si.info("getUpdateAttributes: invalid SQL syntax tree");return}let n=new ql(e.columns),s=e.table.databaseid;lp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&ZO(e.table.tableid,s,i.columnid,t,r)}o(Ehe,"getUpdateAttributes");function _he(e,t,r){if(!e){Si.info("getDeleteAttributes: invalid SQL syntax tree");return}let n=new ql(e.where),s=e.table.databaseid;lp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&ZO(e.table.tableid,s,i.columnid,t,r)}o(_he,"getDeleteAttributes");function ghe(e,t,r){if(!e){Si.info("getInsertAttributes: invalid SQL syntax tree");return}let n=new ql(e.columns),s=e.into.databaseid;lp(e.into,t,r);for(let{node:i}of n)i&&i.columnid&&ZO(e.into.tableid,s,i.columnid,t,r)}o(ghe,"getInsertAttributes");function ZO(e,t,r,n,s){if(!n.get(t))return;let i=e;n.get(t).has(i)||(i=s.get(i)),n.get(t).get(i).push(r)}o(ZO,"pushAttribute");J1.exports=XO});var eK=M((bxe,Z1)=>{"use strict";var mT=(H(),v(Y)),hT=class{static{o(this,"BaseLicense")}constructor(t=0,r=mT.RAM_ALLOCATION_ENUM.DEFAULT,n=mT.LICENSE_VALUES.VERSION_DEFAULT,s){this.exp_date=t,this.ram_allocation=r,this.version=n,this.fingerprint=s}},eC=class extends hT{static{o(this,"ExtendedLicense")}constructor(t=0,r=mT.RAM_ALLOCATION_ENUM.DEFAULT,n=mT.LICENSE_VALUES.VERSION_DEFAULT,s,i=!1){super(t,r,n,s),this.enterprise=i}};Z1.exports={BaseLicense:hT,ExtendedLicense:eC}});var Zd=M((Nxe,oK)=>{"use strict";var Xd=require("fs-extra"),pT=(yg(),v(Rg)),rK=require("crypto"),She=require("moment"),The=require("uuid").v4,pn=j(),rC=require("path"),Rhe=se(),Gl=(H(),v(Y)),{totalmem:tK}=require("os"),yhe=eK().ExtendedLicense,Jd="invalid license key format",Ahe="061183",bhe="mofi25",Ihe="aes-256-cbc",Nhe=16,whe=32,nK=oe(),{resolvePath:sK}=It();nK.initSync();var tC;oK.exports={validateLicense:iK,generateFingerPrint:Che,licenseSearch:iC,getLicense:Dhe,checkMemoryLimit:Mhe};function nC(){return rC.join(nK.getHdbBasePath(),Gl.LICENSE_KEY_DIR_NAME,Gl.LICENSE_FILE_NAME)}o(nC,"getLicenseDirPath");function Ohe(){let e=nC();return sK(rC.join(e,Gl.LICENSE_FILE_NAME))}o(Ohe,"getLicenseFilePath");function sC(){let e=nC();return sK(rC.join(e,Gl.REG_KEY_FILE_NAME))}o(sC,"getFingerPrintFilePath");async function 
Che(){let e=sC();try{return await Xd.readFile(e,"utf8")}catch(t){if(t.code==="ENOENT")return await Phe();throw pn.error(`Error writing fingerprint file to ${e}`),pn.error(t),new Error("There was an error generating the fingerprint")}}o(Che,"generateFingerPrint");async function Phe(){let e=The(),t=pT.hash(e,pT.HASH_FUNCTION.MD5),r=sC();try{await Xd.mkdirp(nC()),await Xd.writeFile(r,t)}catch(n){if(n.code==="EEXIST")return t;throw pn.error(`Error writing fingerprint file to ${r}`),pn.error(n),new Error("There was an error generating the fingerprint")}return t}o(Phe,"writeFingerprint");function iK(e,t){let r={valid_license:!1,valid_date:!1,valid_machine:!1,exp_date:null,ram_allocation:Gl.RAM_ALLOCATION_ENUM.DEFAULT,version:Gl.LICENSE_VALUES.VERSION_DEFAULT};if(!e)return pn.error("empty license key passed to validate."),r;let n=sC(),s=!1;try{s=Xd.statSync(n)}catch(i){pn.error(i)}if(s){let i;try{i=Xd.readFileSync(n,"utf8")}catch{pn.error("error validating this machine in the license"),r.valid_machine=!1;return}let a=e.split(bhe),c=a[1];c=Buffer.concat([Buffer.from(c)],Nhe);let l=Buffer.concat([Buffer.from(i)],whe),u=rK.createDecipheriv(Ihe,l,c);r.valid_date=!0,r.valid_license=!0,r.valid_machine=!0;let d=null;try{d=u.update(a[0],"hex","utf8"),d.trim(),d+=u.final("utf8")}catch{let h=Lhe(a[0],i);if(h)d=h;else throw r.valid_license=!1,r.valid_machine=!1,console.error(Jd),pn.error(Jd),new Error(Jd)}let f;if(isNaN(d))try{f=JSON.parse(d),r.version=f.version,r.exp_date=f.exp_date,isNaN(r.exp_date)&&(r.exp_date=new Date(r.exp_date).getTime()),f.ram_allocation&&(r.ram_allocation=f.ram_allocation)}catch{throw console.error(Jd),pn.error(Jd),new Error(Jd)}else r.exp_date=d;r.exp_date<She().valueOf()&&(r.valid_date=!1),pT.validate(a[1],`${Ahe}${i}${t}`,pT.HASH_FUNCTION.MD5)||(r.valid_license=!1)}else r.valid_license=!1,r.valid_machine=!1;return r.valid_license&&r.valid_machine&&r.valid_date||pn.error("Invalid licence"),r}o(iK,"validateLicense");function Lhe(e,t){try{let r=rK.createDecipher("aes192",t),n=r.update(e,"hex","utf8");return n.trim(),n+=r.final("utf8"),n}catch{pn.warn("Check old license failed")}}o(Lhe,"checkOldLicense");function iC(){let e=new yhe,t=[];try{t=Xd.readFileSync(Ohe(),"utf-8").split(`\r
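// Illustrative sketch only, not the shipped implementation: the bundled
// validateLicense above appears to read a machine fingerprint from disk, derive an
// AES-256-CBC key from it (zero-padded to 32 bytes) and an IV from part of the
// license string (padded to 16 bytes), decrypt the remaining portion, and read the
// expiry / RAM allocation from the decrypted payload. The delimiter argument and the
// payload field names below are placeholders, not confirmed constants.
const crypto = require('crypto');

function sketchValidateLicense(licenseKey, fingerprint, delimiter /* assumed */) {
  const [cipherHex, ivSeed] = licenseKey.split(delimiter);
  const key = Buffer.concat([Buffer.from(fingerprint)], 32); // zero-pad/truncate to 32 bytes
  const iv = Buffer.concat([Buffer.from(ivSeed)], 16);       // zero-pad/truncate to 16 bytes
  const decipher = crypto.createDecipheriv('aes-256-cbc', key, iv);
  const plain = decipher.update(cipherHex, 'hex', 'utf8') + decipher.final('utf8');
  const payload = JSON.parse(plain); // e.g. { exp_date, ram_allocation, version }
  return {
    valid_date: new Date(payload.exp_date).getTime() > Date.now(),
    exp_date: payload.exp_date,
    ram_allocation: payload.ram_allocation,
  };
}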
19
- `)}catch(r){r.code==="ENOENT"?pn.debug("no license file found"):pn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(Rhe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=iK(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){pn.error("There was an error parsing the license string."),pn.error(s),e.ram_allocation=Gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return tC=e,e}o(iC,"licenseSearch");async function Dhe(){return tC||await iC(),tC}o(Dhe,"getLicense");function Mhe(){let e=iC().ram_allocation,t=process.constrainedMemory?.()||tK();if(t=Math.round(Math.min(t,tK())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(Mhe,"checkMemoryLimit")});var cC=M((Oxe,uK)=>{var ET=Zd(),aK=require("chalk"),ls=j(),cK=require("prompt"),{promisify:vhe}=require("util"),oC=(H(),v(Y)),Uhe=require("fs-extra"),xhe=require("path"),Bhe=se(),{packageJson:Fhe}=yt(),lK=oe();lK.initSync();var Hhe=require("moment"),khe=vhe(cK.get),qhe=xhe.join(lK.getHdbBasePath(),oC.LICENSE_KEY_DIR_NAME,oC.LICENSE_FILE_NAME,oC.LICENSE_FILE_NAME);uK.exports={getFingerprint:$he,setLicense:Ghe,parseLicense:aC,register:Vhe,getRegistrationInfo:Yhe};async function Ghe(e){if(e&&e.key&&e.company){try{ls.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await aC(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw ls.error(r),ls.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(Ghe,"setLicense");async function $he(){let e={};try{e=await ET.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw ls.error(r),ls.error(t),new Error(r)}return e}o($he,"getFingerprint");async function aC(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");ls.info("Validating license input...");let r=ET.validateLicense(e,t);if(ls.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(ls.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(ls.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{ls.info("writing license to disk"),await Uhe.writeFile(qhe,JSON.stringify({license_key:e,company:t}))}catch(n){throw ls.error("Failed to write License"),n}return"Registration successful."}o(aC,"parseLicense");async function Vhe(){let e=await Khe();return aC(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(Vhe,"register");async function Khe(){let e=await ET.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:aK.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:aK.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{cK.start()}catch(n){ls.error(n)}let r;try{r=await khe(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(Khe,"promptForRegistration");async function Yhe(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await ET.getLicense()}catch(r){throw ls.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Bhe.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Fhe.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=Hhe.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(Yhe,"getRegistrationInfo")});var fK=M((Pxe,dK)=>{"use strict";var Whe=ft(),lC=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+Whe.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};dK.exports=lC});var pK=M((Dxe,hK)=>{"use strict";var mK=ft(),uC=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+mK.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+mK.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};hK.exports=uC});var _K=M((vxe,EK)=>{"use strict";var dC=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};EK.exports=dC});var SK=M((xxe,gK)=>{"use strict";var zhe=ft(),fC=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+zhe.SERVER_SUFFIX.ADMIN,this.password=r}};gK.exports=fC});var TT=M((Fxe,yK)=>{"use strict";var $l=require("path"),Vl=require("fs-extra"),jhe=fK(),Qhe=pK(),Jhe=_K(),Xhe=SK(),mC=rs(),tf=se(),Bn=It(),gT=(H(),v(Y)),up=ft(),{CONFIG_PARAMS:nr}=gT,rf=j(),dp=oe(),TK=Ki(),hC=Ht(),Zhe=as(),ef="clustering",epe=1e4,RK=50;yK.exports={generateNatsConfig:rpe,removeNatsConfig:npe,getHubConfigPath:tpe};function tpe(){let e=dp.get(nr.ROOTPATH);return $l.join(e,ef,up.NATS_CONFIG_FILES.HUB_SERVER)}o(tpe,"getHubConfigPath");async function rpe(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=dp.get(nr.ROOTPATH);Vl.ensureDirSync($l.join(r,"clustering","leaf")),dp.initSync();let n=Bn.getConfigFromFile(nr.CLUSTERING_TLS_CERT_AUTH),s=Bn.getConfigFromFile(nr.CLUSTERING_TLS_PRIVATEKEY),i=Bn.getConfigFromFile(nr.CLUSTERING_TLS_CERTIFICATE);!await Vl.exists(i)&&!await Vl.exists(!n)&&await Zhe.createNatsCerts();let 
a=$l.join(r,ef,up.PID_FILES.HUB),c=$l.join(r,ef,up.PID_FILES.LEAF),l=Bn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=$l.join(r,ef,up.NATS_CONFIG_FILES.HUB_SERVER),d=$l.join(r,ef,up.NATS_CONFIG_FILES.LEAF_SERVER),f=Bn.getConfigFromFile(nr.CLUSTERING_TLS_INSECURE),m=Bn.getConfigFromFile(nr.CLUSTERING_TLS_VERIFY),h=Bn.getConfigFromFile(nr.CLUSTERING_NODENAME),p=Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await hC.checkNATSServerInstalled()||ST("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await mC.listUsers(),g=Bn.getConfigFromFile(nr.CLUSTERING_USER),R=await mC.getClusterUser();(tf.isEmpty(R)||R.active!==!0)&&ST(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await _T(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await _T(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await _T(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),await _T(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],y=[];for(let[G,K]of _.entries())K.role?.role===gT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new Xhe(K.username,TK.decrypt(K.hash))),y.push(new Jhe(K.username,TK.decrypt(K.hash))));let N=[],{hub_routes:O}=Bn.getClusteringRoutes();if(!tf.isEmptyOrZeroLength(O))for(let G of O)N.push(`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@${G.host}:${G.port}`);let F=new jhe(Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NAME),Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,y);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=tf.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===gT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Vl.writeJson(u,F),rf.trace(`Hub server config written to ${u}`));let Z=`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@0.0.0.0:${p}`,q=`tls://${R.uri_encoded_name}:${R.uri_encoded_d_hash}@0.0.0.0:${p}`,W=new Qhe(Bn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[q],T,y,i,s,n,f);n==null&&delete W.tls.ca_file,(t===void 0||t===gT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Vl.writeJson(d,W),rf.trace(`Leaf server config written to ${d}`))}o(rpe,"generateNatsConfig");async function _T(e){let t=dp.get(e);return tf.isEmpty(t)&&ST(`port undefined for '${e}'`),await tf.isPortTaken(t)&&ST(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(_T,"isPortAvailable");function ST(e){let t=`Error generating clustering config: ${e}`;rf.error(t),console.error(t),process.exit(1)}o(ST,"generateNatsConfigError");async function npe(e){let{port:t,config_file:r}=hC.getServerConfig(e),{username:n,decrypt_hash:s}=await mC.getClusterUser(),i=0,a=2e3;for(;i<RK;){try{let d=await hC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){rf.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=RK)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&rf.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await tf.asyncSetTimeout(u)}let c="0".repeat(epe),l=$l.join(dp.get(nr.ROOTPATH),ef,r);await Vl.writeFile(l,c),await Vl.remove(l),rf.notify(e,"started.")}o(npe,"removeNatsConfig")});var OK=M((kxe,wK)=>{"use strict";var us=oe(),spe=Zd(),Ke=(H(),v(Y)),fp=ft(),Yo=require("path"),{PACKAGE_ROOT:yT}=yt(),AK=oe(),RT=se(),nf="/dev/null",ipe=Yo.join(yT,"launchServiceScripts"),bK=Yo.join(yT,"utility/scripts"),ope=Yo.join(bK,Ke.HDB_RESTART_SCRIPT),IK=Yo.resolve(yT,"dependencies",`${process.platform}-${process.arch}`,fp.NATS_BINARY_NAME);function NK(){let t=spe.licenseSearch().ram_allocation||Ke.RAM_ALLOCATION_ENUM.DEFAULT,r=Ke.MEM_SETTING_KEY+t,n={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return RT.noBootFile()&&(n[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=RT.getEnvCliRootPath()),{name:Ke.PROCESS_DESCRIPTORS.HDB,script:Ke.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:yT}}o(NK,"generateMainServerConfig");var ape=9930;function cpe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Yo.join(e,"clustering",fp.NATS_CONFIG_FILES.HUB_SERVER),r=Yo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=AK.get(Ke.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=fp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==ape?"-"+n:""),script:IK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=nf,i.error_file=nf),i}o(cpe,"generateNatsHubServerConfig");var lpe=9940;function upe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Yo.join(e,"clustering",fp.NATS_CONFIG_FILES.LEAF_SERVER),r=Yo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=AK.get(Ke.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=fp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==lpe?"-"+n:""),script:IK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=nf,i.error_file=nf),i}o(upe,"generateNatsLeafServerConfig");function dpe(){us.initSync();let e=Yo.join(us.get(Ke.CONFIG_PARAMS.LOGGING_ROOT),Ke.LOG_NAMES.HDB),t={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ke.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:ipe,autorestart:!1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=nf,t.error_file=nf),t}o(dpe,"generateClusteringUpgradeV4ServiceConfig");function fpe(){let e={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.RESTART_HDB};return RT.noBootFile()&&(e[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=RT.getEnvCliRootPath()),{...{name:Ke.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:bK},script:ope}}o(fpe,"generateRestart");function 
mpe(){return{apps:[NK()]}}o(mpe,"generateAllServiceConfigs");wK.exports={generateAllServiceConfigs:mpe,generateMainServerConfig:NK,generateRestart:fpe,generateNatsHubServerConfig:cpe,generateNatsLeafServerConfig:upe,generateClusteringUpgradeV4ServiceConfig:dpe}});var hp=M(($xe,GK)=>{"use strict";var lt=(H(),v(Y)),hpe=se(),zo=TT(),AT=Ht(),Wo=ft(),lc=OK(),bT=oe(),Kl=j(),ppe=vs(),{startWorker:CK,onMessageFromWorkers:Epe}=ze(),_pe=Kd(),Gxe=require("util"),gpe=require("child_process"),Spe=require("fs"),{execFile:Tpe}=gpe,Je;GK.exports={enterPM2Mode:Rpe,start:uc,stop:pC,reload:LK,restart:DK,list:EC,describe:UK,connect:jo,kill:Npe,startAllServices:wpe,startService:_C,getUniqueServicesList:xK,restartAllServices:Ope,isServiceRegistered:BK,reloadStopStart:FK,restartHdb:vK,deleteProcess:bpe,startClusteringProcesses:kK,startClusteringThreads:qK,isHdbRestartRunning:Ipe,isClusteringRunning:Ppe,stopClustering:Cpe,reloadClustering:Lpe,expectedRestartOfChildren:MK};var mp=!1;Epe(e=>{e.type==="restart"&&bT.initSync(!0)});function Rpe(){mp=!0}o(Rpe,"enterPM2Mode");function jo(){return Je||(Je=require("pm2")),new Promise((e,t)=>{Je.connect((r,n)=>{r&&t(r),e(n)})})}o(jo,"connect");var En,ype=10,PK;function uc(e,t=!1){if(mp)return Ape(e);let r=Tpe(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let a=En.indexOf(r);a>-1&&En.splice(a,1),!PK&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<ype&&(Spe.existsSync(zo.getHubConfigPath())?uc(e):(await zo.generateNatsConfig(!0),uc(e),await new Promise(c=>setTimeout(c,3e3)),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let a=bT.get(lt.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,d;for(;l=c.exec(i);){if(l.index&&Wo.LOG_LEVEL_HIERARCHY[a]>=Wo.LOG_LEVEL_HIERARCHY[d||"info"]){let h=d===Wo.LOG_LEVELS.ERR||d===Wo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",h,n,i.slice(u,l.index).trim())}let[f,m]=l;u=l.index+f.length,d=Wo.LOG_LEVELS[m]}if(Wo.LOG_LEVEL_HIERARCHY[a]>=Wo.LOG_LEVEL_HIERARCHY[d||"info"]){let f=d===Wo.LOG_LEVELS.ERR||d===Wo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",f,n,i.slice(u).trim())}}if(o(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!En&&(En=[],!t)){let i=o(()=>{PK=!0,En&&(En.map(a=>a.kill()),process.exit(0))},"killChildren");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}En.push(r)}o(uc,"start");function Ape(e){return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.start(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(Ape,"startWithPM2");function pC(e){if(!mp){for(let t of En||[])t.name===e&&(En.splice(En.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.stop(e,async(n,s)=>{n&&(Je.disconnect(),r(n)),Je.delete(e,(i,a)=>{i&&(Je.disconnect(),r(n)),Je.disconnect(),t(a)})})})}o(pC,"stop");function LK(e){return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.reload(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(LK,"reload");function DK(e){if(!mp){MK();for(let t of En||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.restart(e,(n,s)=>{Je.disconnect(),t(s)})})}o(DK,"restart");function MK(){for(let e of En||[])e.config&&(e.config.restarts=0)}o(MK,"expectedRestartOfChildren");function bpe(e){return 
new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.delete(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(bpe,"deleteProcess");async function vK(){await uc(lc.generateRestart())}o(vK,"restartHdb");async function Ipe(){let e=await EC();for(let t in e)if(e[t].name===lt.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}o(Ipe,"isHdbRestartRunning");function EC(){return new Promise(async(e,t)=>{try{await jo()}catch(r){t(r)}Je.list((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(EC,"list");function UK(e){return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.describe(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(UK,"describe");function Npe(){if(!mp){for(let e of En||[])e.kill();En=[];return}return new Promise(async(e,t)=>{try{await jo()}catch(r){t(r)}Je.killDaemon((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(Npe,"kill");async function wpe(){try{await kK(),await qK(),await uc(lc.generateAllServiceConfigs())}catch(e){throw Je?.disconnect(),e}}o(wpe,"startAllServices");async function _C(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case lt.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=lc.generateMainServerConfig();break;case lt.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=lc.generateNatsIngestServiceConfig();break;case lt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=lc.generateNatsReplyServiceConfig();break;case lt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=lc.generateNatsHubServerConfig(),await uc(r,t),await zo.removeNatsConfig(e);return;case lt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=lc.generateNatsLeafServerConfig(),await uc(r,t),await zo.removeNatsConfig(e);return;case lt.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=lc.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await uc(r)}catch(r){throw Je?.disconnect(),r}}o(_C,"startService");async function xK(){try{let e=await EC(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Je?.disconnect(),e}}o(xK,"getUniqueServicesList");async function Ope(e=[]){try{let t=!1,r=await xK();for(let n=0,s=Object.values(r).length;n<s;n++){let a=Object.values(r)[n].name;e.includes(a)||(a===lt.PROCESS_DESCRIPTORS.HDB?t=!0:await DK(a))}t&&await FK(lt.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Je?.disconnect(),t}}o(Ope,"restartAllServices");async function BK(e){if(En?.find(r=>r.name===e))return!0;let t=await _pe.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}o(BK,"isServiceRegistered");async function FK(e){let t=bT.get(lt.CONFIG_PARAMS.THREADS_COUNT)??bT.get(lt.CONFIG_PARAMS.THREADS),r=await UK(e),n=hpe.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await pC(e),await _C(e)):e===lt.PROCESS_DESCRIPTORS.HDB?await vK():await LK(e)}o(FK,"reloadStopStart");var HK;async function kK(e=!1){for(let t in lt.CLUSTERING_PROCESSES){let r=lt.CLUSTERING_PROCESSES[t];await _C(r,e)}}o(kK,"startClusteringProcesses");async function qK(){HK=CK(lt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:lt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await AT.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await AT.updateLocalStreams();let e=await ppe.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===lt.PRE_4_0_0_VERSION){Kl.info("Starting clustering upgrade 4.0.0 
process"),CK(lt.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}o(qK,"startClusteringThreads");async function Cpe(){for(let e in lt.CLUSTERING_PROCESSES)if(e!==lt.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===lt.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await HK.terminate();else{let t=lt.CLUSTERING_PROCESSES[e];await pC(t)}}o(Cpe,"stopClustering");async function Ppe(){for(let e in lt.CLUSTERING_PROCESSES){let t=lt.CLUSTERING_PROCESSES[e];if(await BK(t)===!1)return!1}return!0}o(Ppe,"isClusteringRunning");async function Lpe(){await zo.generateNatsConfig(!0),await AT.reloadNATSHub(),await AT.reloadNATSLeaf(),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}o(Lpe,"reloadClustering")});var TC={};Oe(TC,{compactOnStart:()=>Dpe,copyDb:()=>zK});async function Dpe(){dc.notify("Running compact on start"),console.log("Running compact on start");let e=(0,gC.get)(U.ROOTPATH),t=new Map,r=it();(0,SC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,IT.join)(e,"backup",n+".mdb"),a=(0,IT.join)(e,Bc,n+"-copy.mdb"),c=0;try{c=await $K(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){dc.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await zK(n,a),console.log("Backing up",n,"to",i);try{await(0,Yl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}}try{md()}catch(n){dc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}for(let[n,{dbPath:s,copyDest:i}]of t)console.log("Moving copy compacted",n,"to",s),await(0,Yl.move)(i,s,{overwrite:!0}),await(0,Yl.remove)((0,IT.join)(e,Bc,`${n}-copy.mdb-lock`));try{md()}catch(n){dc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){dc.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,SC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Yl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw md(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=!0,c=await $K(n);if(console.log("Database",n,"after compact has a total record count of",c),i!==c){a=!1;let l=`There is a discrepancy between pre and post compact record count for database ${n}.
20
- Total record count before compaction: ${i}, total after: ${c}.
21
- Database backup has not been removed and can be found here: ${s}`;dc.error(l),console.error(l)}(0,gC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)===!0||a===!1||(console.log("Removing backup",s),await(0,Yl.remove)(s))}}async function $K(e){let t=await(0,WK.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function sf(){}async function zK(e,t){console.log(`Copying database ${e} to ${t}`);let r=it()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=sf,m.primaryStore.remove=sf;for(let h in m.indices){let p=m.indices[h];p.put=sf,p.remove=sf}m.auditStore&&(m.auditStore.put=sf,m.auditStore.remove=sf),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,VK.open)(new KK.default(t)),c=a.openDB(NT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=Qg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let R=new YK.default(!p,p);R.encoding="binary",R.compression=_;let T=n.openDB(m,R);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",R.compression=g;let y=a.openDB(m,R);y.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,y,p,d)}if(i){let m=n.openDB(NT.AUDIT_STORE_NAME,ym);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,R=0,T=0,y=1e7,N=null;for(;y-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),R+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",R,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",R,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var VK,IT,Yl,gC,KK,YK,NT,WK,SC,dc,RC=ue(()=>{De();VK=require("lmdb"),IT=require("path"),Yl=require("fs-extra"),gC=w(oe()),KK=w(Dm()),YK=w(Lm()),NT=w($t());H();Mi();WK=w(vo()),SC=w(It()),dc=w(j());o(Dpe,"compactOnStart");o($K,"getTotalDBRecordCount");o(sf,"noop");o(zK,"copyDb")});var af=M((Xxe,tY)=>{"use strict";var Mpe=require("minimist"),{isMainThread:AC,parentPort:Ep,threadId:jxe}=require("worker_threads"),mt=(H(),v(Y)),no=j(),bC=se(),OT=TT(),wT=Ht(),Qxe=ft(),XK=It(),Ti=hp(),jK=Kd(),{compactOnStart:vpe}=(RC(),v(TC)),Upe=Fc(),{restartWorkers:CT,onMessageByType:xpe}=ze(),{handleHDBError:Bpe,hdbErrors:Fpe}=ge(),{HTTP_STATUS_CODES:Hpe}=Fpe,_p=oe(),{sendOperationToNode:QK,getThisNodeName:kpe,monitorNodeCAs:qpe}=(is(),v(ko)),{getHDBNodeTable:Jxe}=(Dl(),v(Mw));_p.initSync();var pp=`Restarting HarperDB. This may take up to ${mt.RESTART_TIMEOUT_MS/1e3} seconds.`,Gpe="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",JK="Clustering is not enabled so cannot be restarted",$pe="Invalid service",of,Bs;tY.exports={restart:ZK,restartService:IC};AC&&xpe(mt.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await IC({service:e.workerType}):ZK({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function ZK(e){Bs=Object.keys(e).length===0,of=await Ti.isServiceRegistered(mt.PROCESS_DESCRIPTORS.HDB);let t=Mpe(process.argv);if(t.service){await IC(t);return}if(Bs&&!of){console.error(Gpe);return}if(Bs&&console.log(pp),of){Ti.enterPM2Mode(),no.notify(pp);let r=Upe(Object.keys(mt.CONFIG_PARAM_MAP),!0);return bC.isEmptyOrZeroLength(Object.keys(r))||XK.updateConfigValue(void 0,void 0,r,!0,!0),Vpe(),pp}return AC?(no.notify(pp),_p.get(mt.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await vpe(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{CT()},50)):Ep.postMessage({type:mt.ITC_EVENT_TYPES.RESTART}),pp}o(ZK,"restart");async function IC(e){let{service:t}=e;if(mt.HDB_PROCESS_SERVICES[t]===void 0)throw Bpe(new Error,$pe,Hpe.BAD_REQUEST,void 0,void 0,!0);if(Ti.expectedRestartOfChildren(),of=await Ti.isServiceRegistered(mt.PROCESS_DESCRIPTORS.HDB),!AC){e.replicated&&qpe(),Ep.postMessage({type:mt.ITC_EVENT_TYPES.RESTART,workerType:t}),Ep.ref(),await new Promise(s=>{Ep.on("message",i=>{i.type==="restart-complete"&&(s(),Ep.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===kpe())continue;let i;try{({job_id:i}=await QK(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await QK(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case mt.HDB_PROCESS_SERVICES.clustering:if(!_p.get(mt.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=JK;break}Bs&&console.log("Restarting clustering"),no.notify("Restarting clustering"),await eY();break;case mt.HDB_PROCESS_SERVICES.clustering_config:case mt.HDB_PROCESS_SERVICES["clustering config"]:if(!_p.get(mt.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=JK;break}Bs&&console.log("Restarting clusteringConfig"),no.notify("Restarting clustering_config"),await Ti.reloadClustering();break;case"custom_functions":case"custom functions":case mt.HDB_PROCESS_SERVICES.harperdb:case mt.HDB_PROCESS_SERVICES.http_workers:case mt.HDB_PROCESS_SERVICES.http:if(Bs&&!of){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Bs&&console.log("Restarting httpWorkers"),no.notify("Restarting http_workers"),Bs?await Ti.restart(mt.PROCESS_DESCRIPTORS.HDB):await CT("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(no.error(r),Bs&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(IC,"restartService");async function Vpe(){await eY(),await Ti.restart(mt.PROCESS_DESCRIPTORS.HDB),await bC.asyncSetTimeout(2e3),_p.get(mt.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await yC(),Bs&&(await wT.closeConnection(),process.exit(0))}o(Vpe,"restartPM2Mode");async function eY(){if(!XK.getConfigFromFile(mt.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await jK.getHDBProcessInfo()).clustering.length===0)no.trace("Clustering not running, restart will start clustering services"),await OT.generateNatsConfig(!0),await Ti.startClusteringProcesses(),await Ti.startClusteringThreads(),await yC(),Bs&&await wT.closeConnection();else{await OT.generateNatsConfig(!0),of?(no.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Ti.restart(mt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Ti.restart(mt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await jK.getHDBProcessInfo()).clustering.forEach(s=>{no.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await bC.asyncSetTimeout(3e3),await yC(),await wT.updateLocalStreams(),Bs&&await wT.closeConnection(),no.trace("Restart clustering restarting ingest and reply service threads");let t=CT(mt.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=CT(mt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(eY,"restartClustering");async function yC(){await OT.removeNatsConfig(mt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await OT.removeNatsConfig(mt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(yC,"removeNatsConfig")});var fY=M((tBe,dY)=>{"use strict";var eBe=require("lodash"),Fn=(H(),v(Y)),{handleHDBError:rY,hdbErrors:Kpe}=ge(),{HDB_ERROR_MSGS:Ype,HTTP_STATUS_CODES:Wpe}=Kpe,NC=j();dY.exports={getRolePermissions:jpe};var Wl=Object.create(null),zpe=o(e=>({key:e,perms:{}}),"permsTemplateObj"),oY=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),aY=o((e=!1,t=!1,r=!1,n=!1)=>({[Fn.PERMS_CRUD_ENUM.READ]:e,[Fn.PERMS_CRUD_ENUM.INSERT]:t,[Fn.PERMS_CRUD_ENUM.UPDATE]:r,[Fn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),wC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...aY(t,r,n,s)}),"tablePermsTemplate"),nY=o((e,t=aY())=>({attribute_name:e,describe:uY(t),[gp]:t[gp],[OC]:t[OC],[CC]:t[CC]}),"attrPermsTemplate"),sY=o((e,t=!1)=>({attribute_name:e,describe:t,[gp]:t}),"timestampAttrPermsTemplate"),{READ:gp,INSERT:OC,UPDATE:CC}=Fn.PERMS_CRUD_ENUM,cY=Object.values(Fn.PERMS_CRUD_ENUM),lY=[gp,OC,CC];function jpe(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Fn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Wl[t]&&Wl[t].key===n)return Wl[t].perms;let s=Qpe(e,r);return Wl[t]?Wl[t].key=n:Wl[t]=zpe(n),Wl[t].perms=s,s}catch(r){if(!e[Fn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Fn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Fn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${role_name}' must be updated to align with new structure from the 2.2.0 release.`;throw NC.error(n),NC.debug(r),rY(new Error,Ype.OUTDATED_PERMS_TRANSLATION_ERROR,Wpe.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
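// Illustrative sketch, not the shipped code: getRolePermissions above appears to
// memoize translated role permissions keyed on the role's __updatedtime__ plus a
// snapshot of the current schema, so cached entries are recomputed whenever either
// changes. translatePermissions and the schemaSnapshot argument are placeholders.
const permsCache = new Map();

function getCachedRolePermissions(role, schemaSnapshot, translatePermissions) {
  // super users and cluster users bypass per-table permission translation entirely
  if (role.permission.super_user || role.permission.cluster_user) return role.permission;
  const cacheKey = JSON.stringify([role.__updatedtime__, schemaSnapshot]);
  const cached = permsCache.get(role.role);
  if (cached && cached.key === cacheKey) return cached.perms; // hit: same role version and schema
  const perms = translatePermissions(role, schemaSnapshot);   // miss: recompute and cache
  permsCache.set(role.role, { key: cacheKey, perms });
  return perms;
}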
19
+ `)}catch(r){r.code==="ENOENT"?pn.debug("no license file found"):pn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(Rhe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=iK(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){pn.error("There was an error parsing the license string."),pn.error(s),e.ram_allocation=Gl.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return tC=e,e}o(iC,"licenseSearch");async function Dhe(){return tC||await iC(),tC}o(Dhe,"getLicense");function Mhe(){let e=iC().ram_allocation,t=process.constrainedMemory?.()||tK();if(t=Math.round(Math.min(t,tK())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(Mhe,"checkMemoryLimit")});var cC=M((Oxe,uK)=>{var ET=Zd(),aK=require("chalk"),ls=j(),cK=require("prompt"),{promisify:vhe}=require("util"),oC=(H(),v(Y)),Uhe=require("fs-extra"),xhe=require("path"),Bhe=se(),{packageJson:Fhe}=yt(),lK=oe();lK.initSync();var Hhe=require("moment"),khe=vhe(cK.get),qhe=xhe.join(lK.getHdbBasePath(),oC.LICENSE_KEY_DIR_NAME,oC.LICENSE_FILE_NAME,oC.LICENSE_FILE_NAME);uK.exports={getFingerprint:$he,setLicense:Ghe,parseLicense:aC,register:Vhe,getRegistrationInfo:Yhe};async function Ghe(e){if(e&&e.key&&e.company){try{ls.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await aC(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw ls.error(r),ls.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(Ghe,"setLicense");async function $he(){let e={};try{e=await ET.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw ls.error(r),ls.error(t),new Error(r)}return e}o($he,"getFingerprint");async function aC(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");ls.info("Validating license input...");let r=ET.validateLicense(e,t);if(ls.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(ls.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(ls.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{ls.info("writing license to disk"),await Uhe.writeFile(qhe,JSON.stringify({license_key:e,company:t}))}catch(n){throw ls.error("Failed to write License"),n}return"Registration successful."}o(aC,"parseLicense");async function Vhe(){let e=await Khe();return aC(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(Vhe,"register");async function Khe(){let e=await ET.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:aK.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:aK.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{cK.start()}catch(n){ls.error(n)}let r;try{r=await khe(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(Khe,"promptForRegistration");async function Yhe(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await ET.getLicense()}catch(r){throw ls.error(`There was an error when searching licenses due to: ${r.message}`),r}if(Bhe.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=Fhe.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=Hhe.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(Yhe,"getRegistrationInfo")});var fK=M((Pxe,dK)=>{"use strict";var Whe=ft(),lC=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+Whe.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};dK.exports=lC});var pK=M((Dxe,hK)=>{"use strict";var mK=ft(),uC=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+mK.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+mK.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};hK.exports=uC});var _K=M((vxe,EK)=>{"use strict";var dC=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};EK.exports=dC});var SK=M((xxe,gK)=>{"use strict";var zhe=ft(),fC=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+zhe.SERVER_SUFFIX.ADMIN,this.password=r}};gK.exports=fC});var TT=M((Fxe,yK)=>{"use strict";var $l=require("path"),Vl=require("fs-extra"),jhe=fK(),Qhe=pK(),Jhe=_K(),Xhe=SK(),mC=rs(),tf=se(),Bn=It(),gT=(H(),v(Y)),up=ft(),{CONFIG_PARAMS:nr}=gT,rf=j(),dp=oe(),TK=Ki(),hC=Ht(),Zhe=as(),ef="clustering",epe=1e4,RK=50;yK.exports={generateNatsConfig:rpe,removeNatsConfig:npe,getHubConfigPath:tpe};function tpe(){let e=dp.get(nr.ROOTPATH);return $l.join(e,ef,up.NATS_CONFIG_FILES.HUB_SERVER)}o(tpe,"getHubConfigPath");async function rpe(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=dp.get(nr.ROOTPATH);Vl.ensureDirSync($l.join(r,"clustering","leaf")),dp.initSync();let n=Bn.getConfigFromFile(nr.CLUSTERING_TLS_CERT_AUTH),s=Bn.getConfigFromFile(nr.CLUSTERING_TLS_PRIVATEKEY),i=Bn.getConfigFromFile(nr.CLUSTERING_TLS_CERTIFICATE);!await Vl.exists(i)&&!await Vl.exists(!n)&&await Zhe.createNatsCerts();let 
a=$l.join(r,ef,up.PID_FILES.HUB),c=$l.join(r,ef,up.PID_FILES.LEAF),l=Bn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=$l.join(r,ef,up.NATS_CONFIG_FILES.HUB_SERVER),d=$l.join(r,ef,up.NATS_CONFIG_FILES.LEAF_SERVER),f=Bn.getConfigFromFile(nr.CLUSTERING_TLS_INSECURE),m=Bn.getConfigFromFile(nr.CLUSTERING_TLS_VERIFY),h=Bn.getConfigFromFile(nr.CLUSTERING_NODENAME),p=Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await hC.checkNATSServerInstalled()||ST("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await mC.listUsers(),g=Bn.getConfigFromFile(nr.CLUSTERING_USER),R=await mC.getClusterUser();(tf.isEmpty(R)||R.active!==!0)&&ST(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await _T(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await _T(nr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await _T(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),await _T(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],y=[];for(let[G,K]of _.entries())K.role?.role===gT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new Xhe(K.username,TK.decrypt(K.hash))),y.push(new Jhe(K.username,TK.decrypt(K.hash))));let N=[],{hub_routes:O}=Bn.getClusteringRoutes();if(!tf.isEmptyOrZeroLength(O))for(let G of O)N.push(`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@${G.host}:${G.port}`);let F=new jhe(Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NAME),Bn.getConfigFromFile(nr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,y);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=tf.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===gT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Vl.writeJson(u,F),rf.trace(`Hub server config written to ${u}`));let Z=`tls://${R.sys_name_encoded}:${R.uri_encoded_d_hash}@0.0.0.0:${p}`,q=`tls://${R.uri_encoded_name}:${R.uri_encoded_d_hash}@0.0.0.0:${p}`,W=new Qhe(Bn.getConfigFromFile(nr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[q],T,y,i,s,n,f);n==null&&delete W.tls.ca_file,(t===void 0||t===gT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Vl.writeJson(d,W),rf.trace(`Leaf server config written to ${d}`))}o(rpe,"generateNatsConfig");async function _T(e){let t=dp.get(e);return tf.isEmpty(t)&&ST(`port undefined for '${e}'`),await tf.isPortTaken(t)&&ST(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(_T,"isPortAvailable");function ST(e){let t=`Error generating clustering config: ${e}`;rf.error(t),console.error(t),process.exit(1)}o(ST,"generateNatsConfigError");async function npe(e){let{port:t,config_file:r}=hC.getServerConfig(e),{username:n,decrypt_hash:s}=await mC.getClusterUser(),i=0,a=2e3;for(;i<RK;){try{let d=await hC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){rf.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=RK)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&rf.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await tf.asyncSetTimeout(u)}let c="0".repeat(epe),l=$l.join(dp.get(nr.ROOTPATH),ef,r);await Vl.writeFile(l,c),await Vl.remove(l),rf.notify(e,"started.")}o(npe,"removeNatsConfig")});var OK=M((kxe,wK)=>{"use strict";var us=oe(),spe=Zd(),Ke=(H(),v(Y)),fp=ft(),Yo=require("path"),{PACKAGE_ROOT:yT}=yt(),AK=oe(),RT=se(),nf="/dev/null",ipe=Yo.join(yT,"launchServiceScripts"),bK=Yo.join(yT,"utility/scripts"),ope=Yo.join(bK,Ke.HDB_RESTART_SCRIPT),IK=Yo.resolve(yT,"dependencies",`${process.platform}-${process.arch}`,fp.NATS_BINARY_NAME);function NK(){let t=spe.licenseSearch().ram_allocation||Ke.RAM_ALLOCATION_ENUM.DEFAULT,r=Ke.MEM_SETTING_KEY+t,n={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return RT.noBootFile()&&(n[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=RT.getEnvCliRootPath()),{name:Ke.PROCESS_DESCRIPTORS.HDB,script:Ke.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:yT}}o(NK,"generateMainServerConfig");var ape=9930;function cpe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Yo.join(e,"clustering",fp.NATS_CONFIG_FILES.HUB_SERVER),r=Yo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=AK.get(Ke.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=fp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==ape?"-"+n:""),script:IK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=nf,i.error_file=nf),i}o(cpe,"generateNatsHubServerConfig");var lpe=9940;function upe(){us.initSync(!0);let e=us.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Yo.join(e,"clustering",fp.NATS_CONFIG_FILES.LEAF_SERVER),r=Yo.join(us.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=AK.get(Ke.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=fp.LOG_LEVEL_FLAGS[us.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==lpe?"-"+n:""),script:IK,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=nf,i.error_file=nf),i}o(upe,"generateNatsLeafServerConfig");function dpe(){us.initSync();let e=Yo.join(us.get(Ke.CONFIG_PARAMS.LOGGING_ROOT),Ke.LOG_NAMES.HDB),t={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ke.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:ipe,autorestart:!1};return us.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=nf,t.error_file=nf),t}o(dpe,"generateClusteringUpgradeV4ServiceConfig");function fpe(){let e={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.RESTART_HDB};return RT.noBootFile()&&(e[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=RT.getEnvCliRootPath()),{...{name:Ke.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:bK},script:ope}}o(fpe,"generateRestart");function 
mpe(){return{apps:[NK()]}}o(mpe,"generateAllServiceConfigs");wK.exports={generateAllServiceConfigs:mpe,generateMainServerConfig:NK,generateRestart:fpe,generateNatsHubServerConfig:cpe,generateNatsLeafServerConfig:upe,generateClusteringUpgradeV4ServiceConfig:dpe}});var hp=M(($xe,GK)=>{"use strict";var lt=(H(),v(Y)),hpe=se(),zo=TT(),AT=Ht(),Wo=ft(),lc=OK(),bT=oe(),Kl=j(),ppe=vs(),{startWorker:CK,onMessageFromWorkers:Epe}=ze(),_pe=Kd(),Gxe=require("util"),gpe=require("child_process"),Spe=require("fs"),{execFile:Tpe}=gpe,Je;GK.exports={enterPM2Mode:Rpe,start:uc,stop:pC,reload:LK,restart:DK,list:EC,describe:UK,connect:jo,kill:Npe,startAllServices:wpe,startService:_C,getUniqueServicesList:xK,restartAllServices:Ope,isServiceRegistered:BK,reloadStopStart:FK,restartHdb:vK,deleteProcess:bpe,startClusteringProcesses:kK,startClusteringThreads:qK,isHdbRestartRunning:Ipe,isClusteringRunning:Ppe,stopClustering:Cpe,reloadClustering:Lpe,expectedRestartOfChildren:MK};var mp=!1;Epe(e=>{e.type==="restart"&&bT.initSync(!0)});function Rpe(){mp=!0}o(Rpe,"enterPM2Mode");function jo(){return Je||(Je=require("pm2")),new Promise((e,t)=>{Je.connect((r,n)=>{r&&t(r),e(n)})})}o(jo,"connect");var En,ype=10,PK;function uc(e,t=!1){if(mp)return Ape(e);let r=Tpe(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let a=En.indexOf(r);a>-1&&En.splice(a,1),!PK&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<ype&&(Spe.existsSync(zo.getHubConfigPath())?uc(e):(await zo.generateNatsConfig(!0),uc(e),await new Promise(c=>setTimeout(c,3e3)),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let a=bT.get(lt.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,d;for(;l=c.exec(i);){if(l.index&&Wo.LOG_LEVEL_HIERARCHY[a]>=Wo.LOG_LEVEL_HIERARCHY[d||"info"]){let h=d===Wo.LOG_LEVELS.ERR||d===Wo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",h,n,i.slice(u,l.index).trim())}let[f,m]=l;u=l.index+f.length,d=Wo.LOG_LEVELS[m]}if(Wo.LOG_LEVEL_HIERARCHY[a]>=Wo.LOG_LEVEL_HIERARCHY[d||"info"]){let f=d===Wo.LOG_LEVELS.ERR||d===Wo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",f,n,i.slice(u).trim())}}if(o(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!En&&(En=[],!t)){let i=o(()=>{PK=!0,En&&(En.map(a=>a.kill()),process.exit(0))},"killChildren");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}En.push(r)}o(uc,"start");function Ape(e){return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.start(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(Ape,"startWithPM2");function pC(e){if(!mp){for(let t of En||[])t.name===e&&(En.splice(En.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.stop(e,async(n,s)=>{n&&(Je.disconnect(),r(n)),Je.delete(e,(i,a)=>{i&&(Je.disconnect(),r(n)),Je.disconnect(),t(a)})})})}o(pC,"stop");function LK(e){return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.reload(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(LK,"reload");function DK(e){if(!mp){MK();for(let t of En||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.restart(e,(n,s)=>{Je.disconnect(),t(s)})})}o(DK,"restart");function MK(){for(let e of En||[])e.config&&(e.config.restarts=0)}o(MK,"expectedRestartOfChildren");function bpe(e){return 
new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.delete(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(bpe,"deleteProcess");async function vK(){await uc(lc.generateRestart())}o(vK,"restartHdb");async function Ipe(){let e=await EC();for(let t in e)if(e[t].name===lt.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}o(Ipe,"isHdbRestartRunning");function EC(){return new Promise(async(e,t)=>{try{await jo()}catch(r){t(r)}Je.list((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(EC,"list");function UK(e){return new Promise(async(t,r)=>{try{await jo()}catch(n){r(n)}Je.describe(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(UK,"describe");function Npe(){if(!mp){for(let e of En||[])e.kill();En=[];return}return new Promise(async(e,t)=>{try{await jo()}catch(r){t(r)}Je.killDaemon((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(Npe,"kill");async function wpe(){try{await kK(),await qK(),await uc(lc.generateAllServiceConfigs())}catch(e){throw Je?.disconnect(),e}}o(wpe,"startAllServices");async function _C(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case lt.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=lc.generateMainServerConfig();break;case lt.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=lc.generateNatsIngestServiceConfig();break;case lt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=lc.generateNatsReplyServiceConfig();break;case lt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=lc.generateNatsHubServerConfig(),await uc(r,t),await zo.removeNatsConfig(e);return;case lt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=lc.generateNatsLeafServerConfig(),await uc(r,t),await zo.removeNatsConfig(e);return;case lt.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=lc.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await uc(r)}catch(r){throw Je?.disconnect(),r}}o(_C,"startService");async function xK(){try{let e=await EC(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Je?.disconnect(),e}}o(xK,"getUniqueServicesList");async function Ope(e=[]){try{let t=!1,r=await xK();for(let n=0,s=Object.values(r).length;n<s;n++){let a=Object.values(r)[n].name;e.includes(a)||(a===lt.PROCESS_DESCRIPTORS.HDB?t=!0:await DK(a))}t&&await FK(lt.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Je?.disconnect(),t}}o(Ope,"restartAllServices");async function BK(e){if(En?.find(r=>r.name===e))return!0;let t=await _pe.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}o(BK,"isServiceRegistered");async function FK(e){let t=bT.get(lt.CONFIG_PARAMS.THREADS_COUNT)??bT.get(lt.CONFIG_PARAMS.THREADS),r=await UK(e),n=hpe.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await pC(e),await _C(e)):e===lt.PROCESS_DESCRIPTORS.HDB?await vK():await LK(e)}o(FK,"reloadStopStart");var HK;async function kK(e=!1){for(let t in lt.CLUSTERING_PROCESSES){let r=lt.CLUSTERING_PROCESSES[t];await _C(r,e)}}o(kK,"startClusteringProcesses");async function qK(){HK=CK(lt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:lt.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await AT.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await AT.updateLocalStreams();let e=await ppe.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===lt.PRE_4_0_0_VERSION){Kl.info("Starting clustering upgrade 4.0.0 
process"),CK(lt.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}o(qK,"startClusteringThreads");async function Cpe(){for(let e in lt.CLUSTERING_PROCESSES)if(e!==lt.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===lt.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await HK.terminate();else{let t=lt.CLUSTERING_PROCESSES[e];await pC(t)}}o(Cpe,"stopClustering");async function Ppe(){for(let e in lt.CLUSTERING_PROCESSES){let t=lt.CLUSTERING_PROCESSES[e];if(await BK(t)===!1)return!1}return!0}o(Ppe,"isClusteringRunning");async function Lpe(){await zo.generateNatsConfig(!0),await AT.reloadNATSHub(),await AT.reloadNATSLeaf(),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await zo.removeNatsConfig(lt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}o(Lpe,"reloadClustering")});var TC={};Oe(TC,{compactOnStart:()=>Dpe,copyDb:()=>zK});async function Dpe(){dc.notify("Running compact on start"),console.log("Running compact on start");let e=(0,gC.get)(U.ROOTPATH),t=new Map,r=it();(0,SC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,IT.join)(e,"backup",n+".mdb"),a=(0,IT.join)(e,Bc,n+"-copy.mdb"),c=0;try{c=await $K(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){dc.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await zK(n,a),console.log("Backing up",n,"to",i);try{await(0,Yl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}console.log("Moving copy compacted",n,"to",s),await(0,Yl.move)(a,s,{overwrite:!0}),await(0,Yl.remove)((0,IT.join)(e,Bc,`${n}-copy.mdb-lock`))}try{md()}catch(n){dc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}try{md()}catch(n){dc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){dc.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,SC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Yl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw md(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=await $K(n);if(console.log("Database",n,"after compact has a total record count of",a),i!==a){let c=`There is a discrepancy between pre and post compact record count for database ${n}.
20
+ Total record count before compaction: ${i}, total after: ${a}.
21
+ Database backup has not been removed and can be found here: ${s}`;dc.error(c),console.error(c)}(0,gC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)!==!0&&(console.log("Removing backup",s),await(0,Yl.remove)(s))}}async function $K(e){let t=await(0,WK.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function sf(){}async function zK(e,t){console.log(`Copying database ${e} to ${t}`);let r=it()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=sf,m.primaryStore.remove=sf;for(let h in m.indices){let p=m.indices[h];p.put=sf,p.remove=sf}m.auditStore&&(m.auditStore.put=sf,m.auditStore.remove=sf),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,VK.open)(new KK.default(t)),c=a.openDB(NT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=Qg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let R=new YK.default(!p,p);R.encoding="binary",R.compression=_;let T=n.openDB(m,R);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",R.compression=g;let y=a.openDB(m,R);y.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,y,p,d)}if(i){let m=n.openDB(NT.AUDIT_STORE_NAME,ym);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,R=0,T=0,y=1e7,N=null;for(;y-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),R+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",R,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",R,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var VK,IT,Yl,gC,KK,YK,NT,WK,SC,dc,RC=ue(()=>{De();VK=require("lmdb"),IT=require("path"),Yl=require("fs-extra"),gC=w(oe()),KK=w(Dm()),YK=w(Lm()),NT=w($t());H();Mi();WK=w(vo()),SC=w(It()),dc=w(j());o(Dpe,"compactOnStart");o($K,"getTotalDBRecordCount");o(sf,"noop");o(zK,"copyDb")});var af=M((Xxe,tY)=>{"use strict";var Mpe=require("minimist"),{isMainThread:AC,parentPort:Ep,threadId:jxe}=require("worker_threads"),mt=(H(),v(Y)),no=j(),bC=se(),OT=TT(),wT=Ht(),Qxe=ft(),XK=It(),Ti=hp(),jK=Kd(),{compactOnStart:vpe}=(RC(),v(TC)),Upe=Fc(),{restartWorkers:CT,onMessageByType:xpe}=ze(),{handleHDBError:Bpe,hdbErrors:Fpe}=ge(),{HTTP_STATUS_CODES:Hpe}=Fpe,_p=oe(),{sendOperationToNode:QK,getThisNodeName:kpe,monitorNodeCAs:qpe}=(is(),v(ko)),{getHDBNodeTable:Jxe}=(Dl(),v(Mw));_p.initSync();var pp=`Restarting HarperDB. This may take up to ${mt.RESTART_TIMEOUT_MS/1e3} seconds.`,Gpe="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",JK="Clustering is not enabled so cannot be restarted",$pe="Invalid service",of,Bs;tY.exports={restart:ZK,restartService:IC};AC&&xpe(mt.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await IC({service:e.workerType}):ZK({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function ZK(e){Bs=Object.keys(e).length===0,of=await Ti.isServiceRegistered(mt.PROCESS_DESCRIPTORS.HDB);let t=Mpe(process.argv);if(t.service){await IC(t);return}if(Bs&&!of){console.error(Gpe);return}if(Bs&&console.log(pp),of){Ti.enterPM2Mode(),no.notify(pp);let r=Upe(Object.keys(mt.CONFIG_PARAM_MAP),!0);return bC.isEmptyOrZeroLength(Object.keys(r))||XK.updateConfigValue(void 0,void 0,r,!0,!0),Vpe(),pp}return AC?(no.notify(pp),_p.get(mt.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await vpe(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{CT()},50)):Ep.postMessage({type:mt.ITC_EVENT_TYPES.RESTART}),pp}o(ZK,"restart");async function IC(e){let{service:t}=e;if(mt.HDB_PROCESS_SERVICES[t]===void 0)throw Bpe(new Error,$pe,Hpe.BAD_REQUEST,void 0,void 0,!0);if(Ti.expectedRestartOfChildren(),of=await Ti.isServiceRegistered(mt.PROCESS_DESCRIPTORS.HDB),!AC){e.replicated&&qpe(),Ep.postMessage({type:mt.ITC_EVENT_TYPES.RESTART,workerType:t}),Ep.ref(),await new Promise(s=>{Ep.on("message",i=>{i.type==="restart-complete"&&(s(),Ep.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===kpe())continue;let i;try{({job_id:i}=await QK(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await QK(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case mt.HDB_PROCESS_SERVICES.clustering:if(!_p.get(mt.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=JK;break}Bs&&console.log("Restarting clustering"),no.notify("Restarting clustering"),await eY();break;case mt.HDB_PROCESS_SERVICES.clustering_config:case mt.HDB_PROCESS_SERVICES["clustering config"]:if(!_p.get(mt.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=JK;break}Bs&&console.log("Restarting clusteringConfig"),no.notify("Restarting clustering_config"),await Ti.reloadClustering();break;case"custom_functions":case"custom functions":case mt.HDB_PROCESS_SERVICES.harperdb:case mt.HDB_PROCESS_SERVICES.http_workers:case mt.HDB_PROCESS_SERVICES.http:if(Bs&&!of){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}Bs&&console.log("Restarting httpWorkers"),no.notify("Restarting http_workers"),Bs?await Ti.restart(mt.PROCESS_DESCRIPTORS.HDB):await CT("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(no.error(r),Bs&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(IC,"restartService");async function Vpe(){await eY(),await Ti.restart(mt.PROCESS_DESCRIPTORS.HDB),await bC.asyncSetTimeout(2e3),_p.get(mt.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await yC(),Bs&&(await wT.closeConnection(),process.exit(0))}o(Vpe,"restartPM2Mode");async function eY(){if(!XK.getConfigFromFile(mt.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await jK.getHDBProcessInfo()).clustering.length===0)no.trace("Clustering not running, restart will start clustering services"),await OT.generateNatsConfig(!0),await Ti.startClusteringProcesses(),await Ti.startClusteringThreads(),await yC(),Bs&&await wT.closeConnection();else{await OT.generateNatsConfig(!0),of?(no.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Ti.restart(mt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Ti.restart(mt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await jK.getHDBProcessInfo()).clustering.forEach(s=>{no.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await bC.asyncSetTimeout(3e3),await yC(),await wT.updateLocalStreams(),Bs&&await wT.closeConnection(),no.trace("Restart clustering restarting ingest and reply service threads");let t=CT(mt.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=CT(mt.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(eY,"restartClustering");async function yC(){await OT.removeNatsConfig(mt.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await OT.removeNatsConfig(mt.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(yC,"removeNatsConfig")});var fY=M((tBe,dY)=>{"use strict";var eBe=require("lodash"),Fn=(H(),v(Y)),{handleHDBError:rY,hdbErrors:Kpe}=ge(),{HDB_ERROR_MSGS:Ype,HTTP_STATUS_CODES:Wpe}=Kpe,NC=j();dY.exports={getRolePermissions:jpe};var Wl=Object.create(null),zpe=o(e=>({key:e,perms:{}}),"permsTemplateObj"),oY=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),aY=o((e=!1,t=!1,r=!1,n=!1)=>({[Fn.PERMS_CRUD_ENUM.READ]:e,[Fn.PERMS_CRUD_ENUM.INSERT]:t,[Fn.PERMS_CRUD_ENUM.UPDATE]:r,[Fn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),wC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...aY(t,r,n,s)}),"tablePermsTemplate"),nY=o((e,t=aY())=>({attribute_name:e,describe:uY(t),[gp]:t[gp],[OC]:t[OC],[CC]:t[CC]}),"attrPermsTemplate"),sY=o((e,t=!1)=>({attribute_name:e,describe:t,[gp]:t}),"timestampAttrPermsTemplate"),{READ:gp,INSERT:OC,UPDATE:CC}=Fn.PERMS_CRUD_ENUM,cY=Object.values(Fn.PERMS_CRUD_ENUM),lY=[gp,OC,CC];function jpe(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Fn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Wl[t]&&Wl[t].key===n)return Wl[t].perms;let s=Qpe(e,r);return Wl[t]?Wl[t].key=n:Wl[t]=zpe(n),Wl[t].perms=s,s}catch(r){if(!e[Fn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Fn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Fn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${t}' must be updated to align with new structure from the 2.2.0 release.`;throw NC.error(n),NC.debug(r),rY(new Error,Ype.OUTDATED_PERMS_TRANSLATION_ERROR,Wpe.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
22
22
  ${r.stack}`;throw NC.error(n),rY(new Error)}}}o(jpe,"getRolePermissions");function Qpe(e,t){let r=Object.create(null);r.super_user=!1;let n=e.permission;r[Fn.SYSTEM_SCHEMA_NAME]=n[Fn.SYSTEM_SCHEMA_NAME],r.structure_user=n.structure_user;let s=Array.isArray(e.permission.structure_user)||e.permission.structure_user===!0?e.permission.structure_user:[];return Object.keys(t).forEach(i=>{if(s===!0||s.indexOf(i)>-1){r[i]=Jpe(t[i]);return}r[i]=oY(),n[i]?(n[i].describe&&(r[i].describe=!0),Object.keys(t[i]).forEach(a=>{if(n[i].tables[a]){let c=n[i].tables[a],l=t[i][a],u=Xpe(c,l);r[i].describe||cY.forEach(d=>{u[d]&&(r[i].describe=!0)}),r[i].tables[a]=u}else r[i].tables[a]=wC()})):Object.keys(t[i]).forEach(a=>{r[i].tables[a]=wC()})}),r}o(Qpe,"translateRolePermissions");function Jpe(e){let t=oY(!0);return Object.keys(e).forEach(r=>{t.tables[r]=wC(!0,!0,!0,!0,!0)}),t}o(Jpe,"createStructureUserPermissions");function Xpe(e,t){let{attribute_permissions:r}=e;if(r?.length>0){let s={...e};s.attribute_permissions=[];let i=r.reduce((u,d)=>{let{attribute_name:f}=d,m=d;return Fn.TIME_STAMP_NAMES.includes(f)&&(m=sY(f,d[gp])),u[f]=m,u},{}),a=t.primaryKey||t.hash_attribute,c=!!i[a],l=nY(a);return t.attributes.forEach(({attribute:u})=>{if(i[u]){let d=i[u];d.describe=uY(d),s.attribute_permissions.push(d),c||Zpe(d,l)}else if(u!==a){let d;Fn.TIME_STAMP_NAMES.includes(u)?d=sY(u):d=nY(u),s.attribute_permissions.push(d)}}),c||s.attribute_permissions.push(l),s.describe=iY(s),s}else return e.describe=iY(e),e}o(Xpe,"getTableAttrPerms");function iY(e){return cY.filter(t=>e[t]).length>0}o(iY,"getSchemaTableDescribePerm");function uY(e){return lY.filter(t=>e[t]).length>0}o(uY,"getAttributeDescribePerm");function Zpe(e,t){lY.forEach(r=>{e[r]&&!t[r]&&(t[r]=!0,t.describe=!0)})}o(Zpe,"checkForHashPerms")});var Sp={};Oe(Sp,{authentication:()=>TY,bypassAuth:()=>cEe,login:()=>DC,logout:()=>MC,start:()=>lEe});function cEe(){SY=!0}async function TY(e,t){let r=e.headers.asObject,n=r.authorization,s=r.cookie,i=r.origin,a=[];try{if(i){let h=e.isOperationsServer?sEe?nEe:[]:rEe?tEe:[];if(h.includes(i)||h.includes("*")){if(e.method==="OPTIONS"){let p=_n.get(U.HTTP_CORSACCESSCONTROLALLOWHEADERS)??"Accept, Content-Type, Authorization",_=new Cs([["Access-Control-Allow-Methods","POST, GET, PUT, DELETE, PATCH, OPTIONS"],["Access-Control-Allow-Headers",p],["Access-Control-Allow-Origin",i]]);return PT&&_.set("Access-Control-Allow-Credentials","true"),{status:200,headers:_}}a.push("Access-Control-Allow-Origin",i),PT&&a.push("Access-Control-Allow-Credentials","true")}}let l,u;if(PT){i||(i=r.host);let h=(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session=",p=s?.split(/;\s+/)||[];for(let _ of p)if(_.startsWith(h)){let g=_.indexOf(";");l=_.slice(h.length,g===-1?_.length:g),u=await mY.get(l);break}e.session=u||(u={})}let d=o((h,p,_)=>{let g=new LT.AuthAuditLog(h,p,ba.AUTHENTICATION,r["x-forwarded-for"]??e.ip,e.method,e.pathname);g.auth_strategy=_,l&&(g.session_id=l),r.referer&&(g.referer=r.referer),r.origin&&(g.origin=r.origin),p===ei.SUCCESS?PC.notify(g):PC.error(g)},"authAuditLog");if(!e.authorized&&e.mtlsConfig&&e.peerCertificate.subject&&e?._nodeRequest?.socket?.authorizationError&&PC.error("Authorization error:",e._nodeRequest.socket.authorizationError),e.mtlsConfig&&e.authorized&&e.peerCertificate.subject){let h=e.mtlsConfig.user;h!==null?((h===void 0||h==="Common Name"||h==="CN")&&(h=e.peerCertificate.subject.CN),e.user=await Le.getUser(h,null,e),d(h,ei.SUCCESS,"mTLS")):eEe("HTTPS/WSS mTLS authorized connection (mTLS did not 
authorize a user)","from",e.ip)}let f;if(!e.user)if(n){if(f=zl.get(n),!f){let h=n.indexOf(" "),p=n.slice(0,h),_=n.slice(h+1),g,R;try{switch(p){case"Basic":let T=atob(_),y=T.indexOf(":");g=T.slice(0,y),R=T.slice(y+1),f=g||R?await Le.getUser(g,R,e):null;break;case"Bearer":try{f=await uw(_)}catch(N){if(N.message==="invalid token")try{return await uS(_),c({status:-1})}catch{throw N}}break}}catch(T){return oEe&&(zl.get(_)||(zl.set(_,_),d(g,ei.FAILURE,p))),c({status:401,body:wa({error:T.message},e)})}zl.set(n,f),iEe&&d(f.username,ei.SUCCESS,p)}e.user=f}else u?.user?e.user=await Le.getUser(u.user,null,e):(SY&&(e.ip?.includes("127.0.0.")||e.ip=="::1")||e?._nodeRequest?.socket?.server?._pipeName&&e.ip===void 0)&&(e.user=await(0,pY.getSuperUser)());PT&&(e.session.update=function(h){let p=_n.get(U.AUTHENTICATION_COOKIE_EXPIRES),_=e.protocol==="https"||r.host?.startsWith("localhost:")||r.host?.startsWith("127.0.0.1:")||r.host?.startsWith("::1");if(!l){l=(0,EY.v4)();let g=_n.get(U.AUTHENTICATION_COOKIE_DOMAINS),R=p?new Date(Date.now()+(0,LC.convertToMS)(p)).toUTCString():aEe,T=g?.find(O=>r.host?.endsWith(O)),N=`${(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session="}${l}; Path=/; Expires=${R}; HttpOnly`;T&&(N+=`; Domain=${T}`),_&&(N+="; SameSite=None; Secure"),a?a.push("Set-Cookie",N):m?.headers?.set&&m.headers.set("Set-Cookie",N)}return _&&(a?(i&&a.push("Access-Control-Expose-Headers","X-Hdb-Session"),a.push("X-Hdb-Session","Secure")):m?.headers?.set&&(i&&m.headers.set("Access-Control-Expose-Headers","X-Hdb-Session"),m.headers.set("X-Hdb-Session","Secure"))),h.id=l,mY.put(h,{expiresAt:p?Date.now()+(0,LC.convertToMS)(p):void 0})},e.login=async function(h,p){let _=e.user=await Le.authenticateUser(h,p,e);e.session.update({user:_&&(_.getId?.()??_.username)})});let m=await t(e);return m&&(m.status===401&&(r["user-agent"]?.startsWith("Mozilla")&&r.accept?.startsWith("text/html")&&bs.loginPath?(m.status=302,m.headers.set("Location",bs.loginPath(e))):m.headers.set("WWW-Authenticate","Basic")),c(m))}catch(l){throw c(l)}function c(l){let u=a.length;if(u>0){let d=l.headers;d||(l.headers=d=new Cs);for(let f=0;f<u;){let m=a[f++];d.set(m,a[f++])}}return a=null,l}o(c,"applyResponseHeaders")}function lEe({server:e,port:t,securePort:r}){e.http(TY,t||r?{port:t,securePort:r}:{port:"all"}),hY||(hY=!0,setInterval(()=>{zl=new Map},_n.get(U.AUTHENTICATION_CACHETTL)).unref(),_Y.user.addListener(()=>{zl=new Map}))}async function DC(e){if(!e.baseRequest?.login)throw new Error("No session for login");return e.baseResponse.headers.set=(t,r)=>{e.fastifyResponse.header(t,r)},await e.baseRequest.login(e.username,e.password??""),"Login successful"}async function MC(e){if(!e.baseRequest.session)throw new Error("No session for logout");return await e.baseRequest.session.update({user:null}),"Logout successful"}var 
pY,EY,_n,LT,_Y,LC,gY,eEe,PC,tEe,rEe,nEe,sEe,mY,PT,SY,iEe,oEe,aEe,zl,hY,DT=ue(()=>{pY=w(rs());Mr();ku();yd();De();EY=require("uuid"),_n=w(oe());H();LT=w(j()),_Y=w(ch());hh();LC=w(se());yo();gY=(0,LT.forComponent)("authentication"),{debug:eEe}=gY,PC=gY.withTag("auth-event");_n.initSync();tEe=_n.get(U.HTTP_CORSACCESSLIST),rEe=_n.get(U.HTTP_CORS),nEe=_n.get(U.OPERATIONSAPI_NETWORK_CORSACCESSLIST),sEe=_n.get(U.OPERATIONSAPI_NETWORK_CORS),mY=je({table:"hdb_session",database:"system",attributes:[{name:"id",isPrimaryKey:!0},{name:"user"}]}),PT=_n.get(U.AUTHENTICATION_ENABLESESSIONS)??!0,SY=process.env.AUTHENTICATION_AUTHORIZELOCAL??_n.get(U.AUTHENTICATION_AUTHORIZELOCAL)??process.env.DEV_MODE,iEe=_n.get(U.LOGGING_AUDITAUTHEVENTS_LOGSUCCESSFUL)??!1,oEe=_n.get(U.LOGGING_AUDITAUTHEVENTS_LOGFAILED)??!1,aEe="Tue, 01 Oct 8307 19:33:20 GMT",zl=new Map;Le.onInvalidatedUser(()=>{zl=new Map});o(cEe,"bypassAuth");o(TY,"authentication");o(lEe,"start");o(DC,"login");o(MC,"logout")});var wY=M((dBe,NY)=>{"use strict";var we=require("joi"),RY=require("fs-extra"),yY=require("path"),ds=st(),AY=oe(),bY=(H(),v(Y)),IY=j(),{hdbErrors:uEe}=ge(),{HDB_ERROR_MSGS:gn}=uEe,Qo=/^[a-zA-Z0-9-_]+$/,dEe=/^[a-zA-Z0-9-_]+$/;NY.exports={getDropCustomFunctionValidator:mEe,setCustomFunctionValidator:hEe,addComponentValidator:gEe,dropCustomFunctionProjectValidator:SEe,packageComponentValidator:TEe,deployComponentValidator:REe,setComponentFileValidator:pEe,getComponentFileValidator:_Ee,dropComponentFileValidator:EEe,addSSHKeyValidator:yEe,updateSSHKeyValidator:AEe,deleteSSHKeyValidator:bEe,setSSHKnownHostsValidator:IEe};function MT(e,t,r){try{let n=AY.get(bY.CONFIG_PARAMS.COMPONENTSROOT),s=yY.join(n,t);return RY.existsSync(s)?e?t:r.message(gn.PROJECT_EXISTS):e?r.message(gn.NO_PROJECT):t}catch(n){return IY.error(n),r.message(gn.VALIDATION_ERR)}}o(MT,"checkProjectExists");function Tp(e,t){return e.includes("..")?t.message("Invalid file path"):e}o(Tp,"checkFilePath");function fEe(e,t,r,n){try{let s=AY.get(bY.CONFIG_PARAMS.COMPONENTSROOT),i=yY.join(s,e,t,r+".js");return RY.existsSync(i)?r:n.message(gn.NO_FILE)}catch(s){return IY.error(s),n.message(gn.VALIDATION_ERR)}}o(fEe,"checkFileExists");function mEe(e){let t=we.object({project:we.string().pattern(Qo).custom(MT.bind(null,!0)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().pattern(Qo).custom(fEe.bind(null,e.project,e.type)).custom(Tp).required().messages({"string.pattern.base":gn.BAD_FILE_NAME})});return ds.validateBySchema(e,t)}o(mEe,"getDropCustomFunctionValidator");function hEe(e){let t=we.object({project:we.string().pattern(Qo).custom(MT.bind(null,!0)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().custom(Tp).required(),function_content:we.string().required()});return ds.validateBySchema(e,t)}o(hEe,"setCustomFunctionValidator");function pEe(e){let t=we.object({project:we.string().pattern(Qo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),file:we.string().custom(Tp).required(),payload:we.string().allow("").optional(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ds.validateBySchema(e,t)}o(pEe,"setComponentFileValidator");function EEe(e){let t=we.object({project:we.string().pattern(Qo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),file:we.string().custom(Tp).optional()});return 
ds.validateBySchema(e,t)}o(EEe,"dropComponentFileValidator");function _Ee(e){let t=we.object({project:we.string().required(),file:we.string().custom(Tp).required(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return ds.validateBySchema(e,t)}o(_Ee,"getComponentFileValidator");function gEe(e){let t=we.object({project:we.string().pattern(Qo).custom(MT.bind(null,!1)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME})});return ds.validateBySchema(e,t)}o(gEe,"addComponentValidator");function SEe(e){let t=we.object({project:we.string().pattern(Qo).custom(MT.bind(null,!0)).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME})});return ds.validateBySchema(e,t)}o(SEe,"dropCustomFunctionProjectValidator");function TEe(e){let t=we.object({project:we.string().pattern(Qo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),skip_node_modules:we.boolean(),skip_symlinks:we.boolean()});return ds.validateBySchema(e,t)}o(TEe,"packageComponentValidator");function REe(e){let t=we.object({project:we.string().pattern(Qo).required().messages({"string.pattern.base":gn.BAD_PROJECT_NAME}),package:we.string().optional(),restart:we.alternatives().try(we.boolean(),we.string().valid("rolling")).optional()});return ds.validateBySchema(e,t)}o(REe,"deployComponentValidator");function yEe(e){let t=we.object({name:we.string().pattern(dEe).required().messages({"string.pattern.base":gn.BAD_SSH_KEY_NAME}),key:we.string().required(),host:we.string().required(),hostname:we.string().required(),known_hosts:we.string().optional()});return ds.validateBySchema(e,t)}o(yEe,"addSSHKeyValidator");function AEe(e){let t=we.object({name:we.string().required(),key:we.string().required()});return ds.validateBySchema(e,t)}o(AEe,"updateSSHKeyValidator");function bEe(e){let t=we.object({name:we.string().required()});return ds.validateBySchema(e,t)}o(bEe,"deleteSSHKeyValidator");function IEe(e){let t=we.object({known_hosts:we.string().required()});return ds.validateBySchema(e,t)}o(IEe,"setSSHKnownHostsValidator")});var Ap=M((mBe,DY)=>{"use strict";var vT=require("joi"),fc=require("path"),cf=require("fs-extra"),{exec:NEe,spawn:wEe}=require("child_process"),OEe=require("util"),CEe=OEe.promisify(NEe),lf=(H(),v(Y)),{PACKAGE_ROOT:PEe}=yt(),{handleHDBError:Rp,hdbErrors:LEe}=ge(),{HTTP_STATUS_CODES:yp}=LEe,jl=oe(),DEe=st(),mc=j(),{once:MEe}=require("events");jl.initSync();var vC=jl.get(lf.CONFIG_PARAMS.COMPONENTSROOT),OY="npm install --force --omit=dev --json",vEe=`${OY} --dry-run`,UEe=jl.get(lf.CONFIG_PARAMS.ROOTPATH),UT=fc.join(UEe,"ssh");DY.exports={installModules:HEe,auditModules:kEe,installAllRootModules:xEe,uninstallRootModule:BEe,linkHarperdb:FEe,runCommand:uf};async function xEe(e=!1,t=jl.get(lf.CONFIG_PARAMS.ROOTPATH)){await xT();let r=!1,n=process.env;cf.pathExistsSync(UT)&&cf.readdirSync(UT).forEach(s=>{s.includes(".key")&&!r&&(n={GIT_SSH_COMMAND:"ssh -F "+fc.join(UT,"config")+" -o UserKnownHostsFile="+fc.join(UT,"known_hosts"),...process.env},r=!0)});try{let s=jl.get(lf.CONFIG_PARAMS.ROOTPATH),i=fc.join(s,"node_modules","harperdb");cf.lstatSync(i).isSymbolicLink()&&cf.unlinkSync(i)}catch(s){s.code!=="ENOENT"&&mc.error("Error removing symlink:",s)}await uf(e?"npm install --force --ignore-scripts":"npm install --force",t,n)}o(xEe,"installAllRootModules");async function BEe(e){await uf(`npm uninstall ${e}`,jl.get(lf.CONFIG_PARAMS.ROOTPATH))}o(BEe,"uninstallRootModule");async function FEe(){await xT(),await uf(`npm link 
${PEe}`,jl.get(lf.CONFIG_PARAMS.ROOTPATH))}o(FEe,"linkHarperdb");async function uf(e,t=void 0,r=process.env){mc.debug({tagName:"npm_run_command"},`running command: \`${e}\``);let n=wEe(e,{shell:!0,cwd:t,env:r,stdio:["ignore","pipe","pipe"]}),s="",i="";n.stdout.on("data",c=>{let l=c.toString();mc.debug({tagName:"npm_run_command:stdout"},l),s+=l}),n.stderr.on("data",c=>{let l=c.toString();mc.error({tagName:"npm_run_command:stderr"},l),i+=l});let[a]=await MEe(n,"close");if(a!==0)throw new Error(`Command \`${e}\` exited with code ${a}.${i===""?"":` Error: ${i}`}`);return s||void 0}o(uf,"runCommand");async function HEe(e){let t="install_node_modules is deprecated. Dependencies are automatically installed on deploy, and install_node_modules can lead to inconsistent behavior";mc.warn(t,e.projects);let r=LY(e);if(r)throw Rp(r,r.message,yp.BAD_REQUEST);let{projects:n,dryRun:s}=e,i=s===!0?vEe:OY;await xT(),await PY(n);let a={};for(let c=0,l=n.length;c<l;c++){let u=n[c];a[u]={npm_output:null,npm_error:null};let d=fc.join(vC,u),f,m=null;try{let{stdout:h,stderr:p}=await CEe(i,{cwd:d});f=h?h.replace(`
23
23
  `,""):null,m=p?p.replace(`
24
24
  `,""):null}catch(h){h.stderr?a[u].npm_error=CY(h.stderr):a[u].npm_error=h.message;continue}try{a[u].npm_output=JSON.parse(f)}catch{a[u].npm_output=f}try{a[u].npm_error=JSON.parse(m)}catch{a[u].npm_error=m}}return mc.info(`finished installModules with response ${a}`),a.warning=t,a}o(HEe,"installModules");function CY(e){let t='"error": {',r=e.indexOf('"error": {'),n=e.indexOf(`}