harperdb 4.6.15 → 4.6.17

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
@@ -15,9 +15,9 @@
  `,""));return r.replace(`
  `,"")}o(uq,"runCommand");async function vae(){try{await uae.access(dw)}catch{return!1}let e=await uq(`${dw} --version`,void 0),t=e.substring(e.lastIndexOf("v")+1,e.length);return dae.eq(t,Dae)}o(vae,"checkNATSServerInstalled");async function Ew(e,t,r,n=!0,s="127.0.0.1"){if(!t&&!r){let a=await cq.getClusterUser();if(Ll(a))throw new Error("Unable to get nats connection. Cluster user is undefined.");t=a.username,r=a.decrypt_hash}hi.trace("create nats connection called");let i=await Aae({name:s,port:e,user:t,pass:r,maxReconnectAttempts:-1,waitOnFirstConnect:n,timeout:2e5,tls:{keyFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_PRIVATEKEY),certFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERTIFICATE),caFile:kr.get(Qe.CONFIG_PARAMS.CLUSTERING_TLS_CERT_AUTH),rejectUnauthorized:!1}});return i.protocol.transport.socket.unref(),hi.trace("create connection established a nats client connection with id",i?.info?.client_id),i.closed().then(a=>{a&&hi.error("Error with Nats client connection, connection closed",a),i===fn&&dq()}),i}o(Ew,"createConnection");function dq(){fn=void 0,Ol=void 0,Cl=void 0,Pl=void 0}o(dq,"clearClientCache");async function Uae(){fn&&(await fn.drain(),fn=void 0,Ol=void 0,Cl=void 0,Pl=void 0)}o(Uae,"closeConnection");var fn,Pl;async function xh(){return Pl||(Pl=Ew(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),void 0,void 0),fn=await Pl),fn||Pl}o(xh,"getConnection");async function Bh(){if(Ol)return Ol;Ll(fn)&&await xh();let{domain:e}=yd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Ll(e))throw new Error("Error getting JetStream domain. Unable to get JetStream manager.");return Ol=await fn.jetstreamManager({domain:e,timeout:6e4}),Ol}o(Bh,"getJetStreamManager");async function fq(){if(Cl)return Cl;Ll(fn)&&await xh();let{domain:e}=yd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);if(Ll(e))throw new Error("Error getting JetStream domain. 
Unable to get JetStream manager.");return Cl=fn.jetstream({domain:e,timeout:6e4}),Cl}o(fq,"getJetStream");async function Xi(){let e=fn||await xh(),t=Ol||await Bh(),r=Cl||await fq();return{connection:e,jsm:t,js:r}}o(Xi,"getNATSReferences");async function xae(e){let t=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),{sys_name:r,decrypt_hash:n}=await cq.getClusterUser(),s=await Ew(t,r,n),i=pw(),a=s.subscribe(i),c=[],l,u=(async()=>{for await(let d of a){let f=lq.decode(d.data);f.response_time=Date.now()-l,c.push(f)}})();return l=Date.now(),await s.publish("$SYS.REQ.SERVER.PING.VARZ",void 0,{reply:i}),await s.publish("$SYS.REQ.SERVER.PING",void 0,{reply:i}),await s.flush(),await mS.asyncSetTimeout(e),await a.drain(),await s.close(),await u,c}o(xae,"getServerList");async function _w(e,t){let{jsm:r}=await Xi(),n=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);s=s===null?-1:s;let i=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);i=i===null?-1:i,await r.streams.add({name:e,storage:bae.File,retention:Iae.Limits,subjects:t,discard:Nae.Old,maxMsgs:s,maxBytes:i,maxAge:n})}o(_w,"createLocalStream");async function mq(){let{jsm:e}=await Xi(),t=await e.streams.list().next(),r=[];return t.forEach(n=>{r.push(n)}),r}o(mq,"listStreams");async function Bae(e){let{jsm:t}=await Xi();await t.streams.delete(e)}o(Bae,"deleteLocalStream");async function Fae(e){let{connection:t}=await Xi(),r=[],n=pw(),s=t.subscribe(n),i=(async()=>{for await(let a of s)r.push(lq.decode(a.data))})();return await t.publish(`$JS.${e}.API.STREAM.LIST`,void 0,{reply:n}),await t.flush(),await s.drain(),await i,r}o(Fae,"listRemoteStreams");async function Hae(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Xi(),i=sq(),a={durable_name:i,ack_policy:mw.Explicit};t&&(a.deliver_policy=hw.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];let u=[];for await(let d of l){let f=fw(d.data),m={nats_timestamp:d.info.timestampNanos,nats_sequence:d.info.streamSequence,entry:f};if(d.headers&&(m.origin=d.headers.get(Qr.MSG_HEADERS.ORIGIN)),u.push(m),d.ack(),d.info.pending===0)break}return await c.delete(),u}o(Hae,"viewStream");async function*kae(e,t=void 0,r=void 0){let{jsm:n,js:s}=await Xi(),i=sq(),a={durable_name:i,ack_policy:mw.Explicit};t&&(a.deliver_policy=hw.StartTime,a.opt_start_time=new Date(t).toISOString()),await n.consumers.add(e,a);let c=await s.consumers.get(e,i),l=r?await c.fetch({max_messages:r,expires:2e3}):await c.consume();if(c._info.num_pending===0)return[];for await(let u of l){let d=fw(u.data);d[0]||(d=[d]);for(let f of d){let m={nats_timestamp:u.info.timestampNanos,nats_sequence:u.info.streamSequence,entry:f};u.headers&&(m.origin=u.headers.get(Qr.MSG_HEADERS.ORIGIN)),yield m}if(u.ack(),u.info.pending===0)break}await c.delete()}o(kae,"viewStreamIterator");async function Gae(e,t,r,n){hi.trace(`publishToStream called with subject: ${e}, stream: ${t}, entries:`,n.operation),r=hq(n,r);let{js:s}=await Xi(),i=await pS(),a=`${e}.${i}`,c=await Pae(()=>n instanceof Uint8Array?n:aq.encode(n));try{hi.trace(`publishToStream publishing to subject: ${a}`),Cae(c.length,"bytes-sent",e,n.operation,"replication"),await s.publish(a,c,{headers:r})}catch(l){if(l.code&&l.code.toString()==="503")return Eq(async()=>{try{await 
s.publish(a,c,{headers:r})}catch{if(l.code&&l.code.toString()==="503"){hi.trace(`publishToStream creating stream: ${t}`);let d=a.split(".");d[2]="*",await _w(t,[a]),await s.publish(a,c,{headers:r})}else throw l}});throw l}}o(Gae,"publishToStream");function hq(e,t){t===void 0&&(t=Oae());let r=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME);return!t.has(Qr.MSG_HEADERS.ORIGIN)&&r&&t.append(Qr.MSG_HEADERS.ORIGIN,r),t}o(hq,"addNatsMsgHeader");function yd(e){e=e.toLowerCase();let t=Uh.join(kr.get(Qe.CONFIG_PARAMS.ROOTPATH),Lae);if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())return Ll(uw)&&(uw={port:Mh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),server_name:Mh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.HUB,config_file:Qr.NATS_CONFIG_FILES.HUB_SERVER,pid_file_path:Uh.join(t,Qr.PID_FILES.HUB),hdbNatsPath:t}),uw;if(e===Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())return Ll(lw)&&(lw={port:Mh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),server_name:Mh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,config_file:Qr.NATS_CONFIG_FILES.LEAF_SERVER,domain:Mh.getConfigFromFile(Qe.CONFIG_PARAMS.CLUSTERING_NODENAME)+Qr.SERVER_SUFFIX.LEAF,pid_file_path:Uh.join(t,Qr.PID_FILES.LEAF),hdbNatsPath:t}),lw;hi.error(`Unable to get Nats server config. Unrecognized process: ${e}`)}o(yd,"getServerConfig");async function pq(e,t,r,n){try{await e.consumers.add(t,{ack_policy:mw.Explicit,durable_name:r,deliver_policy:hw.StartTime,opt_start_time:n})}catch(s){if(s.message!=="consumer already exists")throw s}}o(pq,"createConsumer");async function qae(e,t,r){await e.consumers.delete(t,r)}o(qae,"removeConsumer");function $ae(e){return e.split(".")[1]}o($ae,"extractServerName");async function Vae(e,t,r=6e4,n=pw()){if(!mS.isObject(t))throw new Error("data param must be an object");let s=aq.encode(t),{connection:i}=await Xi(),a={timeout:r};n&&(a.reply=n,a.noMux=!0);let c=await i.request(e,s,a);return fw(c.data)}o(Vae,"request");function gw(e){return new Promise(async(t,r)=>{let n=pae(dw,["--signal",`reload=${e}`],{cwd:__dirname}),s,i;n.on("error",a=>{r(a)}),n.stdout.on("data",a=>{i+=a.toString()}),n.stderr.on("data",a=>{s+=a.toString()}),n.stderr.on("close",a=>{s&&r(s),t(i)})})}o(gw,"reloadNATS");async function Kae(){let{pid_file_path:e}=yd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_HUB);await gw(e)}o(Kae,"reloadNATSHub");async function Yae(){let{pid_file_path:e}=yd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await gw(e)}o(Yae,"reloadNATSLeaf");function Wae(e,t,r){let n;switch(e.code){case nq.NoResponders:n=`Unable to ${t}, node '${r}' is not listening.`;break;case nq.Timeout:n=`Unable to ${t}, node '${r}' is listening but did not respond.`;break;default:n=e.message;break}return n}o(Wae,"requestErrorHandler");async function zae(e,t){let r=t+Qr.SERVER_SUFFIX.LEAF,{connection:n}=await Xi(),{jsm:s}=await rce(r),{schema:i,table:a}=e,c=hS.createNatsTableStreamName(i,a),l=e.start_time?e.start_time:new Date(Date.now()).toISOString();await Eq(async()=>{if(e.subscribe===!0)await pq(s,c,n.info.server_name,l);else try{await qae(s,c,n.info.server_name)}catch(u){hi.trace(u)}})}o(zae,"updateRemoteConsumer");async function jae(e,t,r,n){let s=hS.createNatsTableStreamName(e,t),i=r+Qr.SERVER_SUFFIX.LEAF,a={type:Qe.ITC_EVENT_TYPES.NATS_CONSUMER_UPDATE,status:n,stream_name:s,node_domain_name:i};if(!oq&&Rae()<kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXINGESTTHREADS)){let{updateConsumer:c}=cw();await c(a)}await 
Sae(a),n==="stop"&&await mS.asyncSetTimeout(1e3)}o(jae,"updateConsumerIterator");function Eq(e){return gae.writeTransaction(Qe.SYSTEM_SCHEMA_NAME,Qe.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,e)}o(Eq,"exclusiveLock");async function _q(e,t){let r=hS.createNatsTableStreamName(e,t),n=await pS(),s=Zae(e,t,n);await _w(r,[s])}o(_q,"createLocalTableStream");async function Qae(e){for(let t=0,r=e.length;t<r;t++){let n=e[t].schema,s=e[t].table;await _q(n,s)}}o(Qae,"createTableStreams");async function gq(e,t,r=void 0){if(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))try{let n=hS.createNatsTableStreamName(e,t),{domain:s}=yd(Qe.PROCESS_DESCRIPTORS.CLUSTERING_LEAF);await(await(await xh()).jetstreamManager({domain:s,timeout:24e4})).streams.purge(n,r)}catch(n){if(n.message==="stream not found")hi.warn(n);else throw n}}o(gq,"purgeTableStream");async function Jae(e,t){if(kr.get(Qe.CONFIG_PARAMS.CLUSTERING_ENABLED))for(let r=0,n=t.length;r<n;r++)await gq(e,t[r])}o(Jae,"purgeSchemaTableStreams");async function Xae(e){return(await Bh()).streams.info(e)}o(Xae,"getStreamInfo");function Zae(e,t,r){return`${Qr.SUBJECT_PREFIXES.TXN}.${e}${t?"."+t:""}.${r}`}o(Zae,"createSubjectName");async function pS(){if(vh)return vh;if(vh=(await Bh())?.nc?.info?.server_name,vh===void 0)throw new Error("Unable to get jetstream manager server name");return vh}o(pS,"getJsmServerName");async function ece(){let e=await Bh(),t=await pS(),r=await mq();for(let n of r){let s=n.config,i=s.subjects[0];if(!i)continue;let a=tce(n),c=i.split(".");if(c[c.length-1]===t&&!a||s.name==="__HARPERDB_WORK_QUEUE__")continue;let u=i.split(".");u[u.length-1]=t;let d=u.join(".");hi.trace(`Updating stream subject name from: ${i} to: ${d}`),s.subjects[0]=d,await e.streams.update(s.name,s)}}o(ece,"updateLocalStreams");function tce(e){let{config:t}=e,r=!1,n=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXAGE);n=n===null?0:n*1e9;let s=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXBYTES);s=s===null?-1:s;let i=kr.get(Qe.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_STREAMS_MAXMSGS);return i=i===null?-1:i,n!==t.max_age&&(t.max_age=n,r=!0),s!==t.max_bytes&&(t.max_bytes=s,r=!0),i!==t.max_msgs&&(t.max_msgs=i,r=!0),r}o(tce,"updateStreamLimits");async function rce(e){let t,r;try{t=await fn.jetstream({domain:e}),r=await fn.jetstreamManager({domain:e,checkAPI:!1})}catch(n){throw hi.error("Unable to connect to:",e),n}return{js:t,jsm:r}}o(rce,"connectToRemoteJS")});function Sw(e){let t=e.get(ES),r=t?(0,Ad.unpack)(t):null;r||(r={remoteNameToId:{}});let n=Ze(),s=!1;r.nodeName=Ze();let i=r.remoteNameToId;if(i[n]!==0){let a=0,c;for(let l in i){let u=i[l];u===0?c=l:u>a&&(a=u)}if(c){a++,i[c]=a;let l=[Symbol.for("seq"),a];e.rootStore.dbisDb.transactionSync(()=>{e.rootStore.dbisDb.get(l)||e.rootStore.dbisDb.putSync(l,{seqId:Hh(e)??1,nodes:[]})})}i[n]=0,e.putSync(ES,(0,Ad.pack)(r))}return r}function Fh(e){return Sw(e).remoteNameToId}function Rq(e,t){let r=Sw(t),n=r.remoteNameToId,s=new Map,i=!1;for(let a in e){let c=e[a],l=n[a];if(l==null){let u=0;for(let d in n){let f=n[d];f>u&&(u=f)}l=u+1,n[a]=l,i=!0}s.set(c,l)}return i&&t.putSync(ES,(0,Ad.pack)(r)),s}function _S(e,t){let r=Sw(t),n=r.remoteNameToId,s=n[e];if(s==null){let i=0;for(let a in n){let c=n[a];c>i&&(i=c)}s=i+1,n[e]=s,t.putSync(ES,(0,Ad.pack)(r))}return Tq.trace?.("The remote node name map",e,n,s),s}var Tq,Ad,ES,Tw=ue(()=>{Tq=w(ri());ss();Ad=require("msgpackr"),ES=Symbol.for("remote-ids");o(Sw,"getIdMappingRecord");o(Fh,"exportIdMapping");o(Rq,"remoteToLocalNodeId");o(_S,"getIdOfRemoteNode")});var 
Rw={};Oe(Rw,{commitsAwaitingReplication:()=>bd,getHDBNodeTable:()=>Kt,getReplicationSharedStatus:()=>Id,iterateRoutes:()=>Gh,shouldReplicateToNode:()=>kh,subscribeToNodeUpdates:()=>Nd});function Kt(){return yq||(yq=je({table:"hdb_nodes",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"subscriptions"},{attribute:"system_info"},{attribute:"url"},{attribute:"routes"},{attribute:"ca"},{attribute:"ca_info"},{attribute:"replicates"},{attribute:"revoked_certificates"},{attribute:"__createdtime__"},{attribute:"__updatedtime__"}]}))}function Id(e,t,r,n){return new Float64Array(e.getUserSharedBuffer(["replicated",t,r],new ArrayBuffer(48),n&&{callback:n}))}function Nd(e){Kt().subscribe({}).then(async t=>{for await(let r of t){let n=r?.value?.name;Nq.debug?.("adding node",n,"on node",Ze()," on process",process.pid),server.nodes=server.nodes.filter(i=>i.name!==n),r.type==="put"&&n!==Ze()&&(r.value?server.nodes.push(r.value):console.error("Invalid node update event",r));let s=new Map;for await(let i of Kt().search({}))if(i.shard!=null){let a=s.get(i.shard);a||s.set(i.shard,a=[]),a.push(i)}server.shards=s,(r.type==="put"||r.type==="delete")&&e(r.value,r.id)}})}function kh(e,t){let r=Ya.default.get(U.REPLICATION_DATABASES);return(e.replicates===!0||e.replicates?.sends)&&databases[t]&&(r==="*"||r?.find?.(n=>n.name===t&&(!n.sharded||e.shard===Ya.default.get(U.REPLICATION_SHARD))))&&Kt().primaryStore.get(Ze())?.replicates||e.subscriptions?.some(n=>(n.database||n.schema)===t&&n.subscribe)}function nce(){Nd(e=>{Wa({},(t,r)=>{let n=e.name,s=Aq.get(n);if(s||Aq.set(n,s=new Map),s.has(r))return;let i;for(let a in t)if(i=t[a].auditStore,i)break;if(i){let a=Id(i,r,n,()=>{let c=a[0],l=a.lastTime;for(let{txnTime:u,onConfirm:d}of bd.get(r)||[])u>l&&u<=c&&d();a.lastTime=c});a.lastTime=0,s.set(r,a)}})})}function*Gh(e){for(let t of e.routes||[]){let r=t.url,n;if(typeof t=="string"?t.includes("://")?r=t:n=t:n=t.hostname??t.host,n&&!r){let s=Ya.default.get(U.REPLICATION_SECUREPORT)??(!Ya.default.get(U.REPLICATION_PORT)&&Ya.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),i;(i=n.match(/:(\d+)$/)?.[1])?n=n.slice(0,-i[0].length-1):t.port?i=t.port:i=s||Ya.default.get(U.REPLICATION_PORT)||Ya.default.get(U.OPERATIONSAPI_NETWORK_PORT);let a=i?.lastIndexOf?.(":");a>0&&(i=+i.slice(a+1).replace(/[\[\]]/g,"")),r=(s?"wss://":"ws://")+n+":"+i}if(!r){bq.isMainThread&&console.error("Invalid route, must specify a url or host (with port)");continue}yield{replicates:!t.subscriptions,url:r,subscription:t.subscriptions,routes:t.routes,startTime:t.startTime,revoked_certificates:t.revokedCertificates}}}var bq,Iq,Ya,Nq,yq,Aq,bd,Dl=ue(()=>{De();ss();cm();bq=require("worker_threads"),Iq=w(ge()),Ya=w(oe());k();Nq=w(ri());server.nodes=[];o(Kt,"getHDBNodeTable");o(Id,"getReplicationSharedStatus");o(Nd,"subscribeToNodeUpdates");o(kh,"shouldReplicateToNode");Aq=new Map;yv((e,t,r)=>{if(r>server.nodes.length)throw new Iq.ClientError(`Cannot confirm replication to more nodes (${r}) than are in the network (${server.nodes.length})`);bd||(bd=new Map,nce());let n=bd.get(e);return n||(n=[],bd.set(e,n)),new Promise(s=>{let i=0;n.push({txnTime:t,onConfirm:o(()=>{++i===r&&s()},"onConfirm")})})});o(nce,"startSubscriptionToReplications");o(Gh,"iterateRoutes")});var Cq={};Oe(Cq,{connectedToNode:()=>Ml,disconnectedFromNode:()=>Od,ensureNode:()=>Ho,requestClusterStatus:()=>Oq,startOnMainThread:()=>bw});async function bw(e){let t=0,r=st();for(let i of Object.getOwnPropertyNames(r)){let a=r[i];for(let c in a){let 
l=a[c];if(l.auditStore){gS.set(i,Hh(l.auditStore));break}}}eo.whenThreadsStarted.then(async()=>{let i=[];for await(let l of r.system.hdb_nodes?.search([])||[])i.push(l);let a=Ze();function c(){let l=Kt().primaryStore.get(a);if(l!==null){let u=e.url??za();if(l===void 0||l.url!==u||l.shard!==e.shard)return Ho(a,{name:a,url:u,shard:e.shard,replicates:!0})}}o(c,"ensureThisNode"),Kt().primaryStore.get(a)&&c();for(let l of Gh(e))try{let u=!l.subscriptions;if(u&&await c(),u&&l.replicates==null&&(l.replicates=!0),i.find(d=>d.url===l.url))continue;s(l)}catch(u){console.error(u)}Nd(s)});let n;function s(i,a=i?.name){let c=Ze()&&a===Ze()||za()&&i?.url===za();if(c){let f=!!i?.replicates;if(n!==void 0&&n!==f)for(let m of Kt().search([]))m.replicates&&m.name!==a&&s(m,m.name);n=f}if(at.trace("Setting up node replication for",i),!i){for(let[f,m]of Zi){let h;for(let[p,{worker:_,nodes:g}]of m){let y=g[0];if(y&&y.name==a){h=!0;for(let[T,{worker:R}]of m)m.delete(T),at.warn("Node was deleted, unsubscribing from node",a,T,f),R?.postMessage({type:"unsubscribe-from-node",node:a,database:T,url:f});break}}if(h){Zi.get(f).iterator.remove(),Zi.delete(f);return}}return}if(c)return;if(!i.url){at.info(`Node ${i.name} is missing url`);return}let l=Zi.get(i.url);if(l&&l.iterator.remove(),!(i.replicates===!0||i.replicates?.sends)&&!i.subscriptions?.length&&!l)return;if(at.info(`Added node ${i.name} at ${i.url} for process ${Ze()}`),i.replicates&&i.subscriptions&&(i={...i,subscriptions:null}),i.name){for(let[f,m]of wd)if(i.url===m.url){wd.delete(f);break}wd.set(i.name,i)}let u=st();if(l||(l=new Map,Zi.set(i.url,l)),l.iterator=Wa(e,(f,m,h)=>{h?d(m,!0):d(m,!1)}),i.subscriptions)for(let f of i.subscriptions){let m=f.database||f.schema;u[m]||(at.warn(`Database ${m} not found for node ${i.name}, making a subscription anyway`),d(m,!1))}function d(f,m){at.trace("Setting up replication for database",f,"on node",i.name);let h=l.get(f),p,_=[{replicateByDefault:m,...i}];gS.has(f)&&(_.push({replicateByDefault:m,name:Ze(),startTime:gS.get(f),endTime:Date.now(),replicates:!0}),gS.delete(f));let g=kh(i,f),y=eo.workers.filter(T=>T.name==="http");if(h?(p=h.worker,h.nodes=_):g&&(t=t%y.length,p=y[t++],l.set(f,{worker:p,nodes:_,url:i.url}),p?.on("exit",()=>{l.get(f)?.worker===p&&(l.delete(f),d(f,m))})),g)setTimeout(()=>{let T={type:"subscribe-to-node",database:f,nodes:_};p?p.postMessage(T):qh(T)},sce);else{at.info("Node no longer should be used, unsubscribing from node",{replicates:i.replicates,databaseName:f,node:i,subscriptions:i.subscriptions,hasDatabase:!!u[f],thisReplicates:Kt().primaryStore.get(Ze())?.replicates}),Kt().primaryStore.get(Ze())?.replicates||(n=!1,at.info("Disabling replication, this node name",Ze(),Kt().primaryStore.get(Ze()),f));let T={type:"unsubscribe-from-node",database:f,url:i.url,name:i.name};p?p.postMessage(T):TS(T)}}o(d,"onDatabase")}o(s,"onNodeUpdate"),Od=o(function(i){try{at.info("Disconnected from node",i.name,i.url,"finished",!!i.finished);let a=Array.from(wd.keys()),c=a.sort(),l=c.indexOf(i.name||pi(i.url));if(l===-1){at.warn("Disconnected node not found in node map",i.name,a);return}let u=Zi.get(i.url),d=u?.get(i.database);if(!d){at.warn("Disconnected node not found in replication map",i.database,u);return}if(d.connected=!1,i.finished||!Aw.default.get(U.REPLICATION_FAILOVER))return;let f=d.nodes[0];if(!(f.replicates===!0||f.replicates?.sends||f.subscriptions?.length))return;let m=f.shard,h=(l+1)%c.length;for(;l!==h;){let p=c[h],_=wd.get(p);u=Zi.get(_.url);let 
g=u?.get(i.database);if(!g||g.connected===!1||g.nodes[0].shard!==m){h=(h+1)%c.length;continue}let{worker:y,nodes:T}=g,R=!1;for(let N of d.nodes){if(T.some(O=>O.name===N.name)){at.info(`Disconnected node is already failing over to ${p} for ${i.database}`);continue}N.endTime<Date.now()||(T.push(N),R=!0)}if(d.nodes=[d.nodes[0]],!R){at.info(`Disconnected node ${i.name} has no nodes to fail over to ${p}`);return}at.info(`Failing over ${i.database} from ${i.name} to ${p}`),y?y.postMessage({type:"subscribe-to-node",database:i.database,nodes:T}):qh({database:i.database,nodes:T});return}at.warn("Unable to find any other node to fail over to",i.name,i.url)}catch(a){at.error("Error failing over node",a)}},"disconnectedFromNode"),Ml=o(function(i){let a=Zi.get(i.url),c=a?.get(i.database);if(!c){at.warn("Connected node not found in replication map, this may be because the node is being removed",i.database,a);return}c.connected=!0,c.latency=i.latency;let l=c.nodes[0];if(!l){at.warn("Newly connected node has no node subscriptions",i.database,c);return}if(!l.name){at.debug("Connected node is not named yet",i.database,c);return}c.nodes=[l];let u=!1;for(let d of Zi.values()){let f=d.get(i.database);if(!f||f==c)continue;let{worker:m,nodes:h,connected:p}=f;if(h)if(p===!1&&h[0].shard===l.shard)u=!0,c.nodes.push(h[0]);else{let _=h.filter(g=>g&&g.name!==l.name);_.length<h.length&&(f.nodes=_,m.postMessage({type:"subscribe-to-node",database:i.database,nodes:h}))}}u&&c.worker&&c.worker.postMessage({type:"subscribe-to-node",database:i.database,nodes:c.nodes})},"connectedToNode"),(0,eo.onMessageByType)("disconnected-from-node",Od),(0,eo.onMessageByType)("connected-to-node",Ml),(0,eo.onMessageByType)("request-cluster-status",Oq)}function Oq(e,t){let r=[];for(let[n,s]of wd)try{let i=Zi.get(s.url);at.info("Getting cluster status for",n,s.url,"has dbs",i?.size);let a=[];if(i){for(let[l,{worker:u,connected:d,nodes:f,latency:m}]of i)a.push({database:l,connected:d,latency:m,threadId:u?.threadId,nodes:f.filter(h=>!(h.endTime<Date.now())).map(h=>h.name)});let c=(0,yw.cloneDeep)(s);c.database_sockets=a,delete c.ca,delete c.node_name,delete c.__updatedtime__,delete c.__createdtime__,r.push(c)}}catch(i){at.warn("Error getting cluster status for",s?.url,i)}return t?.postMessage({type:"cluster-status",connections:r}),{connections:r}}async function Ho(e,t){let r=Kt();e=e??pi(t.url),t.name=e;try{if(t.ca){let s=new wq.X509Certificate(t.ca);t.ca_info={issuer:s.issuer.replace(/\n/g," "),subject:s.subject.replace(/\n/g," "),subjectAltName:s.subjectAltName,serialNumber:s.serialNumber,validFrom:s.validFrom,validTo:s.validTo}}}catch(s){at.error("Error parsing replication CA info for hdb_nodes table",s.message)}let n=r.primaryStore.get(e);if(at.debug(`Ensuring node ${e} at ${t.url}, existing record:`,n,"new record:",t),!n)await r.patch(t);else{t.replicates&&!Aw.default.get(U.CLUSTERING_ENABLED)&&(t.subscriptions=null);for(let s in t)if(n[s]!==t[s]&&s==="subscriptions"&&t[s]&&n[s]){let i=[],a=(0,yw.cloneDeep)(n[s]);for(let c of t[s]){let l=!1;for(let u of a)if((c.database??c.schema)===(u.database??u.schema)&&c.table===u.table){u.publish=c.publish,u.subscribe=c.subscribe,l=!0;break}l||i.push(c)}t.subscriptions=[...a,...i];break}if(Array.isArray(t.revoked_certificates)){let s=n.revoked_certificates||[];t.revoked_certificates=[...new Set([...s,...t.revoked_certificates])]}at.info(`Updating node ${e} at ${t.url}`),await r.patch(t)}}var 
eo,SS,at,yw,Aw,wq,sce,Zi,Od,Ml,wd,gS,$h=ue(()=>{De();eo=w(nt());ss();SS=require("worker_threads");Dl();at=w(Q()),yw=require("lodash"),Aw=w(oe());k();wq=require("crypto"),sce=200,Zi=new Map,wd=new Map,gS=new Map;o(bw,"startOnMainThread");o(Oq,"requestClusterStatus");SS.parentPort&&(Od=o(e=>{SS.parentPort.postMessage({type:"disconnected-from-node",...e})},"disconnectedFromNode"),Ml=o(e=>{SS.parentPort.postMessage({type:"connected-to-node",...e})},"connectedToNode"),(0,eo.onMessageByType)("subscribe-to-node",e=>{qh(e)}),(0,eo.onMessageByType)("unsubscribe-from-node",e=>{TS(e)}));o(Ho,"ensureNode")});var os=M(Yt=>{"use strict";var hr=require("path"),{watch:ice}=require("chokidar"),Un=require("fs-extra"),Cd=require("node-forge"),Uq=require("net"),{generateKeyPair:Iw,X509Certificate:ko,createPrivateKey:xq}=require("crypto"),oce=require("util");Iw=oce.promisify(Iw);var wt=Cd.pki,Ei=require("joi"),{v4:Bq}=require("uuid"),{validateBySchema:Cw}=lt(),{forComponent:ace}=Q(),is=oe(),Ds=(k(),v(W)),{CONFIG_PARAMS:Ul}=Ds,_i=Jy(),{ClientError:Qa}=ge(),yS=require("node:tls"),{relative:Fq,join:cce}=require("node:path"),{CERT_PREFERENCE_APP:WMe,CERTIFICATE_VALUES:Pq}=_i,lce=xc(),Nw=yt(),{table:uce,getDatabases:dce,databases:RS}=(De(),v(mt)),{getJWTRSAKeys:Lq}=(gd(),v(Lh)),ht=ace("tls");Yt.generateKeys=Dw;Yt.updateConfigCert=Yq;Yt.createCsr=gce;Yt.signCertificate=Sce;Yt.setCertTable=Pd;Yt.loadCertificates=$q;Yt.reviewSelfSignedCert=vw;Yt.createTLSSelector=zq;Yt.listCertificates=Qq;Yt.addCertificate=Ice;Yt.removeCertificate=wce;Yt.createNatsCerts=yce;Yt.generateCertsKeys=Rce;Yt.getReplicationCert=Kh;Yt.getReplicationCertAuth=_ce;Yt.renewSelfSigned=Ace;Yt.hostnamesFromCert=xw;Yt.getKey=Oce;Yt.getHostnamesFromCertificate=Cce;Yt.getPrimaryHostName=Uw;var{urlToNodeName:Hq,getThisNodeUrl:fce,getThisNodeName:bS,clearThisNodeName:mce}=(ss(),v(Go)),{readFileSync:hce,statSync:kq}=require("node:fs"),zMe=oe(),{getTicketKeys:pce,onMessageFromWorkers:Ece}=nt(),ja=Q(),{isMainThread:Gq}=require("worker_threads"),{TLSSocket:qq,createSecureContext:jMe}=require("node:tls"),Pw=3650,Vh=["127.0.0.1","localhost","::1"],Lw=[{name:"countryName",value:"USA"},{name:"stateOrProvinceName",value:"Colorado"},{name:"localityName",value:"Denver"},{name:"organizationName",value:"HarperDB, Inc."}];Ece(async e=>{e.type===Ds.ITC_EVENT_TYPES.RESTART&&(is.initSync(!0),await vw())});var Jr;function Xa(){return Jr||(Jr=dce().system.hdb_certificate,Jr||(Jr=uce({table:"hdb_certificate",database:"system",attributes:[{name:"name",isPrimaryKey:!0},{attribute:"uses"},{attribute:"certificate"},{attribute:"is_authority"},{attribute:"private_key_name"},{attribute:"details"},{attribute:"is_self_signed"},{attribute:"__updatedtime__"}]}))),Jr}o(Xa,"getCertTable");async function Kh(){let e=zq("operations-api"),t={secureContexts:null,setSecureContext:o(s=>{},"setSecureContext")};await e.initialize(t);let r=t.secureContexts.get(bS());if(!r)return;let n=new ko(r.options.cert);return r.cert_parsed=n,r.issuer=n.issuer,r}o(Kh,"getReplicationCert");async function _ce(){Xa();let e=(await Kh()).options.cert,r=new ko(e).issuer.match(/CN=(.*)/)?.[1];return Jr.get(r)}o(_ce,"getReplicationCertAuth");var Dq,Ja=new Map;function $q(){if(Dq)return;Dq=!0;let e=[{configKey:Ul.TLS},{configKey:Ul.OPERATIONSAPI_TLS}];Xa();let t=hr.dirname(Nw.getConfigFilePath()),r;for(let{configKey:n}of e){let s=Nw.getConfigFromFile(n);if(s){Array.isArray(s)||(s=[s]);for(let i of s){let a=i.privateKey,c=a&&Fq(cce(t,"keys"),a);c&&Mq(a,l=>{Ja.set(c,l)},"private key");for(let l of[!1,!0]){let 
u=i[l?"certificateAuthority":"certificate"];if(u&&Gq){let d;Mq(u,f=>{if(Pq.cert===f)return;let m=i.hostname??i.hostnames??i.host??i.hosts;m&&!Array.isArray(m)&&(m=[m]);let h=Wq(u),p=new ko(h),_;try{_=Uw(p)}catch(R){ht.error("error extracting host name from certificate",R);return}if(_==null){ht.error("No host name found on certificate");return}if(p.checkIssued(new ko(Pq.cert)))return;let g=Jr.primaryStore.get(_),y=kq(u).mtimeMs,T=!g||g.is_self_signed?1:g.file_timestamp??g.__updatedtime__;if(g&&y<=T){y<T&&ht.info(`Certificate ${_} at ${u} is older (${new Date(y)}) than the certificate in the database (${T>1?new Date(T):"only self signed certificate available"})`);return}r=Jr.put({name:_,uses:["https",...n.includes("operations")?["operations"]:[]],ciphers:i.ciphers,certificate:h,private_key_name:c,is_authority:l,hostnames:m,fileTimestamp:y,details:{issuer:p.issuer.replace(/\n/g," "),subject:p.subject?.replace(/\n/g," "),subject_alt_name:p.subjectAltName,serial_number:p.serialNumber,valid_from:p.validFrom,valid_to:p.validTo}})},l?"certificate authority":"certificate")}}}}}return r}o($q,"loadCertificates");function Mq(e,t,r){let n,s=o((i,a)=>{try{let c=a.mtimeMs;c&&c!==n&&(n&&Gq&&ht.warn(`Reloading ${r}:`,i),n=c,t(Wq(i)))}catch(c){ht.error(`Error loading ${r}:`,i,c)}},"loadFile");Un.existsSync(e)?s(e,kq(e)):ht.error(`${r} file not found:`,e),ice(e,{persistent:!1}).on("change",s)}o(Mq,"loadAndWatch");function ww(){let e=fce();if(e==null){let t=Vh[0];return ht.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return Hq(e)}o(ww,"getHost");function AS(){let e=bS();if(e==null){let t=Vh[0];return ht.info("replication url is missing from harperdb-config.yaml, using default host"+t),t}return e}o(AS,"getCommonName");async function gce(){let e=await Kh(),t=wt.certificateFromPem(e.options.cert),r=wt.privateKeyFromPem(e.options.key);ht.info("Creating CSR with cert named:",e.name);let n=wt.createCertificationRequest();n.publicKey=t.publicKey;let s=[{name:"commonName",value:AS()},...Lw];ht.info("Creating CSR with subject",s),n.setSubject(s);let i=[{name:"unstructuredName",value:"HarperDB, Inc."},{name:"extensionRequest",extensions:Vq()}];return ht.info("Creating CSR with attributes",i),n.setAttributes(i),n.sign(r),Cd.pki.certificationRequestToPem(n)}o(gce,"createCsr");function Vq(){let e=Vh.includes(AS())?Vh:[...Vh,AS()];return e.includes(ww())||e.push(ww()),[{name:"basicConstraints",cA:!1,critical:!0},{name:"keyUsage",digitalSignature:!0,keyEncipherment:!0,critical:!0},{name:"extKeyUsage",serverAuth:!0,clientAuth:!0},{name:"nsCertType",client:!0,server:!0},{name:"subjectAltName",altNames:e.map(t=>Uq.isIP(t)?{type:7,ip:t}:{type:2,value:t})}]}o(Vq,"certExtensions");async function Sce(e){let t={},r=hr.join(is.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME);if(e.csr){let n,s;Xa();for await(let d of Jr.search([]))if(d.is_authority&&!d.details.issuer.includes("HarperDB-Certificate-Authority")){if(Ja.has(d.private_key_name)){n=Ja.get(d.private_key_name),s=d;break}else if(d.private_key_name&&await Un.exists(hr.join(r,d.private_key_name))){n=Un.readFile(hr.join(r,d.private_key_name)),s=d;break}}if(!n){let d=await Ow();s=d.ca,n=d.private_key}n=wt.privateKeyFromPem(n),t.signingCA=s.certificate;let i=wt.certificateFromPem(s.certificate);ht.info("Signing CSR with cert named",s.name);let a=wt.certificationRequestFromPem(e.csr);try{a.verify()}catch(d){return ht.error(d),new Error("Error verifying CSR: "+d.message)}let 
c=Cd.pki.createCertificate();c.serialNumber="0"+Math.random().toString().slice(2,9),c.validity.notBefore=new Date;let l=new Date;c.validity.notAfter=l,c.validity.notAfter.setDate(l.getDate()+Pw),ht.info("sign cert setting validity:",c.validity),ht.info("sign cert setting subject from CSR:",a.subject.attributes),c.setSubject(a.subject.attributes),ht.info("sign cert setting issuer:",i.subject.attributes),c.setIssuer(i.subject.attributes);let u=a.getAttribute({name:"extensionRequest"}).extensions;ht.info("sign cert adding extensions from CSR:",u),c.setExtensions(u),c.publicKey=a.publicKey,c.sign(n,Cd.md.sha256.create()),t.certificate=wt.certificateToPem(c)}else ht.info("Sign cert did not receive a CSR from:",e.url,"only the CA will be returned");return t}o(Sce,"signCertificate");async function Tce(e,t){await Pd({name:bS(),uses:["https","wss"],certificate:e,private_key_name:"privateKey.pem",is_authority:!1,is_self_signed:!0}),await Pd({name:t.subject.getField("CN").value,uses:["https","wss"],certificate:wt.certificateToPem(t),private_key_name:"privateKey.pem",is_authority:!0,is_self_signed:!0})}o(Tce,"createCertificateTable");async function Pd(e){let t=new ko(e.certificate);e.details={issuer:t.issuer.replace(/\n/g," "),subject:t.subject?.replace(/\n/g," "),subject_alt_name:t.subjectAltName,serial_number:t.serialNumber,valid_from:t.validFrom,valid_to:t.validTo},Xa(),await Jr.patch(e)}o(Pd,"setCertTable");async function Dw(){let e=await Iw("rsa",{modulusLength:4096,publicKeyEncoding:{type:"spki",format:"pem"},privateKeyEncoding:{type:"pkcs8",format:"pem"}});return{publicKey:wt.publicKeyFromPem(e.publicKey),privateKey:wt.privateKeyFromPem(e.privateKey)}}o(Dw,"generateKeys");async function Mw(e,t,r){let n=wt.createCertificate();if(!t){let a=await Kh();t=wt.certificateFromPem(a.options.cert).publicKey}n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+Pw);let i=[{name:"commonName",value:AS()},...Lw];return n.setSubject(i),n.setIssuer(r.subject.attributes),n.setExtensions(Vq()),n.sign(e,Cd.md.sha256.create()),wt.certificateToPem(n)}o(Mw,"generateCertificates");async function Ow(){let e=await Qq(),t;for(let r of e){if(!r.is_authority)continue;let n=await jq(r.private_key_name);if(r.private_key_name&&n&&new ko(r.certificate).checkPrivateKey(xq(n))){ht.trace(`CA named: ${r.name} found with matching private key`),t={ca:r,private_key:n};break}}if(t)return t;ht.trace("No CA found with matching private key")}o(Ow,"getCertAuthority");async function Kq(e,t,r=!0){let n=wt.createCertificate();n.publicKey=t,n.serialNumber="0"+Math.random().toString().slice(2,9),n.validity.notBefore=new Date;let s=new Date;n.validity.notAfter=s,n.validity.notAfter.setDate(s.getDate()+Pw);let i=[{name:"commonName",value:`HarperDB-Certificate-Authority-${is.get(Ul.REPLICATION_HOSTNAME)??Hq(is.get(Ul.REPLICATION_URL))??Bq().split("-")[0]}`},...Lw];n.setSubject(i),n.setIssuer(i),n.setExtensions([{name:"basicConstraints",cA:!0,critical:!0},{name:"keyUsage",keyCertSign:!0,critical:!0}]),n.sign(e,Cd.md.sha256.create());let a=hr.join(is.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),c=hr.join(a,_i.PRIVATEKEY_PEM_NAME);return r&&await Un.writeFile(c,wt.privateKeyToPem(e)),n}o(Kq,"generateCertAuthority");async function Rce(){let{privateKey:e,publicKey:t}=await Dw(),r=await Kq(e,t),n=await Mw(e,t,r);await Tce(n,r),Yq()}o(Rce,"generateCertsKeys");async function yce(){let e=await 
Mw(wt.privateKeyFromPem(_i.CERTIFICATE_VALUES.key),void 0,wt.certificateFromPem(_i.CERTIFICATE_VALUES.cert)),t=hr.join(is.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),r=hr.join(t,_i.NATS_CERTIFICATE_PEM_NAME);await Un.exists(r)||await Un.writeFile(r,e);let n=hr.join(t,_i.NATS_CA_PEM_NAME);await Un.exists(n)||await Un.writeFile(n,_i.CERTIFICATE_VALUES.cert)}o(yce,"createNatsCerts");async function Ace(){Xa();for await(let e of Jr.search([{attribute:"is_self_signed",value:!0}]))await Jr.delete(e.name);await vw()}o(Ace,"renewSelfSigned");async function vw(){mce(),await $q(),Xa();let e=await Ow();if(!e){ht.notify("A matching Certificate Authority and key was not found. A new CA will be created in advance, so it's available if needed.");let r=o(u=>{try{return{key:wt.privateKeyFromPem(Un.readFileSync(u)),keyPath:u}}catch(d){return ht.warn(`Failed to parse private key from ${u}:`,d.message),{key:null,keyPath:u}}},"tryToParseKey"),n=is.get(Ul.TLS),s,i;if(Array.isArray(n)){for(let u of n)if(u.privateKey){let d=r(u.privateKey);if(s=d.key,i=d.keyPath,d.key)break}}else{let u=is.get(Ul.TLS_PRIVATEKEY),d=r(u);s=d.key,i=d.keyPath}let a=hr.join(is.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),c=Fq(a,i);s||(ht.warn("Unable to parse the TLS key",i,"A new key will be generated and used to create Certificate Authority"),{privateKey:s}=await Dw(),Un.existsSync(hr.join(a,_i.PRIVATEKEY_PEM_NAME))&&(c=`privateKey${Bq().split("-")[0]}.pem`),await Un.writeFile(hr.join(a,c),wt.privateKeyToPem(s)));let l=await Kq(s,wt.setRsaPublicKey(s.n,s.e),!1);await Pd({name:l.subject.getField("CN").value,uses:["https"],certificate:wt.certificateToPem(l),private_key_name:c,is_authority:!0,is_self_signed:!0})}if(!await Kh()){let r=bS();ht.notify(`A suitable replication certificate was not found, creating new self singed cert named: ${r}`),e=e??await Ow();let n=wt.certificateFromPem(e.ca.certificate),s=n.publicKey,i=await Mw(wt.privateKeyFromPem(e.private_key),s,n);await Pd({name:r,uses:["https","operations","wss"],certificate:i,is_authority:!1,private_key_name:e.ca.private_key_name,is_self_signed:!0})}}o(vw,"reviewSelfSignedCert");function Yq(){let e=lce(Object.keys(Ds.CONFIG_PARAM_MAP),!0),t=hr.join(is.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME),r=hr.join(t,_i.PRIVATEKEY_PEM_NAME),n=hr.join(t,_i.NATS_CERTIFICATE_PEM_NAME),s=hr.join(t,_i.NATS_CA_PEM_NAME),i=Ds.CONFIG_PARAMS,a={[i.TLS_PRIVATEKEY]:e[i.TLS_PRIVATEKEY.toLowerCase()]?e[i.TLS_PRIVATEKEY.toLowerCase()]:r};e[i.TLS_CERTIFICATE.toLowerCase()]&&(a[i.TLS_CERTIFICATE]=e[i.TLS_CERTIFICATE.toLowerCase()]),e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.TLS_CERTIFICATEAUTHORITY]=e[i.TLS_CERTIFICATEAUTHORITY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATE]=e[i.OPERATIONSAPI_TLS_CERTIFICATE.toLowerCase()]),e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_PRIVATEKEY]=e[i.OPERATIONSAPI_TLS_PRIVATEKEY.toLowerCase()]),e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]&&(a[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY]=e[i.OPERATIONSAPI_TLS_CERTIFICATEAUTHORITY.toLowerCase()]),(e[i.CLUSTERING_ENABLED.toLowerCase()]||e.clustering)&&(a[i.CLUSTERING_TLS_CERTIFICATE]=e[i.CLUSTERING_TLS_CERTIFICATE.toLowerCase()]??n,a[i.CLUSTERING_TLS_CERT_AUTH]=e[i.CLUSTERING_TLS_CERT_AUTH.toLowerCase()]??s,a[i.CLUSTERING_TLS_PRIVATEKEY]=e[i.CLUSTERING_TLS_PRIVATEKEY.toLowerCase()]??r),Nw.updateConfigValue(void 0,void 0,a,!1,!0)}o(Yq,"updateConfigCert");function Wq(e){return e.startsWith("-----BEGIN")?e:hce(e,"utf8")}o(Wq,"readPEM");var 
vq=yS.createSecureContext;yS.createSecureContext=function(e){if(!e.cert||!e.key)return vq(e);let t={...e};delete t.key,delete t.cert;let r=vq(t);return r.context.setCert(e.cert),r.context.setKey(e.key,void 0),r};var bce=qq.prototype._init;qq.prototype._init=function(e,t){bce.call(this,e,t);let r=this;this._handle.oncertcb=function(n){let s=n.servername;r._SNICallback(s,(i,a)=>{this.sni_context=a?.context||a,this.certCbDone()})}};var vl=new Map;function zq(e,t){let r=new Map,n,s=!1;return i.initialize=a=>i.ready?i.ready:(a&&(a.secureContexts=r,a.secureContextsListeners=[]),i.ready=new Promise((c,l)=>{async function u(){try{r.clear(),vl.clear();let d=0;if(RS===void 0){c();return}for await(let f of RS.system.hdb_certificate.search([])){let m=f.certificate,h=new ko(m);f.is_authority&&(h.asString=m,vl.set(h.subject,m))}for await(let f of RS.system.hdb_certificate.search([]))try{if(f.is_authority)continue;let m=e==="operations-api",h=f.is_self_signed?1:2;m&&f.uses?.includes?.("operations")&&(h+=1);let p=await jq(f.private_key_name),_=f.certificate,g=new ko(_);if(vl.has(g.issuer)&&(_+=`
  `+vl.get(g.issuer)),!p||!_)throw new Error("Missing private key or certificate for secure server");let y={ciphers:f.ciphers,ticketKeys:pce(),availableCAs:vl,ca:t&&Array.from(vl.values()),cert:_,key:p,key_file:f.private_key_name,is_self_signed:f.is_self_signed};a&&(y.sessionIdContext=a.sessionIdContext);let T=yS.createSecureContext(y);T.name=f.name,T.options=y,T.quality=h,T.certificateAuthorities=Array.from(vl),T.certStart=_.toString().slice(0,100);let R=f.hostnames??xw(g);Array.isArray(R)||(R=[R]);let N;for(let O of R)if(O){O[0]==="*"&&(s=!0,O=O.slice(1)),O===ww()&&(h+=2),Uq.isIP(O)&&(N=!0);let F=r.get(O)?.quality??0;h>F&&r.set(O,T)}else ja.error("No hostname found for certificate at",yS.certificate);ja.trace("Adding TLS",T.name,"for",a.ports||"client","cert named",f.name,"hostnames",R,"quality",h,"best quality",d),h>d&&(i.defaultContext=n=T,d=h,a&&(a.defaultContext=T))}catch(m){ja.error("Error applying TLS for",f.name,m)}a?.secureContextsListeners.forEach(f=>f()),c(n)}catch(d){l(d)}}o(u,"updateTLS"),RS?.system.hdb_certificate.subscribe({listener:o(()=>setTimeout(()=>u(),1500).unref(),"listener"),omitCurrent:!0}),u()})),i;function i(a,c){ja.info("TLS requested for",a||"(no SNI)");let l=a;for(;;){let d=r.get(l);if(d)return ja.debug("Found certificate for",a,d.certStart),d.updatedContext&&(d=d.updatedContext),c(null,d);if(s&&l){let f=l.indexOf(".",1);f<0?l="":l=l.slice(f)}else break}a?ja.debug("No certificate found to match",a,"using the default certificate"):ja.debug("No SNI, using the default certificate",n?.name);let u=n;u?u.updatedContext&&(u=u.updatedContext):ja.info("No default certificate found"),c(null,u)}o(i,"SNICallback")}o(zq,"createTLSSelector");async function jq(e){let t=Ja.get(e);return!t&&e?await Un.readFile(hr.join(is.get(Ul.ROOTPATH),Ds.LICENSE_KEY_DIR_NAME,e),"utf8"):t}o(jq,"getPrivateKeyByName");async function Qq(){Xa();let e=[];for await(let t of Jr.search([]))e.push(t);return e}o(Qq,"listCertificates");async function Ice(e){let t=Cw(e,Ei.object({name:Ei.string().required(),certificate:Ei.string().required(),is_authority:Ei.boolean().required(),private_key:Ei.string(),hosts:Ei.array(),uses:Ei.array()}));if(t)throw new Qa(t.message);let{name:r,certificate:n,private_key:s,is_authority:i}=e,a=new ko(n),c=!1,l=!1,u;for(let[h,p]of Ja)!s&&!c&&a.checkPrivateKey(xq(p))&&(c=!0,u=h),s&&s===p&&(l=!0,u=h);if(!i&&!s&&!c)throw new Qa("A suitable private key was not found for this certificate");let d;if(!r){try{d=Uw(a)}catch(h){ht.error(h)}if(d==null)throw new Qa("Error extracting certificate host name, please provide a name parameter")}let f=Nce(r??d);s&&!c&&!l&&(await Un.writeFile(hr.join(is.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME,f+".pem"),s),Ja.set(f,s));let m={name:r??d,certificate:n,is_authority:i,hosts:e.hosts,uses:e.uses};return(!i||i&&u||i&&s)&&(m.private_key_name=u??f+".pem"),e.ciphers&&(m.ciphers=e.ciphers),await Pd(m),"Successfully added certificate: "+f}o(Ice,"addCertificate");function Nce(e){return e.replace(/[^a-z0-9\.]/gi,"-")}o(Nce,"sanitizeName");async function wce(e){let t=Cw(e,Ei.object({name:Ei.string().required()}));if(t)throw new Qa(t.message);let{name:r}=e;Xa();let n=await Jr.get(r);if(!n)throw new Qa(r+" not found");let{private_key_name:s}=n;if(s){let i=Array.from(await Jr.search([{attribute:"private_key_name",value:s}]));i.length===1&&i[0].name===r&&(ht.info("Removing private key named",s),await Un.remove(hr.join(is.getHdbBasePath(),Ds.LICENSE_KEY_DIR_NAME,s)))}return await Jr.delete(r),"Successfully removed "+r}o(wce,"removeCertificate");function Uw(e){let 
t=e.subject?.match(/CN=(.*)/)?.[1];return t||xw(e)[0]}o(Uw,"getPrimaryHostName");function xw(e){if(e.subjectAltName)return e.subjectAltName.split(",").map(r=>{let n=r.indexOf(":");if(r=r.slice(n+1),r=r.trim(),r[0]==='"')try{r=JSON.parse(r)}catch{}return r.indexOf("=")>-1?r.match(/CN=([^,]*)/)?.[1]:r}).filter(r=>r);let t=certObj.subject?.match(/CN=(.*)/)?.[1];return t?[t]:[]}o(xw,"hostnamesFromCert");async function Oce(e){if(e.bypass_auth!==!0)throw new Qa("Unauthorized","401");let t=Cw(e,Ei.object({name:Ei.string().required()}));if(t)throw new Qa(t.message);let{name:r}=e;if(r===".jwtPrivate")return(await Lq()).privateKey;if(r===".jwtPublic")return(await Lq()).publicKey;if(Ja.get(r))return Ja.get(e.name);throw new Qa("Key not found")}o(Oce,"getKey");function Cce(e){return[e.subject?.CN,...e.subjectaltname.split(",").filter(t=>t.trim().startsWith("DNS:")).map(t=>t.trim().substring(4))]}o(Cce,"getHostnamesFromCertificate")});var S$={};Oe(S$,{CONFIRMATION_STATUS_POSITION:()=>E$,LATENCY_POSITION:()=>LS,NodeReplicationConnection:()=>Md,OPERATION_REQUEST:()=>kw,RECEIVED_TIME_POSITION:()=>qw,RECEIVED_VERSION_POSITION:()=>Gw,RECEIVING_STATUS_POSITION:()=>$w,RECEIVING_STATUS_RECEIVING:()=>g$,RECEIVING_STATUS_WAITING:()=>_$,SENDING_TIME_POSITION:()=>Yh,createWebSocket:()=>DS,databaseSubscriptions:()=>ec,replicateOverWS:()=>Wh,tableUpdateListeners:()=>Kw});async function DS(e,t){let{authorization:r,rejectUnauthorized:n}=t||{},s=Ze(),i;if(e==null)throw new TypeError(`Invalid URL: Expected a string URL for node "${s}" but received ${e}`);if(e.includes("wss://")){if(!Fw){let l=(0,d$.createTLSSelector)("operations-api"),u={secureContexts:null};await l.initialize(u),Fw=u.secureContexts}if(i=Fw.get(s),i&&ae.debug?.("Creating web socket for URL",e,"with certificate named:",i.name),!i&&n!==!1)throw new Error("Unable to find a valid certificate to use for replication to connect to "+e)}let a={};r&&(a.Authorization=r);let c={headers:a,localAddress:s?.startsWith("127.0")?s:void 0,servername:(0,m$.isIP)(t?.serverName)?void 0:t?.serverName,noDelay:!0,highWaterMark:128*1024,rejectUnauthorized:n!==!1,secureContext:void 0};return i&&(PS?.caCount!==qo.size&&(PS=f$.createSecureContext({...i.options,ca:[...qo,...i.options.availableCAs.values()]}),PS.caCount=qo.size),c.secureContext=PS),new l$.WebSocket(e,"harperdb-replication-v1",c)}function Wh(e,t,r){let n=t.port||t.securePort,s=xl.pid%1e3+"-"+u$.threadId+(n?"s:"+n:"c:"+t.url?.slice(-4))+" "+Math.random().toString().slice(2,3);ae.debug?.(s,"Initializing replication connection",r);let i=0,a=Buffer.allocUnsafeSlow(1024),c=0,l=new DataView(a.buffer,0,1024),u=t.database,d=t.databaseSubscriptions||ec,f,m,h=!1,p=t.subscription;p?.then&&p.then(E=>{p=E,p.auditStore&&(f=p.auditStore)});let _=t.tables||u&&st()[u],g;if(!r){ae.error?.(s,"No authorization provided"),Ss(1008,"Unauthorized");return}let y=new Map,T=[];g=r.name,g&&t.connection&&(t.connection.nodeName=g);let R,N,O,F,Z,G,Y,q=6e4,K,ce=0,le=0,se=0,pe=c$.default.get(U.REPLICATION_BLOBTIMEOUT)??12e4,Ne=new Map,Ue=[],xe=0,Rr;if(t.url){let E=o(()=>{Z&&le===e._socket?.bytesRead&&se===e._socket?.bytesWritten?e.terminate():(Z=performance.now(),e.ping(),le=e._socket?.bytesRead,se=e._socket?.bytesWritten)},"sendPing");O=setInterval(E,o$).unref(),E()}else Jt();e._socket?.setMaxListeners(200);function Jt(){clearTimeout(F),le=e._socket?.bytesRead,se=e._socket?.bytesWritten,F=setTimeout(()=>{le===e._socket?.bytesRead&&se===e._socket?.bytesWritten&&(ae.warn?.(`Timeout waiting for ping from ${g}, terminating connection and 
reconnecting`),e.terminate())},o$*2).unref()}o(Jt,"resetPingTimer");function kt(){if(!(!g||!u))return m||(m=Id(f,u,g)),m}o(kt,"getSharedStatus"),u&&Sa(u);let Xt,Uf,Pc=[],Gt=[],xf,Bf=[],wE=[],OE=[],vy=150,Ff=25,Pe=0,CE=0,Hf=!1,_o,Lr,yr,kf;e.on("message",E=>{ce=performance.now();try{let S=E.dataView=new Wc(E.buffer,E.byteOffset,E.byteLength);if(E[0]>127){let P=(0,et.decode)(E),[L,D,H]=P;switch(L){case Xq:{if(D){if(g){if(g!==D){ae.error?.(s,`Node name mismatch, expecting to connect to ${g}, but peer reported name as ${D}, disconnecting`),e.send((0,et.encode)([Ld])),Ss(1008,"Node name mismatch");return}}else if(g=D,t.connection?.tentativeNode){let B=t.connection.tentativeNode;B.name=g,t.connection.tentativeNode=null,Ho(g,B)}if(t.connection&&(t.connection.nodeName=g),ae.debug?.(s,"received node name:",g,"db:",u??P[2]),!u)try{Sa(u=P[2]),u==="system"&&(Xt=Wa(t,(B,de)=>{pu(de)&&Ta(de)}),e.on("close",()=>{Xt?.remove()}))}catch(B){ae.warn?.(s,"Error setting database",B),e.send((0,et.encode)([Ld])),Ss(1008,B.message);return}Dr()}break}case s$:{ae.debug?.(s,"Received table definitions for",D.map(B=>B.table));for(let B of D){let de=P[2];B.database=de;let me;pu(de)&&(de==="system"?ke[de]?.[B.table]||(me=V(B,ke[de]?.[B.table])):me=V(B,ke[de]?.[B.table]),f||(f=me?.auditStore),_||(_=st()?.[de]))}break}case Ld:Ss();break;case kw:try{let B=r?.replicates||r?.subscribers||r?.name;ae.debug?.("Received operation request",D,"from",g),server.operation(D,{user:r},!B).then(de=>{Array.isArray(de)&&(de={results:de}),de.requestId=D.requestId,e.send((0,et.encode)([NS,de]))},de=>{e.send((0,et.encode)([NS,{requestId:D.requestId,error:(0,Dd.errorToString)(de)}]))})}catch(B){e.send((0,et.encode)([NS,{requestId:D.requestId,error:(0,Dd.errorToString)(B)}]))}break;case NS:let{resolve:C,reject:x}=y.get(D.requestId);D.error?x(new Error(D.error)):C(D),y.delete(D.requestId);break;case Bw:let z=P[3];if(!_){u?ae.error?.(s,"No database found for",u):ae.error?.(s,"Database name never received"),Ss();return}let ne=_[z];ne=V({table:z,database:u,attributes:D.attributes,schemaDefined:D.schemaDefined},ne),Pc[H]={name:z,decoder:new et.Packr({useBigIntExtension:!0,randomAccessStructure:!0,freezeData:!0,typedStructs:D.typedStructs,structures:D.structures}),getEntry(B){return ne.primaryStore.getEntry(B)},rootStore:ne.primaryStore.rootStore};break;case Zq:kf=f?Rq(D,f):new Map,xf=P[2],ae.debug?.(s,`Acknowledged subscription request, receiving messages for nodes: ${xf}`);break;case e$:let re=H;OE[re]=D;break;case n$:kt()[E$]=D,ae.trace?.(s,"received and broadcasting committed update",D),kt().buffer.notify();break;case r$:R=D,p.send({type:"end_txn",localTime:R,remoteNodeIds:T});break;case wS:{let B=P[1],{fileId:de,size:me,finished:Se,error:ee}=B,j=Ne.get(de);ae.debug?.("Received blob",de,"has stream",!!j,"connectedToBlob",!!j?.connectedToBlob,"length",P[2].length,"finished",Se),j||(j=new Hw.PassThrough,j.expectedSize=me,Ne.set(de,j)),j.lastChunk=Date.now();let he=P[2];it(he.byteLength,"bytes-received",`${g}.${u}`,"replication","blob");try{Se?(ee?(j.on("error",()=>{}),j.destroy(new Error("Blob error: "+ee+" for record "+(j.recordId??"unknown")+" from "+g))):j.end(he),j.connectedToBlob&&Ne.delete(de)):j.write(he)}catch(Te){ae.error?.(`Error receiving blob for ${j.recordId} from ${g} and streaming to storage`,Te),Ne.delete(de)}break}case t$:{let B=D,de;try{let me=P[3],Se=Gt[H]||(Gt[H]=_[P[4]]);if(!Se)return ae.warn?.("Unknown table id trying to handle record request",H);let 
ee=Se.primaryStore.getBinaryFast(Symbol.for("structures")),j=ee?.length??0;if(j>0&&j!==CE){CE=j;let Te=(0,et.decode)(ee);e.send((0,et.encode)([Bw,{typedStructs:Te.typed,structures:Te.named},H,Se.tableName]))}let he=Se.primaryStore.getBinaryFast(me);if(he){let Te=Se.primaryStore.decoder.decode(he,{valueAsBuffer:!0}),fe=ut||{};fe.version=(0,h$.getLastVersion)(),ut&&ut[Pu]&Vr&&(Te=Buffer.from(Te),gm(()=>Se.primaryStore.decoder.decode(he),We=>ga(We,me),Se.primaryStore.rootStore)),de=(0,et.encode)([IS,B,{value:Te,expiresAt:fe.expiresAt,version:fe.version,residencyId:fe.residencyId,nodeId:fe.nodeId,user:fe.user}])}else de=(0,et.encode)([IS,B])}catch(me){de=(0,et.encode)([IS,B,{error:me.message}])}e.send(de);break}case IS:{let{resolve:B,reject:de,tableId:me,key:Se}=y.get(P[1]),ee=P[2];if(ee?.error)de(new Error(ee.error));else if(ee){let j;f_(()=>{let he=Pc[me].decoder.decode(ee.value);ee.value=he,ee.key=Se,B(ee)||j&&setTimeout(()=>j.forEach(l_),6e4).unref()},f?.rootStore,he=>{let Te=Lc(he,Se);return j||(j=[]),j.push(Te),Te})}else B();y.delete(P[1]);break}case Jq:{yr=D;let B,de,me=!1;if(p){if(u!==p.databaseName&&!p.then){ae.error?.("Subscription request for wrong database",u,p.databaseName);return}}else p=d.get(u);if(ae.debug?.(s,"received subscription request for",u,"at",yr),!p){let Ee;p=new Promise(tt=>{ae.debug?.("Waiting for subscription to database "+u),Ee=tt}),p.ready=Ee,ec.set(u,p)}if(r.name)de=Kt().subscribe(r.name),de.then(async Ee=>{B=Ee;for await(let tt of B){let rt=tt.value;if(!(rt?.replicates===!0||rt?.replicates?.receives||rt?.subscriptions?.some(cr=>(cr.database||cr.schema)===u&&cr.publish!==!1))){me=!0,e.send((0,et.encode)([Ld])),Ss(1008,`Unauthorized database subscription to ${u}`);return}}},Ee=>{ae.error?.(s,"Error subscribing to HDB nodes",Ee)});else if(!(r?.role?.permission?.super_user||r.replicates)){e.send((0,et.encode)([Ld])),Ss(1008,`Unauthorized database subscription to ${u}`);return}if(Lr&&(ae.debug?.(s,"stopping previous subscription",u),Lr.emit("close")),yr.length===0)return;let Se=yr[0],ee=o(Ee=>{if(Ee&&(Se.replicateByDefault?!Se.tables.includes(Ee.tableName):Se.tables.includes(Ee.tableName)))return{table:Ee}},"tableToTableEntry"),j={txnTime:0},he,Te,fe=1/0,We,Et=o((Ee,tt)=>{if(Ee.type==="end_txn"){j.txnTime&&(a[i]!==66&&ae.error?.("Invalid encoding of message"),Eu(9),Eu(A_),Mc(We=tt),dt()),i=c,j.txnTime=0;return}let rt=Ee.nodeId,cr=Ee.tableId,Mt=Te[cr];if(!Mt&&(Mt=Te[cr]=ee(p.tableById[cr]),!Mt))return ae.debug?.("Not subscribed to table",cr);let Ts=Mt.table,vt=Ts.primaryStore,Js=vt.encoder;(Ee.extendedType&O_||!Js.typedStructs)&&(Js._mergeStructures(Js.getStructures()),Js.typedStructs&&(Js.lastTypedStructuresLength=Js.typedStructs.length));let _u=he[rt];if(!(_u&&_u.startTime<tt&&(!_u.endTime||_u.endTime>tt)))return CS&&ae.trace?.(s,"skipping replication update",Ee.recordId,"to:",g,"from:",rt,"subscribed:",he),eM();CS&&ae.trace?.(s,"sending replication update",Ee.recordId,"to:",g,"from:",rt,"subscribed:",he);let Uy=Ee.version;j.txnTime!==Uy&&(j.txnTime&&(CS&&ae.trace?.(s,"new txn time, sending queued txn",j.txnTime),a[i]!==66&&ae.error?.("Invalid encoding of message"),dt()),j.txnTime=Uy,i=c,Mc(Uy));let vc=Ee.residencyId,xy=hu(vc,Ts),LE;if(xy&&!xy.includes(g)){let Xs=hu(Ee.previousResidencyId,Ts);if(Xs&&!Xs.includes(g)&&(Ee.type==="put"||Ee.type==="patch")||Ts.getResidencyById)return eM();let qf=Ee.recordId;ae.trace?.(s,"sending invalidation",qf,g,"from",rt);let $f=0;vc&&($f|=zc),Ee.previousResidencyId&&($f|=jc);let Hy,DE=null;for(let tM in 
Ts.indices){if(!DE){if(Hy=Ee.getValue(vt,!0),!Hy)break;DE={}}DE[tM]=Hy[tM]}LE=Qc(Ee.version,cr,qf,null,rt,Ee.user,Ee.type==="put"||Ee.type==="patch"?"invalidate":Ee.type,Js.encode(DE),$f,vc,Ee.previousResidencyId,Ee.expiresAt)}function eM(){return ae.trace?.(s,"skipping audit record",Ee.recordId),G||(G=setTimeout(()=>{G=null,(We||0)+i$/2<fe&&(CS&&ae.trace?.(s,"sending skipped sequence update",fe),e.send((0,et.encode)([r$,fe])))},i$).unref()),new Promise(setImmediate)}o(eM,"skipAuditRecord");let By=Js.typedStructs,Fy=Js.structures;if((By?.length!=Mt.typed_length||Fy?.length!=Mt.structure_length)&&(Mt.typed_length=By?.length,Mt.structure_length=Fy.length,ae.debug?.(s,"send table struct",Mt.typed_length,Mt.structure_length),Mt.sentName||(Mt.sentName=!0),e.send((0,et.encode)([Bw,{typedStructs:By,structures:Fy,attributes:Ts.attributes,schemaDefined:Ts.schemaDefined},cr,Mt.table.tableName]))),vc&&!wE[vc]&&(e.send((0,et.encode)([e$,xy,vc])),wE[vc]=!0),LE)Eu(LE.length),Dc(LE);else{let Xs=Ee.encoded;Ee.extendedType&Vr&&gm(()=>Ee.getValue(vt),$f=>ga($f,Ee.recordId),vt.rootStore);let qf=Xs[0]===66?8:0;Eu(Xs.length-qf),Dc(Xs,qf),ae.trace?.("wrote record",Ee.recordId,"length:",Xs.length)}return e._socket.writableNeedDrain?new Promise(Xs=>{ae.debug?.(`Waiting for remote node ${g} to allow more commits ${e._socket.writableNeedDrain?"due to network backlog":"due to requested flow directive"}`),e._socket.once("drain",Xs)}):xe>Ff?new Promise(Xs=>{Rr=Xs}):new Promise(setImmediate)},"sendAuditRecord"),dt=o(()=>{c-i>8?(e.send(a.subarray(i,c)),ae.debug?.(s,"Sent message, size:",c-i),it(c-i,"bytes-sent",`${g}.${u}`,"replication","egress")):ae.debug?.(s,"skipping empty transaction")},"sendQueuedData");Lr=new Vw.EventEmitter,Lr.once("close",()=>{me=!0,B?.end()});for(let{startTime:Ee}of yr)Ee<fe&&(fe=Ee);(de||Promise.resolve()).then(async()=>{p=await p,f=p.auditStore,Te=p.tableById.map(ee),he=[];for(let{name:tt,startTime:rt,endTime:cr}of yr){let Mt=_S(tt,f);ae.debug?.("subscription to",tt,"using local id",Mt,"starting",rt),he[Mt]={startTime:rt,endTime:cr}}Ta(u),Xt||(Xt=Nl(tt=>{tt.databaseName===u&&Ta(u)}),Uf=Ah(tt=>{tt===u&&(e.send((0,et.encode)([Ld])),Ss())}),e.on("close",()=>{Xt?.remove(),Uf?.remove()})),e.send((0,et.encode)([Zq,Fh(p.auditStore),yr.map(({name:tt})=>tt)]));let Ee=!0;do{isFinite(fe)||(ae.warn?.("Invalid sequence id "+fe),Ss(1008,"Invalid sequence id"+fe));let tt;if(Ee&&!me&&(Ee=!1,fe===0)){ae.info?.("Replicating all tables to",g);let rt=fe,cr=MS(f);for(let Mt in _){if(!ee(Mt))continue;let Ts=_[Mt];for(let vt of Ts.primaryStore.getRange({snapshot:!1,versions:!0})){if(me)return;if(vt.localTime>=fe){ae.trace?.(s,"Copying record from",u,Mt,vt.key,vt.localTime),rt=Math.max(vt.localTime,rt),tt=!0,kt()[Yh]=1;let Js=Qc(vt.version,Ts.tableId,vt.key,null,cr,null,"put",gm(()=>Ts.primaryStore.encoder.encode(vt.value),_u=>ga(_u,vt.key)),vt.metadataFlags&-256,vt.residencyId,null,vt.expiresAt);await Et({recordId:vt.key,tableId:Ts.tableId,type:"put",getValue(){return vt.value},encoded:Js,version:vt.version,residencyId:vt.residencyId,nodeId:cr,extendedType:vt.metadataFlags},vt.localTime)}}}tt&&Et({type:"end_txn"},fe),kt()[Yh]=0,fe=rt}for(let{key:rt,value:cr}of f.getRange({start:fe||1,exclusiveStart:!0,snapshot:!1})){if(me)return;let Mt=bt(cr);ae.debug?.("sending audit record",new Date(rt)),kt()[Yh]=rt,fe=rt,await Et(Mt,rt),Lr.startTime=rt,tt=!0}tt&&Et({type:"end_txn"},fe),kt()[Yh]=0,await kU(f)}while(!me)}).catch(Ee=>{ae.error?.(s,"Error handling subscription to node",Ee),Ss(1008,"Error handling subscription to 
node")});break}}return}S.position=8;let A=!0,b,I;do{kt();let P=S.readInt();if(P===9&&S.getUint8(S.position)==A_){S.position++,R=I=S.readFloat64(),m[Gw]=R,m[qw]=Date.now(),m[$w]=_$,ae.trace?.("received remote sequence update",R,u);break}let L=S.position,D=bt(E,L,L+P),H=Pc[D.tableId];H||ae.error?.(`No table found with an id of ${D.tableId}`);let C;D.residencyId&&(C=OE[D.residencyId],ae.trace?.(s,"received residency list",C,D.type,D.recordId));try{let x=D.recordId;f_(()=>{b={table:H.name,id:D.recordId,type:D.type,nodeId:kf.get(D.nodeId),residencyList:C,timestamp:D.version,value:D.getValue(H),user:D.user,beginTxn:A,expiresAt:D.expiresAt}},f?.rootStore,z=>Lc(z,x))}catch(x){throw x.message+="typed structures for current decoder"+JSON.stringify(H.decoder.typedStructs),x}A=!1,ae.trace?.(s,"received replication message",D.type,"id",b.id,"version",new Date(D.version),"nodeId",b.nodeId),m[Gw]=D.version,m[qw]=Date.now(),m[$w]=g$,p.send(b),S.position=L+P}while(S.position<E.byteLength);Pe++,it(E.byteLength,"bytes-received",`${g}.${u}.${b?.table||"unknown_table"}`,"replication","ingest"),Pe>vy&&!Hf&&(Hf=!0,e.pause(),ae.debug?.(`Commit backlog causing replication back-pressure, requesting that ${g} pause replication`)),p.send({type:"end_txn",localTime:R,remoteNodeIds:T,async onCommit(){if(b){let P=Date.now()-b.timestamp;it(P,"replication-latency",g+"."+u+"."+b.table,b.type,"ingest")}Pe--,Hf&&(Hf=!1,e.resume(),ae.debug?.(`Replication resuming ${g}`)),Ue.length>0&&await Promise.all(Ue),ae.trace?.("All blobs finished"),!N&&I&&(ae.trace?.(s,"queuing confirmation of a commit at",I),setTimeout(()=>{e.send((0,et.encode)([n$,N])),ae.trace?.(s,"sent confirmation of a commit at",N),N=null},Lce)),N=I,ae.debug?.("last sequence committed",new Date(I),u)}})}catch(S){ae.error?.(s,"Error handling incoming replication message",S)}}),e.on("ping",Jt),e.on("pong",()=>{if(t.connection){let E=performance.now()-Z;t.connection.latency=E,kt()&&(m[LS]=E),t.isSubscriptionConnection&&Ml({name:g,database:u,url:t.url,latency:E})}Z=null}),e.on("close",(E,S)=>{clearInterval(O),clearTimeout(F),clearInterval(Y),Lr&&Lr.emit("close"),_o&&_o.end();for(let[A,{reject:b}]of y)b(new Error(`Connection closed ${S?.toString()} ${E}`));ae.debug?.(s,"closed",E,S?.toString())});function Ss(E,S){try{e.isFinished=!0,ae.debug?.(s,"closing",g,u,E,S),e.close(E,S),t.connection?.emit("finished")}catch(A){ae.error?.(s,"Error closing connection",A)}}o(Ss,"close");let _a=new Set;async function ga(E,S){let A=u_(E);if(_a.has(A)){ae.debug?.("Blob already being sent",A);return}_a.add(A);try{let b;xe++;for await(let I of E.stream())b&&(ae.debug?.("Sending blob chunk",A,"length",b.length),e.send((0,et.encode)([wS,{fileId:A,size:E.size},b]))),b=I,e._socket.writableNeedDrain&&(ae.debug?.("draining",A),await new Promise(P=>e._socket.once("drain",P)),ae.debug?.("drained",A)),it(I.length,"bytes-sent",`${g}.${u}`,"replication","blob");ae.debug?.("Sending final blob chunk",A,"length",b.length),e.send((0,et.encode)([wS,{fileId:A,size:E.size,finished:!0},b]))}catch(b){ae.warn?.("Error sending blob",b,"blob id",A,"for record",S),e.send((0,et.encode)([wS,{fileId:A,finished:!0,error:(0,Dd.errorToString)(b)},Buffer.alloc(0)]))}finally{_a.delete(A),xe--,xe<Ff&&Rr?.()}}o(ga,"sendBlobs");function Lc(E,S){let A=u_(E),b=Ne.get(A);ae.debug?.("Received transaction with blob",A,"has stream",!!b,"ended",!!b?.writableEnded),b?b.writableEnded&&Ne.delete(A):(b=new Hw.PassThrough,Ne.set(A,b)),b.connectedToBlob=!0,b.lastChunk=Date.now(),b.recordId=S,E.size===void 
0&&b.expectedSize&&(E.size=b.expectedSize);let I=b.blob??createBlob(b,E);b.blob=I;let P=No(()=>_m(I).saving,p.auditStore?.rootStore);return P&&(P.blobId=A,Ue.push(P),P.finally(()=>{ae.debug?.(`Finished receiving blob stream ${A}`),Ue.splice(Ue.indexOf(P),1)})),I}o(Lc,"receiveBlobs");function Dr(){if(h||(h=!0,t.connection?.on("subscriptions-updated",Dr)),!f&&p&&(f=p.auditStore),t.connection?.isFinished)throw new Error("Can not make a subscription request on a connection that is already closed");let E=new Map;f||(f=p?.auditStore);try{for(let b of p?.dbisDB?.getRange({start:Symbol.for("seq"),end:[Symbol.for("seq"),Buffer.from([255])]})||[])for(let I of b.value.nodes||[])I.lastTxnTime>(E.get(I.id)??0)&&E.set(I.id,I.lastTxnTime)}catch(b){if(!b.message.includes("Can not re"))throw b}let S=t.connection?.nodeSubscriptions?.[0];T=[];let A=t.connection?.nodeSubscriptions.map((b,I)=>{let P=[],{replicateByDefault:L}=b;if(b.subscriptions){for(let x of b.subscriptions)if(x.subscribe&&(x.schema||x.database)===u){let z=x.table;_?.[z]?.replicate!==!1&&P.push(z)}L=!1}else for(let x in _)(L?_[x].replicate===!1:_[x].replicate)&&P.push(x);let D=f&&_S(b.name,f),H=p?.dbisDB?.get([Symbol.for("seq"),D])??1,C=Math.max(H?.seqId??1,(typeof b.startTime=="string"?new Date(b.startTime).getTime():b.startTime)??1);if(ae.debug?.("Starting time recorded in db",b.name,D,u,H?.seqId,"start time:",C,new Date(C)),S!==b){let x=f&&_S(S.name,f),z=p?.dbisDB?.get([Symbol.for("seq"),x])??1;for(let ne of z?.nodes||[])ne.name===b.name&&(C=ne.seqId,ae.debug?.("Using sequence id from proxy node",S.name,C))}if(D===void 0?ae.warn("Starting subscription request from node",b,"but no node id found"):T.push(D),E.get(D)>C&&(C=E.get(D),ae.debug?.("Updating start time from more recent txn recorded",S.name,C)),C===1&&OS)try{new URL(OS).hostname===b.name?(ae.warn?.(`Requesting full copy of database ${u} from ${OS}`),C=0):C=Date.now()-6e4}catch(x){ae.error?.("Error parsing leader URL",OS,x)}return ae.trace?.(s,"defining subscription request",b.name,u,new Date(C)),{name:b.name,replicateByDefault:L,tables:P,startTime:C,endTime:b.endTime}});if(A)if(ae.debug?.(s,"sending subscription request",A,p?.dbisDB?.path),clearTimeout(K),A.length>0)e.send((0,et.encode)([Jq,A]));else{let b=o(()=>{let I=performance.now();K=setTimeout(()=>{ce<=I?Ss(1008,"Connection has no subscriptions and is no longer used"):b()},q).unref()},"scheduleClose");b()}}o(Dr,"sendSubscriptionRequestUpdate");function hu(E,S){if(!E)return;let A=Bf[E];return A||(A=S.getResidencyRecord(E),Bf[E]=A),A}o(hu,"getResidence");function pu(E){return!(Za&&Za!="*"&&!Za[E]&&!Za.includes?.(E)&&!Za.some?.(S=>S.name===E))}o(pu,"checkDatabaseAccess");function Sa(E){if(p=p||d.get(E),!pu(E))throw new Error(`Access to database "${E}" is not permitted`);p||ae.warn?.(`No database named "${E}" was declared and registered`),f=p?.auditStore,_||(_=st()?.[E]);let S=Ze();if(S===g)throw S?new Error("Should not connect to self",S):new Error("Node name not defined");return PE(S,E),!0}o(Sa,"setDatabase");function PE(E,S){let A=st()?.[S],b=[];for(let I in A){let P=A[I];b.push({table:I,schemaDefined:P.schemaDefined,attributes:P.attributes.map(L=>({name:L.name,type:L.type,isPrimaryKey:L.isPrimaryKey}))})}ae.trace?.("Sending database info for node",E,"database name",S),e.send((0,et.encode)([Xq,E,S,b]))}o(PE,"sendNodeDBName");function Ta(E){let S=st()?.[E],A=[];for(let b in S){if(yr&&!yr.some(P=>P.replicateByDefault?!P.tables.includes(b):P.tables.includes(b)))continue;let 
I=S[b];A.push({table:b,schemaDefined:I.schemaDefined,attributes:I.attributes.map(P=>({name:P.name,type:P.type,isPrimaryKey:P.isPrimaryKey}))})}e.send((0,et.encode)([s$,A,E]))}o(Ta,"sendDBSchema"),Y=setInterval(()=>{for(let[E,S]of Ne)S.lastChunk+pe<Date.now()&&(ae.warn?.(`Timeout waiting for blob stream to finish ${E} for record ${S.recordId??"unknown"} from ${g}`),Ne.delete(E),S.end())},pe).unref();let Ra=1,Gf=[];return{end(){_o&&_o.end(),Lr&&Lr.emit("close")},getRecord(E){let S=Ra++;return new Promise((A,b)=>{let I=[t$,S,E.table.tableId,E.id];Gf[E.table.tableId]||(I.push(E.table.tableName),Gf[E.table.tableId]=!0),e.send((0,et.encode)(I)),ce=performance.now(),y.set(S,{tableId:E.table.tableId,key:E.id,resolve(P){let{table:L,entry:D}=E;if(A(P),P)return L._recordRelocate(D,P)},reject:b})})},sendOperation(E){let S=Ra++;return E.requestId=S,e.send((0,et.encode)([kw,E])),new Promise((A,b)=>{y.set(S,{resolve:A,reject:b})})}};function Eu(E){ya(5),E<128?a[c++]=E:E<16384?(l.setUint16(c,E|32768),c+=2):E<1056964608?(l.setUint32(c,E|3221225472),c+=4):(a[c]=255,l.setUint32(c+1,E),c+=5)}function Dc(E,S=0,A=E.length){let b=A-S;ya(b),E.copy(a,c,S,A),c+=b}function Mc(E){ya(8),l.setFloat64(c,E),c+=8}function ya(E){if(E+16>a.length-c){let S=Buffer.allocUnsafeSlow(c+E-i+65536>>10<<11);a.copy(S,0,i,c),c=c-i,i=0,a=S,l=new DataView(a.buffer,0,a.length)}}function V(E,S){let A=E.database??"data";if(A!=="data"&&!ke[A]){ae.warn?.("Database not found",E.database);return}S||(S={});let b=S.schemaDefined,I=!1,P=E.schemaDefined,L=S.attributes||[];for(let D=0;D<E.attributes?.length;D++){let H=E.attributes[D],C=L.find(x=>x.name===H.name);(!C||C.type!==H.type)&&(b?ae.error?.(`Schema for '${u}.${E.table}' is defined locally, but attribute '${H.name}: ${H.type}' from '${g}' does not match local attribute ${C?"'"+C.name+": "+C.type+"'":"which does not exist"}`):(I=!0,P||(H.indexed=!0),C?L[L.indexOf(C)]=H:L.push(H)))}return I?(ae.debug?.("(Re)creating",E),je({table:E.table,database:E.database,schemaDefined:E.schemaDefined,attributes:L,...S})):S}}var c$,et,l$,u$,Dd,Vw,d$,f$,xl,m$,Hw,h$,p$,ae,Jq,Xq,Zq,Ld,e$,Bw,t$,IS,kw,NS,r$,n$,s$,wS,E$,Gw,qw,Yh,LS,$w,_$,g$,Pce,OS,Kw,ec,CS,i$,Lce,o$,Fw,PS,a$,Md,Yw=ue(()=>{De();Mi();Tw();ib();ss();c$=w(oe());k();Jc();et=require("msgpackr"),l$=require("ws"),u$=require("worker_threads"),Dd=w(Q());$h();Vw=require("events"),d$=w(os()),f$=w(require("node:tls"));Dl();xl=w(require("node:process")),m$=require("node:net");zi();Wn();Hw=require("node:stream"),h$=require("lmdb"),p$=w(require("minimist")),ae=(0,Dd.forComponent)("replication").conditional,Jq=129,Xq=140,Zq=141,Ld=142,e$=130,Bw=132,t$=133,IS=134,kw=136,NS=137,r$=143,n$=144,s$=145,wS=146,E$=0,Gw=1,qw=2,Yh=3,LS=4,$w=5,_$=0,g$=1,Pce=(0,p$.default)(xl.argv),OS=Pce.HDB_LEADER_URL??xl.env.HDB_LEADER_URL,Kw=new Map,ec=new Map,CS=!0,i$=300,Lce=2,o$=3e4;o(DS,"createWebSocket");a$=500,Md=class extends Vw.EventEmitter{static{o(this,"NodeReplicationConnection")}socket;startTime;retryTime=a$;retries=0;isConnected=!0;isFinished=!1;nodeSubscriptions;latency=0;replicateTablesByDefault;session;sessionResolve;sessionReject;url;subscription;databaseName;nodeName;authorization;constructor(t,r,n,s,i){super(),this.url=t,this.subscription=r,this.databaseName=n,this.authorization=i,this.nodeName=this.nodeName??pi(t)}async connect(){this.session||this.resetSession();let t=[];this.socket=await DS(this.url,{serverName:this.nodeName,authorization:this.authorization});let r;ae.debug?.(`Connecting to ${this.url}, db: ${this.databaseName}, process 
${xl.pid}`),this.socket.on("open",()=>{this.socket._socket.unref(),ae[this.isConnected?"info":"warn"]?.(`Connected to ${this.url}, db: ${this.databaseName}`),this.retries=0,this.retryTime=a$,this.nodeSubscriptions&&Ml({name:this.nodeName,database:this.databaseName,url:this.url}),this.isConnected=!0,r=Wh(this.socket,{database:this.databaseName,subscription:this.subscription,url:this.url,connection:this,isSubscriptionConnection:this.nodeSubscriptions!==void 0},{replicates:!0}),this.sessionResolve(r)}),this.socket.on("error",n=>{n.code==="SELF_SIGNED_CERT_IN_CHAIN"?(ae.warn?.(`Can not connect to ${this.url}, this server does not have a certificate authority for the certificate provided by ${this.url}`),n.isHandled=!0):n.code!=="ECONNREFUSED"&&(n.code==="UNABLE_TO_VERIFY_LEAF_SIGNATURE"?ae.error?.(`Can not connect to ${this.url}, the certificate provided by ${this.url} is not trusted, this node needs to be added to the cluster, or a certificate authority needs to be added`):ae.error?.(`Error in connection to ${this.url} due to ${n.message}`)),this.sessionReject(n)}),this.socket.on("close",(n,s)=>{if(this.isConnected&&(this.nodeSubscriptions&&Od({name:this.nodeName,database:this.databaseName,url:this.url,finished:this.socket.isFinished}),this.isConnected=!1),this.removeAllListeners("subscriptions-updated"),this.socket.isFinished){this.isFinished=!0,r?.end(),this.emit("finished");return}if(++this.retries%20===1){let i=s?.toString();ae.warn?.(`${r?"Disconnected from":"Failed to connect to"} ${this.url} (db: "${this.databaseName}"), due to ${i?'"'+i+'" ':""}(code: ${n})`)}r=null,this.resetSession(),setTimeout(()=>{this.connect()},this.retryTime).unref(),this.retryTime+=this.retryTime>>8})}resetSession(){this.session=new Promise((t,r)=>{this.sessionResolve=t,this.sessionReject=r})}subscribe(t,r){this.nodeSubscriptions=t,this.replicateTablesByDefault=r,this.emit("subscriptions-updated",t)}unsubscribe(){this.socket.isFinished=!0,this.socket.close(1008,"No longer subscribed")}getRecord(t){return this.session.then(r=>r.getRecord(t))}};o(Wh,"replicateOverWS")});var Go={};Oe(Go,{clearThisNodeName:()=>kce,disableReplication:()=>Uce,enabledDatabases:()=>Za,forEachReplicatedDatabase:()=>Wa,getThisNodeId:()=>MS,getThisNodeName:()=>Ze,getThisNodeUrl:()=>za,hostnameToUrl:()=>FS,lastTimeInAuditStore:()=>Hh,monitorNodeCAs:()=>O$,replicateOperation:()=>qce,replicationCertificateAuthorities:()=>qo,sendOperationToNode:()=>zh,servers:()=>Mce,setReplicator:()=>P$,start:()=>vce,startOnMainThread:()=>bw,subscribeToNode:()=>qh,unsubscribeFromNode:()=>TS,urlToNodeName:()=>pi});function vce(e){if(!e.port&&!e.securePort&&(e.port=Ms.default.get(U.OPERATIONSAPI_NETWORK_PORT),e.securePort=Ms.default.get(U.OPERATIONSAPI_NETWORK_SECUREPORT)),!Ze())throw new Error("Can not load replication without a url (see replication.url in the config)");let t=new Map;for(let s of Gh(e))t.set(pi(s.url),s);xce(e),e={mtls:!0,isOperationsServer:!0,maxPayload:10*1024*1024*1024,...e};let r=Le.ws(async(s,i,a,c)=>{if(Ot.debug("Incoming WS connection received "+i.url),i.headers.get("sec-websocket-protocol")!=="harperdb-replication-v1")return c(s,i,a);await a,s._socket.unref(),Wh(s,e,i?.user),s.on("error",l=>{l.code!=="ECONNREFUSED"&&Ot.error("Error in connection to "+this.url,l.message)})},e);e.runFirst=!0,Le.http((s,i)=>{if(s.isWebSocket&&s.headers.get("Sec-WebSocket-Protocol")==="harperdb-replication-v1"){Ot.debug("Incoming replication WS connection received, authorized: 
"+s.authorized),!s.authorized&&s._nodeRequest.socket.authorizationError&&Ot.error(`Incoming client connection from ${s.ip} did not have valid certificate, you may need turn on enableRootCAs in the config if you are using a publicly signed certificate, or add the CA to the server's trusted CAs`,s._nodeRequest.socket.authorizationError);let a=Kt().primaryStore;if(s.authorized&&s.peerCertificate.subjectaltname){let c=(0,N$.getHostnamesFromCertificate)(s.peerCertificate),l;for(let u of c)if(l=u&&(a.get(u)||t.get(u)),l)break;if(l)if(l?.revoked_certificates?.includes(s.peerCertificate.serialNumber)){Ot.warn("Revoked certificate used in attempt to connect to node",l.name,"certificate serial number",s.peerCertificate.serialNumber);return}else s.user=l;else Ot.warn(`No node found for certificate common name/SANs: ${c}, available nodes are ${Array.from(a.getRange({}).filter(({value:u})=>u).map(({key:u})=>u)).join(", ")} and routes ${Array.from(t.keys()).join(", ")}, connection will require credentials.`)}else{let c=a.get(s.ip)||t.get(s.ip);c?s.user=c:Ot.warn(`No node found for IP address ${s.ip}, available nodes are ${Array.from(new Set([...a.getKeys(),...t.keys()])).join(", ")}, connection will require credentials.`)}}return i(s)},e);let n=[];for(let s of r)if(s.secureContexts){let i=o(()=>{let a=new Set(s.secureContexts.values());s.defaultContext&&a.add(s.defaultContext);for(let c of a)try{let l=Array.from(qo);c.options.availableCAs&&l.push(...c.options.availableCAs.values());let u={...c.options,ca:l};c.updatedContext=BS.createSecureContext(u)}catch(l){Ot.error("Error creating replication TLS config",l)}},"updateContexts");s.secureContextsListeners.push(i),n.push(i),Ms.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1&&i()}O$(()=>{for(let s of n)s()})}function O$(e){let t=0;Nd(r=>{r?.ca&&(qo.add(r.ca),qo.size!==t&&(t=qo.size,e?.()))})}function Uce(e=!0){w$=e}function xce(e){w$||(st(),Za=e.databases,Wa(e,(t,r)=>{if(!t){let n=e.databaseSubscriptions||ec;for(let[s,i]of US){let a=i.get(r);a&&(a.subscribe([],!1),i.delete(r))}n.delete(r);return}for(let n in t){let s=t[n];P$(r,s,e),Kw.get(s)?.forEach(i=>i(s))}}))}function P$(e,t,r){if(!t)return console.error(`Attempt to replicate non-existent table ${t.name} from database ${e}`);if(t.replicate===!1||t.sources?.some(s=>s.isReplicator))return;let n;t.sourcedFrom(class C$ extends xr{static{o(this,"Replicator")}static connection;static subscription;static async subscribe(){let i=r.databaseSubscriptions||ec,a=i.get(e),c=a?.tableById||[];c[t.tableId]=t;let l=a?.ready;if(Ot.trace("Setting up replicator subscription to database",e),!a?.auditStore)return this.subscription=a=new Yn,i.set(e,a),a.tableById=c,a.auditStore=t.auditStore,a.dbisDB=t.dbisDB,a.databaseName=e,l&&l(a),a;this.subscription=a}static subscribeOnThisThread(i,a){return!0}static async load(i){if(i){let a=i.residencyId,c=i.residency||t.dbisDB.get([Symbol.for("residency_by_id"),a]);if(c){let l,u=new Set;do{let d,f="",m=1/0;for(let p of c){if(u.has(p)||p===Le.hostname)continue;let _=Fce(p,C$.subscription,e);if(_?.isConnected){let g=Id(t.auditStore,e,p)[LS];(!d||g<m)&&(d=_,f=p,m=g)}}if(!d)throw l||new b$.ServerError(`No connection to any other nodes are available: ${c}`,502);let h={requestId:Dce++,table:t,entry:i,id:i.key};u.add(f);try{return await d.getRecord(h)}catch(p){if(d.isConnected)throw p;Ot.warn("Error in load from node",xS,p),l||(l=p)}}while(!0)}}}static isReplicator=!0},{intermediateSource:!0})}function Bce(e,t,r,n,s){let i=US.get(e);i||US.set(e,i=new Map);let a=i.get(r);if(a)return 
a;if(t)return i.set(r,a=new Md(e,t,r,n,s)),a.connect(),a.once("finished",()=>i.delete(r)),a}function Fce(e,t,r){let n=T$.get(e);n||(n=new Map,T$.set(e,n));let s=n.get(r);if(s)return s;let i=Kt().primaryStore.get(e);return i?.url&&(s=new Md(i.url,t,r,e,i.authorization),n.set(r,s),s.connect(),s.once("finished",()=>n.delete(r))),s}async function zh(e,t,r){r||(r={}),r.serverName=e.name;let n=await DS(e.url,r),s=Wh(n,{},{});return new Promise((i,a)=>{n.on("open",()=>{Ot.debug("Sending operation connection to "+e.url+" opened",t),i(s.sendOperation(t))}),n.on("error",c=>{a(c)}),n.on("close",c=>{Ot.info("Sending operation connection to "+e.url+" closed",c)})}).finally(()=>{n.close()})}function qh(e){try{I$.isMainThread&&Ot.trace("Subscribing on main thread (should not happen in multi-threaded instance)",e.nodes[0].url,e.database);let t=ec.get(e.database);if(!t){let n;t=new Promise(s=>{Ot.info("Waiting for subscription to database "+e.database),n=s}),t.ready=n,ec.set(e.database,t)}let r=Bce(e.nodes[0].url,t,e.database,e.nodes[0].name,e.nodes[0].authorization);e.nodes[0].name===void 0?r.tentativeNode=e.nodes[0]:r.nodeName=e.nodes[0].name,r.subscribe(e.nodes.filter(n=>kh(n,e.database)),e.replicateByDefault)}catch(t){Ot.error("Error in subscription to node",e.nodes[0]?.url,t)}}async function TS({name:e,url:t,database:r}){Ot.trace("Unsubscribing from node",e,t,r,"nodes",Array.from(Kt().primaryStore.getRange({})));let n=US.get(t);if(n){let s=n.get(r);s&&(s.unsubscribe(),n.delete(r))}}function Hce(){if(Ww!==void 0)return Ww;let e=Ms.default.get(U.OPERATIONSAPI_TLS_CERTIFICATE)||Ms.default.get(U.TLS_CERTIFICATE);if(e)return Ww=new y$.X509Certificate((0,A$.readFileSync)(e)).subject?.match(/CN=(.*)/)?.[1]??null}function Ze(){return xS||(xS=Ms.default.get("replication_hostname")??pi(Ms.default.get("replication_url"))??Hce()??R$("operationsapi_network_secureport")??R$("operationsapi_network_port")??"127.0.0.1")}function kce(){xS=void 0}function R$(e){let t=Ms.default.get(e),r=t?.lastIndexOf?.(":");if(r>0)return t.slice(0,r)}function vS(e){let t=Ms.default.get(e),r=t?.lastIndexOf?.(":");return r>0?+t.slice(r+1).replace(/[\[\]]/g,""):+t}function MS(e){return Fh(e)?.[Ze()]}function za(){let e=Ms.default.get("replication_url");return e||FS(Ze())}function FS(e){let t=vS("replication_port");if(t)return`ws://${e}:${t}`;if(t=vS("replication_secureport"),t)return`wss://${e}:${t}`;if(t=vS("operationsapi_network_port"),t)return`ws://${e}:${t}`;if(t=vS("operationsapi_network_secureport"),t)return`wss://${e}:${t}`}function pi(e){if(e)return new URL(e).hostname}function Wa(e,t){for(let n of Object.getOwnPropertyNames(ke))r(n);return Ah(n=>{r(n)}),Nl((n,s)=>{r(n.databaseName)});function r(n){let s=ke[n];Ot.trace("Checking replication status of ",n,e?.databases),e?.databases===void 0||e.databases==="*"||e.databases.includes(n)||e.databases.some?.(i=>i.name===n)||!s?t(s,n,!0):Gce(n)&&t(s,n,!1)}o(r,"forDatabase")}function Gce(e){let t=ke[e];for(let r in t)if(t[r].replicate)return!0}function Hh(e){for(let t of e.getKeys({limit:1,reverse:!0}))return t}async function qce(e){let t={message:""};if(e.replicated){e.replicated=!1,Ot.trace?.("Replicating operation",e.operation,"to nodes",Le.nodes.map(n=>n.name));let r=await Promise.allSettled(Le.nodes.map(n=>zh(n,e)));t.replicated=r.map((n,s)=>{let i=n.status==="rejected"?{status:"failed",reason:n.reason.toString()}:n.value;return i.node=Le.nodes[s]?.name,i})}return t}var 
Ms,Ot,y$,A$,BS,b$,I$,N$,w$,Dce,Mce,qo,Za,US,T$,Ww,xS,ss=ue(()=>{De();Ma();Au();Yw();Mr();Ms=w(oe()),Ot=w(Q()),y$=require("crypto"),A$=require("fs");$h();Dl();k();Tw();BS=w(require("node:tls")),b$=w(ge()),I$=require("worker_threads"),N$=w(os()),Dce=1,Mce=[],qo=Ms.default.get(U.REPLICATION_ENABLEROOTCAS)!==!1?new Set(BS.rootCertificates):new Set;o(vce,"start");o(O$,"monitorNodeCAs");o(Uce,"disableReplication");o(xce,"assignReplicationSource");o(P$,"setReplicator");US=new Map;o(Bce,"getSubscriptionConnection");T$=new Map;o(Fce,"getRetrievalConnectionByName");o(zh,"sendOperationToNode");o(qh,"subscribeToNode");o(TS,"unsubscribeFromNode");o(Hce,"getCommonNameFromCert");o(Ze,"getThisNodeName");o(kce,"clearThisNodeName");Object.defineProperty(Le,"hostname",{get(){return Ze()}});o(R$,"getHostFromListeningPort");o(vS,"getPortFromListeningPort");o(MS,"getThisNodeId");Le.replication={getThisNodeId:MS,exportIdMapping:Fh};o(za,"getThisNodeUrl");o(FS,"hostnameToUrl");o(pi,"urlToNodeName");o(Wa,"forEachReplicatedDatabase");o(Gce,"hasExplicitlyReplicatedTable");o(Hh,"lastTimeInAuditStore");o(qce,"replicateOperation")});var Qh=M((Rve,U$)=>{"use strict";var vd=VG(),{validateBySchema:jh}=lt(),{commonValidators:Ud,schemaRegex:zw}=Gi(),pr=require("joi"),$ce=Q(),Vce=require("uuid").v4,GS=Do(),xd=(k(),v(W)),Kce=require("util"),tc=Zn(),{handleHDBError:$o,hdbErrors:Yce,ClientError:Bl}=ge(),{HDB_ERROR_MSGS:HS,HTTP_STATUS_CODES:Vo}=Yce,{SchemaEventMsg:qS}=ai(),L$=mr(),{getDatabases:Wce}=(De(),v(mt)),{transformReq:Bd}=ie(),{replicateOperation:D$}=(ss(),v(Go)),{cleanupOrphans:zce}=(Wn(),v(m_)),kS=pr.string().min(1).max(Ud.schema_length.maximum).pattern(zw).messages({"string.pattern.base":"{:#label} "+Ud.schema_format.message}),jce=pr.string().min(1).max(Ud.schema_length.maximum).pattern(zw).messages({"string.pattern.base":"{:#label} "+Ud.schema_format.message}).required(),Qce=pr.string().min(1).max(Ud.schema_length.maximum).pattern(zw).messages({"string.pattern.base":"{:#label} "+Ud.schema_format.message,"any.required":"'primary_key' is required","string.base":"'primary_key' must be a string"}).required();U$.exports={createSchema:Jce,createSchemaStructure:M$,createTable:Xce,createTableStructure:v$,createAttribute:nle,dropSchema:Zce,dropTable:ele,dropAttribute:tle,getBackup:sle,cleanupOrphanBlobs:ile};async function Jce(e){let t=await M$(e);return GS.signalSchemaChange(new qS(process.pid,e.operation,e.schema)),t}o(Jce,"createSchema");async function M$(e){let t=jh(e,pr.object({database:kS,schema:kS}));if(t)throw new Bl(t.message);if(Bd(e),!await vd.checkSchemaExists(e.schema))throw $o(new Error,HS.SCHEMA_EXISTS_ERR(e.schema),Vo.BAD_REQUEST,xd.LOG_LEVELS.ERROR,HS.SCHEMA_EXISTS_ERR(e.schema),!0);return await tc.createSchema(e),`database '${e.schema}' successfully created`}o(M$,"createSchemaStructure");async function Xce(e){return Bd(e),e.hash_attribute=e.primary_key??e.hash_attribute,await v$(e)}o(Xce,"createTable");async function v$(e){let t=jh(e,pr.object({database:kS,schema:kS,table:jce,residence:pr.array().items(pr.string().min(1)).optional(),hash_attribute:Qce}));if(t)throw new Bl(t.message);if(!await vd.checkSchemaTableExists(e.schema,e.table))throw $o(new Error,HS.TABLE_EXISTS_ERR(e.schema,e.table),Vo.BAD_REQUEST,xd.LOG_LEVELS.ERROR,HS.TABLE_EXISTS_ERR(e.schema,e.table),!0);let n={name:e.table,schema:e.schema,id:Vce(),hash_attribute:e.hash_attribute};try{if(e.residence)if(global.clustering_on)n.residence=e.residence,await tc.createTable(n,e);else throw $o(new Error,"Clustering does not appear to be enabled. 
Cannot insert table with property 'residence'.",Vo.BAD_REQUEST);else await tc.createTable(n,e);return`table '${e.schema}.${e.table}' successfully created.`}catch(s){throw s}}o(v$,"createTableStructure");async function Zce(e){let t=jh(e,pr.object({database:pr.string(),schema:pr.string()}).or("database","schema").messages({"object.missing":"'database' is required"}));if(t)throw new Bl(t.message);Bd(e);let r=await vd.checkSchemaExists(e.schema);if(r)throw $o(new Error,r,Vo.NOT_FOUND,xd.LOG_LEVELS.ERROR,r,!0);let n=await vd.schemaDescribe.describeSchema({schema:e.schema}),s=Object.keys(global.hdb_schema[e.schema]);await tc.dropSchema(e),GS.signalSchemaChange(new qS(process.pid,e.operation,e.schema)),await L$.purgeSchemaTableStreams(e.schema,s);let i=await D$(e);return i.message=`successfully deleted '${e.schema}'`,i}o(Zce,"dropSchema");async function ele(e){let t=jh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required()}));if(t)throw new Bl(t.message);Bd(e);let r=await vd.checkSchemaTableExists(e.schema,e.table);if(r)throw $o(new Error,r,Vo.NOT_FOUND,xd.LOG_LEVELS.ERROR,r,!0);await tc.dropTable(e),await L$.purgeTableStream(e.schema,e.table);let n=await D$(e);return n.message=`successfully deleted table '${e.schema}.${e.table}'`,n}o(ele,"dropTable");async function tle(e){let t=jh(e,pr.object({database:pr.string(),schema:pr.string(),table:pr.string().required(),attribute:pr.string().required()}));if(t)throw new Bl(t.message);Bd(e);let r=await vd.checkSchemaTableExists(e.schema,e.table);if(r)throw $o(new Error,r,Vo.NOT_FOUND,xd.LOG_LEVELS.ERROR,r,!0);if(e.attribute===global.hdb_schema[e.schema][e.table].hash_attribute)throw $o(new Error,"You cannot drop a hash attribute",Vo.BAD_REQUEST,void 0,void 0,!0);if(xd.TIME_STAMP_NAMES.indexOf(e.attribute)>=0)throw $o(new Error,`cannot drop internal timestamp attribute: ${e.attribute}`,Vo.BAD_REQUEST,void 0,void 0,!0);try{return await tc.dropAttribute(e),rle(e),GS.signalSchemaChange(new qS(process.pid,e.operation,e.schema,e.table,e.attribute)),`successfully deleted attribute '${e.attribute}'`}catch(n){throw $ce.error(`Got an error deleting attribute ${Kce.inspect(e)}.`),n}}o(tle,"dropAttribute");function rle(e){let t=Object.values(global.hdb_schema[e.schema][e.table].attributes);for(let r=0;r<t.length;r++)t[r].attribute===e.attribute&&global.hdb_schema[e.schema][e.table].attributes.splice(r,1)}o(rle,"dropAttributeFromGlobal");async function nle(e){Bd(e);let t=Wce()[e.schema][e.table].attributes;for(let{name:r}of t)if(r===e.attribute)throw $o(new Error,`attribute '${e.attribute}' already exists in ${e.schema}.${e.table}`,Vo.BAD_REQUEST,void 0,void 0,!0);return await tc.createAttribute(e),GS.signalSchemaChange(new qS(process.pid,e.operation,e.schema,e.table,e.attribute)),`attribute '${e.schema}.${e.table}.${e.attribute}' successfully created.`}o(nle,"createAttribute");function sle(e){return tc.getBackup(e)}o(sle,"getBackup");function ile(e){if(!e.database)throw new Bl('Must provide "database" name for search for orphaned blobs');if(!databases[e.database])throw new Bl(`Unknown database '${e.database}'`);return zce(databases[e.database],e.database),{message:"Orphaned blobs cleanup started, check logs for progress"}}o(ile,"cleanupOrphanBlobs")});var B$=M((Ave,x$)=>{"use strict";var{OPERATIONS_ENUM:ole}=(k(),v(W)),jw=class{static{o(this,"ReadAuditLogObject")}constructor(t,r,n=void 0,s=void 0){this.operation=ole.READ_AUDIT_LOG,this.schema=t,this.table=r,this.search_type=n,this.search_values=s}};x$.exports=jw});var Qw=M((Nve,q$)=>{"use 
strict";var ale=Zn(),Ive=B$(),$S=ie(),VS=(k(),v(W)),cle=oe(),{handleHDBError:F$,hdbErrors:lle}=ge(),{HDB_ERROR_MSGS:H$,HTTP_STATUS_CODES:k$}=lle,ule=Object.values(VS.READ_AUDIT_LOG_SEARCH_TYPES_ENUM),G$="To use this operation audit log must be enabled in harperdb-config.yaml";q$.exports=dle;async function dle(e){if($S.isEmpty(e.schema))throw new Error(H$.SCHEMA_REQUIRED_ERR);if($S.isEmpty(e.table))throw new Error(H$.TABLE_REQUIRED_ERR);if(!cle.get(VS.CONFIG_PARAMS.LOGGING_AUDITLOG))throw F$(new Error,G$,k$.BAD_REQUEST,VS.LOG_LEVELS.ERROR,G$,!0);let t=$S.checkSchemaTableExist(e.schema,e.table);if(t)throw F$(new Error,t,k$.NOT_FOUND,VS.LOG_LEVELS.ERROR,t,!0);if(!$S.isEmpty(e.search_type)&&ule.indexOf(e.search_type)<0)throw new Error(`Invalid searchType '${read_audit_log_object.search_type}'`);return await ale.readAuditLog(e)}o(dle,"readAuditLog")});var V$=M((Ove,$$)=>{"use strict";var{OPERATIONS_ENUM:fle}=(k(),v(W)),Jw=class{static{o(this,"GetBackupObject")}constructor(t,r,n=void 0,s=void 0){this.operation=fle.GET_BACKUP,this.schema=t,this.table=r}};$$.exports=Jw});var W$=M((Dve,Y$)=>{"use strict";var mle=Zn(),Pve=V$(),Xw=ie(),hle=(k(),v(W)),Lve=oe(),{handleHDBError:ple,hdbErrors:Ele}=ge(),{HDB_ERROR_MSGS:K$,HTTP_STATUS_CODES:_le}=Ele;Y$.exports=gle;async function gle(e){if(Xw.isEmpty(e.schema))throw new Error(K$.SCHEMA_REQUIRED_ERR);if(Xw.isEmpty(e.table))throw new Error(K$.TABLE_REQUIRED_ERR);let t=Xw.checkSchemaTableExist(e.schema,e.table);if(t)throw ple(new Error,t,_le.NOT_FOUND,hle.LOG_LEVELS.ERROR,t,!0);return await mle.getBackup(readAuditLogObject)}o(gle,"getBackup")});var J$=M((vve,Q$)=>{"use strict";var Sle=oe(),rc=require("joi"),Tle=lt(),z$=require("moment"),Rle=require("fs-extra"),Zw=require("path"),yle=require("lodash"),Jh=(k(),v(W)),{LOG_LEVELS:Fl}=(k(),v(W)),Ale="YYYY-MM-DD hh:mm:ss",ble=Zw.resolve(__dirname,"../logs");Q$.exports=function(e){return Tle.validateBySchema(e,Ile)};var Ile=rc.object({from:rc.custom(j$),until:rc.custom(j$),level:rc.valid(Fl.NOTIFY,Fl.FATAL,Fl.ERROR,Fl.WARN,Fl.INFO,Fl.DEBUG,Fl.TRACE),order:rc.valid("asc","desc"),limit:rc.number().min(1),start:rc.number().min(0),log_name:rc.custom(Nle)});function j$(e,t){if(z$(e,z$.ISO_8601).format(Ale)==="Invalid date")return t.message(`'${t.state.path[0]}' date '${e}' is invalid.`)}o(j$,"validateDatetime");function Nle(e,t){if(yle.invert(Jh.LOG_NAMES)[e]===void 0)return t.message(`'log_name' '${e}' is invalid.`);let n=Sle.get(Jh.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e===void 0?Jh.LOG_NAMES.HDB:e,i=s===Jh.LOG_NAMES.INSTALL?Zw.join(ble,Jh.LOG_NAMES.INSTALL):Zw.join(n,s);return Rle.existsSync(i)?null:t.message(`'log_name' '${e}' does not exist.`)}o(Nle,"validateReadLogPath")});var tO=M((xve,Z$)=>{"use strict";var KS=(k(),v(W)),wle=Q(),Ole=oe(),Cle=J$(),eO=require("path"),X$=require("fs-extra"),{once:Ple}=require("events"),{handleHDBError:Lle,hdbErrors:Dle}=ge(),{PACKAGE_ROOT:Mle}=Rt(),{replicateOperation:vle}=(ss(),v(Go)),Ule=eO.join(Mle,"logs"),xle=1e3,Ble=200;Z$.exports=Fle;async function Fle(e){let t=Cle(e);if(t)throw Lle(t,t.message,Dle.HTTP_STATUS_CODES.BAD_REQUEST,void 0,void 0,!0);let r=vle(e),n=Ole.get(KS.HDB_SETTINGS_NAMES.LOG_PATH_KEY),s=e.log_name===void 0?KS.LOG_NAMES.HDB:e.log_name,i=s===KS.LOG_NAMES.INSTALL?eO.join(Ule,KS.LOG_NAMES.INSTALL):eO.join(n,s),a=e.level!==void 0,c=a?e.level:void 0,l=e.from!==void 0,u=l?new Date(e.from):void 0,d=e.until!==void 0,f=d?new Date(e.until):void 0,m=e.limit===void 0?xle:e.limit,h=e.order===void 0?void 0:e.order,p=e.start===void 
0?0:e.start,_=p+m,g=0;h==="desc"&&!u&&!f&&(g=Math.max(X$.statSync(i).size-(_+5)*Ble,0));let y=X$.createReadStream(i,{start:g});y.on("error",G=>{wle.error(G)});let T=0,R=[],N="",O;y.on("data",G=>{let Y=/(?:^|\n)(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:[\d\.]+Z) \[(.+?)]: /g;G=N+G;let q=0,K;for(;(K=Y.exec(G))&&!y.destroyed;){O&&(O.message=G.slice(q,K.index),F(O));let[ce,le,se]=K,pe=se.split("] ["),Ne=pe[0],Ue=pe[1];pe.splice(0,2),O={timestamp:le,thread:Ne,level:Ue,tags:pe,message:""},q=K.index+ce.length}N=G.slice(q)}),y.on("end",G=>{y.destroyed||O&&(O.message=N.trim(),F(O))}),y.resume();function F(G){let Y,q,K;switch(!0){case(a&&l&&d):Y=new Date(G.timestamp),q=new Date(u),K=new Date(f),G.level===c&&Y>=q&&Y<=K&&T<p?T++:G.level===c&&Y>=q&&Y<=K&&(to(G,h,R),T++,T===_&&y.destroy());break;case(a&&l):Y=new Date(G.timestamp),q=new Date(u),G.level===c&&Y>=q&&T<p?T++:G.level===c&&Y>=q&&(to(G,h,R),T++,T===_&&y.destroy());break;case(a&&d):Y=new Date(G.timestamp),K=new Date(f),G.level===c&&Y<=K&&T<p?T++:G.level===c&&Y<=K&&(to(G,h,R),T++,T===_&&y.destroy());break;case(l&&d):Y=new Date(G.timestamp),q=new Date(u),K=new Date(f),Y>=q&&Y<=K&&T<p?T++:Y>=q&&Y<=K&&(to(G,h,R),T++,T===_&&y.destroy());break;case a:G.level===c&&T<p?T++:G.level===c&&(to(G,h,R),T++,T===_&&y.destroy());break;case l:Y=new Date(G.timestamp),q=new Date(u),Y>=q&&T<p?T++:Y>=q&&T>=p&&(to(G,h,R),T++,T===_&&y.destroy());break;case d:Y=new Date(G.timestamp),K=new Date(f),Y<=K&&T<p?T++:Y<=K&&T>=p&&(to(G,h,R),T++,T===_&&y.destroy());break;default:T<p?T++:(to(G,h,R),T++,T===_&&y.destroy())}}o(F,"onLogMessage"),await Ple(y,"close");let Z=await r;if(Z.replicated){for(let G of R)G.node=server.hostname;for(let G of Z.replicated){let Y=G.node;if(G.status==="failed")to({timestamp:new Date().toISOString(),level:"error",node:Y,message:`Error retrieving logs: ${G.reason}`},h,R);else for(let q of G.results)q.node=Y,to(q,h,R)}}return R}o(Fle,"readLog");function to(e,t,r){t==="desc"?Hle(e,r):t==="asc"?kle(e,r):r.push(e)}o(to,"pushLineToResult");function Hle(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)>r?n=i+1:s=i}t.splice(n,0,e)}o(Hle,"insertDescending");function kle(e,t){let r=new Date(e.timestamp),n=0,s=t.length;for(;n<s;){let i=n+s>>>1;new Date(t[i].timestamp)<r?n=i+1:s=i}t.splice(n,0,e)}o(kle,"insertAscending")});var YS=M((qve,nV)=>{"use strict";var rO=require("joi"),{string:Fd,boolean:eV,date:Gle}=rO.types(),qle=lt(),{validateSchemaExists:Fve,validateTableExists:Hve,validateSchemaName:kve}=Gi(),$le=(k(),v(W)),Vle=Nt(),tV=oe();tV.initSync();var Gve=Fd.invalid(tV.get($le.CONFIG_PARAMS.CLUSTERING_NODENAME)??"node_name").pattern(Vle.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),rV={operation:Fd.valid("add_node","update_node","set_node_replication"),node_name:Fd.optional(),subscriptions:rO.array().items({table:Fd.optional(),schema:Fd.optional(),database:Fd.optional(),subscribe:eV.required(),publish:eV.required().custom(Yle),start_time:Gle.iso()})};function Kle(e){return qle.validateBySchema(e,rO.object(rV))}o(Kle,"addUpdateNodeValidator");function Yle(e,t){if(t.state.ancestors[2].operation==="add_node"&&e===!1&&t.state.ancestors[0].subscribe===!1)return t.message(`'subscriptions[${t.state.path[1]}]' subscribe and/or publish must be set to true when adding a node`)}o(Yle,"checkForFalsy");nV.exports={addUpdateNodeValidator:Kle,validationSchema:rV}});var Hd=M((Vve,sV)=>{"use strict";var 
nO=class{static{o(this,"Node")}constructor(t,r,n){this.name=t,this.subscriptions=r,this.system_info=n}},sO=class{static{o(this,"NodeSubscription")}constructor(t,r,n,s){this.schema=t,this.table=r,this.publish=n,this.subscribe=s}};sV.exports={Node:nO,NodeSubscription:sO}});var oV=M((Yve,iV)=>{"use strict";var Wle=(k(),v(W)).OPERATIONS_ENUM,iO=class{static{o(this,"UpsertObject")}constructor(t,r,n,s=void 0){this.operation=Wle.UPSERT,this.schema=t,this.table=r,this.records=n,this.__origin=s}};iV.exports=iO});var Xh=M((zve,aV)=>{"use strict";var oO=class{static{o(this,"RemotePayloadObject")}constructor(t,r,n,s){this.operation=t,this.node_name=r,this.subscriptions=n,this.system_info=s}},aO=class{static{o(this,"RemotePayloadSubscription")}constructor(t,r,n,s,i,a,c){this.schema=t,this.table=r,this.hash_attribute=n,this.publish=s,this.subscribe=i,this.start_time=a,c!==void 0&&(this.attributes=c)}};aV.exports={RemotePayloadObject:oO,RemotePayloadSubscription:aO}});var lV=M((Qve,cV)=>{"use strict";var cO=class{static{o(this,"TableSizeObject")}constructor(t,r,n=0,s=0,i=0,a=0){this.schema=t,this.table=r,this.table_size=n,this.record_count=s,this.transaction_log_size=i,this.transaction_log_record_count=a}};cV.exports=cO});var dV=M((rUe,uV)=>{"use strict";var zle=lV(),Xve=qt(),Zve=gt(),jle=Q(),{getSchemaPath:eUe,getTransactionAuditStorePath:tUe}=At(),{getDatabases:Qle}=(De(),v(mt));uV.exports=Jle;async function Jle(e){let t=new zle;try{let r=Qle()[e.schema]?.[e.name],n=r.primaryStore.getStats(),s=r.auditStore?.getStats();t.schema=e.schema,t.table=e.name,t.record_count=n.entryCount,t.transaction_log_record_count=s.entryCount}catch(r){jle.warn(`unable to stat table dbi due to ${r}`)}return t}o(Jle,"lmdbGetTableSize")});var mV=M((sUe,fV)=>{"use strict";var lO=class{static{o(this,"SystemInformationObject")}constructor(t,r,n,s,i,a,c){this.system=t,this.time=r,this.cpu=n,this.memory=s,this.disk=i,this.network=a,this.harperdb_processes=c}};fV.exports=lO});var Gd=M((uUe,_V)=>{"use strict";var Xle=require("fs-extra"),Zle=require("path"),mn=require("systeminformation"),nc=Q(),hV=mr(),oUe=Nt(),kd=(k(),v(W)),eue=dV(),tue=Fa(),{getThreadInfo:pV}=nt(),Zh=oe();Zh.initSync();var rue=mV(),{openEnvironment:aUe}=gt(),{getSchemaPath:cUe}=At(),{database:lUe,databases:uO}=(De(),v(mt)),WS;_V.exports={getHDBProcessInfo:hO,getNetworkInfo:EO,getDiskInfo:pO,getMemoryInfo:mO,getCPUInfo:fO,getTimeInfo:dO,getSystemInformation:_O,systemInformation:nue,getTableSize:gO,getMetrics:SO};function dO(){return mn.time()}o(dO,"getTimeInfo");async function fO(){try{let{family:e,model:t,stepping:r,revision:n,voltage:s,speedmin:i,speedmax:a,governor:c,socket:l,cache:u,...d}=await mn.cpu();d.cpu_speed=await mn.cpuCurrentSpeed();let{rawCurrentload:f,rawCurrentloadIdle:m,rawCurrentloadIrq:h,rawCurrentloadNice:p,rawCurrentloadSystem:_,rawCurrentloadUser:g,cpus:y,...T}=await mn.currentLoad();return T.cpus=[],y.forEach(R=>{let{rawLoad:N,rawLoadIdle:O,rawLoadIrq:F,rawLoadNice:Z,rawLoadSystem:G,rawLoadUser:Y,...q}=R;T.cpus.push(q)}),d.current_load=T,d}catch(e){return nc.error(`error in getCPUInfo: ${e}`),{}}}o(fO,"getCPUInfo");async function mO(){try{let{buffers:e,cached:t,slab:r,buffcache:n,...s}=await mn.mem();return Object.assign(s,process.memoryUsage())}catch(e){return nc.error(`error in getMemoryInfo: ${e}`),{}}}o(mO,"getMemoryInfo");async function hO(){let e={core:[],clustering:[]};try{let t=await mn.processes(),r;try{r=Number.parseInt(await 
Xle.readFile(Zle.join(Zh.get(kd.CONFIG_PARAMS.ROOTPATH),kd.HDB_PID_FILE),"utf8"))}catch(n){if(n.code===kd.NODE_ERROR_CODES.ENOENT)nc.warn("Unable to locate 'hdb.pid' file, try stopping and starting HarperDB. This could be because HarperDB is not running.");else throw n}t.list.forEach(n=>{n.pid===r?e.core.push(n):n.name==="nats-server"&&e.clustering.push(n)});for(let n of e.core)for(let s of t.list)s.pid===n.parentPid&&(s.name==="PM2"||s.command==="PM2")&&(n.parent="PM2");return e}catch(t){return nc.error(`error in getHDBProcessInfo: ${t}`),e}}o(hO,"getHDBProcessInfo");async function pO(){let e={};try{if(!Zh.get(kd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_DISK))return e;let{rIO_sec:t,wIO_sec:r,tIO_sec:n,ms:s,...i}=await mn.disksIO();e.io=i;let{rxSec:a,txSec:c,wxSec:l,...u}=await mn.fsStats();return e.read_write=u,e.size=await mn.fsSize(),e}catch(t){return nc.error(`error in getDiskInfo: ${t}`),e}}o(pO,"getDiskInfo");async function EO(){let e={default_interface:null,latency:{},interfaces:[],stats:[],connections:[]};try{return Zh.get(kd.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_NETWORK)&&(e.default_interface=await mn.networkInterfaceDefault(),e.latency=await mn.inetChecksite("google.com"),(await mn.networkInterfaces()).forEach(n=>{let{internal:s,virtual:i,mtu:a,dhcp:c,dnsSuffix:l,ieee8021xAuth:u,ieee8021xState:d,carrierChanges:f,...m}=n;e.interfaces.push(m)}),(await mn.networkStats()).forEach(n=>{let{rxSec:s,txSec:i,ms:a,...c}=n;e.stats.push(c)})),e}catch(t){return nc.error(`error in getNetworkInfo: ${t}`),e}}o(EO,"getNetworkInfo");async function _O(){if(WS!==void 0)return WS;let e={};try{let{codepage:t,logofile:r,serial:n,build:s,servicepack:i,uefi:a,...c}=await mn.osInfo();e=c;let l=await mn.versions("node, npm");return e.node_version=l.node,e.npm_version=l.npm,WS=e,WS}catch(t){return nc.error(`error in getSystemInformation: ${t}`),e}}o(_O,"getSystemInformation");async function gO(){let e=[],t=await tue.describeAll();for(let r of Object.values(t))for(let n of Object.values(r))e.push(await eue(n));return e}o(gO,"getTableSize");async function SO(){let e={};for(let t in uO){let r=e[t]={},n=r.tables={};for(let s in uO[t])try{let i=uO[t][s];if(!r.readers&&(Object.assign(r,i.primaryStore.rootStore.getStats()),delete r.root,r.readers=i.primaryStore.rootStore.readerList().split(/\n\s+/).slice(1).map(l=>{let[u,d,f]=l.trim().split(" ");return{pid:u,thread:d,txnid:f}}),i.auditStore)){let{treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}=i.auditStore.getStats();r.audit={treeDepth:l,treeBranchPageCount:u,treeLeafPageCount:d,entryCount:f,overflowPages:m}}let a=i.primaryStore.getStats(),c={};for(let l of["treeDepth","treeBranchPageCount","treeLeafPageCount","entryCount","overflowPages"])c[l]=a[l];n[s]=c}catch(i){nc.notify(`Error getting stats for table ${s}: ${i}`)}}return e}o(SO,"getMetrics");async function EV(){if(Zh.get(kd.CONFIG_PARAMS.CLUSTERING_ENABLED)){let{jsm:e}=await hV.getNATSReferences(),t=await hV.listStreams(),r=[];for(let n of t){let s=[],i=await e.consumers.list(n.config.name);for await(let c of i)s.push({name:c.name,created:c.created,num_ack_pending:c.num_ack_pending,num_redelivered:c.num_redelivered,num_waiting:c.num_waiting,num_pending:c.num_pending});let a={stream_name:n.config.name,database:n.config.subjects[0].split(".")[1],table:n.config.subjects[0].split(".")[2],state:n.state,consumers:s};r.push(a)}return r}}o(EV,"getNatsStreamInfo");async function nue(e){let t=new rue;if(!Array.isArray(e.attributes)||e.attributes.length===0)return t.system=await 
_O(),t.time=dO(),t.cpu=await fO(),t.memory=await mO(),t.disk=await pO(),t.network=await EO(),t.harperdb_processes=await hO(),t.table_size=await gO(),t.metrics=await SO(),t.threads=await pV(),t.replication=await EV(),t;for(let r=0;r<e.attributes.length;r++)switch(e.attributes[r]){case"system":t.system=await _O();break;case"time":t.time=dO();break;case"cpu":t.cpu=await fO();break;case"memory":t.memory=await mO();break;case"disk":t.disk=await pO();break;case"network":t.network=await EO();break;case"harperdb_processes":t.harperdb_processes=await hO();break;case"table_size":t.table_size=await gO();break;case"database_metrics":case"metrics":t.metrics=await SO();break;case"threads":t.threads=await pV();break;case"replication":t.replication=await EV();break;default:break}return t}o(nue,"systemInformation")});var ro=M((pUe,RV)=>{"use strict";var sue=vn(),TO=ie(),iue=require("util"),Hl=(k(),v(W)),gV=oe();gV.initSync();var oue=ZN(),SV=cn(),{Node:fUe,NodeSubscription:mUe}=Hd(),aue=Wu(),cue=oV(),{RemotePayloadObject:lue,RemotePayloadSubscription:uue}=Xh(),{handleHDBError:due,hdbErrors:fue}=ge(),{HTTP_STATUS_CODES:mue,HDB_ERROR_MSGS:hue}=fue,pue=ci(),Eue=Gd(),{packageJson:_ue}=Rt(),{getDatabases:gue}=(De(),v(mt)),hUe=iue.promisify(oue.authorize),Sue=SV.searchByHash,Tue=SV.searchByValue;RV.exports={isEmpty:Rue,getNodeRecord:yue,upsertNodeRecord:Aue,buildNodePayloads:bue,checkClusteringEnabled:Iue,getAllNodeRecords:Nue,getSystemInfo:wue,reverseSubscription:TV};function Rue(e){return e==null}o(Rue,"isEmpty");async function yue(e){let t=new aue(Hl.SYSTEM_SCHEMA_NAME,Hl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e],["*"]);return Sue(t)}o(yue,"getNodeRecord");async function Aue(e){let t=new cue(Hl.SYSTEM_SCHEMA_NAME,Hl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[e]);return sue.upsert(t)}o(Aue,"upsertNodeRecord");function TV(e){if(TO.isEmpty(e.subscribe)||TO.isEmpty(e.publish))throw new Error("Received invalid subscription object");let{schema:t,table:r,hash_attribute:n}=e,s={schema:t,table:r,hash_attribute:n};return e.subscribe===!0&&e.publish===!1?(s.subscribe=!1,s.publish=!0):e.subscribe===!1&&e.publish===!0?(s.subscribe=!0,s.publish=!1):(s.subscribe=e.subscribe,s.publish=e.publish),s}o(TV,"reverseSubscription");function bue(e,t,r,n){let s=[];for(let i=0,a=e.length;i<a;i++){let c=e[i],{schema:l,table:u}=c,d=TO.getTableHashAttribute(l,u),{subscribe:f,publish:m}=TV(c),h=gue()[l]?.[u],p=new uue(l,u,d,m,f,c.start_time,h.schemaDefined?h.attributes:void 0);s.push(p)}return new lue(r,t,s,n)}o(bue,"buildNodePayloads");function Iue(){if(!gV.get(Hl.CONFIG_PARAMS.CLUSTERING_ENABLED))throw due(new Error,hue.CLUSTERING_NOT_ENABLED,mue.BAD_REQUEST,void 0,void 0,!0)}o(Iue,"checkClusteringEnabled");async function Nue(){let e=new pue(Hl.SYSTEM_SCHEMA_NAME,Hl.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,"name","*",void 0,["*"]);return Array.from(await Tue(e))}o(Nue,"getAllNodeRecords");async function wue(){let e=await Eue.getSystemInformation();return{hdb_version:_ue.version,node_version:e.node_version,platform:e.platform}}o(wue,"getSystemInfo")});var RO=M((_Ue,CV)=>{"use strict";var zS=mr(),yV=ie(),AV=Nt(),bV=(k(),v(W)),jS=Q(),IV=Qh(),Oue=Ym(),{RemotePayloadObject:Cue}=Xh(),{handleHDBError:NV,hdbErrors:Pue}=ge(),{HTTP_STATUS_CODES:wV}=Pue,{NodeSubscription:OV}=Hd();CV.exports=Lue;async function Lue(e,t){let r;try{r=await zS.request(`${t}.${AV.REQUEST_SUFFIX}`,new Cue(bV.OPERATIONS_ENUM.DESCRIBE_ALL,t,void 0,void 0)),jS.trace("Response from remote describe all request:",r)}catch(a){jS.error(`addNode received error from describe all request to remote 
node: ${a}`);let c=zS.requestErrorHandler(a,"add_node",t);throw NV(new Error,c,wV.INTERNAL_SERVER_ERROR,"error",c)}if(r.status===AV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let a=`Error returned from remote node ${t}: ${r.message}`;throw NV(new Error,a,wV.INTERNAL_SERVER_ERROR,"error",a)}let n=r.message,s=[],i=[];for(let a of e){let{table:c}=a,l=a.database??a.schema??"data";if(l===bV.SYSTEM_SCHEMA_NAME){await zS.createLocalTableStream(l,c);let p=new OV(l,c,a.publish,a.subscribe);p.start_time=a.start_time,i.push(p);continue}let u=yV.doesSchemaExist(l),d=n[l]!==void 0,f=c?yV.doesTableExist(l,c):!0,m=c?n?.[l]?.[c]!==void 0:!0;if(!u&&!d||!f&&!m){s.push(a);continue}if(!u&&d&&(jS.trace(`addNode creating schema: ${l}`),await IV.createSchema({operation:"create_schema",schema:l})),!f&&m){jS.trace(`addNode creating table: ${c} in schema: ${l} with attributes ${JSON.stringify(n[l][c].attributes)}`);let p=new Oue(l,c,n[l][c].hash_attribute);n[l][c].attributes&&(p.attributes=n[l][c].attributes),await IV.createTable(p)}await zS.createLocalTableStream(l,c);let h=new OV(l,c,a.publish,a.subscribe);h.start_time=a.start_time,i.push(h)}return{added:i,skipped:s}}o(Lue,"reviewSubscriptions")});var qd={};Oe(qd,{addNodeBack:()=>yO,removeNodeBack:()=>AO,setNode:()=>Uue});async function Uue(e){e.node_name&&!e.hostname&&(e.hostname=e.node_name),e.verify_tls!==void 0&&(e.rejectUnauthorized=e.verify_tls);let{url:t,hostname:r}=e;t?r||(r=e.hostname=pi(t)):t=FS(r);let n=(0,LV.validateBySchema)(e,vue);if(n)throw(0,Ko.handleHDBError)(n,n.message,Mue.BAD_REQUEST,void 0,void 0,!0);if(e.operation==="remove_node"){if(!t&&!r)throw new Ko.ClientError("url or hostname is required for remove_node operation");let h=r,p=Kt(),_=await p.get(h);if(!_)throw new Ko.ClientError(h+" does not exist");try{await zh({url:_.url},{operation:$.REMOVE_NODE_BACK,name:_?.subscriptions?.length>0?Ze():h},void 0)}catch(g){as.warn(`Error removing node from target node ${h}, if it is offline and we be online in the future, you may need to clean up this node manually, or retry:`,g)}return await p.delete(h),`Successfully removed '${h}' from cluster`}if(!t)throw new Ko.ClientError("url required for this operation");let s=za();if(s==null)throw new Ko.ClientError("replication url is missing from harperdb-config.yaml");let i,a,c;if(t?.startsWith("wss:")){i=await(0,vs.getReplicationCert)();let h=await(0,vs.getReplicationCertAuth)();if(!i)throw new Error("Unable to find a certificate to use for replication");i.options.is_self_signed?(a=await(0,vs.createCsr)(),as.info("Sending CSR to target node:",t)):h&&(c=h.certificate,as.info("Sending CA named",h.name,"to target node",t))}let l={operation:$.ADD_NODE_BACK,hostname:(0,ic.get)(U.REPLICATION_HOSTNAME),target_hostname:r,url:s,csr:a,cert_auth:c,authorization:e.retain_authorization?e.authorization:null};if((0,ic.get)(U.REPLICATION_SHARD)!==void 0&&(l.shard=(0,ic.get)(U.REPLICATION_SHARD)),e.subscriptions?l.subscriptions=e.subscriptions.map(PV):l.subscriptions=null,e.hasOwnProperty("subscribe")||e.hasOwnProperty("publish")){let h=PV(e);l.subscribe=h.subscribe,l.publish=h.publish}e?.authorization?.username&&e?.authorization?.password&&(e.authorization="Basic "+Buffer.from(e.authorization.username+":"+e.authorization.password).toString("base64"));let u,d;try{u=await zh({url:t},l,e)}catch(h){h.message=`Error returned from ${t}: `+h.message,as.warn("Error adding node:",t,"to cluster:",h),d=h}if(a&&(!u?.certificate||!u?.certificate?.includes?.("BEGIN CERTIFICATE")))throw d?(d.message+=" and connection was required to sign 
certificate",d):new Error(`Unexpected certificate signature response from node ${t} response: ${JSON.stringify(u)}`);a&&(as.info("CSR response received from node:",t,"saving certificate and CA in hdb_certificate"),await(0,vs.setCertTable)({name:Due.certificateFromPem(u.signingCA).issuer.getField("CN").value,certificate:u.signingCA,is_authority:!0}),u.certificate&&await(0,vs.setCertTable)({name:Ze(),uses:["https","operations","wss"],certificate:u.certificate,private_key_name:i?.options?.key_file,is_authority:!1,is_self_signed:!1}),c=u.signingCA);let f={url:t,ca:u?.usingCA};if(e.hostname&&(f.name=e.hostname),e.subscriptions?f.subscriptions=e.subscriptions:f.replicates=!0,e.start_time&&(f.start_time=typeof e.start_time=="string"?new Date(e.start_time).getTime():e.start_time),e.retain_authorization&&(f.authorization=e.authorization),e.revoked_certificates&&(f.revoked_certificates=e.revoked_certificates),u?.shard!==void 0?f.shard=u.shard:e.shard!==void 0&&(f.shard=e.shard),f.replicates){let h={url:s,ca:c,replicates:!0,subscriptions:null};(0,ic.get)(U.REPLICATION_SHARD)!==void 0&&(h.shard=(0,ic.get)(U.REPLICATION_SHARD)),e.retain_authorization&&(h.authorization=e.authorization),e.start_time&&(h.start_time=e.start_time),await Ho(Ze(),h)}await Ho(u?u.nodeName:f.name??pi(t),f);let m;return e.operation==="update_node"?m=`Successfully updated '${t}'`:m=`Successfully added '${t}' to cluster`,d&&(m+=" but there was an error updating target node: "+d.message),m}async function yO(e){as.trace("addNodeBack received request:",e);let t=await(0,vs.signCertificate)(e),r;e.csr?(r=t.signingCA,as.info("addNodeBack received CSR from node:",e.url,"this node will use and respond with CA that was used to issue CSR")):(r=e?.cert_auth,as.info("addNodeBack received CA from node:",e.url));let n={url:e.url,ca:r};e.subscriptions?n.subscriptions=e.subscriptions:(n.replicates=!0,n.subscriptions=null),e.start_time&&(n.start_time=e.start_time),e.authorization&&(n.authorization=e.authorization),e.shard!==void 0&&(n.shard=e.shard);let s=await(0,vs.getReplicationCertAuth)();if(n.replicates){let i={url:za(),ca:s?.certificate,replicates:!0,subscriptions:null};(0,ic.get)(U.REPLICATION_SHARD)!==void 0&&(i.shard=(0,ic.get)(U.REPLICATION_SHARD),t.shard=i.shard),e.start_time&&(i.start_time=e.start_time),e.authorization&&(i.authorization=e.authorization),await Ho(Ze(),i)}return await Ho(e.hostname,n),t.nodeName=Ze(),t.usingCA=s?.certificate,as.info("addNodeBack responding to:",e.url,"with CA named:",s?.name),t}async function AO(e){as.trace("removeNodeBack received request:",e),await Kt().delete(e.name)}function PV(e){let{subscribe:t,publish:r}=e;return{...e,subscribe:r,publish:t}}var vs,LV,sc,ic,as,Ko,Due,Mue,vue,$d=ue(()=>{vs=w(os()),LV=w(lt()),sc=w(require("joi")),ic=w(oe());k();$h();Dl();ss();as=w(Q()),Ko=w(ge()),{pki:Due}=require("node-forge"),{HTTP_STATUS_CODES:Mue}=Ko.hdbErrors,vue=sc.default.object({hostname:sc.default.string(),verify_tls:sc.default.boolean(),replicates:sc.default.boolean(),subscriptions:sc.default.array(),revoked_certificates:sc.default.array(),shard:sc.default.number()});o(Uue,"setNode");o(yO,"addNodeBack");o(AO,"removeNodeBack");o(PV,"reverseSubscription")});var rp=M((NUe,MV)=>{"use strict";var{handleHDBError:QS,hdbErrors:xue}=ge(),{HTTP_STATUS_CODES:JS}=xue,{addUpdateNodeValidator:Bue}=YS(),XS=Q(),ZS=(k(),v(W)),DV=Nt(),Fue=ie(),ep=mr(),tp=ro(),bO=oe(),Hue=RO(),{Node:kue,NodeSubscription:Gue}=Hd(),{broadcast:que}=nt(),{setNode:$ue}=($d(),v(qd)),bUe=oe(),IUe=(k(),v(W)),Vue="Unable to create subscriptions due to 
schema and/or tables not existing on the local or remote node",Kue="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",Yue=bO.get(ZS.CONFIG_PARAMS.CLUSTERING_NODENAME);MV.exports=Wue;async function Wue(e,t=!1){if(XS.trace("addNode called with:",e),bO.get(ZS.CONFIG_PARAMS.REPLICATION_URL)||bO.get(ZS.CONFIG_PARAMS.REPLICATION_HOSTNAME))return $ue(e);tp.checkClusteringEnabled();let r=Bue(e);if(r)throw QS(r,r.message,JS.BAD_REQUEST,void 0,void 0,!0);let n=e.node_name;if(!t){let f=await tp.getNodeRecord(n);if(!Fue.isEmptyOrZeroLength(f))throw QS(new Error,`Node '${n}' has already been added, perform update_node to proceed.`,JS.BAD_REQUEST,void 0,void 0,!0)}let{added:s,skipped:i}=await Hue(e.subscriptions,n),a={message:void 0,added:s,skipped:i};if(s.length===0)return a.message=Vue,a;let c=tp.buildNodePayloads(s,Yue,ZS.OPERATIONS_ENUM.ADD_NODE,await tp.getSystemInfo()),l=[];for(let f=0,m=s.length;f<m;f++){let h=s[f];s[f].start_time===void 0&&delete s[f].start_time,l.push(new Gue(h.schema,h.table,h.publish,h.subscribe))}XS.trace("addNode sending remote payload:",c);let u;try{u=await ep.request(`${n}.${DV.REQUEST_SUFFIX}`,c)}catch(f){XS.error(`addNode received error from request: ${f}`);for(let h=0,p=s.length;h<p;h++){let _=s[h];_.publish=!1,_.subscribe=!1,await ep.updateRemoteConsumer(_,n)}let m=ep.requestErrorHandler(f,"add_node",n);throw QS(new Error,m,JS.INTERNAL_SERVER_ERROR,"error",m)}if(u.status===DV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let f=`Error returned from remote node ${n}: ${u.message}`;throw QS(new Error,f,JS.INTERNAL_SERVER_ERROR,"error",f)}XS.trace(u);for(let f=0,m=s.length;f<m;f++){let h=s[f];await ep.updateRemoteConsumer(h,n),h.subscribe===!0&&await ep.updateConsumerIterator(h.schema,h.table,n,"start")}let d=new kue(n,l,u.system_info);return await tp.upsertNodeRecord(d),que({type:"nats_update"}),i.length>0?a.message=Kue:a.message=`Successfully added '${n}' to manifest`,a}o(Wue,"addNode")});var OO=M((CUe,UV)=>{"use strict";var{handleHDBError:IO,hdbErrors:zue}=ge(),{HTTP_STATUS_CODES:NO}=zue,{addUpdateNodeValidator:jue}=YS(),np=Q(),eT=(k(),v(W)),vV=Nt(),OUe=ie(),sp=mr(),ip=ro(),wO=oe(),{cloneDeep:Que}=require("lodash"),Jue=RO(),{Node:Xue,NodeSubscription:Zue}=Hd(),{broadcast:ede}=nt(),{setNode:tde}=($d(),v(qd)),rde="Unable to update subscriptions due to schema and/or tables not existing on the local or remote node",nde="Some subscriptions were unsuccessful due to schema and/or tables not existing on the local or remote node",sde=wO.get(eT.CONFIG_PARAMS.CLUSTERING_NODENAME);UV.exports=ide;async function ide(e){if(np.trace("updateNode called with:",e),wO.get(eT.CONFIG_PARAMS.REPLICATION_URL)??wO.get(eT.CONFIG_PARAMS.REPLICATION_HOSTNAME))return tde(e);ip.checkClusteringEnabled();let t=jue(e);if(t)throw IO(t,t.message,NO.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n,s=await ip.getNodeRecord(r);s.length>0&&(n=Que(s));let{added:i,skipped:a}=await Jue(e.subscriptions,r),c={message:void 0,updated:i,skipped:a};if(i.length===0)return c.message=rde,c;let l=ip.buildNodePayloads(i,sde,eT.OPERATIONS_ENUM.UPDATE_NODE,await ip.getSystemInfo());for(let d=0,f=i.length;d<f;d++){let m=i[d];np.trace(`updateNode updating work stream for node: ${r} subscription:`,m),i[d].start_time===void 0&&delete i[d].start_time}np.trace("updateNode sending remote payload:",l);let u;try{u=await sp.request(`${r}.${vV.REQUEST_SUFFIX}`,l)}catch(d){np.error(`updateNode received error from request: ${d}`);let f=sp.requestErrorHandler(d,"update_node",r);throw 
IO(new Error,f,NO.INTERNAL_SERVER_ERROR,"error",f)}if(u.status===vV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR){let d=`Error returned from remote node ${r}: ${u.message}`;throw IO(new Error,d,NO.INTERNAL_SERVER_ERROR,"error",d)}np.trace(u);for(let d=0,f=i.length;d<f;d++){let m=i[d];await sp.updateRemoteConsumer(m,r),m.subscribe===!0?await sp.updateConsumerIterator(m.schema,m.table,r,"start"):await sp.updateConsumerIterator(m.schema,m.table,r,"stop")}return n||(n=[new Xue(r,[],u.system_info)]),await ode(n[0],i,u.system_info),a.length>0?c.message=nde:c.message=`Successfully updated '${r}'`,c}o(ide,"updateNode");async function ode(e,t,r){let n=e;for(let s=0,i=t.length;s<i;s++){let a=t[s],c=!1;for(let l=0,u=e.subscriptions.length;l<u;l++){let d=n.subscriptions[l];if(d.schema===a.schema&&d.table===a.table){d.publish=a.publish,d.subscribe=a.subscribe,c=!0;break}}c||n.subscriptions.push(new Zue(a.schema,a.table,a.publish,a.subscribe))}n.system_info=r,await ip.upsertNodeRecord(n),ede({type:"nats_update"})}o(ode,"updateNodeTable")});var kV=M((LUe,HV)=>{"use strict";var FV=require("joi"),{string:xV}=FV.types(),ade=lt(),BV=(k(),v(W)),cde=oe(),lde=Nt();HV.exports=ude;function ude(e){let t=xV.invalid(cde.get(BV.CONFIG_PARAMS.CLUSTERING_NODENAME)).pattern(lde.NATS_TERM_CONSTRAINTS_RX).messages({"string.pattern.base":"{:#label} invalid, must not contain ., * or >","any.invalid":"'node_name' cannot be this nodes name"}).empty(null),r=FV.object({operation:xV.valid(BV.OPERATIONS_ENUM.REMOVE_NODE).required(),node_name:t});return ade.validateBySchema(e,r)}o(ude,"removeNodeValidator")});var ap=M((MUe,KV)=>{"use strict";var{handleHDBError:GV,hdbErrors:dde}=ge(),{HTTP_STATUS_CODES:qV}=dde,fde=kV(),op=Q(),$V=ro(),mde=ie(),Vd=(k(),v(W)),VV=Nt(),CO=mr(),PO=oe(),{RemotePayloadObject:hde}=Xh(),{NodeSubscription:pde}=Hd(),Ede=Km(),_de=Sl(),{broadcast:gde}=nt(),{setNode:Sde}=($d(),v(qd)),Tde=PO.get(Vd.CONFIG_PARAMS.CLUSTERING_NODENAME);KV.exports=Rde;async function Rde(e){if(op.trace("removeNode called with:",e),PO.get(Vd.CONFIG_PARAMS.REPLICATION_URL)??PO.get(Vd.CONFIG_PARAMS.REPLICATION_HOSTNAME))return Sde(e);$V.checkClusteringEnabled();let t=fde(e);if(t)throw GV(t,t.message,qV.BAD_REQUEST,void 0,void 0,!0);let r=e.node_name,n=await $V.getNodeRecord(r);if(mde.isEmptyOrZeroLength(n))throw GV(new Error,`Node '${r}' was not found.`,qV.BAD_REQUEST,void 0,void 0,!0);n=n[0];let s=new hde(Vd.OPERATIONS_ENUM.REMOVE_NODE,Tde,[]),i,a=!1;for(let l=0,u=n.subscriptions.length;l<u;l++){let d=n.subscriptions[l];d.subscribe===!0&&await CO.updateConsumerIterator(d.schema,d.table,r,"stop");try{await CO.updateRemoteConsumer(new pde(d.schema,d.table,!1,!1),r)}catch(f){op.error(f)}}try{i=await CO.request(`${r}.${VV.REQUEST_SUFFIX}`,s),op.trace("Remove node reply from remote node:",r,i)}catch(l){op.error("removeNode received error from request:",l),a=!0}let c=new Ede(Vd.SYSTEM_SCHEMA_NAME,Vd.SYSTEM_TABLE_NAMES.NODE_TABLE_NAME,[r]);return await _de.deleteRecord(c),gde({type:"nats_update"}),i?.status===VV.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR||a?(op.error("Error returned from remote node:",r,i?.message),`Successfully removed '${r}' from local manifest, however there was an error reaching remote node. 
Check the logs for more details.`):`Successfully removed '${r}' from manifest`}o(Rde,"removeNode")});var zV=M((UUe,WV)=>{"use strict";var YV=require("joi"),{string:yde,array:Ade}=YV.types(),bde=lt(),Ide=YS();WV.exports=Nde;function Nde(e){let t=YV.object({operation:yde.valid("configure_cluster").required(),connections:Ade.items(Ide.validationSchema).required()});return bde.validateBySchema(e,t)}o(Nde,"configureClusterValidator")});var LO=M((BUe,ZV)=>{"use strict";var jV=(k(),v(W)),tT=Q(),wde=ie(),Ode=oe(),Cde=ap(),Pde=rp(),Lde=ro(),Dde=zV(),{handleHDBError:QV,hdbErrors:Mde}=ge(),{HTTP_STATUS_CODES:JV}=Mde,vde="Configure cluster complete.",Ude="Failed to configure the cluster. Check the logs for more details.",xde="Configure cluster was partially successful. Errors occurred when attempting to configure the following nodes. Check the logs for more details.";ZV.exports=Bde;async function Bde(e){tT.trace("configure cluster called with:",e);let t=Dde(e);if(t)throw QV(t,t.message,JV.BAD_REQUEST,void 0,void 0,!0);let r=await Lde.getAllNodeRecords(),n=[];if(Ode.get(jV.CONFIG_PARAMS.CLUSTERING_ENABLED)){for(let d=0,f=r.length;d<f;d++){let m=await XV(Cde,{operation:jV.OPERATIONS_ENUM.REMOVE_NODE,node_name:r[d].name},r[d].name);n.push(m)}tT.trace("All results from configure_cluster remove node:",n)}let s=[],i=e.connections.length;for(let d=0;d<i;d++){let f=e.connections[d],m=await XV(Pde,f,f.node_name);s.push(m)}tT.trace("All results from configure_cluster add node:",s);let a=[],c=[],l=!1,u=n.concat(s);for(let d=0,f=u.length;d<f;d++){let m=u[d];m.status==="rejected"&&(tT.error(m.node_name,m?.error?.message,m?.error?.stack),a.includes(m.node_name)||a.push(m.node_name)),(m?.result?.message?.includes?.("Successfully")||m?.result?.includes?.("Successfully"))&&(l=!0),!(typeof m.result=="string"&&m.result.includes("Successfully removed")||m.status==="rejected")&&c.push({node_name:m?.node_name,response:m?.result})}if(wde.isEmptyOrZeroLength(a))return{message:vde,connections:c};if(l)return{message:xde,failed_nodes:a,connections:c};throw QV(new Error,Ude,JV.INTERNAL_SERVER_ERROR,void 0,void 0,!0)}o(Bde,"configureCluster");async function XV(e,t,r){try{return{node_name:r,result:await e(t)}}catch(n){return{node_name:r,error:n,status:"rejected"}}}o(XV,"functionWrapper")});var n1=M((HUe,r1)=>{"use strict";var cp=require("joi"),Fde=lt(),{validateSchemaExists:e1,validateTableExists:Hde,validateSchemaName:t1}=Gi(),kde=cp.object({operation:cp.string().valid("purge_stream"),schema:cp.string().custom(e1).custom(t1).optional(),database:cp.string().custom(e1).custom(t1).optional(),table:cp.string().custom(Hde).required()});function Gde(e){return Fde.validateBySchema(e,kde)}o(Gde,"purgeStreamValidator");r1.exports=Gde});var DO=M((GUe,s1)=>{"use strict";var{handleHDBError:qde,hdbErrors:$de}=ge(),{HTTP_STATUS_CODES:Vde}=$de,Kde=n1(),Yde=mr(),Wde=ro();s1.exports=zde;async function zde(e){e.schema=e.schema??e.database;let t=Kde(e);if(t)throw qde(t,t.message,Vde.BAD_REQUEST,void 0,void 0,!0);Wde.checkClusteringEnabled();let{schema:r,table:n,options:s}=e;return await Yde.purgeTableStream(r,n,s),`Successfully purged table '${r}.${n}'`}o(zde,"purgeStream")});var sT=M(($Ue,d1)=>{"use strict";var 
vO=ro(),jde=mr(),nT=oe(),Kd=(k(),v(W)),kl=Nt(),Qde=ie(),MO=Q(),{RemotePayloadObject:Jde}=Xh(),{ErrorCode:i1}=require("nats"),{parentPort:o1}=require("worker_threads"),{onMessageByType:Xde}=nt(),{getThisNodeName:Zde}=(ss(),v(Go)),{requestClusterStatus:efe}=($h(),v(Cq)),{getReplicationSharedStatus:tfe,getHDBNodeTable:rfe}=(Dl(),v(Rw)),{CONFIRMATION_STATUS_POSITION:nfe,RECEIVED_VERSION_POSITION:sfe,RECEIVED_TIME_POSITION:ife,SENDING_TIME_POSITION:ofe,RECEIVING_STATUS_POSITION:afe,RECEIVING_STATUS_RECEIVING:cfe}=(Yw(),v(S$)),a1=nT.get(Kd.CONFIG_PARAMS.CLUSTERING_ENABLED),c1=nT.get(Kd.CONFIG_PARAMS.CLUSTERING_NODENAME);d1.exports={clusterStatus:lfe,buildNodeStatus:u1};var l1;Xde("cluster-status",async e=>{l1(e)});async function lfe(){if(nT.get(Kd.CONFIG_PARAMS.REPLICATION_URL)||nT.get(Kd.CONFIG_PARAMS.REPLICATION_HOSTNAME)){let n;if(o1){o1.postMessage({type:"request-cluster-status"}),n=await new Promise(i=>{l1=i});for(let i of n.connections){let a=i.name;for(let c of i.database_sockets){let l=c.database,u;for(let f of Object.values(databases[l]||{}))if(u=f.auditStore,u)break;if(!u)continue;let d=tfe(u,l,a);c.lastCommitConfirmed=rT(d[nfe]),c.lastReceivedRemoteTime=rT(d[sfe]),c.lastReceivedLocalTime=rT(d[ife]),c.sendingMessage=rT(d[ofe]),c.lastReceivedStatus=d[afe]===cfe?"Receiving":"Waiting"}}}else n=efe();n.node_name=Zde();let s=rfe().primaryStore.get(n.node_name);return s?.shard&&(n.shard=s.shard),s?.url&&(n.url=s.url),n.is_enabled=!0,n}let e={node_name:c1,is_enabled:a1,connections:[]};if(!a1)return e;let t=await vO.getAllNodeRecords();if(Qde.isEmptyOrZeroLength(t))return e;let r=[];for(let n=0,s=t.length;n<s;n++)r.push(u1(t[n],e.connections));return await Promise.allSettled(r),e}o(lfe,"clusterStatus");function rT(e){return e?e===1?"Copying":new Date(e).toUTCString():void 0}o(rT,"asDate");async function u1(e,t){let r=e.name,n=new Jde(Kd.OPERATIONS_ENUM.CLUSTER_STATUS,c1,void 0,await vO.getSystemInfo()),s,i,a=kl.CLUSTER_STATUS_STATUSES.OPEN;try{let l=Date.now();s=await jde.request(kl.REQUEST_SUBJECT(r),n),i=Date.now()-l,s.status===kl.UPDATE_REMOTE_RESPONSE_STATUSES.ERROR&&(a=kl.CLUSTER_STATUS_STATUSES.CLOSED,MO.error(`Error getting node status from ${r} `,s))}catch(l){MO.warn(`Error getting node status from ${r}`,l),l.code===i1.NoResponders?a=kl.CLUSTER_STATUS_STATUSES.NO_RESPONDERS:l.code===i1.Timeout?a=kl.CLUSTER_STATUS_STATUSES.TIMEOUT:a=kl.CLUSTER_STATUS_STATUSES.CLOSED}let c=new ufe(r,a,s?.message?.ports?.clustering,s?.message?.ports?.operations_api,i,s?.message?.uptime,e.subscriptions,s?.message?.system_info);try{let l={name:r,system_info:s?.message?.system_info};e.system_info?.hdb_version!==Kd.PRE_4_0_0_VERSION&&await vO.upsertNodeRecord(l)}catch(l){MO.error("Cluster status encountered an error updating system info for node:",r,l)}t.push(c)}o(u1,"buildNodeStatus");function ufe(e,t,r,n,s,i,a,c){this.node_name=e,this.status=t,this.ports={clustering:r,operations_api:n},this.latency_ms=s,this.uptime=i,this.subscriptions=a,this.system_info=c}o(ufe,"NodeStatusObject")});var xO=M((KUe,f1)=>{"use strict";var{handleHDBError:dfe,hdbErrors:ffe}=ge(),{HTTP_STATUS_CODES:mfe}=ffe,hfe=mr(),pfe=ro(),UO=ie(),iT=require("joi"),Efe=lt(),_fe=2e3,gfe=iT.object({timeout:iT.number().min(1),connected_nodes:iT.boolean(),routes:iT.boolean()});f1.exports=Sfe;async function Sfe(e){pfe.checkClusteringEnabled();let t=Efe.validateBySchema(e,gfe);if(t)throw dfe(t,t.message,mfe.BAD_REQUEST,void 0,void 0,!0);let{timeout:r,connected_nodes:n,routes:s}=e,i=n===void 0||UO.autoCastBoolean(n),a=s===void 
0||UO.autoCastBoolean(s),c={nodes:[]},l=await hfe.getServerList(r??_fe),u={};if(i)for(let d=0,f=l.length;d<f;d++){let m=l[d].statsz;m&&(u[l[d].server.name]=m.routes)}for(let d=0,f=l.length;d<f;d++){if(l[d].statsz)continue;let m=l[d].server,h=l[d].data;if(m.name.endsWith("-hub")){let p={name:m.name.slice(0,-4),response_time:l[d].response_time};i&&(p.connected_nodes=[],u[m.name]&&u[m.name].forEach(_=>{p.connected_nodes.includes(_.name.slice(0,-4))||p.connected_nodes.push(_.name.slice(0,-4))})),a&&(p.routes=h.cluster?.urls?h.cluster?.urls.map(_=>({host:_.split(":")[0],port:UO.autoCast(_.split(":")[1])})):[]),c.nodes.push(p)}}return c}o(Sfe,"clusterNetwork")});var E1=M((WUe,p1)=>{"use strict";var BO=require("joi"),m1=lt(),{routeConstraints:h1}=Zy();p1.exports={setRoutesValidator:Tfe,deleteRoutesValidator:Rfe};function Tfe(e){let t=BO.object({server:BO.valid("hub","leaf"),routes:h1.required()});return m1.validateBySchema(e,t)}o(Tfe,"setRoutesValidator");function Rfe(e){let t=BO.object({routes:h1.required()});return m1.validateBySchema(e,t)}o(Rfe,"deleteRoutesValidator")});var oT=M((jUe,A1)=>{"use strict";var Yo=yt(),FO=ie(),Us=(k(),v(W)),Yd=oe(),_1=E1(),{handleHDBError:g1,hdbErrors:yfe}=ge(),{HTTP_STATUS_CODES:S1}=yfe,T1="cluster routes successfully set",R1="cluster routes successfully deleted";A1.exports={setRoutes:bfe,getRoutes:Ife,deleteRoutes:Nfe};function Afe(e){let t=Yo.getClusteringRoutes(),r=e.server==="hub"?t.hub_routes:t.leaf_routes,n=e.server==="hub"?t.leaf_routes:t.hub_routes,s=[],i=[];for(let a=0,c=e.routes.length;a<c;a++){let l=e.routes[a];l.port=FO.autoCast(l.port);let u=r.some(f=>f.host===l.host&&f.port===l.port),d=n.some(f=>f.host===l.host&&f.port===l.port);u||d?s.push(l):(r.push(l),i.push(l))}return e.server==="hub"?Yo.updateConfigValue(Us.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r):Yo.updateConfigValue(Us.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,r),{message:T1,set:i,skipped:s}}o(Afe,"setRoutesNats");function bfe(e){let t=_1.setRoutesValidator(e);if(t)throw g1(t,t.message,S1.BAD_REQUEST,void 0,void 0,!0);if(Yd.get(Us.CONFIG_PARAMS.CLUSTERING_ENABLED))return Afe(e);let r=[],n=[],s=Yd.get(Us.CONFIG_PARAMS.REPLICATION_ROUTES)??[];return e.routes.forEach(i=>{y1(s,i)?n.push(i):(s.push(i),r.push(i))}),Yo.updateConfigValue(Us.CONFIG_PARAMS.REPLICATION_ROUTES,s),{message:T1,set:r,skipped:n}}o(bfe,"setRoutes");function y1(e,t){return typeof t=="string"?e.includes(t):typeof t=="object"&&t!==null?e.some(r=>(r.host===t.host||r.hostname===t.hostname)&&r.port===t.port):!1}o(y1,"existsInArray");function Ife(){if(Yd.get(Us.CONFIG_PARAMS.CLUSTERING_ENABLED)){let e=Yo.getClusteringRoutes();return{hub:e.hub_routes,leaf:e.leaf_routes}}else return Yd.get(Us.CONFIG_PARAMS.REPLICATION_ROUTES)??[]}o(Ife,"getRoutes");function Nfe(e){let t=_1.deleteRoutesValidator(e);if(t)throw g1(t,t.message,S1.BAD_REQUEST,void 0,void 0,!0);if(Yd.get(Us.CONFIG_PARAMS.CLUSTERING_ENABLED))return wfe(e);let r=[],n=[],s=Yd.get(Us.CONFIG_PARAMS.REPLICATION_ROUTES)??[],i=[];return s.forEach(a=>{y1(e.routes,a)?r.push(a):(i.push(a),n.push(a))}),Yo.updateConfigValue(Us.CONFIG_PARAMS.REPLICATION_ROUTES,i),{message:R1,deleted:r,skipped:n}}o(Nfe,"deleteRoutes");function wfe(e){let t=Yo.getClusteringRoutes(),r=t.hub_routes,n=t.leaf_routes,s=[],i=[],a=!1,c=!1;for(let l=0,u=e.routes.length;l<u;l++){let d=e.routes[l],f=!1;for(let m=0,h=r.length;m<h;m++){let p=r[m];if(d.host===p.host&&d.port===p.port){r.splice(m,1),f=!0,a=!0,s.push(d);break}}if(!f){let m=!0;for(let h=0,p=n.length;h<p;h++){let 
_=n[h];if(d.host===_.host&&d.port===_.port){n.splice(h,1),c=!0,m=!1,s.push(d);break}}m&&i.push(d)}}return a&&(r=FO.isEmptyOrZeroLength(r)?null:r,Yo.updateConfigValue(Us.CONFIG_PARAMS.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES,r)),c&&(n=FO.isEmptyOrZeroLength(n)?null:n,Yo.updateConfigValue(Us.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_ROUTES,n)),{message:R1,deleted:s,skipped:i}}o(wfe,"deleteRoutesNats")});var I1=M((JUe,b1)=>{"use strict";var lp=require("alasql"),Gl=require("recursive-iterator"),gi=Q(),Ofe=ie(),up=(k(),v(W)),HO=class{static{o(this,"sqlStatementBucket")}constructor(t){this.ast=t,this.affected_attributes=new Map,this.table_lookup=new Map,this.schema_lookup=new Map,this.table_to_schema_lookup=new Map,Pfe(this.ast,this.affected_attributes,this.table_lookup,this.schema_lookup,this.table_to_schema_lookup)}getAttributesBySchemaTableName(t,r){if(!t||!r||!this.affected_attributes)return[];if(this.affected_attributes.has(t))return!this.affected_attributes.get(t).has(r)&&(r=this.table_lookup.get(r),!r)?[]:this.affected_attributes.get(t).get(r)}getAllTables(){let t=[];if(!this.affected_attributes)return t;for(let r of this.affected_attributes.keys())t.push(Array.from(this.affected_attributes.get(r).keys()));return t}getTablesBySchemaName(t){return!t||!this.affected_attributes?[]:Array.from(this.affected_attributes.get(t).keys())}getSchemas(){return this.affected_attributes?Array.from(this.affected_attributes.keys()):[]}getAst(){return this.ast}updateAttributeWildcardsForRolePerms(t){let r=this.ast.columns.filter(s=>up.SEARCH_WILDCARDS.includes(s.columnid));if(r.length===0)return this.ast;let n=this.ast.from[0].databaseid;return this.ast.columns=this.ast.columns.filter(s=>!up.SEARCH_WILDCARDS.includes(s.columnid)),r.forEach(s=>{let i=this.table_to_schema_lookup.has(s.tableid)?this.table_to_schema_lookup.get(s.tableid):n,a=this.table_lookup.has(s.tableid)?this.table_lookup.get(s.tableid):this.ast.from[0].tableid;if(t[i]&&t[i].tables[a]&&t[i].tables[a][up.PERMS_CRUD_ENUM.READ]){let c;t[i].tables[a].attribute_permissions.length>0?c=Cfe(t[i].tables[a].attribute_permissions):c=global.hdb_schema[i][a].attributes.map(u=>({attribute_name:u.attribute}));let l=this.affected_attributes.get(i).get(a).filter(u=>!up.SEARCH_WILDCARDS.includes(u));c.forEach(({attribute_name:u})=>{let d=new lp.yy.Column({columnid:u});s.tableid&&(d.tableid=s.tableid),this.ast.columns.push(d),l.includes(u)||l.push(u)}),this.affected_attributes.get(i).set(a,l)}}),this.ast}};function Cfe(e){return e.filter(t=>t[up.PERMS_CRUD_ENUM.READ])}o(Cfe,"filterReadRestrictedAttrs");function Pfe(e,t,r,n,s){Lfe(e,t,r,n,s)}o(Pfe,"interpretAST");function dp(e,t,r,n,s){if(!(!e||!e.databaseid)&&(t.has(e.databaseid)||t.set(e.databaseid,new Map),t.get(e.databaseid).has(e.tableid)||t.get(e.databaseid).set(e.tableid,[]),e.as&&(r.has(e.as)||r.set(e.as,e.tableid),n&&!n.has(e.as)&&n.set(e.as,e.databaseid)),s)){let i=e.databaseid,a=e.tableid;e.as&&(a=e.as),s.set(a,i)}}o(dp,"addSchemaTableToMap");function Lfe(e,t,r,n,s){if(!e){gi.info("getRecordAttributesAST: invalid SQL syntax tree");return}e instanceof lp.yy.Insert?Ufe(e,t,r):e instanceof lp.yy.Select?Dfe(e,t,r,n,s):e instanceof lp.yy.Update?Mfe(e,t,r):e instanceof lp.yy.Delete?vfe(e,t,r):gi.error("AST in getRecordAttributesAST() is not a valid SQL type.")}o(Lfe,"getRecordAttributesAST");function Dfe(e,t,r,n,s){if(!e){gi.info("getSelectAttributes: invalid SQL syntax tree");return}if(!e.from||e.from[0]===void 0)return;let i=e.from[0].databaseid;if(Ofe.isEmptyOrZeroLength(i)){gi.error("No schema 
specified");return}e.from.forEach(c=>{dp(c,t,r,n,s)}),e.joins&&e.joins.forEach(c=>{c.as&&(c.table.as=c.as),dp(c.table,t,r,n,s)});let a=new Gl(e.columns);for(let{node:c}of a)if(c&&c.columnid){let l=c.tableid,u=n.has(l)?n.get(l):i;if(l||(l=e.from[0].tableid),!t.get(u).has(l))if(r.has(l))l=r.get(l);else{gi.info(`table specified as ${l} not found.`);return}t.get(u).get(l).indexOf(c.columnid)<0&&t.get(u).get(l).push(c.columnid)}if(e.where){let c=new Gl(e.where),l=e.from[0].tableid;for(let{node:u}of c)if(u&&u.columnid){let d=u.tableid?u.tableid:l;if(!t.get(i).has(d))if(r.has(d))d=r.get(d);else{gi.info(`table specified as ${d} not found.`);continue}t.get(i).get(d).indexOf(u.columnid)<0&&t.get(i).get(d).push(u.columnid)}}if(e.joins&&e.joins.forEach(c=>{let l=new Gl(c.on);for(let{node:u}of l)if(u&&u.columnid){let d=u.tableid,f=s.get(d);if(!t.get(f).has(d))if(r.has(d))d=r.get(d);else{gi.info(`table specified as ${d} not found.`);continue}t.get(f).get(d).indexOf(u.columnid)<0&&t.get(f).get(d).push(u.columnid)}}),e.order){let c=new Gl(e.order);for(let{node:l}of c)if(l&&l.columnid){let u=l.tableid,d=n.has(u)?n.get(u):i;if(u||(u=e.from[0].tableid),!t.get(d).has(u))if(r.has(u))u=r.get(u);else{gi.info(`table specified as ${u} not found.`);return}t.get(d).get(u).indexOf(l.columnid)<0&&t.get(d).get(u).push(l.columnid)}}}o(Dfe,"getSelectAttributes");function Mfe(e,t,r){if(!e){gi.info("getUpdateAttributes: invalid SQL syntax tree");return}let n=new Gl(e.columns),s=e.table.databaseid;dp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&kO(e.table.tableid,s,i.columnid,t,r)}o(Mfe,"getUpdateAttributes");function vfe(e,t,r){if(!e){gi.info("getDeleteAttributes: invalid SQL syntax tree");return}let n=new Gl(e.where),s=e.table.databaseid;dp(e.table,t,r);for(let{node:i}of n)i&&i.columnid&&kO(e.table.tableid,s,i.columnid,t,r)}o(vfe,"getDeleteAttributes");function Ufe(e,t,r){if(!e){gi.info("getInsertAttributes: invalid SQL syntax tree");return}let n=new Gl(e.columns),s=e.into.databaseid;dp(e.into,t,r);for(let{node:i}of n)i&&i.columnid&&kO(e.into.tableid,s,i.columnid,t,r)}o(Ufe,"getInsertAttributes");function kO(e,t,r,n,s){if(!n.get(t))return;let i=e;n.get(t).has(i)||(i=s.get(i)),n.get(t).get(i).push(r)}o(kO,"pushAttribute");b1.exports=HO});var w1=M((ZUe,N1)=>{"use strict";var aT=(k(),v(W)),cT=class{static{o(this,"BaseLicense")}constructor(t=0,r=aT.RAM_ALLOCATION_ENUM.DEFAULT,n=aT.LICENSE_VALUES.VERSION_DEFAULT,s){this.exp_date=t,this.ram_allocation=r,this.version=n,this.fingerprint=s}},GO=class extends cT{static{o(this,"ExtendedLicense")}constructor(t=0,r=aT.RAM_ALLOCATION_ENUM.DEFAULT,n=aT.LICENSE_VALUES.VERSION_DEFAULT,s,i=!1){super(t,r,n,s),this.enterprise=i}};N1.exports={BaseLicense:cT,ExtendedLicense:GO}});var jd=M((t0e,M1)=>{"use strict";var zd=require("fs-extra"),lT=(Tg(),v(Sg)),C1=require("crypto"),xfe=require("moment"),Bfe=require("uuid").v4,hn=Q(),$O=require("path"),Ffe=ie(),ql=(k(),v(W)),{totalmem:O1}=require("os"),Hfe=w1().ExtendedLicense,Wd="invalid license key format",kfe="061183",Gfe="mofi25",qfe="aes-256-cbc",$fe=16,Vfe=32,P1=oe(),{resolvePath:L1}=yt();P1.initSync();var qO;M1.exports={validateLicense:D1,generateFingerPrint:Yfe,licenseSearch:YO,getLicense:jfe,checkMemoryLimit:Qfe};function VO(){return $O.join(P1.getHdbBasePath(),ql.LICENSE_KEY_DIR_NAME,ql.LICENSE_FILE_NAME)}o(VO,"getLicenseDirPath");function Kfe(){let e=VO();return L1($O.join(e,ql.LICENSE_FILE_NAME))}o(Kfe,"getLicenseFilePath");function KO(){let e=VO();return L1($O.join(e,ql.REG_KEY_FILE_NAME))}o(KO,"getFingerPrintFilePath");async function 
Yfe(){let e=KO();try{return await zd.readFile(e,"utf8")}catch(t){if(t.code==="ENOENT")return await Wfe();throw hn.error(`Error writing fingerprint file to ${e}`),hn.error(t),new Error("There was an error generating the fingerprint")}}o(Yfe,"generateFingerPrint");async function Wfe(){let e=Bfe(),t=lT.hash(e,lT.HASH_FUNCTION.MD5),r=KO();try{await zd.mkdirp(VO()),await zd.writeFile(r,t)}catch(n){if(n.code==="EEXIST")return t;throw hn.error(`Error writing fingerprint file to ${r}`),hn.error(n),new Error("There was an error generating the fingerprint")}return t}o(Wfe,"writeFingerprint");function D1(e,t){let r={valid_license:!1,valid_date:!1,valid_machine:!1,exp_date:null,ram_allocation:ql.RAM_ALLOCATION_ENUM.DEFAULT,version:ql.LICENSE_VALUES.VERSION_DEFAULT};if(!e)return hn.error("empty license key passed to validate."),r;let n=KO(),s=!1;try{s=zd.statSync(n)}catch(i){hn.error(i)}if(s){let i;try{i=zd.readFileSync(n,"utf8")}catch{hn.error("error validating this machine in the license"),r.valid_machine=!1;return}let a=e.split(Gfe),c=a[1];c=Buffer.concat([Buffer.from(c)],$fe);let l=Buffer.concat([Buffer.from(i)],Vfe),u=C1.createDecipheriv(qfe,l,c);r.valid_date=!0,r.valid_license=!0,r.valid_machine=!0;let d=null;try{d=u.update(a[0],"hex","utf8"),d.trim(),d+=u.final("utf8")}catch{let h=zfe(a[0],i);if(h)d=h;else throw r.valid_license=!1,r.valid_machine=!1,console.error(Wd),hn.error(Wd),new Error(Wd)}let f;if(isNaN(d))try{f=JSON.parse(d),r.version=f.version,r.exp_date=f.exp_date,isNaN(r.exp_date)&&(r.exp_date=new Date(r.exp_date).getTime()),f.ram_allocation&&(r.ram_allocation=f.ram_allocation)}catch{throw console.error(Wd),hn.error(Wd),new Error(Wd)}else r.exp_date=d;r.exp_date<xfe().valueOf()&&(r.valid_date=!1),lT.validate(a[1],`${kfe}${i}${t}`,lT.HASH_FUNCTION.MD5)||(r.valid_license=!1)}else r.valid_license=!1,r.valid_machine=!1;return r.valid_license&&r.valid_machine&&r.valid_date||hn.error("Invalid licence"),r}o(D1,"validateLicense");function zfe(e,t){try{let r=C1.createDecipher("aes192",t),n=r.update(e,"hex","utf8");return n.trim(),n+=r.final("utf8"),n}catch{hn.warn("Check old license failed")}}o(zfe,"checkOldLicense");function YO(){let e=new Hfe,t=[];try{t=zd.readFileSync(Kfe(),"utf-8").split(`\r
18
- `)}catch(r){r.code==="ENOENT"?hn.debug("no license file found"):hn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(Ffe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=D1(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){hn.error("There was an error parsing the license string."),hn.error(s),e.ram_allocation=ql.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return qO=e,e}o(YO,"licenseSearch");async function jfe(){return qO||await YO(),qO}o(jfe,"getLicense");function Qfe(){let e=YO().ram_allocation,t=process.constrainedMemory?.()||O1();if(t=Math.round(Math.min(t,O1())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(Qfe,"checkMemoryLimit")});var jO=M((n0e,B1)=>{var uT=jd(),v1=require("chalk"),cs=Q(),U1=require("prompt"),{promisify:Jfe}=require("util"),WO=(k(),v(W)),Xfe=require("fs-extra"),Zfe=require("path"),eme=ie(),{packageJson:tme}=Rt(),x1=oe();x1.initSync();var rme=require("moment"),nme=Jfe(U1.get),sme=Zfe.join(x1.getHdbBasePath(),WO.LICENSE_KEY_DIR_NAME,WO.LICENSE_FILE_NAME,WO.LICENSE_FILE_NAME);B1.exports={getFingerprint:ome,setLicense:ime,parseLicense:zO,register:ame,getRegistrationInfo:lme};async function ime(e){if(e&&e.key&&e.company){try{cs.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await zO(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw cs.error(r),cs.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(ime,"setLicense");async function ome(){let e={};try{e=await uT.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw cs.error(r),cs.error(t),new Error(r)}return e}o(ome,"getFingerprint");async function zO(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");cs.info("Validating license input...");let r=uT.validateLicense(e,t);if(cs.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(cs.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(cs.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{cs.info("writing license to disk"),await Xfe.writeFile(sme,JSON.stringify({license_key:e,company:t}))}catch(n){throw cs.error("Failed to write License"),n}return"Registration successful."}o(zO,"parseLicense");async function ame(){let e=await cme();return zO(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(ame,"register");async function cme(){let e=await uT.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:v1.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:v1.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{U1.start()}catch(n){cs.error(n)}let r;try{r=await nme(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(cme,"promptForRegistration");async function lme(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await uT.getLicense()}catch(r){throw cs.error(`There was an error when searching licenses due to: ${r.message}`),r}if(eme.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=tme.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=rme.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(lme,"getRegistrationInfo")});var H1=M((i0e,F1)=>{"use strict";var ume=Nt(),QO=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+ume.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};F1.exports=QO});var q1=M((a0e,G1)=>{"use strict";var k1=Nt(),JO=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+k1.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+k1.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};G1.exports=JO});var V1=M((l0e,$1)=>{"use strict";var XO=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};$1.exports=XO});var Y1=M((d0e,K1)=>{"use strict";var dme=Nt(),ZO=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+dme.SERVER_SUFFIX.ADMIN,this.password=r}};K1.exports=ZO});var hT=M((m0e,j1)=>{"use strict";var $l=require("path"),Vl=require("fs-extra"),fme=H1(),mme=q1(),hme=V1(),pme=Y1(),eC=ts(),Jd=ie(),xn=yt(),fT=(k(),v(W)),fp=Nt(),{CONFIG_PARAMS:rr}=fT,Xd=Q(),mp=oe(),W1=Ki(),tC=mr(),Eme=os(),Qd="clustering",_me=1e4,z1=50;j1.exports={generateNatsConfig:Sme,removeNatsConfig:Tme,getHubConfigPath:gme};function gme(){let e=mp.get(rr.ROOTPATH);return $l.join(e,Qd,fp.NATS_CONFIG_FILES.HUB_SERVER)}o(gme,"getHubConfigPath");async function Sme(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=mp.get(rr.ROOTPATH);Vl.ensureDirSync($l.join(r,"clustering","leaf")),mp.initSync();let n=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERT_AUTH),s=xn.getConfigFromFile(rr.CLUSTERING_TLS_PRIVATEKEY),i=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERTIFICATE);!await Vl.exists(i)&&!await Vl.exists(!n)&&await Eme.createNatsCerts();let 
a=$l.join(r,Qd,fp.PID_FILES.HUB),c=$l.join(r,Qd,fp.PID_FILES.LEAF),l=xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=$l.join(r,Qd,fp.NATS_CONFIG_FILES.HUB_SERVER),d=$l.join(r,Qd,fp.NATS_CONFIG_FILES.LEAF_SERVER),f=xn.getConfigFromFile(rr.CLUSTERING_TLS_INSECURE),m=xn.getConfigFromFile(rr.CLUSTERING_TLS_VERIFY),h=xn.getConfigFromFile(rr.CLUSTERING_NODENAME),p=xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await tC.checkNATSServerInstalled()||mT("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await eC.listUsers(),g=xn.getConfigFromFile(rr.CLUSTERING_USER),y=await eC.getClusterUser();(Jd.isEmpty(y)||y.active!==!0)&&mT(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await dT(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await dT(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await dT(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),await dT(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],R=[];for(let[q,K]of _.entries())K.role?.role===fT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new pme(K.username,W1.decrypt(K.hash))),R.push(new hme(K.username,W1.decrypt(K.hash))));let N=[],{hub_routes:O}=xn.getClusteringRoutes();if(!Jd.isEmptyOrZeroLength(O))for(let q of O)N.push(`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@${q.host}:${q.port}`);let F=new fme(xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NAME),xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,R);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=Jd.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===fT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Vl.writeJson(u,F),Xd.trace(`Hub server config written to ${u}`));let Z=`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,G=`tls://${y.uri_encoded_name}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,Y=new mme(xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[G],T,R,i,s,n,f);n==null&&delete Y.tls.ca_file,(t===void 0||t===fT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Vl.writeJson(d,Y),Xd.trace(`Leaf server config written to ${d}`))}o(Sme,"generateNatsConfig");async function dT(e){let t=mp.get(e);return Jd.isEmpty(t)&&mT(`port undefined for '${e}'`),await Jd.isPortTaken(t)&&mT(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(dT,"isPortAvailable");function mT(e){let t=`Error generating clustering config: ${e}`;Xd.error(t),console.error(t),process.exit(1)}o(mT,"generateNatsConfigError");async function Tme(e){let{port:t,config_file:r}=tC.getServerConfig(e),{username:n,decrypt_hash:s}=await eC.getClusterUser(),i=0,a=2e3;for(;i<z1;){try{let d=await tC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){Xd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=z1)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&Xd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await Jd.asyncSetTimeout(u)}let c="0".repeat(_me),l=$l.join(mp.get(rr.ROOTPATH),Qd,r);await Vl.writeFile(l,c),await Vl.remove(l),Xd.notify(e,"started.")}o(Tme,"removeNatsConfig")});var tK=M((p0e,eK)=>{"use strict";var ls=oe(),Rme=jd(),Ke=(k(),v(W)),hp=Nt(),Wo=require("path"),{PACKAGE_ROOT:ET}=Rt(),Q1=oe(),pT=ie(),Zd="/dev/null",yme=Wo.join(ET,"launchServiceScripts"),J1=Wo.join(ET,"utility/scripts"),Ame=Wo.join(J1,Ke.HDB_RESTART_SCRIPT),X1=Wo.resolve(ET,"dependencies",`${process.platform}-${process.arch}`,hp.NATS_BINARY_NAME);function Z1(){let t=Rme.licenseSearch().ram_allocation||Ke.RAM_ALLOCATION_ENUM.DEFAULT,r=Ke.MEM_SETTING_KEY+t,n={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return pT.noBootFile()&&(n[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=pT.getEnvCliRootPath()),{name:Ke.PROCESS_DESCRIPTORS.HDB,script:Ke.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:ET}}o(Z1,"generateMainServerConfig");var bme=9930;function Ime(){ls.initSync(!0);let e=ls.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Wo.join(e,"clustering",hp.NATS_CONFIG_FILES.HUB_SERVER),r=Wo.join(ls.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=Q1.get(Ke.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=hp.LOG_LEVEL_FLAGS[ls.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==bme?"-"+n:""),script:X1,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return ls.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=Zd,i.error_file=Zd),i}o(Ime,"generateNatsHubServerConfig");var Nme=9940;function wme(){ls.initSync(!0);let e=ls.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Wo.join(e,"clustering",hp.NATS_CONFIG_FILES.LEAF_SERVER),r=Wo.join(ls.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=Q1.get(Ke.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=hp.LOG_LEVEL_FLAGS[ls.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==Nme?"-"+n:""),script:X1,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return ls.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=Zd,i.error_file=Zd),i}o(wme,"generateNatsLeafServerConfig");function Ome(){ls.initSync();let e=Wo.join(ls.get(Ke.CONFIG_PARAMS.LOGGING_ROOT),Ke.LOG_NAMES.HDB),t={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ke.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:yme,autorestart:!1};return ls.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=Zd,t.error_file=Zd),t}o(Ome,"generateClusteringUpgradeV4ServiceConfig");function Cme(){let e={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.RESTART_HDB};return pT.noBootFile()&&(e[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=pT.getEnvCliRootPath()),{...{name:Ke.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:J1},script:Ame}}o(Cme,"generateRestart");function 
Pme(){return{apps:[Z1()]}}o(Pme,"generateAllServiceConfigs");eK.exports={generateAllServiceConfigs:Pme,generateMainServerConfig:Z1,generateRestart:Cme,generateNatsHubServerConfig:Ime,generateNatsLeafServerConfig:wme,generateClusteringUpgradeV4ServiceConfig:Ome}});var ef=M((g0e,pK)=>{"use strict";var ct=(k(),v(W)),Lme=ie(),jo=hT(),_T=mr(),zo=Nt(),oc=tK(),gT=oe(),Kl=Q(),Dme=ro(),{startWorker:rK,onMessageFromWorkers:Mme}=nt(),vme=Gd(),_0e=require("util"),Ume=require("child_process"),xme=require("fs"),{execFile:Bme}=Ume,Je;pK.exports={enterPM2Mode:Fme,start:ac,stop:rC,reload:sK,restart:iK,list:nC,describe:cK,connect:Qo,kill:$me,startAllServices:Vme,startService:sC,getUniqueServicesList:lK,restartAllServices:Kme,isServiceRegistered:uK,reloadStopStart:dK,restartHdb:aK,deleteProcess:Gme,startClusteringProcesses:mK,startClusteringThreads:hK,isHdbRestartRunning:qme,isClusteringRunning:Wme,stopClustering:Yme,reloadClustering:zme,expectedRestartOfChildren:oK};var pp=!1;Mme(e=>{e.type==="restart"&&gT.initSync(!0)});function Fme(){pp=!0}o(Fme,"enterPM2Mode");function Qo(){return Je||(Je=require("pm2")),new Promise((e,t)=>{Je.connect((r,n)=>{r&&t(r),e(n)})})}o(Qo,"connect");var pn,Hme=10,nK;function ac(e,t=!1){if(pp)return kme(e);let r=Bme(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let a=pn.indexOf(r);a>-1&&pn.splice(a,1),!nK&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<Hme&&(xme.existsSync(jo.getHubConfigPath())?ac(e):(await jo.generateNatsConfig(!0),ac(e),await new Promise(c=>setTimeout(c,3e3)),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let a=gT.get(ct.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,d;for(;l=c.exec(i);){if(l.index&&zo.LOG_LEVEL_HIERARCHY[a]>=zo.LOG_LEVEL_HIERARCHY[d||"info"]){let h=d===zo.LOG_LEVELS.ERR||d===zo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",h,n,i.slice(u,l.index).trim())}let[f,m]=l;u=l.index+f.length,d=zo.LOG_LEVELS[m]}if(zo.LOG_LEVEL_HIERARCHY[a]>=zo.LOG_LEVEL_HIERARCHY[d||"info"]){let f=d===zo.LOG_LEVELS.ERR||d===zo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",f,n,i.slice(u).trim())}}if(o(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!pn&&(pn=[],!t)){let i=o(()=>{nK=!0,pn&&(pn.map(a=>a.kill()),process.exit(0))},"killChildren");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}pn.push(r)}o(ac,"start");function kme(e){return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.start(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(kme,"startWithPM2");function rC(e){if(!pp){for(let t of pn||[])t.name===e&&(pn.splice(pn.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.stop(e,async(n,s)=>{n&&(Je.disconnect(),r(n)),Je.delete(e,(i,a)=>{i&&(Je.disconnect(),r(n)),Je.disconnect(),t(a)})})})}o(rC,"stop");function sK(e){return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.reload(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(sK,"reload");function iK(e){if(!pp){oK();for(let t of pn||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.restart(e,(n,s)=>{Je.disconnect(),t(s)})})}o(iK,"restart");function oK(){for(let e of pn||[])e.config&&(e.config.restarts=0)}o(oK,"expectedRestartOfChildren");function Gme(e){return 
new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.delete(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(Gme,"deleteProcess");async function aK(){await ac(oc.generateRestart())}o(aK,"restartHdb");async function qme(){let e=await nC();for(let t in e)if(e[t].name===ct.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}o(qme,"isHdbRestartRunning");function nC(){return new Promise(async(e,t)=>{try{await Qo()}catch(r){t(r)}Je.list((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(nC,"list");function cK(e){return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.describe(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(cK,"describe");function $me(){if(!pp){for(let e of pn||[])e.kill();pn=[];return}return new Promise(async(e,t)=>{try{await Qo()}catch(r){t(r)}Je.killDaemon((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o($me,"kill");async function Vme(){try{await mK(),await hK(),await ac(oc.generateAllServiceConfigs())}catch(e){throw Je?.disconnect(),e}}o(Vme,"startAllServices");async function sC(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case ct.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=oc.generateMainServerConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=oc.generateNatsIngestServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=oc.generateNatsReplyServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=oc.generateNatsHubServerConfig(),await ac(r,t),await jo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=oc.generateNatsLeafServerConfig(),await ac(r,t),await jo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=oc.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await ac(r)}catch(r){throw Je?.disconnect(),r}}o(sC,"startService");async function lK(){try{let e=await nC(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Je?.disconnect(),e}}o(lK,"getUniqueServicesList");async function Kme(e=[]){try{let t=!1,r=await lK();for(let n=0,s=Object.values(r).length;n<s;n++){let a=Object.values(r)[n].name;e.includes(a)||(a===ct.PROCESS_DESCRIPTORS.HDB?t=!0:await iK(a))}t&&await dK(ct.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Je?.disconnect(),t}}o(Kme,"restartAllServices");async function uK(e){if(pn?.find(r=>r.name===e))return!0;let t=await vme.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}o(uK,"isServiceRegistered");async function dK(e){let t=gT.get(ct.CONFIG_PARAMS.THREADS_COUNT)??gT.get(ct.CONFIG_PARAMS.THREADS),r=await cK(e),n=Lme.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await rC(e),await sC(e)):e===ct.PROCESS_DESCRIPTORS.HDB?await aK():await sK(e)}o(dK,"reloadStopStart");var fK;async function mK(e=!1){for(let t in ct.CLUSTERING_PROCESSES){let r=ct.CLUSTERING_PROCESSES[t];await sC(r,e)}}o(mK,"startClusteringProcesses");async function hK(){fK=rK(ct.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await _T.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await _T.updateLocalStreams();let e=await Dme.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===ct.PRE_4_0_0_VERSION){Kl.info("Starting clustering upgrade 4.0.0 
process"),rK(ct.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}o(hK,"startClusteringThreads");async function Yme(){for(let e in ct.CLUSTERING_PROCESSES)if(e!==ct.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===ct.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await fK.terminate();else{let t=ct.CLUSTERING_PROCESSES[e];await rC(t)}}o(Yme,"stopClustering");async function Wme(){for(let e in ct.CLUSTERING_PROCESSES){let t=ct.CLUSTERING_PROCESSES[e];if(await uK(t)===!1)return!1}return!0}o(Wme,"isClusteringRunning");async function zme(){await jo.generateNatsConfig(!0),await _T.reloadNATSHub(),await _T.reloadNATSLeaf(),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}o(zme,"reloadClustering")});var aC={};Oe(aC,{compactOnStart:()=>jme,copyDb:()=>RK});async function jme(){cc.notify("Running compact on start"),console.log("Running compact on start");let e=(0,iC.get)(U.ROOTPATH),t=new Map,r=st();(0,oC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,ST.join)(e,"backup",n+".mdb"),a=(0,ST.join)(e,Uc,n+"-copy.mdb"),c=0;try{c=await EK(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){cc.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await RK(n,a),console.log("Backing up",n,"to",i);try{await(0,Yl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}}try{ld()}catch(n){cc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}for(let[n,{dbPath:s,copyDest:i}]of t)console.log("Moving copy compacted",n,"to",s),await(0,Yl.move)(i,s,{overwrite:!0}),await(0,Yl.remove)((0,ST.join)(e,Uc,`${n}-copy.mdb-lock`));try{ld()}catch(n){cc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){cc.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,oC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Yl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw ld(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=!0,c=await EK(n);if(console.log("Database",n,"after compact has a total record count of",c),i!==c){a=!1;let l=`There is a discrepancy between pre and post compact record count for database ${n}.
19
- Total record count before compaction: ${i}, total after: ${c}.
20
- Database backup has not been removed and can be found here: ${s}`;cc.error(l),console.error(l)}(0,iC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)===!0||a===!1||(console.log("Removing backup",s),await(0,Yl.remove)(s))}}async function EK(e){let t=await(0,TK.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function tf(){}async function RK(e,t){console.log(`Copying database ${e} to ${t}`);let r=st()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=tf,m.primaryStore.remove=tf;for(let h in m.indices){let p=m.indices[h];p.put=tf,p.remove=tf}m.auditStore&&(m.auditStore.put=tf,m.auditStore.remove=tf),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,_K.open)(new gK.default(t)),c=a.openDB(TT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=Vg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let y=new SK.default(!p,p);y.encoding="binary",y.compression=_;let T=n.openDB(m,y);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",y.compression=g;let R=a.openDB(m,y);R.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,R,p,d)}if(i){let m=n.openDB(TT.AUDIT_STORE_NAME,ym);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,y=0,T=0,R=1e7,N=null;for(;R-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),y+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",y,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",y,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var _K,ST,Yl,iC,gK,SK,TT,TK,oC,cc,cC=ue(()=>{De();_K=require("lmdb"),ST=require("path"),Yl=require("fs-extra"),iC=w(oe()),gK=w(Dm()),SK=w(Lm()),TT=w(qt());k();Mi();TK=w(Fa()),oC=w(yt()),cc=w(Q());o(jme,"compactOnStart");o(EK,"getTotalDBRecordCount");o(tf,"noop");o(RK,"copyDb")});var nf=M((w0e,OK)=>{"use strict";var Qme=require("minimist"),{isMainThread:uC,parentPort:_p,threadId:b0e}=require("worker_threads"),ft=(k(),v(W)),no=Q(),dC=ie(),yT=hT(),RT=mr(),I0e=Nt(),IK=yt(),Si=ef(),yK=Gd(),{compactOnStart:Jme}=(cC(),v(aC)),Xme=xc(),{restartWorkers:AT,onMessageByType:Zme}=nt(),{handleHDBError:ehe,hdbErrors:the}=ge(),{HTTP_STATUS_CODES:rhe}=the,gp=oe(),{sendOperationToNode:AK,getThisNodeName:nhe,monitorNodeCAs:she}=(ss(),v(Go)),{getHDBNodeTable:N0e}=(Dl(),v(Rw));gp.initSync();var Ep=`Restarting HarperDB. This may take up to ${ft.RESTART_TIMEOUT_MS/1e3} seconds.`,ihe="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",bK="Clustering is not enabled so cannot be restarted",ohe="Invalid service",rf,xs;OK.exports={restart:NK,restartService:fC};uC&&Zme(ft.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await fC({service:e.workerType}):NK({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function NK(e){xs=Object.keys(e).length===0,rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB);let t=Qme(process.argv);if(t.service){await fC(t);return}if(xs&&!rf){console.error(ihe);return}if(xs&&console.log(Ep),rf){Si.enterPM2Mode(),no.notify(Ep);let r=Xme(Object.keys(ft.CONFIG_PARAM_MAP),!0);return dC.isEmptyOrZeroLength(Object.keys(r))||IK.updateConfigValue(void 0,void 0,r,!0,!0),ahe(),Ep}return uC?(no.notify(Ep),gp.get(ft.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Jme(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{AT()},50)):_p.postMessage({type:ft.ITC_EVENT_TYPES.RESTART}),Ep}o(NK,"restart");async function fC(e){let{service:t}=e;if(ft.HDB_PROCESS_SERVICES[t]===void 0)throw ehe(new Error,ohe,rhe.BAD_REQUEST,void 0,void 0,!0);if(Si.expectedRestartOfChildren(),rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB),!uC){e.replicated&&she(),_p.postMessage({type:ft.ITC_EVENT_TYPES.RESTART,workerType:t}),_p.ref(),await new Promise(s=>{_p.on("message",i=>{i.type==="restart-complete"&&(s(),_p.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===nhe())continue;let i;try{({job_id:i}=await AK(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await AK(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case ft.HDB_PROCESS_SERVICES.clustering:if(!gp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=bK;break}xs&&console.log("Restarting clustering"),no.notify("Restarting clustering"),await wK();break;case ft.HDB_PROCESS_SERVICES.clustering_config:case ft.HDB_PROCESS_SERVICES["clustering config"]:if(!gp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=bK;break}xs&&console.log("Restarting clusteringConfig"),no.notify("Restarting clustering_config"),await Si.reloadClustering();break;case"custom_functions":case"custom functions":case ft.HDB_PROCESS_SERVICES.harperdb:case ft.HDB_PROCESS_SERVICES.http_workers:case ft.HDB_PROCESS_SERVICES.http:if(xs&&!rf){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}xs&&console.log("Restarting httpWorkers"),no.notify("Restarting http_workers"),xs?await Si.restart(ft.PROCESS_DESCRIPTORS.HDB):await AT("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(no.error(r),xs&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(fC,"restartService");async function ahe(){await wK(),await Si.restart(ft.PROCESS_DESCRIPTORS.HDB),await dC.asyncSetTimeout(2e3),gp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await lC(),xs&&(await RT.closeConnection(),process.exit(0))}o(ahe,"restartPM2Mode");async function wK(){if(!IK.getConfigFromFile(ft.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await yK.getHDBProcessInfo()).clustering.length===0)no.trace("Clustering not running, restart will start clustering services"),await yT.generateNatsConfig(!0),await Si.startClusteringProcesses(),await Si.startClusteringThreads(),await lC(),xs&&await RT.closeConnection();else{await yT.generateNatsConfig(!0),rf?(no.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await yK.getHDBProcessInfo()).clustering.forEach(s=>{no.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await dC.asyncSetTimeout(3e3),await lC(),await RT.updateLocalStreams(),xs&&await RT.closeConnection(),no.trace("Restart clustering restarting ingest and reply service threads");let t=AT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=AT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(wK,"restartClustering");async function lC(){await yT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await yT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(lC,"removeNatsConfig")});var HK=M((P0e,FK)=>{"use strict";var C0e=require("lodash"),Bn=(k(),v(W)),{handleHDBError:CK,hdbErrors:che}=ge(),{HDB_ERROR_MSGS:lhe,HTTP_STATUS_CODES:uhe}=che,mC=Q();FK.exports={getRolePermissions:fhe};var Wl=Object.create(null),dhe=o(e=>({key:e,perms:{}}),"permsTemplateObj"),MK=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),vK=o((e=!1,t=!1,r=!1,n=!1)=>({[Bn.PERMS_CRUD_ENUM.READ]:e,[Bn.PERMS_CRUD_ENUM.INSERT]:t,[Bn.PERMS_CRUD_ENUM.UPDATE]:r,[Bn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),hC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...vK(t,r,n,s)}),"tablePermsTemplate"),PK=o((e,t=vK())=>({attribute_name:e,describe:BK(t),[Sp]:t[Sp],[pC]:t[pC],[EC]:t[EC]}),"attrPermsTemplate"),LK=o((e,t=!1)=>({attribute_name:e,describe:t,[Sp]:t}),"timestampAttrPermsTemplate"),{READ:Sp,INSERT:pC,UPDATE:EC}=Bn.PERMS_CRUD_ENUM,UK=Object.values(Bn.PERMS_CRUD_ENUM),xK=[Sp,pC,EC];function fhe(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Bn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Wl[t]&&Wl[t].key===n)return Wl[t].perms;let s=mhe(e,r);return Wl[t]?Wl[t].key=n:Wl[t]=dhe(n),Wl[t].perms=s,s}catch(r){if(!e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Bn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${role_name}' must be updated to align with new structure from the 2.2.0 release.`;throw mC.error(n),mC.debug(r),CK(new Error,lhe.OUTDATED_PERMS_TRANSLATION_ERROR,uhe.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
18
+ `)}catch(r){r.code==="ENOENT"?hn.debug("no license file found"):hn.error(`could not search for licenses due to: '${r.message}`)}for(let r=0;r<t.length;++r){let n=t[r];try{if(Ffe.isEmptyOrZeroLength(n))continue;let s=JSON.parse(n),i=D1(s.license_key,s.company);i.valid_machine===!0&&i.valid_date===!0&&i.valid_machine===!0&&(e.exp_date=i.exp_date>e.exp_date?i.exp_date:e.exp_date,e.ram_allocation=i.ram_allocation,e.enterprise=!0)}catch(s){hn.error("There was an error parsing the license string."),hn.error(s),e.ram_allocation=ql.RAM_ALLOCATION_ENUM.DEFAULT,e.enterprise=!1}}return qO=e,e}o(YO,"licenseSearch");async function jfe(){return qO||await YO(),qO}o(jfe,"getLicense");function Qfe(){let e=YO().ram_allocation,t=process.constrainedMemory?.()||O1();if(t=Math.round(Math.min(t,O1())/2**20),t>e)return`This server has more memory (${t}MB) than HarperDB is licensed for (${e}MB), this should only be used for educational and development purposes.`}o(Qfe,"checkMemoryLimit")});var jO=M((n0e,B1)=>{var uT=jd(),v1=require("chalk"),cs=Q(),U1=require("prompt"),{promisify:Jfe}=require("util"),WO=(k(),v(W)),Xfe=require("fs-extra"),Zfe=require("path"),eme=ie(),{packageJson:tme}=Rt(),x1=oe();x1.initSync();var rme=require("moment"),nme=Jfe(U1.get),sme=Zfe.join(x1.getHdbBasePath(),WO.LICENSE_KEY_DIR_NAME,WO.LICENSE_FILE_NAME,WO.LICENSE_FILE_NAME);B1.exports={getFingerprint:ome,setLicense:ime,parseLicense:zO,register:ame,getRegistrationInfo:lme};async function ime(e){if(e&&e.key&&e.company){try{cs.info(`parsing license key: ${e.key} and `);let t=e.company.toString();await zO(e.key.trim(),t.trim())}catch(t){let r="There was an error parsing the license key.";throw cs.error(r),cs.error(t),new Error(r)}return"Wrote license key file. Registration successful."}throw new Error("Invalid key or company specified for license file.")}o(ime,"setLicense");async function ome(){let e={};try{e=await uT.generateFingerPrint()}catch(t){let r="Error generating fingerprint.";throw cs.error(r),cs.error(t),new Error(r)}return e}o(ome,"getFingerprint");async function zO(e,t){if(!e||!t)throw new Error("Invalid entries for License Key and Customer Company");cs.info("Validating license input...");let r=uT.validateLicense(e,t);if(cs.info("checking for valid license..."),!r.valid_license)throw new Error("Invalid license found.");if(cs.info("checking valid license date..."),!r.valid_date)throw new Error("This License has expired.");if(cs.info(`checking for valid machine license ${r.valid_machine}`),!r.valid_machine)throw new Error("This license is in use on another machine.");try{cs.info("writing license to disk"),await Xfe.writeFile(sme,JSON.stringify({license_key:e,company:t}))}catch(n){throw cs.error("Failed to write License"),n}return"Registration successful."}o(zO,"parseLicense");async function ame(){let e=await cme();return zO(e.HDB_LICENSE,e.CUSTOMER_COMPANY)}o(ame,"register");async function cme(){let e=await uT.generateFingerPrint(),t={properties:{CUSTOMER_COMPANY:{description:v1.magenta("[COMPANY] Please enter your company name"),required:!0},HDB_LICENSE:{description:v1.magenta(`[HDB_LICENSE] Your fingerprint is ${e} Please enter your license key`),required:!0}}};try{U1.start()}catch(n){cs.error(n)}let r;try{r=await nme(t)}catch(n){throw console.error("There was a problem prompting for registration input. 
Exiting."),n}return r}o(cme,"promptForRegistration");async function lme(){let e={registered:!1,version:null,ram_allocation:null,license_expiration_date:null},t;try{t=await uT.getLicense()}catch(r){throw cs.error(`There was an error when searching licenses due to: ${r.message}`),r}if(eme.isEmptyOrZeroLength(t))throw new Error("There were no licenses found.");if(e.registered=t.enterprise,e.version=tme.version,e.ram_allocation=t.ram_allocation,isNaN(t.exp_date))e.license_expiration_date=t.enterprise?t.exp_date:null;else{let r=rme.utc(t.exp_date).format("YYYY-MM-DD");e.license_expiration_date=t.enterprise?r:null}return e}o(lme,"getRegistrationInfo")});var H1=M((i0e,F1)=>{"use strict";var ume=Nt(),QO=class{static{o(this,"HubConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f,m,h,p){this.port=t,a===null&&(a=void 0),this.server_name=r+ume.SERVER_SUFFIX.HUB,this.pid_file=n,this.max_payload=67108864,this.reconnect_error_reports=100,this.jetstream={enabled:!1},this.tls={cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l},this.leafnodes={port:u,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c}},this.cluster={name:d,port:f,routes:m,tls:{cert_file:s,key_file:i,ca_file:a,insecure:c,verify:l}},this.accounts={SYS:{users:h},HDB:{users:p}},this.system_account="SYS"}};F1.exports=QO});var q1=M((a0e,G1)=>{"use strict";var k1=Nt(),JO=class{static{o(this,"LeafConfigObject")}constructor(t,r,n,s,i,a,c,l,u,d,f){this.port=t,f===null&&(f=void 0),this.server_name=r+k1.SERVER_SUFFIX.LEAF,this.pid_file=n,this.max_payload=67108864,this.jetstream={enabled:!0,store_dir:s,domain:r+k1.SERVER_SUFFIX.LEAF},this.tls={cert_file:u,key_file:d,ca_file:f,insecure:!0},this.leafnodes={remotes:[{tls:{ca_file:f,insecure:!0},urls:i,account:"SYS"},{tls:{ca_file:f,insecure:!0},urls:a,account:"HDB"}]},this.accounts={SYS:{users:c},HDB:{users:l,jetstream:"enabled"}},this.system_account="SYS"}};G1.exports=JO});var V1=M((l0e,$1)=>{"use strict";var XO=class{static{o(this,"HdbUserObject")}constructor(t,r){this.user=t,this.password=r}};$1.exports=XO});var Y1=M((d0e,K1)=>{"use strict";var dme=Nt(),ZO=class{static{o(this,"SysUserObject")}constructor(t,r){this.user=t+dme.SERVER_SUFFIX.ADMIN,this.password=r}};K1.exports=ZO});var hT=M((m0e,j1)=>{"use strict";var $l=require("path"),Vl=require("fs-extra"),fme=H1(),mme=q1(),hme=V1(),pme=Y1(),eC=ts(),Jd=ie(),xn=yt(),fT=(k(),v(W)),fp=Nt(),{CONFIG_PARAMS:rr}=fT,Xd=Q(),mp=oe(),W1=Ki(),tC=mr(),Eme=os(),Qd="clustering",_me=1e4,z1=50;j1.exports={generateNatsConfig:Sme,removeNatsConfig:Tme,getHubConfigPath:gme};function gme(){let e=mp.get(rr.ROOTPATH);return $l.join(e,Qd,fp.NATS_CONFIG_FILES.HUB_SERVER)}o(gme,"getHubConfigPath");async function Sme(e=!1,t=void 0){console.error("Warning: NATS replication is deprecated and will be removed in version 5.0 of Harper");let r=mp.get(rr.ROOTPATH);Vl.ensureDirSync($l.join(r,"clustering","leaf")),mp.initSync();let n=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERT_AUTH),s=xn.getConfigFromFile(rr.CLUSTERING_TLS_PRIVATEKEY),i=xn.getConfigFromFile(rr.CLUSTERING_TLS_CERTIFICATE);!await Vl.exists(i)&&!await Vl.exists(!n)&&await Eme.createNatsCerts();let 
a=$l.join(r,Qd,fp.PID_FILES.HUB),c=$l.join(r,Qd,fp.PID_FILES.LEAF),l=xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_STREAMS_PATH),u=$l.join(r,Qd,fp.NATS_CONFIG_FILES.HUB_SERVER),d=$l.join(r,Qd,fp.NATS_CONFIG_FILES.LEAF_SERVER),f=xn.getConfigFromFile(rr.CLUSTERING_TLS_INSECURE),m=xn.getConfigFromFile(rr.CLUSTERING_TLS_VERIFY),h=xn.getConfigFromFile(rr.CLUSTERING_NODENAME),p=xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT);await tC.checkNATSServerInstalled()||mT("nats-server dependency is either missing or the wrong version. Run 'npm install' to fix");let _=await eC.listUsers(),g=xn.getConfigFromFile(rr.CLUSTERING_USER),y=await eC.getClusterUser();(Jd.isEmpty(y)||y.active!==!0)&&mT(`Invalid cluster user '${cluster_username}'. A valid user with the role 'cluster_user' must be defined under clustering.user in harperdb-config.yaml`),e||(await dT(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),await dT(rr.CLUSTERING_HUBSERVER_LEAFNODES_NETWORK_PORT),await dT(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),await dT(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT));let T=[],R=[];for(let[q,K]of _.entries())K.role?.role===fT.ROLE_TYPES_ENUM.CLUSTER_USER&&K.active&&(T.push(new pme(K.username,W1.decrypt(K.hash))),R.push(new hme(K.username,W1.decrypt(K.hash))));let N=[],{hub_routes:O}=xn.getClusteringRoutes();if(!Jd.isEmptyOrZeroLength(O))for(let q of O)N.push(`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@${q.host}:${q.port}`);let F=new fme(xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_NETWORK_PORT),h,a,i,s,n,f,m,p,xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NAME),xn.getConfigFromFile(rr.CLUSTERING_HUBSERVER_CLUSTER_NETWORK_PORT),N,T,R);n==null&&(delete F.tls.ca_file,delete F.leafnodes.tls.ca_file),t=Jd.isEmpty(t)?void 0:t.toLowerCase(),(t===void 0||t===fT.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase())&&(await Vl.writeJson(u,F),Xd.trace(`Hub server config written to ${u}`));let Z=`tls://${y.sys_name_encoded}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,G=`tls://${y.uri_encoded_name}:${y.uri_encoded_d_hash}@0.0.0.0:${p}`,Y=new mme(xn.getConfigFromFile(rr.CLUSTERING_LEAFSERVER_NETWORK_PORT),h,c,l,[Z],[G],T,R,i,s,n,f);n==null&&delete Y.tls.ca_file,(t===void 0||t===fT.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())&&(await Vl.writeJson(d,Y),Xd.trace(`Leaf server config written to ${d}`))}o(Sme,"generateNatsConfig");async function dT(e){let t=mp.get(e);return Jd.isEmpty(t)&&mT(`port undefined for '${e}'`),await Jd.isPortTaken(t)&&mT(`'${e}' port '${t}' is is in use by another process, check to see if HarperDB is already running or another process is using this port.`),!0}o(dT,"isPortAvailable");function mT(e){let t=`Error generating clustering config: ${e}`;Xd.error(t),console.error(t),process.exit(1)}o(mT,"generateNatsConfigError");async function Tme(e){let{port:t,config_file:r}=tC.getServerConfig(e),{username:n,decrypt_hash:s}=await eC.getClusterUser(),i=0,a=2e3;for(;i<z1;){try{let d=await tC.createConnection(t,n,s,!1);if(d.protocol.connected===!0){d.close();break}}catch(d){Xd.trace(`removeNatsConfig waiting for ${e}. Caught and swallowed error ${d}`)}if(i++,i>=z1)throw new Error(`Operations API timed out attempting to connect to ${e}. This is commonly caused by incorrect clustering config. Check hdb.log for further details.`);let u=a*(i*2);u>3e4&&Xd.notify("Operations API waiting for Nats server connection. 
This could be caused by large Nats streams or incorrect clustering config."),await Jd.asyncSetTimeout(u)}let c="0".repeat(_me),l=$l.join(mp.get(rr.ROOTPATH),Qd,r);await Vl.writeFile(l,c),await Vl.remove(l),Xd.notify(e,"started.")}o(Tme,"removeNatsConfig")});var tK=M((p0e,eK)=>{"use strict";var ls=oe(),Rme=jd(),Ke=(k(),v(W)),hp=Nt(),Wo=require("path"),{PACKAGE_ROOT:ET}=Rt(),Q1=oe(),pT=ie(),Zd="/dev/null",yme=Wo.join(ET,"launchServiceScripts"),J1=Wo.join(ET,"utility/scripts"),Ame=Wo.join(J1,Ke.HDB_RESTART_SCRIPT),X1=Wo.resolve(ET,"dependencies",`${process.platform}-${process.arch}`,hp.NATS_BINARY_NAME);function Z1(){let t=Rme.licenseSearch().ram_allocation||Ke.RAM_ALLOCATION_ENUM.DEFAULT,r=Ke.MEM_SETTING_KEY+t,n={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.HDB,IS_SCRIPTED_SERVICE:!0};return pT.noBootFile()&&(n[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=pT.getEnvCliRootPath()),{name:Ke.PROCESS_DESCRIPTORS.HDB,script:Ke.LAUNCH_SERVICE_SCRIPTS.MAIN,exec_mode:"fork",env:n,node_args:r,cwd:ET}}o(Z1,"generateMainServerConfig");var bme=9930;function Ime(){ls.initSync(!0);let e=ls.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Wo.join(e,"clustering",hp.NATS_CONFIG_FILES.HUB_SERVER),r=Wo.join(ls.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=Q1.get(Ke.CONFIG_PARAMS.CLUSTERING_HUBSERVER_NETWORK_PORT),s=hp.LOG_LEVEL_FLAGS[ls.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB+(n!==bme?"-"+n:""),script:X1,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_HUB},merge_logs:!0,out_file:r,error_file:r,instances:1};return ls.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=Zd,i.error_file=Zd),i}o(Ime,"generateNatsHubServerConfig");var Nme=9940;function wme(){ls.initSync(!0);let e=ls.get(Ke.CONFIG_PARAMS.ROOTPATH),t=Wo.join(e,"clustering",hp.NATS_CONFIG_FILES.LEAF_SERVER),r=Wo.join(ls.get(Ke.HDB_SETTINGS_NAMES.LOG_PATH_KEY),Ke.LOG_NAMES.HDB),n=Q1.get(Ke.CONFIG_PARAMS.CLUSTERING_LEAFSERVER_NETWORK_PORT),s=hp.LOG_LEVEL_FLAGS[ls.get(Ke.CONFIG_PARAMS.CLUSTERING_LOGLEVEL)]??void 0,i={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF+(n!==Nme?"-"+n:""),script:X1,args:s?`${s} -c ${t}`:`-c ${t}`,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_LEAF},merge_logs:!0,out_file:r,error_file:r,instances:1};return ls.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(i.out_file=Zd,i.error_file=Zd),i}o(wme,"generateNatsLeafServerConfig");function Ome(){ls.initSync();let e=Wo.join(ls.get(Ke.CONFIG_PARAMS.LOGGING_ROOT),Ke.LOG_NAMES.HDB),t={name:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0,script:Ke.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,exec_mode:"fork",env:{[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0},merge_logs:!0,out_file:e,error_file:e,instances:1,cwd:yme,autorestart:!1};return ls.get(Ke.HDB_SETTINGS_NAMES.LOG_TO_FILE)||(t.out_file=Zd,t.error_file=Zd),t}o(Ome,"generateClusteringUpgradeV4ServiceConfig");function Cme(){let e={[Ke.PROCESS_NAME_ENV_PROP]:Ke.PROCESS_DESCRIPTORS.RESTART_HDB};return pT.noBootFile()&&(e[Ke.CONFIG_PARAMS.ROOTPATH.toUpperCase()]=pT.getEnvCliRootPath()),{...{name:Ke.PROCESS_DESCRIPTORS.RESTART_HDB,exec_mode:"fork",env:e,instances:1,autorestart:!1,cwd:J1},script:Ame}}o(Cme,"generateRestart");function 
Pme(){return{apps:[Z1()]}}o(Pme,"generateAllServiceConfigs");eK.exports={generateAllServiceConfigs:Pme,generateMainServerConfig:Z1,generateRestart:Cme,generateNatsHubServerConfig:Ime,generateNatsLeafServerConfig:wme,generateClusteringUpgradeV4ServiceConfig:Ome}});var ef=M((g0e,pK)=>{"use strict";var ct=(k(),v(W)),Lme=ie(),jo=hT(),_T=mr(),zo=Nt(),oc=tK(),gT=oe(),Kl=Q(),Dme=ro(),{startWorker:rK,onMessageFromWorkers:Mme}=nt(),vme=Gd(),_0e=require("util"),Ume=require("child_process"),xme=require("fs"),{execFile:Bme}=Ume,Je;pK.exports={enterPM2Mode:Fme,start:ac,stop:rC,reload:sK,restart:iK,list:nC,describe:cK,connect:Qo,kill:$me,startAllServices:Vme,startService:sC,getUniqueServicesList:lK,restartAllServices:Kme,isServiceRegistered:uK,reloadStopStart:dK,restartHdb:aK,deleteProcess:Gme,startClusteringProcesses:mK,startClusteringThreads:hK,isHdbRestartRunning:qme,isClusteringRunning:Wme,stopClustering:Yme,reloadClustering:zme,expectedRestartOfChildren:oK};var pp=!1;Mme(e=>{e.type==="restart"&&gT.initSync(!0)});function Fme(){pp=!0}o(Fme,"enterPM2Mode");function Qo(){return Je||(Je=require("pm2")),new Promise((e,t)=>{Je.connect((r,n)=>{r&&t(r),e(n)})})}o(Qo,"connect");var pn,Hme=10,nK;function ac(e,t=!1){if(pp)return kme(e);let r=Bme(e.script,e.args.split(" "),e);r.name=e.name,r.config=e,r.on("exit",async i=>{let a=pn.indexOf(r);a>-1&&pn.splice(a,1),!nK&&i!==0&&(e.restarts=(e.restarts||0)+1,e.restarts<Hme&&(xme.existsSync(jo.getHubConfigPath())?ac(e):(await jo.generateNatsConfig(!0),ac(e),await new Promise(c=>setTimeout(c,3e3)),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF))))});let n={serviceName:e.name.replace(/ /g,"-")};function s(i){let a=gT.get(ct.CONFIG_PARAMS.CLUSTERING_LOGLEVEL),c=/\[\d+][^\[]+\[(\w+)]/g,l,u=0,d;for(;l=c.exec(i);){if(l.index&&zo.LOG_LEVEL_HIERARCHY[a]>=zo.LOG_LEVEL_HIERARCHY[d||"info"]){let h=d===zo.LOG_LEVELS.ERR||d===zo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",h,n,i.slice(u,l.index).trim())}let[f,m]=l;u=l.index+f.length,d=zo.LOG_LEVELS[m]}if(zo.LOG_LEVEL_HIERARCHY[a]>=zo.LOG_LEVEL_HIERARCHY[d||"info"]){let f=d===zo.LOG_LEVELS.ERR||d===zo.LOG_LEVELS.WRN?Kl.OUTPUTS.STDERR:Kl.OUTPUTS.STDOUT;Kl.logCustomLevel(d||"info",f,n,i.slice(u).trim())}}if(o(s,"extractMessages"),r.stdout.on("data",s),r.stderr.on("data",s),r.unref(),!pn&&(pn=[],!t)){let i=o(()=>{nK=!0,pn&&(pn.map(a=>a.kill()),process.exit(0))},"killChildren");process.on("exit",i),process.on("SIGINT",i),process.on("SIGQUIT",i),process.on("SIGTERM",i)}pn.push(r)}o(ac,"start");function kme(e){return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.start(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(kme,"startWithPM2");function rC(e){if(!pp){for(let t of pn||[])t.name===e&&(pn.splice(pn.indexOf(t),1),t.kill());return}return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.stop(e,async(n,s)=>{n&&(Je.disconnect(),r(n)),Je.delete(e,(i,a)=>{i&&(Je.disconnect(),r(n)),Je.disconnect(),t(a)})})})}o(rC,"stop");function sK(e){return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.reload(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(sK,"reload");function iK(e){if(!pp){oK();for(let t of pn||[])t.name===e&&t.kill()}return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.restart(e,(n,s)=>{Je.disconnect(),t(s)})})}o(iK,"restart");function oK(){for(let e of pn||[])e.config&&(e.config.restarts=0)}o(oK,"expectedRestartOfChildren");function Gme(e){return 
new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.delete(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(Gme,"deleteProcess");async function aK(){await ac(oc.generateRestart())}o(aK,"restartHdb");async function qme(){let e=await nC();for(let t in e)if(e[t].name===ct.PROCESS_DESCRIPTORS.RESTART_HDB)return!0;return!1}o(qme,"isHdbRestartRunning");function nC(){return new Promise(async(e,t)=>{try{await Qo()}catch(r){t(r)}Je.list((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o(nC,"list");function cK(e){return new Promise(async(t,r)=>{try{await Qo()}catch(n){r(n)}Je.describe(e,(n,s)=>{n&&(Je.disconnect(),r(n)),Je.disconnect(),t(s)})})}o(cK,"describe");function $me(){if(!pp){for(let e of pn||[])e.kill();pn=[];return}return new Promise(async(e,t)=>{try{await Qo()}catch(r){t(r)}Je.killDaemon((r,n)=>{r&&(Je.disconnect(),t(r)),Je.disconnect(),e(n)})})}o($me,"kill");async function Vme(){try{await mK(),await hK(),await ac(oc.generateAllServiceConfigs())}catch(e){throw Je?.disconnect(),e}}o(Vme,"startAllServices");async function sC(e,t=!1){try{let r;switch(e=e.toLowerCase(),e){case ct.PROCESS_DESCRIPTORS.HDB.toLowerCase():r=oc.generateMainServerConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_INGEST_SERVICE.toLowerCase():r=oc.generateNatsIngestServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE.toLowerCase():r=oc.generateNatsReplyServiceConfig();break;case ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase():r=oc.generateNatsHubServerConfig(),await ac(r,t),await jo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase():r=oc.generateNatsLeafServerConfig(),await ac(r,t),await jo.removeNatsConfig(e);return;case ct.PROCESS_DESCRIPTORS.CLUSTERING_UPGRADE_4_0_0.toLowerCase():r=oc.generateClusteringUpgradeV4ServiceConfig();break;default:throw new Error(`Start service called with unknown service config: ${e}`)}await ac(r)}catch(r){throw Je?.disconnect(),r}}o(sC,"startService");async function lK(){try{let e=await nC(),t={};for(let r=0,n=e.length;r<n;r++){let s=e[r];t[s.name]===void 0&&(t[s.name]={name:s.name,exec_mode:s.pm2_env.exec_mode})}return t}catch(e){throw Je?.disconnect(),e}}o(lK,"getUniqueServicesList");async function Kme(e=[]){try{let t=!1,r=await lK();for(let n=0,s=Object.values(r).length;n<s;n++){let a=Object.values(r)[n].name;e.includes(a)||(a===ct.PROCESS_DESCRIPTORS.HDB?t=!0:await iK(a))}t&&await dK(ct.PROCESS_DESCRIPTORS.HDB)}catch(t){throw Je?.disconnect(),t}}o(Kme,"restartAllServices");async function uK(e){if(pn?.find(r=>r.name===e))return!0;let t=await vme.getHDBProcessInfo();return t.core.length&&t.core[0]?.parent==="PM2"}o(uK,"isServiceRegistered");async function dK(e){let t=gT.get(ct.CONFIG_PARAMS.THREADS_COUNT)??gT.get(ct.CONFIG_PARAMS.THREADS),r=await cK(e),n=Lme.isEmptyOrZeroLength(r)?0:r.length;t!==n?(await rC(e),await sC(e)):e===ct.PROCESS_DESCRIPTORS.HDB?await aK():await sK(e)}o(dK,"reloadStopStart");var fK;async function mK(e=!1){for(let t in ct.CLUSTERING_PROCESSES){let r=ct.CLUSTERING_PROCESSES[t];await sC(r,e)}}o(mK,"startClusteringProcesses");async function hK(){fK=rK(ct.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE,{name:ct.PROCESS_DESCRIPTORS.CLUSTERING_REPLY_SERVICE});try{await _T.deleteLocalStream("__HARPERDB_WORK_QUEUE__")}catch{}await _T.updateLocalStreams();let e=await Dme.getAllNodeRecords();for(let t=0,r=e.length;t<r;t++)if(e[t].system_info?.hdb_version===ct.PRE_4_0_0_VERSION){Kl.info("Starting clustering upgrade 4.0.0 
process"),rK(ct.LAUNCH_SERVICE_SCRIPTS.NODES_UPGRADE_4_0_0,{name:"Upgrade-4-0-0"});break}}o(hK,"startClusteringThreads");async function Yme(){for(let e in ct.CLUSTERING_PROCESSES)if(e!==ct.CLUSTERING_PROCESSES.CLUSTERING_INGEST_PROC_DESCRIPTOR)if(e===ct.CLUSTERING_PROCESSES.CLUSTERING_REPLY_SERVICE_DESCRIPTOR)await fK.terminate();else{let t=ct.CLUSTERING_PROCESSES[e];await rC(t)}}o(Yme,"stopClustering");async function Wme(){for(let e in ct.CLUSTERING_PROCESSES){let t=ct.CLUSTERING_PROCESSES[e];if(await uK(t)===!1)return!1}return!0}o(Wme,"isClusteringRunning");async function zme(){await jo.generateNatsConfig(!0),await _T.reloadNATSHub(),await _T.reloadNATSLeaf(),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_HUB.toLowerCase()),await jo.removeNatsConfig(ct.PROCESS_DESCRIPTORS.CLUSTERING_LEAF.toLowerCase())}o(zme,"reloadClustering")});var aC={};Oe(aC,{compactOnStart:()=>jme,copyDb:()=>RK});async function jme(){cc.notify("Running compact on start"),console.log("Running compact on start");let e=(0,iC.get)(U.ROOTPATH),t=new Map,r=st();(0,oC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);try{for(let n in r){if(n==="system"||n.endsWith("-copy"))continue;let s;for(let l in r[n]){s=r[n][l].primaryStore.path;break}if(!s){console.log("Couldn't find any tables in database",n);continue}let i=(0,ST.join)(e,"backup",n+".mdb"),a=(0,ST.join)(e,Uc,n+"-copy.mdb"),c=0;try{c=await EK(n),console.log("Database",n,"before compact has a total record count of",c)}catch(l){cc.error("Error getting record count for database",n,l),console.error("Error getting record count for database",n,l)}t.set(n,{dbPath:s,copyDest:a,backupDest:i,recordCount:c}),await RK(n,a),console.log("Backing up",n,"to",i);try{await(0,Yl.move)(s,i,{overwrite:!0})}catch(l){console.log("Error moving database",s,"to",i,l)}console.log("Moving copy compacted",n,"to",s),await(0,Yl.move)(a,s,{overwrite:!0}),await(0,Yl.remove)((0,ST.join)(e,Uc,`${n}-copy.mdb-lock`))}try{ld()}catch(n){cc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n)}try{ld()}catch(n){cc.error("Error resetting databases after backup",n),console.error("Error resetting databases after backup",n),process.exit(0)}}catch(n){cc.error("Error compacting database, rolling back operation",n),console.error("Error compacting database, rolling back operation",n),(0,oC.updateConfigValue)(U.STORAGE_COMPACTONSTART,!1);for(let[s,{dbPath:i,backupDest:a}]of t){console.error("Moving backup database",a,"back to",i);try{await(0,Yl.move)(a,i,{overwrite:!0})}catch(c){console.error(c)}}throw ld(),n}for(let[n,{backupDest:s,recordCount:i}]of t){let a=await EK(n);if(console.log("Database",n,"after compact has a total record count of",a),i!==a){let c=`There is a discrepancy between pre and post compact record count for database ${n}.
19
+ Total record count before compaction: ${i}, total after: ${a}.
20
+ Database backup has not been removed and can be found here: ${s}`;cc.error(c),console.error(c)}(0,iC.get)(U.STORAGE_COMPACTONSTARTKEEPBACKUP)!==!0&&(console.log("Removing backup",s),await(0,Yl.remove)(s))}}async function EK(e){let t=await(0,TK.describeSchema)({database:e}),r=0;for(let n in t)r+=t[n].record_count;return r}function tf(){}async function RK(e,t){console.log(`Copying database ${e} to ${t}`);let r=st()[e];if(!r)throw new Error(`Source database not found: ${e}`);let n;for(let f in r){let m=r[f];m.primaryStore.put=tf,m.primaryStore.remove=tf;for(let h in m.indices){let p=m.indices[h];p.put=tf,p.remove=tf}m.auditStore&&(m.auditStore.put=tf,m.auditStore.remove=tf),n=m.primaryStore.rootStore}if(!n)throw new Error(`Source database does not have any tables: ${e}`);let s=n.dbisDb,i=n.auditStore,a=(0,_K.open)(new gK.default(t)),c=a.openDB(TT.INTERNAL_DBIS_NAME),l,u=0,d=s.useReadTransaction();try{for(let{key:m,value:h}of s.getRange({transaction:d})){let p=h.is_hash_attribute||h.isPrimaryKey,_,g;if(p&&(_=h.compression,g=Vg(),g?h.compression=g:delete h.compression,_?.dictionary?.toString()===g?.dictionary?.toString()&&(_=null,g=null)),c.put(m,h),!(p||h.indexed))continue;let y=new SK.default(!p,p);y.encoding="binary",y.compression=_;let T=n.openDB(m,y);T.decoder=null,T.decoderCopies=!1,T.encoding="binary",y.compression=g;let R=a.openDB(m,y);R.encoder=null,console.log("copying",m,"from",e,"to",t),await f(T,R,p,d)}if(i){let m=n.openDB(TT.AUDIT_STORE_NAME,ym);console.log("copying audit log for",e,"to",t),f(i,m,!1,d)}async function f(m,h,p,_){let g=0,y=0,T=0,R=1e7,N=null;for(;R-- >0;)try{for(let O of m.getKeys({start:N,transaction:_}))try{N=O;let{value:F,version:Z}=m.getEntry(O,{transaction:_});if(F?.length<14&&p){T++;continue}l=h.put(O,F,p?Z:void 0),g++,_.openTimer&&(_.openTimer=0),y+=(O?.length||10)+F.length,u++>5e3&&(await l,console.log("copied",g,"entries",T,"delete records,",y,"bytes"),u=0)}catch(F){console.error("Error copying record",typeof O=="symbol"?"symbol":O,"from",e,"to",t,F)}console.log("finish copying, copied",g,"entries",T,"delete records,",y,"bytes");return}catch{if(typeof N=="string"){if(N==="z")return console.error("Reached end of dbi",N,"for",e,"to",t);N=N.slice(0,-2)+"z"}else if(typeof N=="number")N++;else return console.error("Unknown key type",N,"for",e,"to",t)}}o(f,"copyDbi"),await l,console.log("copied database "+e+" to "+t)}finally{d.done(),a.close()}}var _K,ST,Yl,iC,gK,SK,TT,TK,oC,cc,cC=ue(()=>{De();_K=require("lmdb"),ST=require("path"),Yl=require("fs-extra"),iC=w(oe()),gK=w(Dm()),SK=w(Lm()),TT=w(qt());k();Mi();TK=w(Fa()),oC=w(yt()),cc=w(Q());o(jme,"compactOnStart");o(EK,"getTotalDBRecordCount");o(tf,"noop");o(RK,"copyDb")});var nf=M((w0e,OK)=>{"use strict";var Qme=require("minimist"),{isMainThread:uC,parentPort:_p,threadId:b0e}=require("worker_threads"),ft=(k(),v(W)),no=Q(),dC=ie(),yT=hT(),RT=mr(),I0e=Nt(),IK=yt(),Si=ef(),yK=Gd(),{compactOnStart:Jme}=(cC(),v(aC)),Xme=xc(),{restartWorkers:AT,onMessageByType:Zme}=nt(),{handleHDBError:ehe,hdbErrors:the}=ge(),{HTTP_STATUS_CODES:rhe}=the,gp=oe(),{sendOperationToNode:AK,getThisNodeName:nhe,monitorNodeCAs:she}=(ss(),v(Go)),{getHDBNodeTable:N0e}=(Dl(),v(Rw));gp.initSync();var Ep=`Restarting HarperDB. This may take up to ${ft.RESTART_TIMEOUT_MS/1e3} seconds.`,ihe="Restart is not available from the CLI when running in non-pm2 mode. 
Either call restart from the API or stop and start HarperDB.",bK="Clustering is not enabled so cannot be restarted",ohe="Invalid service",rf,xs;OK.exports={restart:NK,restartService:fC};uC&&Zme(ft.ITC_EVENT_TYPES.RESTART,async(e,t)=>{e.workerType?await fC({service:e.workerType}):NK({operation:"restart"}),t.postMessage({type:"restart-complete"})});async function NK(e){xs=Object.keys(e).length===0,rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB);let t=Qme(process.argv);if(t.service){await fC(t);return}if(xs&&!rf){console.error(ihe);return}if(xs&&console.log(Ep),rf){Si.enterPM2Mode(),no.notify(Ep);let r=Xme(Object.keys(ft.CONFIG_PARAM_MAP),!0);return dC.isEmptyOrZeroLength(Object.keys(r))||IK.updateConfigValue(void 0,void 0,r,!0,!0),ahe(),Ep}return uC?(no.notify(Ep),gp.get(ft.CONFIG_PARAMS.STORAGE_COMPACTONSTART)&&await Jme(),process.env.HARPER_EXIT_ON_RESTART&&process.exit(0),setTimeout(()=>{AT()},50)):_p.postMessage({type:ft.ITC_EVENT_TYPES.RESTART}),Ep}o(NK,"restart");async function fC(e){let{service:t}=e;if(ft.HDB_PROCESS_SERVICES[t]===void 0)throw ehe(new Error,ohe,rhe.BAD_REQUEST,void 0,void 0,!0);if(Si.expectedRestartOfChildren(),rf=await Si.isServiceRegistered(ft.PROCESS_DESCRIPTORS.HDB),!uC){e.replicated&&she(),_p.postMessage({type:ft.ITC_EVENT_TYPES.RESTART,workerType:t}),_p.ref(),await new Promise(s=>{_p.on("message",i=>{i.type==="restart-complete"&&(s(),_p.unref())})});let n;if(e.replicated){e.replicated=!1,n=[];for(let s of server.nodes){if(s.name===nhe())continue;let i;try{({job_id:i}=await AK(s,e))}catch(a){n.push({node:s.name,message:a.message});continue}n.push(await new Promise((a,c)=>{let u=2400,d=setInterval(async()=>{if(u--<=0){clearInterval(d);let h=new Error("Timed out waiting for restart job to complete");h.replicated=n,c(h)}let m=(await AK(s,{operation:"get_job",id:i})).results[0];if(m.status==="COMPLETE"&&(clearInterval(d),a({node:s.name,message:m.message})),m.status==="ERROR"){clearInterval(d);let h=new Error(m.message);h.replicated=n,c(h)}},250)}))}return{replicated:n}}return}let r;switch(t){case ft.HDB_PROCESS_SERVICES.clustering:if(!gp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=bK;break}xs&&console.log("Restarting clustering"),no.notify("Restarting clustering"),await wK();break;case ft.HDB_PROCESS_SERVICES.clustering_config:case ft.HDB_PROCESS_SERVICES["clustering config"]:if(!gp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)){r=bK;break}xs&&console.log("Restarting clusteringConfig"),no.notify("Restarting clustering_config"),await Si.reloadClustering();break;case"custom_functions":case"custom functions":case ft.HDB_PROCESS_SERVICES.harperdb:case ft.HDB_PROCESS_SERVICES.http_workers:case ft.HDB_PROCESS_SERVICES.http:if(xs&&!rf){r=`Restart ${t} is not available from the CLI when running in non-pm2 mode. 
Either call restart ${t} from the API or stop and start HarperDB.`;break}xs&&console.log("Restarting httpWorkers"),no.notify("Restarting http_workers"),xs?await Si.restart(ft.PROCESS_DESCRIPTORS.HDB):await AT("http");break;default:r=`Unrecognized service: ${t}`;break}return r?(no.error(r),xs&&console.error(r),r):(t==="custom_functions"&&(t="Custom Functions"),`Restarting ${t}`)}o(fC,"restartService");async function ahe(){await wK(),await Si.restart(ft.PROCESS_DESCRIPTORS.HDB),await dC.asyncSetTimeout(2e3),gp.get(ft.CONFIG_PARAMS.CLUSTERING_ENABLED)&&await lC(),xs&&(await RT.closeConnection(),process.exit(0))}o(ahe,"restartPM2Mode");async function wK(){if(!IK.getConfigFromFile(ft.CONFIG_PARAMS.CLUSTERING_ENABLED))return;if((await yK.getHDBProcessInfo()).clustering.length===0)no.trace("Clustering not running, restart will start clustering services"),await yT.generateNatsConfig(!0),await Si.startClusteringProcesses(),await Si.startClusteringThreads(),await lC(),xs&&await RT.closeConnection();else{await yT.generateNatsConfig(!0),rf?(no.trace("Restart clustering restarting PM2 managed Hub and Leaf servers"),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await Si.restart(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)):(await yK.getHDBProcessInfo()).clustering.forEach(s=>{no.trace("Restart clustering killing process pid",s.pid),process.kill(s.pid)}),await dC.asyncSetTimeout(3e3),await lC(),await RT.updateLocalStreams(),xs&&await RT.closeConnection(),no.trace("Restart clustering restarting ingest and reply service threads");let t=AT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_INGEST_SERVICE),r=AT(ft.LAUNCH_SERVICE_SCRIPTS.NATS_REPLY_SERVICE);await t,await r}}o(wK,"restartClustering");async function lC(){await yT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_HUB),await yT.removeNatsConfig(ft.PROCESS_DESCRIPTORS.CLUSTERING_LEAF)}o(lC,"removeNatsConfig")});var HK=M((P0e,FK)=>{"use strict";var C0e=require("lodash"),Bn=(k(),v(W)),{handleHDBError:CK,hdbErrors:che}=ge(),{HDB_ERROR_MSGS:lhe,HTTP_STATUS_CODES:uhe}=che,mC=Q();FK.exports={getRolePermissions:fhe};var Wl=Object.create(null),dhe=o(e=>({key:e,perms:{}}),"permsTemplateObj"),MK=o((e=!1)=>({describe:e,tables:{}}),"schemaPermsTemplate"),vK=o((e=!1,t=!1,r=!1,n=!1)=>({[Bn.PERMS_CRUD_ENUM.READ]:e,[Bn.PERMS_CRUD_ENUM.INSERT]:t,[Bn.PERMS_CRUD_ENUM.UPDATE]:r,[Bn.PERMS_CRUD_ENUM.DELETE]:n}),"permissionsTemplate"),hC=o((e=!1,t=!1,r=!1,n=!1,s=!1)=>({attribute_permissions:[],describe:e,...vK(t,r,n,s)}),"tablePermsTemplate"),PK=o((e,t=vK())=>({attribute_name:e,describe:BK(t),[Sp]:t[Sp],[pC]:t[pC],[EC]:t[EC]}),"attrPermsTemplate"),LK=o((e,t=!1)=>({attribute_name:e,describe:t,[Sp]:t}),"timestampAttrPermsTemplate"),{READ:Sp,INSERT:pC,UPDATE:EC}=Bn.PERMS_CRUD_ENUM,UK=Object.values(Bn.PERMS_CRUD_ENUM),xK=[Sp,pC,EC];function fhe(e){let t;try{if(e.permission.super_user||e.permission.cluster_user)return e.permission;let r={...global.hdb_schema};delete r[Bn.SYSTEM_SCHEMA_NAME],t=e.role;let n=JSON.stringify([e.__updatedtime__,r]);if(Wl[t]&&Wl[t].key===n)return Wl[t].perms;let s=mhe(e,r);return Wl[t]?Wl[t].key=n:Wl[t]=dhe(n),Wl[t].perms=s,s}catch(r){if(!e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]||e[Bn.TIME_STAMP_NAMES_ENUM.UPDATED_TIME]<Bn.PERMS_UPDATE_RELEASE_TIMESTAMP){let n=`Role permissions for role '${t}' must be updated to align with new structure from the 2.2.0 release.`;throw mC.error(n),mC.debug(r),CK(new Error,lhe.OUTDATED_PERMS_TRANSLATION_ERROR,uhe.BAD_REQUEST)}else{let n=`There was an error while translating role permissions for role: ${t}.
21
21
  ${r.stack}`;throw mC.error(n),CK(new Error)}}}o(fhe,"getRolePermissions");function mhe(e,t){let r=Object.create(null);r.super_user=!1;let n=e.permission;r[Bn.SYSTEM_SCHEMA_NAME]=n[Bn.SYSTEM_SCHEMA_NAME],r.structure_user=n.structure_user;let s=Array.isArray(e.permission.structure_user)||e.permission.structure_user===!0?e.permission.structure_user:[];return Object.keys(t).forEach(i=>{if(s===!0||s.indexOf(i)>-1){r[i]=hhe(t[i]);return}r[i]=MK(),n[i]?(n[i].describe&&(r[i].describe=!0),Object.keys(t[i]).forEach(a=>{if(n[i].tables[a]){let c=n[i].tables[a],l=t[i][a],u=phe(c,l);r[i].describe||UK.forEach(d=>{u[d]&&(r[i].describe=!0)}),r[i].tables[a]=u}else r[i].tables[a]=hC()})):Object.keys(t[i]).forEach(a=>{r[i].tables[a]=hC()})}),r}o(mhe,"translateRolePermissions");function hhe(e){let t=MK(!0);return Object.keys(e).forEach(r=>{t.tables[r]=hC(!0,!0,!0,!0,!0)}),t}o(hhe,"createStructureUserPermissions");function phe(e,t){let{attribute_permissions:r}=e;if(r?.length>0){let s={...e};s.attribute_permissions=[];let i=r.reduce((u,d)=>{let{attribute_name:f}=d,m=d;return Bn.TIME_STAMP_NAMES.includes(f)&&(m=LK(f,d[Sp])),u[f]=m,u},{}),a=t.primaryKey||t.hash_attribute,c=!!i[a],l=PK(a);return t.attributes.forEach(({attribute:u})=>{if(i[u]){let d=i[u];d.describe=BK(d),s.attribute_permissions.push(d),c||Ehe(d,l)}else if(u!==a){let d;Bn.TIME_STAMP_NAMES.includes(u)?d=LK(u):d=PK(u),s.attribute_permissions.push(d)}}),c||s.attribute_permissions.push(l),s.describe=DK(s),s}else return e.describe=DK(e),e}o(phe,"getTableAttrPerms");function DK(e){return UK.filter(t=>e[t]).length>0}o(DK,"getSchemaTableDescribePerm");function BK(e){return xK.filter(t=>e[t]).length>0}o(BK,"getAttributeDescribePerm");function Ehe(e,t){xK.forEach(r=>{e[r]&&!t[r]&&(t[r]=!0,t.describe=!0)})}o(Ehe,"checkForHashPerms")});var Tp={};Oe(Tp,{authentication:()=>WK,bypassAuth:()=>Ihe,login:()=>SC,logout:()=>TC,start:()=>Nhe});function Ihe(){YK=!0}async function WK(e,t){let r=e.headers.asObject,n=r.authorization,s=r.cookie,i=r.origin,a=[];try{if(i){let h=e.isOperationsServer?Rhe?The:[]:She?ghe:[];if(h.includes(i)||h.includes("*")){if(e.method==="OPTIONS"){let p=En.get(U.HTTP_CORSACCESSCONTROLALLOWHEADERS)??"Accept, Content-Type, Authorization",_=new Cs([["Access-Control-Allow-Methods","POST, GET, PUT, DELETE, PATCH, OPTIONS"],["Access-Control-Allow-Headers",p],["Access-Control-Allow-Origin",i]]);return bT&&_.set("Access-Control-Allow-Credentials","true"),{status:200,headers:_}}a.push("Access-Control-Allow-Origin",i),bT&&a.push("Access-Control-Allow-Credentials","true")}}let l,u;if(bT){i||(i=r.host);let h=(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session=",p=s?.split(/;\s+/)||[];for(let _ of p)if(_.startsWith(h)){let g=_.indexOf(";");l=_.slice(h.length,g===-1?_.length:g),u=await kK.get(l);break}e.session=u||(u={})}let d=o((h,p,_)=>{let g=new IT.AuthAuditLog(h,p,Aa.AUTHENTICATION,r["x-forwarded-for"]??e.ip,e.method,e.pathname);g.auth_strategy=_,l&&(g.session_id=l),r.referer&&(g.referer=r.referer),r.origin&&(g.origin=r.origin),p===Zs.SUCCESS?_C.notify(g):_C.error(g)},"authAuditLog");if(!e.authorized&&e.mtlsConfig&&e.peerCertificate.subject&&e?._nodeRequest?.socket?.authorizationError&&_C.error("Authorization error:",e._nodeRequest.socket.authorizationError),e.mtlsConfig&&e.authorized&&e.peerCertificate.subject){let h=e.mtlsConfig.user;h!==null?((h===void 0||h==="Common Name"||h==="CN")&&(h=e.peerCertificate.subject.CN),e.user=await Le.getUser(h,null,e),d(h,Zs.SUCCESS,"mTLS")):_he("HTTPS/WSS mTLS authorized connection (mTLS did not 
authorize a user)","from",e.ip)}let f;if(!e.user)if(n){if(f=zl.get(n),!f){let h=n.indexOf(" "),p=n.slice(0,h),_=n.slice(h+1),g,y;try{switch(p){case"Basic":let T=atob(_),R=T.indexOf(":");g=T.slice(0,R),y=T.slice(R+1),f=g||y?await Le.getUser(g,y,e):null;break;case"Bearer":try{f=await XN(_)}catch(N){if(N.message==="invalid token")try{return await iS(_),c({status:-1})}catch{throw N}}break}}catch(T){return Ahe&&(zl.get(_)||(zl.set(_,_),d(g,Zs.FAILURE,p))),c({status:401,body:wa({error:T.message},e)})}zl.set(n,f),yhe&&d(f.username,Zs.SUCCESS,p)}e.user=f}else u?.user?e.user=await Le.getUser(u.user,null,e):(YK&&(e.ip?.includes("127.0.0.")||e.ip=="::1")||e?._nodeRequest?.socket?.server?._pipeName&&e.ip===void 0)&&(e.user=await(0,qK.getSuperUser)());bT&&(e.session.update=function(h){let p=En.get(U.AUTHENTICATION_COOKIE_EXPIRES),_=e.protocol==="https"||r.host?.startsWith("localhost:")||r.host?.startsWith("127.0.0.1:")||r.host?.startsWith("::1");if(!l){l=(0,$K.v4)();let g=En.get(U.AUTHENTICATION_COOKIE_DOMAINS),y=p?new Date(Date.now()+(0,gC.convertToMS)(p)).toUTCString():bhe,T=g?.find(O=>r.host?.endsWith(O)),N=`${(i?i.replace(/^https?:\/\//,"").replace(/\W/,"_")+"-":"")+"hdb-session="}${l}; Path=/; Expires=${y}; HttpOnly`;T&&(N+=`; Domain=${T}`),_&&(N+="; SameSite=None; Secure"),a?a.push("Set-Cookie",N):m?.headers?.set&&m.headers.set("Set-Cookie",N)}return _&&(a?(i&&a.push("Access-Control-Expose-Headers","X-Hdb-Session"),a.push("X-Hdb-Session","Secure")):m?.headers?.set&&(i&&m.headers.set("Access-Control-Expose-Headers","X-Hdb-Session"),m.headers.set("X-Hdb-Session","Secure"))),h.id=l,kK.put(h,{expiresAt:p?Date.now()+(0,gC.convertToMS)(p):void 0})},e.login=async function(h,p){let _=e.user=await Le.authenticateUser(h,p,e);e.session.update({user:_&&(_.getId?.()??_.username)})});let m=await t(e);return m&&(m.status===401&&(r["user-agent"]?.startsWith("Mozilla")&&r.accept?.startsWith("text/html")&&Is.loginPath?(m.status=302,m.headers.set("Location",Is.loginPath(e))):m.headers.set("WWW-Authenticate","Basic")),c(m))}catch(l){throw c(l)}function c(l){let u=a.length;if(u>0){let d=l.headers;d||(l.headers=d=new Cs);for(let f=0;f<u;){let m=a[f++];d.set(m,a[f++])}}return a=null,l}o(c,"applyResponseHeaders")}function Nhe({server:e,port:t,securePort:r}){e.http(WK,t||r?{port:t,securePort:r}:{port:"all"}),GK||(GK=!0,setInterval(()=>{zl=new Map},En.get(U.AUTHENTICATION_CACHETTL)).unref(),VK.user.addListener(()=>{zl=new Map}))}async function SC(e){if(!e.baseRequest?.login)throw new Error("No session for login");return e.baseResponse.headers.set=(t,r)=>{e.fastifyResponse.header(t,r)},await e.baseRequest.login(e.username,e.password??""),"Login successful"}async function TC(e){if(!e.baseRequest.session)throw new Error("No session for logout");return await e.baseRequest.session.update({user:null}),"Logout successful"}var 
qK,$K,En,IT,VK,gC,KK,_he,_C,ghe,She,The,Rhe,kK,bT,YK,yhe,Ahe,bhe,zl,GK,NT=ue(()=>{qK=w(ts());Mr();ku();gd();De();$K=require("uuid"),En=w(oe());k();IT=w(Q()),VK=w(ch());hh();gC=w(ie());bo();KK=(0,IT.forComponent)("authentication"),{debug:_he}=KK,_C=KK.withTag("auth-event");En.initSync();ghe=En.get(U.HTTP_CORSACCESSLIST),She=En.get(U.HTTP_CORS),The=En.get(U.OPERATIONSAPI_NETWORK_CORSACCESSLIST),Rhe=En.get(U.OPERATIONSAPI_NETWORK_CORS),kK=je({table:"hdb_session",database:"system",attributes:[{name:"id",isPrimaryKey:!0},{name:"user"}]}),bT=En.get(U.AUTHENTICATION_ENABLESESSIONS)??!0,YK=process.env.AUTHENTICATION_AUTHORIZELOCAL??En.get(U.AUTHENTICATION_AUTHORIZELOCAL)??process.env.DEV_MODE,yhe=En.get(U.LOGGING_AUDITAUTHEVENTS_LOGSUCCESSFUL)??!1,Ahe=En.get(U.LOGGING_AUDITAUTHEVENTS_LOGFAILED)??!1,bhe="Tue, 01 Oct 8307 19:33:20 GMT",zl=new Map;Le.onInvalidatedUser(()=>{zl=new Map});o(Ihe,"bypassAuth");o(WK,"authentication");o(Nhe,"start");o(SC,"login");o(TC,"logout")});var eY=M((k0e,ZK)=>{"use strict";var we=require("joi"),zK=require("fs-extra"),jK=require("path"),us=lt(),QK=oe(),JK=(k(),v(W)),XK=Q(),{hdbErrors:whe}=ge(),{HDB_ERROR_MSGS:_n}=whe,Jo=/^[a-zA-Z0-9-_]+$/,Ohe=/^[a-zA-Z0-9-_]+$/;ZK.exports={getDropCustomFunctionValidator:Phe,setCustomFunctionValidator:Lhe,addComponentValidator:Uhe,dropCustomFunctionProjectValidator:xhe,packageComponentValidator:Bhe,deployComponentValidator:Fhe,setComponentFileValidator:Dhe,getComponentFileValidator:vhe,dropComponentFileValidator:Mhe,addSSHKeyValidator:Hhe,updateSSHKeyValidator:khe,deleteSSHKeyValidator:Ghe,setSSHKnownHostsValidator:qhe};function wT(e,t,r){try{let n=QK.get(JK.CONFIG_PARAMS.COMPONENTSROOT),s=jK.join(n,t);return zK.existsSync(s)?e?t:r.message(_n.PROJECT_EXISTS):e?r.message(_n.NO_PROJECT):t}catch(n){return XK.error(n),r.message(_n.VALIDATION_ERR)}}o(wT,"checkProjectExists");function Rp(e,t){return e.includes("..")?t.message("Invalid file path"):e}o(Rp,"checkFilePath");function Che(e,t,r,n){try{let s=QK.get(JK.CONFIG_PARAMS.COMPONENTSROOT),i=jK.join(s,e,t,r+".js");return zK.existsSync(i)?r:n.message(_n.NO_FILE)}catch(s){return XK.error(s),n.message(_n.VALIDATION_ERR)}}o(Che,"checkFileExists");function Phe(e){let t=we.object({project:we.string().pattern(Jo).custom(wT.bind(null,!0)).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().pattern(Jo).custom(Che.bind(null,e.project,e.type)).custom(Rp).required().messages({"string.pattern.base":_n.BAD_FILE_NAME})});return us.validateBySchema(e,t)}o(Phe,"getDropCustomFunctionValidator");function Lhe(e){let t=we.object({project:we.string().pattern(Jo).custom(wT.bind(null,!0)).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME}),type:we.string().valid("helpers","routes").required(),file:we.string().custom(Rp).required(),function_content:we.string().required()});return us.validateBySchema(e,t)}o(Lhe,"setCustomFunctionValidator");function Dhe(e){let t=we.object({project:we.string().pattern(Jo).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME}),file:we.string().custom(Rp).required(),payload:we.string().allow("").optional(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return us.validateBySchema(e,t)}o(Dhe,"setComponentFileValidator");function Mhe(e){let t=we.object({project:we.string().pattern(Jo).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME}),file:we.string().custom(Rp).optional()});return 
us.validateBySchema(e,t)}o(Mhe,"dropComponentFileValidator");function vhe(e){let t=we.object({project:we.string().required(),file:we.string().custom(Rp).required(),encoding:we.string().valid("utf8","ASCII","binary","hex","base64","utf16le","latin1","ucs2").optional()});return us.validateBySchema(e,t)}o(vhe,"getComponentFileValidator");function Uhe(e){let t=we.object({project:we.string().pattern(Jo).custom(wT.bind(null,!1)).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME})});return us.validateBySchema(e,t)}o(Uhe,"addComponentValidator");function xhe(e){let t=we.object({project:we.string().pattern(Jo).custom(wT.bind(null,!0)).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME})});return us.validateBySchema(e,t)}o(xhe,"dropCustomFunctionProjectValidator");function Bhe(e){let t=we.object({project:we.string().pattern(Jo).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME}),skip_node_modules:we.boolean(),skip_symlinks:we.boolean()});return us.validateBySchema(e,t)}o(Bhe,"packageComponentValidator");function Fhe(e){let t=we.object({project:we.string().pattern(Jo).required().messages({"string.pattern.base":_n.BAD_PROJECT_NAME}),package:we.string().optional(),restart:we.alternatives().try(we.boolean(),we.string().valid("rolling")).optional()});return us.validateBySchema(e,t)}o(Fhe,"deployComponentValidator");function Hhe(e){let t=we.object({name:we.string().pattern(Ohe).required().messages({"string.pattern.base":_n.BAD_SSH_KEY_NAME}),key:we.string().required(),host:we.string().required(),hostname:we.string().required(),known_hosts:we.string().optional()});return us.validateBySchema(e,t)}o(Hhe,"addSSHKeyValidator");function khe(e){let t=we.object({name:we.string().required(),key:we.string().required()});return us.validateBySchema(e,t)}o(khe,"updateSSHKeyValidator");function Ghe(e){let t=we.object({name:we.string().required()});return us.validateBySchema(e,t)}o(Ghe,"deleteSSHKeyValidator");function qhe(e){let t=we.object({known_hosts:we.string().required()});return us.validateBySchema(e,t)}o(qhe,"setSSHKnownHostsValidator")});var bp=M((q0e,iY)=>{"use strict";var OT=require("joi"),lc=require("path"),sf=require("fs-extra"),{exec:$he,spawn:Vhe}=require("child_process"),Khe=require("util"),Yhe=Khe.promisify($he),of=(k(),v(W)),{PACKAGE_ROOT:Whe}=Rt(),{handleHDBError:yp,hdbErrors:zhe}=ge(),{HTTP_STATUS_CODES:Ap}=zhe,jl=oe(),jhe=lt(),uc=Q(),{once:Qhe}=require("events");jl.initSync();var RC=jl.get(of.CONFIG_PARAMS.COMPONENTSROOT),tY="npm install --force --omit=dev --json",Jhe=`${tY} --dry-run`,Xhe=jl.get(of.CONFIG_PARAMS.ROOTPATH),CT=lc.join(Xhe,"ssh");iY.exports={installModules:rpe,auditModules:npe,installAllRootModules:Zhe,uninstallRootModule:epe,linkHarperdb:tpe,runCommand:af};async function Zhe(e=!1,t=jl.get(of.CONFIG_PARAMS.ROOTPATH)){await PT();let r=!1,n=process.env;sf.pathExistsSync(CT)&&sf.readdirSync(CT).forEach(s=>{s.includes(".key")&&!r&&(n={GIT_SSH_COMMAND:"ssh -F "+lc.join(CT,"config")+" -o UserKnownHostsFile="+lc.join(CT,"known_hosts"),...process.env},r=!0)});try{let s=jl.get(of.CONFIG_PARAMS.ROOTPATH),i=lc.join(s,"node_modules","harperdb");sf.lstatSync(i).isSymbolicLink()&&sf.unlinkSync(i)}catch(s){s.code!=="ENOENT"&&uc.error("Error removing symlink:",s)}await af(e?"npm install --force --ignore-scripts":"npm install --force",t,n)}o(Zhe,"installAllRootModules");async function epe(e){await af(`npm uninstall ${e}`,jl.get(of.CONFIG_PARAMS.ROOTPATH))}o(epe,"uninstallRootModule");async function tpe(){await PT(),await af(`npm link 
${Whe}`,jl.get(of.CONFIG_PARAMS.ROOTPATH))}o(tpe,"linkHarperdb");async function af(e,t=void 0,r=process.env){uc.debug({tagName:"npm_run_command"},`running command: \`${e}\``);let n=Vhe(e,{shell:!0,cwd:t,env:r,stdio:["ignore","pipe","pipe"]}),s="",i="";n.stdout.on("data",c=>{let l=c.toString();uc.debug({tagName:"npm_run_command:stdout"},l),s+=l}),n.stderr.on("data",c=>{let l=c.toString();uc.error({tagName:"npm_run_command:stderr"},l),i+=l});let[a]=await Qhe(n,"close");if(a!==0)throw new Error(`Command \`${e}\` exited with code ${a}.${i===""?"":` Error: ${i}`}`);return s||void 0}o(af,"runCommand");async function rpe(e){let t="install_node_modules is deprecated. Dependencies are automatically installed on deploy, and install_node_modules can lead to inconsistent behavior";uc.warn(t,e.projects);let r=sY(e);if(r)throw yp(r,r.message,Ap.BAD_REQUEST);let{projects:n,dryRun:s}=e,i=s===!0?Jhe:tY;await PT(),await nY(n);let a={};for(let c=0,l=n.length;c<l;c++){let u=n[c];a[u]={npm_output:null,npm_error:null};let d=lc.join(RC,u),f,m=null;try{let{stdout:h,stderr:p}=await Yhe(i,{cwd:d});f=h?h.replace(`
22
22
  `,""):null,m=p?p.replace(`
23
23
  `,""):null}catch(h){h.stderr?a[u].npm_error=rY(h.stderr):a[u].npm_error=h.message;continue}try{a[u].npm_output=JSON.parse(f)}catch{a[u].npm_output=f}try{a[u].npm_error=JSON.parse(m)}catch{a[u].npm_error=m}}return uc.info(`finished installModules with response ${a}`),a.warning=t,a}o(rpe,"installModules");function rY(e){let t='"error": {',r=e.indexOf('"error": {'),n=e.indexOf(`}