@frostpillar/frostpillar-storage-engine 0.0.1 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -723,7 +723,7 @@ var createEmptyTreeJSON = () => {
723
723
  };
724
724
  var ensureNonNegativeSafeInteger = (value, field) => {
725
725
  if (!Number.isSafeInteger(value) || typeof value !== "number" || value < 0) {
726
- throw new PageCorruptionError(`${field} must be a non-negative safe integer.`);
726
+ throw new PageCorruptionError(`${field} must be a non-negative safe integer, got ${String(value)}.`);
727
727
  }
728
728
  return value;
729
729
  };
@@ -459,7 +459,16 @@ var loadIndexedDBSnapshot = async (db, _objectStoreName) => {
459
459
  if (typeof treeJSON !== "object" || treeJSON === null || Array.isArray(treeJSON)) {
460
460
  throw new PageCorruptionError("treeJSON must be a non-null plain object.");
461
461
  }
462
- const currentSizeBytes = computeUtf8ByteLength(JSON.stringify(treeJSON));
462
+ let serialized;
463
+ try {
464
+ serialized = JSON.stringify(treeJSON);
465
+ } catch (err) {
466
+ throw new PageCorruptionError(
467
+ "Failed to serialize BTree snapshot for size estimation.",
468
+ { cause: err }
469
+ );
470
+ }
471
+ const currentSizeBytes = computeUtf8ByteLength(serialized);
463
472
  return { treeJSON, currentSizeBytes, commitId };
464
473
  };
465
474
  var commitIndexedDBSnapshot = async (db, _objectStoreName, treeJSON, commitId) => {
@@ -320,6 +320,11 @@ var parseLocalStorageConfig = (config) => {
320
320
  "localStorage.maxChunks must be a positive safe integer."
321
321
  );
322
322
  }
323
+ if (maxChunkChars * maxChunks > Number.MAX_SAFE_INTEGER) {
324
+ throw new ConfigurationError(
325
+ "localStorage.maxChunkChars * localStorage.maxChunks product exceeds Number.MAX_SAFE_INTEGER."
326
+ );
327
+ }
323
328
  return { keyPrefix, databaseKey, maxChunkChars, maxChunks };
324
329
  };
325
330
 
@@ -342,9 +347,12 @@ var cleanupGenerationChunks = (state, generation, knownChunkCount) => {
342
347
  return;
343
348
  }
344
349
  for (let i = 0; i < knownChunkCount; i += 1) {
345
- state.adapter.removeItem(
346
- chunkKey(state.keyPrefix, state.databaseKey, generation, i)
347
- );
350
+ try {
351
+ state.adapter.removeItem(
352
+ chunkKey(state.keyPrefix, state.databaseKey, generation, i)
353
+ );
354
+ } catch {
355
+ }
348
356
  }
349
357
  return;
350
358
  }
@@ -354,7 +362,10 @@ var cleanupGenerationChunks = (state, generation, knownChunkCount) => {
354
362
  for (let i = 0; i < state.maxChunks; i += 1) {
355
363
  const key = chunkKey(state.keyPrefix, state.databaseKey, generation, i);
356
364
  if (state.adapter.getItem(key) !== null) {
357
- state.adapter.removeItem(key);
365
+ try {
366
+ state.adapter.removeItem(key);
367
+ } catch {
368
+ }
358
369
  }
359
370
  }
360
371
  };
@@ -548,8 +548,8 @@ var isQuotaBrowserError = (error) => {
548
548
  if (!(error instanceof Error)) {
549
549
  return false;
550
550
  }
551
- const normalized = `${error.name}:${error.message}`;
552
- return /quota|max_items|quota_bytes|quota_bytes_per_item/i.test(normalized);
551
+ const normalized = `${error.name}:${error.message}`.toLowerCase();
552
+ return normalized.includes("quota") || normalized.includes("max_items");
553
553
  };
554
554
  var validateSyncStorageCommitQuota = (state, generation, chunks, manifest, resolveChunkKey, manifestStorageKey) => {
555
555
  const pendingItems = chunks.map(
package/dist/index.cjs CHANGED
@@ -233,7 +233,8 @@ var estimateObjectSizeBytes = (value) => {
233
233
  const obj = value;
234
234
  let size = 2;
235
235
  let visibleCount = 0;
236
- for (const k of Object.keys(obj)) {
236
+ for (const k in obj) {
237
+ if (!Object.hasOwn(obj, k)) continue;
237
238
  const v = obj[k];
238
239
  if (v === void 0) {
239
240
  continue;
@@ -1276,7 +1277,7 @@ var Datastore = class {
1276
1277
  }
1277
1278
  );
1278
1279
  this.keyIndex.put(normalizedKey, persistedRecord);
1279
- this.currentSizeBytes = Math.max(0, this.currentSizeBytes + encodedBytes);
1280
+ this.currentSizeBytes = this.currentSizeBytes + encodedBytes;
1280
1281
  await this.backendController?.handleRecordAppended(encodedBytes);
1281
1282
  }
1282
1283
  async putManyStrict(records) {
@@ -1645,6 +1646,11 @@ var parseLocalStorageConfig = (config) => {
1645
1646
  "localStorage.maxChunks must be a positive safe integer."
1646
1647
  );
1647
1648
  }
1649
+ if (maxChunkChars * maxChunks > Number.MAX_SAFE_INTEGER) {
1650
+ throw new ConfigurationError(
1651
+ "localStorage.maxChunkChars * localStorage.maxChunks product exceeds Number.MAX_SAFE_INTEGER."
1652
+ );
1653
+ }
1648
1654
  return { keyPrefix, databaseKey, maxChunkChars, maxChunks };
1649
1655
  };
1650
1656
 
@@ -1667,9 +1673,12 @@ var cleanupGenerationChunks = (state, generation, knownChunkCount) => {
1667
1673
  return;
1668
1674
  }
1669
1675
  for (let i = 0; i < knownChunkCount; i += 1) {
1670
- state.adapter.removeItem(
1671
- chunkKey(state.keyPrefix, state.databaseKey, generation, i)
1672
- );
1676
+ try {
1677
+ state.adapter.removeItem(
1678
+ chunkKey(state.keyPrefix, state.databaseKey, generation, i)
1679
+ );
1680
+ } catch {
1681
+ }
1673
1682
  }
1674
1683
  return;
1675
1684
  }
@@ -1679,7 +1688,10 @@ var cleanupGenerationChunks = (state, generation, knownChunkCount) => {
1679
1688
  for (let i = 0; i < state.maxChunks; i += 1) {
1680
1689
  const key = chunkKey(state.keyPrefix, state.databaseKey, generation, i);
1681
1690
  if (state.adapter.getItem(key) !== null) {
1682
- state.adapter.removeItem(key);
1691
+ try {
1692
+ state.adapter.removeItem(key);
1693
+ } catch {
1694
+ }
1683
1695
  }
1684
1696
  }
1685
1697
  };
@@ -2081,7 +2093,16 @@ var loadIndexedDBSnapshot = async (db, _objectStoreName) => {
2081
2093
  if (typeof treeJSON !== "object" || treeJSON === null || Array.isArray(treeJSON)) {
2082
2094
  throw new PageCorruptionError("treeJSON must be a non-null plain object.");
2083
2095
  }
2084
- const currentSizeBytes = computeUtf8ByteLength(JSON.stringify(treeJSON));
2096
+ let serialized;
2097
+ try {
2098
+ serialized = JSON.stringify(treeJSON);
2099
+ } catch (err) {
2100
+ throw new PageCorruptionError(
2101
+ "Failed to serialize BTree snapshot for size estimation.",
2102
+ { cause: err }
2103
+ );
2104
+ }
2105
+ const currentSizeBytes = computeUtf8ByteLength(serialized);
2085
2106
  return { treeJSON, currentSizeBytes, commitId };
2086
2107
  };
2087
2108
  var commitIndexedDBSnapshot = async (db, _objectStoreName, treeJSON, commitId) => {
@@ -2619,8 +2640,8 @@ var isQuotaBrowserError2 = (error) => {
2619
2640
  if (!(error instanceof Error)) {
2620
2641
  return false;
2621
2642
  }
2622
- const normalized = `${error.name}:${error.message}`;
2623
- return /quota|max_items|quota_bytes|quota_bytes_per_item/i.test(normalized);
2643
+ const normalized = `${error.name}:${error.message}`.toLowerCase();
2644
+ return normalized.includes("quota") || normalized.includes("max_items");
2624
2645
  };
2625
2646
  var validateSyncStorageCommitQuota = (state, generation, chunks, manifest, resolveChunkKey, manifestStorageKey) => {
2626
2647
  const pendingItems = chunks.map(
@@ -23,5 +23,8 @@ export const enforceCapacityPolicy = (capacityState, currentSizeBytes, encodedBy
23
23
  }
24
24
  nextSizeBytes -= evictedBytes;
25
25
  }
26
+ // Underflow is not expected: evictedBytes are always > 0 (enforced above) and
27
+ // derived from sizeBytes accumulated on insert. Math.max is a defensive guard
28
+ // against cumulative estimation rounding inconsistencies.
26
29
  return Math.max(0, nextSizeBytes);
27
30
  };
@@ -18,11 +18,14 @@ const computeUtf8ByteLengthJs = (value) => {
18
18
  i++; // skip low surrogate
19
19
  }
20
20
  else {
21
- bytes += 3; // lone high surrogate U+FFFD replacement (3 bytes)
21
+ // Lone high surrogate: Node.js encodes as 3-byte CESU-8 (not U+FFFD).
22
+ // For JSON-serialized byte counts, use estimateJsonStringBytes() instead.
23
+ bytes += 3;
22
24
  }
23
25
  }
24
26
  else if (code >= 0xdc00 && code <= 0xdfff) {
25
- bytes += 3; // lone low surrogate U+FFFD replacement (3 bytes)
27
+ // Lone low surrogate same platform-dependent caveat as above.
28
+ bytes += 3;
26
29
  }
27
30
  else {
28
31
  bytes += 3;
@@ -110,7 +113,9 @@ export const estimateObjectSizeBytes = (value) => {
110
113
  const obj = value;
111
114
  let size = 2; // { }
112
115
  let visibleCount = 0;
113
- for (const k of Object.keys(obj)) {
116
+ for (const k in obj) {
117
+ if (!Object.hasOwn(obj, k))
118
+ continue;
114
119
  const v = obj[k];
115
120
  // JSON.stringify omits undefined values
116
121
  if (v === undefined) {
@@ -197,6 +197,9 @@ export class Datastore {
197
197
  freedBytes += entry.value.sizeBytes;
198
198
  }
199
199
  totalRemoved += this.keyIndex.deleteRange(normalizedKey, normalizedKey);
200
+ // Underflow is not possible here: freedBytes is the sum of sizeBytes values
201
+ // that were accumulated into currentSizeBytes on insertion. Math.max is
202
+ // purely defensive against any future estimation inconsistency.
200
203
  this.currentSizeBytes = Math.max(0, this.currentSizeBytes - freedBytes);
201
204
  }
202
205
  return totalRemoved;
@@ -352,6 +355,9 @@ export class Datastore {
352
355
  if (this.duplicateKeyPolicy === 'replace') {
353
356
  const existing = this.keyIndex.findFirst(normalizedKey);
354
357
  if (existing !== null) {
358
+ // Underflow is not possible here: existing.value.sizeBytes was added to
359
+ // currentSizeBytes on insert and has not been modified since. Math.max is
360
+ // purely defensive against any future estimation inconsistency.
355
361
  this.currentSizeBytes = Math.max(0, this.currentSizeBytes - existing.value.sizeBytes);
356
362
  this.keyIndex.removeById(existing.entryId);
357
363
  }
@@ -364,7 +370,10 @@ export class Datastore {
364
370
  return evicted.value.sizeBytes;
365
371
  });
366
372
  this.keyIndex.put(normalizedKey, persistedRecord);
367
- this.currentSizeBytes = Math.max(0, this.currentSizeBytes + encodedBytes);
373
+ // encodedBytes is always >= 0, so this addition cannot produce a negative result.
374
+ // Math.max is omitted here intentionally: the guard would be misleading, implying
375
+ // a negative sum is possible when it is not.
376
+ this.currentSizeBytes = this.currentSizeBytes + encodedBytes;
368
377
  await this.backendController?.handleRecordAppended(encodedBytes);
369
378
  }
370
379
  async putManyStrict(records) {
@@ -396,6 +405,10 @@ export class Datastore {
396
405
  totalEncodedBytes += encodedBytes;
397
406
  this.keyIndex.put(normalizedKey, persistedRecord);
398
407
  }
408
+ // effectiveTotalDelta may be negative (net shrink from replacements), but
409
+ // cannot bring currentSizeBytes below 0 because actualReplaced is bounded
410
+ // by the bytes already present in currentSizeBytes. Math.max is purely
411
+ // defensive against any future estimation inconsistency.
399
412
  this.currentSizeBytes = Math.max(0, this.currentSizeBytes + effectiveTotalDelta);
400
413
  await this.backendController?.handleRecordAppended(totalEncodedBytes);
401
414
  }
@@ -442,6 +455,9 @@ export class Datastore {
442
455
  freedBytes += entry.value.sizeBytes;
443
456
  }
444
457
  const removedCount = this.keyIndex.deleteRange(normalizedKey, normalizedKey);
458
+ // Underflow is not possible here: freedBytes is the sum of sizeBytes values
459
+ // that were accumulated into currentSizeBytes on insertion. Math.max is
460
+ // purely defensive against any future estimation inconsistency.
445
461
  this.currentSizeBytes = Math.max(0, this.currentSizeBytes - freedBytes);
446
462
  await this.backendController?.handleRecordAppended(freedBytes);
447
463
  return removedCount;
@@ -47,6 +47,9 @@ export const updateRecordById = (options) => {
47
47
  if (options.keyIndex.updateById(options.id, updatedRecord) === null) {
48
48
  throw new IndexCorruptionError('Record index state is inconsistent during updateById.');
49
49
  }
50
+ // Underflow is not possible: encodedDelta = newSize - oldSize, and oldSize was
51
+ // accumulated into currentSizeBytes on insertion. Math.max is purely defensive
52
+ // against any future estimation inconsistency.
50
53
  return {
51
54
  updated: true,
52
55
  currentSizeBytes: Math.max(0, options.currentSizeBytes + encodedDelta),
@@ -63,6 +66,9 @@ export const deleteRecordById = (options) => {
63
66
  };
64
67
  }
65
68
  const freedBytes = removedFromIndex.value.sizeBytes;
69
+ // Underflow is not possible: freedBytes was accumulated into currentSizeBytes
70
+ // on insertion and has not been modified since. Math.max is purely defensive
71
+ // against any future estimation inconsistency.
66
72
  return {
67
73
  deleted: true,
68
74
  currentSizeBytes: Math.max(0, options.currentSizeBytes - freedBytes),
@@ -88,7 +88,14 @@ export const loadIndexedDBSnapshot = async (db, _objectStoreName) => {
88
88
  if (typeof treeJSON !== 'object' || treeJSON === null || Array.isArray(treeJSON)) {
89
89
  throw new PageCorruptionError('treeJSON must be a non-null plain object.');
90
90
  }
91
- const currentSizeBytes = computeUtf8ByteLength(JSON.stringify(treeJSON));
91
+ let serialized;
92
+ try {
93
+ serialized = JSON.stringify(treeJSON);
94
+ }
95
+ catch (err) {
96
+ throw new PageCorruptionError('Failed to serialize BTree snapshot for size estimation.', { cause: err });
97
+ }
98
+ const currentSizeBytes = computeUtf8ByteLength(serialized);
92
99
  return { treeJSON, currentSizeBytes, commitId };
93
100
  };
94
101
  // ---------------------------------------------------------------------------
@@ -68,6 +68,12 @@ const verifyLockOwnership = (lockPath) => {
68
68
  }
69
69
  };
70
70
  const acquireFileLock = (lockPath) => {
71
+ // Normal lifecycle: releaseFileLock() removes the lock file on close(), so
72
+ // lock files do not accumulate under normal operation. A lock file persisting
73
+ // after this process exits signals abnormal termination (crash / SIGKILL).
74
+ // That stale file is recovered lazily by tryRecoverStaleLock() the next time
75
+ // any process attempts to acquire the same datastore path — so no proactive
76
+ // sweep is needed and none is performed here.
71
77
  try {
72
78
  writeLockFile(lockPath);
73
79
  }
@@ -34,7 +34,7 @@ const createEmptyTreeJSON = () => {
34
34
  };
35
35
  const ensureNonNegativeSafeInteger = (value, field) => {
36
36
  if (!Number.isSafeInteger(value) || typeof value !== 'number' || value < 0) {
37
- throw new PageCorruptionError(`${field} must be a non-negative safe integer.`);
37
+ throw new PageCorruptionError(`${field} must be a non-negative safe integer, got ${String(value)}.`);
38
38
  }
39
39
  return value;
40
40
  };
@@ -12,5 +12,8 @@ export const parseLocalStorageConfig = (config) => {
12
12
  if (!Number.isSafeInteger(maxChunks) || maxChunks <= 0) {
13
13
  throw new ConfigurationError('localStorage.maxChunks must be a positive safe integer.');
14
14
  }
15
+ if (maxChunkChars * maxChunks > Number.MAX_SAFE_INTEGER) {
16
+ throw new ConfigurationError('localStorage.maxChunkChars * localStorage.maxChunks product exceeds Number.MAX_SAFE_INTEGER.');
17
+ }
15
18
  return { keyPrefix, databaseKey, maxChunkChars, maxChunks };
16
19
  };
@@ -6,7 +6,12 @@ export const cleanupGenerationChunks = (state, generation, knownChunkCount) => {
6
6
  return;
7
7
  }
8
8
  for (let i = 0; i < knownChunkCount; i += 1) {
9
- state.adapter.removeItem(chunkKey(state.keyPrefix, state.databaseKey, generation, i));
9
+ try {
10
+ state.adapter.removeItem(chunkKey(state.keyPrefix, state.databaseKey, generation, i));
11
+ }
12
+ catch {
13
+ // best-effort cleanup; continue deleting remaining chunks
14
+ }
10
15
  }
11
16
  return;
12
17
  }
@@ -16,7 +21,12 @@ export const cleanupGenerationChunks = (state, generation, knownChunkCount) => {
16
21
  for (let i = 0; i < state.maxChunks; i += 1) {
17
22
  const key = chunkKey(state.keyPrefix, state.databaseKey, generation, i);
18
23
  if (state.adapter.getItem(key) !== null) {
19
- state.adapter.removeItem(key);
24
+ try {
25
+ state.adapter.removeItem(key);
26
+ }
27
+ catch {
28
+ // best-effort cleanup; continue deleting remaining chunks
29
+ }
20
30
  }
21
31
  }
22
32
  };
@@ -14,8 +14,10 @@ export const isQuotaBrowserError = (error) => {
14
14
  if (!(error instanceof Error)) {
15
15
  return false;
16
16
  }
17
- const normalized = `${error.name}:${error.message}`;
18
- return /quota|max_items|quota_bytes|quota_bytes_per_item/i.test(normalized);
17
+ // Known browser patterns: "QuotaExceededError", "quota_bytes", "quota_bytes_per_item", "max_items".
18
+ // "quota" subsumes all quota_* variants; "max_items" is the only independent pattern.
19
+ const normalized = `${error.name}:${error.message}`.toLowerCase();
20
+ return normalized.includes('quota') || normalized.includes('max_items');
19
21
  };
20
22
  export const validateSyncStorageCommitQuota = (state, generation, chunks, manifest, resolveChunkKey, manifestStorageKey) => {
21
23
  const pendingItems = chunks.map((chunkValue, chunkIndex) => {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@frostpillar/frostpillar-storage-engine",
3
- "version": "0.0.1",
3
+ "version": "0.1.0",
4
4
  "description": "Chunk-based storage engine for browsers and Node.js that packs many small key-value entries into a single backing store.",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",
@@ -89,7 +89,7 @@
89
89
  "access": "public"
90
90
  },
91
91
  "dependencies": {
92
- "@frostpillar/frostpillar-btree": "0.2.0"
92
+ "@frostpillar/frostpillar-btree": "0.2.2"
93
93
  },
94
94
  "scripts": {
95
95
  "clean:build": "rm -rf dist tsconfig.tsbuildinfo",
@@ -1 +0,0 @@
1
- "use strict";var FrostpillarStorageEngine=(()=>{var be=Object.defineProperty;var Sr=Object.getOwnPropertyDescriptor;var wr=Object.getOwnPropertyNames;var kr=Object.prototype.hasOwnProperty;var Cr=(e,t)=>{for(var r in t)be(e,r,{get:t[r],enumerable:!0})},xr=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of wr(t))!kr.call(e,o)&&o!==r&&be(e,o,{get:()=>t[o],enumerable:!(n=Sr(t,o))||n.enumerable});return e};var vr=e=>xr(be({},"__esModule",{value:!0}),e);var Do={};Cr(Do,{BinaryFormatError:()=>ne,ClosedDatastoreError:()=>V,ConfigurationError:()=>h,DatabaseLockedError:()=>re,Datastore:()=>Se,FrostpillarError:()=>O,IndexCorruptionError:()=>B,InvalidQueryRangeError:()=>G,PageCorruptionError:()=>I,QuotaExceededError:()=>w,StorageEngineError:()=>d,UnsupportedBackendError:()=>b,ValidationError:()=>S,indexedDBDriver:()=>Wt,localStorageDriver:()=>zt,opfsDriver:()=>ir,syncStorageDriver:()=>hr});var O=class extends Error{constructor(t,r){super(t),this.name=new.target.name,r!==void 0&&(this.cause=r.cause)}},S=class extends O{},G=class extends O{},h=class extends O{},b=class extends O{},V=class extends O{},d=class extends O{},re=class extends d{},ne=class extends d{},I=class extends d{},B=class extends d{},w=class extends O{},Ie=(e,t)=>e instanceof d?e:e instanceof Error?new d(`${t}: ${e.message}`,{cause:e}):new d(t,{cause:e}),x=(e,t)=>e instanceof Error?e:new Error(t,{cause:e});var N=(e,t,r)=>({_id:e,key:t,payload:r.payload});var Ve=(e,t)=>{let n={source:"autoCommit",error:t instanceof d?t:new d(t instanceof Error?t.message:"Unknown auto-commit storage failure.",{cause:t}),occurredAt:Date.now()};for(let o of e)try{let i=o(n);Promise.resolve(i).catch(()=>{})}catch{}};var oe=class{constructor(){this.queue=[];this.head=0;this.locked=!1}acquire(){return this.locked?new Promise(t=>{this.queue.push(()=>t(this.createRelease()))}):(this.locked=!0,Promise.resolve(this.createRelease()))}createRelease(){let t=!1;return()=>{if(!t)if(t=!0,this.head<this.queue.length){let 
r=this.queue[this.head];this.queue[this.head]=void 0,this.head+=1,this.head>1024&&this.head>this.queue.length>>>1&&(this.queue=this.queue.slice(this.head),this.head=0),r()}else this.queue.length=0,this.head=0,this.locked=!1}}};var Br=e=>{let t=0;for(let r=0;r<e.length;r++){let n=e.charCodeAt(r);if(n<=127)t+=1;else if(n<=2047)t+=2;else if(n>=55296&&n<=56319){let o=r+1<e.length?e.charCodeAt(r+1):0;o>=56320&&o<=57343?(t+=4,r++):t+=3}else n>=56320&&n<=57343,t+=3}return t},br=typeof Buffer<"u"&&typeof Buffer.byteLength=="function",A=br?e=>Buffer.byteLength(e,"utf8"):Br,W=e=>{let t=2;for(let r=0;r<e.length;r++){let n=e.charCodeAt(r);if(n===34||n===92)t+=2;else if(n<=31)n===8||n===9||n===10||n===12||n===13?t+=2:t+=6;else if(n<=127)t+=1;else if(n<=2047)t+=2;else if(n>=55296&&n<=56319){let o=r+1<e.length?e.charCodeAt(r+1):0;o>=56320&&o<=57343?(t+=4,r++):t+=6}else n>=56320&&n<=57343?t+=6:t+=3}return t},ie=e=>{if(e===null)return 4;switch(typeof e){case"boolean":return e?4:5;case"number":return String(e).length;case"string":return W(e);case"object":{let t=e,r=2,n=0;for(let o of Object.keys(t)){let i=t[o];i!==void 0&&(n>0&&(r+=1),r+=W(o)+1,r+=ie(i),n++)}return r}default:return 0}},Ir=15,X=(e,t)=>ie(e)+ie(t)+Ir,ae=e=>ie(e);var He=64,$e=1024,Ue=65535,We=256,Xe=4096,Ye=1048576,Er=4,Or=1,Ar=2,Tr=15,Qe=e=>{if(typeof e!="object"||e===null||Array.isArray(e))return!1;let r=Object.getPrototypeOf(e);return r===Object.prototype||r===null},H=(e,t)=>{if(e.totalValidationBytes+=t,e.totalValidationBytes>Ye)throw new S(`Payload aggregate validation bytes must be <= ${Ye}.`)},Lr=(e,t)=>{if(e.trim().length===0)throw new S("Payload keys must be non-empty strings.");if(e==="__proto__"||e==="constructor"||e==="prototype")throw new S(`Payload key "${e}" is reserved and not allowed.`);if(A(e)>$e)throw new S(`Payload key UTF-8 byte length must be <= ${$e}.`);if(t.totalKeyCount+=1,t.totalKeyCount>Xe)throw new S(`Payload total key count must be <= ${Xe}.`);H(t,W(e)+Or)};var 
Pr=(e,t,r)=>{if(e===null)return H(r,Er),null;if(typeof e=="string"){if(A(e)>Ue)throw new S(`Payload string UTF-8 byte length must be <= ${Ue}.`);return H(r,W(e)),e}if(typeof e=="number"){if(!Number.isFinite(e))throw new S("Payload number values must be finite.");return H(r,String(e).length),e}if(typeof e=="boolean")return H(r,e?4:5),e;if(typeof e=="bigint")throw new S("Payload bigint values are not supported.");if(typeof e=="object"){if(Array.isArray(e))throw new S("Payload arrays are not supported.");if(!Qe(e))throw new S("Payload values must be plain objects.");return Ze(e,t+1,r)}throw new S("Payload values must be string | number | boolean | null or nested object.")},Ze=(e,t,r)=>{if(t+1>He)throw new S(`Payload nesting depth must be <= ${He}.`);if(r.activePath.has(e))throw new S("Circular payload references are not supported.");let o=Object.entries(e);if(o.length>We)throw new S(`Payload object key count must be <= ${We}.`);r.activePath.add(e);let i=o.length,a=i>1?i-1:0;H(r,Ar+a);let s={};for(let[c,l]of o)Lr(c,r),s[c]=Pr(l,t,r);return r.activePath.delete(e),s},$=e=>{if(!Qe(e))throw new S("payload must be a non-null plain object.");let t={activePath:new WeakSet,totalKeyCount:0,totalValidationBytes:0},r=Ze(e,0,t),n=t.totalValidationBytes+Tr;return{payload:r,sizeBytes:n}};var et=(e,t,r,n,o)=>{if(e===null)return t;if(r>e.maxSizeBytes)throw new w("Record exceeds configured capacity.maxSize boundary.");if(e.policy==="strict"){if(t+r>e.maxSizeBytes)throw new w("Insert exceeds configured capacity.maxSize under strict policy.");return t}let i=t;for(;i+r>e.maxSizeBytes;){if(n()===0)throw new w("Record cannot fit in turnover policy with empty datastore.");let a=o();if(!Number.isSafeInteger(a)||a<=0)throw new B("Turnover eviction reported non-progressing reclaimed bytes.");i-=a}return Math.max(0,i)};var Dr=/^(\d+)(B|KB|MB|GB)$/,Nr={B:1,KB:1024,MB:1024*1024,GB:1024*1024*1024},Rr=/^(\d+)(ms|s|m|h)$/,Kr={ms:1,s:1e3,m:60*1e3,h:3600*1e3},Mr=e=>{if(e==="backendLimit")throw new 
h('capacity.maxSize "backendLimit" must be resolved before capacity parsing.');if(typeof e=="number"){if(!Number.isSafeInteger(e)||e<=0)throw new h("capacity.maxSize must be a positive safe integer.");return e}let t=Dr.exec(e);if(t===null)throw new h("capacity.maxSize string must be <positive><B|KB|MB|GB>.");let r=Number(t[1]);if(!Number.isSafeInteger(r)||r<=0)throw new h("capacity.maxSize must be a positive safe integer.");let n=t[2],o=Nr[n],i=r*o;if(!Number.isSafeInteger(i)||i<=0)throw new h("capacity.maxSize exceeds safe integer range.");return i},tt=e=>{if(e===void 0)return null;let t=Mr(e.maxSize),r=e.policy??"strict";if(r!=="strict"&&r!=="turnover")throw new h('capacity.policy must be "strict" or "turnover".');return{maxSizeBytes:t,policy:r}},zr=e=>{let t=Rr.exec(e);if(t===null)throw new h("autoCommit.frequency string must be one of: <positive>ms, <positive>s, <positive>m, <positive>h.");let r=Number(t[1]);if(!Number.isSafeInteger(r)||r<=0)throw new h("autoCommit.frequency string amount must be a positive safe integer.");let n=t[2],o=Kr[n],i=r*o;if(!Number.isSafeInteger(i)||i<=0)throw new h("autoCommit.frequency exceeds safe integer range.");return i},R=e=>{if(e?.maxPendingBytes!==void 0&&(!Number.isSafeInteger(e.maxPendingBytes)||e.maxPendingBytes<=0))throw new h("autoCommit.maxPendingBytes must be a positive safe integer.");let t=e?.maxPendingBytes??null,r=e?.frequency;if(r===void 0||r==="immediate")return{frequency:"immediate",intervalMs:null,maxPendingBytes:t};if(typeof r=="number"){if(!Number.isSafeInteger(r)||r<=0)throw new h("autoCommit.frequency number must be a positive safe integer.");return{frequency:"scheduled",intervalMs:r,maxPendingBytes:t}}return{frequency:"scheduled",intervalMs:zr(r),maxPendingBytes:t}},_r=["allow","replace","reject"],rt=e=>{if(e===void 0)return"allow";if(!_r.includes(e))throw new h('duplicateKeys must be "allow", "replace", or "reject".');return e};var Fr=e=>{if(e.capacity===void 
0)return;if(e.capacity.maxSize!=="backendLimit")return e.capacity;if(e.driver===void 0)throw new h('capacity.maxSize "backendLimit" requires a durable driver.');if(e.driver.resolveBackendLimitBytes===void 0)throw new h('capacity.maxSize "backendLimit" is not supported by the selected driver.');let t=e.driver.resolveBackendLimitBytes();return{...e.capacity,maxSize:t}},nt=e=>{let t=Fr(e);return tt(t)};var se=class{constructor(){this.closed=!1,this.closing=!1,this.closeInFlight=null,this.activeOperationCount=0,this.activeOperationsDrained=null,this.resolveActiveOperationsDrained=null}isClosed(){return this.closed}markClosing(){this.closing=!0}markClosed(){this.closed=!0,this.closing=!1}getCloseInFlight(){return this.closeInFlight}setCloseInFlight(t){this.closeInFlight=t}ensureOpen(){if(this.closed||this.closing)throw new V("Datastore has been closed.")}beginOperation(){this.ensureOpen(),this.activeOperationCount+=1}endOperation(){if(this.activeOperationCount-=1,this.activeOperationCount===0&&this.resolveActiveOperationsDrained!==null){let t=this.resolveActiveOperationsDrained;this.resolveActiveOperationsDrained=null,this.activeOperationsDrained=null,t()}}waitForActiveOperationsToDrain(){return this.activeOperationCount===0?Promise.resolve():(this.activeOperationsDrained===null&&(this.activeOperationsDrained=new Promise(t=>{this.resolveActiveOperationsDrained=t})),this.activeOperationsDrained)}};var ot=(e,t)=>{let r=e.peekById(t);return r===null?null:N(t,r.key,r.value)},Jr=(e,t,r,n)=>{let o={...e.payload,...t};if(n)return{payload:o,sizeBytes:X(r,o)};let i=$(o),a=ae(r);return{payload:i.payload,sizeBytes:i.sizeBytes+a}},it=e=>{let t=e.keyIndex.peekById(e.id);if(t===null)return{updated:!1,currentSizeBytes:e.currentSizeBytes,durabilitySignalBytes:0};let r=t.value,n=r.sizeBytes,o=Jr(r,e.patch,t.key,e.skipPayloadValidation),i=o.payload,a=o.sizeBytes,s=a-n;if(e.capacityState!==null&&s>0&&e.currentSizeBytes+s>e.capacityState.maxSizeBytes)throw new w("updateById exceeds 
configured capacity.maxSize boundary.");let c={payload:i,sizeBytes:a};if(e.keyIndex.updateById(e.id,c)===null)throw new B("Record index state is inconsistent during updateById.");return{updated:!0,currentSizeBytes:Math.max(0,e.currentSizeBytes+s),durabilitySignalBytes:Math.abs(s)}},at=e=>{let t=e.keyIndex.removeById(e.id);if(t===null)return{deleted:!1,currentSizeBytes:e.currentSizeBytes,durabilitySignalBytes:0};let r=t.value.sizeBytes;return{deleted:!0,currentSizeBytes:Math.max(0,e.currentSizeBytes-r),durabilitySignalBytes:r}};var jr=()=>{let e=globalThis.AggregateError;return typeof e!="function"?null:e},qr=(e,t)=>{let r=jr();if(r!==null)return new r([e,t],"Datastore close failed with multiple errors.");let n=new Error("Datastore close failed with multiple errors.");return n.errors=[e,t],n},st=async e=>{if(e.lifecycle.isClosed())return;let t=e.lifecycle.getCloseInFlight();if(t!==null){await t;return}e.lifecycle.markClosing();let r=Gr(e).finally(()=>{e.lifecycle.setCloseInFlight(null)});e.lifecycle.setCloseInFlight(r),await r},Gr=async e=>{let t=e.getPendingInit();t!==null&&await t,await e.lifecycle.waitForActiveOperationsToDrain();let r=e.getPendingInitError();try{await e.getBackendController()?.close()}catch(n){let o=x(n,"Datastore close failed with a non-Error value.");r===null?r=o:r=qr(r,o)}if(e.setBackendController(null),e.setPendingInitError(null),e.lifecycle.markClosed(),e.clearInMemoryState(),r!==null)throw r};var g=class extends Error{constructor(e){super(e),this.name="BTreeValidationError",Object.setPrototypeOf(this,new.target.prototype)}},f=class extends Error{constructor(e){super(e),this.name="BTreeInvariantError",Object.setPrototypeOf(this,new.target.prototype)}};var Te=64,Le=64,Ee=3,Oe=16384,Q=0,Vr=1,Hr=e=>{if(e===void 0)return"replace";if(e!=="allow"&&e!=="reject"&&e!=="replace")throw new g("Invalid duplicateKeys option.");return e},T=e=>e.kind===Q,_=(e,t)=>{if(e.kind===Q){if(e.entryOffset>=e.entries.length)return!1;let 
r=e.entries[e.entryOffset];return t.key=r.key,t.sequence=r.entryId,!0}return e.childOffset>=e.keys.length?!1:(t.key=e.keys[e.childOffset].key,t.sequence=e.keys[e.childOffset].sequence,!0)},le=(e,t,r)=>{if(e===void 0)return r;if(!Number.isInteger(e)||e<Ee||e>Oe)throw new g(`${t}: integer ${Ee}\u2013${Oe} required.`);return e},Pe=(e,t)=>({kind:Q,entries:e,entryOffset:0,parent:t,indexInParent:0,prev:null,next:null}),ue=(e,t)=>{let r=[],n={kind:Vr,children:e,keys:r,childOffset:0,parent:t,indexInParent:0};for(let o=0;o<e.length;o+=1){let i=e[o];i.parent=n,i.indexInParent=o;let a={key:void 0,sequence:0};if(!_(i,a))throw new f("branch child has no min key");r.push(a)}return n},p=e=>e.entries.length-e.entryOffset,y=(e,t)=>e.entries[e.entryOffset+t],pt=e=>{if(e.entryOffset>=e.entries.length)return;let t=e.entries[e.entryOffset];return e.entryOffset+=1,e.entryOffset>=e.entries.length>>>1&&(e.entries.copyWithin(0,e.entryOffset),e.entries.length=e.entries.length-e.entryOffset,e.entryOffset=0),t},$r=e=>{if(!(e.entryOffset>=e.entries.length))return e.entries.pop()},Ur=(e,t)=>{e.entryOffset>0?(e.entryOffset-=1,e.entries[e.entryOffset]=t):e.entries.unshift(t)},ht=(e,t)=>{let r=e.entries.length-e.entryOffset,n=e.entryOffset+t;t<r-1-t?(e.entries.copyWithin(e.entryOffset+1,e.entryOffset,n),e.entryOffset+=1,e.entryOffset>=e.entries.length>>>1&&(e.entries.copyWithin(0,e.entryOffset),e.entries.length-=e.entryOffset,e.entryOffset=0)):(e.entries.copyWithin(n,n+1),e.entries.length-=1)},Wr=(e,t,r)=>{let n=e.entryOffset+t;e.entryOffset>0&&t<e.entries.length-e.entryOffset>>>1?(e.entries.copyWithin(e.entryOffset-1,e.entryOffset,n),e.entryOffset-=1,e.entries[n-1]=r):e.entries.splice(n,0,r)},Xr=e=>{e.entryOffset>0&&(e.entries.copyWithin(0,e.entryOffset),e.entries.length=e.entries.length-e.entryOffset,e.entryOffset=0)},ct=1,De=e=>{if(e.childOffset>0){let 
t=e.childOffset<=ct?0:ct;e.children.copyWithin(t,e.childOffset),e.children.length-=e.childOffset-t,e.keys.copyWithin(t,e.childOffset),e.keys.length-=e.childOffset-t,e.childOffset=t;for(let r=t;r<e.children.length;r+=1)e.children[r].indexInParent=r}},ce=e=>e.children.length-e.childOffset,Yr=(e,t,r,n)=>{let o=e.childOffset+t,i=e.children.length-e.childOffset;if(e.childOffset>0&&t<i>>>1){e.children.copyWithin(e.childOffset-1,e.childOffset,o),e.keys.copyWithin(e.childOffset-1,e.childOffset,o),e.childOffset-=1,e.children[o-1]=r,e.keys[o-1]=n;for(let a=e.childOffset;a<o;a+=1)e.children[a].indexInParent=a;r.indexInParent=o-1}else{e.children.splice(o,0,r),e.keys.splice(o,0,n);for(let a=o;a<e.children.length;a+=1)e.children[a].indexInParent=a}},Qr=(e,t)=>{let r=t-e.childOffset,n=e.children.length-e.childOffset;if(r<n-1-r){e.children.copyWithin(e.childOffset+1,e.childOffset,t),e.keys.copyWithin(e.childOffset+1,e.childOffset,t),e.childOffset+=1;for(let o=e.childOffset;o<=t;o+=1)e.children[o].indexInParent=o;e.childOffset>=e.children.length>>>1&&De(e)}else{e.children.copyWithin(t,t+1),e.keys.copyWithin(t,t+1),e.children.length-=1,e.keys.length-=1;for(let o=t;o<e.children.length;o+=1)e.children[o].indexInParent=o}},Zr=(e,t,r,n)=>{let o=t.childOffset;if(o>=t.children.length)throw new f("branch has no children");let i=o,a=o,s=t.keys.length-1;for(;a<=s;){let c=a+s>>>1,l=t.keys[c],u=e(l.key,r);(u!==0?u:l.sequence-n)<=0?(i=c,a=c+1):s=c-1}return t.children[i]},L=(e,t,r)=>{let n=e.compareKeys,o=e.root;for(;o.kind!==Q;)o=Zr(n,o,t,r);return o},P=(e,t,r,n)=>{let o=e.compareKeys,i=t.entryOffset,a=t.entries.length;for(;i<a;){let s=i+a>>>1,c=t.entries[s],l=o(c.key,r);(l!==0?l:c.entryId-n)<0?i=s+1:a=s}return i-t.entryOffset},F=(e,t,r,n)=>{let o=e.compareKeys,i=t.entryOffset,a=t.entries.length;for(;i<a;){let s=i+a>>>1,c=t.entries[s],l=o(c.key,r);(l!==0?l:c.entryId-n)<=0?i=s+1:a=s}return i-t.entryOffset},en=(e,t,r,n)=>{let 
o=e.compareKeys,i=t,a=32-Math.clz32(e.entryCount+1);for(;a>0&&i.next!==null&&p(i.next)>0;){let s=y(i.next,0),c=o(s.key,r);if(c>0||c===0&&s.entryId>n)break;i=i.next,a-=1}if(a===0&&i.next!==null&&p(i.next)>0){let s=y(i.next,0),c=o(s.key,r);if(c<0||c===0&&s.entryId<=n)return L(e,r,n)}return i},de=(e,t)=>{if(e.entryCount===0)return null;let r=L(e,t,0),n=P(e,r,t,0);if(n>=p(r)&&(r.next===null||(r=r.next,n=P(e,r,t,0),n>=p(r)))||e.compareKeys(y(r,n).key,t)!==0)return null;let o=e._cursor;return o.leaf=r,o.index=n,o},tn=(e,t)=>{if(e.entryCount===0)return null;let r=L(e,t,Number.MAX_SAFE_INTEGER),n=F(e,r,t,Number.MAX_SAFE_INTEGER);if(n===0&&(r.prev===null||(r=r.prev,n=p(r),n===0))||(n-=1,e.compareKeys(y(r,n).key,t)!==0))return null;let o=e._cursor;return o.leaf=r,o.index=n,o},rn=(e,t)=>de(e,t)!==null,nn=(e,t)=>{if(e.entryCount===0)return null;let r=e.compareKeys,n=L(e,t,Number.MAX_SAFE_INTEGER),o=F(e,n,t,Number.MAX_SAFE_INTEGER);for(;n!==null;)if(o<p(n)){let i=y(n,o);if(r(i.key,t)>0)return i.key;o+=1}else n=n.next,o=0;return null},on=(e,t)=>{if(e.entryCount===0)return null;let r=e.compareKeys,n=L(e,t,0),o=P(e,n,t,0),i=n,a=o-1;for(;i!==null;){for(;a>=0;){let s=y(i,a);if(r(s.key,t)<0)return s.key;a-=1}i=i.prev,i!==null&&(a=p(i)-1)}return null},an=(e,t)=>{if(e.entryCount===0)return null;let r=e.compareKeys,n=L(e,t,0),o=P(e,n,t,0),i=e._cursor;if(o<p(n)){if(r(y(n,o).key,t)===0)return i.leaf=n,i.index=o,i}else if(n.next!==null){let a=P(e,n.next,t,0);if(a<p(n.next)&&r(y(n.next,a).key,t)===0)return i.leaf=n.next,i.index=a,i}if(o>0)return i.leaf=n,i.index=o-1,i;if(n.prev!==null){let a=p(n.prev);if(a>0)return i.leaf=n.prev,i.index=a-1,i}return null},K=e=>{let t=e;for(;t.parent!==null;){let r=t.indexInParent;if(!_(t,t.parent.keys[r])||r!==t.parent.childOffset)return;t=t.parent}},fe=e=>{if(e.parent===null)throw new f("no parent during rebalance");return e.parent},lt=e=>{if(!T(e))throw new f("expected leaf, got branch");return e},ut=e=>{if(T(e))throw new f("expected branch, got 
leaf");return e},me=(e,t)=>{if(t<e.childOffset||t>=e.children.length)throw new f("child index out of range");Qr(e,t)},dt=(e,t)=>{t.prev!==null?t.prev.next=t.next:t.next!==null&&(e.leftmostLeaf=t.next),t.next!==null?t.next.prev=t.prev:t.prev!==null&&(e.rightmostLeaf=t.prev),t.prev=null,t.next=null},sn=(e,t,r)=>{let n=t.children.pop();if(n===void 0)throw new f("left branch borrow failed");t.keys.pop(),n.parent=e;let o={key:void 0,sequence:0};if(!_(n,o))throw new f("borrowed child has no min key");if(e.childOffset>0)e.childOffset-=1,e.children[e.childOffset]=n,e.keys[e.childOffset]=o,n.indexInParent=e.childOffset;else{e.children.unshift(n),e.keys.unshift(o);for(let a=0;a<e.children.length;a+=1)e.children[a].indexInParent=a}let i=fe(e);i.keys[r]={key:o.key,sequence:o.sequence},K(e)},cn=(e,t,r)=>{let n=t.childOffset;if(n>=t.children.length)throw new f("right branch borrow failed");let o=t.children[n];t.childOffset+=1,t.childOffset>=t.children.length>>>1&&De(t),e.children.push(o),o.parent=e;let i={key:void 0,sequence:0};if(!_(o,i))throw new f("borrowed child has no min key");e.keys.push(i),o.indexInParent=e.children.length-1;let a=fe(e);_(t,a.keys[r+1])},ln=(e,t,r,n)=>{for(let i=t.childOffset;i<t.children.length;i+=1){let a=t.children[i];a.parent=r,a.indexInParent=r.children.length,r.children.push(a),r.keys.push(t.keys[i])}let o=fe(t);me(o,n),ye(e,o)},un=(e,t,r,n)=>{for(let i=r.childOffset;i<r.children.length;i+=1){let a=r.children[i];a.parent=t,a.indexInParent=t.children.length,t.children.push(a),t.keys.push(r.keys[i])}let o=fe(t);me(o,n+1),ye(e,o)},dn=(e,t)=>{let r=t>e.childOffset?ut(e.children[t-1]):null,n=t+1<e.children.length?ut(e.children[t+1]):null;return{left:r,right:n}},ye=(e,t)=>{let r=ce(t);if(t===e.root){if(r===1){let s=t.children[t.childOffset];s.parent=null,e.root=s,T(s)&&(e.leftmostLeaf=s,e.rightmostLeaf=s)}return}if(r>=e.minBranchChildren)return;let n=t.parent;if(n===null)throw new f("branch has no parent");let 
o=t.indexInParent,{left:i,right:a}=dn(n,o);if(a!==null&&ce(a)>e.minBranchChildren){cn(t,a,o);return}if(i!==null&&ce(i)>e.minBranchChildren){sn(t,i,o);return}if(i!==null){ln(e,t,i,o);return}if(a!==null){un(e,t,a,o);return}throw new f("no branch siblings to rebalance")},mt=(e,t)=>{e.entryOffset>0&&(e.entries.copyWithin(0,e.entryOffset),e.entries.length=e.entries.length-e.entryOffset,e.entryOffset=0);let r=t.entries;for(let n=t.entryOffset;n<r.length;n+=1)e.entries.push(r[n])},Z=(e,t)=>{if(t===e.root){e.entryCount===0&&(e.leftmostLeaf=t,e.rightmostLeaf=t);return}if(p(t)>=e.minLeafEntries)return;let r=t.parent;if(r===null)throw new f("Leaf node has no parent during rebalance.");let n=t.indexInParent,o=n>r.childOffset?lt(r.children[n-1]):null,i=n+1<r.children.length?lt(r.children[n+1]):null;if(i!==null&&p(i)>e.minLeafEntries){let a=pt(i);if(a===void 0)throw new f("right leaf borrow failed");t.entries.push(a),_(i,r.keys[n+1]);return}if(o!==null&&p(o)>e.minLeafEntries){let a=o.entries.pop();if(a===void 0)throw new f("left leaf borrow failed");Ur(t,a),r.keys[n]={key:a.key,sequence:a.entryId},K(t);return}if(o!==null){mt(o,t),dt(e,t),me(r,n),ye(e,r);return}if(i!==null){mt(t,i),dt(e,i),me(r,n+1),ye(e,r);return}throw new f("no leaf siblings to rebalance")},mn=(e,t,r)=>{let n=r?Number.MAX_SAFE_INTEGER:0,o=L(e,t,n),i=r?F(e,o,t,Number.MAX_SAFE_INTEGER):P(e,o,t,0);return i>=p(o)?o.next===null?null:{leaf:o.next,idx:0}:{leaf:o,idx:i}},yn=(e,t,r,n,o)=>{let i=p(t),a=r;for(;a<i;){let s=y(t,a),c=e.compareKeys(s.key,n);if(o?c>=0:c>0)break;a+=1}return a},fn=e=>{let t=0,r=e.root;for(;!T(r);)r=r.children[r.childOffset],t+=1;return t},pn=(e,t,r,n,o)=>{if(e.entryKeys!==null)for(let l=r;l<r+n;l+=1)e.entryKeys.delete(y(t,l).entryId);let i=t.entryOffset+r;t.entries.copyWithin(i,i+n),t.entries.length-=n,e.entryCount-=n;let a=p(t)===0;r===0&&!a&&t.parent!==null&&K(t);let 
s=p(t),c=o;for(;c>0&&t!==e.root&&p(t)<e.minLeafEntries&&(Z(e,t),!(t.parent!==null&&t.parent.children[t.indexInParent]!==t));)c-=1;return a&&p(t)>0&&t.parent!==null&&t.parent.children[t.indexInParent]===t&&K(t),s},hn=(e,t)=>t.parent===null?t===e.root:t.parent.children[t.indexInParent]===t,gn=(e,t,r,n)=>{if(e.entryCount===0)return 0;let o=e.compareKeys(t,r);if(o>0)return 0;let i=n?.lowerBound==="exclusive",a=n?.upperBound==="exclusive";if(i&&a&&o===0)return 0;let s=fn(e),c=0,l=!0,u=null,m=0;for(;e.entryCount>0;){if(l){let q=mn(e,t,i);if(q===null)break;u=q.leaf,m=q.idx,l=!1}if(m>=p(u))break;let k=p(u),E=yn(e,u,m,r,a),C=E-m;if(C===0)break;let j=pn(e,u,m,C,s);if(c+=C,E<k)break;if(!hn(e,u)){l=!0;continue}if(p(u)>j){l=!0;continue}if(u.next===null)break;u=u.next,m=0}return c},z=[{threshold:0,maxLeaf:32,maxBranch:32},{threshold:1e3,maxLeaf:64,maxBranch:64},{threshold:1e4,maxLeaf:128,maxBranch:128},{threshold:1e5,maxLeaf:256,maxBranch:128},{threshold:1e6,maxLeaf:512,maxBranch:256}],M=e=>{let t=z[0];for(let r=1;r<z.length&&e>=z[r].threshold;r+=1)t=z[r];return t},Ne=e=>{for(let t=1;t<z.length;t+=1)if(e<z[t].threshold)return z[t].threshold;return Number.MAX_SAFE_INTEGER},Sn=e=>{if(typeof e.compareKeys!="function")throw new g("compareKeys must be a function.");let t=e.autoScale===!0;if(t&&(e.maxLeafEntries!==void 0||e.maxBranchChildren!==void 0))throw new g("autoScale conflicts with explicit capacity.");let r,n;if(t){let a=M(0);r=a.maxLeaf,n=a.maxBranch}else r=le(e.maxLeafEntries,"maxLeafEntries",Te),n=le(e.maxBranchChildren,"maxBranchChildren",Le);let o=Hr(e.duplicateKeys),i=Pe([],null);return{compareKeys:e.compareKeys,maxLeafEntries:r,maxBranchChildren:n,duplicateKeys:o,minLeafEntries:Math.ceil(r/2),minBranchChildren:Math.ceil(n/2),root:i,leftmostLeaf:i,rightmostLeaf:i,entryCount:0,nextSequence:0,entryKeys:e.enableEntryIdLookup===!0?new 
Map:null,autoScale:t,_nextAutoScaleThreshold:t?Ne(0):Number.MAX_SAFE_INTEGER,_cursor:{leaf:i,index:0}}},gt=e=>{if(e.entryCount<e._nextAutoScaleThreshold)return;let{maxLeaf:t,maxBranch:r}=M(e.entryCount);t>e.maxLeafEntries&&(e.maxLeafEntries=t,e.minLeafEntries=Math.ceil(t/2)),r>e.maxBranchChildren&&(e.maxBranchChildren=r,e.minBranchChildren=Math.ceil(r/2)),e._nextAutoScaleThreshold=Ne(e.entryCount)},yt=(e,t,r)=>{if(!e.autoScale)return;let n=M(0),o=le(t,"maxLeafEntries",Te),i=le(r,"maxBranchChildren",Le);if(o<n.maxLeaf||i<n.maxBranch)throw new g("autoScale capacity snapshot must be >= tier-0 capacities.");e.maxLeafEntries=o,e.maxBranchChildren=i,e.minLeafEntries=Math.ceil(o/2),e.minBranchChildren=Math.ceil(i/2)},St=(e,t,r)=>{let n=[],o=0;for(;o<e;){let i=e-o;if(i>t&&i-t<r){let s=Math.ceil(i/2);n.push(o+s),n.push(e);break}let a=o+t<e?o+t:e;n.push(a),o=a}return n},wn=(e,t,r,n)=>{let o=St(t.length,e.maxLeafEntries,e.minLeafEntries),i=new Array(o.length),a=0;for(let s=0;s<o.length;s+=1){let c=o[s],l=new Array(c-a);for(let u=a;u<c;u+=1){let m=n+u;l[u-a]={key:t[u].key,entryId:m,value:t[u].value},r[u]=m,e.entryKeys!==null&&e.entryKeys.set(m,t[u].key)}i[s]=Pe(l,null),a=c}return i},kn=(e,t)=>{if(e.entryCount!==0)throw new f("bulk load requires empty tree");let r=e.nextSequence;if(r+t.length>Number.MAX_SAFE_INTEGER)throw new g("Sequence overflow.");let n=new Array(t.length),o=wn(e,t,n,r);e.nextSequence=r+t.length,e.entryCount=t.length;for(let i=0;i<o.length;i+=1)i>0&&(o[i].prev=o[i-1]),i<o.length-1&&(o[i].next=o[i+1]);if(e.leftmostLeaf=o[0],e.rightmostLeaf=o[o.length-1],o.length===1)e.root=o[0];else{let i=o;for(;i.length>1;){let a=St(i.length,e.maxBranchChildren,e.minBranchChildren),s=new Array(a.length),c=0;for(let l=0;l<a.length;l+=1)s[l]=ue(i.slice(c,a[l]),null),c=a[l];i=s}e.root=i[0]}return gt(e),n},wt=(e,t,r,n)=>{let o={key:void 0,sequence:0};if(!_(n,o))throw new f("inserted child has no min key");n.parent=t;let 
i=r.indexInParent-t.childOffset+1;Yr(t,i,n,o),ce(t)>e.maxBranchChildren&&xn(e,t)},Cn=(e,t)=>{Xr(t);let r=Math.ceil(t.entries.length/2),n={kind:Q,entries:t.entries.splice(r),entryOffset:0,parent:t.parent,indexInParent:0,prev:t,next:t.next};if(t.next!==null?t.next.prev=n:e.rightmostLeaf=n,t.next=n,t.parent===null){e.root=ue([t,n],null);return}wt(e,t.parent,t,n)},xn=(e,t)=>{De(t);let r=Math.ceil(t.children.length/2),n=ue(t.children.splice(r),t.parent);if(t.keys.splice(r),t.parent===null){e.root=ue([t,n],null);return}wt(e,t.parent,t,n)},kt=(e,t,r,n)=>{let o=e.nextSequence,i=F(e,t,r,o);if(e.duplicateKeys!=="allow"){let a=null;if(i>0){let s=y(t,i-1);e.compareKeys(s.key,r)===0&&(a=s)}else if(t.prev!==null&&p(t.prev)>0){let s=t.prev,c=y(s,p(s)-1);e.compareKeys(c.key,r)===0&&(a=c)}if(a!==null){if(e.duplicateKeys==="reject")throw new g("Duplicate key rejected.");return a.value=n,a.entryId}}if(e.nextSequence>=Number.MAX_SAFE_INTEGER)throw new g("Sequence overflow.");return e.nextSequence+=1,Wr(t,i,{key:r,entryId:o,value:n}),e.entryCount+=1,e.entryKeys!==null&&e.entryKeys.set(o,r),i===0&&t.parent!==null&&K(t),p(t)>e.maxLeafEntries&&Cn(e,t),gt(e),o},vn=(e,t,r)=>{let n=L(e,t,e.nextSequence);return kt(e,n,t,r)},Bn=e=>{if(e.entryCount===0)return null;let t=pt(e.leftmostLeaf);if(t===void 0)throw new f("leftmost leaf empty but count > 0");return e.entryCount-=1,e.entryKeys!==null&&e.entryKeys.delete(t.entryId),p(e.leftmostLeaf)>0&&e.leftmostLeaf.parent!==null&&K(e.leftmostLeaf),e.leftmostLeaf!==e.root&&p(e.leftmostLeaf)<e.minLeafEntries&&Z(e,e.leftmostLeaf),t},bn=e=>{if(e.entryCount===0)return null;let t=$r(e.rightmostLeaf);if(t===void 0)throw new f("rightmost leaf empty but count > 0");return e.entryCount-=1,e.entryKeys!==null&&e.entryKeys.delete(t.entryId),e.rightmostLeaf!==e.root&&p(e.rightmostLeaf)<e.minLeafEntries&&Z(e,e.rightmostLeaf),t},In=(e,t)=>{let r=de(e,t);if(r===null)return null;let n=r.leaf,o=r.index,i=y(n,o);return 
ht(n,o),e.entryCount-=1,e.entryKeys!==null&&e.entryKeys.delete(i.entryId),o===0&&p(n)>0&&n.parent!==null&&K(n),n!==e.root&&p(n)<e.minLeafEntries&&Z(e,n),i},Re=(e,t,r)=>{let n=L(e,t,r),o=P(e,n,t,r);return o>=p(n)||y(n,o).entryId!==r?null:{leaf:n,index:o}},En=(e,t)=>{let r=e.entryKeys.get(t);if(r===void 0)return null;let n=Re(e,r,t);if(n===null)return null;let o=y(n.leaf,n.index);return ht(n.leaf,n.index),e.entryCount-=1,e.entryKeys.delete(t),n.index===0&&p(n.leaf)>0&&n.leaf.parent!==null&&K(n.leaf),n.leaf!==e.root&&p(n.leaf)<e.minLeafEntries&&Z(e,n.leaf),o},On=(e,t)=>{let r=e.entryKeys.get(t);if(r===void 0)return null;let n=Re(e,r,t);return n===null?null:y(n.leaf,n.index)},An=(e,t,r)=>{let n=e.entryKeys.get(t);if(n===void 0)return null;let o=Re(e,n,t);if(o===null)return null;let i=y(o.leaf,o.index);return i.value=r,i},Tn=(e,t)=>{if(t.length===0)return[];let r=e.duplicateKeys!=="allow";for(let n=1;n<t.length;n+=1){let o=e.compareKeys(t[n-1].key,t[n].key);if(r?o>=0:o>0)throw new g(r?"putMany: not sorted in strict ascending order.":"putMany: not sorted in non-descending order.")}if(e.entryCount>0){let n=new Array(t.length),o=L(e,t[0].key,e.nextSequence);for(let i=0;i<t.length;i+=1){let a=t[i],s=en(e,o,a.key,e.nextSequence);n[i]=kt(e,s,a.key,a.value),o=s}return n}return kn(e,t)},Ct=(e,t,r,n)=>{if(e.entryCount===0)return null;let o=e.compareKeys,i=o(t,r);if(i>0)return null;let a=n?.lowerBound==="exclusive",s=n?.upperBound==="exclusive";if(a&&s&&i===0)return null;let c=a?Number.MAX_SAFE_INTEGER:0,l=L(e,t,c),u=a?F(e,l,t,Number.MAX_SAFE_INTEGER):P(e,l,t,0);return{leaf:l,index:u,compare:o,upperExclusive:s}},xt=(e,t,r,n)=>{let o=Ct(e,t,r,n);if(o===null)return 0;let i=o.leaf,a=o.index,{compare:s,upperExclusive:c}=o,l=0;for(;i!==null;){let u=p(i);if(a>=u){i=i.next,a=0;continue}let m=y(i,u-1),k=s(m.key,r);if(c?k<0:k<=0){l+=u-a,i=i.next,a=0;continue}let E=c?0:Number.MAX_SAFE_INTEGER,C=c?P(e,i,r,E):F(e,i,r,E),j=C<u?C:u;return l+=j-a,l}return l},Ln=200,Pn=(e,t,r,n,o,i,a,s)=>{let 
c=p(t);if(c-r>=Ln&&t.next!==null){let u=y(t,c-1),m=n(u.key,a);if(o?m<0:m<=0){let k=xt(e,i,a,s);return new Array(k)}}return[]},ft=(e,t,r,n,o,i)=>{if(o)for(let a=t;a<r;a+=1)n[i++]=y(e,a);else for(let a=t;a<r;a+=1)n.push(y(e,a));return i},Dn=(e,t,r,n)=>{let o=Ct(e,t,r,n);if(o===null)return[];let i=o.leaf,a=o.index,{compare:s,upperExclusive:c}=o,l=Pn(e,i,a,s,c,t,r,n),u=0,m=l.length>0;for(;i!==null;){let k=p(i);if(a>=k){i=i.next,a=0;continue}let E=y(i,k-1),C=s(E.key,r);if(c?C<0:C<=0){u=ft(i,a,k,l,m,u),i=i.next,a=0;continue}let j=c?0:Number.MAX_SAFE_INTEGER,q=c?P(e,i,r,j):F(e,i,r,j),gr=q<k?q:k;return ft(i,a,gr,l,m,u),l}return l},Nn=e=>{let t={compareKeys:e.compareKeys,duplicateKeys:e.duplicateKeys,enableEntryIdLookup:e.entryKeys!==null,autoScale:e.autoScale};return e.autoScale||(t.maxLeafEntries=e.maxLeafEntries,t.maxBranchChildren=e.maxBranchChildren),t},Rn=e=>{let t=new Array(e.entryCount),r=e.leftmostLeaf,n=0;for(;r!==null;){let o=p(r);for(let i=0;i<o;i+=1){let a=y(r,i);t[n++]=[a.key,a.value]}r=r.next}return{version:1,config:{maxLeafEntries:e.maxLeafEntries,maxBranchChildren:e.maxBranchChildren,duplicateKeys:e.duplicateKeys,enableEntryIdLookup:e.entryKeys!==null,autoScale:e.autoScale},entries:t}},Kn=1e6,Mn=e=>{if(typeof e!="object"||e===null||e.version!==1)throw new g(`BTreeJSON: expected version 1, got ${String(e?.version)}.`);if(typeof e.config!="object"||e.config===null)throw new g("BTreeJSON: invalid config.");if(!Array.isArray(e.entries))throw new g("BTreeJSON: entries must be array.");if(e.entries.length>Kn)throw new g("BTreeJSON: entry count exceeds maximum.");for(let t=0;t<e.entries.length;t+=1){let r=e.entries[t];if(!Array.isArray(r)||r.length!==2)throw new g(`BTreeJSON: bad entries[${t}].`)}},zn=e=>{let t=(r,n)=>{if(!Number.isInteger(n)||n<Ee||n>Oe)throw new g(`BTreeJSON: invalid ${r}.`)};if(e.duplicateKeys!=="allow"&&e.duplicateKeys!=="reject"&&e.duplicateKeys!=="replace")throw new g(`BTreeJSON: invalid duplicateKeys: ${String(e.duplicateKeys)}.`);if(typeof 
e.enableEntryIdLookup!="boolean")throw new g("BTreeJSON: invalid enableEntryIdLookup.");if(typeof e.autoScale!="boolean")throw new g("BTreeJSON: invalid autoScale.");if(typeof e.maxLeafEntries!="number")throw new g("BTreeJSON: invalid maxLeafEntries.");if(typeof e.maxBranchChildren!="number")throw new g("BTreeJSON: invalid maxBranchChildren.");if(t("maxLeafEntries",e.maxLeafEntries),t("maxBranchChildren",e.maxBranchChildren),e.autoScale){let r=M(0);if(e.maxLeafEntries<r.maxLeaf||e.maxBranchChildren<r.maxBranch)throw new g("BTreeJSON: autoScale capacity below tier-0.")}},_n=e=>{Mn(e),zn(e.config)},Fn=(e,t)=>{let r=e.config,n={compareKeys:t,duplicateKeys:r.duplicateKeys,enableEntryIdLookup:r.enableEntryIdLookup,autoScale:r.autoScale};return r.autoScale||(n.maxLeafEntries=r.maxLeafEntries,n.maxBranchChildren=r.maxBranchChildren),n},vt=e=>{if(T(e)){if(e.entryOffset>=e.entries.length)return null;let t=e.entries[e.entryOffset];return{key:t.key,sequence:t.entryId}}return e.childOffset>=e.keys.length?null:{key:e.keys[e.childOffset].key,sequence:e.keys[e.childOffset].sequence}},pe=(e,t,r,n,o)=>{let i=e(t,n);return i!==0?i:r-o},Bt=e=>{if(T(e)){if(e.entryOffset>=e.entries.length)return null;let t=e.entries[e.entries.length-1];return{key:t.key,sequence:t.entryId}}return e.childOffset>=e.children.length?null:Bt(e.children[e.children.length-1])},Y=e=>{if(!Number.isFinite(e))throw new g("compareKeys must return a finite number.");return e},Jn=(e,t)=>{if(Y(e(t,t))!==0)throw new g("compareKeys must satisfy reflexivity: compare(x, x) must return 0.")},jn=(e,t,r,n)=>{let o=Math.sign(Y(e(t,r))),i=Math.sign(Y(e(r,n)));if(o<0&&i<0&&Math.sign(Y(e(t,n)))>=0)throw new g("compareKeys must satisfy transitivity for observed key triples.");if(o>0&&i>0&&Math.sign(Y(e(t,n)))<=0)throw new g("compareKeys must satisfy transitivity for observed key triples.")},qn=(e,t)=>{try{Jn(e.compareKeys,t)}catch(r){throw r instanceof g?new 
f(r.message):r}},Gn=(e,t,r,n)=>{try{jn(e.compareKeys,t,r,n)}catch(o){throw o instanceof g?new f(o.message):o}},Vn=(e,t,r,n)=>{if(!T(t))throw new f("Leaf linkage cursor reached non-leaf node.");if(n.has(t))throw new f("Cycle detected in leaf linkage.");if(t.prev!==r)throw new f("Leaf prev pointer mismatch.");if(r!==null&&T(r)){let o=Bt(r),i=vt(t);if(o===null||i===null)throw new f("Non-empty tree leaf chain contains empty leaf node.");if(pe(e.compareKeys,o.key,o.sequence,i.key,i.sequence)>0)throw new f("Adjacent leaf key ranges are out of order.");let a=p(r),s=p(t);if(e.duplicateKeys!=="allow"&&a>0&&s>0&&e.compareKeys(y(r,a-1).key,y(t,0).key)===0)throw new f("Duplicate user key detected across adjacent leaves with uniqueness policy.")}},Hn=(e,t)=>{if(e.entryCount===0){if(!T(e.root))throw new f("Empty tree root must be a leaf node.");if(e.leftmostLeaf!==e.root||e.rightmostLeaf!==e.root)throw new f("Empty tree leaf pointers must reference root leaf.");return}if(e.leftmostLeaf.prev!==null)throw new f("Leftmost leaf prev pointer must be null.");if(e.rightmostLeaf.next!==null)throw new f("Rightmost leaf next pointer must be null.");let r=new Set,n=e.leftmostLeaf,o=null,i=0;for(;n!==null;)Vn(e,n,o,r),r.add(n),o=n,n=n.next,i+=1;if(o!==e.rightmostLeaf)throw new f("Rightmost leaf pointer mismatch.");if(i!==t)throw new f("Leaf chain count mismatch with tree traversal count.")},$n=(e,t)=>{let r=p(t);for(let n=0;n<r;n+=1)qn(e,y(t,n).key);for(let n=1;n<r;n+=1)if(pe(e.compareKeys,y(t,n-1).key,y(t,n-1).entryId,y(t,n).key,y(t,n).entryId)>=0)throw new f("Leaf entries are not strictly ordered.");if(e.duplicateKeys!=="allow"){for(let n=1;n<r;n+=1)if(e.compareKeys(y(t,n-1).key,y(t,n).key)===0)throw new f("Duplicate user key detected in tree with uniqueness policy.")}for(let n=2;n<r;n+=1){let o=y(t,n-2),i=y(t,n-1),a=y(t,n);Gn(e,o.key,i.key,a.key)}if(r>e.maxLeafEntries)throw new f("Leaf node exceeds maximum occupancy.")},Un=(e,t,r)=>{$n(e,t);let 
n=p(t),o=e.autoScale?Math.ceil(M(0).maxLeaf/2):e.minLeafEntries;if(t!==e.root&&n<o)throw new f("Non-root leaf node violates minimum occupancy.");let i=n===0?null:y(t,0),a=n===0?null:y(t,n-1),s=i===null?null:{key:i.key,sequence:i.entryId},c=a===null?null:{key:a.key,sequence:a.entryId};return{minKey:s,maxKey:c,leafDepth:n===0?null:r,leafCount:1,branchCount:0,entryCount:n}},Wn=(e,t)=>{let r=t.children.length-t.childOffset;if(r===0)throw new f("Branch node has zero children.");let n=e.autoScale?Math.ceil(M(0).maxBranch/2):e.minBranchChildren;if(t!==e.root&&r<n)throw new f("Non-root branch node violates minimum occupancy.");if(r>e.maxBranchChildren)throw new f("Branch node exceeds maximum occupancy.");if(t.keys.length!==t.children.length)throw new f("Branch keys array length does not match children array length.")},Xn=(e,t,r,n)=>{let o=t.children[r];if(o.parent!==t)throw new f("Child-parent pointer mismatch in branch node.");if(o.indexInParent!==r)throw new f("Child indexInParent does not match actual position in parent.");let i=bt(e,o,n+1);if(i.minKey===null||i.maxKey===null)throw new f("Branch child must not be empty in non-root branch tree.");let a=t.keys[r],s=vt(o);if(s===null||pe(e.compareKeys,a.key,a.sequence,s.key,s.sequence)!==0)throw new f("Branch cached key does not match actual child minimum key.");return i},bt=(e,t,r)=>{if(T(t))return Un(e,t,r);Wn(e,t);let n=null,o=0,i=1,a=0,s=null,c=null,l=null;for(let u=t.childOffset;u<t.children.length;u+=1){let m=Xn(e,t,u,r);if(n!==null&&m.leafDepth!==null&&m.leafDepth!==n)throw new f("Leaf depth mismatch detected in tree.");if(n===null&&m.leafDepth!==null&&(n=m.leafDepth),l!==null&&pe(e.compareKeys,l.key,l.sequence,m.minKey.key,m.minKey.sequence)>=0)throw new f("Branch child key ranges are not strictly ordered.");s===null&&(s=m.minKey),c=m.maxKey,l=m.maxKey,o+=m.leafCount,i+=m.branchCount,a+=m.entryCount}return{minKey:s,maxKey:c,leafDepth:n,leafCount:o,branchCount:i,entryCount:a}},Yn=e=>{let 
t=bt(e,e.root,0);if(t.entryCount!==e.entryCount)throw new f("Index entry count mismatch between tree traversal and tracked state.");Hn(e,t.leafCount)},It=e=>{if(T(e))return{height:1,leafCount:1,branchCount:0};let t=0,r=0,n=1;for(let o=e.childOffset;o<e.children.length;o+=1){let i=e.children[o],a=It(i);a.height>t&&(t=a.height),r+=a.leafCount,n+=a.branchCount}return{height:t+1,leafCount:r,branchCount:n}},Qn=e=>{let t=It(e.root);return{height:t.height,leafCount:t.leafCount,branchCount:t.branchCount,entryCount:e.entryCount}},he=class Ae{constructor(t){this.state=Sn(t)}put(t,r){return vn(this.state,t,r)}putMany(t){return Tn(this.state,t)}remove(t){return In(this.state,t)}removeById(t){if(this.state.entryKeys===null)throw new g("Requires enableEntryIdLookup: true.");return En(this.state,t)}peekById(t){if(this.state.entryKeys===null)throw new g("Requires enableEntryIdLookup: true.");return On(this.state,t)}updateById(t,r){if(this.state.entryKeys===null)throw new g("Requires enableEntryIdLookup: true.");return An(this.state,t,r)}popFirst(){return Bn(this.state)}peekFirst(){return this.state.entryCount===0?null:y(this.state.leftmostLeaf,0)}peekLast(){if(this.state.entryCount===0)return null;let t=this.state.rightmostLeaf;return y(t,p(t)-1)}popLast(){return bn(this.state)}clear(){let t=Pe([],null);if(this.state.root=t,this.state.leftmostLeaf=t,this.state.rightmostLeaf=t,this.state.entryCount=0,this.state._cursor.leaf=t,this.state._cursor.index=0,this.state.entryKeys!==null&&this.state.entryKeys.clear(),this.state.autoScale){let r=M(0);this.state.maxLeafEntries=r.maxLeaf,this.state.maxBranchChildren=r.maxBranch,this.state.minLeafEntries=Math.ceil(r.maxLeaf/2),this.state.minBranchChildren=Math.ceil(r.maxBranch/2),this.state._nextAutoScaleThreshold=Ne(0)}}get(t){let r=de(this.state,t);return r===null?null:y(r.leaf,r.index).value}hasKey(t){return rn(this.state,t)}findFirst(t){let r=de(this.state,t);return r===null?null:y(r.leaf,r.index)}findLast(t){let r=tn(this.state,t);return 
r===null?null:y(r.leaf,r.index)}nextHigherKey(t){return nn(this.state,t)}nextLowerKey(t){return on(this.state,t)}getPairOrNextLower(t){let r=an(this.state,t);return r===null?null:y(r.leaf,r.index)}count(t,r,n){return xt(this.state,t,r,n)}deleteRange(t,r,n){return gn(this.state,t,r,n)}range(t,r,n){return Dn(this.state,t,r,n)}*entries(){let t=this.state.leftmostLeaf;for(;t!==null;){let r=p(t);for(let n=0;n<r;n+=1)yield y(t,n);t=t.next}}*entriesReversed(){let t=this.state.rightmostLeaf;for(;t!==null;){let r=p(t);for(let n=r-1;n>=0;n-=1)yield y(t,n);t=t.prev}}*keys(){let t=this.state.leftmostLeaf;for(;t!==null;){let r=p(t);for(let n=0;n<r;n+=1)yield y(t,n).key;t=t.next}}*values(){let t=this.state.leftmostLeaf;for(;t!==null;){let r=p(t);for(let n=0;n<r;n+=1)yield y(t,n).value;t=t.next}}[Symbol.iterator](){return this.entries()}forEach(t,r){let n=this.state.leftmostLeaf;for(;n!==null;){let o=p(n);for(let i=0;i<o;i+=1)t.call(r,y(n,i));n=n.next}}snapshot(){let t=new Array(this.state.entryCount),r=this.state.leftmostLeaf,n=0;for(;r!==null;){let o=p(r);for(let i=0;i<o;i+=1)t[n++]=y(r,i);r=r.next}return t}clone(){let t=new Ae(Nn(this.state));if(yt(t.state,this.state.maxLeafEntries,this.state.maxBranchChildren),this.state.entryCount>0){let r=new Array(this.state.entryCount),n=this.state.leftmostLeaf,o=0;for(;n!==null;){let i=p(n);for(let a=0;a<i;a+=1)r[o++]=y(n,a);n=n.next}t.putMany(r)}return t}toJSON(){return Rn(this.state)}static fromJSON(t,r){_n(t);let n=t.config.duplicateKeys!=="allow";for(let i=1;i<t.entries.length;i+=1){let a=r(t.entries[i-1][0],t.entries[i][0]);if(a>0)throw new g("fromJSON: entries not sorted.");if(n&&a===0)throw new g('fromJSON: duplicate keys require duplicateKeys "allow".')}let o=new Ae(Fn(t,r));if(yt(o.state,t.config.maxLeafEntries,t.config.maxBranchChildren),t.entries.length>0){let i=new Array(t.entries.length);for(let a=0;a<t.entries.length;a+=1)i[a]={key:t.entries[a][0],value:t.entries[a][1]};o.putMany(i)}return o}size(){return 
this.state.entryCount}assertInvariants(){Yn(this.state)}getStats(){return Qn(this.state)}};var Ot=e=>{if(!Number.isFinite(e)||!Number.isInteger(e))throw new B("key comparator must return a finite integer result.");return e===0?0:e<0?-1:1},J=e=>e===0?0:e<0?-1:1,Et=e=>(t,r)=>{let n=e(t,r);if(n!==n)throw new B("key comparator must not return NaN.");return J(n)},ee=class e{constructor(t){let n={compareKeys:Et(t.compareKeys),duplicateKeys:t.duplicateKeys??"allow",enableEntryIdLookup:!0};this.tree=new he(n)}put(t,r){return this.tree.put(t,r)}putMany(t){return this.tree.putMany(t)}peekById(t){return this.tree.peekById(t)}updateById(t,r){return this.tree.updateById(t,r)}removeById(t){return this.tree.removeById(t)}rangeQuery(t,r){return this.tree.range(t,r)}deleteRange(t,r){return this.tree.deleteRange(t,r,{lowerBound:"inclusive",upperBound:"inclusive"})}snapshot(){return this.tree.snapshot()}peekLast(){return this.tree.peekLast()}popFirst(){return this.tree.popFirst()}size(){return this.tree.size()}findFirst(t){return this.tree.findFirst(t)}findLast(t){return this.tree.findLast(t)}hasKey(t){return this.tree.hasKey(t)}keys(){return this.tree.keys()}toJSON(){return this.tree.toJSON()}static fromJSON(t,r){let n=Et(r.compareKeys),o=Object.create(e.prototype),i=r.duplicateKeys??"allow",a=i!==t.config.duplicateKeys?{...t,config:{...t.config,duplicateKeys:i}}:t;return o.tree=he.fromJSON(a,n),o}clear(){this.tree.clear()}};var Ke=(e,t)=>{if(typeof e!="string")throw new S(`${t} must be a string.`);if(e.length===0)throw new S(`${t} must be a non-empty string.`);return e},Zn={normalize:(e,t)=>Ke(e,t),compare:(e,t)=>e<t?-1:e>t?1:0,serialize:e=>Ke(e,"key"),deserialize:e=>Ke(e,"serialized key")},eo=e=>{if(typeof e.normalize!="function")throw new h("config.key.normalize must be a function.");if(typeof e.compare!="function")throw new h("config.key.compare must be a function.");if(typeof e.serialize!="function")throw new h("config.key.serialize must be a function.");if(typeof 
e.deserialize!="function")throw new h("config.key.deserialize must be a function.")},At=e=>e.key===void 0?Zn:(eo(e.key),e.key),ge=e=>{if(Object.prototype.hasOwnProperty.call(e,"key"))return{rawKey:e.key,keyFieldName:"key"};throw new S('Record must include "key".')};var Se=class{constructor(t){this.errorListeners=new Set,this.keyDefinition=At(t);let r=rt(t.duplicateKeys);if(this.duplicateKeyPolicy=r,this.keyIndex=new ee({compareKeys:(o,i)=>this.keyDefinition.compare(o,i),duplicateKeys:r}),this.capacityState=nt(t),this.skipPayloadValidation=t.skipPayloadValidation===!0,this.lifecycle=new se,this.writeMutex=new oe,this.currentSizeBytes=0,this.backendController=null,this.pendingInit=null,this.pendingInitError=null,t.driver===void 0){if(t.autoCommit!==void 0)throw new h("autoCommit requires a durable driver.");return}let n=t.driver.init({getSnapshot:()=>({treeJSON:this.keyIndex.toJSON()}),autoCommit:t.autoCommit,onAutoCommitError:o=>{Ve(this.errorListeners,o)}});if(!Tt(n)){this.applyBackendInitResult(n);return}this.pendingInit=Promise.resolve(n).then(o=>{this.applyBackendInitResult(o)}).catch(o=>{this.pendingInitError=x(o,"Datastore backend initialization failed with a non-Error value.")}).finally(()=>{this.pendingInit=null})}put(t){return this.runWithOpenExclusive(()=>this.putSingle(t))}get(t){return this.runWithOpen(()=>{let r=this.keyDefinition.normalize(t,"key");return this.keyIndex.rangeQuery(r,r).map(n=>N(n.entryId,n.key,n.value))})}getFirst(t){return this.runWithOpen(()=>{let r=this.keyDefinition.normalize(t,"key"),n=this.keyIndex.findFirst(r);return n===null?null:N(n.entryId,n.key,n.value)})}getLast(t){return this.runWithOpen(()=>{let r=this.keyDefinition.normalize(t,"key"),n=this.keyIndex.findLast(r);return n===null?null:N(n.entryId,n.key,n.value)})}delete(t){return this.runWithOpenExclusive(()=>this.deleteSingle(t))}has(t){return this.runWithOpen(()=>{let r=this.keyDefinition.normalize(t,"key");return this.keyIndex.hasKey(r)})}getAll(){return 
this.runWithOpen(()=>this.keyIndex.snapshot().map(t=>N(t.entryId,t.key,t.value)))}getRange(t,r){return this.runWithOpen(()=>{let n=this.keyDefinition.normalize(t,"start"),o=this.keyDefinition.normalize(r,"end");if(Ot(this.keyDefinition.compare(n,o))>0)throw new G("start must be <= end.");return this.keyIndex.rangeQuery(n,o).map(i=>N(i.entryId,i.key,i.value))})}getMany(t){return this.runWithOpen(()=>{let r=[];for(let i of t)r.push(this.keyDefinition.normalize(i,"key"));r.sort((i,a)=>J(this.keyDefinition.compare(i,a)));let n=[],o;for(let i=0;i<r.length;i+=1){if(i>0&&J(this.keyDefinition.compare(r[i],o))===0)continue;o=r[i];let a=this.keyIndex.rangeQuery(r[i],r[i]);for(let s of a)n.push(N(s.entryId,s.key,s.value))}return n})}putMany(t){return this.runWithOpenExclusive(async()=>{if(this.capacityState===null&&this.backendController===null){for(let r of t){let{rawKey:n,keyFieldName:o}=ge(r),i=this.keyDefinition.normalize(n,o);if(this.duplicateKeyPolicy==="reject"&&this.keyIndex.findFirst(i)!==null)throw new S("Duplicate key rejected: a record with this key already exists.");let a=this.skipPayloadValidation?r.payload:$(r.payload).payload;this.keyIndex.put(i,{payload:a,sizeBytes:0})}return}if(this.capacityState===null){for(let r of t)await this.putSingle(r);return}if(this.capacityState.policy==="turnover"){for(let r of t)await this.putSingle(r);return}await this.putManyStrict(t)})}deleteMany(t){return this.runWithOpenExclusive(async()=>{if(this.backendController===null){let n=0;for(let o of t){let i=this.keyDefinition.normalize(o,"key"),a=this.keyIndex.rangeQuery(i,i);if(a.length===0)continue;let s=0;for(let c of a)s+=c.value.sizeBytes;n+=this.keyIndex.deleteRange(i,i),this.currentSizeBytes=Math.max(0,this.currentSizeBytes-s)}return n}let r=0;for(let n of t)r+=await this.deleteSingle(n);return r})}clear(){return this.runWithOpenExclusive(async()=>{this.keyIndex.clear(),this.currentSizeBytes=0,await this.backendController?.handleCleared()})}count(){return 
this.runWithOpen(()=>this.keyIndex.size())}keys(){return this.runWithOpen(()=>{let t=[],r,n=!0;for(let o of this.keyIndex.keys())(n||J(this.keyDefinition.compare(o,r))!==0)&&(t.push(o),r=o,n=!1);return t})}getById(t){return this.runWithOpen(()=>ot(this.keyIndex,t))}updateById(t,r){return this.runWithOpenExclusive(async()=>{let n=it({keyIndex:this.keyIndex,id:t,patch:r,capacityState:this.capacityState,currentSizeBytes:this.currentSizeBytes,skipPayloadValidation:this.skipPayloadValidation});return n.updated?(this.currentSizeBytes=n.currentSizeBytes,await this.backendController?.handleRecordAppended(n.durabilitySignalBytes),!0):!1})}deleteById(t){return this.runWithOpenExclusive(async()=>{let r=at({keyIndex:this.keyIndex,id:t,currentSizeBytes:this.currentSizeBytes});return r.deleted?(this.currentSizeBytes=r.currentSizeBytes,await this.backendController?.handleRecordAppended(r.durabilitySignalBytes),!0):!1})}commit(){return this.runWithOpenExclusive(async()=>{await this.backendController?.commitNow()})}on(t,r){if(t!=="error")throw new S('Only "error" event is supported.');return this.errorListeners.add(r),()=>{this.off(t,r)}}off(t,r){if(t!=="error")throw new S('Only "error" event is supported.');this.errorListeners.delete(r)}async close(){await st({lifecycle:this.lifecycle,getPendingInit:()=>this.pendingInit,getPendingInitError:()=>this.pendingInitError,setPendingInitError:t=>{this.pendingInitError=t},getBackendController:()=>this.backendController,setBackendController:t=>{this.backendController=t},clearInMemoryState:()=>{this.keyIndex.clear(),this.errorListeners.clear()}})}resolvePayload(t,r){if(this.skipPayloadValidation){let i=t.payload;return{payload:i,encodedBytes:X(r,i)}}let n=$(t.payload),o=ae(r);return{payload:n.payload,encodedBytes:n.sizeBytes+o}}async putSingle(t){let{rawKey:r,keyFieldName:n}=ge(t),o=this.keyDefinition.normalize(r,n);if(this.duplicateKeyPolicy==="reject"&&this.keyIndex.findFirst(o)!==null)throw new S("Duplicate key rejected: a record with 
this key already exists.");if(this.capacityState===null&&this.backendController===null){let c=this.skipPayloadValidation?t.payload:$(t.payload).payload;this.keyIndex.put(o,{payload:c,sizeBytes:0});return}let{payload:i,encodedBytes:a}=this.resolvePayload(t,o);if(this.capacityState===null){this.keyIndex.put(o,{payload:i,sizeBytes:a}),await this.backendController.handleRecordAppended(a);return}let s={payload:i,sizeBytes:a};if(a>this.capacityState.maxSizeBytes)throw new w("Record exceeds configured capacity.maxSize boundary.");if(this.duplicateKeyPolicy==="replace"){let c=this.keyIndex.findFirst(o);c!==null&&(this.currentSizeBytes=Math.max(0,this.currentSizeBytes-c.value.sizeBytes),this.keyIndex.removeById(c.entryId))}this.currentSizeBytes=et(this.capacityState,this.currentSizeBytes,a,()=>this.keyIndex.size(),()=>{let c=this.keyIndex.popFirst();if(c===null)throw new B("Record buffer reported empty state during turnover eviction.");return c.value.sizeBytes}),this.keyIndex.put(o,s),this.currentSizeBytes=Math.max(0,this.currentSizeBytes+a),await this.backendController?.handleRecordAppended(a)}async putManyStrict(t){let r=this.capacityState,n=this.keyDefinition.compare,o=[];for(let l=0;l<t.length;l+=1){let{rawKey:u,keyFieldName:m}=ge(t[l]);o.push({idx:l,normalizedKey:this.keyDefinition.normalize(u,m),record:t[l]})}o.sort((l,u)=>{let m=J(n(l.normalizedKey,u.normalizedKey));return m!==0?m:l.idx-u.idx});let{prepared:i,totalBatchDelta:a}=this.buildStrictBatchEntries(o,n,r.maxSizeBytes);if(this.currentSizeBytes+a>r.maxSizeBytes)throw new w("Insert exceeds configured capacity.maxSize under strict policy.");let s=0,c=0;for(let{normalizedKey:l,persistedRecord:u,encodedBytes:m,replacedBytes:k}of i){let E=k>0&&this.keyIndex.findFirst(l)===null?0:k;s+=m-E,c+=m,this.keyIndex.put(l,u)}this.currentSizeBytes=Math.max(0,this.currentSizeBytes+s),await this.backendController?.handleRecordAppended(c)}buildStrictBatchEntries(t,r,n){let o=[],i=0;for(let 
a=0;a<t.length;a+=1){let{normalizedKey:s,record:c}=t[a],l=a>0&&J(r(t[a-1].normalizedKey,s))===0;if(this.duplicateKeyPolicy==="reject"&&(l||this.keyIndex.findFirst(s)!==null))throw new S("Duplicate key rejected: a record with this key already exists.");let{payload:u,encodedBytes:m}=this.resolvePayload(c,s);if(m>n)throw new w("Record exceeds configured capacity.maxSize boundary.");let k=0;if(this.duplicateKeyPolicy==="replace"&&l){let C=o[o.length-1];i-=C.encodedBytes-C.replacedBytes,k=C.replacedBytes,o.pop()}else if(this.duplicateKeyPolicy==="replace"){let C=this.keyIndex.findFirst(s);k=C!==null?C.value.sizeBytes:0}let E={payload:u,sizeBytes:m};i+=m-k,o.push({normalizedKey:s,persistedRecord:E,encodedBytes:m,replacedBytes:k})}return{prepared:o,totalBatchDelta:i}}async deleteSingle(t){let r=this.keyDefinition.normalize(t,"key"),n=this.keyIndex.rangeQuery(r,r);if(n.length===0)return 0;let o=0;for(let a of n)o+=a.value.sizeBytes;let i=this.keyIndex.deleteRange(r,r);return this.currentSizeBytes=Math.max(0,this.currentSizeBytes-o),await this.backendController?.handleRecordAppended(o),i}runWithOpen(t){if(this.pendingInit!==null)return this.pendingInit.then(()=>{if(this.pendingInitError!==null)throw this.pendingInitError;return this.executeWithLifecycle(t)});if(this.pendingInitError!==null)return Promise.reject(this.pendingInitError);try{return Promise.resolve(this.executeWithLifecycle(t))}catch(r){return Promise.reject(r instanceof Error?r:new Error(String(r)))}}executeWithLifecycle(t){this.lifecycle.beginOperation();try{let r=t();return Tt(r)?Promise.resolve(r).then(n=>(this.lifecycle.endOperation(),n),n=>{throw this.lifecycle.endOperation(),n}):(this.lifecycle.endOperation(),r)}catch(r){throw this.lifecycle.endOperation(),r}}async runWithOpenExclusive(t){let r=await this.writeMutex.acquire();try{return await 
this.runWithOpen(t)}finally{r()}}applyBackendInitResult(t){t.initialTreeJSON!==null&&(this.keyIndex=ee.fromJSON(t.initialTreeJSON,{compareKeys:(r,n)=>this.keyDefinition.compare(r,n),duplicateKeys:this.duplicateKeyPolicy}),this.backfillMissingSizeBytes()),this.currentSizeBytes=t.initialCurrentSizeBytes,this.backendController=t.controller}backfillMissingSizeBytes(){for(let t of this.keyIndex.snapshot())if(typeof t.value.sizeBytes!="number"){let r={payload:t.value.payload,sizeBytes:X(t.key,t.value.payload)};this.keyIndex.updateById(t.entryId,r)}}},Tt=e=>typeof e!="object"&&typeof e!="function"||e===null?!1:typeof e.then=="function";var D=class{constructor(t,r){this.autoCommit=t,this.onAutoCommitError=r,this.pendingAutoCommitBytes=0,this.dirtyFromClear=!1,this.autoCommitTimer=null,this.commitInFlight=null,this.pendingForegroundCommitRequest=!1,this.pendingBackgroundCommitRequest=!1,this.closed=!1,this.startAutoCommitSchedule()}handleRecordAppended(t){return this.autoCommit.frequency==="immediate"?this.commitNow():(this.pendingAutoCommitBytes+=t,this.autoCommit.maxPendingBytes!==null&&this.pendingAutoCommitBytes>=this.autoCommit.maxPendingBytes?this.queueCommitRequest("foreground"):Promise.resolve())}handleCleared(){return this.dirtyFromClear=!0,this.autoCommit.frequency==="immediate"?this.commitNow():this.queueCommitRequest("background")}commitNow(){return this.queueCommitRequest("foreground")}async close(){if(this.closed)return;this.closed=!0,this.stopAutoCommitSchedule(),await this.waitForCommitSettlement();let t=null;if(this.pendingAutoCommitBytes>0||this.dirtyFromClear)try{await this.executeSingleCommit(),this.pendingAutoCommitBytes=0,this.dirtyFromClear=!1}catch(n){t=x(n,"Final close-time flush commit failed with a non-Error value.")}let r=null;try{await this.onCloseAfterDrain()}catch(n){r=x(n,"onCloseAfterDrain failed with a non-Error value.")}if(t!==null&&r!==null)throw ro(t,r);if(t!==null)throw t;if(r!==null)throw r}getPendingAutoCommitBytes(){return 
this.pendingAutoCommitBytes}onCloseAfterDrain(){return Promise.resolve()}waitForCommitSettlement(){return this.commitInFlight===null?Promise.resolve():this.commitInFlight.then(()=>{}).catch(()=>{})}queueCommitRequest(t){return t==="foreground"?this.pendingForegroundCommitRequest=!0:this.pendingBackgroundCommitRequest=!0,this.commitInFlight===null&&(this.commitInFlight=this.runCommitLoop().finally(()=>{this.commitInFlight=null})),t==="background"?Promise.resolve():this.commitInFlight}async runCommitLoop(){let t=!0;for(;t;){let r=this.pendingForegroundCommitRequest,n=this.pendingBackgroundCommitRequest,o=this.dirtyFromClear;if(this.pendingForegroundCommitRequest=!1,this.pendingBackgroundCommitRequest=!1,this.dirtyFromClear=!1,!(r||n&&(this.pendingAutoCommitBytes>0||o))){t=!1;continue}try{let a=this.pendingAutoCommitBytes;await this.executeSingleCommit(),this.pendingAutoCommitBytes=Math.max(0,this.pendingAutoCommitBytes-a)}catch(a){if(o&&(this.dirtyFromClear=!0),r)throw x(a,"Foreground auto-commit failed with a non-Error value.");this.onAutoCommitError(a)}!this.pendingForegroundCommitRequest&&!this.pendingBackgroundCommitRequest&&(t=!1)}}startAutoCommitSchedule(){this.autoCommit.frequency!=="scheduled"||this.autoCommit.intervalMs===null||(this.autoCommitTimer=setInterval(()=>{this.handleAutoCommitTick()},this.autoCommit.intervalMs),typeof this.autoCommitTimer=="object"&&this.autoCommitTimer!==null&&"unref"in this.autoCommitTimer&&this.autoCommitTimer.unref())}stopAutoCommitSchedule(){this.autoCommitTimer!==null&&(clearInterval(this.autoCommitTimer),this.autoCommitTimer=null)}handleAutoCommitTick(){this.closed||this.pendingAutoCommitBytes<=0&&!this.dirtyFromClear||this.queueCommitRequest("background")}},to=()=>{let e=globalThis.AggregateError;return typeof e!="function"?null:e},ro=(e,t)=>{let r=to();if(r!==null)return new r([e,t],"Close failed: both final flush and drain produced errors.");let n=new Error("Close failed: both final flush and drain produced 
errors.");return n.errors=[e,t],n};var no=32768,oo=64,we=e=>{let t=e?.keyPrefix??"frostpillar",r=e?.databaseKey??"default",n=e?.maxChunkChars??no,o=e?.maxChunks??oo;if(!Number.isSafeInteger(n)||n<=0)throw new h("localStorage.maxChunkChars must be a positive safe integer.");if(!Number.isSafeInteger(o)||o<=0)throw new h("localStorage.maxChunks must be a positive safe integer.");return{keyPrefix:t,databaseKey:r,maxChunkChars:n,maxChunks:o}};var v=(e,t,r)=>{if(typeof e!="number"||!Number.isSafeInteger(e)||e<0)throw new d(`${r} ${t} must be a non-negative safe integer.`);return e};var Me=(e,t)=>`${e}:ls:${t}:manifest`,te=(e,t,r,n)=>`${e}:ls:${t}:g:${r}:chunk:${n}`,ze=(e,t,r)=>{if(r!==null){if(r<=0)return;for(let n=0;n<r;n+=1)e.adapter.removeItem(te(e.keyPrefix,e.databaseKey,t,n));return}if(!(e.maxChunks<=0))for(let n=0;n<e.maxChunks;n+=1){let o=te(e.keyPrefix,e.databaseKey,t,n);e.adapter.getItem(o)!==null&&e.adapter.removeItem(o)}},Lt=e=>e instanceof Error?e.name==="QuotaExceededError"||e.name==="NS_ERROR_DOM_QUOTA_REACHED":!1;var Pt="FPLS_META",Dt=2,Nt=()=>{try{let t=globalThis.localStorage;return t??null}catch{return null}},Rt=(e,t,r,n,o)=>({adapter:e,keyPrefix:t,databaseKey:r,maxChunkChars:n,maxChunks:o,activeGeneration:0,commitId:0,activeChunkCount:0}),io=(e,t)=>{let r;try{r=JSON.parse(e)}catch{throw new d("localStorage manifest JSON is malformed.")}if(r.magic!==Pt||r.version!==Dt)throw new d("localStorage manifest magic/version mismatch.");let n=v(r.chunkCount,"manifest.chunkCount","localStorage");if(n>t)throw new d(`localStorage snapshot requires ${n} chunks but maxChunks is ${t}.`);return r},ao=(e,t,r)=>{let n=[];for(let a=0;a<r;a+=1){let s=te(e.keyPrefix,e.databaseKey,t,a),c=e.adapter.getItem(s);if(typeof c!="string")throw new d(`localStorage chunk "${s}" is missing or not a string.`);n.push(c)}let o=n.join(""),i;try{i=JSON.parse(o)}catch{throw new d("localStorage chunk data JSON is malformed.")}if(typeof i!="object"||i===null||Array.isArray(i))throw new 
I("treeJSON must be a non-null plain object.");return{treeJSON:i,rawJsonLength:A(o)}},Kt=e=>{let t=Me(e.keyPrefix,e.databaseKey),r=e.adapter.getItem(t);if(r===null)return{treeJSON:null,currentSizeBytes:0};let n=io(r,e.maxChunks),o=v(n.activeGeneration,"manifest.activeGeneration","localStorage"),i=v(n.commitId,"manifest.commitId","localStorage"),a=v(n.chunkCount,"manifest.chunkCount","localStorage"),{treeJSON:s,rawJsonLength:c}=ao(e,o,a),l=c;return e.activeGeneration=o,e.commitId=i,e.activeChunkCount=a,{treeJSON:s,currentSizeBytes:l}},so=(e,t,r,n)=>{let o=JSON.stringify(e),i=[];for(let a=0;a<o.length;a+=t)i.push(o.slice(a,a+t));if(i.length>r)throw new w(`${n} snapshot requires ${i.length} chunks but maxChunks is ${r}.`);return i},co=e=>{if(e.commitId>=Number.MAX_SAFE_INTEGER)throw new d("localStorage commitId has reached Number.MAX_SAFE_INTEGER.");if(e.activeGeneration>=Number.MAX_SAFE_INTEGER)throw new d("localStorage activeGeneration has reached Number.MAX_SAFE_INTEGER.")},lo=(e,t)=>{let r=e.commitId+1,n=e.activeGeneration+1,o=so(t,e.maxChunkChars,e.maxChunks,"localStorage"),i={magic:Pt,version:Dt,activeGeneration:n,commitId:r,chunkCount:o.length};return{nextCommitId:r,nextGeneration:n,chunks:o,manifestJson:JSON.stringify(i)}},uo=(e,t)=>{try{ze(e,t.nextGeneration,null);for(let r=0;r<t.chunks.length;r+=1)e.adapter.setItem(te(e.keyPrefix,e.databaseKey,t.nextGeneration,r),t.chunks[r]);e.adapter.setItem(Me(e.keyPrefix,e.databaseKey),t.manifestJson)}catch(r){throw Lt(r)||r instanceof w?new w("localStorage quota exceeded during commit."):new d("localStorage write failed during commit.")}},Mt=(e,t)=>{co(e);let r=lo(e,t);uo(e,r);let n=e.activeGeneration,o=e.activeChunkCount;e.activeGeneration=r.nextGeneration,e.commitId=r.nextCommitId,e.activeChunkCount=r.chunks.length,ze(e,n,o)};var ke=class e extends D{constructor(t,r,n,o){super(r,o),this.backend=t,this.getSnapshot=n}static create(t){let r=Nt();if(r===null)throw new b("localStorage is not available in the current 
runtime environment.");let n=we(t.config),o=R(t.autoCommit),i=Rt(r,n.keyPrefix,n.databaseKey,n.maxChunkChars,n.maxChunks),a=Kt(i);return{controller:new e(i,o,t.getSnapshot,t.onAutoCommitError),initialTreeJSON:a.treeJSON,initialCurrentSizeBytes:a.currentSizeBytes}}executeSingleCommit(){let t=this.getSnapshot();return Mt(this.backend,t.treeJSON),Promise.resolve()}};var zt=(e={})=>({init:t=>{let r=ke.create({config:e,autoCommit:t.autoCommit,getSnapshot:t.getSnapshot,onAutoCommitError:t.onAutoCommitError});return{controller:r.controller,initialTreeJSON:r.initialTreeJSON,initialCurrentSizeBytes:r.initialCurrentSizeBytes}},resolveBackendLimitBytes:()=>{let{maxChunkChars:t,maxChunks:r}=we(e);return t*r}});var _t=(e,t)=>{if(e.trim().length===0)throw new h(`${t} must be a non-empty string.`)},Ft=e=>{let t=e?.databaseName??"frostpillar",r=e?.objectStoreName??"frostpillar",n=e?.version??1;if(_t(t,"indexedDB.databaseName"),_t(r,"indexedDB.objectStoreName"),r==="_meta")throw new h('indexedDB.objectStoreName must not be "_meta" because it is reserved for internal metadata.');if(!Number.isSafeInteger(n)||n<=0)throw new h("indexedDB.version must be a positive safe integer.");return{databaseName:t,objectStoreName:r,version:n}};var Jt="FPIDB_META",jt=2,U="_meta",qt="config",Gt=()=>{try{let t=globalThis.indexedDB;return t??null}catch{return null}},mo=e=>new Promise((t,r)=>{e.onsuccess=n=>{t(n.target.result)},e.onerror=n=>{r(new d(`IndexedDB request failed: ${String(n.target.error?.message??"unknown")}`))}}),Vt=e=>new Promise((t,r)=>{e.oncomplete=()=>{t()},e.onerror=()=>{r(new d("IndexedDB transaction failed."))}}),Ht=(e,t,r,n)=>new Promise((o,i)=>{let a=e.open(t,n);a.onupgradeneeded=s=>{let c=s.target.result;c!==null&&(c.objectStoreNames.contains(r)||c.createObjectStore(r),c.objectStoreNames.contains(U)||c.createObjectStore(U))},a.onsuccess=s=>{let c=s.target.result;if(c===null){i(new d("IndexedDB open returned null database."));return}o(c)},a.onerror=s=>{i(new d(`IndexedDB open 
failed: ${String(s.target.error?.message??"unknown")}`))}}),$t=async(e,t)=>{let r=e.transaction([U],"readonly"),n=Vt(r),o=r.objectStore(U),i=await mo(o.get(qt));if(await n,i==null)return{treeJSON:null,currentSizeBytes:0,commitId:0};let a=i;if(a.magic!==Jt||a.version!==jt)throw new d("IndexedDB metadata magic/version mismatch.");let s=v(a.commitId,"meta.commitId","IndexedDB"),c=a.treeJSON;if(typeof c!="object"||c===null||Array.isArray(c))throw new I("treeJSON must be a non-null plain object.");let l=A(JSON.stringify(c));return{treeJSON:c,currentSizeBytes:l,commitId:s}},Ut=async(e,t,r,n)=>{let o=e.transaction([U],"readwrite"),i=Vt(o),a=o.objectStore(U),s={magic:Jt,version:jt,commitId:n,treeJSON:r};a.put(s,qt),await i};var Ce=class e extends D{constructor(t,r,n,o,i,a){super(o,a),this.db=t,this.objectStoreName=r,this.commitId=n,this.getSnapshot=i}static async create(t){let r=Gt();if(r===null)throw new b("indexedDB is not available in the current runtime environment.");let n=Ft(t.config),{databaseName:o,objectStoreName:i,version:a}=n,s=R(t.autoCommit),c=await Ht(r,o,i,a),l;try{l=await $t(c,i)}catch(m){try{c.close()}catch{}throw x(m,"IndexedDB bootstrap failed with a non-Error value.")}return{controller:new e(c,i,l.commitId,s,t.getSnapshot,t.onAutoCommitError),initialTreeJSON:l.treeJSON,initialCurrentSizeBytes:l.currentSizeBytes}}async executeSingleCommit(){let t=this.getSnapshot();if(this.commitId>=Number.MAX_SAFE_INTEGER)throw new d("IndexedDB commitId has reached Number.MAX_SAFE_INTEGER.");let r=this.commitId+1;await Ut(this.db,this.objectStoreName,t.treeJSON,r),this.commitId=r}onCloseAfterDrain(){return this.db.close(),Promise.resolve()}};var Wt=(e={})=>({init:async t=>{let r=await Ce.create({config:e,autoCommit:t.autoCommit,getSnapshot:t.getSnapshot,onAutoCommitError:t.onAutoCommitError});return{controller:r.controller,initialTreeJSON:r.initialTreeJSON,initialCurrentSizeBytes:r.initialCurrentSizeBytes}}});var 
// ---------------------------------------------------------------------------
// OPFS (Origin Private File System) backend.
// Continues the `var` declaration list begun on the previous line.
// Xt/Yt: meta.json magic + format version; Qt: meta file name; Zt/er: the two
// data files used by the A/B double-buffered commit scheme.
Xt="FPOPFS_META",Yt=2,Qt="meta.json",Zt="data-a.json",er="data-b.json",
// yo: true when the error looks like a DOMException-style "NotFoundError".
yo=e=>e instanceof Error?e.name==="NotFoundError":!1,
// fo: plain-object guard (non-null, non-array object).
fo=e=>e!==null&&typeof e=="object"&&!Array.isArray(e),
// tr: feature-detect navigator.storage.getDirectory; null when OPFS is absent.
tr=()=>{try{let e=globalThis;return typeof e.navigator?.storage?.getDirectory=="function"?e.navigator.storage:null}catch{return null}},
// rr: open (creating if needed) the named directory under the OPFS root.
rr=async(e,t)=>(await e.getDirectory()).getDirectoryHandle(t,{create:!0}),
// po: parse and validate meta.json text -> { manifest, commitId, activeData }.
po=e=>{let t;try{t=JSON.parse(e)}catch{throw new d("OPFS meta.json JSON is malformed.")}if(!fo(t))throw new d("OPFS meta.json must be a JSON object.");let r=t;if(r.magic!==Xt||r.version!==Yt)throw new d("OPFS meta.json magic/version mismatch.");if(r.activeData!=="a"&&r.activeData!=="b")throw new d('OPFS meta.json activeData must be "a" or "b".');let n=v(r.commitId,"meta.json commitId","OPFS");return{manifest:r,commitId:n,activeData:r.activeData}},
// ho: read the named data file and parse it as the treeJSON snapshot.
ho=async(e,t)=>{let r;try{r=await(await(await e.getFileHandle(t,{create:!1})).getFile()).text()}catch{throw new d(`OPFS active data file "${t}" not found.`)}let n;try{n=JSON.parse(r)}catch{throw new d("OPFS data file JSON is malformed.")}if(typeof n!="object"||n===null||Array.isArray(n))throw new I("treeJSON must be a non-null plain object.");return{treeJSON:n,rawJsonLength:A(r)}},
// nr: bootstrap load; a missing meta.json yields an empty initial state.
nr=async e=>{let t;try{t=await(await(await e.getFileHandle(Qt,{create:!1})).getFile()).text()}catch(c){if(!yo(c))throw Ie(c,"OPFS meta.json read failed");return{treeJSON:null,currentSizeBytes:0,commitId:0,activeData:"a"}}let{commitId:r,activeData:n}=po(t),o=n==="a"?Zt:er,{treeJSON:i,rawJsonLength:a}=await ho(e,o);return{treeJSON:i,currentSizeBytes:a,commitId:r,activeData:n}},
// or: commit; write the inactive data file first, then rewrite meta.json to
// point at it (A/B flip), returning the new active side ("a" or "b").
or=async(e,t,r,n)=>{let o=t==="a"?"b":"a",i=o==="a"?Zt:er,a=JSON.stringify(r);try{let c=await(await e.getFileHandle(i,{create:!0})).createWritable();await c.write(a),await c.close();let l={magic:Xt,version:Yt,activeData:o,commitId:n},m=await(await e.getFileHandle(Qt,{create:!0})).createWritable();await m.write(JSON.stringify(l)),await m.close()}catch(s){throw Ie(s,"OPFS commit failed")}return o};
// go: default OPFS directory name.
var go="frostpillar",
// xe: OPFS-backed storage controller; tracks the active A/B side + commitId.
xe=class e extends D{constructor(t,r,n,o,i,a){super(o,a),this.dir=t,this.activeData=r,this.commitId=n,this.getSnapshot=i}static async create(t){let r=tr();if(r===null)throw new b("opfs (Origin Private File System) is not available in the current runtime environment.");let o=t.config?.directoryName??go,i=R(t.autoCommit),a=await rr(r,o),s=await nr(a);return{controller:new e(a,s.activeData,s.commitId,i,t.getSnapshot,t.onAutoCommitError),initialTreeJSON:s.treeJSON,initialCurrentSizeBytes:s.currentSizeBytes}}async executeSingleCommit(){let t=this.getSnapshot();if(this.commitId>=Number.MAX_SAFE_INTEGER)throw new d("OPFS commitId has reached Number.MAX_SAFE_INTEGER.");let r=this.commitId+1;this.activeData=await or(this.dir,this.activeData,t.treeJSON,r),this.commitId=r}};
// ir: public factory for the OPFS storage-engine plugin.
var ir=(e={})=>({init:async t=>{let r=await xe.create({config:e,autoCommit:t.autoCommit,getSnapshot:t.getSnapshot,onAutoCommitError:t.onAutoCommitError});return{controller:r.controller,initialTreeJSON:r.initialTreeJSON,initialCurrentSizeBytes:r.initialCurrentSizeBytes}}});
// ---------------------------------------------------------------------------
// Browser sync storage (browser.storage.sync / chrome.storage.sync) backend.
// So: default maxTotalBytes (100 KiB).
var So=102400,
// _e: validate syncStorage.maxTotalBytes (positive safe integer).
_e=e=>{let t=e?.maxTotalBytes??So;if(!Number.isSafeInteger(t)||t<=0)throw new h("syncStorage.maxTotalBytes must be a positive safe integer.");return t},
// ar: parse + validate the full syncStorage configuration, with defaults.
ar=e=>{let t=e?.keyPrefix??"frostpillar",r=e?.databaseKey??"default",n=e?.maxChunkChars??6e3,o=e?.maxChunks??511,i=e?.maxItemBytes??8192,a=_e(e),s=e?.maxItems??512;if(!Number.isSafeInteger(n)||n<=0)throw new h("syncStorage.maxChunkChars must be a positive safe integer.");if(!Number.isSafeInteger(o)||o<=0)throw new h("syncStorage.maxChunks must be a positive safe integer.");if(!Number.isSafeInteger(i)||i<=0)throw new h("syncStorage.maxItemBytes must be a positive safe integer.");if(!Number.isSafeInteger(s)||s<=0)throw new h("syncStorage.maxItems must be a positive safe integer.");if(o+1>s)throw new h("syncStorage.maxChunks + 1 (manifest item) must be <= syncStorage.maxItems.");return{keyPrefix:t,databaseKey:r,maxChunkChars:n,maxChunks:o,maxItemBytes:i,maxTotalBytes:a,maxItems:s}};
// ve: strict plain-object check (prototype is Object.prototype or null).
var ve=e=>{if(typeof e!="object"||e===null||Array.isArray(e))return!1;let t=Object.getPrototypeOf(e);return t===Object.prototype||t===null};
// Fe: convert chrome.runtime.lastError into an Error (or null when unset).
var Fe=e=>{let t=e?.lastError?.message;return t===void 0?null:t.trim().length===0?new Error("chrome.runtime.lastError is set with an empty message."):new Error(t)},
// wo/ko/Co: promisify callback-style chrome.storage.sync get/set/remove,
// surfacing chrome.runtime.lastError as a rejection.
wo=(e,t,r)=>new Promise((n,o)=>{try{e.get(r,i=>{let a=Fe(t);if(a!==null){o(a);return}n(i)})}catch(i){o(x(i,"chrome.storage.sync.get failed with a non-Error value."))}}),
ko=(e,t,r)=>new Promise((n,o)=>{try{e.set(r,()=>{let i=Fe(t);if(i!==null){o(i);return}n()})}catch(i){o(x(i,"chrome.storage.sync.set failed with a non-Error value."))}}),
Co=(e,t,r)=>new Promise((n,o)=>{try{e.remove(r,()=>{let i=Fe(t);if(i!==null){o(i);return}n()})}catch(i){o(x(i,"chrome.storage.sync.remove failed with a non-Error value."))}}),
// xo: adapter over a promise-based storage area (e.g. browser.storage.sync).
xo=e=>({getItems:async t=>await e.get(t),setItems:async t=>{await e.set(t)},removeItems:async t=>{await e.remove(t)}}),
// vo: adapter over a callback-based storage area (e.g. chrome.storage.sync).
vo=(e,t)=>({getItems:async r=>await wo(e,t,r),setItems:async r=>{await ko(e,t,r)},removeItems:async r=>{await Co(e,t,r)}}),
// sr: duck-type check for a storage.sync-like area (Bo/bo are aliases).
sr=e=>ve(e)?typeof e.get=="function"&&typeof e.set=="function"&&typeof e.remove=="function":!1,Bo=e=>sr(e),bo=e=>sr(e),
// Je: detect an available sync-storage area, preferring browser.storage.sync.
Je=()=>{try{let e=globalThis,t=e.browser?.storage?.sync;if(Bo(t))return xo(t);let r=e.chrome?.storage?.sync;if(bo(r)){let n=e.chrome?.runtime??null;return vo(r,n)}return null}catch{return null}};
// je: remove chunk keys of generation t. When the chunk count r is known,
// delete exactly that many; otherwise probe up to maxChunks keys and delete
// only those that actually exist.
var je=async(e,t,r,n)=>{if(r!==null){if(r<=0)return;let s=[];for(let c=0;c<r;c+=1)s.push(n(t,c));await e.adapter.removeItems(s);return}if(e.maxChunks<=0)return;let o=[];for(let s=0;s<e.maxChunks;s+=1)o.push(n(t,s));let i=await e.adapter.getItems(o),a=o.filter(s=>Object.prototype.hasOwnProperty.call(i,s));a.length!==0&&await e.adapter.removeItems(a)};
// cr: shared UTF-8 encoder. Io: estimated stored byte size of one key/value
// pair (key bytes + JSON-serialized value bytes).
var cr=new TextEncoder,Io=(e,t)=>{let r=JSON.stringify(t);if(r===void 0)throw new d(`syncStorage value for key "${e}" cannot be serialized.`);return cr.encode(e).byteLength+cr.encode(r).byteLength},
// lr: heuristically detect quota-style errors raised by the storage backend.
lr=e=>{if(e instanceof w)return!0;if(!(e instanceof Error))return!1;let t=`${e.name}:${e.message}`;return/quota|max_items|quota_bytes|quota_bytes_per_item/i.test(t)},
// ur: pre-flight validation of item count, per-item bytes and total bytes
// for the pending write batch (chunks r plus manifest n under key i).
ur=(e,t,r,n,o,i)=>{let a=r.map((c,l)=>({key:o(t,l),value:c}));if(a.push({key:i,value:n}),a.length>e.maxItems)throw new w(`syncStorage snapshot requires ${a.length} items but maxItems is ${e.maxItems}.`);let s=0;for(let c of a){let l=Io(c.key,c.value);if(l>e.maxItemBytes)throw new w(`syncStorage item "${c.key}" requires ${l} bytes but maxItemBytes is ${e.maxItemBytes}.`);s+=l}if(s>e.maxTotalBytes)throw new w(`syncStorage snapshot requires ${s} bytes but maxTotalBytes is ${e.maxTotalBytes}.`)};
// Manifest constants and key builders (qe: manifest key, Ge: chunk key).
var dr="FPSYNC_META",mr=2,qe=(e,t)=>`${e}:sync:${t}:manifest`,Ge=(e,t,r,n)=>`${e}:sync:${t}:g:${r}:chunk:${n}`;
// yr: build the mutable backend state record (generation/commit bookkeeping).
var yr=(e,t,r,n,o,i,a,s)=>({adapter:e,keyPrefix:t,databaseKey:r,maxChunkChars:n,maxChunks:o,maxItemBytes:i,maxTotalBytes:a,maxItems:s,activeGeneration:0,commitId:0,activeChunkCount:0}),
// Eo: validate a stored manifest object against magic/version and maxChunks.
Eo=(e,t)=>{if(!ve(e))throw new d("syncStorage manifest must be an object.");let r=e;if(r.magic!==dr||r.version!==mr)throw new d("syncStorage manifest magic/version mismatch.");let n=v(r.chunkCount,"manifest.chunkCount","syncStorage");if(n>t)throw new d(`syncStorage snapshot requires ${n} chunks but maxChunks is ${t}.`);return r},
// Oo: fetch + concatenate all chunks of generation t, then parse the result
// as the treeJSON snapshot.
Oo=async(e,t,r)=>{let n=[];for(let c=0;c<r;c+=1)n.push(Ge(e.keyPrefix,e.databaseKey,t,c));let o=n.length===0?{}:await e.adapter.getItems(n),i=[];for(let c of n){let l=o[c];if(typeof l!="string")throw new d(`syncStorage chunk "${c}" is missing or not a string.`);i.push(l)}let a=i.join(""),s;try{s=JSON.parse(a)}catch{throw new d("syncStorage chunk data JSON is malformed.")}if(typeof s!="object"||s===null||Array.isArray(s))throw new I("treeJSON must be a non-null plain object.");return{treeJSON:s,rawJsonLength:A(a)}},
// fr: bootstrap load; a missing manifest yields an empty initial state.
// (Declaration continues on the next source line.)
fr=async e=>{let t=qe(e.keyPrefix,e.databaseKey),n=(await e.adapter.getItems([t]))[t];if(n===void 
0)return{treeJSON:null,currentSizeBytes:0};let o=Eo(n,e.maxChunks),i=v(o.activeGeneration,"manifest.activeGeneration","syncStorage"),a=v(o.commitId,"manifest.commitId","syncStorage"),s=v(o.chunkCount,"manifest.chunkCount","syncStorage"),{treeJSON:c,rawJsonLength:l}=await Oo(e,i,s),u=l;return e.activeGeneration=i,e.commitId=a,e.activeChunkCount=s,{treeJSON:c,currentSizeBytes:u}},
// Ao: curried chunk-key builder bound to this backend's prefix/databaseKey.
Ao=e=>(t,r)=>Ge(e.keyPrefix,e.databaseKey,t,r),
// To: assemble the write batch (manifest r plus generation-n chunk items t).
To=(e,t,r,n)=>{let i={[qe(e.keyPrefix,e.databaseKey)]:r};for(let a=0;a<t.length;a+=1){let s=Ge(e.keyPrefix,e.databaseKey,n,a);i[s]=t[a]}return i},
// Lo: serialize treeJSON and split it into maxChunkChars-sized slices.
Lo=(e,t,r)=>{let n=JSON.stringify(e),o=[];for(let i=0;i<n.length;i+=t)o.push(n.slice(i,i+t));if(o.length>r)throw new w(`syncStorage snapshot requires ${o.length} chunks but maxChunks is ${r}.`);return o},
// Po: guard against commitId / activeGeneration counter overflow.
Po=e=>{if(e.commitId>=Number.MAX_SAFE_INTEGER)throw new d("syncStorage commitId has reached Number.MAX_SAFE_INTEGER.");if(e.activeGeneration>=Number.MAX_SAFE_INTEGER)throw new d("syncStorage activeGeneration has reached Number.MAX_SAFE_INTEGER.")},
// pr: commit. Validates limits, best-effort clears any stale chunks left at
// the next generation, writes the new generation + manifest (mapping quota
// errors to w, other failures to d with cause), then updates the in-memory
// state and removes the previous generation's chunks.
pr=async(e,t)=>{Po(e);let r=e.commitId+1,n=e.activeGeneration+1,o=Lo(t,e.maxChunkChars,e.maxChunks),i={magic:dr,version:mr,activeGeneration:n,commitId:r,chunkCount:o.length},a=Ao(e),s=qe(e.keyPrefix,e.databaseKey);ur(e,n,o,i,a,s);let c=To(e,o,i,n);try{await je(e,n,null,a)}catch{}try{await e.adapter.setItems(c)}catch(m){throw lr(m)?new w("syncStorage quota exceeded during commit."):new d("syncStorage write failed during commit.",{cause:m})}let l=e.activeGeneration,u=e.activeChunkCount;e.activeGeneration=n,e.commitId=r,e.activeChunkCount=o.length,await je(e,l,u,a)};
// Be: sync-storage controller; generation/commit bookkeeping lives in the
// backend state record rather than on the controller itself.
var Be=class e extends D{constructor(t,r,n,o){super(r,o),this.backend=t,this.getSnapshot=n}static async create(t){let r=Je();if(r===null)throw new b("browser sync storage is not available in the current runtime environment.");let n=ar(t.config),o=R(t.autoCommit),i=yr(r,n.keyPrefix,n.databaseKey,n.maxChunkChars,n.maxChunks,n.maxItemBytes,n.maxTotalBytes,n.maxItems),a=await fr(i);return{controller:new e(i,o,t.getSnapshot,t.onAutoCommitError),initialTreeJSON:a.treeJSON,initialCurrentSizeBytes:a.currentSizeBytes}}async executeSingleCommit(){let t=this.getSnapshot();await pr(this.backend,t.treeJSON)}};
// hr: public factory for the sync-storage engine plugin; also exposes the
// configured backend byte limit via resolveBackendLimitBytes.
var hr=(e={})=>({init:async t=>{let r=await Be.create({config:e,autoCommit:t.autoCommit,getSnapshot:t.getSnapshot,onAutoCommitError:t.onAutoCommitError});return{controller:r.controller,initialTreeJSON:r.initialTreeJSON,initialCurrentSizeBytes:r.initialCurrentSizeBytes}},resolveBackendLimitBytes:()=>_e(e)});
// End of the bundle's module IIFE.
return vr(Do);})();