@frostpillar/frostpillar-storage-engine 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107) hide show
  1. package/LICENSE +21 -0
  2. package/README-JA.md +1205 -0
  3. package/README.md +1204 -0
  4. package/dist/drivers/file.cjs +960 -0
  5. package/dist/drivers/file.d.ts +3 -0
  6. package/dist/drivers/file.js +18 -0
  7. package/dist/drivers/indexedDB.cjs +570 -0
  8. package/dist/drivers/indexedDB.d.ts +3 -0
  9. package/dist/drivers/indexedDB.js +18 -0
  10. package/dist/drivers/localStorage.cjs +668 -0
  11. package/dist/drivers/localStorage.d.ts +3 -0
  12. package/dist/drivers/localStorage.js +23 -0
  13. package/dist/drivers/opfs.cjs +550 -0
  14. package/dist/drivers/opfs.d.ts +3 -0
  15. package/dist/drivers/opfs.js +18 -0
  16. package/dist/drivers/syncStorage.cjs +898 -0
  17. package/dist/drivers/syncStorage.d.ts +3 -0
  18. package/dist/drivers/syncStorage.js +22 -0
  19. package/dist/drivers/validation.d.ts +1 -0
  20. package/dist/drivers/validation.js +8 -0
  21. package/dist/errors/index.d.ts +32 -0
  22. package/dist/errors/index.js +48 -0
  23. package/dist/frostpillar-storage-engine.min.js +1 -0
  24. package/dist/index.cjs +2957 -0
  25. package/dist/index.d.ts +7 -0
  26. package/dist/index.js +6 -0
  27. package/dist/storage/backend/asyncDurableAutoCommitController.d.ts +26 -0
  28. package/dist/storage/backend/asyncDurableAutoCommitController.js +188 -0
  29. package/dist/storage/backend/asyncMutex.d.ts +7 -0
  30. package/dist/storage/backend/asyncMutex.js +38 -0
  31. package/dist/storage/backend/autoCommit.d.ts +2 -0
  32. package/dist/storage/backend/autoCommit.js +22 -0
  33. package/dist/storage/backend/capacity.d.ts +2 -0
  34. package/dist/storage/backend/capacity.js +27 -0
  35. package/dist/storage/backend/capacityResolver.d.ts +3 -0
  36. package/dist/storage/backend/capacityResolver.js +25 -0
  37. package/dist/storage/backend/encoding.d.ts +17 -0
  38. package/dist/storage/backend/encoding.js +148 -0
  39. package/dist/storage/backend/types.d.ts +184 -0
  40. package/dist/storage/backend/types.js +1 -0
  41. package/dist/storage/btree/recordKeyIndexBTree.d.ts +39 -0
  42. package/dist/storage/btree/recordKeyIndexBTree.js +104 -0
  43. package/dist/storage/config/config.browser.d.ts +4 -0
  44. package/dist/storage/config/config.browser.js +8 -0
  45. package/dist/storage/config/config.d.ts +1 -0
  46. package/dist/storage/config/config.js +1 -0
  47. package/dist/storage/config/config.node.d.ts +4 -0
  48. package/dist/storage/config/config.node.js +74 -0
  49. package/dist/storage/config/config.shared.d.ts +6 -0
  50. package/dist/storage/config/config.shared.js +105 -0
  51. package/dist/storage/datastore/Datastore.d.ts +47 -0
  52. package/dist/storage/datastore/Datastore.js +525 -0
  53. package/dist/storage/datastore/datastoreClose.d.ts +12 -0
  54. package/dist/storage/datastore/datastoreClose.js +60 -0
  55. package/dist/storage/datastore/datastoreKeyDefinition.d.ts +7 -0
  56. package/dist/storage/datastore/datastoreKeyDefinition.js +60 -0
  57. package/dist/storage/datastore/datastoreLifecycle.d.ts +18 -0
  58. package/dist/storage/datastore/datastoreLifecycle.js +63 -0
  59. package/dist/storage/datastore/mutationById.d.ts +29 -0
  60. package/dist/storage/datastore/mutationById.js +71 -0
  61. package/dist/storage/drivers/IndexedDB/indexedDBBackend.d.ts +11 -0
  62. package/dist/storage/drivers/IndexedDB/indexedDBBackend.js +109 -0
  63. package/dist/storage/drivers/IndexedDB/indexedDBBackendController.d.ts +27 -0
  64. package/dist/storage/drivers/IndexedDB/indexedDBBackendController.js +60 -0
  65. package/dist/storage/drivers/IndexedDB/indexedDBConfig.d.ts +7 -0
  66. package/dist/storage/drivers/IndexedDB/indexedDBConfig.js +24 -0
  67. package/dist/storage/drivers/file/fileBackend.d.ts +5 -0
  68. package/dist/storage/drivers/file/fileBackend.js +168 -0
  69. package/dist/storage/drivers/file/fileBackendController.d.ts +31 -0
  70. package/dist/storage/drivers/file/fileBackendController.js +72 -0
  71. package/dist/storage/drivers/file/fileBackendSnapshot.d.ts +10 -0
  72. package/dist/storage/drivers/file/fileBackendSnapshot.js +166 -0
  73. package/dist/storage/drivers/localStorage/localStorageBackend.d.ts +10 -0
  74. package/dist/storage/drivers/localStorage/localStorageBackend.js +156 -0
  75. package/dist/storage/drivers/localStorage/localStorageBackendController.d.ts +24 -0
  76. package/dist/storage/drivers/localStorage/localStorageBackendController.js +35 -0
  77. package/dist/storage/drivers/localStorage/localStorageConfig.d.ts +10 -0
  78. package/dist/storage/drivers/localStorage/localStorageConfig.js +16 -0
  79. package/dist/storage/drivers/localStorage/localStorageLayout.d.ts +5 -0
  80. package/dist/storage/drivers/localStorage/localStorageLayout.js +29 -0
  81. package/dist/storage/drivers/opfs/opfsBackend.d.ts +12 -0
  82. package/dist/storage/drivers/opfs/opfsBackend.js +142 -0
  83. package/dist/storage/drivers/opfs/opfsBackendController.d.ts +26 -0
  84. package/dist/storage/drivers/opfs/opfsBackendController.js +44 -0
  85. package/dist/storage/drivers/syncStorage/syncStorageAdapter.d.ts +2 -0
  86. package/dist/storage/drivers/syncStorage/syncStorageAdapter.js +123 -0
  87. package/dist/storage/drivers/syncStorage/syncStorageBackend.d.ts +11 -0
  88. package/dist/storage/drivers/syncStorage/syncStorageBackend.js +169 -0
  89. package/dist/storage/drivers/syncStorage/syncStorageBackendController.d.ts +24 -0
  90. package/dist/storage/drivers/syncStorage/syncStorageBackendController.js +34 -0
  91. package/dist/storage/drivers/syncStorage/syncStorageChunkMaintenance.d.ts +2 -0
  92. package/dist/storage/drivers/syncStorage/syncStorageChunkMaintenance.js +28 -0
  93. package/dist/storage/drivers/syncStorage/syncStorageConfig.d.ts +13 -0
  94. package/dist/storage/drivers/syncStorage/syncStorageConfig.js +42 -0
  95. package/dist/storage/drivers/syncStorage/syncStorageQuota.d.ts +3 -0
  96. package/dist/storage/drivers/syncStorage/syncStorageQuota.js +45 -0
  97. package/dist/storage/record/ordering.d.ts +3 -0
  98. package/dist/storage/record/ordering.js +7 -0
  99. package/dist/types.d.ts +125 -0
  100. package/dist/types.js +1 -0
  101. package/dist/validation/metadata.d.ts +1 -0
  102. package/dist/validation/metadata.js +7 -0
  103. package/dist/validation/payload.d.ts +7 -0
  104. package/dist/validation/payload.js +135 -0
  105. package/dist/validation/typeGuards.d.ts +1 -0
  106. package/dist/validation/typeGuards.js +7 -0
  107. package/package.json +110 -0
@@ -0,0 +1,63 @@
1
+ import { ClosedDatastoreError } from '../../errors/index.js';
2
/**
 * Tracks open/closed state and the number of in-flight operations for a
 * datastore, letting close() wait until every active operation has drained.
 */
export class DatastoreLifecycle {
    closed;
    closing;
    closeInFlight;
    activeOperationCount;
    activeOperationsDrained;
    resolveActiveOperationsDrained;
    constructor() {
        this.closed = false;
        this.closing = false;
        this.closeInFlight = null;
        this.activeOperationCount = 0;
        this.activeOperationsDrained = null;
        this.resolveActiveOperationsDrained = null;
    }
    /** True once markClosed() has run. */
    isClosed() {
        return this.closed;
    }
    /** Enters the closing phase: new operations are rejected from now on. */
    markClosing() {
        this.closing = true;
    }
    /** Finalizes the close; the instance is permanently closed. */
    markClosed() {
        this.closed = true;
        this.closing = false;
    }
    getCloseInFlight() {
        return this.closeInFlight;
    }
    setCloseInFlight(closeInFlight) {
        this.closeInFlight = closeInFlight;
    }
    /** Throws ClosedDatastoreError when the datastore is closed or closing. */
    ensureOpen() {
        if (!this.closed && !this.closing) {
            return;
        }
        throw new ClosedDatastoreError('Datastore has been closed.');
    }
    /** Registers one in-flight operation; rejects when closed/closing. */
    beginOperation() {
        this.ensureOpen();
        this.activeOperationCount += 1;
    }
    /** Unregisters one operation; resolves the drain promise at zero. */
    endOperation() {
        this.activeOperationCount -= 1;
        const notifyDrained = this.resolveActiveOperationsDrained;
        if (this.activeOperationCount !== 0 || notifyDrained === null) {
            return;
        }
        // Clear state before resolving so a waiter that immediately starts a
        // new operation observes a fresh drain promise, not the spent one.
        this.resolveActiveOperationsDrained = null;
        this.activeOperationsDrained = null;
        notifyDrained();
    }
    /** Resolves once no operations are in flight (immediately when idle). */
    waitForActiveOperationsToDrain() {
        if (this.activeOperationCount === 0) {
            return Promise.resolve();
        }
        this.activeOperationsDrained ??= new Promise((resolve) => {
            this.resolveActiveOperationsDrained = resolve;
        });
        return this.activeOperationsDrained;
    }
}
@@ -0,0 +1,29 @@
1
+ import type { KeyedRecord, PersistedRecord } from '../../types.js';
2
+ import type { CapacityState } from '../backend/types.js';
3
+ import type { EntryId, RecordKeyIndexBTree } from '../btree/recordKeyIndexBTree.js';
4
/**
 * Looks up the record stored under `entryId` and returns its public
 * representation, or null when the id is absent from the index.
 */
export declare const getPublicRecordById: (keyIndex: RecordKeyIndexBTree<unknown, PersistedRecord>, entryId: EntryId) => KeyedRecord<unknown> | null;
/** Options for {@link updateRecordById}. */
export interface UpdateByIdOptions {
    keyIndex: RecordKeyIndexBTree<unknown, PersistedRecord>;
    /** Id of the record to patch. */
    id: EntryId;
    /** Fields shallow-merged over the existing payload. */
    patch: Partial<KeyedRecord<unknown>['payload']>;
    /** Capacity limit, or null when no capacity is enforced. */
    capacityState: CapacityState | null;
    /** Datastore size before the mutation, in bytes. */
    currentSizeBytes: number;
    /** When true, the merged payload is not re-validated/normalized. */
    skipPayloadValidation: boolean;
}
/** Outcome of {@link updateRecordById}. */
export interface UpdateByIdResult {
    /** False when no record exists under the given id. */
    updated: boolean;
    /** Datastore size after the mutation, in bytes (never negative). */
    currentSizeBytes: number;
    /** Absolute byte delta caused by the mutation (0 on a miss). */
    durabilitySignalBytes: number;
}
/**
 * Applies a shallow patch to a record in place. Throws QuotaExceededError
 * when growth would exceed the configured capacity, and IndexCorruptionError
 * when the index cannot locate the entry it just peeked.
 */
export declare const updateRecordById: (options: UpdateByIdOptions) => UpdateByIdResult;
/** Options for {@link deleteRecordById}. */
export interface DeleteByIdOptions {
    keyIndex: RecordKeyIndexBTree<unknown, PersistedRecord>;
    /** Id of the record to remove. */
    id: EntryId;
    /** Datastore size before the deletion, in bytes. */
    currentSizeBytes: number;
}
/** Outcome of {@link deleteRecordById}. */
export interface DeleteByIdResult {
    /** False when no record exists under the given id. */
    deleted: boolean;
    /** Datastore size after the deletion, in bytes (never negative). */
    currentSizeBytes: number;
    /** Bytes freed by the deletion (0 on a miss). */
    durabilitySignalBytes: number;
}
export declare const deleteRecordById: (options: DeleteByIdOptions) => DeleteByIdResult;
@@ -0,0 +1,71 @@
1
+ import { IndexCorruptionError, QuotaExceededError, } from '../../errors/index.js';
2
+ import { toPublicRecord } from '../record/ordering.js';
3
+ import { validateAndNormalizePayload } from '../../validation/payload.js';
4
+ import { estimateKeySizeBytes, estimateRecordSizeBytes } from '../backend/encoding.js';
5
// Returns the public view of the record stored under `entryId`, or null when
// the id is not present in the key index.
export const getPublicRecordById = (keyIndex, entryId) => {
    const entry = keyIndex.peekById(entryId);
    return entry === null
        ? null
        : toPublicRecord(entryId, entry.key, entry.value);
};
12
// Shallow-merges `patch` over the record's payload and computes the new
// encoded size. With validation skipped, the size is estimated directly from
// key + merged payload; otherwise the merged payload is validated/normalized
// first and the key's size is added to the validated payload size.
const buildMergedPayload = (targetRecord, patch, entryKey, skipValidation) => {
    const merged = { ...targetRecord.payload, ...patch };
    if (!skipValidation) {
        const validated = validateAndNormalizePayload(merged);
        return {
            payload: validated.payload,
            sizeBytes: validated.sizeBytes + estimateKeySizeBytes(entryKey),
        };
    }
    return {
        payload: merged,
        sizeBytes: estimateRecordSizeBytes(entryKey, merged),
    };
};
27
// Patches the record stored under `options.id`. Enforces the capacity
// ceiling before mutating the index and reports the resulting size plus the
// absolute byte delta for durability accounting.
export const updateRecordById = (options) => {
    const entry = options.keyIndex.peekById(options.id);
    if (entry === null) {
        // Unknown id: report a no-op without touching sizes.
        return { updated: false, currentSizeBytes: options.currentSizeBytes, durabilitySignalBytes: 0 };
    }
    const previousSize = entry.value.sizeBytes;
    const merged = buildMergedPayload(entry.value, options.patch, entry.key, options.skipPayloadValidation);
    const sizeDelta = merged.sizeBytes - previousSize;
    // Only growth can violate the capacity ceiling; shrinking is always fine.
    const capacity = options.capacityState;
    const wouldOverflow = capacity !== null &&
        sizeDelta > 0 &&
        options.currentSizeBytes + sizeDelta > capacity.maxSizeBytes;
    if (wouldOverflow) {
        throw new QuotaExceededError('updateById exceeds configured capacity.maxSize boundary.');
    }
    const replacement = {
        payload: merged.payload,
        sizeBytes: merged.sizeBytes,
    };
    if (options.keyIndex.updateById(options.id, replacement) === null) {
        // peekById just found this id, so a failed update means the index
        // structures disagree with each other.
        throw new IndexCorruptionError('Record index state is inconsistent during updateById.');
    }
    return {
        updated: true,
        currentSizeBytes: Math.max(0, options.currentSizeBytes + sizeDelta),
        durabilitySignalBytes: Math.abs(sizeDelta),
    };
};
56
// Removes the record stored under `options.id` and reports the freed bytes.
// A miss is a no-op that leaves the reported size untouched.
export const deleteRecordById = (options) => {
    const removed = options.keyIndex.removeById(options.id);
    if (removed === null) {
        return {
            deleted: false,
            currentSizeBytes: options.currentSizeBytes,
            durabilitySignalBytes: 0,
        };
    }
    const freedBytes = removed.value.sizeBytes;
    const nextSize = options.currentSizeBytes - freedBytes;
    return {
        deleted: true,
        // Clamp at zero in case accounting ever drifted below the freed size.
        currentSizeBytes: nextSize > 0 ? nextSize : 0,
        durabilitySignalBytes: freedBytes,
    };
};
@@ -0,0 +1,11 @@
1
+ import type { BTreeJSON } from '../../../types.js';
2
+ import type { IDBDatabaseHandle, IDBFactoryAdapter } from '../../backend/types.js';
3
/** Result of reading the persisted metadata record from IndexedDB. */
export interface LoadedIndexedDBSnapshot {
    /** Serialized B-tree, or null when no snapshot has been committed yet. */
    treeJSON: BTreeJSON<unknown, unknown> | null;
    /** UTF-8 byte length of the serialized tree (0 when none exists). */
    currentSizeBytes: number;
    /** Commit counter recovered from the metadata (0 when none exists). */
    commitId: number;
}
/** Returns `globalThis.indexedDB` when present, otherwise null. */
export declare const detectGlobalIndexedDB: () => IDBFactoryAdapter | null;
/** Opens the database, creating the data and metadata object stores on upgrade. */
export declare const openIndexedDB: (factory: IDBFactoryAdapter, databaseName: string, objectStoreName: string, version: number) => Promise<IDBDatabaseHandle>;
/** Loads and validates the snapshot metadata; rejects on magic/version mismatch. */
export declare const loadIndexedDBSnapshot: (db: IDBDatabaseHandle, _objectStoreName: string) => Promise<LoadedIndexedDBSnapshot>;
/** Writes the snapshot plus its commit id into the reserved metadata store. */
export declare const commitIndexedDBSnapshot: (db: IDBDatabaseHandle, _objectStoreName: string, treeJSON: BTreeJSON<unknown, unknown>, commitId: number) => Promise<void>;
@@ -0,0 +1,109 @@
1
+ import { PageCorruptionError, StorageEngineError } from '../../../errors/index.js';
2
+ import { parseNonNegativeSafeInteger } from '../../../validation/metadata.js';
3
+ import { computeUtf8ByteLength } from '../../backend/encoding.js';
4
// Sentinel string + schema version stamped into the persisted metadata
// record; loadIndexedDBSnapshot rejects records whose values do not match.
const IDB_MAGIC = 'FPIDB_META';
const IDB_VERSION_VALUE = 2;
// Reserved object store and key under which the single metadata record is
// kept ('_meta' is rejected as a user store name by parseIndexedDBConfig).
const IDB_META_STORE = '_meta';
const IDB_META_KEY = 'config';
8
+ // ---------------------------------------------------------------------------
9
+ // Global detection
10
+ // ---------------------------------------------------------------------------
11
// Probes the global scope for an IndexedDB factory. Returns null when the
// runtime has none (e.g. Node.js) or when merely touching the global throws.
export const detectGlobalIndexedDB = () => {
    try {
        const globalScope = globalThis;
        const candidate = globalScope.indexedDB;
        return candidate ?? null;
    }
    catch {
        return null;
    }
};
24
+ // ---------------------------------------------------------------------------
25
+ // Promise wrappers for IDB event-based API
26
+ // ---------------------------------------------------------------------------
27
// Adapts a one-shot IDBRequest to a promise: fulfilled with the request's
// result, rejected with a StorageEngineError carrying the request's message.
const idbRequest = (req) => new Promise((resolve, reject) => {
    req.onsuccess = (event) => resolve(event.target.result);
    req.onerror = (event) => reject(new StorageEngineError(`IndexedDB request failed: ${String(event.target.error?.message ?? 'unknown')}`));
});
35
// Adapts an IDBTransaction's completion to a promise; rejection carries a
// generic error because the transaction object exposes no message here.
const idbTransaction = (tx) => new Promise((resolve, reject) => {
    tx.oncomplete = () => resolve();
    tx.onerror = () => reject(new StorageEngineError('IndexedDB transaction failed.'));
});
41
+ // ---------------------------------------------------------------------------
42
+ // Open
43
+ // ---------------------------------------------------------------------------
44
// Opens the named database, creating both the data object store and the
// reserved metadata store during a version upgrade.
export const openIndexedDB = (factory, databaseName, objectStoreName, version) => new Promise((resolve, reject) => {
    const request = factory.open(databaseName, version);
    request.onupgradeneeded = (event) => {
        const db = event.target.result;
        if (db === null) {
            return;
        }
        for (const storeName of [objectStoreName, IDB_META_STORE]) {
            if (!db.objectStoreNames.contains(storeName)) {
                db.createObjectStore(storeName);
            }
        }
    };
    request.onsuccess = (event) => {
        const db = event.target.result;
        if (db !== null) {
            resolve(db);
            return;
        }
        reject(new StorageEngineError('IndexedDB open returned null database.'));
    };
    request.onerror = (event) => {
        reject(new StorageEngineError(`IndexedDB open failed: ${String(event.target.error?.message ?? 'unknown')}`));
    };
});
70
+ // ---------------------------------------------------------------------------
71
+ // Load snapshot
72
+ // ---------------------------------------------------------------------------
73
// Loads the persisted snapshot from the reserved metadata store. Returns an
// empty snapshot (null tree, size 0, commitId 0) when no metadata record
// exists yet; throws when a record is present but malformed.
export const loadIndexedDBSnapshot = async (db, _objectStoreName) => {
    // The snapshot lives entirely in the metadata store; the data store name
    // is accepted for interface symmetry but not read here.
    const tx = db.transaction([IDB_META_STORE], 'readonly');
    const txDone = idbTransaction(tx);
    const metaStore = tx.objectStore(IDB_META_STORE);
    const metaRaw = await idbRequest(metaStore.get(IDB_META_KEY));
    // Wait for the transaction itself to finish before trusting the result.
    await txDone;
    if (metaRaw === null || metaRaw === undefined) {
        // First run: nothing persisted yet.
        return { treeJSON: null, currentSizeBytes: 0, commitId: 0 };
    }
    const meta = metaRaw;
    if (meta.magic !== IDB_MAGIC || meta.version !== IDB_VERSION_VALUE) {
        throw new StorageEngineError('IndexedDB metadata magic/version mismatch.');
    }
    const commitId = parseNonNegativeSafeInteger(meta.commitId, 'meta.commitId', 'IndexedDB');
    const treeJSON = meta.treeJSON;
    if (typeof treeJSON !== 'object' || treeJSON === null || Array.isArray(treeJSON)) {
        throw new PageCorruptionError('treeJSON must be a non-null plain object.');
    }
    // Size is recomputed from the serialized form rather than persisted.
    const currentSizeBytes = computeUtf8ByteLength(JSON.stringify(treeJSON));
    return { treeJSON, currentSizeBytes, commitId };
};
94
+ // ---------------------------------------------------------------------------
95
+ // Commit snapshot
96
+ // ---------------------------------------------------------------------------
97
// Persists the snapshot and its commit counter as the single metadata
// record. Resolves only after the readwrite transaction has completed.
export const commitIndexedDBSnapshot = async (db, _objectStoreName, treeJSON, commitId) => {
    const tx = db.transaction([IDB_META_STORE], 'readwrite');
    const completion = idbTransaction(tx);
    tx.objectStore(IDB_META_STORE).put({
        magic: IDB_MAGIC,
        version: IDB_VERSION_VALUE,
        commitId,
        treeJSON,
    }, IDB_META_KEY);
    await completion;
};
@@ -0,0 +1,27 @@
1
+ import type { AutoCommitConfig, BTreeJSON, IndexedDBConfig } from '../../../types.js';
2
+ import { AsyncDurableAutoCommitController } from '../../backend/asyncDurableAutoCommitController.js';
3
+ import type { DurableBackendController } from '../../backend/types.js';
4
/** Snapshot shape handed to the controller on every commit. */
export interface IndexedDBBackendControllerSnapshot {
    treeJSON: BTreeJSON<unknown, unknown>;
}
/** Options for {@link IndexedDBBackendController.create}. */
export interface IndexedDBBackendControllerCreateOptions {
    config: IndexedDBConfig;
    /** Optional auto-commit tuning; defaults are applied during create(). */
    autoCommit?: AutoCommitConfig;
    /** Supplies the current in-memory snapshot at commit time. */
    getSnapshot: () => IndexedDBBackendControllerSnapshot;
    /** Invoked with the failure when a background auto-commit errors. */
    onAutoCommitError: (error: unknown) => void;
}
/** Result of a successful bootstrap. */
export interface IndexedDBBackendControllerCreateResult {
    controller: IndexedDBBackendController;
    /** Previously committed tree, or null on first run. */
    initialTreeJSON: BTreeJSON<unknown, unknown> | null;
    /** Byte size of the loaded snapshot (0 when none existed). */
    initialCurrentSizeBytes: number;
}
/**
 * Durable backend controller persisting whole-tree snapshots to IndexedDB.
 * Construct via the static async `create` factory (constructor is private).
 */
export declare class IndexedDBBackendController extends AsyncDurableAutoCommitController implements DurableBackendController {
    private db;
    private readonly objectStoreName;
    private readonly getSnapshot;
    private commitId;
    private constructor();
    static create(options: IndexedDBBackendControllerCreateOptions): Promise<IndexedDBBackendControllerCreateResult>;
    protected executeSingleCommit(): Promise<void>;
    protected onCloseAfterDrain(): Promise<void>;
}
@@ -0,0 +1,60 @@
1
+ import { StorageEngineError, UnsupportedBackendError, toErrorInstance } from '../../../errors/index.js';
2
+ import { parseAutoCommitConfig } from '../../config/config.shared.js';
3
+ import { parseIndexedDBConfig } from './indexedDBConfig.js';
4
+ import { AsyncDurableAutoCommitController } from '../../backend/asyncDurableAutoCommitController.js';
5
+ import { commitIndexedDBSnapshot, detectGlobalIndexedDB, loadIndexedDBSnapshot, openIndexedDB, } from './indexedDBBackend.js';
6
// Durable backend controller that persists whole-tree snapshots into
// IndexedDB, inheriting auto-commit scheduling and close/drain behavior
// from AsyncDurableAutoCommitController.
export class IndexedDBBackendController extends AsyncDurableAutoCommitController {
    db;
    objectStoreName;
    getSnapshot;
    // Monotonic counter persisted alongside every snapshot.
    commitId;
    constructor(db, objectStoreName, commitId, autoCommit, getSnapshot, onAutoCommitError) {
        super(autoCommit, onAutoCommitError);
        this.db = db;
        this.objectStoreName = objectStoreName;
        this.commitId = commitId;
        this.getSnapshot = getSnapshot;
    }
    // Bootstraps the controller: verifies indexedDB availability, validates
    // config, opens the database, and loads any previously committed snapshot.
    static async create(options) {
        const factory = detectGlobalIndexedDB();
        if (factory === null) {
            throw new UnsupportedBackendError('indexedDB is not available in the current runtime environment.');
        }
        const idbConfig = parseIndexedDBConfig(options.config);
        const { databaseName, objectStoreName, version } = idbConfig;
        const autoCommit = parseAutoCommitConfig(options.autoCommit);
        const db = await openIndexedDB(factory, databaseName, objectStoreName, version);
        let loaded;
        try {
            loaded = await loadIndexedDBSnapshot(db, objectStoreName);
        }
        catch (error) {
            // Don't leak the open connection when snapshot loading fails.
            try {
                db.close();
            }
            catch {
                // Preserve the original bootstrap failure as the primary error.
            }
            throw toErrorInstance(error, 'IndexedDB bootstrap failed with a non-Error value.');
        }
        const controller = new IndexedDBBackendController(db, objectStoreName, loaded.commitId, autoCommit, options.getSnapshot, options.onAutoCommitError);
        return {
            controller,
            initialTreeJSON: loaded.treeJSON,
            initialCurrentSizeBytes: loaded.currentSizeBytes,
        };
    }
    // Writes one snapshot under the next commit id. The in-memory counter is
    // advanced only after the write succeeds, so a failed commit can be
    // retried with the same id.
    async executeSingleCommit() {
        const snapshot = this.getSnapshot();
        if (this.commitId >= Number.MAX_SAFE_INTEGER) {
            throw new StorageEngineError('IndexedDB commitId has reached Number.MAX_SAFE_INTEGER.');
        }
        const nextCommitId = this.commitId + 1;
        await commitIndexedDBSnapshot(this.db, this.objectStoreName, snapshot.treeJSON, nextCommitId);
        this.commitId = nextCommitId;
    }
    // Close hook run by the base class (after pending commits drain, per its
    // name) — releases the database connection.
    onCloseAfterDrain() {
        this.db.close();
        return Promise.resolve();
    }
}
@@ -0,0 +1,7 @@
1
+ import type { IndexedDBConfig } from '../../../types.js';
2
/** Validated, defaulted IndexedDB settings produced by parseIndexedDBConfig. */
export interface ParsedIndexedDBConfig {
    /** Database name; defaults to 'frostpillar'. */
    databaseName: string;
    /** Data object store name; '_meta' is reserved and rejected. */
    objectStoreName: string;
    /** Positive safe-integer database version; defaults to 1. */
    version: number;
}
/** Applies defaults to the optional config and validates every field. */
export declare const parseIndexedDBConfig: (config?: IndexedDBConfig) => ParsedIndexedDBConfig;
@@ -0,0 +1,24 @@
1
+ import { ConfigurationError } from '../../../errors/index.js';
2
// Validates that `value` is a non-empty (post-trim) string.
// Fix: also guard the type — untyped JS callers could previously pass a
// non-string (e.g. a number) and crash with a TypeError on .trim() instead
// of receiving the intended ConfigurationError.
const ensureNonEmptyString = (value, optionName) => {
    if (typeof value !== 'string' || value.trim().length === 0) {
        throw new ConfigurationError(`${optionName} must be a non-empty string.`);
    }
};
/**
 * Applies defaults to the optional IndexedDB config and validates it.
 * Defaults: databaseName 'frostpillar', objectStoreName 'frostpillar',
 * version 1. Throws ConfigurationError for missing/empty/non-string names,
 * the reserved '_meta' store name, or a non-positive/unsafe version.
 */
export const parseIndexedDBConfig = (config) => {
    const databaseName = config?.databaseName ?? 'frostpillar';
    const objectStoreName = config?.objectStoreName ?? 'frostpillar';
    const version = config?.version ?? 1;
    ensureNonEmptyString(databaseName, 'indexedDB.databaseName');
    ensureNonEmptyString(objectStoreName, 'indexedDB.objectStoreName');
    if (objectStoreName === '_meta') {
        // '_meta' holds the engine's own metadata record; letting user data
        // share it would corrupt snapshot bookkeeping.
        throw new ConfigurationError('indexedDB.objectStoreName must not be "_meta" because it is reserved for internal metadata.');
    }
    if (!Number.isSafeInteger(version) || version <= 0) {
        throw new ConfigurationError('indexedDB.version must be a positive safe integer.');
    }
    return {
        databaseName,
        objectStoreName,
        version,
    };
};
@@ -0,0 +1,5 @@
1
+ import type { FileBackendConfig } from '../../../types.js';
2
+ import type { FileBackendState } from '../../backend/types.js';
3
/** Resolves/validates paths, acquires the lock file, and clears temp artifacts. */
export declare const createFileBackend: (config: FileBackendConfig) => FileBackendState;
/** Best-effort removal of non-active generation files; never throws. */
export declare const cleanupStaleGenerationFiles: (backend: FileBackendState) => void;
/** Deletes the lock file (if present) and clears `lockAcquired`. */
export declare const releaseFileLock: (backend: FileBackendState) => void;
@@ -0,0 +1,168 @@
1
+ import { closeSync, existsSync, mkdirSync, openSync, readdirSync, readFileSync, unlinkSync, writeSync, } from 'node:fs';
2
+ import { basename, dirname, join } from 'node:path';
3
+ import { DatabaseLockedError, toStorageEngineError, } from '../../../errors/index.js';
4
+ import { ensureCanonicalPathWithinWorkingDirectory, resolveFileDataPath, } from '../../config/config.node.js';
5
// Extracts the Node.js errno code (e.g. 'EEXIST') from a thrown value, or
// undefined when the value is not an Error / carries no code.
const toNodeErrorCode = (error) => (error instanceof Error ? error.code : undefined);
12
// Checks whether a process with the given pid exists.
const isProcessAlive = (pid) => {
    try {
        // Signal 0 performs existence/permission checking without delivering
        // an actual signal.
        process.kill(pid, 0);
        return true;
    }
    catch (error) {
        // ESRCH: no such process — genuinely dead. Anything else (notably
        // EPERM: exists but owned by another user) is treated as alive.
        return error.code !== 'ESRCH';
    }
};
27
// Attempts to remove a lock file left behind by a dead process. Returns true
// only when the lock was verifiably stale and was deleted; every doubtful
// case (unreadable, malformed, owner alive) conservatively keeps the lock.
const tryRecoverStaleLock = (lockPath) => {
    try {
        const parsed = JSON.parse(readFileSync(lockPath, 'utf8'));
        const ownerPid = parsed.pid;
        if (typeof ownerPid !== 'number' || !Number.isInteger(ownerPid)) {
            return false; // Malformed content — leave it in place.
        }
        if (isProcessAlive(ownerPid)) {
            return false; // Owner still running — the lock is genuine.
        }
        // Owner is gone: the lock is stale, remove it.
        unlinkSync(lockPath);
        return true;
    }
    catch {
        return false; // Unreadable / delete failed — stay conservative.
    }
};
45
// Atomically creates the lock file recording our pid and a timestamp.
// The 'wx' flag fails with EEXIST when the file already exists — that
// create-if-absent primitive is what makes the lock protocol safe.
const writeLockFile = (lockPath) => {
    const fd = openSync(lockPath, 'wx');
    try {
        const body = JSON.stringify({ pid: process.pid, createdAt: new Date().toISOString() });
        writeSync(fd, body, null, 'utf8');
    }
    finally {
        closeSync(fd);
    }
};
55
// Re-reads the lock file and confirms it still records our pid. Used right
// after stale-lock recovery to detect a concurrent process that won the
// re-acquisition race.
const verifyLockOwnership = (lockPath) => {
    let ownerPid;
    try {
        ownerPid = JSON.parse(readFileSync(lockPath, 'utf8')).pid;
    }
    catch {
        throw new DatabaseLockedError('Lock file became unreadable during stale lock recovery.');
    }
    if (ownerPid !== process.pid) {
        throw new DatabaseLockedError('Lock was overtaken by another process during stale lock recovery.');
    }
};
70
// Acquires the exclusive lock file, recovering locks left behind by dead
// processes. Throws DatabaseLockedError when another live process owns the
// lock; any other filesystem failure is wrapped as a storage error.
const acquireFileLock = (lockPath) => {
    try {
        writeLockFile(lockPath);
    }
    catch (error) {
        const code = toNodeErrorCode(error);
        if (code === 'EEXIST') {
            // A lock file already exists — it may belong to a dead process.
            if (tryRecoverStaleLock(lockPath)) {
                try {
                    // Re-create the lock, then re-read it to confirm we (and
                    // not a process racing the same recovery) won ownership.
                    writeLockFile(lockPath);
                    verifyLockOwnership(lockPath);
                    return;
                }
                catch (retryError) {
                    if (retryError instanceof DatabaseLockedError) {
                        throw retryError;
                    }
                    // Any other retry failure also means we did not get the lock.
                    throw new DatabaseLockedError('Datastore is locked by another process.');
                }
            }
            throw new DatabaseLockedError('Datastore is locked by another process.');
        }
        // Non-EEXIST failures (permissions, I/O) are environment errors.
        throw toStorageEngineError(error, 'Failed to acquire file lock.');
    }
};
95
// Deletes temporary files left behind by an interrupted commit: the sidecar
// replacement (<sidecar>.tmp) and any generation temp files
// (<base>.g.<n>.tmp). Failures are wrapped as storage errors.
const cleanupFileTempArtifacts = (backend) => {
    try {
        const sidecarTemp = `${backend.sidecarPath}.tmp`;
        if (existsSync(sidecarTemp)) {
            unlinkSync(sidecarTemp);
        }
        const generationPrefix = `${backend.baseFileName}.g.`;
        for (const name of readdirSync(backend.directoryPath)) {
            const isGenerationTemp = name.startsWith(generationPrefix) && name.endsWith('.tmp');
            if (isGenerationTemp) {
                unlinkSync(join(backend.directoryPath, name));
            }
        }
    }
    catch (error) {
        throw toStorageEngineError(error, 'Failed to cleanup temporary durability artifacts');
    }
};
113
// Builds the file-backend state: resolves the data file path, creates its
// directory, takes the process-exclusive lock, and clears any temporary
// artifacts left behind by an interrupted commit.
export const createFileBackend = (config) => {
    const dataFilePath = resolveFileDataPath(config);
    const directoryPath = dirname(dataFilePath);
    const baseFileName = basename(dataFilePath);
    // Sidecar metadata and lock file live next to the data file.
    const sidecarPath = `${dataFilePath}.meta.json`;
    const lockPath = `${dataFilePath}.lock`;
    // Path confinement check — presumably rejects paths resolving outside
    // the working directory; see config.node.js for the exact rule.
    ensureCanonicalPathWithinWorkingDirectory(dataFilePath, 'resolvedDataFilePath');
    mkdirSync(directoryPath, { recursive: true });
    acquireFileLock(lockPath);
    const backend = {
        dataFilePath,
        directoryPath,
        baseFileName,
        sidecarPath,
        lockPath,
        // Generation 0 is the initial active data file name.
        activeDataFile: `${baseFileName}.g.0`,
        commitId: 0,
        lockAcquired: true,
    };
    try {
        cleanupFileTempArtifacts(backend);
    }
    catch (error) {
        // Never leave the lock held when bootstrap fails.
        releaseFileLock(backend);
        throw error;
    }
    return backend;
};
141
// Best-effort removal of completed (non-.tmp) generation files other than
// the active one. Swallows all errors: leftover generations never affect
// integrity because the sidecar names the single active generation file.
export const cleanupStaleGenerationFiles = (backend) => {
    try {
        const generationPrefix = `${backend.baseFileName}.g.`;
        for (const name of readdirSync(backend.directoryPath)) {
            if (!name.startsWith(generationPrefix) || name.endsWith('.tmp')) {
                continue;
            }
            if (name === backend.activeDataFile) {
                continue;
            }
            unlinkSync(join(backend.directoryPath, name));
        }
    }
    catch {
        // Intentionally ignored — cleanup is opportunistic.
    }
};
158
// Removes the lock file (when present) and marks the lock as released.
// Filesystem failures are surfaced as storage errors.
export const releaseFileLock = (backend) => {
    try {
        if (existsSync(backend.lockPath)) {
            unlinkSync(backend.lockPath);
        }
        backend.lockAcquired = false;
    }
    catch (error) {
        throw toStorageEngineError(error, 'Failed to release file lock during close()');
    }
};
@@ -0,0 +1,31 @@
1
+ import type { AutoCommitConfig, BTreeJSON, FileBackendConfig } from '../../../types.js';
2
+ import { AsyncDurableAutoCommitController } from '../../backend/asyncDurableAutoCommitController.js';
3
+ import type { DurableBackendController } from '../../backend/types.js';
4
/** Snapshot shape handed to the controller on every commit. */
export interface FileBackendControllerSnapshot {
    treeJSON: BTreeJSON<unknown, unknown>;
}
/** Options for {@link FileBackendController.create}. */
export interface FileBackendControllerCreateOptions {
    config: FileBackendConfig;
    /** Optional auto-commit tuning. */
    autoCommit?: AutoCommitConfig;
    /** Test-only callbacks around the commit sequence. */
    testHooks?: FileBackendControllerTestHooks;
    /** Supplies the current in-memory snapshot at commit time. */
    getSnapshot: () => FileBackendControllerSnapshot;
    /** Invoked with the failure when a background auto-commit errors. */
    onAutoCommitError: (error: unknown) => void;
}
/** Hooks letting tests observe or interleave with the commit sequence. */
export interface FileBackendControllerTestHooks {
    beforeCommit?: () => void | Promise<void>;
    afterCommit?: () => void | Promise<void>;
}
/** Result of a successful bootstrap. */
export interface FileBackendControllerCreateResult {
    controller: FileBackendController;
    /** Previously committed tree, or null when none was loaded. */
    initialTreeJSON: BTreeJSON<unknown, unknown> | null;
    /** Byte size of the loaded snapshot (0 when none existed). */
    initialCurrentSizeBytes: number;
}
/**
 * Durable backend controller persisting whole-tree snapshots to a data file.
 * Construct via the static `create` factory (the constructor is private);
 * unlike the IndexedDB variant, `create` here is synchronous.
 */
export declare class FileBackendController extends AsyncDurableAutoCommitController implements DurableBackendController {
    private readonly backend;
    private readonly getSnapshot;
    private readonly testHooks;
    private constructor();
    static create(options: FileBackendControllerCreateOptions): FileBackendControllerCreateResult;
    protected executeSingleCommit(): Promise<void>;
    protected onCloseAfterDrain(): Promise<void>;
}