@livestore/wa-sqlite 1.0.1-dev.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +78 -0
  3. package/dist/wa-sqlite-async.mjs +16 -0
  4. package/dist/wa-sqlite-async.wasm +0 -0
  5. package/dist/wa-sqlite-jspi.mjs +16 -0
  6. package/dist/wa-sqlite-jspi.wasm +0 -0
  7. package/dist/wa-sqlite.mjs +16 -0
  8. package/dist/wa-sqlite.wasm +0 -0
  9. package/package.json +45 -0
  10. package/src/FacadeVFS.js +508 -0
  11. package/src/VFS.js +222 -0
  12. package/src/WebLocksMixin.js +412 -0
  13. package/src/examples/AccessHandlePoolVFS.js +458 -0
  14. package/src/examples/IDBBatchAtomicVFS.js +820 -0
  15. package/src/examples/IDBMirrorVFS.js +875 -0
  16. package/src/examples/MemoryAsyncVFS.js +100 -0
  17. package/src/examples/MemoryVFS.js +176 -0
  18. package/src/examples/OPFSAdaptiveVFS.js +437 -0
  19. package/src/examples/OPFSAnyContextVFS.js +300 -0
  20. package/src/examples/OPFSCoopSyncVFS.js +590 -0
  21. package/src/examples/OPFSPermutedVFS.js +1214 -0
  22. package/src/examples/README.md +89 -0
  23. package/src/examples/tag.js +82 -0
  24. package/src/sqlite-api.js +914 -0
  25. package/src/sqlite-constants.js +275 -0
  26. package/src/types/globals.d.ts +60 -0
  27. package/src/types/index.d.ts +1302 -0
  28. package/src/types/tsconfig.json +6 -0
  29. package/test/AccessHandlePoolVFS.test.js +27 -0
  30. package/test/IDBBatchAtomicVFS.test.js +97 -0
  31. package/test/IDBMirrorVFS.test.js +27 -0
  32. package/test/MemoryAsyncVFS.test.js +27 -0
  33. package/test/MemoryVFS.test.js +27 -0
  34. package/test/OPFSAdaptiveVFS.test.js +27 -0
  35. package/test/OPFSAnyContextVFS.test.js +27 -0
  36. package/test/OPFSCoopSyncVFS.test.js +27 -0
  37. package/test/OPFSPermutedVFS.test.js +27 -0
  38. package/test/TestContext.js +96 -0
  39. package/test/WebLocksMixin.test.js +521 -0
  40. package/test/api.test.js +49 -0
  41. package/test/api_exec.js +89 -0
  42. package/test/api_misc.js +63 -0
  43. package/test/api_statements.js +426 -0
  44. package/test/callbacks.test.js +373 -0
  45. package/test/sql.test.js +64 -0
  46. package/test/sql_0001.js +49 -0
  47. package/test/sql_0002.js +52 -0
  48. package/test/sql_0003.js +83 -0
  49. package/test/sql_0004.js +81 -0
  50. package/test/sql_0005.js +76 -0
  51. package/test/test-worker.js +204 -0
  52. package/test/vfs_xAccess.js +2 -0
  53. package/test/vfs_xClose.js +52 -0
  54. package/test/vfs_xOpen.js +91 -0
  55. package/test/vfs_xRead.js +38 -0
  56. package/test/vfs_xWrite.js +36 -0
@@ -0,0 +1,1214 @@
1
+ // Copyright 2024 Roy T. Hashimoto. All Rights Reserved.
2
+ import { FacadeVFS } from '../FacadeVFS.js';
3
+ import * as VFS from '../VFS.js';
4
+ import { WebLocksMixin } from '../WebLocksMixin.js';
5
+
6
// Options for navigator.locks.request().
/** @type {LockOptions} */ const SHARED = { mode: 'shared' };
/** @type {LockOptions} */ const POLL_SHARED = { ifAvailable: true, mode: 'shared' };
/** @type {LockOptions} */ const POLL_EXCLUSIVE = { ifAvailable: true, mode: 'exclusive' };

// Default number of transactions between flushing the OPFS file and
// reclaiming free page offsets. Used only when synchronous != 'full'.
const DEFAULT_FLUSH_INTERVAL = 64;

// Used only for debug logging.
const contextId = Math.random().toString(36).slice(2);
17
+
18
+ /**
19
+ * @typedef {Object} Transaction
20
+ * @property {number} txId
21
+ * @property {Map<number, { offset: number, digest: Uint32Array }>} [pages]
22
+ * @property {number} [fileSize]
23
+ * @property {number} [oldestTxId]
24
+ * @property {number[]} [reclaimable]
25
+ */
26
+
27
+ /**
28
+ * @typedef {Object} AccessRequest
29
+ * @property {boolean} exclusive
30
+ */
31
+
32
// Per-open-file state. Most members are populated only for the main
// database file (SQLITE_OPEN_MAIN_DB); journals and other files use
// just path/flags/accessHandle.
class File {
  /** @type {string} */ path;
  /** @type {number} */ flags;
  /** @type {FileSystemSyncAccessHandle} */ accessHandle;

  // Members below are only used for SQLITE_OPEN_MAIN_DB.

  /** @type {number} */ pageSize; // SQLite page size, 0 until known
  /** @type {number} */ fileSize; // virtual file size exposed to SQLite

  /** @type {IDBDatabase} */ idb; // holds page map and pending transactions

  /** @type {Transaction} */ viewTx; // last transaction incorporated
  /** @type {function?} */ viewReleaser; // releases the view lock in #setView

  /** @type {BroadcastChannel} */ broadcastChannel;
  /** @type {(Transaction|AccessRequest)[]} */ broadcastReceived; // queued until unlocked

  /** @type {Map<number, number>} */ mapPageToOffset; // page index -> file offset
  /** @type {Map<number, Transaction>} */ mapTxToPending;
  /** @type {Set<number>} */ freeOffsets; // unused file offsets

  /** @type {number} */ lockState; // SQLITE_LOCK_* value
  /** @type {{read?: function, write?: function, reserved?: function, hint?: function}} */ locks;

  /** @type {AbortController} */ abortController; // aborts I/O on fatal error

  /** @type {Transaction?} */ txActive; // transaction in progress
  /** @type {number} */ txRealFileSize; // physical file size
  /** @type {boolean} */ txIsOverwrite; // VACUUM in progress
  /** @type {boolean} */ txWriteHint; // next transaction expected to write

  /** @type {'full'|'normal'} */ synchronous; // PRAGMA synchronous setting
  /** @type {number} */ flushInterval; // transactions between flushes

  /**
   * @param {string} pathname
   * @param {number} flags
   */
  constructor(pathname, flags) {
    this.path = pathname;
    this.flags = flags;
  }

  /**
   * Open (optionally creating) the OPFS file, and for a main database
   * also open the IndexedDB database holding its page map.
   * @param {string} pathname
   * @param {number} flags
   * @returns {Promise<File>}
   */
  static async create(pathname, flags) {
    const file = new File(pathname, flags);

    const create = !!(flags & VFS.SQLITE_OPEN_CREATE);
    const [directory, filename] = await getPathComponents(pathname, create);
    const handle = await directory.getFileHandle(filename, { create });
    // @ts-ignore
    // 'readwrite-unsafe' allows multiple connections to hold open
    // access handles on the same file.
    file.accessHandle = await handle.createSyncAccessHandle({ mode: 'readwrite-unsafe' });

    if (flags & VFS.SQLITE_OPEN_MAIN_DB) {
      file.idb = await new Promise((resolve, reject) => {
        const request = indexedDB.open(pathname);
        request.onupgradeneeded = () => {
          // First open: create the object stores.
          const db = request.result;
          db.createObjectStore('pages', { keyPath: 'i' });
          db.createObjectStore('pending', { keyPath: 'txId'});
        };
        request.onsuccess = () => resolve(request.result);
        request.onerror = () => reject(request.error);
      });
    }
    return file;
  }
}
105
+
106
+ export class OPFSPermutedVFS extends FacadeVFS {
107
+ /** @type {Map<number, File>} */ #mapIdToFile = new Map();
108
+ #lastError = null;
109
+
110
+ log = null; // (...args) => console.debug(contextId, ...args);
111
+
112
+ /**
113
+ * @param {string} name
114
+ * @param {*} module
115
+ * @returns
116
+ */
117
+ static async create(name, module) {
118
+ const vfs = new OPFSPermutedVFS(name, module);
119
+ await vfs.isReady();
120
+ return vfs;
121
+ }
122
+
123
  /**
   * Open a file. For a main database this also loads the page map from
   * IndexedDB, verifies pending transactions, and subscribes to the
   * BroadcastChannel used to share transactions between connections.
   * @param {string?} zName
   * @param {number} fileId
   * @param {number} flags
   * @param {DataView} pOutFlags
   * @returns {Promise<number>}
   */
  async jOpen(zName, fileId, flags, pOutFlags) {
    /** @type {(() => void)[]} */ const onFinally = [];
    try {
      // An anonymous (null) name gets a random path.
      const url = new URL(zName || Math.random().toString(36).slice(2), 'file://');
      const path = url.pathname;

      const file = await File.create(path, flags);
      if (flags & VFS.SQLITE_OPEN_MAIN_DB) {
        file.pageSize = 0;
        file.fileSize = 0;
        file.viewTx = { txId: 0 };
        file.broadcastChannel = new BroadcastChannel(`permuted:${path}`);
        file.broadcastReceived = [];
        file.mapPageToOffset = new Map();
        file.mapTxToPending = new Map();
        file.freeOffsets = new Set();
        file.lockState = VFS.SQLITE_LOCK_NONE;
        file.locks = {};
        file.abortController = new AbortController();
        file.txIsOverwrite = false;
        file.txActive = null;
        file.synchronous = 'full';
        file.flushInterval = DEFAULT_FLUSH_INTERVAL;

        // Take the write lock so no other connection changes state
        // during our initialization.
        await this.#lock(file, 'write');
        onFinally.push(() => file.locks.write());

        // Load the initial page map from the database.
        const tx = file.idb.transaction(['pages', 'pending']);
        const pages = await idbX(tx.objectStore('pages').getAll());
        file.pageSize = this.#getPageSize(file);
        file.fileSize = pages.length * file.pageSize;

        // Begin with adding all file offsets to the free list.
        const opfsFileSize = file.accessHandle.getSize();
        for (let i = 0; i < opfsFileSize; i += file.pageSize) {
          file.freeOffsets.add(i);
        }

        // Incorporate the page map data: each mapped offset is in use.
        for (const { i, o } of pages) {
          file.mapPageToOffset.set(i, o);
          file.freeOffsets.delete(o);
        }

        // Incorporate pending transactions.
        try {
          /** @type {Transaction[]} */
          const transactions = await idbX(tx.objectStore('pending').getAll());
          for (const transaction of transactions) {
            // Verify checksums for all pages in this transaction.
            for (const [index, { offset, digest }] of transaction.pages) {
              const data = new Uint8Array(file.pageSize);
              file.accessHandle.read(data, { at: offset });
              if (checksum(data).some((v, i) => v !== digest[i])) {
                throw Object.assign(new Error('checksum error'), { txId: transaction.txId });
              }
            }
            this.#acceptTx(file, transaction);
            file.viewTx = transaction;
          }
        } catch (e) {
          if (e.message === 'checksum error') {
            // A corrupt pending transaction invalidates it and all
            // later ones; remove them from IndexedDB.
            console.warn(`Checksum error, removing tx ${e.txId}+`)
            const tx = file.idb.transaction('pending', 'readwrite');
            const txCommit = new Promise((resolve, reject) => {
              tx.oncomplete = resolve;
              tx.onabort = () => reject(tx.error);
            });
            const range = IDBKeyRange.lowerBound(e.txId);
            tx.objectStore('pending').delete(range);
            tx.commit();
            await txCommit;
          } else {
            throw e;
          }
        }

        // Publish our view of the database. This prevents other connections
        // from overwriting file data we still need.
        await this.#setView(file, file.viewTx);

        // Listen for broadcasts. Messages are cached until the database
        // is unlocked.
        file.broadcastChannel.addEventListener('message', event => {
          file.broadcastReceived.push(event.data);
          if (file.lockState === VFS.SQLITE_LOCK_NONE) {
            this.#processBroadcasts(file);
          }
        });

        // Connections usually hold this shared read lock so they don't
        // acquire and release it for every transaction. The only time
        // it is released is when a connection wants to VACUUM, which
        // it signals with a broadcast message.
        await this.#lock(file, 'read', SHARED)
      }

      pOutFlags.setInt32(0, flags, true);
      this.#mapIdToFile.set(fileId, file);
      return VFS.SQLITE_OK;
    } catch (e) {
      this.#lastError = e;
      return VFS.SQLITE_CANTOPEN;
    } finally {
      // Run deferred cleanup (e.g. release the init write lock).
      while (onFinally.length) {
        await onFinally.pop()();
      }
    }
  }
242
+
243
+ /**
244
+ * @param {string} zName
245
+ * @param {number} syncDir
246
+ * @returns {Promise<number>}
247
+ */
248
+ async jDelete(zName, syncDir) {
249
+ try {
250
+ const url = new URL(zName, 'file://');
251
+ const pathname = url.pathname;
252
+
253
+ const [directoryHandle, name] = await getPathComponents(pathname, false);
254
+ const result = directoryHandle.removeEntry(name, { recursive: false });
255
+ if (syncDir) {
256
+ await result;
257
+ }
258
+ return VFS.SQLITE_OK;
259
+ } catch (e) {
260
+ return VFS.SQLITE_IOERR_DELETE;
261
+ }
262
+ }
263
+
264
+ /**
265
+ * @param {string} zName
266
+ * @param {number} flags
267
+ * @param {DataView} pResOut
268
+ * @returns {Promise<number>}
269
+ */
270
+ async jAccess(zName, flags, pResOut) {
271
+ try {
272
+ const url = new URL(zName, 'file://');
273
+ const pathname = url.pathname;
274
+
275
+ const [directoryHandle, dbName] = await getPathComponents(pathname, false);
276
+ await directoryHandle.getFileHandle(dbName, { create: false });
277
+ pResOut.setInt32(0, 1, true);
278
+ return VFS.SQLITE_OK;
279
+ } catch (e) {
280
+ if (e.name === 'NotFoundError') {
281
+ pResOut.setInt32(0, 0, true);
282
+ return VFS.SQLITE_OK;
283
+ }
284
+ this.#lastError = e;
285
+ return VFS.SQLITE_IOERR_ACCESS;
286
+ }
287
+ }
288
+
289
+ /**
290
+ * @param {number} fileId
291
+ * @returns {Promise<number>}
292
+ */
293
+ async jClose(fileId) {
294
+ try {
295
+ const file = this.#mapIdToFile.get(fileId);
296
+ this.#mapIdToFile.delete(fileId);
297
+ file?.accessHandle?.close();
298
+
299
+ if (file?.flags & VFS.SQLITE_OPEN_MAIN_DB) {
300
+ file.broadcastChannel.close();
301
+ file.viewReleaser?.();
302
+ }
303
+
304
+ if (file?.flags & VFS.SQLITE_OPEN_DELETEONCLOSE) {
305
+ const [directoryHandle, name] = await getPathComponents(file.path, false);
306
+ await directoryHandle.removeEntry(name, { recursive: false });
307
+ }
308
+ return VFS.SQLITE_OK;
309
+ } catch (e) {
310
+ return VFS.SQLITE_IOERR_CLOSE;
311
+ }
312
+ }
313
+
314
  /**
   * Read from a file. For a main database, translate the virtual page
   * offset SQLite requests into the physical (permuted) file offset.
   * @param {number} fileId
   * @param {Uint8Array} pData
   * @param {number} iOffset
   * @returns {number}
   */
  jRead(fileId, pData, iOffset) {
    try {
      const file = this.#mapIdToFile.get(fileId);

      let bytesRead = 0;
      if (file.flags & VFS.SQLITE_OPEN_MAIN_DB) {
        file.abortController.signal.throwIfAborted();

        // Look up the page location in the file. Check the pages in
        // any active write transaction first, then the main map.
        const pageIndex = file.pageSize ?
          Math.trunc(iOffset / file.pageSize) + 1:
          1;
        const pageOffset = file.txActive?.pages.has(pageIndex) ?
          file.txActive.pages.get(pageIndex).offset :
          file.mapPageToOffset.get(pageIndex);
        if (pageOffset >= 0) {
          this.log?.(`read page ${pageIndex} at ${pageOffset}`);
          bytesRead = file.accessHandle.read(
            pData.subarray(),
            { at: pageOffset + (file.pageSize ? iOffset % file.pageSize : 0) });
        }

        // Get page size if not already known. Header bytes 16-17 hold
        // the big-endian page size (1 means 65536).
        if (!file.pageSize && iOffset <= 16 && iOffset + bytesRead >= 18) {
          const dataView = new DataView(pData.slice(16 - iOffset, 18 - iOffset).buffer);
          file.pageSize = dataView.getUint16(0);
          if (file.pageSize === 1) {
            file.pageSize = 65536;
          }
          this.log?.(`set page size ${file.pageSize}`);
        }
      } else {
        // On Chrome (at least), passing pData to accessHandle.read() is
        // an error because pData is a Proxy of a Uint8Array. Calling
        // subarray() produces a real Uint8Array and that works.
        bytesRead = file.accessHandle.read(pData.subarray(), { at: iOffset });
      }

      if (bytesRead < pData.byteLength) {
        // Zero-fill the remainder, as SQLite requires for short reads.
        pData.fill(0, bytesRead);
        return VFS.SQLITE_IOERR_SHORT_READ;
      }
      return VFS.SQLITE_OK;
    } catch (e) {
      this.#lastError = e;
      return VFS.SQLITE_IOERR_READ;
    }
  }
369
+
370
  /**
   * Write to a file. For a main database, choose a physical offset for
   * the page (reusing free slots when possible) and record it in the
   * active transaction.
   * @param {number} fileId
   * @param {Uint8Array} pData
   * @param {number} iOffset
   * @returns {number}
   */
  jWrite(fileId, pData, iOffset) {
    try {
      const file = this.#mapIdToFile.get(fileId);

      if (file.flags & VFS.SQLITE_OPEN_MAIN_DB) {
        file.abortController.signal.throwIfAborted();
        if (!file.pageSize) {
          // First page write establishes the page size.
          this.log?.(`set page size ${pData.byteLength}`)
          file.pageSize = pData.byteLength;
        }

        // The first write begins a transaction. Note that xLock/xUnlock
        // is not a good way to determine transaction boundaries because
        // PRAGMA locking_mode can change the behavior.
        if (!file.txActive) {
          this.#beginTx(file);
        }

        // Choose the offset in the file to write this page.
        let pageOffset;
        const pageIndex = Math.trunc(iOffset / file.pageSize) + 1;
        if (file.txIsOverwrite) {
          // For VACUUM, use the identity mapping to write each page
          // at its canonical offset.
          pageOffset = iOffset;
        } else if (file.txActive.pages.has(pageIndex)) {
          // This page has already been written in this transaction.
          // Use the same offset.
          pageOffset = file.txActive.pages.get(pageIndex).offset;
          this.log?.(`overwrite page ${pageIndex} at ${pageOffset}`);
        } else if (pageIndex === 1 && file.freeOffsets.delete(0)) {
          // Offset 0 is available for page 1.
          pageOffset = 0;
          this.log?.(`write page ${pageIndex} at ${pageOffset}`);
        } else {
          // Use the first unused non-zero offset within the file.
          for (const maybeOffset of file.freeOffsets) {
            if (maybeOffset) {
              if (maybeOffset < file.txRealFileSize) {
                pageOffset = maybeOffset;
                file.freeOffsets.delete(pageOffset);
                this.log?.(`write page ${pageIndex} at ${pageOffset}`);
                break;
              } else {
                // This offset is beyond the end of the file.
                file.freeOffsets.delete(maybeOffset);
              }
            }
          }

          if (pageOffset === undefined) {
            // Write to the end of the file.
            pageOffset = file.txRealFileSize;
            this.log?.(`append page ${pageIndex} at ${pageOffset}`);
          }
        }
        file.accessHandle.write(pData.subarray(), { at: pageOffset });

        // Update the transaction with the page mapping and checksum.
        file.txActive.pages.set(pageIndex, {
          offset: pageOffset,
          digest: checksum(pData.subarray())
        });
        file.txActive.fileSize = Math.max(file.txActive.fileSize, pageIndex * file.pageSize);

        // Track the actual file size.
        file.txRealFileSize = Math.max(file.txRealFileSize, pageOffset + pData.byteLength);
      } else {
        // On Chrome (at least), passing pData to accessHandle.write() is
        // an error because pData is a Proxy of a Uint8Array. Calling
        // subarray() produces a real Uint8Array and that works.
        file.accessHandle.write(pData.subarray(), { at: iOffset });
      }
      return VFS.SQLITE_OK;
    } catch (e) {
      this.#lastError = e;
      return VFS.SQLITE_IOERR_WRITE;
    }
  }
455
+
456
+ /**
457
+ * @param {number} fileId
458
+ * @param {number} iSize
459
+ * @returns {number}
460
+ */
461
+ jTruncate(fileId, iSize) {
462
+ try {
463
+ const file = this.#mapIdToFile.get(fileId);
464
+ if ((file.flags & VFS.SQLITE_OPEN_MAIN_DB) && !file.txIsOverwrite) {
465
+ file.abortController.signal.throwIfAborted();
466
+ file.txActive.fileSize = iSize;
467
+
468
+ // Remove now obsolete pages from file.txActive.pages
469
+ for (const [index, { offset }] of file.txActive.pages) {
470
+ // Page indices are 1-based.
471
+ if (index * file.pageSize > iSize) {
472
+ file.txActive.pages.delete(index);
473
+ file.freeOffsets.add(offset);
474
+ }
475
+ }
476
+ return VFS.SQLITE_OK;
477
+ }
478
+ file.accessHandle.truncate(iSize);
479
+ return VFS.SQLITE_OK;
480
+ } catch (e) {
481
+ console.error(e);
482
+ this.lastError = e;
483
+ return VFS.SQLITE_IOERR_TRUNCATE;
484
+ }
485
+ }
486
+
487
+ /**
488
+ * @param {number} fileId
489
+ * @param {number} flags
490
+ * @returns {number}
491
+ */
492
+ jSync(fileId, flags) {
493
+ try {
494
+ // Main DB sync is handled by SQLITE_FCNTL_SYNC.
495
+ const file = this.#mapIdToFile.get(fileId);
496
+ if (!(file.flags & VFS.SQLITE_OPEN_MAIN_DB)) {
497
+ file.accessHandle.flush();
498
+ }
499
+ return VFS.SQLITE_OK;
500
+ } catch (e) {
501
+ this.#lastError = e;
502
+ return VFS.SQLITE_IOERR_FSYNC;
503
+ }
504
+ }
505
+
506
+ /**
507
+ * @param {number} fileId
508
+ * @param {DataView} pSize64
509
+ * @returns {number}
510
+ */
511
+ jFileSize(fileId, pSize64) {
512
+ try {
513
+ const file = this.#mapIdToFile.get(fileId);
514
+
515
+ let size;
516
+ if (file.flags & VFS.SQLITE_OPEN_MAIN_DB) {
517
+ file.abortController.signal.throwIfAborted();
518
+ size = file.txActive?.fileSize ?? file.fileSize;
519
+ } else {
520
+ size = file.accessHandle.getSize();
521
+ }
522
+
523
+ pSize64.setBigInt64(0, BigInt(size), true);
524
+ return VFS.SQLITE_OK;
525
+ } catch (e) {
526
+ this.#lastError = e;
527
+ return VFS.SQLITE_IOERR_FSTAT;
528
+ }
529
+ }
530
+
531
  /**
   * Raise the lock state. SHARED may acquire the hint lock (when a
   * write is expected) and reacquires the long-lived read lock;
   * RESERVED polls for the reserved lock and verifies our view of the
   * database is current; EXCLUSIVE takes the write lock.
   * @param {number} fileId
   * @param {number} lockType
   * @returns {Promise<number>}
   */
  async jLock(fileId, lockType) {
    const file = this.#mapIdToFile.get(fileId);
    if (lockType <= file.lockState) return VFS.SQLITE_OK;
    switch (lockType) {
      case VFS.SQLITE_LOCK_SHARED:
        if (file.txWriteHint) {
          // xFileControl() has hinted that this transaction will
          // write. Acquire the hint lock, which is required to reach
          // the RESERVED state.
          if (!await this.#lock(file, 'hint')) {
            return VFS.SQLITE_BUSY;
          }
        }

        if (!file.locks.read) {
          // Reacquire lock if it was released by a broadcast request.
          await this.#lock(file, 'read', SHARED);
        }
        break;
      case VFS.SQLITE_LOCK_RESERVED:
        // Ideally we should already have the hint lock, but if not
        // poll for it here.
        if (!file.locks.hint && !await this.#lock(file, 'hint', POLL_EXCLUSIVE)) {
          return VFS.SQLITE_BUSY;
        }

        if (!await this.#lock(file, 'reserved', POLL_EXCLUSIVE)) {
          file.locks.hint();
          return VFS.SQLITE_BUSY;
        }

        // In order to write, our view of the database must be up to date.
        // To check this, first fetch all transactions in IndexedDB equal to
        // or greater than our view.
        const tx = file.idb.transaction(['pending']);
        const range = IDBKeyRange.lowerBound(file.viewTx.txId);

        /** @type {Transaction[]} */
        const entries = await idbX(tx.objectStore('pending').getAll(range));

        // Ideally the fetched list of transactions should contain one
        // entry matching our view. If not then our view is out of date.
        if (entries.length && entries.at(-1).txId > file.viewTx.txId) {
          // There are newer transactions in IndexedDB that we haven't
          // seen via broadcast. Ensure that they are incorporated on unlock,
          // and force the application to retry.
          file.broadcastReceived.push(...entries);
          file.locks.reserved();
          return VFS.SQLITE_BUSY
        }
        break;
      case VFS.SQLITE_LOCK_EXCLUSIVE:
        await this.#lock(file, 'write');
        break;
    }
    file.lockState = lockType;
    return VFS.SQLITE_OK;
  }
594
+
595
  /**
   * Lower the lock state, releasing the write/reserved/hint locks.
   * The long-lived read lock is deliberately retained (see below).
   * @param {number} fileId
   * @param {number} lockType
   * @returns {number}
   */
  jUnlock(fileId, lockType) {
    const file = this.#mapIdToFile.get(fileId);
    if (lockType >= file.lockState) return VFS.SQLITE_OK;
    switch (lockType) {
      case VFS.SQLITE_LOCK_SHARED:
        file.locks.write?.();
        file.locks.reserved?.();
        file.locks.hint?.();
        break;
      case VFS.SQLITE_LOCK_NONE:
        // Don't release the read lock here. It will be released on demand
        // when a broadcast notifies us that another connection wants to
        // VACUUM.
        this.#processBroadcasts(file);
        file.locks.write?.();
        file.locks.reserved?.();
        file.locks.hint?.();
        break;
    }
    file.lockState = lockType;
    return VFS.SQLITE_OK;
  }
622
+
623
+ /**
624
+ * @param {number} fileId
625
+ * @param {DataView} pResOut
626
+ * @returns {Promise<number>}
627
+ */
628
+ async jCheckReservedLock(fileId, pResOut) {
629
+ try {
630
+ const file = this.#mapIdToFile.get(fileId);
631
+ if (await this.#lock(file, 'reserved', POLL_SHARED)) {
632
+ // This looks backwards, but if we get the lock then no one
633
+ // else had it.
634
+ pResOut.setInt32(0, 0, true);
635
+ file.locks.reserved();
636
+ } else {
637
+ pResOut.setInt32(0, 1, true);
638
+ }
639
+ return VFS.SQLITE_OK;
640
+ } catch (e) {
641
+ console.error(e);
642
+ this.lastError = e;
643
+ return VFS.SQLITE_IOERR_LOCK;
644
+ }
645
+ }
646
+
647
  /**
   * Handle file control operations: PRAGMA interception (page_size,
   * synchronous, flush_interval, write_hint), batch-atomic write
   * notifications, VACUUM preparation, and commit phase two.
   * @param {number} fileId
   * @param {number} op
   * @param {DataView} pArg
   * @returns {Promise<number>}
   */
  async jFileControl(fileId, op, pArg) {
    try {
      const file = this.#mapIdToFile.get(fileId);
      switch (op) {
        case VFS.SQLITE_FCNTL_PRAGMA:
          const key = cvtString(pArg, 4);
          const value = cvtString(pArg, 8);
          this.log?.('xFileControl', file.path, 'PRAGMA', key, value);
          switch (key.toLowerCase()) {
            case 'page_size':
              // Don't allow changing the page size.
              if (value && file.pageSize && Number(value) !== file.pageSize) {
                return VFS.SQLITE_ERROR;
              }
              break;
            case 'synchronous':
              // This VFS only distinguishes 'full' (or 'extra') from
              // everything else, which is treated as 'normal'.
              if (value) {
                switch (value.toLowerCase()) {
                  case 'full':
                  case '2':
                  case 'extra':
                  case '3':
                    file.synchronous = 'full';
                    break;
                  default:
                    file.synchronous = 'normal';
                    break;
                }
              }
              break;
            case 'flush_interval':
              if (value) {
                // Set a new flush interval; must be positive.
                const interval = Number(value);
                if (interval > 0) {
                  file.flushInterval = Number(value);
                } else {
                  return VFS.SQLITE_ERROR;
                }
              } else {
                // Report current value. The string is allocated with
                // sqlite3_malloc64 so SQLite can free it.
                const buffer = new TextEncoder().encode(file.flushInterval.toString());
                const s = this._module._sqlite3_malloc64(buffer.byteLength + 1);
                new Uint8Array(this._module.HEAPU8.buffer, s, buffer.byteLength + 1)
                  .fill(0)
                  .set(buffer);

                pArg.setUint32(0, s, true);
                return VFS.SQLITE_OK;
              }
              break;
            case 'write_hint':
              return this.jFileControl(fileId, WebLocksMixin.WRITE_HINT_OP_CODE, null);
          }
          break;
        case VFS.SQLITE_FCNTL_BEGIN_ATOMIC_WRITE:
          this.log?.('xFileControl', 'BEGIN_ATOMIC_WRITE', file.path);
          return VFS.SQLITE_OK;
        case VFS.SQLITE_FCNTL_COMMIT_ATOMIC_WRITE:
          this.log?.('xFileControl', 'COMMIT_ATOMIC_WRITE', file.path);
          return VFS.SQLITE_OK;
        case VFS.SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE:
          this.log?.('xFileControl', 'ROLLBACK_ATOMIC_WRITE', file.path);
          this.#rollbackTx(file);
          return VFS.SQLITE_OK;
        case VFS.SQLITE_FCNTL_OVERWRITE:
          // This is a VACUUM.
          this.log?.('xFileControl', 'OVERWRITE', file.path);
          await this.#prepareOverwrite(file);
          break;
        case VFS.SQLITE_FCNTL_COMMIT_PHASETWO:
          // Finish any transaction. Note that the transaction may not
          // exist if there is a BEGIN IMMEDIATE...COMMIT block that
          // does not actually call xWrite.
          this.log?.('xFileControl', 'COMMIT_PHASETWO', file.path);
          if (file.txActive) {
            await this.#commitTx(file);
          }
          break;
        case WebLocksMixin.WRITE_HINT_OP_CODE:
          file.txWriteHint = true;
          break;
      }
    } catch (e) {
      this.#lastError = e;
      return VFS.SQLITE_IOERR;
    }
    // Unhandled ops fall through so SQLite applies default behavior.
    return VFS.SQLITE_NOTFOUND;
  }
742
+
743
+ /**
744
+ * @param {number} fileId
745
+ * @returns {number|Promise<number>}
746
+ */
747
+ jDeviceCharacteristics(fileId) {
748
+ return 0
749
+ | VFS.SQLITE_IOCAP_BATCH_ATOMIC
750
+ | VFS.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN;
751
+ }
752
+
753
+ /**
754
+ * @param {Uint8Array} zBuf
755
+ * @returns {number}
756
+ */
757
+ jGetLastError(zBuf) {
758
+ if (this.#lastError) {
759
+ console.error(this.#lastError);
760
+ const outputArray = zBuf.subarray(0, zBuf.byteLength - 1);
761
+ const { written } = new TextEncoder().encodeInto(this.#lastError.message, outputArray);
762
+ zBuf[written] = 0;
763
+ }
764
+ return VFS.SQLITE_OK
765
+ }
766
+
767
+ /**
768
+ * Return the database page size, or 0 if not yet known.
769
+ * @param {File} file
770
+ * @returns {number}
771
+ */
772
+ #getPageSize(file) {
773
+ // Offset 0 will always contain a page 1. Even if it is out of
774
+ // date it will have a valid page size.
775
+ // https://sqlite.org/fileformat.html#page_size
776
+ const header = new DataView(new ArrayBuffer(2));
777
+ const n = file.accessHandle.read(header, { at: 16 });
778
+ if (n !== header.byteLength) return 0;
779
+ const pageSize = header.getUint16(0);
780
+ switch (pageSize) {
781
+ case 1:
782
+ return 65536;
783
+ default:
784
+ return pageSize;
785
+ }
786
+ }
787
+
788
  /**
   * Acquire one of the database file internal Web Locks. On success the
   * releaser is stored in file.locks[name]; on a failed poll the slot
   * is cleared. Resolves true iff the lock was acquired.
   * @param {File} file
   * @param {'read'|'write'|'reserved'|'hint'} name
   * @param {LockOptions} options
   * @returns {Promise<boolean>}
   */
  #lock(file, name, options = {}) {
    return new Promise(resolve => {
      const lockName = `${file.path}@@${name}`;
      navigator.locks.request(lockName, options, lock => {
        if (lock) {
          // Hold the lock until the stored releaser is invoked.
          return new Promise(release => {
            file.locks[name] = () => {
              release();
              file.locks[name] = null;
            };
            resolve(true);
          });
        } else {
          // ifAvailable poll failed; lock is held elsewhere.
          file.locks[name] = null;
          resolve(false);
        }
      }).catch(e => {
        // AbortError is expected when the request is steered; anything
        // else is a real failure.
        if (e.name !== 'AbortError') throw e;
      });
    });
  }
816
+
817
  /**
   * Adopt a transaction as our current view of the database.
   * @param {File} file
   * @param {Transaction} tx
   */
  async #setView(file, tx) {
    // Publish our view of the database with a lock name that includes
    // the transaction id. As long as we hold the lock, no other connection
    // will overwrite data we are using.
    file.viewTx = tx;
    const lockName = `${file.path}@@[${tx.txId}]`;
    const newReleaser = await new Promise(resolve => {
      navigator.locks.request(lockName, SHARED, lock => {
        // Hold the lock until the releaser is called.
        return new Promise(release => {
          resolve(release);
        });
      });
    });

    // The new lock is acquired so release the old one.
    file.viewReleaser?.();
    file.viewReleaser = newReleaser;
  }
839
+
840
  /**
   * Handle previously received messages from other connections:
   * incorporate transactions in order, and release the read lock when
   * another connection requests exclusive access (VACUUM).
   * @param {File} file
   */
  #processBroadcasts(file) {
    // Sort transaction messages by id. Move other messages to the front.
    // @ts-ignore
    file.broadcastReceived.sort((a, b) => (a.txId ?? -1) - (b.txId ?? -1));

    let nHandled = 0;
    let newTx = file.viewTx;
    for (const message of file.broadcastReceived) {
      if (Object.hasOwn(message, 'txId')) {
        const messageTx = /** @type {Transaction} */ (message)
        if (messageTx.txId <= newTx.txId) {
          // This transaction is already incorporated into our view.
        } else if (messageTx.txId === newTx.txId + 1) {
          // This is the next expected transaction.
          this.log?.(`accept tx ${messageTx.txId}`);
          this.#acceptTx(file, messageTx);
          newTx = messageTx;
        } else {
          // There is a gap in the transaction sequence. Stop here and
          // wait for the missing transaction to arrive.
          console.warn(`missing tx ${newTx.txId + 1} (got ${messageTx.txId})`);
          break;
        }
      } else if (Object.hasOwn(message, 'exclusive')) {
        // Release the read lock if we have it.
        this.log?.('releasing read lock');
        console.assert(file.lockState === VFS.SQLITE_LOCK_NONE);
        file.locks.read?.();
      }
      nHandled++;
    }

    // Remove handled messages from the list.
    file.broadcastReceived.splice(0, nHandled);

    // Tell other connections about a change in our view.
    if (newTx.txId > file.viewTx.txId) {
      // No need to await here.
      this.#setView(file, newTx);
    }
  }
884
+
885
  /**
   * Incorporate a committed transaction into this connection's page
   * map, tracking which offsets become reclaimable.
   * @param {File} file
   * @param {Transaction} message
   */
  #acceptTx(file, message) {
    file.pageSize = file.pageSize || this.#getPageSize(file);

    // Add list of pages made obsolete by this transaction. These pages
    // can be moved to the free list when all connections have reached
    // this point.
    message.reclaimable = [];

    // Update page mapping with transaction pages.
    for (const [index, { offset }] of message.pages) {
      if (file.mapPageToOffset.has(index)) {
        // Remember overwritten pages that can be reused when all
        // connections have seen this transaction.
        message.reclaimable.push(file.mapPageToOffset.get(index));
      }
      file.mapPageToOffset.set(index, offset);
      file.freeOffsets.delete(offset);
    }

    // Remove mappings for truncated pages.
    const oldPageCount = file.fileSize / file.pageSize;
    const newPageCount = message.fileSize / file.pageSize;
    for (let index = newPageCount + 1; index <= oldPageCount; index++) {
      message.reclaimable.push(file.mapPageToOffset.get(index));
      file.mapPageToOffset.delete(index);
    }

    file.fileSize = message.fileSize;
    file.mapTxToPending.set(message.txId, message);
    if (message.oldestTxId) {
      // Finalize pending transactions that are no longer needed.
      for (const tx of file.mapTxToPending.values()) {
        if (tx.txId > message.oldestTxId) break;

        // Return no longer referenced pages to the free list.
        for (const offset of tx.reclaimable) {
          this.log?.(`reclaim offset ${offset}`);
          file.freeOffsets.add(offset);
        }
        file.mapTxToPending.delete(tx.txId);
      }
    }
  }
932
+
933
+ /**
934
+ * @param {File} file
935
+ */
936
+ #beginTx(file) {
937
+ // Start a new transaction.
938
+ file.txActive = {
939
+ txId: file.viewTx.txId + 1,
940
+ pages: new Map(),
941
+ fileSize: file.fileSize
942
+ };
943
+ file.txRealFileSize = file.accessHandle.getSize();
944
+ this.log?.(`begin transaction ${file.txActive.txId}`);
945
+ }
946
+
947
  /**
   * Commit the active transaction: optionally finalize no-longer-used
   * pending transactions into the IndexedDB 'pages' store, publish the
   * transaction via the 'pending' store and BroadcastChannel, and
   * advance this connection's view.
   * @param {File} file
   */
  async #commitTx(file) {
    // Determine whether to finalize pending transactions, i.e. transfer
    // them to the IndexedDB pages store.
    if (file.synchronous === 'full' ||
        file.txIsOverwrite ||
        (file.txActive.txId % file.flushInterval) === 0) {
      file.txActive.oldestTxId = await this.#getOldestTxInUse(file);
    }

    const tx = file.idb.transaction(
      ['pages', 'pending'],
      'readwrite',
      { durability: file.synchronous === 'full' ? 'strict' : 'relaxed'});

    if (file.txActive.oldestTxId) {
      // Ensure that all pending data is safely on storage.
      if (file.txIsOverwrite) {
        file.accessHandle.truncate(file.txActive.fileSize);
      }
      file.accessHandle.flush();

      // Transfer page mappings to the pages store for all pending
      // transactions that are no longer in use.
      // NOTE(review): the loop variable below shadows the outer
      // IndexedDB transaction `tx`; inside the loop it is a pending
      // Transaction object, not the IDBTransaction.
      const pageStore = tx.objectStore('pages');
      for (const tx of file.mapTxToPending.values()) {
        if (tx.txId > file.txActive.oldestTxId) break;

        for (const [index, { offset }] of tx.pages) {
          pageStore.put({ i: index, o: offset });
        }
      }

      // Delete pending store entries that are no longer needed.
      tx.objectStore('pending')
        .delete(IDBKeyRange.upperBound(file.txActive.oldestTxId));
    }

    // Publish the transaction via broadcast and IndexedDB. The
    // broadcast is sent only after the IndexedDB transaction commits.
    this.log?.(`commit transaction ${file.txActive.txId}`);
    tx.objectStore('pending').put(file.txActive);

    const txComplete = new Promise((resolve, reject) => {
      // Capture txActive now; it is nulled before oncomplete may fire.
      const message = file.txActive;
      tx.oncomplete = () => {
        file.broadcastChannel.postMessage(message);
        resolve();
      };
      tx.onabort = () => {
        file.abortController.abort();
        reject(tx.error);
      };
      tx.commit();
    });

    // Only 'full' synchronous mode waits for durable commit here.
    if (file.synchronous === 'full') {
      await txComplete;
    }

    // Advance our own view. Even if we received our own broadcasts (we
    // don't), we want our view to be updated synchronously.
    this.#acceptTx(file, file.txActive);
    this.#setView(file, file.txActive);
    file.txActive = null;
    file.txWriteHint = false;

    if (file.txIsOverwrite) {
      // Wait until all connections have seen the transaction.
      while (file.viewTx.txId !== await this.#getOldestTxInUse(file)) {
        await new Promise(resolve => setTimeout(resolve, 10));
      }

      // Downgrade the exclusive read lock to a shared lock.
      file.locks.read();
      await this.#lock(file, 'read', SHARED);

      // There should be no extra space in the file now.
      file.freeOffsets.clear();

      file.txIsOverwrite = false;
    }
  }
1031
+
1032
+ /**
1033
+ * @param {File} file
1034
+ */
1035
+ #rollbackTx(file) {
1036
+ // Return offsets to the free list.
1037
+ this.log?.(`rollback transaction ${file.txActive.txId}`);
1038
+ for (const { offset } of file.txActive.pages.values()) {
1039
+ file.freeOffsets.add(offset);
1040
+ }
1041
+ file.txActive = null;
1042
+ file.txWriteHint = false;
1043
+ }
1044
+
1045
  /**
   * Prepare for VACUUM to overwrite the file in place: copy every live
   * page to an offset at or above fileSize so the region below
   * fileSize can be rewritten in canonical page order.
   * @param {File} file
   */
  async #prepareOverwrite(file) {
    // Get an exclusive read lock to prevent other connections from
    // seeing the database in an inconsistent state.
    file.locks.read?.();
    if (!await this.#lock(file, 'read', POLL_EXCLUSIVE)) {
      // We didn't get the read lock because other connections have
      // it. Notify them that we want the lock and wait.
      const lockRequest = this.#lock(file, 'read');
      file.broadcastChannel.postMessage({ exclusive: true });
      await lockRequest;
    }

    // Create an intermediate transaction to copy all current page data to
    // an offset past fileSize.
    file.txActive = {
      txId: file.viewTx.txId + 1,
      pages: new Map(),
      fileSize: file.fileSize
    };

    // This helper generator provides offsets above fileSize. It first
    // recycles free slots beyond the logical end, then appends at the
    // physical end of the file.
    const offsetGenerator = (function*() {
      for (const offset of file.freeOffsets) {
        if (offset >= file.fileSize) {
          yield offset;
        }
      }

      while (true) {
        yield file.accessHandle.getSize();
      }
    })();

    const pageBuffer = new Uint8Array(file.pageSize);
    for (let offset = 0; offset < file.fileSize; offset += file.pageSize) {
      // SQLite page numbers are 1-based.
      const pageIndex = offset / file.pageSize + 1;
      const oldOffset = file.mapPageToOffset.get(pageIndex);
      if (oldOffset < file.fileSize) {
        // This page is stored below fileSize. Read it into memory.
        if (file.accessHandle.read(pageBuffer, { at: oldOffset }) !== file.pageSize) {
          throw new Error('Failed to read page');
        }

        // Perform the copy.
        const newOffset = offsetGenerator.next().value;
        if (file.accessHandle.write(pageBuffer, { at: newOffset }) !== file.pageSize) {
          throw new Error('Failed to write page');
        }

        file.txActive.pages.set(pageIndex, {
          offset: newOffset,
          digest: checksum(pageBuffer)
        });
      }
    }
    file.accessHandle.flush();
    file.freeOffsets.clear();

    // Publish transaction for others.
    file.broadcastChannel.postMessage(file.txActive);
    const tx = file.idb.transaction('pending', 'readwrite');
    const txComplete = new Promise((resolve, reject) => {
      tx.oncomplete = resolve;
      tx.onabort = () => reject(tx.error);
    });
    tx.objectStore('pending').put(file.txActive);
    tx.commit();
    await txComplete;

    // Incorporate the transaction into our view.
    this.#acceptTx(file, file.txActive);
    this.#setView(file, file.txActive);
    file.txActive = null;

    // Now all pages are in the file above fileSize. The VACUUM operation
    // will now copy the pages below fileSize in the proper order. After
    // that once all connections are up to date the file can be truncated.

    // This flag tells xWrite to write pages at their canonical offset.
    file.txIsOverwrite = true;
  }
1129
+
1130
+ /**
1131
+ * @param {File} file
1132
+ * @returns {Promise<number>}
1133
+ */
1134
+ async #getOldestTxInUse(file) {
1135
+ // Each connection holds a shared Web Lock with a name that encodes
1136
+ // the latest transaction it knows about. We can find the oldest
1137
+ // transaction by listing the those locks and extracting the earliest
1138
+ // transaction id.
1139
+ const TX_LOCK_REGEX = /^(.*)@@\[(\d+)\]$/;
1140
+ let oldestTxId = file.viewTx.txId;
1141
+ const locks = await navigator.locks.query();
1142
+ for (const { name } of locks.held) {
1143
+ const m = TX_LOCK_REGEX.exec(name);
1144
+ if (m && m[1] === file.path) {
1145
+ oldestTxId = Math.min(oldestTxId, Number(m[2]));
1146
+ }
1147
+ }
1148
+ return oldestTxId;
1149
+ }
1150
+ }
1151
+
1152
/**
 * Wrap IndexedDB request with a Promise.
 * @param {IDBRequest} request
 * @returns
 */
function idbX(request) {
  // Adapt the IDBRequest callback API to Promise settlement.
  const settle = (resolve, reject) => {
    request.onsuccess = () => resolve(request.result);
    request.onerror = () => reject(request.error);
  };
  return new Promise(settle);
}
1163
+
1164
/**
 * Given a path, return the directory handle and filename.
 * @param {string} path
 * @param {boolean} create
 * @returns {Promise<[FileSystemDirectoryHandle, string]>}
 */
async function getPathComponents(path, create) {
  const segments = path.split('/');
  // The last segment is the filename (may be '' for a trailing '/').
  const filename = segments.pop();

  // Walk from the OPFS root, skipping empty segments from leading,
  // trailing, or doubled slashes.
  let directory = await navigator.storage.getDirectory();
  for (const segment of segments) {
    if (!segment) continue;
    directory = await directory.getDirectoryHandle(segment, { create });
  }
  return [directory, filename];
}
1179
+
1180
/**
 * Extract a C string from WebAssembly memory.
 * @param {DataView} dataView view over the WebAssembly memory buffer
 * @param {number} offset byte offset of a 32-bit little-endian pointer
 * @returns {string?} the decoded string, or null for a NULL pointer
 */
function cvtString(dataView, offset) {
  const p = dataView.getUint32(offset, true);
  if (p) {
    const chars = new Uint8Array(dataView.buffer, p);
    // Decode up to the NUL terminator. If no terminator exists before
    // the end of the buffer, decode the whole span; the previous
    // subarray(0, indexOf(0)) produced subarray(0, -1) in that case,
    // silently dropping the final byte.
    const end = chars.indexOf(0);
    return new TextDecoder().decode(end >= 0 ? chars.subarray(0, end) : chars);
  }
  return null;
}
1194
+
1195
/**
 * Compute a page checksum.
 * @param {ArrayBufferView} data
 * @returns {Uint32Array}
 */
function checksum(data) {
  const words = new Uint32Array(
    data.buffer,
    data.byteOffset,
    data.byteLength / Uint32Array.BYTES_PER_ELEMENT);

  // Fletcher-style rolling sums modulo 2^32 - 1.
  // https://en.wikipedia.org/wiki/Fletcher%27s_checksum
  const MOD = 4294967295;
  let sum1 = 0;
  let sum2 = 0;
  for (let i = 0; i < words.length; i++) {
    sum1 = (sum1 + words[i]) % MOD;
    sum2 = (sum2 + sum1) % MOD;
  }
  return new Uint32Array([sum1, sum2]);
}