@rivetkit/sqlite-wasm 2.2.1-pr.4600.b74ff3b
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +203 -0
- package/dist/schemas/file-meta/v1.ts +43 -0
- package/dist/tsup/index.cjs +1557 -0
- package/dist/tsup/index.cjs.map +1 -0
- package/dist/tsup/index.d.cts +253 -0
- package/dist/tsup/index.d.ts +253 -0
- package/dist/tsup/index.js +1557 -0
- package/dist/tsup/index.js.map +1 -0
- package/package.json +49 -0
- package/schemas/file-meta/mod.ts +2 -0
- package/schemas/file-meta/v1.bare +7 -0
- package/schemas/file-meta/versioned.ts +25 -0
- package/src/generated/empty-db-page.ts +23 -0
- package/src/index.ts +5 -0
- package/src/kv.ts +116 -0
- package/src/pool.ts +502 -0
- package/src/types.ts +20 -0
- package/src/vfs.ts +1646 -0
- package/src/wasm.d.ts +60 -0
package/src/vfs.ts
ADDED
|
@@ -0,0 +1,1646 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SQLite raw database with KV storage backend
|
|
3
|
+
*
|
|
4
|
+
* This module provides a SQLite API that uses a KV-backed VFS
|
|
5
|
+
* for storage. Each SqliteVfs instance is independent and can be
|
|
6
|
+
* used concurrently with other instances.
|
|
7
|
+
*
|
|
8
|
+
* Keep this VFS on direct VFS.Base callbacks for minimal wrapper overhead.
|
|
9
|
+
* Use @rivetkit/sqlite/src/FacadeVFS.js as the reference implementation for
|
|
10
|
+
* callback ABI and pointer/data conversion behavior.
|
|
11
|
+
* This implementation is optimized for single-writer semantics because each
|
|
12
|
+
* actor owns one SQLite database.
|
|
13
|
+
* SQLite invokes this VFS with byte-range file operations. This VFS maps those
|
|
14
|
+
* ranges onto fixed-size KV chunks keyed by file tag and chunk index.
|
|
15
|
+
* We intentionally rely on SQLite's pager cache for hot page reuse and do not
|
|
16
|
+
* add a second cache in this VFS. This avoids duplicate cache invalidation
|
|
17
|
+
* logic and keeps memory usage predictable for each actor.
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
import * as VFS from "@rivetkit/sqlite/src/VFS.js";
|
|
21
|
+
import {
|
|
22
|
+
Factory,
|
|
23
|
+
SQLITE_OPEN_CREATE,
|
|
24
|
+
SQLITE_OPEN_READWRITE,
|
|
25
|
+
SQLITE_ROW,
|
|
26
|
+
} from "@rivetkit/sqlite";
|
|
27
|
+
import { readFileSync } from "node:fs";
|
|
28
|
+
import { createRequire } from "node:module";
|
|
29
|
+
import path from "node:path";
|
|
30
|
+
import { pathToFileURL } from "node:url";
|
|
31
|
+
import {
|
|
32
|
+
CHUNK_SIZE,
|
|
33
|
+
FILE_TAG_JOURNAL,
|
|
34
|
+
FILE_TAG_MAIN,
|
|
35
|
+
FILE_TAG_SHM,
|
|
36
|
+
FILE_TAG_WAL,
|
|
37
|
+
getChunkKey,
|
|
38
|
+
getChunkKeyRangeEnd,
|
|
39
|
+
getMetaKey,
|
|
40
|
+
type SqliteFileTag,
|
|
41
|
+
} from "./kv";
|
|
42
|
+
import { EMPTY_DB_PAGE } from "./generated/empty-db-page";
|
|
43
|
+
import {
|
|
44
|
+
FILE_META_VERSIONED,
|
|
45
|
+
CURRENT_VERSION,
|
|
46
|
+
} from "../schemas/file-meta/versioned";
|
|
47
|
+
import type { FileMeta } from "../schemas/file-meta/mod";
|
|
48
|
+
import type { KvVfsOptions } from "./types";
|
|
49
|
+
|
|
50
|
+
function createNodeRequire(): NodeJS.Require {
|
|
51
|
+
return createRequire(
|
|
52
|
+
path.join(process.cwd(), "__rivetkit_sqlite_require__.cjs"),
|
|
53
|
+
);
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
/**
 * Common interface for database handles returned by ISqliteVfs.open().
 * Both the concrete Database class and the pool's TrackedDatabase wrapper
 * implement this, so consumers can use either interchangeably.
 */
export interface IDatabase {
  /** Execute SQL; `callback` is invoked once per result row with (row, columns). */
  exec(
    sql: string,
    callback?: (row: unknown[], columns: string[]) => void,
  ): Promise<void>;
  /** Execute a parameterized statement, discarding any result rows. */
  run(sql: string, params?: SqliteBindings): Promise<void>;
  /** Execute a parameterized query and collect all rows plus column names. */
  query(
    sql: string,
    params?: SqliteBindings,
  ): Promise<{ rows: unknown[][]; columns: string[] }>;
  /** Close the handle. The Database implementation is idempotent; presumably wrappers are too — confirm. */
  close(): Promise<void>;
  /** The file name this handle was opened with (typically the actor ID). */
  readonly fileName: string;
}
|
|
74
|
+
|
|
75
|
+
/**
 * Common interface for SQLite VFS backends. Both standalone SqliteVfs and
 * PooledSqliteHandle implement this so callers can use either interchangeably.
 */
export interface ISqliteVfs {
  /** Open (creating if needed) the named database using the given KV backend. */
  open(fileName: string, options: KvVfsOptions): Promise<IDatabase>;
  /** Tear down this backend and release internal references. */
  destroy(): Promise<void>;
}
|
|
83
|
+
|
|
84
|
+
// Shape of the Emscripten ESM factory exported by wa-sqlite-async.mjs.
// `wasmBinary` supplies the wasm bytes directly; `instantiateWasm` lets the
// caller hand over an already-compiled module (see loadSqliteRuntime).
type SqliteEsmFactory = (config?: {
  wasmBinary?: ArrayBuffer | Uint8Array;
  instantiateWasm?: (
    imports: WebAssembly.Imports,
    receiveInstance: (instance: WebAssembly.Instance) => void,
  ) => WebAssembly.Exports;
}) => Promise<unknown>;
// Derived from the @rivetkit/sqlite Factory so these track its API exactly.
type SQLite3Api = ReturnType<typeof Factory>;
type SqliteBindings = Parameters<SQLite3Api["bind_collection"]>[1];
type SqliteVfsRegistration = Parameters<SQLite3Api["vfs_register"]>[0];
|
|
94
|
+
|
|
95
|
+
// Minimal view of the Emscripten module surface this VFS needs: decoding
// C strings and raw access to the wasm heap.
interface SQLiteModule {
  /** Decode a NUL-terminated UTF-8 string at `ptr` in the wasm heap. */
  UTF8ToString: (ptr: number) => string;
  /** Byte view over wasm linear memory; SqliteSystem caches its buffer to detect replacement. */
  HEAPU8: Uint8Array;
}
|
|
99
|
+
|
|
100
|
+
const TEXT_ENCODER = new TextEncoder();
const TEXT_DECODER = new TextDecoder();
// Advertised to SQLite as mxPathname; paths longer than this are rejected.
const SQLITE_MAX_PATHNAME_BYTES = 64;

// Chunk keys encode the chunk index in 32 bits, so a file can span at most
// 2^32 chunks. At 4 KiB/chunk this yields a hard limit of 16 TiB.
const UINT32_SIZE = 0x100000000;
const MAX_CHUNK_INDEX = 0xffffffff;
const MAX_FILE_SIZE_BYTES = (MAX_CHUNK_INDEX + 1) * CHUNK_SIZE;
// The limit split into high and low 32-bit halves.
const MAX_FILE_SIZE_HI32 = Math.floor(MAX_FILE_SIZE_BYTES / UINT32_SIZE);
const MAX_FILE_SIZE_LO32 = MAX_FILE_SIZE_BYTES % UINT32_SIZE;

// Maximum number of keys the KV backend accepts in a single deleteBatch or putBatch call.
const KV_MAX_BATCH_KEYS = 128;

// -- BATCH_ATOMIC and KV round trip documentation --
//
// KV round trips per actor database lifecycle:
//
// Open (new database):
//   1 putBatch -- xOpen pre-writes EMPTY_DB_PAGE + metadata (2 keys)
//   PRAGMAs are in-memory, 0 KV ops
//
// Open (existing database / wake from sleep):
//   1 get -- xOpen reads metadata to determine file size
//   PRAGMAs are in-memory, 0 KV ops
//
// First SQL operation (e.g., migration CREATE TABLE):
//   1 getBatch -- pager reads page 1 (database header)
//   N getBatch -- pager reads additional pages as needed by the schema
//   1 putBatch -- BATCH_ATOMIC commit (all dirty pages + metadata)
//
// Subsequent writes (warm pager cache):
//   0 reads -- pages served from pager cache
//   1 putBatch -- BATCH_ATOMIC commit
//
// Subsequent reads (warm pager cache):
//   0 reads -- pages served from pager cache
//   0 writes -- SELECT-only, no dirty pages
//
// Large writes (> 127 dirty pages):
//   BATCH_ATOMIC COMMIT returns SQLITE_IOERR, SQLite falls back to
//   journal mode with multiple putBatch calls (each <= 128 keys).
//
// BATCH_ATOMIC requires SQLite's pager to use an in-memory journal.
// The pager only does this when dbSize > 0. For new databases, xOpen
// pre-writes a valid empty page (EMPTY_DB_PAGE) so dbSize is 1 from
// the start. Without this, the first transaction opens a real journal
// file, and locking_mode=EXCLUSIVE prevents it from ever being closed,
// permanently disabling BATCH_ATOMIC.
//
// See scripts/generate-empty-db-page.ts for how EMPTY_DB_PAGE is built.

// BATCH_ATOMIC capability flag returned by xDeviceCharacteristics.
const SQLITE_IOCAP_BATCH_ATOMIC = 0x4000;

// xFileControl opcodes for atomic write bracketing.
const SQLITE_FCNTL_BEGIN_ATOMIC_WRITE = 31;
const SQLITE_FCNTL_COMMIT_ATOMIC_WRITE = 32;
const SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE = 33;

// libvfs captures this async/sync mask at registration time. Any VFS callback
// that returns a Promise must be listed here so SQLite uses async relays.
const SQLITE_ASYNC_METHODS = new Set([
  "xOpen",
  "xClose",
  "xRead",
  "xWrite",
  "xTruncate",
  "xSync",
  "xFileSize",
  "xDelete",
  "xAccess",
  "xFileControl",
]);
|
|
175
|
+
|
|
176
|
+
// Pair returned by loadSqliteRuntime: the high-level SQLite API plus the
// raw Emscripten module that backs it.
interface LoadedSqliteRuntime {
  sqlite3: SQLite3Api;
  module: SQLiteModule;
}
|
|
180
|
+
|
|
181
|
+
function isSqliteEsmFactory(value: unknown): value is SqliteEsmFactory {
|
|
182
|
+
return typeof value === "function";
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
function isSQLiteModule(value: unknown): value is SQLiteModule {
|
|
186
|
+
if (!value || typeof value !== "object") {
|
|
187
|
+
return false;
|
|
188
|
+
}
|
|
189
|
+
const candidate = value as {
|
|
190
|
+
UTF8ToString?: unknown;
|
|
191
|
+
HEAPU8?: unknown;
|
|
192
|
+
};
|
|
193
|
+
return (
|
|
194
|
+
typeof candidate.UTF8ToString === "function" &&
|
|
195
|
+
candidate.HEAPU8 instanceof Uint8Array
|
|
196
|
+
);
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
/**
 * Lazily load and instantiate the async SQLite module for this VFS instance.
 * We do this on first open so actors that do not use SQLite do not pay module
 * parse and wasm initialization cost at startup, and we pass wasmBinary
 * explicitly so this works consistently in both ESM and CJS bundles.
 *
 * @param wasmModule - Optional pre-compiled WebAssembly.Module (e.g. cached
 *   by the pool); when provided, WebAssembly.compile is skipped entirely.
 * @throws Error when the resolved module's default export is not a factory
 *   function, or when the instantiated module lacks the expected surface.
 */
async function loadSqliteRuntime(
  wasmModule?: WebAssembly.Module,
): Promise<LoadedSqliteRuntime> {
  // Resolve relative to cwd via a synthetic require so this works from
  // both ESM and CJS bundles.
  const require = createNodeRequire();
  const sqliteModulePath = require.resolve(
    ["@rivetkit/sqlite", "dist", "wa-sqlite-async.mjs"].join("/"),
  );
  const sqliteModule = await nativeDynamicImport<{ default?: unknown }>(
    pathToFileURL(sqliteModulePath).href,
  );
  if (!isSqliteEsmFactory(sqliteModule.default)) {
    throw new Error("Invalid SQLite ESM factory export");
  }
  const sqliteEsmFactory = sqliteModule.default;

  let module: unknown;
  if (wasmModule) {
    // Use the pre-compiled WebAssembly.Module directly, skipping
    // WebAssembly.compile. The Emscripten instantiateWasm callback lets us
    // provide a module that has already been compiled and cached by the pool.
    module = await sqliteEsmFactory({
      instantiateWasm(
        imports: WebAssembly.Imports,
        receiveInstance: (instance: WebAssembly.Instance) => void,
      ) {
        // NOTE(review): a rejection of this promise is not caught;
        // instantiateWasm has no error channel, so an instantiation
        // failure would surface as an unhandled rejection — confirm
        // this is acceptable for the pool's error handling.
        WebAssembly.instantiate(wasmModule, imports).then(
          (instance) => {
            receiveInstance(instance);
          },
        );
        return {} as WebAssembly.Exports;
      },
    });
  } else {
    // No pre-compiled module: read the .wasm bytes from disk and let the
    // factory compile them itself.
    const sqliteDistPath = "@rivetkit/sqlite/dist/";
    const wasmPath = require.resolve(
      sqliteDistPath + "wa-sqlite-async.wasm",
    );
    const wasmBinary = readFileSync(wasmPath);
    module = await sqliteEsmFactory({ wasmBinary });
  }

  if (!isSQLiteModule(module)) {
    throw new Error("Invalid SQLite runtime module");
  }
  return {
    sqlite3: Factory(module),
    module,
  };
}
|
|
255
|
+
|
|
256
|
+
async function nativeDynamicImport<T>(specifier: string): Promise<T> {
|
|
257
|
+
try {
|
|
258
|
+
return (await import(specifier)) as T;
|
|
259
|
+
} catch (directError) {
|
|
260
|
+
const importer = new Function(
|
|
261
|
+
"moduleSpecifier",
|
|
262
|
+
"return import(moduleSpecifier);",
|
|
263
|
+
) as (moduleSpecifier: string) => Promise<T>;
|
|
264
|
+
try {
|
|
265
|
+
return await importer(specifier);
|
|
266
|
+
} catch {
|
|
267
|
+
throw directError;
|
|
268
|
+
}
|
|
269
|
+
}
|
|
270
|
+
}
|
|
271
|
+
|
|
272
|
+
/**
 * Per-handle state for a file opened through xOpen, tracked in
 * SqliteSystem's #openFiles map keyed by SQLite's file id.
 */
interface OpenFile {
  /** File path */
  path: string;
  /** File kind tag used by compact key layout */
  fileTag: SqliteFileTag;
  /** Precomputed metadata key */
  metaKey: Uint8Array;
  /** File size in bytes */
  size: number;
  /** True when in-memory size has not been persisted yet */
  metaDirty: boolean;
  /** Open flags */
  flags: number;
  /** KV options for this file */
  options: KvVfsOptions;
  /** True while inside a BATCH_ATOMIC write bracket */
  batchMode: boolean;
  /** Buffered dirty pages during batch mode. Key is the chunk index. */
  dirtyBuffer: Map<number, Uint8Array> | null;
  /** File size saved at BEGIN_ATOMIC_WRITE for rollback */
  savedFileSize: number;
}
|
|
297
|
+
|
|
298
|
+
/** Result of mapping a VFS path to its registered KV options and file-kind tag. */
interface ResolvedFile {
  options: KvVfsOptions;
  fileTag: SqliteFileTag;
}
|
|
302
|
+
|
|
303
|
+
/**
|
|
304
|
+
* Encodes file metadata to a Uint8Array using BARE schema
|
|
305
|
+
*/
|
|
306
|
+
function encodeFileMeta(size: number): Uint8Array {
|
|
307
|
+
const meta: FileMeta = { size: BigInt(size) };
|
|
308
|
+
return FILE_META_VERSIONED.serializeWithEmbeddedVersion(
|
|
309
|
+
meta,
|
|
310
|
+
CURRENT_VERSION,
|
|
311
|
+
);
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
/**
|
|
315
|
+
* Decodes file metadata from a Uint8Array using BARE schema
|
|
316
|
+
*/
|
|
317
|
+
function decodeFileMeta(data: Uint8Array): number {
|
|
318
|
+
const meta = FILE_META_VERSIONED.deserializeWithEmbeddedVersion(data);
|
|
319
|
+
return Number(meta.size);
|
|
320
|
+
}
|
|
321
|
+
|
|
322
|
+
function isValidFileSize(size: number): boolean {
|
|
323
|
+
return (
|
|
324
|
+
Number.isSafeInteger(size) && size >= 0 && size <= MAX_FILE_SIZE_BYTES
|
|
325
|
+
);
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
/**
|
|
329
|
+
* Simple async mutex for serializing database operations
|
|
330
|
+
* @rivetkit/sqlite calls are not safe to run concurrently on one module instance
|
|
331
|
+
*/
|
|
332
|
+
class AsyncMutex {
|
|
333
|
+
#locked = false;
|
|
334
|
+
#waiting: (() => void)[] = [];
|
|
335
|
+
|
|
336
|
+
async acquire(): Promise<void> {
|
|
337
|
+
while (this.#locked) {
|
|
338
|
+
await new Promise<void>((resolve) => this.#waiting.push(resolve));
|
|
339
|
+
}
|
|
340
|
+
this.#locked = true;
|
|
341
|
+
}
|
|
342
|
+
|
|
343
|
+
release(): void {
|
|
344
|
+
this.#locked = false;
|
|
345
|
+
const next = this.#waiting.shift();
|
|
346
|
+
if (next) {
|
|
347
|
+
next();
|
|
348
|
+
}
|
|
349
|
+
}
|
|
350
|
+
|
|
351
|
+
async run<T>(fn: () => Promise<T>): Promise<T> {
|
|
352
|
+
await this.acquire();
|
|
353
|
+
try {
|
|
354
|
+
return await fn();
|
|
355
|
+
} finally {
|
|
356
|
+
this.release();
|
|
357
|
+
}
|
|
358
|
+
}
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
/**
|
|
362
|
+
* Database wrapper that provides a simplified SQLite API
|
|
363
|
+
*/
|
|
364
|
+
export class Database implements IDatabase {
|
|
365
|
+
readonly #sqlite3: SQLite3Api;
|
|
366
|
+
readonly #handle: number;
|
|
367
|
+
readonly #fileName: string;
|
|
368
|
+
readonly #onClose: () => Promise<void>;
|
|
369
|
+
readonly #sqliteMutex: AsyncMutex;
|
|
370
|
+
#closed = false;
|
|
371
|
+
|
|
372
|
+
constructor(
|
|
373
|
+
sqlite3: SQLite3Api,
|
|
374
|
+
handle: number,
|
|
375
|
+
fileName: string,
|
|
376
|
+
onClose: () => Promise<void>,
|
|
377
|
+
sqliteMutex: AsyncMutex,
|
|
378
|
+
) {
|
|
379
|
+
this.#sqlite3 = sqlite3;
|
|
380
|
+
this.#handle = handle;
|
|
381
|
+
this.#fileName = fileName;
|
|
382
|
+
this.#onClose = onClose;
|
|
383
|
+
this.#sqliteMutex = sqliteMutex;
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
/**
|
|
387
|
+
* Execute SQL with optional row callback
|
|
388
|
+
* @param sql - SQL statement to execute
|
|
389
|
+
* @param callback - Called for each result row with (row, columns)
|
|
390
|
+
*/
|
|
391
|
+
async exec(
|
|
392
|
+
sql: string,
|
|
393
|
+
callback?: (row: unknown[], columns: string[]) => void,
|
|
394
|
+
): Promise<void> {
|
|
395
|
+
await this.#sqliteMutex.run(async () => {
|
|
396
|
+
await this.#sqlite3.exec(this.#handle, sql, callback);
|
|
397
|
+
});
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
/**
|
|
401
|
+
* Execute a parameterized SQL statement (no result rows)
|
|
402
|
+
* @param sql - SQL statement with ? placeholders
|
|
403
|
+
* @param params - Parameter values to bind
|
|
404
|
+
*/
|
|
405
|
+
async run(sql: string, params?: SqliteBindings): Promise<void> {
|
|
406
|
+
await this.#sqliteMutex.run(async () => {
|
|
407
|
+
for await (const stmt of this.#sqlite3.statements(
|
|
408
|
+
this.#handle,
|
|
409
|
+
sql,
|
|
410
|
+
)) {
|
|
411
|
+
if (params) {
|
|
412
|
+
this.#sqlite3.bind_collection(stmt, params);
|
|
413
|
+
}
|
|
414
|
+
while ((await this.#sqlite3.step(stmt)) === SQLITE_ROW) {
|
|
415
|
+
// Consume rows for statements that return results.
|
|
416
|
+
}
|
|
417
|
+
}
|
|
418
|
+
});
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
/**
|
|
422
|
+
* Execute a parameterized SQL query and return results
|
|
423
|
+
* @param sql - SQL query with ? placeholders
|
|
424
|
+
* @param params - Parameter values to bind
|
|
425
|
+
* @returns Object with rows (array of arrays) and columns (column names)
|
|
426
|
+
*/
|
|
427
|
+
async query(
|
|
428
|
+
sql: string,
|
|
429
|
+
params?: SqliteBindings,
|
|
430
|
+
): Promise<{ rows: unknown[][]; columns: string[] }> {
|
|
431
|
+
return this.#sqliteMutex.run(async () => {
|
|
432
|
+
const rows: unknown[][] = [];
|
|
433
|
+
let columns: string[] = [];
|
|
434
|
+
for await (const stmt of this.#sqlite3.statements(
|
|
435
|
+
this.#handle,
|
|
436
|
+
sql,
|
|
437
|
+
)) {
|
|
438
|
+
if (params) {
|
|
439
|
+
this.#sqlite3.bind_collection(stmt, params);
|
|
440
|
+
}
|
|
441
|
+
|
|
442
|
+
while ((await this.#sqlite3.step(stmt)) === SQLITE_ROW) {
|
|
443
|
+
if (columns.length === 0) {
|
|
444
|
+
columns = this.#sqlite3.column_names(stmt);
|
|
445
|
+
}
|
|
446
|
+
rows.push(this.#sqlite3.row(stmt));
|
|
447
|
+
}
|
|
448
|
+
}
|
|
449
|
+
|
|
450
|
+
return { rows, columns };
|
|
451
|
+
});
|
|
452
|
+
}
|
|
453
|
+
|
|
454
|
+
/**
|
|
455
|
+
* Close the database
|
|
456
|
+
*/
|
|
457
|
+
async close(): Promise<void> {
|
|
458
|
+
if (this.#closed) {
|
|
459
|
+
return;
|
|
460
|
+
}
|
|
461
|
+
this.#closed = true;
|
|
462
|
+
|
|
463
|
+
await this.#sqliteMutex.run(async () => {
|
|
464
|
+
await this.#sqlite3.close(this.#handle);
|
|
465
|
+
});
|
|
466
|
+
await this.#onClose();
|
|
467
|
+
}
|
|
468
|
+
|
|
469
|
+
/**
|
|
470
|
+
* Get the database file name
|
|
471
|
+
*/
|
|
472
|
+
get fileName(): string {
|
|
473
|
+
return this.#fileName;
|
|
474
|
+
}
|
|
475
|
+
|
|
476
|
+
/**
|
|
477
|
+
* Get the raw @rivetkit/sqlite API (for advanced usage)
|
|
478
|
+
*/
|
|
479
|
+
get sqlite3(): SQLite3Api {
|
|
480
|
+
return this.#sqlite3;
|
|
481
|
+
}
|
|
482
|
+
|
|
483
|
+
/**
|
|
484
|
+
* Get the raw database handle (for advanced usage)
|
|
485
|
+
*/
|
|
486
|
+
get handle(): number {
|
|
487
|
+
return this.#handle;
|
|
488
|
+
}
|
|
489
|
+
}
|
|
490
|
+
|
|
491
|
+
/**
|
|
492
|
+
* SQLite VFS backed by KV storage.
|
|
493
|
+
*
|
|
494
|
+
* Each instance is independent and has its own @rivetkit/sqlite WASM module.
|
|
495
|
+
* This allows multiple instances to operate concurrently without interference.
|
|
496
|
+
*/
|
|
497
|
+
export class SqliteVfs implements ISqliteVfs {
|
|
498
|
+
#sqlite3: SQLite3Api | null = null;
|
|
499
|
+
#sqliteSystem: SqliteSystem | null = null;
|
|
500
|
+
#initPromise: Promise<void> | null = null;
|
|
501
|
+
#openMutex = new AsyncMutex();
|
|
502
|
+
#sqliteMutex = new AsyncMutex();
|
|
503
|
+
#instanceId: string;
|
|
504
|
+
#destroyed = false;
|
|
505
|
+
#openDatabases: Set<Database> = new Set();
|
|
506
|
+
#wasmModule?: WebAssembly.Module;
|
|
507
|
+
|
|
508
|
+
constructor(wasmModule?: WebAssembly.Module) {
|
|
509
|
+
// Generate unique instance ID for VFS name
|
|
510
|
+
this.#instanceId = crypto.randomUUID().replace(/-/g, "").slice(0, 8);
|
|
511
|
+
this.#wasmModule = wasmModule;
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
/**
|
|
515
|
+
* Initialize @rivetkit/sqlite and VFS (called once per instance)
|
|
516
|
+
*/
|
|
517
|
+
async #ensureInitialized(): Promise<void> {
|
|
518
|
+
if (this.#destroyed) {
|
|
519
|
+
throw new Error("SqliteVfs is closed");
|
|
520
|
+
}
|
|
521
|
+
|
|
522
|
+
// Fast path: already initialized
|
|
523
|
+
if (this.#sqlite3 && this.#sqliteSystem) {
|
|
524
|
+
return;
|
|
525
|
+
}
|
|
526
|
+
|
|
527
|
+
// Synchronously create the promise if not started
|
|
528
|
+
if (!this.#initPromise) {
|
|
529
|
+
this.#initPromise = (async () => {
|
|
530
|
+
const { sqlite3, module } = await loadSqliteRuntime(
|
|
531
|
+
this.#wasmModule,
|
|
532
|
+
);
|
|
533
|
+
if (this.#destroyed) {
|
|
534
|
+
return;
|
|
535
|
+
}
|
|
536
|
+
this.#sqlite3 = sqlite3;
|
|
537
|
+
this.#sqliteSystem = new SqliteSystem(
|
|
538
|
+
sqlite3,
|
|
539
|
+
module,
|
|
540
|
+
`kv-vfs-${this.#instanceId}`,
|
|
541
|
+
);
|
|
542
|
+
this.#sqliteSystem.register();
|
|
543
|
+
})();
|
|
544
|
+
}
|
|
545
|
+
|
|
546
|
+
// Wait for initialization
|
|
547
|
+
try {
|
|
548
|
+
await this.#initPromise;
|
|
549
|
+
} catch (error) {
|
|
550
|
+
this.#initPromise = null;
|
|
551
|
+
throw error;
|
|
552
|
+
}
|
|
553
|
+
}
|
|
554
|
+
|
|
555
|
+
/**
|
|
556
|
+
* Open a SQLite database using KV storage backend
|
|
557
|
+
*
|
|
558
|
+
* @param fileName - The database file name (typically the actor ID)
|
|
559
|
+
* @param options - KV storage operations for this database
|
|
560
|
+
* @returns A Database instance
|
|
561
|
+
*/
|
|
562
|
+
async open(fileName: string, options: KvVfsOptions): Promise<IDatabase> {
|
|
563
|
+
if (this.#destroyed) {
|
|
564
|
+
throw new Error("SqliteVfs is closed");
|
|
565
|
+
}
|
|
566
|
+
|
|
567
|
+
// Serialize all open operations within this instance
|
|
568
|
+
await this.#openMutex.acquire();
|
|
569
|
+
try {
|
|
570
|
+
// Reject double-open of the same fileName. Two handles to the same
|
|
571
|
+
// file would have separate pager caches and no real locking
|
|
572
|
+
// (xLock/xUnlock are no-ops), causing silent data corruption.
|
|
573
|
+
for (const db of this.#openDatabases) {
|
|
574
|
+
if (db.fileName === fileName) {
|
|
575
|
+
throw new Error(
|
|
576
|
+
`SqliteVfs: fileName "${fileName}" is already open on this instance`,
|
|
577
|
+
);
|
|
578
|
+
}
|
|
579
|
+
}
|
|
580
|
+
|
|
581
|
+
// Initialize @rivetkit/sqlite and SqliteSystem on first call
|
|
582
|
+
await this.#ensureInitialized();
|
|
583
|
+
|
|
584
|
+
if (!this.#sqlite3 || !this.#sqliteSystem) {
|
|
585
|
+
throw new Error("Failed to initialize SQLite");
|
|
586
|
+
}
|
|
587
|
+
const sqlite3 = this.#sqlite3;
|
|
588
|
+
const sqliteSystem = this.#sqliteSystem;
|
|
589
|
+
|
|
590
|
+
// Register this filename with its KV options
|
|
591
|
+
sqliteSystem.registerFile(fileName, options);
|
|
592
|
+
|
|
593
|
+
// Open database
|
|
594
|
+
const db = await this.#sqliteMutex.run(async () =>
|
|
595
|
+
sqlite3.open_v2(
|
|
596
|
+
fileName,
|
|
597
|
+
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
|
|
598
|
+
sqliteSystem.name,
|
|
599
|
+
),
|
|
600
|
+
);
|
|
601
|
+
// Single-writer optimizations for KV-backed SQLite. Each actor owns
|
|
602
|
+
// its database exclusively. BATCH_ATOMIC batches dirty pages into a
|
|
603
|
+
// single putBatch call instead of 5-7 individual KV round trips per
|
|
604
|
+
// write transaction.
|
|
605
|
+
//
|
|
606
|
+
// BATCH_ATOMIC requires an in-memory journal, which SQLite only uses
|
|
607
|
+
// when dbSize > 0. The xOpen handler pre-writes a valid empty page 1
|
|
608
|
+
// for new databases so this condition is satisfied from the start.
|
|
609
|
+
// See xOpen and scripts/generate-empty-db-page.ts for details.
|
|
610
|
+
await this.#sqliteMutex.run(async () => {
|
|
611
|
+
await sqlite3.exec(db, "PRAGMA page_size = 4096");
|
|
612
|
+
await sqlite3.exec(db, "PRAGMA journal_mode = DELETE");
|
|
613
|
+
await sqlite3.exec(db, "PRAGMA synchronous = NORMAL");
|
|
614
|
+
await sqlite3.exec(db, "PRAGMA temp_store = MEMORY");
|
|
615
|
+
await sqlite3.exec(db, "PRAGMA auto_vacuum = NONE");
|
|
616
|
+
await sqlite3.exec(db, "PRAGMA locking_mode = EXCLUSIVE");
|
|
617
|
+
});
|
|
618
|
+
|
|
619
|
+
// Wrap unregistration under #openMutex so it serializes with
|
|
620
|
+
// registerFile and prevents interleaving when short names recycle.
|
|
621
|
+
const onClose = async () => {
|
|
622
|
+
this.#openDatabases.delete(database);
|
|
623
|
+
await this.#openMutex.run(async () => {
|
|
624
|
+
sqliteSystem.unregisterFile(fileName);
|
|
625
|
+
});
|
|
626
|
+
};
|
|
627
|
+
|
|
628
|
+
const database = new Database(
|
|
629
|
+
sqlite3,
|
|
630
|
+
db,
|
|
631
|
+
fileName,
|
|
632
|
+
onClose,
|
|
633
|
+
this.#sqliteMutex,
|
|
634
|
+
);
|
|
635
|
+
this.#openDatabases.add(database);
|
|
636
|
+
|
|
637
|
+
return database;
|
|
638
|
+
} finally {
|
|
639
|
+
this.#openMutex.release();
|
|
640
|
+
}
|
|
641
|
+
}
|
|
642
|
+
|
|
643
|
+
/**
|
|
644
|
+
* Force-close all Database handles whose fileName exactly matches the
|
|
645
|
+
* given name. Snapshots the set to an array before iterating to avoid
|
|
646
|
+
* mutation during async iteration.
|
|
647
|
+
*
|
|
648
|
+
* Uses exact file name match because short names are numeric strings
|
|
649
|
+
* ('0', '1', ..., '10', '11', ...) and a prefix match like
|
|
650
|
+
* startsWith('1') would incorrectly match '10', '11', etc., causing
|
|
651
|
+
* cross-actor corruption. Sidecar files (-journal, -wal, -shm) are not
|
|
652
|
+
* tracked as separate Database handles, so prefix matching for sidecars
|
|
653
|
+
* is not needed.
|
|
654
|
+
*/
|
|
655
|
+
async forceCloseByFileName(
|
|
656
|
+
fileName: string,
|
|
657
|
+
): Promise<{ allSucceeded: boolean }> {
|
|
658
|
+
const snapshot = [...this.#openDatabases];
|
|
659
|
+
let allSucceeded = true;
|
|
660
|
+
for (const db of snapshot) {
|
|
661
|
+
if (db.fileName === fileName) {
|
|
662
|
+
try {
|
|
663
|
+
await db.close();
|
|
664
|
+
} catch {
|
|
665
|
+
allSucceeded = false;
|
|
666
|
+
// When close fails, onClose never fires, leaving orphaned
|
|
667
|
+
// entries in #openDatabases and #registeredFiles. Clean up
|
|
668
|
+
// manually so stale registrations don't accumulate.
|
|
669
|
+
this.#openDatabases.delete(db);
|
|
670
|
+
const sqliteSystem = this.#sqliteSystem;
|
|
671
|
+
if (sqliteSystem) {
|
|
672
|
+
await this.#openMutex.run(async () => {
|
|
673
|
+
sqliteSystem.unregisterFile(db.fileName);
|
|
674
|
+
});
|
|
675
|
+
}
|
|
676
|
+
}
|
|
677
|
+
}
|
|
678
|
+
}
|
|
679
|
+
return { allSucceeded };
|
|
680
|
+
}
|
|
681
|
+
|
|
682
|
+
/**
|
|
683
|
+
* Force-close all open Database handles. Best-effort: errors are
|
|
684
|
+
* swallowed so this is safe to call during instance teardown.
|
|
685
|
+
*/
|
|
686
|
+
async forceCloseAll(): Promise<void> {
|
|
687
|
+
const snapshot = [...this.#openDatabases];
|
|
688
|
+
for (const db of snapshot) {
|
|
689
|
+
try {
|
|
690
|
+
await db.close();
|
|
691
|
+
} catch {
|
|
692
|
+
// Best-effort teardown. Swallow errors.
|
|
693
|
+
}
|
|
694
|
+
}
|
|
695
|
+
}
|
|
696
|
+
|
|
697
|
+
/**
|
|
698
|
+
* Tears down this VFS instance and releases internal references.
|
|
699
|
+
*/
|
|
700
|
+
async destroy(): Promise<void> {
|
|
701
|
+
if (this.#destroyed) {
|
|
702
|
+
return;
|
|
703
|
+
}
|
|
704
|
+
this.#destroyed = true;
|
|
705
|
+
|
|
706
|
+
const initPromise = this.#initPromise;
|
|
707
|
+
if (initPromise) {
|
|
708
|
+
try {
|
|
709
|
+
await initPromise;
|
|
710
|
+
} catch {
|
|
711
|
+
// Initialization failure already surfaced to caller.
|
|
712
|
+
}
|
|
713
|
+
}
|
|
714
|
+
|
|
715
|
+
if (this.#sqliteSystem) {
|
|
716
|
+
await this.#sqliteSystem.close();
|
|
717
|
+
}
|
|
718
|
+
|
|
719
|
+
this.#sqliteSystem = null;
|
|
720
|
+
this.#sqlite3 = null;
|
|
721
|
+
this.#initPromise = null;
|
|
722
|
+
}
|
|
723
|
+
|
|
724
|
+
/**
|
|
725
|
+
* Alias for destroy to align with DB-style lifecycle naming.
|
|
726
|
+
*/
|
|
727
|
+
async close(): Promise<void> {
|
|
728
|
+
await this.destroy();
|
|
729
|
+
}
|
|
730
|
+
}
|
|
731
|
+
|
|
732
|
+
/**
 * Internal VFS implementation: receives SQLite's VFS callbacks and maps
 * byte-range file I/O onto KV chunk operations.
 */
class SqliteSystem implements SqliteVfsRegistration {
  readonly name: string;
  // NOTE(review): both capitalizations are exposed — presumably to satisfy
  // differing VFS registration shapes; confirm which one libvfs reads.
  readonly mxPathName = SQLITE_MAX_PATHNAME_BYTES;
  readonly mxPathname = SQLITE_MAX_PATHNAME_BYTES;
  /** KV options per registered base file name (set before xOpen runs). */
  readonly #registeredFiles: Map<string, KvVfsOptions> = new Map();
  /** Open-file state keyed by SQLite's file id. */
  readonly #openFiles: Map<number, OpenFile> = new Map();
  readonly #sqlite3: SQLite3Api;
  readonly #module: SQLiteModule;
  // Cached DataView over the wasm heap. The buffer reference is kept so a
  // stale view can be detected if the heap is replaced (e.g. on growth) —
  // the refresh logic is outside this chunk; TODO confirm.
  #heapDataView: DataView;
  #heapDataViewBuffer: ArrayBufferLike;

  constructor(sqlite3: SQLite3Api, module: SQLiteModule, name: string) {
    this.name = name;
    this.#sqlite3 = sqlite3;
    this.#module = module;
    this.#heapDataViewBuffer = module.HEAPU8.buffer;
    this.#heapDataView = new DataView(this.#heapDataViewBuffer);
  }
|
|
753
|
+
|
|
754
|
+
  /**
   * Drops all per-file state. Does not close SQLite database handles —
   * callers (SqliteVfs.destroy) close databases before calling this.
   */
  async close(): Promise<void> {
    this.#openFiles.clear();
    this.#registeredFiles.clear();
  }
|
|
758
|
+
|
|
759
|
+
  /** Readiness probe for the VFS registration; this VFS has no async setup of its own. */
  isReady(): boolean {
    return true;
  }
|
|
762
|
+
|
|
763
|
+
  /**
   * Tells libvfs which callbacks return Promises so it installs async
   * relays for them; the mask is captured at registration time.
   */
  hasAsyncMethod(methodName: string): boolean {
    return SQLITE_ASYNC_METHODS.has(methodName);
  }
|
|
766
|
+
|
|
767
|
+
  /**
   * Registers the VFS with SQLite (false: do not install as the
   * process-wide default VFS).
   */
  register(): void {
    this.#sqlite3.vfs_register(this, false);
  }
|
|
773
|
+
|
|
774
|
+
  /**
   * Registers a file with its KV options (before opening). Re-registering
   * the same name overwrites the previous options.
   */
  registerFile(fileName: string, options: KvVfsOptions): void {
    this.#registeredFiles.set(fileName, options);
  }
|
|
780
|
+
|
|
781
|
+
  /**
   * Unregisters a file's KV options (after closing). A no-op when the
   * name was never registered.
   */
  unregisterFile(fileName: string): void {
    this.#registeredFiles.delete(fileName);
  }
|
|
787
|
+
|
|
788
|
+
/**
 * Resolve a file path to a registered database file or one of its SQLite
 * sidecars (-journal, -wal, -shm). File tags are reused across files
 * because each file's KvVfsOptions routes to a separate KV namespace.
 * Returns null when the path matches no registered base name.
 */
#resolveFile(path: string): ResolvedFile | null {
	// Fast path: the path is a registered main database file.
	const directOptions = this.#registeredFiles.get(path);
	if (directOptions) {
		return { options: directOptions, fileTag: FILE_TAG_MAIN };
	}

	// Otherwise the path may be a SQLite sidecar. Map the recognized
	// suffix to its file tag, then look up the base database name.
	let suffix: string;
	let fileTag: ResolvedFile["fileTag"];
	if (path.endsWith("-journal")) {
		suffix = "-journal";
		fileTag = FILE_TAG_JOURNAL;
	} else if (path.endsWith("-wal")) {
		suffix = "-wal";
		fileTag = FILE_TAG_WAL;
	} else if (path.endsWith("-shm")) {
		suffix = "-shm";
		fileTag = FILE_TAG_SHM;
	} else {
		return null;
	}

	const baseName = path.slice(0, -suffix.length);
	const options = this.#registeredFiles.get(baseName);
	return options ? { options, fileTag } : null;
}
|
|
823
|
+
|
|
824
|
+
/**
 * Like #resolveFile, but raises a descriptive error instead of
 * returning null when the path cannot be matched.
 */
#resolveFileOrThrow(path: string): ResolvedFile {
	const resolved = this.#resolveFile(path);
	if (resolved !== null) {
		return resolved;
	}

	// Distinguish "nothing registered at all" from "path matches no
	// registered base name" to make misconfiguration easy to diagnose.
	if (this.#registeredFiles.size === 0) {
		throw new Error(`No KV options registered for file: ${path}`);
	}

	const registered = [...this.#registeredFiles.keys()].join(", ");
	throw new Error(
		`Unsupported SQLite file path ${path}. Registered base names: ${registered}.`,
	);
}
|
|
839
|
+
|
|
840
|
+
// Builds the KV key for a chunk of an open file from its tag and index.
#chunkKey(file: OpenFile, chunkIndex: number): Uint8Array {
	return getChunkKey(file.fileTag, chunkIndex);
}
|
|
843
|
+
|
|
844
|
+
/**
 * SQLite xOpen callback. Resolves the path to its registered KV options,
 * loads (or creates) the file's size metadata, and records an OpenFile
 * entry keyed by SQLite's file id.
 *
 * Returns SQLITE_CANTOPEN when the name cannot be decoded, when a KV
 * operation fails, or when the file does not exist and SQLITE_OPEN_CREATE
 * is not set; SQLITE_IOERR when stored metadata decodes to an invalid size.
 */
async xOpen(
	_pVfs: number,
	zName: number,
	fileId: number,
	flags: number,
	pOutFlags: number,
): Promise<number> {
	const path = this.#decodeFilename(zName, flags);
	if (!path) {
		return VFS.SQLITE_CANTOPEN;
	}

	// Get the registered KV options for this file.
	// For journal/wal files, use the main database's options.
	const { options, fileTag } = this.#resolveFileOrThrow(path);
	const metaKey = getMetaKey(fileTag);

	// Get existing file size if the file exists.
	let sizeData: Uint8Array | null;
	try {
		sizeData = await options.get(metaKey);
	} catch (error) {
		options.onError?.(error);
		return VFS.SQLITE_CANTOPEN;
	}

	let size: number;

	if (sizeData) {
		// File exists, use existing size.
		size = decodeFileMeta(sizeData);
		if (!isValidFileSize(size)) {
			return VFS.SQLITE_IOERR;
		}
	} else if (flags & VFS.SQLITE_OPEN_CREATE) {
		if (fileTag === FILE_TAG_MAIN) {
			// Pre-write a valid empty database page so SQLite sees
			// dbSize > 0 on first read. This enables BATCH_ATOMIC
			// from the very first write transaction. Without this,
			// SQLite's pager opens a real journal file for the first
			// write (because jrnlBufferSize returns a positive value
			// when dbSize == 0), and with locking_mode=EXCLUSIVE that
			// real journal is never closed, permanently disabling
			// batch atomic writes.
			//
			// The page is generated by scripts/generate-empty-header.ts
			// using the same wa-sqlite WASM binary we ship.
			const chunkKey = getChunkKey(fileTag, 0);
			size = EMPTY_DB_PAGE.length;
			try {
				// Write page and metadata together so a crash cannot
				// leave metadata pointing at a missing first chunk.
				await options.putBatch([
					[chunkKey, EMPTY_DB_PAGE],
					[metaKey, encodeFileMeta(size)],
				]);
			} catch (error) {
				options.onError?.(error);
				return VFS.SQLITE_CANTOPEN;
			}
		} else {
			// Sidecar files (journal, WAL, SHM) start empty.
			size = 0;
			try {
				await options.put(metaKey, encodeFileMeta(size));
			} catch (error) {
				options.onError?.(error);
				return VFS.SQLITE_CANTOPEN;
			}
		}
	} else {
		// File doesn't exist and we're not creating it.
		return VFS.SQLITE_CANTOPEN;
	}

	// Store open file info with options.
	this.#openFiles.set(fileId, {
		path,
		fileTag,
		metaKey,
		size,
		metaDirty: false,
		flags,
		options,
		batchMode: false,
		dirtyBuffer: null,
		savedFileSize: 0,
	});

	// Set output flags to the actual flags used.
	this.#writeInt32(pOutFlags, flags);

	return VFS.SQLITE_OK;
}
|
|
936
|
+
|
|
937
|
+
/**
 * SQLite xClose callback. Flushes dirty size metadata (or deletes the
 * file entirely for SQLITE_OPEN_DELETEONCLOSE handles) and then removes
 * the handle. The handle is removed even when the KV flush fails.
 */
async xClose(fileId: number): Promise<number> {
	const file = this.#openFiles.get(fileId);
	if (!file) {
		// Unknown handle: treat as already closed.
		return VFS.SQLITE_OK;
	}

	try {
		// Delete-on-close files should skip metadata flush because the file
		// will be removed immediately.
		if (file.flags & VFS.SQLITE_OPEN_DELETEONCLOSE) {
			await this.#delete(file.path);
		} else if (file.metaDirty) {
			await file.options.put(file.metaKey, encodeFileMeta(file.size));
			file.metaDirty = false;
		}
	} catch (error) {
		// Always clean up the file handle even if the KV operation fails.
		file.options.onError?.(error);
		this.#openFiles.delete(fileId);
		return VFS.SQLITE_IOERR;
	}

	this.#openFiles.delete(fileId);
	return VFS.SQLITE_OK;
}
|
|
962
|
+
|
|
963
|
+
/**
 * SQLite xRead callback. Maps the requested byte range onto fixed-size
 * KV chunks, serves chunks from the batch-mode dirty buffer when present,
 * fetches the rest with one getBatch, and assembles the result into the
 * WASM heap at pData. Reads past EOF zero-fill and return
 * SQLITE_IOERR_SHORT_READ per the SQLite VFS contract.
 */
async xRead(
	fileId: number,
	pData: number,
	iAmt: number,
	iOffsetLo: number,
	iOffsetHi: number,
): Promise<number> {
	if (iAmt === 0) {
		return VFS.SQLITE_OK;
	}

	const file = this.#openFiles.get(fileId);
	if (!file) {
		return VFS.SQLITE_IOERR_READ;
	}

	let data = this.#module.HEAPU8.subarray(pData, pData + iAmt);
	const options = file.options;
	const requestedLength = iAmt;
	// Rebuild the 64-bit offset from Emscripten's (lo, hi) pair;
	// delegalize returns -1 for out-of-range values.
	const iOffset = delegalize(iOffsetLo, iOffsetHi);
	if (iOffset < 0) {
		return VFS.SQLITE_IOERR_READ;
	}
	const fileSize = file.size;

	// If offset is beyond file size, return short read with zeroed buffer.
	if (iOffset >= fileSize) {
		data.fill(0);
		return VFS.SQLITE_IOERR_SHORT_READ;
	}

	// Calculate which chunks we need to read.
	const startChunk = Math.floor(iOffset / CHUNK_SIZE);
	const endChunk = Math.floor(
		(iOffset + requestedLength - 1) / CHUNK_SIZE,
	);

	// Fetch needed chunks, checking dirty buffer first in batch mode.
	const chunkKeys: Uint8Array[] = [];
	const chunkIndexToBuffered: Map<number, Uint8Array> = new Map();
	for (let i = startChunk; i <= endChunk; i++) {
		// In batch mode, serve from dirty buffer if available.
		if (file.batchMode && file.dirtyBuffer) {
			const buffered = file.dirtyBuffer.get(i);
			if (buffered) {
				chunkIndexToBuffered.set(i, buffered);
				continue;
			}
		}
		chunkKeys.push(this.#chunkKey(file, i));
	}

	let kvChunks: (Uint8Array | null)[];
	try {
		kvChunks =
			chunkKeys.length > 0 ? await options.getBatch(chunkKeys) : [];
	} catch (error) {
		options.onError?.(error);
		return VFS.SQLITE_IOERR_READ;
	}

	// Re-read HEAPU8 after await to defend against buffer detachment
	// from memory.grow() that may have occurred during getBatch.
	data = this.#module.HEAPU8.subarray(pData, pData + iAmt);

	// Copy data from chunks to output buffer. kvIdx only advances for
	// chunks that were fetched from KV (?? short-circuits for buffered).
	let kvIdx = 0;
	for (let i = startChunk; i <= endChunk; i++) {
		const chunkData = chunkIndexToBuffered.get(i) ?? kvChunks[kvIdx++];
		const chunkOffset = i * CHUNK_SIZE;

		// Calculate the range within this chunk.
		const readStart = Math.max(0, iOffset - chunkOffset);
		const readEnd = Math.min(
			CHUNK_SIZE,
			iOffset + requestedLength - chunkOffset,
		);

		if (chunkData) {
			// Copy available data.
			const sourceStart = readStart;
			const sourceEnd = Math.min(readEnd, chunkData.length);
			const destStart = chunkOffset + readStart - iOffset;

			if (sourceEnd > sourceStart) {
				data.set(
					chunkData.subarray(sourceStart, sourceEnd),
					destStart,
				);
			}

			// Zero-fill if chunk is smaller than expected.
			if (sourceEnd < readEnd) {
				const zeroStart = destStart + (sourceEnd - sourceStart);
				const zeroEnd = destStart + (readEnd - readStart);
				data.fill(0, zeroStart, zeroEnd);
			}
		} else {
			// Chunk doesn't exist, zero-fill.
			const destStart = chunkOffset + readStart - iOffset;
			const destEnd = destStart + (readEnd - readStart);
			data.fill(0, destStart, destEnd);
		}
	}

	// If we read less than requested (past EOF), return short read.
	const actualBytes = Math.min(requestedLength, fileSize - iOffset);
	if (actualBytes < requestedLength) {
		data.fill(0, actualBytes);
		return VFS.SQLITE_IOERR_SHORT_READ;
	}

	return VFS.SQLITE_OK;
}
|
|
1077
|
+
|
|
1078
|
+
async xWrite(
|
|
1079
|
+
fileId: number,
|
|
1080
|
+
pData: number,
|
|
1081
|
+
iAmt: number,
|
|
1082
|
+
iOffsetLo: number,
|
|
1083
|
+
iOffsetHi: number,
|
|
1084
|
+
): Promise<number> {
|
|
1085
|
+
if (iAmt === 0) {
|
|
1086
|
+
return VFS.SQLITE_OK;
|
|
1087
|
+
}
|
|
1088
|
+
|
|
1089
|
+
const file = this.#openFiles.get(fileId);
|
|
1090
|
+
if (!file) {
|
|
1091
|
+
return VFS.SQLITE_IOERR_WRITE;
|
|
1092
|
+
}
|
|
1093
|
+
|
|
1094
|
+
let data = this.#module.HEAPU8.subarray(pData, pData + iAmt);
|
|
1095
|
+
const iOffset = delegalize(iOffsetLo, iOffsetHi);
|
|
1096
|
+
if (iOffset < 0) {
|
|
1097
|
+
return VFS.SQLITE_IOERR_WRITE;
|
|
1098
|
+
}
|
|
1099
|
+
const options = file.options;
|
|
1100
|
+
const writeLength = iAmt;
|
|
1101
|
+
const writeEndOffset = iOffset + writeLength;
|
|
1102
|
+
if (writeEndOffset > MAX_FILE_SIZE_BYTES) {
|
|
1103
|
+
return VFS.SQLITE_IOERR_WRITE;
|
|
1104
|
+
}
|
|
1105
|
+
|
|
1106
|
+
// Calculate which chunks we need to modify
|
|
1107
|
+
const startChunk = Math.floor(iOffset / CHUNK_SIZE);
|
|
1108
|
+
const endChunk = Math.floor((iOffset + writeLength - 1) / CHUNK_SIZE);
|
|
1109
|
+
|
|
1110
|
+
// Batch mode: buffer pages in dirtyBuffer instead of writing to KV.
|
|
1111
|
+
// COMMIT_ATOMIC_WRITE flushes the buffer in a single putBatch.
|
|
1112
|
+
if (file.batchMode && file.dirtyBuffer) {
|
|
1113
|
+
for (let i = startChunk; i <= endChunk; i++) {
|
|
1114
|
+
const chunkOffset = i * CHUNK_SIZE;
|
|
1115
|
+
const sourceStart = Math.max(0, chunkOffset - iOffset);
|
|
1116
|
+
const sourceEnd = Math.min(
|
|
1117
|
+
writeLength,
|
|
1118
|
+
chunkOffset + CHUNK_SIZE - iOffset,
|
|
1119
|
+
);
|
|
1120
|
+
// .slice() creates an independent copy that won't be
|
|
1121
|
+
// invalidated by memory.grow() after an await.
|
|
1122
|
+
file.dirtyBuffer.set(
|
|
1123
|
+
i,
|
|
1124
|
+
data.subarray(sourceStart, sourceEnd).slice(),
|
|
1125
|
+
);
|
|
1126
|
+
}
|
|
1127
|
+
|
|
1128
|
+
// Update file size if write extends the file
|
|
1129
|
+
const newSize = Math.max(file.size, writeEndOffset);
|
|
1130
|
+
if (newSize !== file.size) {
|
|
1131
|
+
file.size = newSize;
|
|
1132
|
+
file.metaDirty = true;
|
|
1133
|
+
}
|
|
1134
|
+
|
|
1135
|
+
return VFS.SQLITE_OK;
|
|
1136
|
+
}
|
|
1137
|
+
|
|
1138
|
+
interface WritePlan {
|
|
1139
|
+
chunkKey: Uint8Array;
|
|
1140
|
+
chunkOffset: number;
|
|
1141
|
+
writeStart: number;
|
|
1142
|
+
writeEnd: number;
|
|
1143
|
+
existingChunkIndex: number;
|
|
1144
|
+
}
|
|
1145
|
+
|
|
1146
|
+
// Only fetch chunks where we must preserve existing prefix/suffix bytes.
|
|
1147
|
+
const plans: WritePlan[] = [];
|
|
1148
|
+
const chunkKeysToFetch: Uint8Array[] = [];
|
|
1149
|
+
for (let i = startChunk; i <= endChunk; i++) {
|
|
1150
|
+
const chunkOffset = i * CHUNK_SIZE;
|
|
1151
|
+
const writeStart = Math.max(0, iOffset - chunkOffset);
|
|
1152
|
+
const writeEnd = Math.min(
|
|
1153
|
+
CHUNK_SIZE,
|
|
1154
|
+
iOffset + writeLength - chunkOffset,
|
|
1155
|
+
);
|
|
1156
|
+
const existingBytesInChunk = Math.max(
|
|
1157
|
+
0,
|
|
1158
|
+
Math.min(CHUNK_SIZE, file.size - chunkOffset),
|
|
1159
|
+
);
|
|
1160
|
+
const needsExisting =
|
|
1161
|
+
writeStart > 0 || existingBytesInChunk > writeEnd;
|
|
1162
|
+
const chunkKey = this.#chunkKey(file, i);
|
|
1163
|
+
let existingChunkIndex = -1;
|
|
1164
|
+
if (needsExisting) {
|
|
1165
|
+
existingChunkIndex = chunkKeysToFetch.length;
|
|
1166
|
+
chunkKeysToFetch.push(chunkKey);
|
|
1167
|
+
}
|
|
1168
|
+
plans.push({
|
|
1169
|
+
chunkKey,
|
|
1170
|
+
chunkOffset,
|
|
1171
|
+
writeStart,
|
|
1172
|
+
writeEnd,
|
|
1173
|
+
existingChunkIndex,
|
|
1174
|
+
});
|
|
1175
|
+
}
|
|
1176
|
+
|
|
1177
|
+
let existingChunks: (Uint8Array | null)[];
|
|
1178
|
+
try {
|
|
1179
|
+
existingChunks =
|
|
1180
|
+
chunkKeysToFetch.length > 0
|
|
1181
|
+
? await options.getBatch(chunkKeysToFetch)
|
|
1182
|
+
: [];
|
|
1183
|
+
} catch (error) {
|
|
1184
|
+
options.onError?.(error);
|
|
1185
|
+
return VFS.SQLITE_IOERR_WRITE;
|
|
1186
|
+
}
|
|
1187
|
+
|
|
1188
|
+
// Re-read HEAPU8 after await to defend against buffer detachment
|
|
1189
|
+
// from memory.grow() that may have occurred during getBatch.
|
|
1190
|
+
data = this.#module.HEAPU8.subarray(pData, pData + iAmt);
|
|
1191
|
+
|
|
1192
|
+
// Prepare new chunk data
|
|
1193
|
+
const entriesToWrite: [Uint8Array, Uint8Array][] = [];
|
|
1194
|
+
|
|
1195
|
+
for (const plan of plans) {
|
|
1196
|
+
const existingChunk =
|
|
1197
|
+
plan.existingChunkIndex >= 0
|
|
1198
|
+
? existingChunks[plan.existingChunkIndex]
|
|
1199
|
+
: null;
|
|
1200
|
+
// Create new chunk data
|
|
1201
|
+
let newChunk: Uint8Array;
|
|
1202
|
+
if (existingChunk) {
|
|
1203
|
+
newChunk = new Uint8Array(
|
|
1204
|
+
Math.max(existingChunk.length, plan.writeEnd),
|
|
1205
|
+
);
|
|
1206
|
+
newChunk.set(existingChunk);
|
|
1207
|
+
} else {
|
|
1208
|
+
newChunk = new Uint8Array(plan.writeEnd);
|
|
1209
|
+
}
|
|
1210
|
+
|
|
1211
|
+
// Copy data from input buffer to chunk
|
|
1212
|
+
const sourceStart = plan.chunkOffset + plan.writeStart - iOffset;
|
|
1213
|
+
const sourceEnd = sourceStart + (plan.writeEnd - plan.writeStart);
|
|
1214
|
+
newChunk.set(
|
|
1215
|
+
data.subarray(sourceStart, sourceEnd),
|
|
1216
|
+
plan.writeStart,
|
|
1217
|
+
);
|
|
1218
|
+
|
|
1219
|
+
entriesToWrite.push([plan.chunkKey, newChunk]);
|
|
1220
|
+
}
|
|
1221
|
+
|
|
1222
|
+
// Update file size if we wrote past the end
|
|
1223
|
+
const previousSize = file.size;
|
|
1224
|
+
const previousMetaDirty = file.metaDirty;
|
|
1225
|
+
const newSize = Math.max(file.size, writeEndOffset);
|
|
1226
|
+
if (newSize !== previousSize) {
|
|
1227
|
+
file.size = newSize;
|
|
1228
|
+
file.metaDirty = true;
|
|
1229
|
+
}
|
|
1230
|
+
if (file.metaDirty) {
|
|
1231
|
+
entriesToWrite.push([file.metaKey, encodeFileMeta(file.size)]);
|
|
1232
|
+
}
|
|
1233
|
+
|
|
1234
|
+
// Write all chunks and metadata
|
|
1235
|
+
try {
|
|
1236
|
+
await options.putBatch(entriesToWrite);
|
|
1237
|
+
} catch (error) {
|
|
1238
|
+
options.onError?.(error);
|
|
1239
|
+
file.size = previousSize;
|
|
1240
|
+
file.metaDirty = previousMetaDirty;
|
|
1241
|
+
return VFS.SQLITE_IOERR_WRITE;
|
|
1242
|
+
}
|
|
1243
|
+
if (file.metaDirty) {
|
|
1244
|
+
file.metaDirty = false;
|
|
1245
|
+
}
|
|
1246
|
+
file.metaDirty = false;
|
|
1247
|
+
|
|
1248
|
+
return VFS.SQLITE_OK;
|
|
1249
|
+
}
|
|
1250
|
+
|
|
1251
|
+
/**
 * SQLite xTruncate callback. Grow-truncates update metadata only;
 * shrink-truncates update metadata first (so a crash leaves orphaned
 * chunks rather than dangling metadata), then trim the final kept chunk
 * and delete chunks beyond the new size in bounded batches.
 */
async xTruncate(
	fileId: number,
	sizeLo: number,
	sizeHi: number,
): Promise<number> {
	const file = this.#openFiles.get(fileId);
	if (!file) {
		return VFS.SQLITE_IOERR_TRUNCATE;
	}

	// Rebuild the 64-bit size and reject out-of-range values.
	const size = delegalize(sizeLo, sizeHi);
	if (size < 0 || size > MAX_FILE_SIZE_BYTES) {
		return VFS.SQLITE_IOERR_TRUNCATE;
	}
	const options = file.options;

	// If truncating to larger size, just update metadata.
	if (size >= file.size) {
		if (size > file.size) {
			const previousSize = file.size;
			const previousMetaDirty = file.metaDirty;
			file.size = size;
			file.metaDirty = true;
			try {
				await options.put(file.metaKey, encodeFileMeta(file.size));
			} catch (error) {
				options.onError?.(error);
				// Roll back in-memory state on KV failure.
				file.size = previousSize;
				file.metaDirty = previousMetaDirty;
				return VFS.SQLITE_IOERR_TRUNCATE;
			}
			file.metaDirty = false;
		}
		return VFS.SQLITE_OK;
	}

	// Calculate which chunks to delete.
	// Note: When size=0, lastChunkToKeep = floor(-1/4096) = -1, which means
	// all chunks (starting from index 0) will be deleted in the loop below.
	const lastChunkToKeep = Math.floor((size - 1) / CHUNK_SIZE);
	const lastExistingChunk = Math.floor((file.size - 1) / CHUNK_SIZE);

	// Update metadata first so a crash leaves orphaned chunks (wasted
	// space) rather than metadata pointing at missing chunks (corruption).
	const previousSize = file.size;
	const previousMetaDirty = file.metaDirty;
	file.size = size;
	file.metaDirty = true;
	try {
		await options.put(file.metaKey, encodeFileMeta(file.size));
	} catch (error) {
		options.onError?.(error);
		file.size = previousSize;
		file.metaDirty = previousMetaDirty;
		return VFS.SQLITE_IOERR_TRUNCATE;
	}
	file.metaDirty = false;

	// Remaining operations clean up old chunk data. Metadata already
	// reflects the new size, so failures here leave orphaned/oversized
	// chunks that are invisible to SQLite (xRead clips to file.size).
	try {
		// Truncate the last kept chunk if needed (new size falls inside it).
		if (size > 0 && size % CHUNK_SIZE !== 0) {
			const lastChunkKey = this.#chunkKey(file, lastChunkToKeep);
			const lastChunkData = await options.get(lastChunkKey);

			if (lastChunkData && lastChunkData.length > size % CHUNK_SIZE) {
				const truncatedChunk = lastChunkData.subarray(
					0,
					size % CHUNK_SIZE,
				);
				await options.put(lastChunkKey, truncatedChunk);
			}
		}

		// Delete chunks beyond the new size.
		const keysToDelete: Uint8Array[] = [];
		for (let i = lastChunkToKeep + 1; i <= lastExistingChunk; i++) {
			keysToDelete.push(this.#chunkKey(file, i));
		}

		// Delete in batches bounded by the KV backend's batch-key limit.
		for (let b = 0; b < keysToDelete.length; b += KV_MAX_BATCH_KEYS) {
			await options.deleteBatch(
				keysToDelete.slice(b, b + KV_MAX_BATCH_KEYS),
			);
		}
	} catch (error) {
		options.onError?.(error);
		return VFS.SQLITE_IOERR_TRUNCATE;
	}

	return VFS.SQLITE_OK;
}
|
|
1345
|
+
|
|
1346
|
+
async xSync(fileId: number, _flags: number): Promise<number> {
|
|
1347
|
+
const file = this.#openFiles.get(fileId);
|
|
1348
|
+
if (!file || !file.metaDirty) {
|
|
1349
|
+
return VFS.SQLITE_OK;
|
|
1350
|
+
}
|
|
1351
|
+
|
|
1352
|
+
try {
|
|
1353
|
+
await file.options.put(file.metaKey, encodeFileMeta(file.size));
|
|
1354
|
+
} catch (error) {
|
|
1355
|
+
file.options.onError?.(error);
|
|
1356
|
+
return VFS.SQLITE_IOERR_FSYNC;
|
|
1357
|
+
}
|
|
1358
|
+
file.metaDirty = false;
|
|
1359
|
+
return VFS.SQLITE_OK;
|
|
1360
|
+
}
|
|
1361
|
+
|
|
1362
|
+
/**
 * SQLite xFileSize callback. Reports the in-memory tracked size by
 * writing it as a little-endian 64-bit integer at pSize.
 */
async xFileSize(fileId: number, pSize: number): Promise<number> {
	const file = this.#openFiles.get(fileId);
	if (!file) {
		return VFS.SQLITE_IOERR_FSTAT;
	}

	// Set size as 64-bit integer.
	this.#writeBigInt64(pSize, BigInt(file.size));
	return VFS.SQLITE_OK;
}
|
|
1372
|
+
|
|
1373
|
+
/**
 * SQLite xDelete callback. Decodes the C-string path and removes the
 * file's chunks and metadata via #delete. _syncDir is ignored; KV writes
 * have no directory to sync.
 */
async xDelete(
	_pVfs: number,
	zName: number,
	_syncDir: number,
): Promise<number> {
	try {
		await this.#delete(this.#module.UTF8ToString(zName));
	} catch (error) {
		// xDelete doesn't have a file handle, so we can't resolve
		// options.onError here. The error is still surfaced by
		// SQLite as SQLITE_IOERR_DELETE.
		return VFS.SQLITE_IOERR_DELETE;
	}
	return VFS.SQLITE_OK;
}
|
|
1388
|
+
|
|
1389
|
+
/**
 * Internal delete implementation.
 * Uses deleteRange for O(1) chunk deletion instead of enumerating
 * individual chunk keys. The chunk keys for a file tag are
 * lexicographically contiguous, so range deletion is always safe.
 * Deleting a nonexistent file is a no-op. Throws on KV failure or when
 * the path resolves to no registered file.
 */
async #delete(path: string): Promise<void> {
	const { options, fileTag } = this.#resolveFileOrThrow(path);
	const metaKey = getMetaKey(fileTag);

	// Get file size to check if the file exists.
	const sizeData = await options.get(metaKey);

	if (!sizeData) {
		// File doesn't exist, that's OK.
		return;
	}

	// Delete all chunks via range delete, then the metadata key.
	// Chunks go first: a crash in between leaves metadata for an empty
	// file rather than metadata-less orphaned chunks being recreated.
	await options.deleteRange(
		getChunkKey(fileTag, 0),
		getChunkKeyRangeEnd(fileTag),
	);
	await options.deleteBatch([metaKey]);
}
|
|
1414
|
+
|
|
1415
|
+
async xAccess(
|
|
1416
|
+
_pVfs: number,
|
|
1417
|
+
zName: number,
|
|
1418
|
+
_flags: number,
|
|
1419
|
+
pResOut: number,
|
|
1420
|
+
): Promise<number> {
|
|
1421
|
+
// TODO: Measure how often xAccess runs during open and whether these
|
|
1422
|
+
// existence checks add meaningful KV round-trip overhead. If they do,
|
|
1423
|
+
// consider serving file existence from in-memory state.
|
|
1424
|
+
const path = this.#module.UTF8ToString(zName);
|
|
1425
|
+
const resolved = this.#resolveFile(path);
|
|
1426
|
+
if (!resolved) {
|
|
1427
|
+
// File not registered, doesn't exist
|
|
1428
|
+
this.#writeInt32(pResOut, 0);
|
|
1429
|
+
return VFS.SQLITE_OK;
|
|
1430
|
+
}
|
|
1431
|
+
|
|
1432
|
+
const compactMetaKey = getMetaKey(resolved.fileTag);
|
|
1433
|
+
let metaData: Uint8Array | null;
|
|
1434
|
+
try {
|
|
1435
|
+
metaData = await resolved.options.get(compactMetaKey);
|
|
1436
|
+
} catch (error) {
|
|
1437
|
+
resolved.options.onError?.(error);
|
|
1438
|
+
return VFS.SQLITE_IOERR_ACCESS;
|
|
1439
|
+
}
|
|
1440
|
+
|
|
1441
|
+
// Set result: 1 if file exists, 0 otherwise
|
|
1442
|
+
this.#writeInt32(pResOut, metaData ? 1 : 0);
|
|
1443
|
+
return VFS.SQLITE_OK;
|
|
1444
|
+
}
|
|
1445
|
+
|
|
1446
|
+
// This VFS is actor-scoped with one writer, so there is no external
// reserved lock state to report: always write 0 ("not reserved").
xCheckReservedLock(_fileId: number, pResOut: number): number {
	this.#writeInt32(pResOut, 0);
	return VFS.SQLITE_OK;
}
|
|
1452
|
+
|
|
1453
|
+
// Locking is a no-op: single-writer semantics mean no contention to arbitrate.
xLock(_fileId: number, _flags: number): number {
	return VFS.SQLITE_OK;
}
|
|
1456
|
+
|
|
1457
|
+
// Unlocking is a no-op, mirroring the no-op xLock above.
xUnlock(_fileId: number, _flags: number): number {
	return VFS.SQLITE_OK;
}
|
|
1460
|
+
|
|
1461
|
+
/**
 * SQLite xFileControl callback. Implements the batch-atomic-write
 * protocol (BEGIN/COMMIT/ROLLBACK_ATOMIC_WRITE):
 *  - BEGIN snapshots file.size and starts buffering writes in dirtyBuffer.
 *  - COMMIT flushes buffered pages (+ metadata if dirty) in one putBatch,
 *    failing with SQLITE_IOERR if the buffer exceeds the KV batch limit.
 *  - ROLLBACK discards the buffer and restores the snapshotted size.
 * All other opcodes return SQLITE_NOTFOUND so SQLite uses its defaults.
 */
async xFileControl(
	fileId: number,
	flags: number,
	_pArg: number,
): Promise<number> {
	switch (flags) {
		case SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: {
			const file = this.#openFiles.get(fileId);
			if (!file) return VFS.SQLITE_NOTFOUND;
			// Snapshot size so ROLLBACK/failed COMMIT can restore it.
			file.savedFileSize = file.size;
			file.batchMode = true;
			file.metaDirty = false;
			file.dirtyBuffer = new Map();
			return VFS.SQLITE_OK;
		}

		case SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: {
			const file = this.#openFiles.get(fileId);
			if (!file) return VFS.SQLITE_NOTFOUND;
			const { dirtyBuffer, options } = file;

			// Dynamic limit: if metadata is dirty, we need one slot for it.
			// If metadata is not dirty (file.size unchanged), all slots are available for pages.
			const maxDirtyPages = file.metaDirty
				? KV_MAX_BATCH_KEYS - 1
				: KV_MAX_BATCH_KEYS;
			if (dirtyBuffer && dirtyBuffer.size > maxDirtyPages) {
				// Too many pages for one atomic batch: abort the
				// transaction and restore pre-BEGIN state.
				dirtyBuffer.clear();
				file.dirtyBuffer = null;
				file.size = file.savedFileSize;
				file.metaDirty = false;
				file.batchMode = false;
				return VFS.SQLITE_IOERR;
			}

			// Build entries array from dirty buffer + metadata.
			const entries: [Uint8Array, Uint8Array][] = [];
			if (dirtyBuffer) {
				for (const [chunkIndex, data] of dirtyBuffer) {
					entries.push([this.#chunkKey(file, chunkIndex), data]);
				}
				dirtyBuffer.clear();
			}
			if (file.metaDirty) {
				entries.push([file.metaKey, encodeFileMeta(file.size)]);
			}

			try {
				await options.putBatch(entries);
			} catch (error) {
				options.onError?.(error);
				// Failed commit: restore pre-BEGIN state.
				file.dirtyBuffer = null;
				file.size = file.savedFileSize;
				file.metaDirty = false;
				file.batchMode = false;
				return VFS.SQLITE_IOERR;
			}

			file.dirtyBuffer = null;
			file.metaDirty = false;
			file.batchMode = false;
			return VFS.SQLITE_OK;
		}

		case SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: {
			const file = this.#openFiles.get(fileId);
			// Not in batch mode: nothing to roll back.
			if (!file || !file.batchMode) return VFS.SQLITE_OK;
			if (file.dirtyBuffer) {
				file.dirtyBuffer.clear();
				file.dirtyBuffer = null;
			}
			// Restore the size snapshotted at BEGIN.
			file.size = file.savedFileSize;
			file.metaDirty = false;
			file.batchMode = false;
			return VFS.SQLITE_OK;
		}

		default:
			return VFS.SQLITE_NOTFOUND;
	}
}
|
|
1542
|
+
|
|
1543
|
+
// Return CHUNK_SIZE so SQLite aligns journal I/O to chunk boundaries.
// Must match the native VFS (kv_io_sector_size in sqlite-native/src/vfs.rs).
xSectorSize(_fileId: number): number {
	return CHUNK_SIZE;
}
|
|
1548
|
+
|
|
1549
|
+
// Advertise batch-atomic writes so SQLite uses the BEGIN/COMMIT/ROLLBACK
// _ATOMIC_WRITE file controls handled in xFileControl instead of a journal.
xDeviceCharacteristics(_fileId: number): number {
	return SQLITE_IOCAP_BATCH_ATOMIC;
}
|
|
1552
|
+
|
|
1553
|
+
/**
 * SQLite xFullPathname callback. This VFS has no directory hierarchy, so
 * the "full" path is the input path itself, UTF-8 encoded and copied
 * NUL-terminated into the zOut buffer of capacity nOut.
 */
xFullPathname(
	_pVfs: number,
	zName: number,
	nOut: number,
	zOut: number,
): number {
	const encoded = TEXT_ENCODER.encode(this.#module.UTF8ToString(zName));
	const out = this.#module.HEAPU8.subarray(zOut, zOut + nOut);
	// Need room for the path bytes plus the trailing NUL.
	if (encoded.length + 1 > out.length) {
		return VFS.SQLITE_IOERR;
	}
	out.set(encoded, 0);
	out[encoded.length] = 0;
	return VFS.SQLITE_OK;
}
|
|
1569
|
+
|
|
1570
|
+
/**
 * Decodes the filename pointer passed to xOpen. Returns null for a null
 * pointer. With SQLITE_OPEN_URI, SQLite hands us the URI layout
 * `path\0key\0value\0...\0\0`; this walks the NUL-separated segments and
 * rebuilds a `path?key=value&key=value` string. Otherwise the pointer is
 * a plain NUL-terminated UTF-8 string.
 */
#decodeFilename(zName: number, flags: number): string | null {
	if (!zName) {
		return null;
	}

	if (flags & VFS.SQLITE_OPEN_URI) {
		// Decode SQLite URI filename layout: path\0key\0value\0...\0
		// state 1 = reading path, 2 = reading key, 3 = reading value.
		let pName = zName;
		let state: 1 | 2 | 3 | null = 1;
		const charCodes: number[] = [];
		while (state) {
			const charCode = this.#module.HEAPU8[pName++];
			if (charCode) {
				charCodes.push(charCode);
				continue;
			}

			// Two consecutive NULs terminate the whole URI layout.
			if (!this.#module.HEAPU8[pName]) {
				state = null;
			}
			// Emit the separator for the segment we just finished.
			switch (state) {
				case 1:
					charCodes.push("?".charCodeAt(0));
					state = 2;
					break;
				case 2:
					charCodes.push("=".charCodeAt(0));
					state = 3;
					break;
				case 3:
					charCodes.push("&".charCodeAt(0));
					state = 2;
					break;
			}
		}
		return TEXT_DECODER.decode(new Uint8Array(charCodes));
	}

	return this.#module.UTF8ToString(zName);
}
|
|
1610
|
+
|
|
1611
|
+
// Returns a DataView over the WASM heap, rebuilding the cached view only
// when memory.grow() has replaced the underlying ArrayBuffer.
#heapView(): DataView {
	const heapBuffer = this.#module.HEAPU8.buffer;
	if (heapBuffer !== this.#heapDataViewBuffer) {
		this.#heapDataViewBuffer = heapBuffer;
		this.#heapDataView = new DataView(heapBuffer);
	}
	return this.#heapDataView;
}
|
|
1619
|
+
|
|
1620
|
+
// Writes a little-endian int32 at a WASM heap pointer (offset adjusted
// by HEAPU8.byteOffset in case the view does not start at buffer byte 0).
#writeInt32(pointer: number, value: number): void {
	const heapByteOffset = this.#module.HEAPU8.byteOffset + pointer;
	this.#heapView().setInt32(heapByteOffset, value, true);
}
|
|
1624
|
+
|
|
1625
|
+
// Writes a little-endian int64 at a WASM heap pointer; used for
// 64-bit outputs such as xFileSize's pSize.
#writeBigInt64(pointer: number, value: bigint): void {
	const heapByteOffset = this.#module.HEAPU8.byteOffset + pointer;
	this.#heapView().setBigInt64(heapByteOffset, value, true);
}
|
|
1629
|
+
}
|
|
1630
|
+
|
|
1631
|
+
/**
|
|
1632
|
+
* Rebuild an i64 from Emscripten's legalized (lo32, hi32) pair.
|
|
1633
|
+
* SQLite passes file offsets and sizes this way. We decode into unsigned words
|
|
1634
|
+
* and reject values above the VFS max file size.
|
|
1635
|
+
*/
|
|
1636
|
+
function delegalize(lo32: number, hi32: number): number {
|
|
1637
|
+
const hi = hi32 >>> 0;
|
|
1638
|
+
const lo = lo32 >>> 0;
|
|
1639
|
+
if (hi > MAX_FILE_SIZE_HI32) {
|
|
1640
|
+
return -1;
|
|
1641
|
+
}
|
|
1642
|
+
if (hi === MAX_FILE_SIZE_HI32 && lo > MAX_FILE_SIZE_LO32) {
|
|
1643
|
+
return -1;
|
|
1644
|
+
}
|
|
1645
|
+
return hi * UINT32_SIZE + lo;
|
|
1646
|
+
}
|