browser-sqlite 1.0.0-rc.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 lalexdotcom
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,161 @@
1
+ # browser-sqlite
2
+
3
+ A persistent SQLite database that lives in your browser — yes, for real. Powered by [wa-sqlite](https://github.com/rhashimoto/wa-sqlite) (WebAssembly), built for (read) concurrency.
4
+
5
+ ## Install
6
+
7
+ ```bash
8
+ npm install browser-sqlite
9
+ # or
10
+ pnpm add browser-sqlite
11
+ ```
12
+
13
+ Requires a bundler that supports Web Workers with dynamic imports (Rsbuild, webpack 5, Vite 3+).
14
+
15
+ ## VFS Selection
16
+
17
+ browser-sqlite delegates storage to a wa-sqlite Virtual File System (VFS). Choose based on browser support and storage requirements:
18
+
19
+ | VFS | Storage | Constraint | When to use |
20
+ |-----|---------|------------|-------------|
21
+ | `OPFSPermutedVFS` **(default)** | OPFS | None — supports `poolSize >= 1` | General purpose. Best choice for most applications. |
22
+ | `OPFSAdaptiveVFS` | OPFS | Requires JSPI (Chromium 126+) | When JSPI is available and adaptive sync strategy is desired. |
23
+ | `OPFSCoopSyncVFS` | OPFS | None — cooperative sync, no JSPI required | Broader browser compatibility fallback when JSPI is unavailable. |
24
+ | `AccessHandlePoolVFS` | OPFS | **`poolSize` must be `1`** — throws otherwise | Single-connection scenarios requiring access handle pool semantics. |
25
+ | `IDBBatchAtomicVFS` | IndexedDB | None | Fallback when OPFS is unavailable (older browsers, some mobile environments). |
26
+
27
+ When `vfs` is omitted, `OPFSPermutedVFS` is used.
28
+
29
+ For a detailed VFS comparison, see the [wa-sqlite VFS comparison](https://github.com/rhashimoto/wa-sqlite/tree/master/src/examples#vfs-comparison).
30
+
31
+ ## Usage
32
+
33
+ ### Initialize
34
+
35
+ ```typescript
36
+ import { createSQLiteClient } from 'browser-sqlite';
37
+
38
+ const db = createSQLiteClient('myapp.sqlite', {
39
+ poolSize: 2, // number of worker threads (default: 2)
40
+ vfs: 'OPFSPermutedVFS', // VFS selection (default: 'OPFSPermutedVFS')
41
+ pragmas: { // SQLite PRAGMAs applied on open
42
+ journal_mode: 'WAL',
43
+ synchronous: 'NORMAL',
44
+ },
45
+ });
46
+ ```
47
+
48
+ `createSQLiteClient` spawns `poolSize` Web Worker threads immediately. Workers reach READY state asynchronously — queries made before workers are ready are queued automatically.
49
+
50
+ ### Read
51
+
52
+ ```typescript
53
+ type User = { id: number; name: string };
54
+
55
+ const users = await db.read<User>(
56
+ 'SELECT id, name FROM users WHERE active = ?',
57
+ [1],
58
+ );
59
+ // users: User[]
60
+ ```
61
+
62
+ Read queries are dispatched to any available worker, enabling concurrent reads.
63
+
64
+ ### Write
65
+
66
+ ```typescript
67
+ const { affected } = await db.write(
68
+ 'INSERT INTO users (name, email) VALUES (?, ?)',
69
+ ['Alice', 'alice@example.com'],
70
+ );
71
+ // affected: number of rows inserted
72
+ ```
73
+
74
+ Write queries are serialized through a dedicated writer worker — only one write executes at a time.
75
+
76
+ ### Stream (large result sets)
77
+
78
+ ```typescript
79
+ // Worker is held for the full generator lifetime — always exhaust or break.
80
+ for await (const chunk of db.stream<User>(
81
+ 'SELECT * FROM large_table',
82
+ [],
83
+ { chunkSize: 100 },
84
+ )) {
85
+ processChunk(chunk); // chunk is User[]
86
+ }
87
+ ```
88
+
89
+ `stream()` yields rows in chunks without buffering the full result set in memory.
90
+
91
+ ### One (first row)
92
+
93
+ ```typescript
94
+ const user = await db.one<User>(
95
+ 'SELECT * FROM users WHERE id = ?',
96
+ [42],
97
+ );
98
+ // user: User | undefined
99
+ ```
100
+
101
+ `one()` automatically aborts after the first result row. Use it for lookups by primary key or unique field.
102
+
103
+ ### Advanced
104
+
105
+ For batch inserts, schema-driven table replacement, or explicit transactions, see:
106
+ - `db.bulkWrite(table, keys)` — batches inserts within `SQLITE_MAX_VARS` limit
107
+ - `db.output(table, schema, options)` — drops, recreates, and populates a table from a schema definition
108
+ - `db.transaction(callback, options)` — wraps operations in a SQLite transaction with auto-commit and rollback
109
+
110
+ ### Close
111
+
112
+ ```typescript
113
+ db.close();
114
+ ```
115
+
116
+ Terminates all worker threads.
117
+
118
+ ## Requirements
119
+
120
+ > **These HTTP headers are mandatory.** Without them, `new SharedArrayBuffer()` throws a `SecurityError` and browser-sqlite cannot initialize.
121
+
122
+ browser-sqlite uses a `SharedArrayBuffer` to coordinate worker pool state. Browsers require [cross-origin isolation](https://developer.mozilla.org/en-US/docs/Web/API/crossOriginIsolated) to create `SharedArrayBuffer` instances. Your page must be served with:
123
+
124
+ ```http
125
+ Cross-Origin-Opener-Policy: same-origin
126
+ Cross-Origin-Embedder-Policy: require-corp
127
+ ```
128
+
129
+ ### Server configuration examples
130
+
131
+ **Nginx**
132
+ ```nginx
133
+ add_header Cross-Origin-Opener-Policy "same-origin";
134
+ add_header Cross-Origin-Embedder-Policy "require-corp";
135
+ ```
136
+
137
+ **Express**
138
+ ```javascript
139
+ app.use((req, res, next) => {
140
+ res.setHeader('Cross-Origin-Opener-Policy', 'same-origin');
141
+ res.setHeader('Cross-Origin-Embedder-Policy', 'require-corp');
142
+ next();
143
+ });
144
+ ```
145
+
146
+ **Rsbuild / Vite dev server**
147
+ ```typescript
148
+ // rsbuild.config.ts or vite.config.ts
149
+ server: {
150
+ headers: {
151
+ 'Cross-Origin-Opener-Policy': 'same-origin',
152
+ 'Cross-Origin-Embedder-Policy': 'require-corp',
153
+ },
154
+ },
155
+ ```
156
+
157
+ ## Known Limitations
158
+
159
+ - **`AccessHandlePoolVFS` requires `poolSize: 1`.** Passing `poolSize > 1` with this VFS throws synchronously at client creation time.
160
+ - **`SharedArrayBuffer` requires cross-origin isolation.** See the [Requirements](#requirements) section. Omitting COOP/COEP headers causes a `SecurityError` at runtime with no fallback.
161
+ - **`OPFSAdaptiveVFS` requires Chromium 126+.** This VFS uses JavaScript Promise Integration (JSPI), which is not available in Firefox or Safari as of 2025.
@@ -0,0 +1,424 @@
1
+ import { defer } from "@lalex/promises";
2
// Index of each control flag stored at the head of the shared Int32Array.
const FlagsIndexes = {
    INIT_LOCK: 0
};
// States of the initialization spin-lock guarding worker setup.
const WorkerLock = {
    FREE: 0,
    LOCKED: 1
};
// Lifecycle states a pooled worker advances through. Stored per worker in
// the shared flags array so every thread observes the same value.
const WorkerStatuses = {
    EMPTY: -3,
    NEW: -2,
    INITIALIZING: -1,
    INITIALIZED: 0,
    READY: 10,
    RESERVED: 49,
    RUNNING: 50,
    ABORTING: 99,
    DONE: 100
};
// Worker status slots begin immediately after the last control flag.
const FLAGS_WORKER_STATUS_OFFSET = Math.max(...Object.values(FlagsIndexes)) + 1;
/**
 * Cross-thread coordination state backed by a SharedArrayBuffer.
 *
 * The buffer holds the control flags followed by one Int32 status slot per
 * pooled worker. The main thread constructs it with a pool size (allocating
 * a fresh buffer); workers construct it with the received SharedArrayBuffer
 * to attach to the same state.
 */
class WorkerOrchestrator {
    sharedArrayBuffer;
    flags;
    size;
    /**
     * @param init - pool size (number → allocate a new buffer) or an
     *               existing SharedArrayBuffer to attach to (worker side).
     */
    constructor(init) {
        const allocating = typeof init === 'number';
        this.sharedArrayBuffer = allocating
            ? new SharedArrayBuffer((init + FLAGS_WORKER_STATUS_OFFSET) * Int32Array.BYTES_PER_ELEMENT)
            : init;
        this.flags = new Int32Array(this.sharedArrayBuffer);
        if (allocating) {
            this.size = init;
            this.flags[FlagsIndexes.INIT_LOCK] = WorkerLock.FREE;
            // Every worker slot starts out EMPTY.
            this.flags.fill(WorkerStatuses.EMPTY, FLAGS_WORKER_STATUS_OFFSET);
        } else {
            // Attaching: derive the pool size from the buffer's length.
            this.size = init.byteLength / Int32Array.BYTES_PER_ELEMENT - FLAGS_WORKER_STATUS_OFFSET;
        }
    }
    /** Blocks until INIT_LOCK is acquired (CAS loop + Atomics.wait). */
    lock() {
        for (;;) {
            const seen = Atomics.compareExchange(this.flags, FlagsIndexes.INIT_LOCK, WorkerLock.FREE, WorkerLock.LOCKED);
            if (seen === WorkerLock.FREE) return;
            Atomics.wait(this.flags, FlagsIndexes.INIT_LOCK, WorkerLock.LOCKED);
        }
    }
    /** Releases INIT_LOCK and wakes one waiter — only if it was held. */
    unlock() {
        const seen = Atomics.compareExchange(this.flags, FlagsIndexes.INIT_LOCK, WorkerLock.LOCKED, WorkerLock.FREE);
        if (seen === WorkerLock.LOCKED) {
            Atomics.notify(this.flags, FlagsIndexes.INIT_LOCK, 1);
        }
    }
    /**
     * Atomically updates worker `index`'s status.
     * With `from` supplied: a compare-and-swap; returns true iff the
     * transition `from` → `status` actually happened.
     * Without `from`: an unconditional exchange; returns true iff the
     * stored value actually changed.
     */
    setStatus(index, status, from) {
        const slot = index + FLAGS_WORKER_STATUS_OFFSET;
        if (from !== undefined) {
            return Atomics.compareExchange(this.flags, slot, from, status) === from;
        }
        return Atomics.exchange(this.flags, slot, status) !== status;
    }
    /** Atomically reads worker `index`'s current status. */
    getStatus(index) {
        return Atomics.load(this.flags, index + FLAGS_WORKER_STATUS_OFFSET);
    }
}
57
// Heuristic: does this SQL statement mutate the database (and therefore
// have to be routed to the dedicated writer worker)?
// FIX: the original regex carried the /g flag, which makes RegExp#test
// stateful — lastIndex persists across calls, so calling it twice with the
// same SQL alternated true/false and could route a write to a reader
// worker. /g removed; /i keeps the match case-insensitive.
const isWriteQuery = (sql)=>/(INSERT|REPLACE|UPDATE|DELETE|CREATE|DROP|PRAGMA|ATTACH|DETACH)\s/im.test(sql);
58
const DEFAULT_POOL_SIZE = 2;
// Monotonic counter used only to build a unique, human-readable client prefix.
let clientCount = 0;
const DEFAULT_VFS = 'OPFSPermutedVFS';
/**
 * Creates a SQLite client backed by a pool of Web Workers.
 *
 * Spawns `poolSize` workers immediately; each worker receives an 'open'
 * message carrying the database file name, the shared orchestration buffer,
 * the VFS choice and optional PRAGMAs. Requests made before workers are
 * READY are queued and dispatched as workers become available.
 *
 * Fixes applied in this revision:
 *  - corrected the read-only transaction error message ("werite" → "write");
 *  - the abort listener registered on `options.signal` is now removed in
 *    `finally`, so an error or early generator exit no longer leaks it;
 *  - a failing ROLLBACK in `transaction`'s catch no longer masks the
 *    original callback error.
 *
 * @param file - database file name forwarded to each worker's 'open' message.
 * @param clientOptions - optional { name, poolSize, vfs, pragmas }.
 * @returns API object: { read, write, stream, one, transaction, bulkWrite, output, close, debug }.
 * @throws {Error} when vfs is 'AccessHandlePoolVFS' and poolSize > 1.
 */
const createSQLiteClient = (file, clientOptions)=>{
    const clientIndex = ++clientCount;
    // `name` only labels worker threads for debugging; it is not the file name.
    const clientPrefix = `${clientOptions?.name ?? 'SQLite'} ${clientIndex}`;
    const poolSize = clientOptions?.poolSize ?? DEFAULT_POOL_SIZE;
    const pool = [];
    const orchestrator = new WorkerOrchestrator(poolSize);
    const vfs = clientOptions?.vfs ?? DEFAULT_VFS;
    if ('AccessHandlePoolVFS' === vfs && poolSize > 1) throw new Error('AccessHandlePoolVFS does not support pool sizes greater than 1');
    // Debug instrumentation was stripped by the build: every field below is
    // undefined, and all `debug`/`state` branches are dead in production.
    const { state: debug, createRequestDebugState, createWorkerDebugState, createQueryDebugState } = {};
    // Spawns one worker, wires its message protocol, and resolves with the
    // augmented Worker once it reports 'ready'.
    const createWorker = ()=>{
        const deferredInit = defer();
        const workerName = `${clientPrefix} / Worker ${pool.length + 1}`;
        const index = pool.push(new Worker(new URL('./worker.ts', import.meta.url), {
            name: workerName
        })) - 1;
        const worker = Object.assign(pool[index], {
            index,
            available: false
        });
        const state = createWorkerDebugState?.(index, workerName);
        let currentCallId = 0;
        // Pending deferred for the in-flight query's next chunk; undefined
        // when no query is running (also acts as the "busy" marker).
        let deferredChunk;
        worker.onmessage = ({ data })=>{
            const { callId, type } = data;
            if (0 === callId && 'ready' === type) {
                worker.available = true;
                if (state) state.initializationTime = Date.now();
                deferredInit.resolve(worker);
            }
            // Ignore messages from superseded calls.
            if (deferredChunk && callId === currentCallId) switch(type){
                case 'chunk':
                    if (state?.currentRequest?.currentQuery) state.currentRequest.currentQuery.firstRowTime ??= Date.now();
                    deferredChunk.resolve(data.data);
                    // Re-arm for the next chunk before the consumer awaits it.
                    deferredChunk = defer();
                    break;
                case 'done':
                    {
                        const affected = data.affected;
                        if (state?.currentRequest?.currentQuery) {
                            state.currentRequest.currentQuery.affectedRows = affected;
                            state.currentRequest.affectedRows += affected;
                            state.currentRequest.currentQuery.endTime = Date.now();
                        }
                        // The numeric affected-count is the generator's final yield.
                        deferredChunk.resolve(affected);
                        deferredChunk = void 0;
                        break;
                    }
                case 'error':
                    {
                        const error = new Error(data.message, {
                            cause: data.cause
                        });
                        if (state?.currentRequest?.currentQuery) {
                            state.currentRequest.currentQuery.error = error;
                            state.currentRequest.currentQuery.endTime = Date.now();
                        }
                        deferredChunk.reject(error);
                        deferredChunk = void 0;
                        break;
                    }
            }
        };
        // Async generator yielding row-array chunks, then the numeric
        // affected count as its final value. One query per worker at a time.
        const query = async function*(sql, params, options) {
            const { chunkSize = 500, signal } = options ?? {};
            const signalAbortHandler = ()=>{
                orchestrator.setStatus(index, WorkerStatuses.ABORTING, WorkerStatuses.RUNNING);
            };
            try {
                worker.available = false;
                if (deferredChunk) {
                    console.error(`Previous query not finished on worker ${index + 1}`);
                    throw new Error('Worker is already processing a query');
                }
                if (state?.currentRequest) {
                    const queryState = createQueryDebugState?.(index, sql, params);
                    state.currentRequest.currentQuery = queryState;
                }
                signal?.addEventListener('abort', signalAbortHandler);
                deferredChunk = defer();
                worker.postMessage({
                    type: 'query',
                    callId: ++currentCallId,
                    sql,
                    params,
                    options: {
                        chunkSize
                    }
                });
                while(deferredChunk){
                    const chunk = await deferredChunk.promise;
                    yield chunk;
                }
            } finally{
                // FIX: remove the abort listener even on error or early
                // generator exit (the original only removed it on normal
                // completion, leaking a listener per aborted/failed query).
                signal?.removeEventListener('abort', signalAbortHandler);
                deferredChunk = void 0;
                worker.available = true;
            }
        };
        Object.assign(worker, {
            query
        });
        worker.postMessage({
            callId: 0,
            type: 'open',
            file,
            flags: orchestrator.sharedArrayBuffer,
            index,
            vfs,
            pragmas: clientOptions?.pragmas
        });
        return deferredInit.promise;
    };
    // FIFO queues of callbacks waiting for a worker to free up.
    const readerRequestQueue = [];
    const writerRequestQueue = [];
    // Index of the worker currently dedicated to writes, or -1 if none.
    let currentWriterIndex = -1;
    // Returns an available worker synchronously, or undefined. Writes stick
    // to the current writer worker so they serialize on one connection.
    const acquireWorker = (write = false)=>{
        if (write && currentWriterIndex > -1) {
            const writer = pool[currentWriterIndex];
            if (writer.available) return writer;
            return;
        }
        const availableWorker = pool.find((w)=>{
            if (w.available) return true;
            return false;
        });
        if (availableWorker && write) currentWriterIndex = availableWorker.index;
        return availableWorker;
    };
    // Resolves with a worker, queueing the request if none is free yet.
    const acquireNextWorker = async (write = false)=>{
        const availableWorker = acquireWorker(write);
        if (availableWorker) {
            availableWorker.available = false;
            return availableWorker;
        }
        const { promise, resolve } = defer();
        if (write) {
            if (debug) debug.queue.write++;
            writerRequestQueue.push((worker)=>{
                worker.available = false;
                resolve(worker);
            });
        } else {
            if (debug) debug.queue.read++;
            readerRequestQueue.push((worker)=>{
                worker.available = false;
                resolve(worker);
            });
        }
        return promise;
    };
    const getNextAvailableWorker = async (write = false)=>{
        const requestState = createRequestDebugState?.();
        const availableWorker = await acquireNextWorker(write);
        requestState?.assign(availableWorker.index);
        return pool[availableWorker.index];
    };
    // Hands a finished worker to the next queued request (writers first when
    // this worker is — or may become — the writer), else marks it READY.
    const releaseWorker = (worker)=>{
        const requestState = debug?.workers[worker.index]?.currentRequest;
        if (requestState) requestState.releaseTime = Date.now();
        if (writerRequestQueue.length) {
            if (currentWriterIndex === worker.index || -1 === currentWriterIndex) {
                if (debug) debug.queue.write--;
                writerRequestQueue.shift()?.(worker);
                return;
            }
        }
        if (readerRequestQueue.length) {
            if (currentWriterIndex === worker.index) currentWriterIndex = -1;
            if (debug) debug.queue.read--;
            readerRequestQueue.shift()?.(worker);
            return;
        }
        orchestrator.setStatus(worker.index, WorkerStatuses.READY);
    };
    // Drains a query on an already-acquired worker into a row array
    // (numeric affected-count yields are skipped).
    const readWorker = async (worker, sql, params, options)=>{
        const result = [];
        for await (const chunk of worker.query(sql, params, options))if ('number' != typeof chunk) result.push(...chunk);
        return result;
    };
    // Public read: acquire → run → always release.
    const read = async (sql, params, options)=>{
        const worker = await getNextAvailableWorker(isWriteQuery(sql));
        try {
            return await readWorker(worker, sql, params, options);
        } finally{
            releaseWorker(worker);
        }
    };
    const streamWorker = async function*(worker, sql, params, options) {
        for await (const chunk of worker.query(sql, params, options))if ('number' != typeof chunk) yield chunk;
    };
    // Public stream: the worker is held for the generator's whole lifetime.
    const stream = async function*(sql, params, options) {
        const worker = await getNextAvailableWorker(isWriteQuery(sql));
        try {
            for await (const chunk of streamWorker(worker, sql, params, options))yield chunk;
        } finally{
            releaseWorker(worker);
        }
    };
    // Like readWorker, but also captures the final affected-row count.
    const writeWorker = async (worker, sql, params, options)=>{
        const result = [];
        let affected = 0;
        for await (const chunk of worker.query(sql, params, options))if ('number' != typeof chunk) result.push(...chunk);
        else affected = chunk;
        return {
            result,
            affected
        };
    };
    const write = async (sql, params, options)=>{
        const worker = await getNextAvailableWorker(isWriteQuery(sql));
        try {
            return await writeWorker(worker, sql, params, options);
        } finally{
            releaseWorker(worker);
        }
    };
    // First row only: stream with chunkSize 1 and abort after the first chunk.
    const oneWorker = async (worker, sql, params, options)=>{
        let result;
        const abortController = new AbortController();
        for await (const chunk of streamWorker(worker, sql, params, {
            ...options,
            signal: abortController.signal,
            chunkSize: 1
        })){
            result = chunk[0];
            abortController.abort();
            break;
        }
        return result;
    };
    const one = async (sql, params, options)=>{
        const worker = await getNextAvailableWorker(isWriteQuery(sql));
        try {
            return await oneWorker(worker, sql, params, options);
        } finally{
            releaseWorker(worker);
        }
    };
    // Buffered multi-row INSERT that flushes in batches sized to stay under
    // SQLite's bound-variable limit. `close()` resolves with total affected.
    const bulkWrite = (table, keys)=>{
        const SQLITE_MAX_VARS = 32766;
        const maxBufferSize = Math.floor(SQLITE_MAX_VARS / keys.length);
        const buffer = [];
        // Chained so flushed batches execute (and count) in order.
        let writePromise = Promise.resolve(0);
        const flush = ()=>{
            const toInsert = [
                ...buffer
            ];
            buffer.length = 0;
            writePromise = writePromise.then((currentAffected)=>write(`INSERT INTO ${table} (${keys.join(',')}) VALUES ${toInsert.map(()=>`(${keys.map(()=>'?')})`)}`, toInsert.flatMap((data)=>keys.map((k)=>data[k]))).then(({ affected: chunkAffected })=>currentAffected + chunkAffected));
        };
        return {
            enqueue: (data)=>{
                buffer.push(data);
                if (buffer.length >= maxBufferSize) flush();
            },
            close: ()=>{
                if (buffer.length) flush();
                return writePromise;
            }
        };
    };
    // Schema-driven full-refresh: DROP, CREATE, bulk-insert, then indexes.
    const output = (table, schema, options)=>{
        // Generated columns cannot be inserted into — exclude them.
        const { enqueue, close } = bulkWrite(table, Object.keys(schema).filter((col)=>'object' != typeof schema[col] || !schema[col].generated));
        const normalizedSchema = Object.entries(schema).map(([k, v])=>{
            const type = 'string' == typeof v ? v : v.type;
            const unique = 'object' == typeof v && !!v.unique;
            const notnull = 'object' == typeof v && !!v.required;
            const generated = 'object' == typeof v ? v.generated : void 0;
            return {
                name: k,
                type,
                unique,
                notnull,
                generated
            };
        });
        const createTablePromise = write(`
    DROP TABLE IF EXISTS ${table}
  `).then(async ()=>{
            await write(`
    CREATE ${options?.temp ? 'TEMPORARY' : ''} TABLE ${table}(
      ${normalizedSchema.map(({ name, type, unique, notnull, generated })=>`${name} ${type} ${unique ? 'UNIQUE' : ''} ${notnull ? 'NOT NULL' : ''} ${generated ? `GENERATED ALWAYS AS ${generated}` : ''}`).join(',')}
    )
  `);
        });
        return {
            enqueue: (data)=>{
                // .then callbacks run in registration order, preserving row order.
                createTablePromise.then(()=>enqueue(data));
            },
            close: ()=>createTablePromise.then(()=>close()).then(async (affected)=>{
                if (options?.indexes) for (const index of options.indexes){
                    const columns = 'string' == typeof index ? [
                        index
                    ] : Array.isArray(index) ? index : 'object' == typeof index ? 'column' in index ? [
                        index.column
                    ] : index.columns : void 0;
                    const unique = !Array.isArray(index) && 'object' == typeof index && !!index.unique;
                    if (columns) await write(`CREATE ${unique ? 'UNIQUE' : ''} INDEX IF NOT EXISTS ${table}_${columns.join('_')}_${unique ? 'U' : 'IDX'} ON ${table}(${columns.join(',')})`);
                }
                return affected;
            })
        };
    };
    // Runs `callback` against one dedicated worker inside BEGIN…COMMIT/ROLLBACK.
    const transaction = async (callback, options)=>{
        const { readOnly = false, autoCommit = true } = options ?? {};
        const worker = await getNextAvailableWorker(!readOnly);
        const checksql = (sql)=>{
            // FIX: corrected "werite" typo in the error message.
            if (readOnly && isWriteQuery(sql)) throw new Error('Cannot write in read-only transaction');
            return sql;
        };
        let done = false;
        const db = {
            read: (sql, ...args)=>readWorker(worker, checksql(sql), ...args),
            write: (sql, ...args)=>writeWorker(worker, checksql(sql), ...args),
            stream: (sql, ...args)=>streamWorker(worker, checksql(sql), ...args),
            one: (sql, ...args)=>oneWorker(worker, checksql(sql), ...args),
            commit: async ()=>{
                done = true;
                await oneWorker(worker, 'COMMIT');
            },
            rollback: async ()=>{
                done = true;
                await oneWorker(worker, 'ROLLBACK');
            }
        };
        try {
            await db.read('BEGIN');
            const result = await callback(db);
            if (!done) if (autoCommit) await db.commit();
            else await db.rollback();
            return result;
        } catch (e) {
            // FIX: a failing ROLLBACK (e.g. the callback already committed,
            // so no transaction is active) must not mask the original error.
            try {
                await db.rollback();
            } catch  {
            // intentionally swallowed — `e` is the error that matters
            }
            throw e;
        } finally{
            releaseWorker(worker);
        }
    };
    // Terminates every pool worker. Pending queued requests never resolve;
    // OPFS database files are NOT deleted.
    const close = ()=>{
        let worker = pool.shift();
        while(void 0 !== worker){
            worker.terminate();
            worker = pool.shift();
        }
    };
    // Spawn the pool; release each worker into the queues once it is ready.
    Promise.all(Array.from({
        length: poolSize
    }).map(()=>createWorker())).then((allWorkers)=>{
        for (const worker of allWorkers)releaseWorker(worker);
    });
    const api = {
        read,
        write,
        stream,
        one,
        transaction,
        bulkWrite,
        output,
        close,
        debug
    };
    return api;
};
export { createSQLiteClient };
@@ -0,0 +1,2 @@
1
// Generated declaration for the rslib build-config module: the module's
// default export is typed as an RslibConfig. Build tooling only — not part
// of the runtime API.
declare const _default: import("@rslib/core").RslibConfig;
export default _default;
@@ -0,0 +1,2 @@
1
// Generated declaration for the rstest test-config module: the module's
// default export is typed as an RstestConfig. Test tooling only — not part
// of the runtime API.
declare const _default: import("@rstest/core").RstestConfig;
export default _default;
@@ -0,0 +1,332 @@
1
+ import type { SQLiteVFS } from './types';
2
/**
 * Configuration options for creating a SQLite client.
 */
export type CreateSQLiteClientOptions = {
    /**
     * Human-readable client label used to prefix worker thread names
     * (e.g. "MyApp 1 / Worker 2") for debugging. NOTE(review): this is NOT
     * the database file name — the implementation only uses it to build the
     * worker name prefix; the database file is the first argument of
     * `createSQLiteClient`.
     * @defaultValue `"SQLite"` prefix + auto-incremented client index
     */
    name?: string;
    /**
     * Number of Web Workers spawned in the pool at initialization.
     * A larger pool allows more concurrent read operations but increases
     * memory consumption and OPFS file handle usage.
     * Must be `1` when using `AccessHandlePoolVFS` — any larger value throws at construction time.
     * @defaultValue `2`
     */
    poolSize?: number;
    /**
     * Virtual File System implementation used for SQLite storage.
     * Controls whether data is stored in OPFS, IndexedDB, or memory.
     * See the README VFS Selection guide for a comparison.
     * @defaultValue `'OPFSPermutedVFS'`
     */
    vfs?: SQLiteVFS;
    /**
     * SQLite PRAGMAs applied to each worker's database connection on open.
     * Keys are PRAGMA names, values are their string representations.
     * Example: `{ journal_mode: 'WAL', synchronous: 'NORMAL' }`.
     * If omitted, no PRAGMAs are applied beyond SQLite defaults.
     */
    pragmas?: Record<string, string>;
};
35
/**
 * Query execution options.
 */
type SQLiteQueryOptions<_T extends Record<string, unknown>> = {
    /** Optional request identifier (debug/tracing use). */
    id?: string;
    /** Rows per streamed chunk; the client defaults this to 500. */
    chunkSize?: number;
    /** Abort signal: flags the running worker as ABORTING when fired. */
    signal?: AbortSignal;
    /** Debug label — presumably for diagnostics; unused in the visible client code, TODO confirm. */
    debug?: string;
};
type SQLiteStreamOptions<T extends Record<string, unknown>> = SQLiteQueryOptions<T> & {
    /** Abort signal for streaming (same semantics as SQLiteQueryOptions.signal). */
    signal?: AbortSignal;
};
47
/**
 * Main SQLite database API.
 *
 * NOTE(review): `params` is typed `any[]` here but `unknown[]` on the
 * exported `createSQLiteClient` declaration — consider unifying.
 */
export type SQLiteDB = {
    /**
     * Executes a SELECT query and returns all matching rows as an array.
     *
     * Read queries are dispatched to any available worker in the pool,
     * enabling concurrent execution across multiple readers.
     *
     * @param sql - SQL query string. Must be a SELECT (or equivalent read) statement.
     * @param params - Positional parameters bound to `?` placeholders.
     * @param options - Optional query options (`chunkSize`, `signal`, `id`).
     * @returns Promise resolving to an array of typed rows (`T[]`). Returns `[]` for empty results.
     */
    read: <T extends Record<string, unknown>>(sql: string, params?: any[], options?: SQLiteQueryOptions<T>) => Promise<T[]>;
    /**
     * Executes a DML or DDL statement (INSERT, UPDATE, DELETE, CREATE, DROP, etc.)
     * and returns both any result rows and the number of affected rows.
     *
     * Write queries are serialized through a single dedicated writer worker.
     * Concurrent writes queue behind each other — only one write executes at a time.
     *
     * @param sql - SQL statement. Any statement recognized as a write by `isWriteQuery`.
     * @param params - Positional parameters bound to `?` placeholders.
     * @param options - Optional query options (`chunkSize`, `signal`, `id`).
     * @returns Promise resolving to `{ result: T[], affected: number }` where
     * `affected` is the SQLite `changes()` count for the statement.
     */
    write: <T extends Record<string, unknown>>(sql: string, params?: any[], options?: SQLiteQueryOptions<T>) => Promise<{
        result: T[];
        affected: number;
    }>;
    /**
     * Executes a query and yields result rows in chunks via an async generator.
     * Memory-efficient for large result sets — rows are not buffered in full.
     *
     * @remarks
     * **Worker held for full generator lifetime.** A pool worker is acquired when
     * the generator is created and released only when the generator is fully
     * exhausted or the caller uses `break`. Failing to exhaust the generator
     * starves the pool. Always use `for await...of` to completion or `break` to exit.
     *
     * @param sql - SQL query string.
     * @param params - Positional parameters bound to `?` placeholders.
     * @param options - Optional options including `chunkSize` (default `500`),
     * `signal` (AbortSignal to cancel), and `id`.
     * @returns AsyncGenerator yielding `T[]` chunks of at most `chunkSize` rows.
     */
    stream: <T extends Record<string, unknown>>(sql: string, params?: any[], options?: SQLiteStreamOptions<T>) => AsyncGenerator<T[]>;
    /**
     * Executes a query and returns the first row, or `undefined` if no rows match.
     * Internally uses `chunkSize: 1` and aborts after the first result chunk.
     *
     * @remarks
     * Intended for SELECT queries. Using `one()` with a write statement (INSERT, UPDATE)
     * routes to the write worker and still executes the DML — use `write()` for mutations.
     *
     * @param sql - SQL query string.
     * @param params - Positional parameters bound to `?` placeholders.
     * @param options - Optional query options (`id`). `chunkSize` and `signal` are managed internally.
     * @returns Promise resolving to the first row as `T`, or `undefined` if no rows.
     */
    one: <T extends Record<string, unknown>>(sql: string, params?: any[], options?: SQLiteQueryOptions<T>) => Promise<T | undefined>;
    /**
     * Executes a callback within a SQLite transaction, providing a scoped
     * `TransactionDB` with `read`, `write`, `stream`, and `one` methods.
     *
     * The worker is held exclusively for the transaction's duration.
     * On callback success: auto-commits if `autoCommit` is `true` (default).
     * On callback error: rolls back automatically.
     * The callback may call `db.commit()` or `db.rollback()` manually.
     *
     * NOTE(review): the callback parameter is typed `any`; a dedicated
     * `TransactionDB` type would restore type safety here — confirm against
     * the implementation's transaction-scoped `db` object shape.
     *
     * @param callback - Async function receiving a `TransactionDB` instance.
     * @param options - `readOnly` (default `false`) prevents write statements;
     * `autoCommit` (default `true`) commits on callback success.
     * @returns Promise resolving to the value returned by `callback`.
     */
    transaction: <T = void>(callback: (db: any) => Promise<T>, options?: {
        readOnly?: boolean;
        autoCommit?: boolean;
    }) => Promise<T>;
    /**
     * Creates a buffered bulk-insert utility that batches rows to stay within
     * SQLite's variable limit (`SQLITE_MAX_VARS = 32766`).
     *
     * Call `enqueue()` for each row to insert, then `close()` to flush the
     * remaining buffer and await completion.
     *
     * @param table - Target table name.
     * @param keys - Column names for the INSERT statement.
     * @returns Object with:
     * - `enqueue(data)` — buffers a row, flushing automatically when the buffer fills.
     * - `close()` — flushes remaining rows and resolves with total affected row count.
     */
    bulkWrite: <KEYS extends string>(table: string, keys: KEYS[]) => {
        enqueue: (data: Record<KEYS, any>) => void;
        close: () => Promise<number>;
    };
    /**
     * Schema-driven table replacement: drops the existing table, creates a new one
     * from the provided schema, bulk-inserts all enqueued rows, then creates indexes.
     *
     * Useful for full-refresh ETL patterns where a table is rebuilt from scratch.
     *
     * @param table - Table name to drop and recreate.
     * @param schema - Column definition map. Values are SQL type strings or
     * objects with `{ type, required?, unique?, generated? }`.
     * @param options - `indexes` array and `temp` flag for TEMPORARY tables.
     * @returns Object with `enqueue(data)` and `close()` following the same
     * contract as {@link SQLiteDB.bulkWrite}.
     */
    output: <SCHEMA extends Record<string, any>>(table: string, schema: SCHEMA, options?: any) => {
        enqueue: (data: any) => void;
        close: () => Promise<number>;
    };
    /**
     * Terminates all workers in the pool.
     *
     * @remarks
     * **OPFS files are NOT deleted.** `close()` calls `worker.terminate()` on each
     * pool worker — it does not remove any OPFS database files. Files created by
     * browser-sqlite persist in the origin's private file system across page loads.
     * To delete OPFS files, use the `navigator.storage.getDirectory()` API directly.
     */
    close: () => void;
    /**
     * Internal diagnostic handle. Not part of the stable public API.
     * Shape is subject to change without notice.
     * @internal
     */
    debug: unknown;
};
180
+ /**
181
+ * Creates a SQLite client backed by a pool of Web Workers, each running
182
+ * a wa-sqlite instance in a dedicated thread.
183
+ *
184
+ * @remarks
185
+ * **Browser requirements (COOP/COEP):** This function constructs a
186
+ * `SharedArrayBuffer` for cross-thread worker synchronization. Browsers
187
+ * require the page to be served with the following HTTP headers:
188
+ * ```
189
+ * Cross-Origin-Opener-Policy: same-origin
190
+ * Cross-Origin-Embedder-Policy: require-corp
191
+ * ```
192
+ * Without these headers, `new SharedArrayBuffer()` throws a `SecurityError`
193
+ * and the pool will never initialize.
194
+ *
195
+ * **Worker pool side effect:** Calling this function immediately spawns
196
+ * `poolSize` Web Worker threads and begins asynchronous database
197
+ * initialization. Workers become queryable once they emit a `ready` message.
198
+ *
199
+ * @param file - SQLite database file name within the OPFS origin.
200
+ * Each distinct name corresponds to a separate database file.
201
+ * @param clientOptions - Optional pool and VFS configuration.
202
+ * See {@link CreateSQLiteClientOptions} for field defaults.
203
+ * @returns A {@link SQLiteDB} object providing `read`, `write`, `stream`,
204
+ * `one`, `transaction`, `bulkWrite`, `output`, and `close` methods.
205
+ *
206
+ * @throws {Error} When `vfs` is `'AccessHandlePoolVFS'` and `poolSize` is
207
+ * greater than `1`. AccessHandlePoolVFS does not support concurrent access
208
+ * handles — set `poolSize: 1` explicitly when using this VFS.
209
+ *
210
+ * @example
211
+ * ```typescript
212
+ * import { createSQLiteClient } from 'browser-sqlite';
213
+ *
214
+ * const db = createSQLiteClient('myapp.sqlite', {
215
+ * poolSize: 3,
216
+ * vfs: 'OPFSPermutedVFS',
217
+ * pragmas: { journal_mode: 'WAL', synchronous: 'NORMAL' },
218
+ * });
219
+ *
220
+ * const users = await db.read<{ id: number; name: string }>(
221
+ * 'SELECT id, name FROM users WHERE active = ?',
222
+ * [1],
223
+ * );
224
+ * ```
225
+ */
226
+ export declare const createSQLiteClient: (file: string, clientOptions?: CreateSQLiteClientOptions) => {
227
+ read: <T extends Record<string, unknown> = Record<string, unknown>>(sql: string, params?: unknown[], options?: SQLiteQueryOptions<T>) => Promise<T[]>;
228
+ write: <T extends Record<string, unknown> = Record<string, unknown>>(sql: string, params?: unknown[], options?: SQLiteQueryOptions<T>) => Promise<{
229
+ result: T[];
230
+ affected: number;
231
+ }>;
232
+ stream: <T extends Record<string, unknown> = Record<string, unknown>>(sql: string, params?: unknown[], options?: SQLiteQueryOptions<T>) => AsyncGenerator<T[], void, unknown>;
233
+ one: <T extends Record<string, unknown> = Record<string, unknown>>(sql: string, params?: unknown[], options?: Omit<SQLiteQueryOptions<T>, "chunkSize" | "signal">) => Promise<T | undefined>;
234
+ transaction: <T = void>(callback: (db: Pick<SQLiteDB, "one" | "read" | "write" | "stream"> & {
235
+ commit: () => Promise<void>;
236
+ rollback: () => Promise<void>;
237
+ }) => Promise<T>, options?: {
238
+ readOnly?: boolean;
239
+ autoCommit?: boolean;
240
+ }) => Promise<T>;
241
+ bulkWrite: <KEYS extends string>(table: string, keys: KEYS[]) => {
242
+ enqueue: (data: { [K in KEYS]: any; }) => void;
243
+ close: () => Promise<number>;
244
+ };
245
+ output: <SCHEMA extends Record<string, string | {
246
+ type: string;
247
+ generated?: string;
248
+ required?: boolean;
249
+ unique?: boolean;
250
+ }>>(table: string, schema: SCHEMA, options?: {
251
+ indexes?: (keyof SCHEMA | (keyof SCHEMA)[] | ({
252
+ unique?: boolean;
253
+ } & ({
254
+ column: keyof SCHEMA;
255
+ } | {
256
+ columns: (keyof SCHEMA)[];
257
+ })))[] | undefined;
258
+ temp?: boolean;
259
+ }) => {
260
+ enqueue: (data: { [K in keyof SCHEMA as SCHEMA[K] extends {
261
+ generated: string;
262
+ } ? never : K]: any; }) => void;
263
+ close: () => Promise<number>;
264
+ };
265
+ close: () => void;
266
+ debug: {
267
+ readonly file: string;
268
+ readonly vfs: SQLiteVFS;
269
+ readonly pragmas: Record<string, string>;
270
+ readonly name: string;
271
+ readonly queue: {
272
+ write: number;
273
+ read: number;
274
+ };
275
+ workers: {
276
+ index: number;
277
+ name: string;
278
+ creationTime: number;
279
+ initializationTime?: number;
280
+ requests: {
281
+ startTime: number;
282
+ acquireTime?: number;
283
+ releaseTime?: number;
284
+ affectedRows: number;
285
+ queries: {
286
+ sql: string;
287
+ params?: any[];
288
+ startTime: number;
289
+ firstRowTime?: number;
290
+ endTime?: number;
291
+ error?: any;
292
+ affectedRows: number;
293
+ }[];
294
+ currentQuery?: {
295
+ sql: string;
296
+ params?: any[];
297
+ startTime: number;
298
+ firstRowTime?: number;
299
+ endTime?: number;
300
+ error?: any;
301
+ affectedRows: number;
302
+ };
303
+ }[];
304
+ currentRequest?: {
305
+ startTime: number;
306
+ acquireTime?: number;
307
+ releaseTime?: number;
308
+ affectedRows: number;
309
+ queries: {
310
+ sql: string;
311
+ params?: any[];
312
+ startTime: number;
313
+ firstRowTime?: number;
314
+ endTime?: number;
315
+ error?: any;
316
+ affectedRows: number;
317
+ }[];
318
+ currentQuery?: {
319
+ sql: string;
320
+ params?: any[];
321
+ startTime: number;
322
+ firstRowTime?: number;
323
+ endTime?: number;
324
+ error?: any;
325
+ affectedRows: number;
326
+ };
327
+ };
328
+ readonly status: string;
329
+ }[];
330
+ };
331
+ };
332
+ export {};
@@ -0,0 +1,52 @@
1
+ import type { CreateSQLiteClientOptions } from './client';
2
+ import { type WorkerOrchestrator } from './orchestrator';
3
+ import type { SQLiteVFS } from './types';
4
+ export declare const debugSQLQuery: (sql: string, params?: any[]) => string;
5
+ export declare const statusToLabel: (status: number) => string;
6
+ type QueryDebugState = {
7
+ sql: string;
8
+ params?: any[];
9
+ startTime: number;
10
+ firstRowTime?: number;
11
+ endTime?: number;
12
+ error?: any;
13
+ affectedRows: number;
14
+ };
15
+ type RequestDebugState = {
16
+ startTime: number;
17
+ acquireTime?: number;
18
+ releaseTime?: number;
19
+ affectedRows: number;
20
+ queries: QueryDebugState[];
21
+ currentQuery?: QueryDebugState;
22
+ };
23
+ type WorkerDebugState = {
24
+ index: number;
25
+ name: string;
26
+ creationTime: number;
27
+ initializationTime?: number;
28
+ requests: RequestDebugState[];
29
+ currentRequest?: RequestDebugState;
30
+ readonly status: string;
31
+ };
32
+ type ClientDebugState = {
33
+ readonly file: string;
34
+ readonly vfs: SQLiteVFS;
35
+ readonly pragmas: Record<string, string>;
36
+ readonly name: string;
37
+ readonly queue: {
38
+ write: number;
39
+ read: number;
40
+ };
41
+ workers: WorkerDebugState[];
42
+ };
43
+ export declare const createClientDebug: (file: string, orchestrator: WorkerOrchestrator, clientOptions: Required<Pick<CreateSQLiteClientOptions, "vfs" | "pragmas" | "name">>) => {
44
+ readonly state: ClientDebugState;
45
+ readonly createWorkerDebugState: (index: number, name: string) => WorkerDebugState;
46
+ readonly createRequestDebugState: () => {
47
+ state: RequestDebugState;
48
+ assign: (index: number) => void;
49
+ };
50
+ readonly createQueryDebugState: (workerIndex: number, sql: string, params?: any[]) => QueryDebugState;
51
+ };
52
+ export {};
@@ -0,0 +1 @@
1
+ export * from './client';
@@ -0,0 +1,87 @@
1
+ /**
2
+ * Worker orchestrator for SQLite worker pool synchronization.
3
+ *
4
+ * Responsibilities:
5
+ * - Establish an initialization lock to serialize worker database initialization
6
+ * - Track worker status transitions throughout their lifecycle
7
+ */
8
+ export declare const WorkerStatuses: {
9
+ readonly EMPTY: -3;
10
+ readonly NEW: -2;
11
+ readonly INITIALIZING: -1;
12
+ readonly INITIALIZED: 0;
13
+ readonly READY: 10;
14
+ readonly RESERVED: 49;
15
+ readonly RUNNING: 50;
16
+ readonly ABORTING: 99;
17
+ readonly DONE: 100;
18
+ };
19
+ export type WorkerStatus = (typeof WorkerStatuses)[keyof typeof WorkerStatuses];
20
+ /**
21
+ * Worker pool orchestrator using SharedArrayBuffer for cross-thread synchronization.
22
+ *
23
+ * Key features:
24
+ * - Serializes worker initialization via an atomic lock to prevent database conflicts
25
+ * - Tracks individual worker status using atomic operations for thread-safe state management
26
+ *
27
+ * @remarks
28
+ * **Worker lifecycle state machine (per slot):**
29
+ *
30
+ * ```
31
+ * EMPTY (-3) — slot allocated; Worker object not yet created by client.ts
32
+ * │
33
+ * NEW (-2) — Worker thread started; 'open' message not yet sent
34
+ * │
35
+ * INITIALIZING (-1) — 'open' message sent; worker calling orchestrator.lock()
36
+ * │ to serialize VFS + SQLite DB initialization across the pool
37
+ * INITIALIZED (0) — DB opened; orchestrator.unlock() called; about to post 'ready'
38
+ * │
39
+ * READY (10) — worker available for queries; pool is queryable from client.ts
40
+ * │
41
+ * RUNNING (50) — worker executing a query (set by worker.ts via Atomics)
42
+ * │
43
+ * ABORTING (99) — AbortSignal fired; worker.ts checks this flag in the step loop
44
+ * │ and exits early; transitions to DONE after the current step
45
+ * DONE (100) — query finished (normal or aborted); client calls releaseWorker()
46
+ * which sets status back to READY via setStatus()
47
+ * ```
48
+ *
49
+ * Note: `RESERVED (49)` is defined but intentionally unused in v1.
50
+ * The `INITIALIZED (0)` state is transient — the worker moves to `READY` atomically
51
+ * inside the `finally` block of `open()` in `worker.ts`.
52
+ */
53
+ export declare class WorkerOrchestrator {
54
+ readonly sharedArrayBuffer: SharedArrayBuffer;
55
+ private flags;
56
+ readonly size: number;
57
+ /**
58
+ * Creates a new orchestrator instance.
59
+ * @param init - Pool size (creates new SharedArrayBuffer) or existing SharedArrayBuffer
60
+ */
61
+ constructor(init: number | SharedArrayBuffer);
62
+ /**
63
+ * Acquire initialization lock.
64
+ * Workers call this during startup to serialize database initialization.
65
+ * Uses busy-wait with Atomics.wait() for blocking.
66
+ */
67
+ lock(): void;
68
+ /**
69
+ * Release initialization lock.
70
+ * Notifies one waiting worker that the lock is now available.
71
+ */
72
+ unlock(): void;
73
+ /**
74
+ * Update worker status atomically.
75
+ * @param index - Worker index in the pool
76
+ * @param status - New status to set
77
+ * @param from - Optional: expected current status for conditional update (CAS)
78
+ * @returns true if status was successfully updated
79
+ */
80
+ setStatus(index: number, status: WorkerStatus, from?: WorkerStatus): boolean;
81
+ /**
82
+ * Get current worker status.
83
+ * @param index - Worker index in the pool
84
+ * @returns Current worker status
85
+ */
86
+ getStatus(index: number): WorkerStatus;
87
+ }
@@ -0,0 +1,83 @@
1
+ export type SQLiteClientCallData = {
2
+ type: 'open';
3
+ file: string;
4
+ workerIndex: number;
5
+ url?: string;
6
+ flag: SharedArrayBuffer;
7
+ } | {
8
+ type: 'sql';
9
+ sql: string;
10
+ params?: any[];
11
+ options?: {
12
+ debug?: boolean;
13
+ chunkSize?: number;
14
+ };
15
+ } | {
16
+ type: 'abort';
17
+ };
18
+ export type SQLiteCLientCallParams<K extends SQLiteClientCallData['type']> = Omit<Extract<SQLiteClientCallData, {
19
+ type: K;
20
+ }>, 'type'>;
21
+ export type SQLiteWorkerMessageData<_T = unknown> = {
22
+ callId: number;
23
+ terminate?: boolean;
24
+ } & (SQLWorkerResultData[keyof SQLWorkerResultData] | {
25
+ type: 'error';
26
+ message: string;
27
+ });
28
+ export type SQLWorkerResultData<T = unknown> = {
29
+ open: {
30
+ success: boolean;
31
+ };
32
+ sql: {
33
+ type: 'partial';
34
+ result: T[];
35
+ } | {
36
+ type: 'one';
37
+ sizes: number[];
38
+ };
39
+ abort: {
40
+ type: 'done';
41
+ };
42
+ };
43
+ export declare const SharedArrayTypes: {
44
+ INT: number;
45
+ STRING: number;
46
+ OBJECT: number;
47
+ };
48
+ type SQLOptions = {
49
+ chunkSize?: number;
50
+ };
51
+ export type ClientMessageData = {
52
+ type: 'open';
53
+ file: string;
54
+ flags: SharedArrayBuffer;
55
+ index: number;
56
+ vfs?: SQLiteVFS;
57
+ pragmas?: Record<string, string>;
58
+ } | {
59
+ type: 'query';
60
+ callId: number;
61
+ sql: string;
62
+ params: any[];
63
+ options?: SQLOptions;
64
+ };
65
+ export type WorkerMessageData = {
66
+ type: 'ready';
67
+ callId: number;
68
+ } | {
69
+ type: 'chunk';
70
+ callId: number;
71
+ data: any[];
72
+ } | {
73
+ type: 'done';
74
+ callId: number;
75
+ affected: number;
76
+ } | {
77
+ type: 'error';
78
+ callId: number;
79
+ message: string;
80
+ cause?: unknown;
81
+ };
82
+ export type SQLiteVFS = 'OPFSPermutedVFS' | 'OPFSAdaptiveVFS' | 'OPFSCoopSyncVFS' | 'AccessHandlePoolVFS' | 'IDBBatchAtomicVFS';
83
+ export {};
@@ -0,0 +1,6 @@
1
+ export declare const sqlParams: () => {
2
+ addParam: (v: any) => string;
3
+ addParamArray: (values: any[]) => string;
4
+ params: any[];
5
+ };
6
+ export declare const isWriteQuery: (sql: string) => boolean;
@@ -0,0 +1 @@
1
+ export {};
package/package.json ADDED
@@ -0,0 +1,79 @@
1
+ {
2
+ "name": "browser-sqlite",
3
+ "version": "1.0.0-rc.3",
4
+ "description": "Browser SQLite with concurrent read / serial write isolation, backed by Web Workers and wa-sqlite",
5
+ "author": "LAlex",
6
+ "license": "MIT",
7
+ "repository": {
8
+ "type": "git",
9
+ "url": "git+https://github.com/lalexdotcom/browser-sqlite.git"
10
+ },
11
+ "bugs": {
12
+ "url": "https://github.com/lalexdotcom/browser-sqlite/issues"
13
+ },
14
+ "homepage": "https://github.com/lalexdotcom/browser-sqlite#readme",
15
+ "keywords": [
16
+ "sqlite",
17
+ "browser",
18
+ "web-worker",
19
+ "opfs",
20
+ "wa-sqlite",
21
+ "wasm",
22
+ "shared-array-buffer"
23
+ ],
24
+ "type": "module",
25
+ "exports": {
26
+ ".": {
27
+ "types": "./dist/esm/index.d.ts",
28
+ "import": "./dist/esm/index.js",
29
+ "default": "./dist/esm/index.js"
30
+ }
31
+ },
32
+ "types": "./dist/esm/index.d.ts",
33
+ "files": [
34
+ "dist"
35
+ ],
36
+ "scripts": {
37
+ "build": "rslib build",
38
+ "check": "biome check --write",
39
+ "lint": "biome lint",
40
+ "dev": "rslib build --watch",
41
+ "format": "biome format --write",
42
+ "prepare": "simple-git-hooks",
43
+ "test": "rstest",
44
+ "test:watch": "rstest --watch",
45
+ "test:unit": "rstest --project unit",
46
+ "test:browser": "rstest --project browser"
47
+ },
48
+ "devDependencies": {
49
+ "@biomejs/biome": "2.4.6",
50
+ "@rslib/core": "^0.20.0",
51
+ "@rstest/adapter-rslib": "^0.2.1",
52
+ "@rstest/browser": "0.9.4",
53
+ "@rstest/core": "^0.9.0",
54
+ "@types/node": "^24.12.0",
55
+ "lint-staged": "^16.4.0",
56
+ "playwright": "1.58.2",
57
+ "simple-git-hooks": "^2.13.1",
58
+ "typescript": "^5.9.3"
59
+ },
60
+ "lint-staged": {
61
+ "*.{js,ts,jsx,tsx,mjs,cjs}": [
62
+ "biome check --write --no-errors-on-unmatched"
63
+ ]
64
+ },
65
+ "simple-git-hooks": {
66
+ "pre-commit": "npx lint-staged && pnpm test && pnpm exec tsc --noEmit"
67
+ },
68
+ "pnpm": {
69
+ "onlyBuiltDependencies": [
70
+ "simple-git-hooks"
71
+ ]
72
+ },
73
+ "private": false,
74
+ "dependencies": {
75
+ "@lalex/promises": "^1.2.0",
76
+ "wa-sqlite": "github:rhashimoto/wa-sqlite#v1.0.9"
77
+ },
78
+ "packageManager": "pnpm@10.31.0"
79
+ }