@livestore/sqlite-wasm 0.4.0-dev.0 → 0.4.0-dev.10
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/dist/.tsbuildinfo +1 -1
- package/dist/browser/mod.d.ts +1 -0
- package/dist/browser/mod.d.ts.map +1 -1
- package/dist/browser/mod.js.map +1 -1
- package/dist/browser/opfs/AccessHandlePoolVFS.d.ts +17 -0
- package/dist/browser/opfs/AccessHandlePoolVFS.d.ts.map +1 -1
- package/dist/browser/opfs/AccessHandlePoolVFS.js +72 -1
- package/dist/browser/opfs/AccessHandlePoolVFS.js.map +1 -1
- package/dist/cf/BlockManager.d.ts +61 -0
- package/dist/cf/BlockManager.d.ts.map +1 -0
- package/dist/cf/BlockManager.js +157 -0
- package/dist/cf/BlockManager.js.map +1 -0
- package/dist/cf/CloudflareSqlVFS.d.ts +51 -0
- package/dist/cf/CloudflareSqlVFS.d.ts.map +1 -0
- package/dist/cf/CloudflareSqlVFS.js +351 -0
- package/dist/cf/CloudflareSqlVFS.js.map +1 -0
- package/dist/cf/CloudflareWorkerVFS.d.ts +72 -0
- package/dist/cf/CloudflareWorkerVFS.d.ts.map +1 -0
- package/dist/cf/CloudflareWorkerVFS.js +552 -0
- package/dist/cf/CloudflareWorkerVFS.js.map +1 -0
- package/dist/cf/mod.d.ts +43 -0
- package/dist/cf/mod.d.ts.map +1 -0
- package/dist/cf/mod.js +74 -0
- package/dist/cf/mod.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.js +314 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.js +266 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.js +462 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.js +334 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.js.map +1 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.d.ts +2 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.d.ts.map +1 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.js +354 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.js.map +1 -0
- package/dist/load-wasm/mod.node.d.ts.map +1 -1
- package/dist/load-wasm/mod.node.js +1 -2
- package/dist/load-wasm/mod.node.js.map +1 -1
- package/dist/load-wasm/mod.workerd.d.ts +2 -0
- package/dist/load-wasm/mod.workerd.d.ts.map +1 -0
- package/dist/load-wasm/mod.workerd.js +26 -0
- package/dist/load-wasm/mod.workerd.js.map +1 -0
- package/dist/make-sqlite-db.d.ts +1 -0
- package/dist/make-sqlite-db.d.ts.map +1 -1
- package/dist/make-sqlite-db.js +28 -4
- package/dist/make-sqlite-db.js.map +1 -1
- package/dist/node/NodeFS.d.ts +1 -2
- package/dist/node/NodeFS.d.ts.map +1 -1
- package/dist/node/NodeFS.js +1 -6
- package/dist/node/NodeFS.js.map +1 -1
- package/dist/node/mod.js +3 -8
- package/dist/node/mod.js.map +1 -1
- package/package.json +21 -8
- package/src/browser/mod.ts +1 -0
- package/src/browser/opfs/AccessHandlePoolVFS.ts +79 -1
- package/src/cf/BlockManager.ts +225 -0
- package/src/cf/CloudflareSqlVFS.ts +450 -0
- package/src/cf/CloudflareWorkerVFS.ts +664 -0
- package/src/cf/README.md +60 -0
- package/src/cf/mod.ts +143 -0
- package/src/cf/test/README.md +224 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.ts +389 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-core.test.ts +322 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-integration.test.ts +585 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.ts +403 -0
- package/src/cf/test/sql/cloudflare-sql-vfs-core.test.ts +433 -0
- package/src/load-wasm/mod.node.ts +1 -2
- package/src/load-wasm/mod.workerd.ts +26 -0
- package/src/make-sqlite-db.ts +38 -4
- package/src/node/NodeFS.ts +1 -9
- package/src/node/mod.ts +3 -10
package/src/cf/test/sql/cloudflare-sql-vfs-core.test.ts
ADDED
@@ -0,0 +1,433 @@
+/// <reference types="vitest/globals" />
+
+import type { CfTypes } from '@livestore/common-cf'
+import * as VFS from '@livestore/wa-sqlite/src/VFS.js'
+import { beforeEach, describe, expect, it } from 'vitest'
+import { CloudflareSqlVFS } from '../../mod.ts'
+
+describe('CloudflareSqlVFS - Core Functionality', () => {
+  let vfs: CloudflareSqlVFS
+  let mockSql: CfTypes.SqlStorage
+  let mockDatabase: Map<string, any[]>
+  let queryLog: string[]
+
+  beforeEach(async () => {
+    mockDatabase = new Map()
+    queryLog = []
+
+    // Mock SQL storage that mimics the Cloudflare DurableObject SQL API
+    mockSql = {
+      exec: <T extends Record<string, CfTypes.SqlStorageValue>>(
+        query: string,
+        ...bindings: any[]
+      ): CfTypes.SqlStorageCursor<T> => {
+        queryLog.push(`${query} [${bindings.join(', ')}]`)
+
+        // Simple SQL parser for testing - handles basic CREATE, INSERT, SELECT, UPDATE, DELETE
+        const normalizedQuery = query.trim().toUpperCase()
+
+        if (
+          normalizedQuery.includes('CREATE TABLE') ||
+          normalizedQuery.includes('CREATE INDEX') ||
+          normalizedQuery.includes('CREATE TRIGGER')
+        ) {
+          // Handle schema creation
+          return createMockCursor([] as any)
+        }
+
+        if (normalizedQuery.startsWith('INSERT OR REPLACE INTO VFS_BLOCKS')) {
+          const [filePath, blockId, blockData] = bindings
+          const key = `blocks:${filePath}`
+          if (!mockDatabase.has(key)) {
+            mockDatabase.set(key, [])
+          }
+          const blocks = mockDatabase.get(key)!
+          const existingIndex = blocks.findIndex((b: any) => b.block_id === blockId)
+          const blockEntry = { file_path: filePath, block_id: blockId, block_data: blockData }
+
+          if (existingIndex >= 0) {
+            blocks[existingIndex] = blockEntry
+          } else {
+            blocks.push(blockEntry)
+          }
+          return createMockCursor([] as any)
+        }
+
+        if (normalizedQuery.startsWith('INSERT INTO VFS_FILES')) {
+          const [filePath, fileSize, flags, createdAt, modifiedAt] = bindings
+          mockDatabase.set(`file:${filePath}`, {
+            file_path: filePath as string,
+            file_size: fileSize as number,
+            flags: flags as number,
+            created_at: createdAt as number,
+            modified_at: modifiedAt as number,
+          } as any)
+          return createMockCursor([] as any)
+        }
+
+        if (normalizedQuery.startsWith('SELECT') && normalizedQuery.includes('FROM VFS_FILES')) {
+          if (normalizedQuery.includes('WHERE FILE_PATH = ?')) {
+            const [filePath] = bindings
+            const fileData = mockDatabase.get(`file:${filePath}`)
+            return createMockCursor(fileData ? [fileData] : ([] as any))
+          }
+          return createMockCursor([] as any)
+        }
+
+        if (normalizedQuery.startsWith('SELECT') && normalizedQuery.includes('FROM VFS_BLOCKS')) {
+          if (normalizedQuery.includes('WHERE FILE_PATH = ?')) {
+            const filePath = bindings[0]
+            const blocks = mockDatabase.get(`blocks:${filePath}`) || []
+
+            if (normalizedQuery.includes('AND BLOCK_ID IN')) {
+              const requestedBlockIds = bindings.slice(1)
+              const matchingBlocks = blocks.filter((b: any) => requestedBlockIds.includes(b.block_id))
+              return createMockCursor(matchingBlocks as any)
+            }
+
+            return createMockCursor(blocks as any)
+          }
+          return createMockCursor([] as any)
+        }
+
+        if (normalizedQuery.startsWith('UPDATE VFS_FILES')) {
+          if (normalizedQuery.includes('SET FILE_SIZE = ?')) {
+            const [newSize, filePath] = bindings
+            const fileData = mockDatabase.get(`file:${filePath}`) as any
+            if (fileData) {
+              fileData.file_size = newSize as number
+              fileData.modified_at = Math.floor(Date.now() / 1000)
+            }
+          }
+          return createMockCursor([] as any)
+        }
+
+        if (normalizedQuery.startsWith('DELETE FROM VFS_BLOCKS')) {
+          const [filePath, minBlockId] = bindings
+          const blocks = mockDatabase.get(`blocks:${filePath}`)
+          if (blocks) {
+            const filteredBlocks = blocks.filter((b: any) => b.block_id < minBlockId)
+            mockDatabase.set(`blocks:${filePath}`, filteredBlocks)
+          }
+          return createMockCursor([] as any)
+        }
+
+        if (normalizedQuery.startsWith('DELETE FROM VFS_FILES')) {
+          const [filePath] = bindings
+          mockDatabase.delete(`file:${filePath}`)
+          mockDatabase.delete(`blocks:${filePath}`)
+          return createMockCursor([] as any)
+        }
+
+        // Default empty result for unhandled queries
+        console.warn('Unhandled query:', query, bindings)
+        return createMockCursor([] as any)
+      },
+
+      get databaseSize(): number {
+        return 1024 * 1024 // Mock 1MB database
+      },
+
+      Cursor: {} as any,
+      Statement: {} as any,
+    } as CfTypes.SqlStorage
+
+    function createMockCursor<T extends Record<string, CfTypes.SqlStorageValue>>(
+      data: T[],
+    ): CfTypes.SqlStorageCursor<T> {
+      let index = 0
+
+      return {
+        next: () => {
+          if (index < data.length) {
+            return { done: false, value: data[index++] }
+          }
+          return { done: true }
+        },
+        toArray: () => data,
+        one: () => {
+          if (data.length === 0) {
+            throw new Error('No results')
+          }
+          return data[0]
+        },
+        raw: function* () {
+          for (const item of data) {
+            yield Object.values(item) as CfTypes.SqlStorageValue[]
+          }
+        },
+        columnNames: Object.keys(data[0] || {}),
+        get rowsRead() {
+          return data.length
+        },
+        get rowsWritten() {
+          return 0
+        },
+        [Symbol.iterator]: function* () {
+          for (const item of data) {
+            yield item
+          }
+        },
+      } as CfTypes.SqlStorageCursor<T>
+    }
+
+    vfs = new CloudflareSqlVFS('test-sql-vfs', mockSql, {})
+    await vfs.isReady()
+  })
+
+  describe('Basic File Operations', () => {
+    it('should create and open files', async () => {
+      const path = '/test/basic.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      const result = vfs.jOpen(path, fileId, flags, pOutFlags)
+      expect(result).toBe(VFS.SQLITE_OK)
+      expect(pOutFlags.getUint32(0, true)).toBe(flags)
+
+      expect(vfs.jClose(fileId)).toBe(VFS.SQLITE_OK)
+
+      // Verify file was created in mock database
+      expect(mockDatabase.has(`file:${path}`)).toBe(true)
+    })
+
+    it('should handle file access checks', async () => {
+      const path = '/test/access.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      // File doesn't exist initially
+      const pResOut = new DataView(new ArrayBuffer(4))
+      expect(vfs.jAccess(path, VFS.SQLITE_ACCESS_EXISTS, pResOut)).toBe(VFS.SQLITE_OK)
+      expect(pResOut.getUint32(0, true)).toBe(0)
+
+      // Create file
+      vfs.jOpen(path, fileId, flags, pOutFlags)
+      vfs.jClose(fileId)
+
+      // File should exist now
+      expect(vfs.jAccess(path, VFS.SQLITE_ACCESS_EXISTS, pResOut)).toBe(VFS.SQLITE_OK)
+      expect(pResOut.getUint32(0, true)).toBe(1)
+    })
+
+    it('should handle basic read/write operations', async () => {
+      const path = '/test/readwrite.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      vfs.jOpen(path, fileId, flags, pOutFlags)
+
+      // Write data
+      const testData = new TextEncoder().encode('Hello, SQL VFS!')
+      expect(vfs.jWrite(fileId, testData, 0)).toBe(VFS.SQLITE_OK)
+
+      // Read data back
+      const readBuffer = new Uint8Array(testData.length)
+      expect(vfs.jRead(fileId, readBuffer, 0)).toBe(VFS.SQLITE_OK)
+      expect(readBuffer).toEqual(testData)
+
+      vfs.jClose(fileId)
+    })
+
+    it('should handle file size operations', async () => {
+      const path = '/test/size.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      vfs.jOpen(path, fileId, flags, pOutFlags)
+
+      // Initial size should be 0
+      const pSize64 = new DataView(new ArrayBuffer(8))
+      expect(vfs.jFileSize(fileId, pSize64)).toBe(VFS.SQLITE_OK)
+      expect(pSize64.getBigInt64(0, true)).toBe(0n)
+
+      // Write data and check size
+      const testData = new Uint8Array(1000)
+      testData.fill(0xaa)
+      vfs.jWrite(fileId, testData, 0)
+
+      expect(vfs.jFileSize(fileId, pSize64)).toBe(VFS.SQLITE_OK)
+      expect(pSize64.getBigInt64(0, true)).toBe(1000n)
+
+      vfs.jClose(fileId)
+    })
+
+    it('should handle file truncation', async () => {
+      const path = '/test/truncate.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      vfs.jOpen(path, fileId, flags, pOutFlags)
+
+      // Write data
+      const testData = new Uint8Array(2000)
+      testData.fill(0xbb)
+      vfs.jWrite(fileId, testData, 0)
+
+      // Truncate to smaller size
+      expect(vfs.jTruncate(fileId, 500)).toBe(VFS.SQLITE_OK)
+
+      // Verify size
+      const pSize64 = new DataView(new ArrayBuffer(8))
+      expect(vfs.jFileSize(fileId, pSize64)).toBe(VFS.SQLITE_OK)
+      expect(pSize64.getBigInt64(0, true)).toBe(500n)
+
+      vfs.jClose(fileId)
+    })
+
+    it('should handle sync operations', async () => {
+      const path = '/test/sync.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      vfs.jOpen(path, fileId, flags, pOutFlags)
+
+      const testData = new TextEncoder().encode('Sync test data')
+      vfs.jWrite(fileId, testData, 0)
+
+      // Test different sync modes - should all be no-ops for SQL VFS
+      expect(vfs.jSync(fileId, VFS.SQLITE_SYNC_NORMAL)).toBe(VFS.SQLITE_OK)
+      expect(vfs.jSync(fileId, VFS.SQLITE_SYNC_FULL)).toBe(VFS.SQLITE_OK)
+      expect(vfs.jSync(fileId, VFS.SQLITE_SYNC_DATAONLY)).toBe(VFS.SQLITE_OK)
+
+      vfs.jClose(fileId)
+    })
+
+    it('should handle file deletion', async () => {
+      const path = '/test/delete.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      // Create file
+      vfs.jOpen(path, fileId, flags, pOutFlags)
+      const testData = new TextEncoder().encode('Delete test')
+      vfs.jWrite(fileId, testData, 0)
+      vfs.jClose(fileId)
+
+      // Delete file
+      expect(vfs.jDelete(path, 0)).toBe(VFS.SQLITE_OK)
+
+      // Verify file is gone
+      expect(mockDatabase.has(`file:${path}`)).toBe(false)
+      expect(mockDatabase.has(`blocks:${path}`)).toBe(false)
+    })
+  })
+
+  describe('VFS Management', () => {
+    it('should provide correct VFS characteristics', () => {
+      expect(vfs.jSectorSize(1)).toBe(4096)
+      expect(vfs.jDeviceCharacteristics(1)).toBe(VFS.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN)
+    })
+
+    it('should handle multiple files', async () => {
+      const files = [
+        { path: '/test/file1.db', id: 1 },
+        { path: '/test/file2.db', id: 2 },
+        { path: '/test/file3.db', id: 3 },
+      ]
+
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      // Open all files
+      for (const file of files) {
+        expect(vfs.jOpen(file.path, file.id, flags, pOutFlags)).toBe(VFS.SQLITE_OK)
+      }
+
+      // Write different data to each
+      for (let i = 0; i < files.length; i++) {
+        const data = new TextEncoder().encode(`File ${i + 1} data`)
+        expect(vfs.jWrite(files[i]?.id ?? 0, data, 0)).toBe(VFS.SQLITE_OK)
+      }
+
+      // Read back and verify
+      for (let i = 0; i < files.length; i++) {
+        const expected = new TextEncoder().encode(`File ${i + 1} data`)
+        const actual = new Uint8Array(expected.length)
+        expect(vfs.jRead(files[i]?.id ?? 0, actual, 0)).toBe(VFS.SQLITE_OK)
+        expect(actual).toEqual(expected)
+      }
+
+      // Close all files
+      for (const file of files) {
+        expect(vfs.jClose(file.id)).toBe(VFS.SQLITE_OK)
+      }
+    })
+
+    it('should provide VFS statistics', () => {
+      const stats = vfs.getStats()
+      expect(stats).toHaveProperty('activeFiles')
+      expect(stats).toHaveProperty('openFiles')
+      expect(stats).toHaveProperty('maxFiles')
+      expect(stats).toHaveProperty('blockSize')
+      expect(stats).toHaveProperty('totalStoredBytes')
+      expect(stats.blockSize).toBe(64 * 1024)
+    })
+  })
+
+  describe('Error Handling', () => {
+    it('should handle invalid file IDs', () => {
+      const invalidFileId = 999
+      const buffer = new Uint8Array(100)
+
+      expect(vfs.jRead(invalidFileId, buffer, 0)).toBe(VFS.SQLITE_IOERR)
+      expect(vfs.jWrite(invalidFileId, buffer, 0)).toBe(VFS.SQLITE_IOERR)
+      expect(vfs.jTruncate(invalidFileId, 50)).toBe(VFS.SQLITE_IOERR)
+      expect(vfs.jSync(invalidFileId, 0)).toBe(VFS.SQLITE_IOERR)
+      expect(vfs.jClose(invalidFileId)).toBe(VFS.SQLITE_OK)
+    })
+
+    it('should handle invalid paths', () => {
+      const invalidPath = ''
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      expect(vfs.jOpen(invalidPath, fileId, flags, pOutFlags)).toBe(VFS.SQLITE_OK)
+    })
+
+    it('should handle file operations on closed files', () => {
+      const path = '/test/closed.db'
+      const fileId = 1
+      const flags = VFS.SQLITE_OPEN_CREATE | VFS.SQLITE_OPEN_READWRITE
+      const pOutFlags = new DataView(new ArrayBuffer(4))
+
+      // Open and close file
+      vfs.jOpen(path, fileId, flags, pOutFlags)
+      vfs.jClose(fileId)
+
+      // Try to operate on closed file
+      const buffer = new Uint8Array(10)
+      expect(vfs.jRead(fileId, buffer, 0)).toBe(VFS.SQLITE_IOERR)
+      expect(vfs.jWrite(fileId, buffer, 0)).toBe(VFS.SQLITE_IOERR)
+    })
+  })
+
+  describe('Constants and Compatibility', () => {
+    it('should define correct VFS constants', () => {
+      expect(VFS.SQLITE_OK).toBe(0)
+      expect(VFS.SQLITE_IOERR).toBe(10)
+      expect(VFS.SQLITE_CANTOPEN).toBe(14)
+      expect(VFS.SQLITE_READONLY).toBe(8)
+      expect(VFS.SQLITE_IOERR_SHORT_READ).toBe(522)
+      expect(VFS.SQLITE_IOERR_WRITE).toBe(778)
+      expect(VFS.SQLITE_IOERR_TRUNCATE).toBe(1546)
+    })
+
+    it('should handle VFS flags correctly', () => {
+      expect(VFS.SQLITE_OPEN_CREATE).toBeTruthy()
+      expect(VFS.SQLITE_OPEN_READWRITE).toBeTruthy()
+      expect(VFS.SQLITE_OPEN_READONLY).toBeTruthy()
+      expect(VFS.SQLITE_OPEN_MAIN_DB).toBeTruthy()
+      expect(VFS.SQLITE_OPEN_WAL).toBeTruthy()
+      expect(VFS.SQLITE_OPEN_MAIN_JOURNAL).toBeTruthy()
+    })
+  })
+})
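The mocked `SqlStorage` above only has to recognize queries against two tables, `vfs_files` (one metadata row per file) and `vfs_blocks` (file contents split into blocks, 64 KiB each per the `stats.blockSize` assertion). Below is a sketch of the storage layout those queries imply, written as a TypeScript DDL string; it is inferred from the mock, not copied from `CloudflareSqlVFS`, so the table names match the queries but the column types and constraints are assumptions.

// Inferred from the queries the mock handles; not the actual DDL used by CloudflareSqlVFS.
export const inferredVfsSchema = /* sql */ `
  CREATE TABLE IF NOT EXISTS vfs_files (
    file_path   TEXT PRIMARY KEY,
    file_size   INTEGER NOT NULL,
    flags       INTEGER NOT NULL,
    created_at  INTEGER NOT NULL,
    modified_at INTEGER NOT NULL
  );

  CREATE TABLE IF NOT EXISTS vfs_blocks (
    file_path  TEXT    NOT NULL,
    block_id   INTEGER NOT NULL,
    block_data BLOB    NOT NULL,
    PRIMARY KEY (file_path, block_id)
  );
`

Under that layout, `jRead`/`jWrite` map to `SELECT` and `INSERT OR REPLACE` keyed by `(file_path, block_id)`, and `jTruncate` drops every block at or above the new end of file, which matches the `DELETE FROM vfs_blocks` handling in the mock.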
package/src/load-wasm/mod.node.ts
CHANGED
@@ -1,6 +1,5 @@
-import * as WaSqlite from '@livestore/wa-sqlite'
-// @ts-expect-error TODO fix types in wa-sqlite
 import WaSqliteFactory from '@livestore/wa-sqlite/dist/wa-sqlite.node.mjs'
+import * as WaSqlite from '@livestore/wa-sqlite/src/sqlite-api.js'
 
 export const loadSqlite3Wasm = async () => {
   const module = await WaSqliteFactory()
|
|
|
1
|
+
import * as WaSqlite from '@livestore/wa-sqlite'
|
|
2
|
+
import WaSqliteFactory from '@livestore/wa-sqlite/dist/wa-sqlite.mjs'
|
|
3
|
+
|
|
4
|
+
// @ts-expect-error TODO fix types in wa-sqlite
|
|
5
|
+
import wasm from '@livestore/wa-sqlite/dist/wa-sqlite.wasm'
|
|
6
|
+
|
|
7
|
+
export const loadSqlite3Wasm = async () => {
|
|
8
|
+
// It seems we need to pass in the wasm binary directly for workerd to work
|
|
9
|
+
const module = await WaSqliteFactory({
|
|
10
|
+
instantiateWasm: (info: any, receiveInstance: any) => {
|
|
11
|
+
try {
|
|
12
|
+
// Use the pre-compiled module directly
|
|
13
|
+
const instance = new WebAssembly.Instance(wasm, info)
|
|
14
|
+
receiveInstance(instance, wasm)
|
|
15
|
+
return instance.exports
|
|
16
|
+
} catch (error) {
|
|
17
|
+
console.error('Failed to instantiate WASM:', error)
|
|
18
|
+
throw error
|
|
19
|
+
}
|
|
20
|
+
},
|
|
21
|
+
})
|
|
22
|
+
const sqlite3 = WaSqlite.Factory(module)
|
|
23
|
+
// @ts-expect-error TODO fix types
|
|
24
|
+
sqlite3.module = module
|
|
25
|
+
return sqlite3
|
|
26
|
+
}
|
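Combined with the test file earlier in this diff, the workerd loader suggests roughly the following wiring inside a Durable Object. This is a hedged sketch rather than documented API: the `@livestore/sqlite-wasm/...` subpath imports, the empty options bag, and the `as any` casts are assumptions, while the constructor shape, `isReady()`, `vfs_register`, `open_v2Sync`, and `close` all appear elsewhere in this release's diffs.

// Hypothetical Durable Object wiring; the import paths are assumptions, not confirmed by this diff.
import { loadSqlite3Wasm } from '@livestore/sqlite-wasm/load-wasm' // assumed subpath export
import { CloudflareSqlVFS } from '@livestore/sqlite-wasm/cf' // assumed subpath export

export class SqliteDurableObject {
  constructor(private readonly ctx: { storage: { sql: unknown } }) {}

  async open() {
    const sqlite3 = await loadSqlite3Wasm()
    // Constructor shape mirrors the test: (vfsName, SqlStorage, options)
    const vfs = new CloudflareSqlVFS('cf-sql-vfs', this.ctx.storage.sql as any, {})
    await vfs.isReady()
    // Registration and open mirror the Node adapter later in this diff
    sqlite3.vfs_register(vfs as any, false)
    const dbPointer = sqlite3.open_v2Sync('app.db', undefined, 'cf-sql-vfs')
    return { sqlite3, dbPointer }
  }
}

Note that the `instantiateWasm` hook in the loader above only works because the bundler (Wrangler, for module workers) provides the `.wasm` import as a pre-compiled `WebAssembly.Module`; `new WebAssembly.Instance(wasm, info)` would throw if `wasm` were raw bytes.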
package/src/make-sqlite-db.ts
CHANGED
@@ -7,6 +7,7 @@ import type {
 } from '@livestore/common'
 import { SqliteDbHelper, SqliteError } from '@livestore/common'
 import { EventSequenceNumber } from '@livestore/common/schema'
+import type { SQLiteAPI } from '@livestore/wa-sqlite'
 import * as SqliteConstants from '@livestore/wa-sqlite/src/sqlite-constants.js'
 import { makeInMemoryDb } from './in-memory-vfs.ts'
 
@@ -175,15 +176,48 @@ export const makeSqliteDb = <
     // if (readOnly === true) {
     //   sqlite3.deserialize(db, 'main', bytes, bytes.length, bytes.length, FREE_ON_CLOSE | RESIZEABLE)
     // } else {
+    const ensureSuccess = (rc: number, operation: string) => {
+      if (rc !== SqliteConstants.SQLITE_OK) {
+        throw new SqliteError({
+          code: rc,
+          cause: new Error(`${operation} failed with rc=${rc}`),
+          note: 'Snapshot import failed during SQLite copy',
+        })
+      }
+    }
+
     if (source instanceof Uint8Array) {
+      const WAL_FILE_FORMAT = 2
+      if (source.length >= 24 && (source[18] === WAL_FILE_FORMAT || source[19] === WAL_FILE_FORMAT)) {
+        throw new SqliteError({
+          code: SqliteConstants.SQLITE_CANTOPEN,
+          cause: new Error('WAL snapshots are not supported'),
+          note: 'Import expects rollback-journal snapshots (journal_mode=DELETE). Please convert snapshot before importing.',
+        })
+      }
+
       const tmpDb = makeInMemoryDb(sqlite3)
       // TODO find a way to do this more efficiently with sqlite to avoid either of the deserialize + backup call
      // Maybe this can be done via the VFS API
-      sqlite3.deserialize(
-
-
+      const rcDeserialize = sqlite3.deserialize(
+        tmpDb.dbPointer,
+        'main',
+        source,
+        source.length,
+        source.length,
+        FREE_ON_CLOSE | RESIZEABLE,
+      )
+      ensureSuccess(rcDeserialize, 'sqlite3.deserialize')
+
+      try {
+        const rcBackup = sqlite3.backup(dbPointer, 'main', tmpDb.dbPointer, 'main')
+        ensureSuccess(rcBackup, 'sqlite3.backup')
+      } finally {
+        sqlite3.close(tmpDb.dbPointer)
+      }
     } else {
-      sqlite3.backup(dbPointer, 'main', source.metadata.dbPointer, 'main')
+      const rcBackup = sqlite3.backup(dbPointer, 'main', source.metadata.dbPointer, 'main')
+      ensureSuccess(rcBackup, 'sqlite3.backup')
     }
 
     metadata.configureDb(sqliteDb)
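The new `Uint8Array` branch rejects WAL-format snapshots up front: bytes 18 and 19 of the SQLite database header hold the file-format write/read versions, which are 2 for WAL databases and 1 for rollback-journal databases, so the guard reads SQLite's own format flag rather than trusting file names. Since the error note asks callers to convert snapshots before importing, here is a minimal sketch of one way to do that, assuming Node's built-in `node:sqlite` module is available (an assumption; any binding that can run `PRAGMA journal_mode = DELETE` would do).

// Sketch only: convert a WAL-mode snapshot file back to journal_mode=DELETE so the importer
// above accepts it. Uses node:sqlite (Node >= 22); swap in your own SQLite binding if needed.
import { DatabaseSync } from 'node:sqlite'
import * as fs from 'node:fs'

export const toRollbackJournalSnapshot = (snapshotPath: string): Uint8Array => {
  const db = new DatabaseSync(snapshotPath)
  // Checkpoints any WAL content and rewrites header bytes 18/19 from 2 (WAL) back to 1.
  db.exec('PRAGMA journal_mode = DELETE')
  db.close()
  return fs.readFileSync(snapshotPath)
}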
package/src/node/NodeFS.ts
CHANGED
@@ -19,12 +19,10 @@ export class NodeFS extends FacadeVFS {
   // biome-ignore lint/correctness/noUnusedPrivateClassMembers: for debugging
   private lastError: Error | null = null
   private readonly directory: string
-
-  constructor(name: string, sqlite3: WaSqlite.SQLiteAPI, directory: string, onShutdown: () => void) {
+  constructor(name: string, sqlite3: WaSqlite.SQLiteAPI, directory: string) {
     super(name, sqlite3)
 
     this.directory = directory
-    this.onShutdown = onShutdown
   }
 
   getFilename(fileId: number): string {
@@ -104,8 +102,6 @@ export class NodeFS extends FacadeVFS {
 
   jClose(fileId: number): number {
     try {
-      this.onShutdown()
-
       const file = this.mapIdToFile.get(fileId)
       if (!file) return VFS.SQLITE_OK
 
@@ -167,8 +163,6 @@ export class NodeFS extends FacadeVFS {
 
   jDelete(zName: string, _syncDir: number): number {
     try {
-      this.onShutdown()
-
       const pathname = path.resolve(this.directory, zName)
       fs.unlinkSync(pathname)
       return VFS.SQLITE_OK
@@ -191,8 +185,6 @@ export class NodeFS extends FacadeVFS {
   }
 
   deleteDb(fileName: string) {
-    this.onShutdown()
-
     fs.unlinkSync(path.join(this.directory, fileName))
   }
 }
package/src/node/mod.ts
CHANGED
@@ -96,8 +96,6 @@ export const sqliteDbFactory = ({
   }),
 )
 
-const nodeFsVfsMap = new Map<string, NodeFS>()
-
 const makeNodeFsDb = ({
   sqlite3,
   fileName,
@@ -113,14 +111,11 @@ const makeNodeFsDb = ({
   // NOTE to keep the filePath short, we use the directory name in the vfs name
   // If this is becoming a problem, we can use a hashed version of the directory name
   const vfsName = `node-fs-${directory}`
-  if (
+  if (sqlite3.vfs_registered.has(vfsName) === false) {
     // TODO refactor with Effect FileSystem instead of using `node:fs` directly inside of NodeFS
-    const nodeFsVfs = new NodeFS(vfsName, (sqlite3 as any).module, directory
-      nodeFsVfsMap.delete(vfsName)
-    })
+    const nodeFsVfs = new NodeFS(vfsName, (sqlite3 as any).module, directory)
     // @ts-expect-error TODO fix types
     sqlite3.vfs_register(nodeFsVfs, false)
-    nodeFsVfsMap.set(vfsName, nodeFsVfs)
   }
 
   yield* fs.makeDirectory(directory, { recursive: true })
@@ -133,7 +128,5 @@ const makeNodeFsDb = ({
   // NOTE SQLite will return a "disk I/O error" if the file path is too long.
   const dbPointer = sqlite3.open_v2Sync(fileName, undefined, vfsName)
 
-
-
-  return { dbPointer, vfs }
+  return { dbPointer, vfs: {} as UNUSED<'only needed in web adapter currently and should longer-term be removed'> }
 }).pipe(UnexpectedError.mapToUnexpectedError)