@livestore/sqlite-wasm 0.4.0-dev.1 → 0.4.0-dev.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/browser/mod.d.ts +1 -0
  3. package/dist/browser/mod.d.ts.map +1 -1
  4. package/dist/browser/mod.js.map +1 -1
  5. package/dist/browser/opfs/AccessHandlePoolVFS.d.ts +17 -0
  6. package/dist/browser/opfs/AccessHandlePoolVFS.d.ts.map +1 -1
  7. package/dist/browser/opfs/AccessHandlePoolVFS.js +72 -1
  8. package/dist/browser/opfs/AccessHandlePoolVFS.js.map +1 -1
  9. package/dist/cf/BlockManager.d.ts +61 -0
  10. package/dist/cf/BlockManager.d.ts.map +1 -0
  11. package/dist/cf/BlockManager.js +157 -0
  12. package/dist/cf/BlockManager.js.map +1 -0
  13. package/dist/cf/CloudflareSqlVFS.d.ts +51 -0
  14. package/dist/cf/CloudflareSqlVFS.d.ts.map +1 -0
  15. package/dist/cf/CloudflareSqlVFS.js +351 -0
  16. package/dist/cf/CloudflareSqlVFS.js.map +1 -0
  17. package/dist/cf/CloudflareWorkerVFS.d.ts +72 -0
  18. package/dist/cf/CloudflareWorkerVFS.d.ts.map +1 -0
  19. package/dist/cf/CloudflareWorkerVFS.js +552 -0
  20. package/dist/cf/CloudflareWorkerVFS.js.map +1 -0
  21. package/dist/cf/mod.d.ts +43 -0
  22. package/dist/cf/mod.d.ts.map +1 -0
  23. package/dist/cf/mod.js +74 -0
  24. package/dist/cf/mod.js.map +1 -0
  25. package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.d.ts +2 -0
  26. package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.d.ts.map +1 -0
  27. package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.js +314 -0
  28. package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.js.map +1 -0
  29. package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.d.ts +2 -0
  30. package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.d.ts.map +1 -0
  31. package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.js +266 -0
  32. package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.js.map +1 -0
  33. package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.d.ts +2 -0
  34. package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.d.ts.map +1 -0
  35. package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.js +462 -0
  36. package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.js.map +1 -0
  37. package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.d.ts +2 -0
  38. package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.d.ts.map +1 -0
  39. package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.js +334 -0
  40. package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.js.map +1 -0
  41. package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.d.ts +2 -0
  42. package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.d.ts.map +1 -0
  43. package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.js +354 -0
  44. package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.js.map +1 -0
  45. package/dist/load-wasm/mod.node.d.ts.map +1 -1
  46. package/dist/load-wasm/mod.node.js +1 -2
  47. package/dist/load-wasm/mod.node.js.map +1 -1
  48. package/dist/load-wasm/mod.workerd.d.ts +2 -0
  49. package/dist/load-wasm/mod.workerd.d.ts.map +1 -0
  50. package/dist/load-wasm/mod.workerd.js +28 -0
  51. package/dist/load-wasm/mod.workerd.js.map +1 -0
  52. package/dist/make-sqlite-db.d.ts +1 -0
  53. package/dist/make-sqlite-db.d.ts.map +1 -1
  54. package/dist/make-sqlite-db.js +28 -4
  55. package/dist/make-sqlite-db.js.map +1 -1
  56. package/dist/node/NodeFS.d.ts +1 -2
  57. package/dist/node/NodeFS.d.ts.map +1 -1
  58. package/dist/node/NodeFS.js +1 -6
  59. package/dist/node/NodeFS.js.map +1 -1
  60. package/dist/node/mod.js +3 -8
  61. package/dist/node/mod.js.map +1 -1
  62. package/package.json +21 -8
  63. package/src/browser/mod.ts +1 -0
  64. package/src/browser/opfs/AccessHandlePoolVFS.ts +79 -1
  65. package/src/cf/BlockManager.ts +225 -0
  66. package/src/cf/CloudflareSqlVFS.ts +450 -0
  67. package/src/cf/CloudflareWorkerVFS.ts +664 -0
  68. package/src/cf/README.md +60 -0
  69. package/src/cf/mod.ts +143 -0
  70. package/src/cf/test/README.md +224 -0
  71. package/src/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.ts +389 -0
  72. package/src/cf/test/async-storage/cloudflare-worker-vfs-core.test.ts +322 -0
  73. package/src/cf/test/async-storage/cloudflare-worker-vfs-integration.test.ts +585 -0
  74. package/src/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.ts +403 -0
  75. package/src/cf/test/sql/cloudflare-sql-vfs-core.test.ts +433 -0
  76. package/src/load-wasm/mod.node.ts +1 -2
  77. package/src/load-wasm/mod.workerd.ts +28 -0
  78. package/src/make-sqlite-db.ts +38 -4
  79. package/src/node/NodeFS.ts +1 -9
  80. package/src/node/mod.ts +3 -10
@@ -0,0 +1,664 @@
1
import type { CfTypes } from '@livestore/common-cf'
import * as VFS from '@livestore/wa-sqlite/src/VFS.js'
import { FacadeVFS } from '../FacadeVFS.ts'

const SECTOR_SIZE = 4096

// Chunk size optimized for SQLite I/O patterns
// 64 KiB provides good balance between memory usage and I/O efficiency
// while staying well under DurableObjectStorage's 128 KiB limit
const CHUNK_SIZE = 64 * 1024 // 64 KiB

// Cache configuration for synchronous operations
const DEFAULT_CACHE_SIZE = 10 // Number of chunks to cache
const DEFAULT_MAX_FILES = 100 // Maximum number of files

// These file types are expected to persist in the file system outside
// a session. Other files will be removed on VFS start.
const PERSISTENT_FILE_TYPES =
  VFS.SQLITE_OPEN_MAIN_DB | VFS.SQLITE_OPEN_MAIN_JOURNAL | VFS.SQLITE_OPEN_SUPER_JOURNAL | VFS.SQLITE_OPEN_WAL

// Per-file bookkeeping, persisted under `file:${path}:meta`.
interface FileMetadata {
  // Logical file size in bytes (backing chunks may be zero-padded past this).
  size: number
  // SQLite open flags the file was created with (used for cleanup decisions).
  flags: number
  // Number of CHUNK_SIZE chunks backing the file.
  chunkCount: number
  // Creation timestamp in ms since epoch.
  created: number
}

// A single chunk held in the in-memory LRU cache.
interface CacheEntry {
  data: Uint8Array
  // Timestamp of the last read/write, used for LRU eviction.
  lastAccessed: number
}

// State associated with one open SQLite file descriptor.
interface FileHandle {
  path: string
  flags: number
  // Shared reference with #metadataCache — mutations are visible to both.
  metadata: FileMetadata
}
38
+
39
+ /**
40
+ * VFS implementation using Cloudflare DurableObjectStorage as the backend.
41
+ * Uses chunked storage with in-memory caching for synchronous operations.
42
+ *
43
+ * Storage Strategy:
44
+ * - Files are split into 64 KiB chunks for optimal SQLite I/O patterns
45
+ * - Metadata cached in memory for synchronous access
46
+ * - LRU cache for frequently accessed chunks
47
+ *
48
+ * Key Schema:
49
+ * - file:${path}:meta - File metadata (size, flags, chunkCount, created)
50
+ * - file:${path}:${chunkIndex} - File data chunks (64 KiB max)
51
+ * - index:files - Set of active file paths
52
+ */
53
+ export class CloudflareWorkerVFS extends FacadeVFS {
54
+ log = null
55
+
56
+ #storage: CfTypes.DurableObjectStorage
57
+ #initialized = false
58
+
59
+ // In-memory caches for synchronous operations
60
+ #metadataCache = new Map<string, FileMetadata>()
61
+ #chunkCache = new Map<string, CacheEntry>()
62
+ #activeFiles = new Set<string>()
63
+ #openFiles = new Map<number, FileHandle>()
64
+
65
+ // Configuration
66
+ #maxCacheSize: number
67
+ #maxFiles: number
68
+
69
+ static async create(name: string, storage: CfTypes.DurableObjectStorage, module: any) {
70
+ const vfs = new CloudflareWorkerVFS(name, storage, module)
71
+ await vfs.isReady()
72
+ return vfs
73
+ }
74
+
75
+ constructor(name: string, storage: CfTypes.DurableObjectStorage, module: any) {
76
+ super(name, module)
77
+ this.#storage = storage
78
+ this.#maxCacheSize = DEFAULT_CACHE_SIZE
79
+ this.#maxFiles = DEFAULT_MAX_FILES
80
+ }
81
+
82
+ // Storage key generation helpers
83
+ #getMetadataKey(path: string): string {
84
+ return `file:${path}:meta`
85
+ }
86
+
87
+ #getChunkKey(path: string, chunkIndex: number): string {
88
+ return `file:${path}:${chunkIndex}`
89
+ }
90
+
91
+ #getCacheKey(path: string, chunkIndex: number): string {
92
+ return `${path}:${chunkIndex}`
93
+ }
94
+
95
+ // Cache management
96
+ #evictLRUChunk() {
97
+ if (this.#chunkCache.size < this.#maxCacheSize) return
98
+
99
+ let oldestKey = ''
100
+ let oldestTime = Date.now()
101
+
102
+ for (const [key, entry] of this.#chunkCache) {
103
+ if (entry.lastAccessed < oldestTime) {
104
+ oldestTime = entry.lastAccessed
105
+ oldestKey = key
106
+ }
107
+ }
108
+
109
+ if (oldestKey) {
110
+ this.#chunkCache.delete(oldestKey)
111
+ }
112
+ }
113
+
114
+ #getCachedChunk(path: string, chunkIndex: number): Uint8Array | undefined {
115
+ const key = this.#getCacheKey(path, chunkIndex)
116
+ const entry = this.#chunkCache.get(key)
117
+ if (entry) {
118
+ entry.lastAccessed = Date.now()
119
+ return entry.data
120
+ }
121
+ return undefined
122
+ }
123
+
124
+ #setCachedChunk(path: string, chunkIndex: number, data: Uint8Array) {
125
+ this.#evictLRUChunk()
126
+ const key = this.#getCacheKey(path, chunkIndex)
127
+ this.#chunkCache.set(key, {
128
+ data: data.slice(), // Copy the data
129
+ lastAccessed: Date.now(),
130
+ })
131
+ }
132
+
133
+ // Critical: Handle synchronous operations with async backend
134
+ // Strategy: Use aggressive caching + background sync for durability
135
+ // All reads must be served from cache, writes are cached immediately
136
+ // and synced to storage asynchronously
137
+
138
+ #pendingWrites = new Set<string>()
139
+ #writePromises = new Map<string, Promise<unknown>>()
140
+
141
+ #scheduleWrite(path: string, operation: () => Promise<unknown>): void {
142
+ const key = `write:${path}`
143
+
144
+ // Cancel any pending write for this path
145
+ if (this.#writePromises.has(key)) {
146
+ this.#pendingWrites.delete(key)
147
+ }
148
+
149
+ // Schedule new write
150
+ this.#pendingWrites.add(key)
151
+ const promise = operation().finally(() => {
152
+ this.#pendingWrites.delete(key)
153
+ this.#writePromises.delete(key)
154
+ })
155
+
156
+ this.#writePromises.set(key, promise)
157
+ }
158
+
159
+ async #flushPendingWrites(): Promise<void> {
160
+ const promises = Array.from(this.#writePromises.values())
161
+ await Promise.all(promises)
162
+ }
163
+
164
+ async #loadMetadata(path: string): Promise<FileMetadata | undefined> {
165
+ const cached = this.#metadataCache.get(path)
166
+ if (cached) return cached
167
+
168
+ const metadata = await this.#storage.get<FileMetadata>(this.#getMetadataKey(path))
169
+ if (metadata) {
170
+ this.#metadataCache.set(path, metadata)
171
+ }
172
+ return metadata
173
+ }
174
+
175
+ async #saveMetadata(path: string, metadata: FileMetadata): Promise<void> {
176
+ // TODO: Consider allowUnconfirmed: true for better performance
177
+ // Currently using strict consistency as requested
178
+ // Future optimization: explore allowUnconfirmed for non-critical writes
179
+ await this.#storage.put(this.#getMetadataKey(path), metadata)
180
+ this.#metadataCache.set(path, metadata)
181
+ }
182
+
183
+ async #loadChunk(path: string, chunkIndex: number): Promise<Uint8Array | undefined> {
184
+ const cached = this.#getCachedChunk(path, chunkIndex)
185
+ if (cached) return cached
186
+
187
+ const chunk = await this.#storage.get<Uint8Array>(this.#getChunkKey(path, chunkIndex))
188
+ if (chunk) {
189
+ this.#setCachedChunk(path, chunkIndex, chunk)
190
+ }
191
+ return chunk
192
+ }
193
+
194
+ async #saveChunk(path: string, chunkIndex: number, data: Uint8Array): Promise<void> {
195
+ await this.#storage.put(this.#getChunkKey(path, chunkIndex), data)
196
+ this.#setCachedChunk(path, chunkIndex, data)
197
+ }
198
+
199
+ async #deleteFile(path: string): Promise<void> {
200
+ const metadata = await this.#loadMetadata(path)
201
+ if (!metadata) return
202
+
203
+ // Delete all chunks and metadata atomically
204
+ const keysToDelete = [this.#getMetadataKey(path)]
205
+ for (let i = 0; i < metadata.chunkCount; i++) {
206
+ keysToDelete.push(this.#getChunkKey(path, i))
207
+ }
208
+
209
+ await this.#storage.delete(keysToDelete)
210
+
211
+ // Clean up caches
212
+ this.#metadataCache.delete(path)
213
+ for (let i = 0; i < metadata.chunkCount; i++) {
214
+ this.#chunkCache.delete(this.#getCacheKey(path, i))
215
+ }
216
+
217
+ this.#activeFiles.delete(path)
218
+
219
+ // Update the file index
220
+ await this.#updateFileIndex()
221
+ }
222
+
223
+ jOpen(zName: string, fileId: number, flags: number, pOutFlags: DataView): number {
224
+ try {
225
+ const path = zName ? this.#getPath(zName) : Math.random().toString(36)
226
+ const metadata = this.#metadataCache.get(path)
227
+
228
+ if (!metadata && flags & VFS.SQLITE_OPEN_CREATE) {
229
+ // Create new file
230
+ if (this.#activeFiles.size >= this.#maxFiles) {
231
+ throw new Error('cannot create file: capacity exceeded')
232
+ }
233
+
234
+ const newMetadata: FileMetadata = {
235
+ size: 0,
236
+ flags,
237
+ chunkCount: 0,
238
+ created: Date.now(),
239
+ }
240
+
241
+ // Cache the metadata immediately for synchronous access
242
+ this.#metadataCache.set(path, newMetadata)
243
+ this.#activeFiles.add(path)
244
+
245
+ // Schedule async save to maintain durability
246
+ this.#scheduleWrite(path, () => this.#saveMetadata(path, newMetadata))
247
+ }
248
+
249
+ if (!this.#metadataCache.has(path)) {
250
+ throw new Error('file not found')
251
+ }
252
+
253
+ const handle: FileHandle = {
254
+ path,
255
+ flags,
256
+ metadata: this.#metadataCache.get(path)!,
257
+ }
258
+
259
+ this.#openFiles.set(fileId, handle)
260
+ pOutFlags.setInt32(0, flags, true)
261
+ return VFS.SQLITE_OK
262
+ } catch (e: any) {
263
+ console.error(e.message)
264
+ return VFS.SQLITE_CANTOPEN
265
+ }
266
+ }
267
+
268
+ jClose(fileId: number): number {
269
+ const handle = this.#openFiles.get(fileId)
270
+ if (handle) {
271
+ this.#openFiles.delete(fileId)
272
+ if (handle.flags & VFS.SQLITE_OPEN_DELETEONCLOSE) {
273
+ // Schedule async delete
274
+ this.#scheduleWrite(handle.path, () => this.#deleteFile(handle.path))
275
+ }
276
+ }
277
+ return VFS.SQLITE_OK
278
+ }
279
+
280
+ jRead(fileId: number, pData: Uint8Array, iOffset: number): number {
281
+ try {
282
+ const handle = this.#openFiles.get(fileId)
283
+ if (!handle) return VFS.SQLITE_IOERR
284
+
285
+ const fileSize = handle.metadata.size
286
+ const requestedBytes = pData.byteLength
287
+
288
+ // Zero-length reads should always succeed
289
+ if (requestedBytes === 0) {
290
+ return VFS.SQLITE_OK
291
+ }
292
+
293
+ if (iOffset >= fileSize) {
294
+ pData.fill(0)
295
+ return VFS.SQLITE_IOERR_SHORT_READ
296
+ }
297
+
298
+ const bytesToRead = Math.min(requestedBytes, fileSize - iOffset)
299
+ const startChunk = Math.floor(iOffset / CHUNK_SIZE)
300
+ const endChunk = Math.floor((iOffset + bytesToRead - 1) / CHUNK_SIZE)
301
+
302
+ let bytesRead = 0
303
+
304
+ for (let chunkIndex = startChunk; chunkIndex <= endChunk; chunkIndex++) {
305
+ const chunk = this.#getCachedChunk(handle.path, chunkIndex)
306
+ if (!chunk) {
307
+ // Cache miss - this is a problem for synchronous operation
308
+ // We should have preloaded chunks during initialization
309
+ console.warn(`Cache miss for chunk ${chunkIndex} of ${handle.path}`)
310
+
311
+ // Emergency: try to preload the chunk for future reads
312
+ this.#preloadChunks(handle.path, chunkIndex, 1).catch(console.error)
313
+
314
+ pData.fill(0, bytesRead)
315
+ return VFS.SQLITE_IOERR_SHORT_READ
316
+ }
317
+
318
+ const chunkOffset = chunkIndex * CHUNK_SIZE
319
+ const readStart = Math.max(0, iOffset - chunkOffset)
320
+ const readEnd = Math.min(chunk.length, iOffset + requestedBytes - chunkOffset)
321
+ const chunkBytesToRead = readEnd - readStart
322
+
323
+ if (chunkBytesToRead > 0) {
324
+ pData.set(chunk.subarray(readStart, readEnd), bytesRead)
325
+ bytesRead += chunkBytesToRead
326
+ }
327
+ }
328
+
329
+ if (bytesRead < requestedBytes) {
330
+ pData.fill(0, bytesRead, requestedBytes)
331
+ return VFS.SQLITE_IOERR_SHORT_READ
332
+ }
333
+
334
+ return VFS.SQLITE_OK
335
+ } catch (e: any) {
336
+ console.error('jRead error:', e.message)
337
+ return VFS.SQLITE_IOERR
338
+ }
339
+ }
340
+
341
+ jWrite(fileId: number, pData: Uint8Array, iOffset: number): number {
342
+ try {
343
+ const handle = this.#openFiles.get(fileId)
344
+ if (!handle) return VFS.SQLITE_IOERR
345
+
346
+ const bytesToWrite = pData.byteLength
347
+ const startChunk = Math.floor(iOffset / CHUNK_SIZE)
348
+ const endChunk = Math.floor((iOffset + bytesToWrite - 1) / CHUNK_SIZE)
349
+
350
+ let bytesWritten = 0
351
+ const chunksToSave: Array<{ chunkIndex: number; data: Uint8Array }> = []
352
+
353
+ for (let chunkIndex = startChunk; chunkIndex <= endChunk; chunkIndex++) {
354
+ const chunkOffset = chunkIndex * CHUNK_SIZE
355
+ const writeStart = Math.max(0, iOffset - chunkOffset)
356
+ const writeEnd = Math.min(CHUNK_SIZE, iOffset + bytesToWrite - chunkOffset)
357
+
358
+ let chunk = this.#getCachedChunk(handle.path, chunkIndex)
359
+ if (!chunk) {
360
+ // Create new chunk
361
+ chunk = new Uint8Array(CHUNK_SIZE)
362
+ } else {
363
+ // Copy existing chunk for modification
364
+ chunk = chunk.slice()
365
+ }
366
+
367
+ const chunkBytesToWrite = writeEnd - writeStart
368
+ if (chunkBytesToWrite > 0) {
369
+ const dataOffset = bytesWritten
370
+ chunk.set(pData.subarray(dataOffset, dataOffset + chunkBytesToWrite), writeStart)
371
+ bytesWritten += chunkBytesToWrite
372
+
373
+ chunksToSave.push({ chunkIndex, data: chunk })
374
+ }
375
+ }
376
+
377
+ // Update metadata
378
+ const newSize = Math.max(handle.metadata.size, iOffset + bytesToWrite)
379
+ const newChunkCount = Math.ceil(newSize / CHUNK_SIZE)
380
+
381
+ handle.metadata.size = newSize
382
+ handle.metadata.chunkCount = newChunkCount
383
+
384
+ // Cache the modified chunks immediately
385
+ for (const { chunkIndex, data } of chunksToSave) {
386
+ this.#setCachedChunk(handle.path, chunkIndex, data)
387
+ }
388
+
389
+ // Schedule async saves to maintain durability
390
+ this.#scheduleWrite(handle.path, async () => {
391
+ await this.#saveMetadata(handle.path, handle.metadata)
392
+ await Promise.all(chunksToSave.map(({ chunkIndex, data }) => this.#saveChunk(handle.path, chunkIndex, data)))
393
+ })
394
+
395
+ return VFS.SQLITE_OK
396
+ } catch (e: any) {
397
+ console.error('jWrite error:', e.message)
398
+ return VFS.SQLITE_IOERR
399
+ }
400
+ }
401
+
402
+ jTruncate(fileId: number, iSize: number): number {
403
+ try {
404
+ const handle = this.#openFiles.get(fileId)
405
+ if (!handle) return VFS.SQLITE_IOERR
406
+
407
+ // const oldSize = handle.metadata.size
408
+ const newChunkCount = Math.ceil(iSize / CHUNK_SIZE)
409
+ const oldChunkCount = handle.metadata.chunkCount
410
+
411
+ handle.metadata.size = iSize
412
+ handle.metadata.chunkCount = newChunkCount
413
+
414
+ // If truncating to smaller size, remove excess chunks
415
+ if (newChunkCount < oldChunkCount) {
416
+ const chunksToDelete: string[] = []
417
+ for (let i = newChunkCount; i < oldChunkCount; i++) {
418
+ const cacheKey = this.#getCacheKey(handle.path, i)
419
+ this.#chunkCache.delete(cacheKey)
420
+ chunksToDelete.push(this.#getChunkKey(handle.path, i))
421
+ }
422
+
423
+ // Schedule async delete of excess chunks
424
+ if (chunksToDelete.length > 0) {
425
+ this.#scheduleWrite(handle.path, async () => {
426
+ await this.#storage.delete(chunksToDelete)
427
+ })
428
+ }
429
+ }
430
+
431
+ // If the last chunk needs to be truncated, update it
432
+ if (newChunkCount > 0) {
433
+ const lastChunkIndex = newChunkCount - 1
434
+ const lastChunkSize = iSize - lastChunkIndex * CHUNK_SIZE
435
+
436
+ if (lastChunkSize < CHUNK_SIZE) {
437
+ const lastChunk = this.#getCachedChunk(handle.path, lastChunkIndex)
438
+ if (lastChunk) {
439
+ const truncatedChunk = new Uint8Array(CHUNK_SIZE)
440
+ truncatedChunk.set(lastChunk.subarray(0, lastChunkSize))
441
+ this.#setCachedChunk(handle.path, lastChunkIndex, truncatedChunk)
442
+ this.#scheduleWrite(handle.path, () => this.#saveChunk(handle.path, lastChunkIndex, truncatedChunk))
443
+ }
444
+ }
445
+ }
446
+
447
+ // Schedule async metadata update
448
+ this.#scheduleWrite(handle.path, () => this.#saveMetadata(handle.path, handle.metadata))
449
+
450
+ return VFS.SQLITE_OK
451
+ } catch (e: any) {
452
+ console.error('jTruncate error:', e.message)
453
+ return VFS.SQLITE_IOERR
454
+ }
455
+ }
456
+
457
+ jSync(fileId: number, _flags: number): number {
458
+ try {
459
+ const handle = this.#openFiles.get(fileId)
460
+ if (!handle) return VFS.SQLITE_IOERR
461
+
462
+ // Force sync all pending writes for this file
463
+ // Note: DurableObjectStorage operations are already synchronous
464
+ // and atomic, so this is mostly a no-op
465
+ return VFS.SQLITE_OK
466
+ } catch (e: any) {
467
+ console.error('jSync error:', e.message)
468
+ return VFS.SQLITE_IOERR
469
+ }
470
+ }
471
+
472
+ jFileSize(fileId: number, pSize64: DataView): number {
473
+ try {
474
+ const handle = this.#openFiles.get(fileId)
475
+ if (!handle) return VFS.SQLITE_IOERR
476
+
477
+ pSize64.setBigInt64(0, BigInt(handle.metadata.size), true)
478
+ return VFS.SQLITE_OK
479
+ } catch (e: any) {
480
+ console.error('jFileSize error:', e.message)
481
+ return VFS.SQLITE_IOERR
482
+ }
483
+ }
484
+
485
+ jSectorSize(_fileId: number): number {
486
+ return SECTOR_SIZE
487
+ }
488
+
489
+ jDeviceCharacteristics(_fileId: number): number {
490
+ return VFS.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
491
+ }
492
+
493
+ jAccess(zName: string, _flags: number, pResOut: DataView): number {
494
+ try {
495
+ const path = this.#getPath(zName)
496
+ const exists = this.#activeFiles.has(path)
497
+ pResOut.setInt32(0, exists ? 1 : 0, true)
498
+ return VFS.SQLITE_OK
499
+ } catch (e: any) {
500
+ console.error('jAccess error:', e.message)
501
+ return VFS.SQLITE_IOERR
502
+ }
503
+ }
504
+
505
+ jDelete(zName: string, _syncDir: number): number {
506
+ try {
507
+ const path = this.#getPath(zName)
508
+
509
+ // Schedule async delete
510
+ this.#scheduleWrite(path, () => this.#deleteFile(path))
511
+
512
+ return VFS.SQLITE_OK
513
+ } catch (e: any) {
514
+ console.error('jDelete error:', e.message)
515
+ return VFS.SQLITE_IOERR
516
+ }
517
+ }
518
+
519
+ async close() {
520
+ // Clear all caches
521
+ this.#metadataCache.clear()
522
+ this.#chunkCache.clear()
523
+ this.#activeFiles.clear()
524
+ this.#openFiles.clear()
525
+ this.#initialized = false
526
+ }
527
+
528
+ async isReady() {
529
+ if (!this.#initialized) {
530
+ await this.#initializeStorage()
531
+ this.#initialized = true
532
+ }
533
+ return true
534
+ }
535
+
536
+ async #initializeStorage() {
537
+ // Load list of existing files
538
+ const fileList = await this.#storage.get<string[]>('index:files')
539
+ if (fileList) {
540
+ for (const path of fileList) {
541
+ this.#activeFiles.add(path)
542
+ // Preload metadata for all files
543
+ await this.#loadMetadata(path)
544
+ // Preload first chunk of each file for better performance
545
+ await this.#loadChunk(path, 0)
546
+ }
547
+ }
548
+
549
+ // Clean up temporary files that shouldn't persist
550
+ await this.#cleanupTemporaryFiles()
551
+
552
+ // Update the file index to reflect any cleanup
553
+ await this.#updateFileIndex()
554
+ }
555
+
556
+ async #cleanupTemporaryFiles() {
557
+ for (const path of this.#activeFiles) {
558
+ const metadata = this.#metadataCache.get(path)
559
+ if (
560
+ metadata &&
561
+ (metadata.flags & VFS.SQLITE_OPEN_DELETEONCLOSE || (metadata.flags & PERSISTENT_FILE_TYPES) === 0)
562
+ ) {
563
+ console.warn(`Cleaning up temporary file: ${path}`)
564
+ await this.#deleteFile(path)
565
+ }
566
+ }
567
+ }
568
+
569
+ /**
570
+ * Returns the number of SQLite files in the file system.
571
+ */
572
+ getSize(): number {
573
+ return this.#activeFiles.size
574
+ }
575
+
576
+ /**
577
+ * Returns the maximum number of SQLite files the file system can hold.
578
+ */
579
+ getCapacity(): number {
580
+ return this.#maxFiles
581
+ }
582
+
583
+ /**
584
+ * Increase the capacity of the file system by n.
585
+ */
586
+ async addCapacity(n: number): Promise<number> {
587
+ this.#maxFiles += n
588
+ return n
589
+ }
590
+
591
+ /**
592
+ * Decrease the capacity of the file system by n. The capacity cannot be
593
+ * decreased to fewer than the current number of SQLite files in the
594
+ * file system.
595
+ */
596
+ async removeCapacity(n: number): Promise<number> {
597
+ const currentSize = this.getSize()
598
+ const currentCapacity = this.getCapacity()
599
+ const newCapacity = Math.max(currentSize, currentCapacity - n)
600
+ const actualReduction = currentCapacity - newCapacity
601
+
602
+ this.#maxFiles = newCapacity
603
+ return actualReduction
604
+ }
605
+
606
+ async #updateFileIndex() {
607
+ // Update the persistent file index
608
+ const fileList = Array.from(this.#activeFiles)
609
+ await this.#storage.put('index:files', fileList)
610
+ }
611
+
612
+ /**
613
+ * Preload chunks for a file to support synchronous reads.
614
+ * SQLite typically reads files sequentially, so we preload nearby chunks.
615
+ */
616
+ async #preloadChunks(path: string, startChunk: number, count = 3) {
617
+ const metadata = this.#metadataCache.get(path)
618
+ if (!metadata) return
619
+
620
+ const endChunk = Math.min(startChunk + count, metadata.chunkCount)
621
+ const promises: Promise<void>[] = []
622
+
623
+ for (let i = startChunk; i < endChunk; i++) {
624
+ if (!this.#getCachedChunk(path, i)) {
625
+ promises.push(this.#loadChunk(path, i).then(() => {}))
626
+ }
627
+ }
628
+
629
+ await Promise.all(promises)
630
+ }
631
+
632
+ /**
633
+ * Flush all pending writes and sync to storage.
634
+ * This is useful for ensuring durability before critical operations.
635
+ */
636
+ async syncToStorage(): Promise<void> {
637
+ await this.#flushPendingWrites()
638
+ await this.#storage.sync()
639
+ }
640
+
641
+ /**
642
+ * Get statistics about the VFS for debugging and monitoring.
643
+ */
644
+ getStats() {
645
+ return {
646
+ activeFiles: this.#activeFiles.size,
647
+ openFiles: this.#openFiles.size,
648
+ cachedChunks: this.#chunkCache.size,
649
+ cachedMetadata: this.#metadataCache.size,
650
+ pendingWrites: this.#pendingWrites.size,
651
+ maxFiles: this.#maxFiles,
652
+ maxCacheSize: this.#maxCacheSize,
653
+ chunkSize: CHUNK_SIZE,
654
+ }
655
+ }
656
+
657
+ /**
658
+ * Convert a bare filename, path, or URL to a UNIX-style path.
659
+ */
660
+ #getPath(nameOrURL: string | URL): string {
661
+ const url = typeof nameOrURL === 'string' ? new URL(nameOrURL, 'file://localhost/') : nameOrURL
662
+ return url.pathname
663
+ }
664
+ }