@livestore/sqlite-wasm 0.4.0-dev.3 → 0.4.0-dev.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/browser/mod.d.ts +1 -0
- package/dist/browser/mod.d.ts.map +1 -1
- package/dist/browser/mod.js.map +1 -1
- package/dist/cf/BlockManager.d.ts +61 -0
- package/dist/cf/BlockManager.d.ts.map +1 -0
- package/dist/cf/BlockManager.js +157 -0
- package/dist/cf/BlockManager.js.map +1 -0
- package/dist/cf/CloudflareSqlVFS.d.ts +51 -0
- package/dist/cf/CloudflareSqlVFS.d.ts.map +1 -0
- package/dist/cf/CloudflareSqlVFS.js +351 -0
- package/dist/cf/CloudflareSqlVFS.js.map +1 -0
- package/dist/cf/CloudflareWorkerVFS.d.ts +72 -0
- package/dist/cf/CloudflareWorkerVFS.d.ts.map +1 -0
- package/dist/cf/CloudflareWorkerVFS.js +552 -0
- package/dist/cf/CloudflareWorkerVFS.js.map +1 -0
- package/dist/cf/mod.d.ts +43 -0
- package/dist/cf/mod.d.ts.map +1 -0
- package/dist/cf/mod.js +74 -0
- package/dist/cf/mod.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.js +314 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.js +266 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-core.test.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.js +444 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-integration.test.js.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.d.ts +2 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.d.ts.map +1 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.js +334 -0
- package/dist/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.js.map +1 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.d.ts +2 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.d.ts.map +1 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.js +354 -0
- package/dist/cf/test/sql/cloudflare-sql-vfs-core.test.js.map +1 -0
- package/dist/load-wasm/mod.node.d.ts.map +1 -1
- package/dist/load-wasm/mod.node.js +1 -2
- package/dist/load-wasm/mod.node.js.map +1 -1
- package/dist/load-wasm/mod.workerd.d.ts +2 -0
- package/dist/load-wasm/mod.workerd.d.ts.map +1 -0
- package/dist/load-wasm/mod.workerd.js +26 -0
- package/dist/load-wasm/mod.workerd.js.map +1 -0
- package/dist/make-sqlite-db.d.ts +1 -0
- package/dist/make-sqlite-db.d.ts.map +1 -1
- package/dist/make-sqlite-db.js.map +1 -1
- package/dist/node/NodeFS.d.ts +1 -2
- package/dist/node/NodeFS.d.ts.map +1 -1
- package/dist/node/NodeFS.js +1 -6
- package/dist/node/NodeFS.js.map +1 -1
- package/dist/node/mod.js +3 -8
- package/dist/node/mod.js.map +1 -1
- package/package.json +20 -7
- package/src/browser/mod.ts +1 -0
- package/src/cf/BlockManager.ts +225 -0
- package/src/cf/CloudflareSqlVFS.ts +450 -0
- package/src/cf/CloudflareWorkerVFS.ts +664 -0
- package/src/cf/README.md +60 -0
- package/src/cf/mod.ts +143 -0
- package/src/cf/test/README.md +224 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-advanced.test.ts +389 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-core.test.ts +322 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-integration.test.ts +567 -0
- package/src/cf/test/async-storage/cloudflare-worker-vfs-reliability.test.ts +403 -0
- package/src/cf/test/sql/cloudflare-sql-vfs-core.test.ts +433 -0
- package/src/load-wasm/mod.node.ts +1 -2
- package/src/load-wasm/mod.workerd.ts +26 -0
- package/src/make-sqlite-db.ts +1 -0
- package/src/node/NodeFS.ts +1 -9
- package/src/node/mod.ts +3 -10
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@livestore/sqlite-wasm",
-  "version": "0.4.0-dev.3",
+  "version": "0.4.0-dev.5",
   "type": "module",
   "exports": {
     ".": {
@@ -9,6 +9,7 @@
     },
     "./load-wasm": {
       "types": "./dist/load-wasm/mod.browser.d.ts",
+      "workerd": "./dist/load-wasm/mod.workerd.js",
       "browser": "./dist/load-wasm/mod.browser.js",
       "worker": "./dist/load-wasm/mod.browser.js",
       "node": "./dist/load-wasm/mod.node.js",
@@ -18,20 +19,28 @@
       "types": "./dist/node/mod.d.ts",
       "default": "./dist/node/mod.js"
     },
+    "./cf": {
+      "types": "./dist/cf/mod.d.ts",
+      "default": "./dist/cf/mod.js"
+    },
     "./browser": {
       "types": "./dist/browser/mod.d.ts",
       "default": "./dist/browser/mod.js"
     }
   },
   "dependencies": {
-    "@
-    "@livestore/common": "0.4.0-dev.
-    "@livestore/utils": "0.4.0-dev.
+    "@cloudflare/workers-types": "4.20250823.0",
+    "@livestore/common": "0.4.0-dev.5",
+    "@livestore/utils": "0.4.0-dev.5",
+    "@livestore/common-cf": "0.4.0-dev.5",
+    "@livestore/wa-sqlite": "0.4.0-dev.5"
   },
   "devDependencies": {
-    "@types/chrome": "^0.1.
+    "@types/chrome": "^0.1.4",
     "@types/node": "24.2.0",
-    "@types/wicg-file-system-access": "^2023.10.6"
+    "@types/wicg-file-system-access": "^2023.10.6",
+    "vitest": "3.2.4",
+    "wrangler": "^4.32.0"
   },
   "files": [
     "package.json",
@@ -42,5 +51,9 @@
   "publishConfig": {
     "access": "public"
   },
-  "scripts": {
+  "scripts": {
+    "test": "vitest",
+    "test:ui": "vitest --ui",
+    "test:watch": "vitest --watch"
+  }
 }
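Two consumer-facing changes sit in the export map: a new "./cf" subpath for the Cloudflare VFS code added in this release, and a "workerd" condition on "./load-wasm" so Cloudflare Workers builds resolve the dedicated WASM loader. A minimal, hedged sketch of what that enables; the re-exported name is an assumption based on the dist/cf file listing above, so check dist/cf/mod.d.ts for the actual export surface:

// Hedged sketch: the "./cf" subpath is new in 0.4.0-dev.5; whether it re-exports BlockManager
// is an assumption taken from the dist/cf listing above, not confirmed by this diff.
import { BlockManager } from '@livestore/sqlite-wasm/cf'

// Runtimes and bundlers that apply the "workerd" condition (Cloudflare workerd, wrangler builds)
// now resolve '@livestore/sqlite-wasm/load-wasm' to dist/load-wasm/mod.workerd.js;
// browser and node consumers keep the entries that were already present.
const manager = new BlockManager() // 64 * 1024 byte blocks by default, per BlockManager.ts below
console.log(manager.getBlockSize()) // 65536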
package/src/browser/mod.ts
CHANGED
@@ -1,5 +1,6 @@
 import type { MakeSqliteDb, PersistenceInfo, SqliteDb } from '@livestore/common'
 import { Effect, Hash } from '@livestore/utils/effect'
+import type { SQLiteAPI } from '@livestore/wa-sqlite'
 import type { MemoryVFS } from '@livestore/wa-sqlite/src/examples/MemoryVFS.js'
 
 import { makeInMemoryDb } from '../in-memory-vfs.ts'
package/src/cf/BlockManager.ts
ADDED
@@ -0,0 +1,225 @@
+import type { CfTypes } from '@livestore/common-cf'
+
+export interface BlockRange {
+  startBlock: number
+  endBlock: number
+  startOffset: number
+  endOffset: number
+}
+
+export interface BlockData {
+  blockId: number
+  data: Uint8Array
+}
+
+/**
+ * BlockManager handles the conversion between file operations and block-based storage
+ * for the CloudflareSqlVFS. It manages fixed-size blocks stored in SQL tables.
+ */
+export class BlockManager {
+  private readonly blockSize: number
+
+  constructor(blockSize: number = 64 * 1024) {
+    this.blockSize = blockSize
+  }
+
+  /**
+   * Calculate which blocks are needed for a given file operation
+   */
+  calculateBlockRange(offset: number, length: number): BlockRange {
+    const startBlock = Math.floor(offset / this.blockSize)
+    const endBlock = Math.floor((offset + length - 1) / this.blockSize)
+    const startOffset = offset % this.blockSize
+    const endOffset = ((offset + length - 1) % this.blockSize) + 1
+
+    return {
+      startBlock,
+      endBlock,
+      startOffset,
+      endOffset,
+    }
+  }
+
+  /**
+   * Read blocks from SQL storage and return as a Map
+   */
+  readBlocks(sql: CfTypes.SqlStorage, filePath: string, blockIds: number[]): Map<number, Uint8Array> {
+    const blocks = new Map<number, Uint8Array>()
+
+    if (blockIds.length === 0) {
+      return blocks
+    }
+
+    // Build IN clause for efficient querying
+    const placeholders = blockIds.map(() => '?').join(',')
+    const query = `
+      SELECT block_id, block_data
+      FROM vfs_blocks
+      WHERE file_path = ? AND block_id IN (${placeholders})
+      ORDER BY block_id
+    `
+
+    const cursor = sql.exec<{ block_id: number; block_data: ArrayBuffer }>(query, filePath, ...blockIds)
+
+    for (const row of cursor) {
+      blocks.set(row.block_id, new Uint8Array(row.block_data))
+    }
+
+    return blocks
+  }
+
+  /**
+   * Write blocks to SQL storage using exec for now (prepared statements later)
+   */
+  writeBlocks(sql: CfTypes.SqlStorage, filePath: string, blocks: Map<number, Uint8Array>): void {
+    if (blocks.size === 0) {
+      return
+    }
+
+    for (const [blockId, data] of blocks) {
+      sql.exec(
+        'INSERT OR REPLACE INTO vfs_blocks (file_path, block_id, block_data) VALUES (?, ?, ?)',
+        filePath,
+        blockId,
+        data,
+      )
+    }
+  }
+
+  /**
+   * Delete blocks at or after the specified block ID (used for truncation)
+   */
+  deleteBlocksAfter(sql: CfTypes.SqlStorage, filePath: string, startBlockId: number): void {
+    sql.exec('DELETE FROM vfs_blocks WHERE file_path = ? AND block_id >= ?', filePath, startBlockId)
+  }
+
+  /**
+   * Split write data into blocks, handling partial blocks at boundaries
+   */
+  splitIntoBlocks(
+    data: Uint8Array,
+    offset: number,
+  ): Map<number, { blockId: number; blockOffset: number; data: Uint8Array }> {
+    const blocks = new Map<number, { blockId: number; blockOffset: number; data: Uint8Array }>()
+
+    let remainingData = data
+    let currentOffset = offset
+
+    while (remainingData.length > 0) {
+      const blockId = Math.floor(currentOffset / this.blockSize)
+      const blockOffset = currentOffset % this.blockSize
+      const bytesToWrite = Math.min(remainingData.length, this.blockSize - blockOffset)
+
+      const blockData = remainingData.slice(0, bytesToWrite)
+      blocks.set(blockId, {
+        blockId,
+        blockOffset,
+        data: blockData,
+      })
+
+      remainingData = remainingData.slice(bytesToWrite)
+      currentOffset += bytesToWrite
+    }
+
+    return blocks
+  }
+
+  /**
+   * Assemble read data from blocks into a continuous buffer
+   */
+  assembleBlocks(blocks: Map<number, Uint8Array>, range: BlockRange, requestedLength: number): Uint8Array {
+    const result = new Uint8Array(requestedLength)
+    let resultOffset = 0
+
+    for (let blockId = range.startBlock; blockId <= range.endBlock; blockId++) {
+      const blockData = blocks.get(blockId)
+      if (!blockData) {
+        // Block not found - fill with zeros (sparse file behavior)
+        const zeroLength = Math.min(this.blockSize, requestedLength - resultOffset)
+        // result is already zero-filled by default
+        resultOffset += zeroLength
+        continue
+      }
+
+      // Calculate the slice of this block we need
+      const blockStartOffset = blockId === range.startBlock ? range.startOffset : 0
+      const blockEndOffset = blockId === range.endBlock ? range.endOffset : blockData.length
+      const sliceLength = blockEndOffset - blockStartOffset
+
+      if (sliceLength > 0) {
+        const slice = blockData.slice(blockStartOffset, blockEndOffset)
+        result.set(slice, resultOffset)
+        resultOffset += sliceLength
+      }
+    }
+
+    return result
+  }
+
+  /**
+   * Handle partial block writes by reading existing block, modifying, and returning complete block
+   */
+  mergePartialBlock(
+    sql: CfTypes.SqlStorage,
+    filePath: string,
+    blockId: number,
+    blockOffset: number,
+    newData: Uint8Array,
+  ): Uint8Array {
+    // Read existing block data if it exists
+    const existingBlocks = this.readBlocks(sql, filePath, [blockId])
+    const existingBlock = existingBlocks.get(blockId) || new Uint8Array(this.blockSize)
+
+    // Create a new block with the merged data
+    const mergedBlock = new Uint8Array(this.blockSize)
+    mergedBlock.set(existingBlock)
+    mergedBlock.set(newData, blockOffset)
+
+    return mergedBlock
+  }
+
+  /**
+   * Get statistics about block usage for a file
+   */
+  getBlockStats(
+    sql: CfTypes.SqlStorage,
+    filePath: string,
+  ): { totalBlocks: number; storedBlocks: number; totalBytes: number } {
+    const blockStatsCursor = sql.exec<{ stored_blocks: number; total_bytes: number }>(
+      `SELECT
+        COUNT(*) as stored_blocks,
+        COALESCE(SUM(LENGTH(block_data)), 0) as total_bytes
+      FROM vfs_blocks
+      WHERE file_path = ?`,
+      filePath,
+    )
+
+    const result = blockStatsCursor.one()
+
+    // Get file size to calculate theoretical total blocks
+    const fileSizeCursor = sql.exec<{ file_size: number }>(
+      'SELECT file_size FROM vfs_files WHERE file_path = ?',
+      filePath,
+    )
+
+    let fileSize = 0
+    try {
+      const fileSizeResult = fileSizeCursor.one()
+      fileSize = fileSizeResult.file_size
+    } catch {
+      // File doesn't exist
+    }
+
+    const totalBlocks = Math.ceil(fileSize / this.blockSize)
+
+    return {
+      totalBlocks,
+      storedBlocks: result.stored_blocks,
+      totalBytes: result.total_bytes,
+    }
+  }
+
+  getBlockSize(): number {
+    return this.blockSize
+  }
+}
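For orientation, a minimal sketch of how the block arithmetic above composes on a write/read round trip. It uses only methods defined in BlockManager.ts as shown in this diff and sidesteps CfTypes.SqlStorage entirely; the import path assumes the new "./cf" entry re-exports BlockManager, which this diff does not confirm.

// Sketch only: import path/re-export is an assumption; the method signatures come from BlockManager.ts above.
import { BlockManager } from '@livestore/sqlite-wasm/cf'

const mgr = new BlockManager(4096) // 4 KiB blocks to keep the numbers small; the default is 64 KiB

// Writing 6000 bytes at offset 3000 touches blocks 0..2.
const pieces = mgr.splitIntoBlocks(new Uint8Array(6000).fill(1), 3000)
// pieces.get(0) -> { blockId: 0, blockOffset: 3000, data: 1096 bytes }
// pieces.get(1) -> { blockId: 1, blockOffset: 0,    data: 4096 bytes }
// pieces.get(2) -> { blockId: 2, blockOffset: 0,    data:  808 bytes }

// Reading the same range back: compute the block range, fetch (normally via readBlocks), reassemble.
const range = mgr.calculateBlockRange(3000, 6000)
// -> { startBlock: 0, endBlock: 2, startOffset: 3000, endOffset: 808 }
const fetched = new Map<number, Uint8Array>() // left empty here; missing blocks read back as zeros (sparse files)
const bytes = mgr.assembleBlocks(fetched, range, 6000)
console.log(pieces.size, bytes.length, mgr.getBlockSize()) // 3 6000 4096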