@livestore/adapter-web 0.4.0-dev.5 → 0.4.0-dev.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/web-worker/client-session/persisted-adapter.d.ts +15 -0
- package/dist/web-worker/client-session/persisted-adapter.d.ts.map +1 -1
- package/dist/web-worker/client-session/persisted-adapter.js +9 -2
- package/dist/web-worker/client-session/persisted-adapter.js.map +1 -1
- package/dist/web-worker/common/persisted-sqlite.d.ts +23 -0
- package/dist/web-worker/common/persisted-sqlite.d.ts.map +1 -1
- package/dist/web-worker/common/persisted-sqlite.js +58 -0
- package/dist/web-worker/common/persisted-sqlite.js.map +1 -1
- package/dist/web-worker/common/worker-schema.d.ts +4 -4
- package/dist/web-worker/leader-worker/make-leader-worker.js +9 -1
- package/dist/web-worker/leader-worker/make-leader-worker.js.map +1 -1
- package/dist/web-worker/shared-worker/make-shared-worker.d.ts.map +1 -1
- package/dist/web-worker/shared-worker/make-shared-worker.js +9 -2
- package/dist/web-worker/shared-worker/make-shared-worker.js.map +1 -1
- package/package.json +6 -6
- package/src/web-worker/client-session/persisted-adapter.ts +25 -1
- package/src/web-worker/common/persisted-sqlite.ts +77 -1
- package/src/web-worker/leader-worker/make-leader-worker.ts +10 -1
- package/src/web-worker/shared-worker/make-shared-worker.ts +14 -3
package/src/web-worker/common/persisted-sqlite.ts

@@ -1,12 +1,13 @@
 import { liveStoreStorageFormatVersion, UnexpectedError } from '@livestore/common'
 import type { LiveStoreSchema } from '@livestore/common/schema'
-import { decodeSAHPoolFilename, HEADER_OFFSET_DATA } from '@livestore/sqlite-wasm/browser'
+import { decodeSAHPoolFilename, HEADER_OFFSET_DATA, type WebDatabaseMetadataOpfs } from '@livestore/sqlite-wasm/browser'
 import { Effect, Schedule, Schema } from '@livestore/utils/effect'
 
 import * as OpfsUtils from '../../opfs-utils.ts'
 import type * as WorkerSchema from './worker-schema.ts'
 
 export class PersistedSqliteError extends Schema.TaggedError<PersistedSqliteError>()('PersistedSqliteError', {
+  message: Schema.String,
   cause: Schema.Defect,
 }) {}
 
@@ -141,3 +142,78 @@ export const getStateDbFileName = (schema: LiveStoreSchema) => {
     schema.state.sqlite.migrations.strategy === 'manual' ? 'fixed' : schema.state.sqlite.hash.toString()
   return `state${schemaHashSuffix}.db`
 }
+
+/**
+ * Cleanup old state database files after successful migration.
+ * This prevents OPFS file pool capacity from being exhausted by accumulated schema files.
+ *
+ * @param vfs - The AccessHandlePoolVFS instance for safe file operations
+ * @param currentSchema - Current schema (to avoid deleting the active database)
+ */
+export const cleanupOldStateDbFiles = Effect.fn('@livestore/adapter-web:cleanupOldStateDbFiles')(
+  function* ({ vfs, currentSchema }: { vfs: WebDatabaseMetadataOpfs['vfs']; currentSchema: LiveStoreSchema }) {
+    // Only cleanup for auto migration strategy because:
+    // - Auto strategy: Creates new database files per schema change (e.g., state123.db, state456.db)
+    //   which accumulate over time and can exhaust OPFS file pool capacity
+    // - Manual strategy: Always reuses the same database file (statefixed.db) across schema changes,
+    //   so there are never multiple old files to clean up
+    if (currentSchema.state.sqlite.migrations.strategy === 'manual') {
+      yield* Effect.logDebug('Skipping state db cleanup - manual migration strategy uses fixed filename')
+      return
+    }
+
+    const currentDbFileName = getStateDbFileName(currentSchema)
+    const currentPath = `/${currentDbFileName}`
+
+    const allPaths = yield* Effect.sync(() => vfs.getTrackedFilePaths())
+    const oldStateDbPaths = allPaths.filter(
+      (path) => path.startsWith('/state') && path.endsWith('.db') && path !== currentPath,
+    )
+
+    if (oldStateDbPaths.length === 0) {
+      yield* Effect.logDebug('State db cleanup completed: no old database files found')
+      return
+    }
+
+    yield* Effect.logDebug(`Found ${oldStateDbPaths.length} old state database file(s) to clean up`)
+
+    let deletedCount = 0
+    for (const path of oldStateDbPaths) {
+      const fileName = path.startsWith('/') ? path.slice(1) : path
+
+      const vfsResultCode = yield* Effect.try({
+        try: () => vfs.jDelete(fileName, 0),
+        catch: (cause) => new SqliteVfsError({ operation: 'jDelete', fileName, cause }),
+      })
+
+      // 0 indicates a successful result in SQLite.
+      // See https://www.sqlite.org/c3ref/c_abort.html
+      if (vfsResultCode !== 0) {
+        return yield* new SqliteVfsError({
+          operation: 'jDelete',
+          fileName,
+          vfsResultCode,
+        })
+      }
+
+      deletedCount++
+      yield* Effect.logDebug(`Successfully deleted old state database file: ${fileName}`)
+    }
+
+    yield* Effect.logDebug(`State db cleanup completed: removed ${deletedCount} old database file(s)`)
+  },
+  Effect.mapError(
+    (error) =>
+      new PersistedSqliteError({
+        message: 'Failed to clean up old state database file(s)',
+        cause: error,
+      }),
+  ),
+)
+
+export class SqliteVfsError extends Schema.TaggedError<SqliteVfsError>()('SqliteVfsError', {
+  operation: Schema.String,
+  fileName: Schema.String,
+  vfsResultCode: Schema.optional(Schema.Number),
+  cause: Schema.optional(Schema.Defect),
+}) {}
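
For context on what `cleanupOldStateDbFiles` actually removes: with the `auto` migration strategy, `getStateDbFileName` embeds the schema hash in the file name, so each schema change creates a fresh `/state<hash>.db` in the OPFS SAH pool. A minimal illustrative sketch of the selection logic follows; the example paths are invented, only the filter expression mirrors the code above.

// Illustrative sketch (not part of the package); example paths are invented.
const currentPath = '/state2047110981.db' // i.e. `/${getStateDbFileName(currentSchema)}`
const trackedPaths = ['/state1691283712.db', '/state2047110981.db', '/eventlog.db']

// Same filter as cleanupOldStateDbFiles: keep the current db, ignore non-state files.
const oldStateDbPaths = trackedPaths.filter(
  (path) => path.startsWith('/state') && path.endsWith('.db') && path !== currentPath,
)
// -> ['/state1691283712.db'] - the only path that would be passed to vfs.jDelete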
package/src/web-worker/leader-worker/make-leader-worker.ts

@@ -26,7 +26,7 @@ import {
 import type * as otel from '@opentelemetry/api'
 
 import * as OpfsUtils from '../../opfs-utils.ts'
-import { getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
+import { cleanupOldStateDbFiles, getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
 import { makeShutdownChannel } from '../common/shutdown-channel.ts'
 import * as WorkerSchema from '../common/worker-schema.ts'
 
@@ -131,6 +131,15 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions }: WorkerOptions) =>
     concurrency: 2,
   })
 
+  // Clean up old state database files after successful database creation
+  // This prevents OPFS file pool capacity exhaustion from accumulated state db files after schema changes/migrations
+  if (dbState.metadata._tag === 'opfs') {
+    yield* cleanupOldStateDbFiles({
+      vfs: dbState.metadata.vfs,
+      currentSchema: schema,
+    })
+  }
+
   const devtoolsOptions = yield* makeDevtoolsOptions({ devtoolsEnabled, dbState, dbEventlog })
   const shutdownChannel = yield* makeShutdownChannel(storeId)
 
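
Because `cleanupOldStateDbFiles` maps every failure to `PersistedSqliteError`, the `yield*` above lets a cleanup failure propagate into leader-worker startup. A hedged sketch of an alternative, best-effort wrapper a caller could use instead (this is not what the diff above does):

// Hedged sketch, not LiveStore code: recover the tagged cleanup error instead of failing startup.
import { Effect } from '@livestore/utils/effect'
import type { LiveStoreSchema } from '@livestore/common/schema'
import type { WebDatabaseMetadataOpfs } from '@livestore/sqlite-wasm/browser'
import { cleanupOldStateDbFiles } from '../common/persisted-sqlite.ts'

const cleanupBestEffort = (args: { vfs: WebDatabaseMetadataOpfs['vfs']; currentSchema: LiveStoreSchema }) =>
  cleanupOldStateDbFiles(args).pipe(
    // Cleanup is an optimization; log and continue if it fails.
    Effect.catchTag('PersistedSqliteError', (error) =>
      Effect.logWarning('Old state db cleanup failed, continuing startup', error),
    ),
  )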
package/src/web-worker/shared-worker/make-shared-worker.ts

@@ -28,6 +28,20 @@ import {
 import { makeShutdownChannel } from '../common/shutdown-channel.ts'
 import * as WorkerSchema from '../common/worker-schema.ts'
 
+// Extract from `livestore-shared-worker-${storeId}`
+const storeId = self.name.replace('livestore-shared-worker-', '')
+
+// We acquire a lock that is held as long as this shared worker is alive.
+// This way, when the shared worker is terminated (e.g. by the browser when the page is closed),
+// the lock is released and any thread waiting for the lock can be notified.
+const LIVESTORE_SHARED_WORKER_TERMINATION_LOCK = `livestore-shared-worker-termination-lock-${storeId}`
+navigator.locks.request(
+  LIVESTORE_SHARED_WORKER_TERMINATION_LOCK,
+  { steal: true },
+  // We use a never-resolving promise to hold the lock
+  async () => new Promise(() => {}),
+)
+
 if (isDevEnv()) {
   globalThis.__debugLiveStoreUtils = {
     blobUrl: (buffer: Uint8Array<ArrayBuffer>) =>
@@ -241,9 +255,6 @@ const makeWorkerRunner = Effect.gen(function* () {
 }).pipe(Layer.unwrapScoped)
 
 export const makeWorker = () => {
-  // Extract from `livestore-shared-worker-${storeId}`
-  const storeId = self.name.replace('livestore-shared-worker-', '')
-
   makeWorkerRunner.pipe(
     Layer.provide(BrowserWorkerRunner.layer),
     // WorkerRunner.launch,
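
The new module-level code in make-shared-worker.ts holds a Web Lock for the lifetime of the shared worker, so other browsing contexts can observe its termination: Web Locks are released when the holding agent is destroyed. A minimal sketch of the observer side, assuming a client for the same store id (this waiter code is illustrative, not taken from the package):

// Hedged sketch: queue on the same lock name without { steal: true }.
// The callback only runs once the shared worker's hold is released,
// i.e. once the browser has terminated the shared worker.
const storeId = 'my-store' // hypothetical store id
const lockName = `livestore-shared-worker-termination-lock-${storeId}`

void navigator.locks.request(lockName, async () => {
  // Reaching this callback means the shared worker no longer holds the lock.
  console.warn(`shared worker for "${storeId}" terminated`)
})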