@livestore/adapter-web 0.4.0-dev.0 → 0.4.0-dev.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/in-memory/in-memory-adapter.d.ts.map +1 -1
  3. package/dist/in-memory/in-memory-adapter.js +2 -1
  4. package/dist/in-memory/in-memory-adapter.js.map +1 -1
  5. package/dist/opfs-utils.d.ts +3 -1
  6. package/dist/opfs-utils.d.ts.map +1 -1
  7. package/dist/opfs-utils.js +4 -4
  8. package/dist/opfs-utils.js.map +1 -1
  9. package/dist/web-worker/client-session/persisted-adapter.d.ts +15 -0
  10. package/dist/web-worker/client-session/persisted-adapter.d.ts.map +1 -1
  11. package/dist/web-worker/client-session/persisted-adapter.js +13 -2
  12. package/dist/web-worker/client-session/persisted-adapter.js.map +1 -1
  13. package/dist/web-worker/common/persisted-sqlite.d.ts +34 -0
  14. package/dist/web-worker/common/persisted-sqlite.d.ts.map +1 -1
  15. package/dist/web-worker/common/persisted-sqlite.js +152 -0
  16. package/dist/web-worker/common/persisted-sqlite.js.map +1 -1
  17. package/dist/web-worker/common/shutdown-channel.d.ts +1 -1
  18. package/dist/web-worker/common/shutdown-channel.d.ts.map +1 -1
  19. package/dist/web-worker/common/worker-schema.d.ts +28 -6
  20. package/dist/web-worker/common/worker-schema.d.ts.map +1 -1
  21. package/dist/web-worker/common/worker-schema.js +15 -3
  22. package/dist/web-worker/common/worker-schema.js.map +1 -1
  23. package/dist/web-worker/leader-worker/make-leader-worker.js +20 -2
  24. package/dist/web-worker/leader-worker/make-leader-worker.js.map +1 -1
  25. package/dist/web-worker/shared-worker/make-shared-worker.d.ts.map +1 -1
  26. package/dist/web-worker/shared-worker/make-shared-worker.js +11 -2
  27. package/dist/web-worker/shared-worker/make-shared-worker.js.map +1 -1
  28. package/dist/web-worker/vite-dev-polyfill.js +1 -0
  29. package/dist/web-worker/vite-dev-polyfill.js.map +1 -1
  30. package/package.json +7 -7
  31. package/src/in-memory/in-memory-adapter.ts +3 -1
  32. package/src/opfs-utils.ts +4 -4
  33. package/src/web-worker/client-session/persisted-adapter.ts +30 -1
  34. package/src/web-worker/common/persisted-sqlite.ts +220 -1
  35. package/src/web-worker/common/worker-schema.ts +23 -0
  36. package/src/web-worker/leader-worker/make-leader-worker.ts +24 -4
  37. package/src/web-worker/shared-worker/make-shared-worker.ts +16 -3
  38. package/src/web-worker/vite-dev-polyfill.ts +1 -0
package/src/web-worker/common/persisted-sqlite.ts

@@ -1,12 +1,14 @@
  import { liveStoreStorageFormatVersion, UnexpectedError } from '@livestore/common'
  import type { LiveStoreSchema } from '@livestore/common/schema'
- import { decodeSAHPoolFilename, HEADER_OFFSET_DATA } from '@livestore/sqlite-wasm/browser'
+ import { decodeSAHPoolFilename, HEADER_OFFSET_DATA, type WebDatabaseMetadataOpfs } from '@livestore/sqlite-wasm/browser'
+ import { isDevEnv } from '@livestore/utils'
  import { Effect, Schedule, Schema } from '@livestore/utils/effect'

  import * as OpfsUtils from '../../opfs-utils.ts'
  import type * as WorkerSchema from './worker-schema.ts'

  export class PersistedSqliteError extends Schema.TaggedError<PersistedSqliteError>()('PersistedSqliteError', {
+   message: Schema.String,
    cause: Schema.Defect,
  }) {}

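The new `message` field becomes a required constructor argument for `PersistedSqliteError`. A minimal sketch of the `Schema.TaggedError` pattern used throughout this file (`ExampleError` is illustrative, not part of the package):

import { Schema } from '@livestore/utils/effect'

// Fields declared in the schema become required, typed constructor arguments.
class ExampleError extends Schema.TaggedError<ExampleError>()('ExampleError', {
  message: Schema.String,
  cause: Schema.Defect,
}) {}

// After this change, call sites must supply a message, e.g.
// new PersistedSqliteError({ message: 'Failed to ...', cause: someError })
const err = new ExampleError({ message: 'something went wrong', cause: new Error('boom') })
console.log(err._tag, err.message)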
@@ -141,3 +143,220 @@ export const getStateDbFileName = (schema: LiveStoreSchema) => {
      schema.state.sqlite.migrations.strategy === 'manual' ? 'fixed' : schema.state.sqlite.hash.toString()
    return `state${schemaHashSuffix}.db`
  }
+
+ export const MAX_ARCHIVED_STATE_DBS_IN_DEV = 3
+
+ /**
+  * Cleanup old state database files after successful migration.
+  * This prevents OPFS file pool capacity from being exhausted by accumulated schema files.
+  *
+  * @param vfs - The AccessHandlePoolVFS instance for safe file operations
+  * @param currentSchema - Current schema (to avoid deleting the active database)
+  */
+ export const cleanupOldStateDbFiles = Effect.fn('@livestore/adapter-web:cleanupOldStateDbFiles')(
+   function* ({
+     vfs,
+     currentSchema,
+     opfsDirectory,
+   }: {
+     vfs: WebDatabaseMetadataOpfs['vfs']
+     currentSchema: LiveStoreSchema
+     opfsDirectory: string
+   }) {
+     // Only cleanup for auto migration strategy because:
+     // - Auto strategy: Creates new database files per schema change (e.g., state123.db, state456.db)
+     //   which accumulate over time and can exhaust OPFS file pool capacity
+     // - Manual strategy: Always reuses the same database file (statefixed.db) across schema changes,
+     //   so there are never multiple old files to clean up
+     if (currentSchema.state.sqlite.migrations.strategy === 'manual') {
+       yield* Effect.logDebug('Skipping state db cleanup - manual migration strategy uses fixed filename')
+       return
+     }
+
+     const isDev = isDevEnv()
+     const currentDbFileName = getStateDbFileName(currentSchema)
+     const currentPath = `/${currentDbFileName}`
+
+     const allPaths = yield* Effect.sync(() => vfs.getTrackedFilePaths())
+     const oldStateDbPaths = allPaths.filter(
+       (path) => path.startsWith('/state') && path.endsWith('.db') && path !== currentPath,
+     )
+
+     if (oldStateDbPaths.length === 0) {
+       yield* Effect.logDebug('State db cleanup completed: no old database files found')
+       return
+     }
+
+     yield* Effect.logDebug(`Found ${oldStateDbPaths.length} old state database file(s) to clean up`)
+
+     let deletedCount = 0
+     const archivedFileNames: string[] = []
+     let archiveDirHandle: FileSystemDirectoryHandle | undefined
+
+     for (const path of oldStateDbPaths) {
+       const fileName = path.startsWith('/') ? path.slice(1) : path
+
+       if (isDev) {
+         archiveDirHandle = yield* Effect.tryPromise({
+           try: () => OpfsUtils.getDirHandle(`${opfsDirectory}/archive`, { create: true }),
+           catch: (cause) => new ArchiveStateDbError({ message: 'Failed to ensure archive directory', cause }),
+         })
+
+         const archivedFileName = yield* archiveStateDbFile({
+           vfs,
+           fileName,
+           archiveDirHandle,
+         })
+
+         archivedFileNames.push(archivedFileName)
+       }
+
+       const vfsResultCode = yield* Effect.try({
+         try: () => vfs.jDelete(fileName, 0),
+         catch: (cause) => new SqliteVfsError({ operation: 'jDelete', fileName, cause }),
+       })
+
+       // 0 indicates a successful result in SQLite.
+       // See https://www.sqlite.org/c3ref/c_abort.html
+       if (vfsResultCode !== 0) {
+         return yield* new SqliteVfsError({
+           operation: 'jDelete',
+           fileName,
+           vfsResultCode,
+         })
+       }
+
+       deletedCount++
+       yield* Effect.logDebug(`Successfully deleted old state database file: ${fileName}`)
+     }
+
+     if (isDev && archiveDirHandle !== undefined) {
+       const pruneResult = yield* pruneArchiveDir({
+         archiveDirHandle,
+         keep: MAX_ARCHIVED_STATE_DBS_IN_DEV,
+       })
+
+       yield* Effect.logDebug(
+         `State db cleanup completed: archived ${archivedFileNames.length} file(s); removed ${deletedCount} old database file(s) from active pool; archive retained ${pruneResult.retained.length} file(s)`,
+       )
+     } else {
+       yield* Effect.logDebug(`State db cleanup completed: removed ${deletedCount} old database file(s)`)
+     }
+   },
+   Effect.mapError(
+     (error) =>
+       new PersistedSqliteError({
+         message: 'Failed to clean up old state database file(s)',
+         cause: error,
+       }),
+   ),
+ )
+
+ const archiveStateDbFile = Effect.fn('@livestore/adapter-web:archiveStateDbFile')(function* ({
+   vfs,
+   fileName,
+   archiveDirHandle,
+ }: {
+   vfs: WebDatabaseMetadataOpfs['vfs']
+   fileName: string
+   archiveDirHandle: FileSystemDirectoryHandle
+ }) {
+   const stateDbBuffer = vfs.readFilePayload(fileName)
+
+   const archiveFileName = `${Date.now()}-${fileName}`
+
+   const archiveFileHandle = yield* Effect.tryPromise({
+     try: () => archiveDirHandle.getFileHandle(archiveFileName, { create: true }),
+     catch: (cause) =>
+       new ArchiveStateDbError({
+         message: 'Failed to open archive file handle',
+         fileName: archiveFileName,
+         cause,
+       }),
+   })
+
+   const archiveFileAccessHandle = yield* Effect.acquireRelease(
+     Effect.tryPromise({
+       try: () => archiveFileHandle.createSyncAccessHandle(),
+       catch: (cause) =>
+         new ArchiveStateDbError({
+           message: 'Failed to create sync access handle for archived file',
+           fileName: archiveFileName,
+           cause,
+         }),
+     }),
+     (handle) => Effect.sync(() => handle.close()).pipe(Effect.ignoreLogged),
+   )
+
+   yield* Effect.try({
+     try: () => {
+       archiveFileAccessHandle.write(stateDbBuffer)
+       archiveFileAccessHandle.flush()
+     },
+     catch: (cause) =>
+       new ArchiveStateDbError({
+         message: 'Failed to write archived state database',
+         fileName: archiveFileName,
+         cause,
+       }),
+   })
+
+   return archiveFileName
+ }, Effect.scoped)
+
+ const pruneArchiveDir = Effect.fn('@livestore/adapter-web:pruneArchiveDir')(function* ({
+   archiveDirHandle,
+   keep,
+ }: {
+   archiveDirHandle: FileSystemDirectoryHandle
+   keep: number
+ }) {
+   const files = yield* Effect.tryPromise({
+     try: async () => {
+       const result: { name: string; lastModified: number }[] = []
+
+       for await (const entry of archiveDirHandle.values()) {
+         if (entry.kind !== 'file') continue
+         const fileHandle = await archiveDirHandle.getFileHandle(entry.name)
+         const file = await fileHandle.getFile()
+         result.push({ name: entry.name, lastModified: file.lastModified })
+       }
+
+       return result.sort((a, b) => b.lastModified - a.lastModified)
+     },
+     catch: (cause) => new ArchiveStateDbError({ message: 'Failed to enumerate archived state databases', cause }),
+   })
+
+   const retained = files.slice(0, keep)
+   const toDelete = files.slice(keep)
+
+   yield* Effect.forEach(toDelete, ({ name }) =>
+     Effect.tryPromise({
+       try: () => archiveDirHandle.removeEntry(name),
+       catch: (cause) =>
+         new ArchiveStateDbError({
+           message: 'Failed to delete archived state database',
+           fileName: name,
+           cause,
+         }),
+     }),
+   )
+
+   return {
+     retained,
+     deleted: toDelete,
+   }
+ })
+
+ export class ArchiveStateDbError extends Schema.TaggedError<ArchiveStateDbError>()('ArchiveStateDbError', {
+   message: Schema.String,
+   fileName: Schema.optional(Schema.String),
+   cause: Schema.Defect,
+ }) {}
+
+ export class SqliteVfsError extends Schema.TaggedError<SqliteVfsError>()('SqliteVfsError', {
+   operation: Schema.String,
+   fileName: Schema.String,
+   vfsResultCode: Schema.optional(Schema.Number),
+   cause: Schema.optional(Schema.Defect),
+ }) {}
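The dev-mode archive keeps only the newest `MAX_ARCHIVED_STATE_DBS_IN_DEV` files, ordered by `lastModified`. A standalone sketch of the retention rule applied by `pruneArchiveDir` (file names follow the `${Date.now()}-${fileName}` archive naming above; the data is illustrative):

type ArchiveEntry = { name: string; lastModified: number }

// Same selection logic as pruneArchiveDir: sort newest-first, keep `keep`, delete the rest.
const planPrune = (files: ArchiveEntry[], keep: number) => {
  const sorted = [...files].sort((a, b) => b.lastModified - a.lastModified)
  return { retained: sorted.slice(0, keep), deleted: sorted.slice(keep) }
}

// With MAX_ARCHIVED_STATE_DBS_IN_DEV = 3, only the three newest archives survive:
const { retained, deleted } = planPrune(
  [
    { name: '1700000004000-state456.db', lastModified: 1_700_000_004_000 },
    { name: '1700000003000-state345.db', lastModified: 1_700_000_003_000 },
    { name: '1700000002000-state234.db', lastModified: 1_700_000_002_000 },
    { name: '1700000001000-state123.db', lastModified: 1_700_000_001_000 },
  ],
  3,
)

console.log(retained.map((f) => f.name)) // the three newest archives
console.log(deleted.map((f) => f.name)) // ['1700000001000-state123.db']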
package/src/web-worker/common/worker-schema.ts

@@ -4,6 +4,7 @@ import {
    LeaderAheadError,
    liveStoreVersion,
    MigrationsReport,
+   SyncBackend,
    SyncState,
    UnexpectedError,
  } from '@livestore/common'
@@ -146,6 +147,24 @@ export class LeaderWorkerInnerGetLeaderSyncState extends Schema.TaggedRequest<Le
    },
  ) {}

+ export class LeaderWorkerInnerGetNetworkStatus extends Schema.TaggedRequest<LeaderWorkerInnerGetNetworkStatus>()(
+   'GetNetworkStatus',
+   {
+     payload: {},
+     success: SyncBackend.NetworkStatus,
+     failure: UnexpectedError,
+   },
+ ) {}
+
+ export class LeaderWorkerInnerNetworkStatusStream extends Schema.TaggedRequest<LeaderWorkerInnerNetworkStatusStream>()(
+   'NetworkStatusStream',
+   {
+     payload: {},
+     success: SyncBackend.NetworkStatus,
+     failure: UnexpectedError,
+   },
+ ) {}
+
  export class LeaderWorkerInnerShutdown extends Schema.TaggedRequest<LeaderWorkerInnerShutdown>()('Shutdown', {
    payload: {},
    success: Schema.Void,
@@ -173,6 +192,8 @@ export const LeaderWorkerInnerRequest = Schema.Union(
    LeaderWorkerInnerGetRecreateSnapshot,
    LeaderWorkerInnerGetLeaderHead,
    LeaderWorkerInnerGetLeaderSyncState,
+   LeaderWorkerInnerGetNetworkStatus,
+   LeaderWorkerInnerNetworkStatusStream,
    LeaderWorkerInnerShutdown,
    LeaderWorkerInnerExtraDevtoolsMessage,
    WebmeshWorker.Schema.CreateConnection,
@@ -218,6 +239,8 @@ export class SharedWorkerRequest extends Schema.Union(
    LeaderWorkerInnerExportEventlog,
    LeaderWorkerInnerGetLeaderHead,
    LeaderWorkerInnerGetLeaderSyncState,
+   LeaderWorkerInnerGetNetworkStatus,
+   LeaderWorkerInnerNetworkStatusStream,
    LeaderWorkerInnerShutdown,
    LeaderWorkerInnerExtraDevtoolsMessage,

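Both new requests follow the package's existing `Schema.TaggedRequest` pattern: an empty payload with typed success and failure channels, registered in both the `LeaderWorkerInnerRequest` and `SharedWorkerRequest` unions so the shared worker can route them. A minimal sketch of the same pattern, using a hypothetical `Ping` request (not part of the package):

import { Schema } from '@livestore/utils/effect'

// Hypothetical request following the same shape as GetNetworkStatus above:
// no payload, a typed success value, and a typed failure channel.
export class Ping extends Schema.TaggedRequest<Ping>()('Ping', {
  payload: {},
  success: Schema.Struct({ respondedAt: Schema.Number }),
  failure: Schema.Never,
}) {}

// As with GetNetworkStatus/NetworkStatusStream, a new request only becomes
// routable once it is added to both request unions.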
package/src/web-worker/leader-worker/make-leader-worker.ts

@@ -26,7 +26,7 @@ import {
  import type * as otel from '@opentelemetry/api'

  import * as OpfsUtils from '../../opfs-utils.ts'
- import { getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
+ import { cleanupOldStateDbFiles, getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
  import { makeShutdownChannel } from '../common/shutdown-channel.ts'
  import * as WorkerSchema from '../common/worker-schema.ts'

@@ -59,16 +59,16 @@ export const makeWorkerEffect = (options: WorkerOptions) => {
        )
      : undefined

+   const layer = Layer.mergeAll(Logger.prettyWithThread(self.name), FetchHttpClient.layer, TracingLive ?? Layer.empty)
+
    return makeWorkerRunnerOuter(options).pipe(
      Layer.provide(BrowserWorkerRunner.layer),
      WorkerRunner.launch,
      Effect.scoped,
      Effect.tapCauseLogPretty,
      Effect.annotateLogs({ thread: self.name }),
-     Effect.provide(Logger.prettyWithThread(self.name)),
-     Effect.provide(FetchHttpClient.layer),
+     Effect.provide(layer),
      LS_DEV ? TaskTracing.withAsyncTaggingTracing((name) => (console as any).createTask(name)) : identity,
-     TracingLive ? Effect.provide(TracingLive) : identity,
      // We're using this custom scheduler to improve op batching behaviour and reduce the overhead
      // of the Effect fiber runtime given we have different tradeoffs on a worker thread.
      // Despite the "message channel" name, it has nothing to do with the `incomingRequestsPort` above.
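This refactor collapses three separate `Effect.provide` calls into one merged layer, with `TracingLive ?? Layer.empty` folding the optional tracing layer into the merge instead of a conditional pipe step. A minimal standalone sketch of the pattern (layer names are illustrative stand-ins; imports from the base `effect` package for self-containment):

import { Effect, Layer } from 'effect'

// Illustrative stand-ins for Logger.prettyWithThread(...) and FetchHttpClient.layer
const LoggingLive = Layer.scopedDiscard(Effect.log('logging layer acquired'))
const HttpLive = Layer.scopedDiscard(Effect.log('http layer acquired'))

// An optional layer, e.g. tracing that is only configured in some environments
const TracingLive: Layer.Layer<never> | undefined = undefined

// Merge once, provide once: Layer.empty makes the optional layer a no-op,
// replacing the previous `TracingLive ? Effect.provide(TracingLive) : identity` branch.
const AppLayer = Layer.mergeAll(LoggingLive, HttpLive, TracingLive ?? Layer.empty)

const program = Effect.log('running with merged layer').pipe(Effect.provide(AppLayer))

Effect.runPromise(program)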
@@ -131,6 +131,16 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions }: WorkerOptions) =>
      concurrency: 2,
    })

+   // Clean up old state database files after successful database creation
+   // This prevents OPFS file pool capacity exhaustion from accumulated state db files after schema changes/migrations
+   if (dbState.metadata._tag === 'opfs') {
+     yield* cleanupOldStateDbFiles({
+       vfs: dbState.metadata.vfs,
+       currentSchema: schema,
+       opfsDirectory: dbState.metadata.persistenceInfo.opfsDirectory,
+     })
+   }
+
    const devtoolsOptions = yield* makeDevtoolsOptions({ devtoolsEnabled, dbState, dbEventlog })
    const shutdownChannel = yield* makeShutdownChannel(storeId)

@@ -212,6 +222,16 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions }: WorkerOptions) =>
          UnexpectedError.mapToUnexpectedError,
          Effect.withSpan('@livestore/adapter-web:worker:GetLeaderSyncState'),
        ),
+     GetNetworkStatus: () =>
+       Effect.gen(function* () {
+         const workerCtx = yield* LeaderThreadCtx
+         return yield* workerCtx.networkStatus
+       }).pipe(UnexpectedError.mapToUnexpectedError, Effect.withSpan('@livestore/adapter-web:worker:GetNetworkStatus')),
+     NetworkStatusStream: () =>
+       Effect.gen(function* () {
+         const workerCtx = yield* LeaderThreadCtx
+         return workerCtx.networkStatus.changes
+       }).pipe(Stream.unwrapScoped),
      Shutdown: () =>
        Effect.gen(function* () {
          yield* Effect.logDebug('[@livestore/adapter-web:worker] Shutdown')
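The two handlers above rely on `workerCtx.networkStatus` behaving like an Effect `SubscriptionRef` (as the `.changes` usage suggests): yielding it reads the current status, while `.changes` is a stream of the current value plus all subsequent updates. A minimal standalone sketch of that read/stream duality, using the base `effect` package and an illustrative status shape:

import { Chunk, Effect, Ref, Stream, SubscriptionRef } from 'effect'

const program = Effect.gen(function* () {
  const networkStatus = yield* SubscriptionRef.make({ isConnected: false })

  // One-shot read, as in the GetNetworkStatus handler (which yields the
  // SubscriptionRef directly; Ref.get is the explicit equivalent).
  const before = yield* Ref.get(networkStatus)
  yield* Effect.log(`before: ${JSON.stringify(before)}`)

  yield* Ref.set(networkStatus, { isConnected: true })

  // Streaming read, as in NetworkStatusStream: `.changes` emits the current
  // value on subscription, then every subsequent update.
  const first = yield* networkStatus.changes.pipe(Stream.take(1), Stream.runCollect)
  yield* Effect.log(`first emitted value: ${JSON.stringify(Chunk.toReadonlyArray(first)[0])}`)
})

Effect.runPromise(program)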
package/src/web-worker/shared-worker/make-shared-worker.ts

@@ -28,6 +28,20 @@ import {
  import { makeShutdownChannel } from '../common/shutdown-channel.ts'
  import * as WorkerSchema from '../common/worker-schema.ts'

+ // Extract from `livestore-shared-worker-${storeId}`
+ const storeId = self.name.replace('livestore-shared-worker-', '')
+
+ // We acquire a lock that is held as long as this shared worker is alive.
+ // This way, when the shared worker is terminated (e.g. by the browser when the page is closed),
+ // the lock is released and any thread waiting for the lock can be notified.
+ const LIVESTORE_SHARED_WORKER_TERMINATION_LOCK = `livestore-shared-worker-termination-lock-${storeId}`
+ navigator.locks.request(
+   LIVESTORE_SHARED_WORKER_TERMINATION_LOCK,
+   { steal: true },
+   // We use a never-resolving promise to hold the lock
+   async () => new Promise(() => {}),
+ )
+
  if (isDevEnv()) {
    globalThis.__debugLiveStoreUtils = {
      blobUrl: (buffer: Uint8Array<ArrayBuffer>) =>
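Because the Web Locks API releases a lock when its holding context is destroyed, other threads can detect shared worker termination simply by requesting the same lock. A hypothetical client-side counterpart (not part of this package), assuming the lock name convention above:

// A plain (non-steal) request from another thread is only granted once the
// shared worker has been terminated and the browser has released the lock.
const waitForSharedWorkerTermination = (storeId: string): Promise<void> =>
  new Promise((resolve) => {
    void navigator.locks.request(`livestore-shared-worker-termination-lock-${storeId}`, async () => {
      // Being granted the lock implies the shared worker is gone.
      resolve()
    })
  })

// Usage sketch:
// waitForSharedWorkerTermination('my-store').then(() => console.log('shared worker terminated'))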
@@ -232,6 +246,8 @@ const makeWorkerRunner = Effect.gen(function* () {
      Setup: forwardRequest,
      GetLeaderSyncState: forwardRequest,
      GetLeaderHead: forwardRequest,
+     GetNetworkStatus: forwardRequest,
+     NetworkStatusStream: forwardRequestStream,
      Shutdown: forwardRequest,
      ExtraDevtoolsMessage: forwardRequest,

@@ -241,9 +257,6 @@
  }).pipe(Layer.unwrapScoped)

  export const makeWorker = () => {
-   // Extract from `livestore-shared-worker-${storeId}`
-   const storeId = self.name.replace('livestore-shared-worker-', '')
-
    makeWorkerRunner.pipe(
      Layer.provide(BrowserWorkerRunner.layer),
      // WorkerRunner.launch,
package/src/web-worker/vite-dev-polyfill.ts

@@ -5,6 +5,7 @@ globalThis.$RefreshReg$ = () => {}
  // @ts-expect-error TODO remove when Vite does proper treeshaking during dev
  globalThis.$RefreshSig$ = () => (type: any) => type

+ // biome-ignore lint/suspicious/noTsIgnore: sometimes @types/node is there, sometimes not.
  // @ts-ignore
  globalThis.process = globalThis.process ?? { env: {} }