@livestore/adapter-web 0.4.0-dev.21 → 0.4.0-dev.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54):
  1. package/README.md +5 -5
  2. package/dist/.tsbuildinfo +1 -1
  3. package/dist/in-memory/in-memory-adapter.d.ts.map +1 -1
  4. package/dist/in-memory/in-memory-adapter.js +8 -4
  5. package/dist/in-memory/in-memory-adapter.js.map +1 -1
  6. package/dist/index.d.ts +11 -1
  7. package/dist/index.d.ts.map +1 -1
  8. package/dist/index.js +11 -1
  9. package/dist/index.js.map +1 -1
  10. package/dist/single-tab/mod.d.ts +15 -0
  11. package/dist/single-tab/mod.d.ts.map +1 -0
  12. package/dist/single-tab/mod.js +15 -0
  13. package/dist/single-tab/mod.js.map +1 -0
  14. package/dist/single-tab/single-tab-adapter.d.ts +108 -0
  15. package/dist/single-tab/single-tab-adapter.d.ts.map +1 -0
  16. package/dist/single-tab/single-tab-adapter.js +271 -0
  17. package/dist/single-tab/single-tab-adapter.js.map +1 -0
  18. package/dist/web-worker/client-session/client-session-devtools.d.ts +1 -1
  19. package/dist/web-worker/client-session/client-session-devtools.d.ts.map +1 -1
  20. package/dist/web-worker/client-session/client-session-devtools.js +7 -7
  21. package/dist/web-worker/client-session/client-session-devtools.js.map +1 -1
  22. package/dist/web-worker/client-session/persisted-adapter.d.ts +18 -0
  23. package/dist/web-worker/client-session/persisted-adapter.d.ts.map +1 -1
  24. package/dist/web-worker/client-session/persisted-adapter.js +95 -36
  25. package/dist/web-worker/client-session/persisted-adapter.js.map +1 -1
  26. package/dist/web-worker/client-session/sqlite-loader.d.ts.map +1 -1
  27. package/dist/web-worker/client-session/sqlite-loader.js +1 -1
  28. package/dist/web-worker/client-session/sqlite-loader.js.map +1 -1
  29. package/dist/web-worker/common/persisted-sqlite.d.ts.map +1 -1
  30. package/dist/web-worker/common/persisted-sqlite.js +13 -11
  31. package/dist/web-worker/common/persisted-sqlite.js.map +1 -1
  32. package/dist/web-worker/common/worker-schema.d.ts +34 -31
  33. package/dist/web-worker/common/worker-schema.d.ts.map +1 -1
  34. package/dist/web-worker/common/worker-schema.js +18 -19
  35. package/dist/web-worker/common/worker-schema.js.map +1 -1
  36. package/dist/web-worker/leader-worker/make-leader-worker.d.ts +2 -2
  37. package/dist/web-worker/leader-worker/make-leader-worker.d.ts.map +1 -1
  38. package/dist/web-worker/leader-worker/make-leader-worker.js +59 -25
  39. package/dist/web-worker/leader-worker/make-leader-worker.js.map +1 -1
  40. package/dist/web-worker/shared-worker/make-shared-worker.d.ts.map +1 -1
  41. package/dist/web-worker/shared-worker/make-shared-worker.js +15 -15
  42. package/dist/web-worker/shared-worker/make-shared-worker.js.map +1 -1
  43. package/package.json +56 -17
  44. package/src/in-memory/in-memory-adapter.ts +9 -5
  45. package/src/index.ts +15 -1
  46. package/src/single-tab/mod.ts +15 -0
  47. package/src/single-tab/single-tab-adapter.ts +499 -0
  48. package/src/web-worker/client-session/client-session-devtools.ts +26 -27
  49. package/src/web-worker/client-session/persisted-adapter.ts +126 -64
  50. package/src/web-worker/client-session/sqlite-loader.ts +1 -1
  51. package/src/web-worker/common/persisted-sqlite.ts +13 -10
  52. package/src/web-worker/common/worker-schema.ts +19 -18
  53. package/src/web-worker/leader-worker/make-leader-worker.ts +94 -52
  54. package/src/web-worker/shared-worker/make-shared-worker.ts +26 -48
@@ -1,6 +1,7 @@
1
- import type { Adapter, ClientSession, LockStatus } from '@livestore/common'
1
+ import type { Adapter, BootWarningReason, ClientSession, LockStatus } from '@livestore/common'
2
2
  import {
3
3
  IntentionalShutdownCause,
4
+ isWorkerTransportError,
4
5
  liveStoreVersion,
5
6
  makeClientSession,
6
7
  StoreInterrupted,
@@ -12,7 +13,7 @@ import {
12
13
  // import LiveStoreSharedWorker from '@livestore/adapter-web/internal-shared-worker?sharedworker'
13
14
  import { EventSequenceNumber } from '@livestore/common/schema'
14
15
  import { sqliteDbFactory } from '@livestore/sqlite-wasm/browser'
15
- import { isDevEnv, shouldNeverHappen, tryAsFunctionAndNew } from '@livestore/utils'
16
+ import { isDevEnv, omitUndefineds, shouldNeverHappen, tryAsFunctionAndNew } from '@livestore/utils'
16
17
  import {
17
18
  Cause,
18
19
  Deferred,
@@ -20,17 +21,18 @@ import {
20
21
  Exit,
21
22
  Fiber,
22
23
  Layer,
23
- ParseResult,
24
+ Option,
24
25
  Queue,
25
26
  Schema,
26
27
  Stream,
27
28
  Subscribable,
28
29
  SubscriptionRef,
29
30
  Worker,
30
- WorkerError,
31
31
  } from '@livestore/utils/effect'
32
- import { BrowserWorker, Opfs, WebLock } from '@livestore/utils/effect/browser'
32
+ import { BrowserWorker, Opfs, WebError, WebLock } from '@livestore/utils/effect/browser'
33
33
  import { nanoid } from '@livestore/utils/nanoid'
34
+
35
+ import { makeSingleTabAdapter } from '../../single-tab/single-tab-adapter.ts'
34
36
  import {
35
37
  readPersistedStateDbFromClientSession,
36
38
  resetPersistedDataFromClientSession,
@@ -41,7 +43,17 @@ import * as WorkerSchema from '../common/worker-schema.ts'
41
43
  import { connectWebmeshNodeClientSession } from './client-session-devtools.ts'
42
44
  import { loadSqlite3 } from './sqlite-loader.ts'
43
45
 
44
- if (isDevEnv()) {
46
+ /**
47
+ * Checks if SharedWorker API is available in the current browser context.
48
+ *
49
+ * Returns false on Android Chrome and other browsers without SharedWorker support.
50
+ *
51
+ * @see https://github.com/livestorejs/livestore/issues/321
52
+ * @see https://issues.chromium.org/issues/40290702
53
+ */
54
+ export const canUseSharedWorker = (): boolean => typeof SharedWorker !== 'undefined'
55
+
56
+ if (isDevEnv() === true) {
45
57
  globalThis.__debugLiveStoreUtils = {
46
58
  ...globalThis.__debugLiveStoreUtils,
47
59
  opfs: Opfs.debugUtils,
@@ -120,6 +132,15 @@ export type WebAdapterOptions = {
120
132
  * Creates a web adapter with persistent storage (currently only supports OPFS).
121
133
  * Requires both a web worker and a shared worker.
122
134
  *
135
+ * On browsers without SharedWorker support (e.g. Android Chrome), this adapter
136
+ * automatically falls back to single-tab mode. In single-tab mode:
137
+ * - Each tab runs independently with its own leader worker
138
+ * - Multi-tab synchronization is not available
139
+ * - Devtools are not supported
140
+ *
141
+ * @see https://github.com/livestorejs/livestore/issues/321 - SharedWorker tracking issue
142
+ * @see https://issues.chromium.org/issues/40290702 - Chromium SharedWorker bug
143
+ *
123
144
  * @example
124
145
  * ```ts
125
146
  * import { makePersistedAdapter } from '@livestore/adapter-web'
@@ -137,6 +158,26 @@ export const makePersistedAdapter =
137
158
  (options: WebAdapterOptions): Adapter =>
138
159
  (adapterArgs) =>
139
160
  Effect.gen(function* () {
161
+ // Check SharedWorker availability first and fall back to single-tab mode if unavailable
162
+ if (canUseSharedWorker() === false) {
163
+ yield* Effect.logWarning(
164
+ '[@livestore/adapter-web] SharedWorker unavailable (e.g. Android Chrome). ' +
165
+ 'Falling back to single-tab mode. Multi-tab synchronization and devtools are disabled. ' +
166
+ 'See: https://github.com/livestorejs/livestore/issues/321',
167
+ )
168
+
169
+ return yield* makeSingleTabAdapter({
170
+ worker: options.worker,
171
+ storage: options.storage,
172
+ ...omitUndefineds({
173
+ resetPersistence: options.resetPersistence,
174
+ clientId: options.clientId,
175
+ sessionId: options.sessionId,
176
+ experimental: options.experimental,
177
+ }),
178
+ })(adapterArgs)
179
+ }
180
+
140
181
  const {
141
182
  schema,
142
183
  storeId,
@@ -168,10 +209,21 @@ export const makePersistedAdapter =
168
209
 
169
210
  const shutdownChannel = yield* makeShutdownChannel(storeId)
170
211
 
171
- if (options.resetPersistence === true) {
212
+ // Check OPFS availability early and notify user if storage is unavailable (e.g. private browsing)
213
+ const opfsWarning = yield* checkOpfsAvailability
214
+ if (opfsWarning !== undefined) {
215
+ yield* Effect.logWarning('[@livestore/adapter-web:client-session] OPFS unavailable', opfsWarning)
216
+ }
217
+
218
+ if (options.resetPersistence === true && opfsWarning === undefined) {
172
219
  yield* shutdownChannel.send(IntentionalShutdownCause.make({ reason: 'adapter-reset' }))
173
220
 
174
221
  yield* resetPersistedDataFromClientSession({ storageOptions, storeId })
222
+ } else if (options.resetPersistence === true) {
223
+ yield* Effect.logWarning(
224
+ '[@livestore/adapter-web:client-session] Skipping persistence reset because storage is unavailable',
225
+ opfsWarning,
226
+ )
175
227
  }
176
228
 
177
229
  // Note on fast-path booting:
@@ -181,7 +233,7 @@ export const makePersistedAdapter =
181
233
  // We need to be extra careful though to not run into any race conditions or inconsistencies.
182
234
  // TODO also verify persisted data
183
235
  const dataFromFile =
184
- options.experimental?.disableFastPath === true
236
+ options.experimental?.disableFastPath === true || opfsWarning !== undefined
185
237
  ? undefined
186
238
  : yield* readPersistedStateDbFromClientSession({ storageOptions, storeId, schema }).pipe(
187
239
  Effect.tapError((error) =>
@@ -203,7 +255,7 @@ export const makePersistedAdapter =
203
255
  yield* shutdownChannel.listen.pipe(
204
256
  Stream.flatten(),
205
257
  Stream.tap((cause) =>
206
- shutdown(cause._tag === 'LiveStore.IntentionalShutdownCause' ? Exit.succeed(cause) : Exit.fail(cause)),
258
+ shutdown(cause._tag === 'IntentionalShutdownCause' ? Exit.succeed(cause) : Exit.fail(cause)),
207
259
  ),
208
260
  Stream.runDrain,
209
261
  Effect.interruptible,
@@ -213,7 +265,7 @@ export const makePersistedAdapter =
213
265
 
214
266
  const sharedWebWorker = tryAsFunctionAndNew(options.sharedWorker, { name: `livestore-shared-worker-${storeId}` })
215
267
 
216
- if (options.experimental?.awaitSharedWorkerTermination) {
268
+ if (options.experimental?.awaitSharedWorkerTermination === true) {
217
269
  // Relying on the lock being available is currently the only mechanism we're aware of
218
270
  // to know whether the shared worker has terminated.
219
271
  yield* Effect.addFinalizer(() => WebLock.waitForLock(LIVESTORE_SHARED_WORKER_TERMINATION_LOCK))
@@ -226,7 +278,7 @@ export const makePersistedAdapter =
226
278
  }).pipe(
227
279
  Effect.provide(sharedWorkerContext),
228
280
  Effect.tapCauseLogPretty,
229
- UnknownError.mapToUnknownError,
281
+ Effect.orDie,
230
282
  Effect.tapErrorCause((cause) => shutdown(Exit.failCause(cause))),
231
283
  Effect.withSpan('@livestore/adapter-web:client-session:setupSharedWorker'),
232
284
  Effect.forkScoped,
@@ -238,7 +290,7 @@ export const makePersistedAdapter =
238
290
  //
239
291
  // Sorry for this pun ...
240
292
  let gotLocky = yield* WebLock.tryGetDeferredLock(lockDeferred, LIVESTORE_TAB_LOCK)
241
- const lockStatus = yield* SubscriptionRef.make<LockStatus>(gotLocky ? 'has-lock' : 'no-lock')
293
+ const lockStatus = yield* SubscriptionRef.make<LockStatus>(gotLocky === true ? 'has-lock' : 'no-lock')
242
294
 
243
295
  // Ideally we can come up with a simpler implementation that doesn't require this
244
296
  const waitForSharedWorkerInitialized = yield* Deferred.make<void>()
@@ -323,15 +375,14 @@ export const makePersistedAdapter =
323
375
  yield* runLocked.pipe(Effect.interruptible, Effect.tapCauseLogPretty, Effect.forkScoped)
324
376
  }
325
377
 
326
- const runInWorker = <TReq extends typeof WorkerSchema.SharedWorkerRequest.Type>(
327
- req: TReq,
328
- ): TReq extends Schema.WithResult<infer A, infer _I, infer E, infer _EI, infer R>
329
- ? Effect.Effect<A, UnknownError | E, R>
330
- : never =>
378
+ const runInWorker = <A, I, E, EI, R>(
379
+ req: WorkerSchema.SharedWorkerRequest & Schema.WithResult<A, I, E, EI, R>,
380
+ ): Effect.Effect<A, E, R> =>
331
381
  Fiber.join(sharedWorkerFiber).pipe(
332
382
  // NOTE we need to wait for the shared worker to be initialized before we can send requests to it
333
383
  Effect.tap(() => waitForSharedWorkerInitialized),
334
- Effect.flatMap((worker) => worker.executeEffect(req) as any),
384
+ Effect.flatMap((worker) => worker.executeEffect(req)),
385
+ Effect.catchIf(isWorkerTransportError, (e) => Effect.die(e)),
335
386
  // NOTE we want to treat worker requests as atomic and therefore not allow them to be interrupted
336
387
  // Interruption usually only happens during leader re-election or store shutdown
337
388
  // Effect.uninterruptible,
@@ -340,40 +391,24 @@ export const makePersistedAdapter =
340
391
  duration: 2000,
341
392
  }),
342
393
  Effect.withSpan(`@livestore/adapter-web:client-session:runInWorker:${req._tag}`),
343
- Effect.mapError((cause) =>
344
- Schema.is(UnknownError)(cause)
345
- ? cause
346
- : ParseResult.isParseError(cause) || Schema.is(WorkerError.WorkerError)(cause)
347
- ? new UnknownError({ cause })
348
- : cause,
349
- ),
350
- Effect.catchAllDefect((cause) => new UnknownError({ cause })),
351
- ) as any
352
-
353
- const runInWorkerStream = <TReq extends typeof WorkerSchema.SharedWorkerRequest.Type>(
354
- req: TReq,
355
- ): TReq extends Schema.WithResult<infer A, infer _I, infer _E, infer _EI, infer R>
356
- ? Stream.Stream<A, UnknownError, R>
357
- : never =>
394
+ )
395
+
396
+ const runInWorkerStream = <A, I, E, EI, R>(
397
+ req: WorkerSchema.SharedWorkerRequest & Schema.WithResult<A, I, E, EI, R>,
398
+ ): Stream.Stream<A, E, R> =>
358
399
  Effect.gen(function* () {
359
400
  const sharedWorker = yield* Fiber.join(sharedWorkerFiber)
360
- return sharedWorker.execute(req as any).pipe(
361
- Stream.mapError((cause) =>
362
- Schema.is(UnknownError)(cause)
363
- ? cause
364
- : ParseResult.isParseError(cause) || Schema.is(WorkerError.WorkerError)(cause)
365
- ? new UnknownError({ cause })
366
- : cause,
367
- ),
401
+ return sharedWorker.execute(req).pipe(
402
+ Stream.refineOrDie((e) => isWorkerTransportError(e) === true ? Option.none() : Option.some(e)),
368
403
  Stream.withSpan(`@livestore/adapter-web:client-session:runInWorkerStream:${req._tag}`),
369
404
  )
370
- }).pipe(Stream.unwrap) as any
405
+ }).pipe(Stream.unwrap)
371
406
 
372
407
  const bootStatusFiber = yield* runInWorkerStream(new WorkerSchema.LeaderWorkerInnerBootStatusStream()).pipe(
373
408
  Stream.tap((_) => Queue.offer(bootStatusQueue, _)),
374
409
  Stream.runDrain,
375
410
  Effect.tapErrorCause((cause) =>
376
- Cause.isInterruptedOnly(cause) ? Effect.void : shutdown(Exit.failCause(cause)),
411
+ Cause.isInterruptedOnly(cause) === true ? Effect.void : shutdown(Exit.failCause(cause)),
377
412
  ),
378
413
  Effect.interruptible,
379
414
  Effect.tapCauseLogPretty,
@@ -428,20 +463,21 @@ export const makePersistedAdapter =
428
463
  .first(),
429
464
  )
430
465
 
431
- const initialLeaderHead = initialLeaderHeadRes
432
- ? EventSequenceNumber.Client.Composite.make({
433
- global: initialLeaderHeadRes.seqNumGlobal,
434
- client: initialLeaderHeadRes.seqNumClient,
435
- rebaseGeneration: initialLeaderHeadRes.seqNumRebaseGeneration,
436
- })
437
- : EventSequenceNumber.Client.ROOT
466
+ const initialLeaderHead =
467
+ initialLeaderHeadRes !== undefined
468
+ ? EventSequenceNumber.Client.Composite.make({
469
+ global: initialLeaderHeadRes.seqNumGlobal,
470
+ client: initialLeaderHeadRes.seqNumClient,
471
+ rebaseGeneration: initialLeaderHeadRes.seqNumRebaseGeneration,
472
+ })
473
+ : EventSequenceNumber.Client.ROOT
438
474
 
439
475
  // console.debug('[@livestore/adapter-web:client-session] initialLeaderHead', initialLeaderHead)
440
476
 
441
477
  yield* Effect.addFinalizer((ex) =>
442
478
  Effect.gen(function* () {
443
479
  if (
444
- Exit.isFailure(ex) &&
480
+ Exit.isFailure(ex) === true &&
445
481
  Exit.isInterrupted(ex) === false &&
446
482
  Schema.is(IntentionalShutdownCause)(Cause.squash(ex.cause)) === false &&
447
483
  Schema.is(StoreInterrupted)(Cause.squash(ex.cause)) === false
@@ -451,7 +487,7 @@ export const makePersistedAdapter =
451
487
  yield* Effect.logDebug('[@livestore/adapter-web:client-session] client-session shutdown', gotLocky, ex)
452
488
  }
453
489
 
454
- if (gotLocky) {
490
+ if (gotLocky === true) {
455
491
  yield* Deferred.succeed(lockDeferred, undefined)
456
492
  }
457
493
  }).pipe(Effect.tapCauseLogPretty, Effect.orDie),
@@ -459,8 +495,7 @@ export const makePersistedAdapter =
459
495
 
460
496
  const leaderThread: ClientSession['leaderThread'] = {
461
497
  export: runInWorker(new WorkerSchema.LeaderWorkerInnerExport()).pipe(
462
- Effect.timeout(10_000),
463
- UnknownError.mapToUnknownError,
498
+ Effect.timeoutOrDie(10_000),
464
499
  Effect.withSpan('@livestore/adapter-web:client-session:export'),
465
500
  ),
466
501
 
@@ -480,17 +515,19 @@ export const makePersistedAdapter =
480
515
  ),
481
516
  },
482
517
 
483
- initialState: { leaderHead: initialLeaderHead, migrationsReport },
518
+ initialState: {
519
+ leaderHead: initialLeaderHead,
520
+ migrationsReport,
521
+ storageMode: opfsWarning === undefined ? 'persisted' : 'in-memory',
522
+ },
484
523
 
485
524
  getEventlogData: runInWorker(new WorkerSchema.LeaderWorkerInnerExportEventlog()).pipe(
486
- Effect.timeout(10_000),
487
- UnknownError.mapToUnknownError,
525
+ Effect.timeoutOrDie(10_000),
488
526
  Effect.withSpan('@livestore/adapter-web:client-session:getEventlogData'),
489
527
  ),
490
528
 
491
529
  syncState: Subscribable.make({
492
530
  get: runInWorker(new WorkerSchema.LeaderWorkerInnerGetLeaderSyncState()).pipe(
493
- UnknownError.mapToUnknownError,
494
531
  Effect.withSpan('@livestore/adapter-web:client-session:getLeaderSyncState'),
495
532
  ),
496
533
  changes: runInWorkerStream(new WorkerSchema.LeaderWorkerInnerSyncStateStream()).pipe(Stream.orDie),
@@ -498,12 +535,11 @@ export const makePersistedAdapter =
498
535
 
499
536
  sendDevtoolsMessage: (message) =>
500
537
  runInWorker(new WorkerSchema.LeaderWorkerInnerExtraDevtoolsMessage({ message })).pipe(
501
- UnknownError.mapToUnknownError,
502
538
  Effect.withSpan('@livestore/adapter-web:client-session:devtoolsMessageForLeader'),
503
539
  ),
504
540
  networkStatus: Subscribable.make({
505
- get: runInWorker(new WorkerSchema.LeaderWorkerInnerGetNetworkStatus()).pipe(Effect.orDie),
506
- changes: runInWorkerStream(new WorkerSchema.LeaderWorkerInnerNetworkStatusStream()).pipe(Stream.orDie),
541
+ get: runInWorker(new WorkerSchema.LeaderWorkerInnerGetNetworkStatus()),
542
+ changes: runInWorkerStream(new WorkerSchema.LeaderWorkerInnerNetworkStatusStream()),
507
543
  }),
508
544
  }
509
545
 
@@ -546,7 +582,7 @@ const getPersistedId = (key: string, storageType: 'session' | 'local') => {
546
582
  ? sessionStorage
547
583
  : storageType === 'local'
548
584
  ? localStorage
549
- : shouldNeverHappen(`[@livestore/adapter-web] Invalid storage type: ${storageType}`)
585
+ : shouldNeverHappen(`[@livestore/adapter-web] Invalid storage type: ${String(storageType)}`)
550
586
 
551
587
  // in case of a worker, we need the id of the parent window, to keep the id consistent
552
588
  // we also need to handle the case where there are multiple workers being spawned by the same window
@@ -557,7 +593,7 @@ const getPersistedId = (key: string, storageType: 'session' | 'local') => {
557
593
  const fullKey = `livestore:${key}`
558
594
  const storedKey = storage.getItem(fullKey)
559
595
 
560
- if (storedKey) return storedKey
596
+ if (storedKey !== null) return storedKey
561
597
 
562
598
  const newKey = makeId()
563
599
  storage.setItem(fullKey, newKey)
@@ -568,7 +604,7 @@ const getPersistedId = (key: string, storageType: 'session' | 'local') => {
568
604
  const ensureBrowserRequirements = Effect.gen(function* () {
569
605
  const validate = (condition: boolean, label: string) =>
570
606
  Effect.gen(function* () {
571
- if (condition) {
607
+ if (condition === true) {
572
608
  return yield* UnknownError.make({
573
609
  cause: `[@livestore/adapter-web] Browser not supported. The LiveStore web adapter needs '${label}' to work properly`,
574
610
  })
@@ -584,3 +620,29 @@ const ensureBrowserRequirements = Effect.gen(function* () {
584
620
  validate(typeof sessionStorage === 'undefined', 'sessionStorage'),
585
621
  ])
586
622
  })
623
+
624
+ /**
625
+ * Attempts to access OPFS and returns a warning if unavailable.
626
+ *
627
+ * Common failure scenarios:
628
+ * - Safari/Firefox private browsing: SecurityError or NotAllowedError
629
+ * - Permission denied: NotAllowedError
630
+ * - Quota exceeded: QuotaExceededError
631
+ */
632
+ const checkOpfsAvailability = Effect.gen(function* () {
633
+ const opfs = yield* Opfs.Opfs
634
+ return yield* opfs.getRootDirectoryHandle.pipe(
635
+ Effect.as(undefined),
636
+ Effect.catchAll((error) => {
637
+ const reason: BootWarningReason =
638
+ Schema.is(WebError.SecurityError)(error) === true || Schema.is(WebError.NotAllowedError)(error) === true
639
+ ? 'private-browsing'
640
+ : 'storage-unavailable'
641
+ const message =
642
+ reason === 'private-browsing'
643
+ ? 'Storage unavailable in private browsing mode. LiveStore will continue without persistence.'
644
+ : 'Storage access denied. LiveStore will continue without persistence.'
645
+ return Effect.succeed({ reason, message } as const)
646
+ }),
647
+ )
648
+ })
@@ -16,4 +16,4 @@ if (isServerRuntime === false) {
16
16
  sqlite3Promise = loadSqlite3Wasm()
17
17
  }
18
18
 
19
- export const loadSqlite3 = () => (isServerRuntime ? loadSqlite3Wasm() : (sqlite3Promise ?? loadSqlite3Wasm()))
19
+ export const loadSqlite3 = () => (isServerRuntime === true ? loadSqlite3Wasm() : (sqlite3Promise ?? loadSqlite3Wasm()))
@@ -8,9 +8,10 @@ import {
8
8
  import { isDevEnv } from '@livestore/utils'
9
9
  import { Chunk, Effect, Option, Order, Schedule, Schema, Stream } from '@livestore/utils/effect'
10
10
  import { Opfs, type WebError } from '@livestore/utils/effect/browser'
11
+
11
12
  import type * as WorkerSchema from './worker-schema.ts'
12
13
 
13
- export class PersistedSqliteError extends Schema.TaggedError<PersistedSqliteError>()('PersistedSqliteError', {
14
+ export class PersistedSqliteError extends Schema.TaggedError<PersistedSqliteError>('~@livestore/adapter-web/PersistedSqliteError')('PersistedSqliteError', {
14
15
  message: Schema.String,
15
16
  cause: Schema.optional(Schema.Defect),
16
17
  }) {}
@@ -56,7 +57,7 @@ export const readPersistedStateDbFromClientSession: (args: {
56
57
  Stream.runHead,
57
58
  )
58
59
 
59
- if (Option.isNone(stateDbFileOption)) {
60
+ if (Option.isNone(stateDbFileOption) === true) {
60
61
  return yield* new PersistedSqliteError({
61
62
  message: `State database file not found in client session (expected '${stateDbFileName}' in '${accessHandlePoolDirString}')`,
62
63
  })
@@ -88,9 +89,9 @@ export const resetPersistedDataFromClientSession = Effect.fn(
88
89
  )(
89
90
  function* ({ storageOptions, storeId }: { storageOptions: WorkerSchema.StorageType; storeId: string }) {
90
91
  const directory = yield* sanitizeOpfsDir(storageOptions.directory, storeId)
91
- yield* Opfs.remove(directory).pipe(
92
+ yield* Opfs.remove(directory, { recursive: true }).pipe(
92
93
  // We ignore NotFoundError here as it may not exist or have already been deleted
93
- Effect.catchTag('@livestore/utils/Web/NotFoundError', () => Effect.void),
94
+ Effect.catchTag('NotFoundError', () => Effect.void),
94
95
  )
95
96
  },
96
97
  Effect.retry({
@@ -106,7 +107,7 @@ export const sanitizeOpfsDir = Effect.fn('@livestore/adapter-web:sanitizeOpfsDir
106
107
  return `livestore-${storeId}@${liveStoreStorageFormatVersion}`
107
108
  }
108
109
 
109
- if (directory.includes('/')) {
110
+ if (directory.includes('/') === true) {
110
111
  return yield* new PersistedSqliteError({
111
112
  message: `Nested directories are not yet supported ('${directory}')`,
112
113
  })
@@ -183,12 +184,13 @@ export const cleanupOldStateDbFiles: (options: {
183
184
  }
184
185
 
185
186
  const absoluteArchiveDirName = `${opfsDirectory}/${ARCHIVE_DIR_NAME}`
186
- if (isDev && !(yield* Opfs.exists(absoluteArchiveDirName))) yield* Opfs.makeDirectory(absoluteArchiveDirName)
187
+ if (isDev === true && (yield* Opfs.exists(absoluteArchiveDirName)) === false)
188
+ yield* Opfs.makeDirectory(absoluteArchiveDirName)
187
189
 
188
190
  for (const path of oldStateDbPaths) {
189
- const fileName = path.startsWith('/') ? path.slice(1) : path
191
+ const fileName = path.startsWith('/') === true ? path.slice(1) : path
190
192
 
191
- if (isDev) {
193
+ if (isDev === true) {
192
194
  const archiveFileData = yield* vfs.readFilePayload(fileName)
193
195
 
194
196
  const archiveFileName = `${Date.now()}-${fileName}`
@@ -201,7 +203,7 @@ export const cleanupOldStateDbFiles: (options: {
201
203
  const supportsCreateWritable =
202
204
  typeof FileSystemFileHandle !== 'undefined' && 'createWritable' in FileSystemFileHandle.prototype
203
205
 
204
- if (supportsCreateWritable) {
206
+ if (supportsCreateWritable === true) {
205
207
  yield* Opfs.writeFile(archivePath, archiveData)
206
208
  } else {
207
209
  yield* Opfs.syncWriteFile(archivePath, archiveData)
@@ -225,7 +227,7 @@ export const cleanupOldStateDbFiles: (options: {
225
227
  yield* Effect.logDebug(`Deleted old state database file: ${fileName}`)
226
228
  }
227
229
 
228
- if (isDev) {
230
+ if (isDev === true) {
229
231
  yield* pruneArchiveDirectory({
230
232
  archiveDirectory: absoluteArchiveDirName,
231
233
  keep: MAX_ARCHIVED_STATE_DBS_IN_DEV,
@@ -248,6 +250,7 @@ const pruneArchiveDirectory = Effect.fn('@livestore/adapter-web:pruneArchiveDire
248
250
  Stream.runCollect,
249
251
  )
250
252
  const filesToDelete = filesWithMetadata.pipe(
253
+ // oxlint-disable-next-line unicorn/no-array-sort -- false positive: Effect Chunk.sort is immutable, not Array#sort (https://github.com/oxc-project/oxc/issues/19110)
251
254
  Chunk.sort(Order.mapInput(Order.number, (entry: { lastModified: number }) => entry.lastModified)),
252
255
  Chunk.drop(keep),
253
256
  Chunk.toReadonlyArray,
@@ -1,7 +1,7 @@
1
1
  import {
2
2
  BootStatus,
3
3
  Devtools,
4
- LeaderAheadError,
4
+ RejectedPushError,
5
5
  liveStoreVersion,
6
6
  MigrationsReport,
7
7
  SyncBackend,
@@ -50,7 +50,7 @@ export class LeaderWorkerOuterInitialMessage extends Schema.TaggedRequest<Leader
50
50
  {
51
51
  payload: { port: Transferable.MessagePort, storeId: Schema.String, clientId: Schema.String },
52
52
  success: Schema.Void,
53
- failure: UnknownError,
53
+ failure: Schema.Never,
54
54
  },
55
55
  ) {}
56
56
 
@@ -78,7 +78,7 @@ export class LeaderWorkerInnerBootStatusStream extends Schema.TaggedRequest<Lead
78
78
  {
79
79
  payload: {},
80
80
  success: BootStatus,
81
- failure: UnknownError,
81
+ failure: Schema.Never,
82
82
  },
83
83
  ) {}
84
84
 
@@ -89,7 +89,7 @@ export class LeaderWorkerInnerPushToLeader extends Schema.TaggedRequest<LeaderWo
89
89
  batch: Schema.Array(Schema.typeSchema(LiveStoreEvent.Client.Encoded)),
90
90
  },
91
91
  success: Schema.Void as Schema.Schema<void>,
92
- failure: Schema.Union(UnknownError, LeaderAheadError),
92
+ failure: RejectedPushError,
93
93
  },
94
94
  ) {}
95
95
 
@@ -100,7 +100,7 @@ export class LeaderWorkerInnerPullStream extends Schema.TaggedRequest<LeaderWork
100
100
  success: Schema.Struct({
101
101
  payload: SyncState.PayloadUpstream,
102
102
  }),
103
- failure: UnknownError,
103
+ failure: Schema.Never,
104
104
  }) {}
105
105
 
106
106
  export class LeaderWorkerInnerStreamEvents extends Schema.TaggedRequest<LeaderWorkerInnerStreamEvents>()(
@@ -108,14 +108,14 @@ export class LeaderWorkerInnerStreamEvents extends Schema.TaggedRequest<LeaderWo
108
108
  {
109
109
  payload: StreamEventsOptionsFields,
110
110
  success: LiveStoreEvent.Client.Encoded,
111
- failure: UnknownError,
111
+ failure: Schema.Never,
112
112
  },
113
113
  ) {}
114
114
 
115
115
  export class LeaderWorkerInnerExport extends Schema.TaggedRequest<LeaderWorkerInnerExport>()('Export', {
116
116
  payload: {},
117
117
  success: Transferable.Uint8Array as Schema.Schema<Uint8Array<ArrayBuffer>>,
118
- failure: UnknownError,
118
+ failure: Schema.Never,
119
119
  }) {}
120
120
 
121
121
  export class LeaderWorkerInnerExportEventlog extends Schema.TaggedRequest<LeaderWorkerInnerExportEventlog>()(
@@ -123,7 +123,7 @@ export class LeaderWorkerInnerExportEventlog extends Schema.TaggedRequest<Leader
123
123
  {
124
124
  payload: {},
125
125
  success: Transferable.Uint8Array as Schema.Schema<Uint8Array<ArrayBuffer>>,
126
- failure: UnknownError,
126
+ failure: Schema.Never,
127
127
  },
128
128
  ) {}
129
129
 
@@ -135,7 +135,7 @@ export class LeaderWorkerInnerGetRecreateSnapshot extends Schema.TaggedRequest<L
135
135
  snapshot: Transferable.Uint8Array as Schema.Schema<Uint8Array<ArrayBuffer>>,
136
136
  migrationsReport: MigrationsReport,
137
137
  }),
138
- failure: UnknownError,
138
+ failure: Schema.Never,
139
139
  },
140
140
  ) {}
141
141
 
@@ -144,7 +144,7 @@ export class LeaderWorkerInnerGetLeaderHead extends Schema.TaggedRequest<LeaderW
144
144
  {
145
145
  payload: {},
146
146
  success: Schema.typeSchema(EventSequenceNumber.Client.Composite),
147
- failure: UnknownError,
147
+ failure: Schema.Never,
148
148
  },
149
149
  ) {}
150
150
 
@@ -153,7 +153,7 @@ export class LeaderWorkerInnerGetLeaderSyncState extends Schema.TaggedRequest<Le
153
153
  {
154
154
  payload: {},
155
155
  success: SyncState.SyncState,
156
- failure: UnknownError,
156
+ failure: Schema.Never,
157
157
  },
158
158
  ) {}
159
159
 
@@ -162,7 +162,7 @@ export class LeaderWorkerInnerSyncStateStream extends Schema.TaggedRequest<Leade
162
162
  {
163
163
  payload: {},
164
164
  success: SyncState.SyncState,
165
- failure: UnknownError,
165
+ failure: Schema.Never,
166
166
  },
167
167
  ) {}
168
168
 
@@ -171,7 +171,7 @@ export class LeaderWorkerInnerGetNetworkStatus extends Schema.TaggedRequest<Lead
171
171
  {
172
172
  payload: {},
173
173
  success: SyncBackend.NetworkStatus,
174
- failure: UnknownError,
174
+ failure: Schema.Never,
175
175
  },
176
176
  ) {}
177
177
 
@@ -180,14 +180,14 @@ export class LeaderWorkerInnerNetworkStatusStream extends Schema.TaggedRequest<L
180
180
  {
181
181
  payload: {},
182
182
  success: SyncBackend.NetworkStatus,
183
- failure: UnknownError,
183
+ failure: Schema.Never,
184
184
  },
185
185
  ) {}
186
186
 
187
187
  export class LeaderWorkerInnerShutdown extends Schema.TaggedRequest<LeaderWorkerInnerShutdown>()('Shutdown', {
188
188
  payload: {},
189
189
  success: Schema.Void,
190
- failure: UnknownError,
190
+ failure: Schema.Never,
191
191
  }) {}
192
192
 
193
193
  export class LeaderWorkerInnerExtraDevtoolsMessage extends Schema.TaggedRequest<LeaderWorkerInnerExtraDevtoolsMessage>()(
@@ -197,7 +197,7 @@ export class LeaderWorkerInnerExtraDevtoolsMessage extends Schema.TaggedRequest<
197
197
  message: Devtools.Leader.MessageToApp,
198
198
  },
199
199
  success: Schema.Void,
200
- failure: UnknownError,
200
+ failure: Schema.Never,
201
201
  },
202
202
  ) {}
203
203
 
@@ -241,7 +241,7 @@ export class SharedWorkerUpdateMessagePort extends Schema.TaggedRequest<SharedWo
241
241
  },
242
242
  ) {}
243
243
 
244
- export class SharedWorkerRequest extends Schema.Union(
244
+ export const SharedWorkerRequest = Schema.Union(
245
245
  SharedWorkerUpdateMessagePort,
246
246
 
247
247
  // Proxied requests
@@ -261,4 +261,5 @@ export class SharedWorkerRequest extends Schema.Union(
261
261
  LeaderWorkerInnerExtraDevtoolsMessage,
262
262
 
263
263
  WebmeshWorker.Schema.CreateConnection,
264
- ) {}
264
+ )
265
+ export type SharedWorkerRequest = typeof SharedWorkerRequest.Type