@livestore/adapter-web 0.4.0-dev.12 → 0.4.0-dev.14

@@ -200,21 +200,6 @@ export const makePersistedAdapter =
   const sharedWorkerFiber = yield* Worker.makePoolSerialized<typeof WorkerSchema.SharedWorkerRequest.Type>({
     size: 1,
     concurrency: 100,
-    initialMessage: () =>
-      new WorkerSchema.SharedWorkerInitialMessage({
-        liveStoreVersion,
-        payload: {
-          _tag: 'FromClientSession',
-          initialMessage: new WorkerSchema.LeaderWorkerInnerInitialMessage({
-            storageOptions,
-            storeId,
-            clientId,
-            devtoolsEnabled,
-            debugInstanceId,
-            syncPayload,
-          }),
-        },
-      }),
   }).pipe(
     Effect.provide(sharedWorkerContext),
     Effect.tapCauseLogPretty,
@@ -270,10 +255,25 @@ export const makePersistedAdapter =
   yield* workerDisconnectChannel.send(DedicatedWorkerDisconnectBroadcast.make({}))

   const sharedWorker = yield* Fiber.join(sharedWorkerFiber)
-  yield* sharedWorker.executeEffect(new WorkerSchema.SharedWorkerUpdateMessagePort({ port: mc.port2 })).pipe(
-    UnexpectedError.mapToUnexpectedError,
-    Effect.tapErrorCause((cause) => shutdown(Exit.failCause(cause))),
-  )
+  yield* sharedWorker
+    .executeEffect(
+      new WorkerSchema.SharedWorkerUpdateMessagePort({
+        port: mc.port2,
+        liveStoreVersion,
+        initial: new WorkerSchema.LeaderWorkerInnerInitialMessage({
+          storageOptions,
+          storeId,
+          clientId,
+          devtoolsEnabled,
+          debugInstanceId,
+          syncPayload,
+        }),
+      }),
+    )
+    .pipe(
+      UnexpectedError.mapToUnexpectedError,
+      Effect.tapErrorCause((cause) => shutdown(Exit.failCause(cause))),
+    )

   yield* Deferred.succeed(waitForSharedWorkerInitialized, undefined)

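The two-phase handshake (a SharedWorkerInitialMessage followed by SharedWorkerUpdateMessagePort) is collapsed here into a single UpdateMessagePort request that carries the full leader configuration. For orientation, a minimal sketch of the MessageChannel wiring this call presupposes; the worker URLs and the raw postMessage shapes are illustrative assumptions, not code from this package:

// Hypothetical wiring sketch (not package source): the client session creates a
// MessageChannel, keeps port1 on the dedicated (leader) worker side, and
// transfers port2 to the SharedWorker, which proxies requests to the leader.
const leaderWorker = new Worker(new URL('./livestore.worker.js', import.meta.url), { type: 'module' })
const sharedWorker = new SharedWorker(new URL('./livestore.shared-worker.js', import.meta.url), { type: 'module' })

const mc = new MessageChannel()
leaderWorker.postMessage({ _tag: 'port', port: mc.port1 }, [mc.port1])
sharedWorker.port.postMessage({ _tag: 'UpdateMessagePort', port: mc.port2 }, [mc.port2])
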
@@ -487,10 +487,10 @@ export const makePersistedAdapter =
     lockStatus,
     clientId,
     sessionId,
-    // isLeader: gotLocky, // TODO update when leader is changing
-    isLeader: true,
+    isLeader: gotLocky,
     leaderThread,
     webmeshMode: 'direct',
+    origin: typeof window !== 'undefined' ? window.location.origin : self.location.origin,
     connectWebmeshNode: ({ sessionInfo, webmeshNode }) =>
      connectWebmeshNodeClientSession({ webmeshNode, sessionInfo, sharedWorker, devtoolsEnabled, schema }),
     registerBeforeUnload: (onBeforeUnload) => {
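isLeader is now derived from the actual lock state (gotLocky) rather than hardcoded to true, and the session exposes its origin so it works in both window and worker scopes. A hypothetical sketch of how such a gotLocky flag can be obtained with the Web Locks API; the lock name and the keep-alive strategy are assumptions, not LiveStore's implementation:

// Assumption: leader election via navigator.locks (illustrative lock name).
const storeId = 'my-store' // illustrative
let gotLocky = false
const acquiredLeaderLock = new Promise<void>((resolve) => {
  void navigator.locks.request(`livestore-leader-${storeId}`, { mode: 'exclusive' }, async () => {
    gotLocky = true // this tab now acts as the leader
    resolve()
    // Hold the lock for the lifetime of the tab; when the tab closes,
    // the lock is released and another tab can become leader.
    await new Promise(() => {})
  })
})
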
@@ -210,26 +210,20 @@ export const LeaderWorkerInnerRequest = Schema.Union(
 )
 export type LeaderWorkerInnerRequest = typeof LeaderWorkerInnerRequest.Type

-export class SharedWorkerInitialMessagePayloadFromClientSession extends Schema.TaggedStruct('FromClientSession', {
-  initialMessage: LeaderWorkerInnerInitialMessage,
-}) {}
-
-export class SharedWorkerInitialMessage extends Schema.TaggedRequest<SharedWorkerInitialMessage>()('InitialMessage', {
-  payload: {
-    payload: Schema.Union(SharedWorkerInitialMessagePayloadFromClientSession, Schema.TaggedStruct('FromWebBridge', {})),
-    // To guard against scenarios where a client session is already running a newer version of LiveStore
-    // We should probably find a better way to handle those cases once they become more common.
-    liveStoreVersion: Schema.Literal(liveStoreVersion),
-  },
-  success: Schema.Void,
-  failure: UnexpectedError,
-}) {}
-
 export class SharedWorkerUpdateMessagePort extends Schema.TaggedRequest<SharedWorkerUpdateMessagePort>()(
   'UpdateMessagePort',
   {
     payload: {
       port: Transferable.MessagePort,
+      // Version gate to prevent mixed LiveStore builds talking to the same SharedWorker
+      liveStoreVersion: Schema.Literal(liveStoreVersion),
+      /**
+       * Initial configuration for the leader worker. This replaces the previous
+       * two-phase SharedWorker handshake and is sent under the tab lock by the
+       * elected leader. Subsequent calls can omit changes and will simply rebind
+       * the port (join) without reinitializing the store.
+       */
+      initial: LeaderWorkerInnerInitialMessage,
     },
     success: Schema.Void,
     failure: UnexpectedError,
@@ -237,7 +231,6 @@ export class SharedWorkerUpdateMessagePort extends Schema.TaggedRequest<SharedWo
 ) {}

 export class SharedWorkerRequest extends Schema.Union(
-  SharedWorkerInitialMessage,
   SharedWorkerUpdateMessagePort,

   // Proxied requests
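With SharedWorkerInitialMessage removed, the Schema.Literal(liveStoreVersion) field on UpdateMessagePort is now the only version gate. A small sketch of its failure mode, using illustrative version strings:

import { Schema } from 'effect'

// A literal schema only accepts the exact version string it was built with,
// so a client session running a different LiveStore build fails at decode time.
const VersionGate = Schema.Literal('0.4.0-dev.14')

const ok = Schema.decodeUnknownEither(VersionGate)('0.4.0-dev.14')
console.log(ok._tag) // 'Right'

const mismatch = Schema.decodeUnknownEither(VersionGate)('0.4.0-dev.12')
console.log(mismatch._tag) // 'Left' (the request is rejected before any handler runs)
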
@@ -1,4 +1,4 @@
-import { Devtools, UnexpectedError } from '@livestore/common'
+import { Devtools, liveStoreVersion, UnexpectedError } from '@livestore/common'
 import * as DevtoolsWeb from '@livestore/devtools-web-common/web-channel'
 import * as WebmeshWorker from '@livestore/devtools-web-common/worker'
 import { isDevEnv, isNotUndefined, LS_DEV } from '@livestore/utils'
@@ -60,10 +60,6 @@ const makeWorkerRunner = Effect.gen(function* () {
     | undefined
   >(undefined)

-  const initialMessagePayloadDeferredRef = yield* Deferred.make<
-    typeof WorkerSchema.SharedWorkerInitialMessagePayloadFromClientSession.Type
-  >().pipe(Effect.andThen(Ref.make))
-
   const waitForWorker = SubscriptionRef.waitUntil(leaderWorkerContextSubRef, isNotUndefined).pipe(
     Effect.map((_) => _.worker),
   )
@@ -155,62 +151,59 @@ const makeWorkerRunner = Effect.gen(function* () {
     }
   }).pipe(Effect.withSpan('@livestore/adapter-web:shared-worker:resetCurrentWorkerCtx'))

-  // const devtoolsWebBridge = yield* makeDevtoolsWebBridge
-
   const reset = Effect.gen(function* () {
     yield* Effect.logDebug('reset')
-
-    const initialMessagePayloadDeferred =
-      yield* Deferred.make<typeof WorkerSchema.SharedWorkerInitialMessagePayloadFromClientSession.Type>()
-    yield* Ref.set(initialMessagePayloadDeferredRef, initialMessagePayloadDeferred)
-
+    // Clear cached invariants so a fresh configuration can be accepted after shutdown
+    yield* Ref.set(invariantsRef, undefined)
+    // Tear down current leader worker context
     yield* resetCurrentWorkerCtx
-    // yield* devtoolsWebBridge.reset
   })

-  return WorkerRunner.layerSerialized(WorkerSchema.SharedWorkerRequest, {
-    InitialMessage: (message) =>
-      Effect.gen(function* () {
-        if (message.payload._tag === 'FromWebBridge') return
-
-        const initialMessagePayloadDeferred = yield* Ref.get(initialMessagePayloadDeferredRef)
-        const deferredAlreadyDone = yield* Deferred.isDone(initialMessagePayloadDeferred)
-        const initialMessage = message.payload.initialMessage
-
-        if (deferredAlreadyDone) {
-          const previousInitialMessage = yield* Deferred.await(initialMessagePayloadDeferred)
-          const messageSchema = WorkerSchema.LeaderWorkerInnerInitialMessage.pipe(
-            Schema.omit('devtoolsEnabled', 'debugInstanceId'),
-          )
-          const isEqual = Schema.equivalence(messageSchema)
-          if (isEqual(initialMessage, previousInitialMessage.initialMessage) === false) {
-            const diff = Schema.debugDiff(messageSchema)(previousInitialMessage.initialMessage, initialMessage)
+  // Cache first-applied invariants to enforce stability across leader transitions
+  const InvariantsSchema = Schema.Struct({
+    storeId: Schema.String,
+    storageOptions: WorkerSchema.StorageType,
+    syncPayload: Schema.UndefinedOr(Schema.JsonValue),
+    liveStoreVersion: Schema.Literal(liveStoreVersion),
+    devtoolsEnabled: Schema.Boolean,
+  })
+  type Invariants = typeof InvariantsSchema.Type
+  const invariantsRef = yield* Ref.make<Invariants | undefined>(undefined)
+  const sameInvariants = Schema.equivalence(InvariantsSchema)

-            return yield* new UnexpectedError({
-              cause: 'Initial message already sent and was different now',
-              payload: {
-                diff,
-                previousInitialMessage: previousInitialMessage.initialMessage,
-                newInitialMessage: initialMessage,
-              },
-            })
-          }
-        } else {
-          yield* Deferred.succeed(initialMessagePayloadDeferred, message.payload)
-        }
-      }),
+  return WorkerRunner.layerSerialized(WorkerSchema.SharedWorkerRequest, {
     // Whenever the client session leader changes (and thus creates a new leader thread), the new client session leader
     // sends a new MessagePort to the shared worker which proxies messages to the new leader thread.
-    UpdateMessagePort: ({ port }) =>
+    UpdateMessagePort: ({ port, initial, liveStoreVersion: clientLiveStoreVersion }) =>
       Effect.gen(function* () {
-        const initialMessagePayload = yield* initialMessagePayloadDeferredRef.get.pipe(Effect.andThen(Deferred.await))
+        // Enforce invariants: storeId, storageOptions, syncPayload, liveStoreVersion must remain stable
+        const invariants: Invariants = {
+          storeId: initial.storeId,
+          storageOptions: initial.storageOptions,
+          syncPayload: initial.syncPayload,
+          liveStoreVersion: clientLiveStoreVersion,
+          devtoolsEnabled: initial.devtoolsEnabled,
+        }
+        const prev = yield* Ref.get(invariantsRef)
+        // Early return on mismatch to keep happy path linear
+        if (prev !== undefined && !sameInvariants(prev, invariants)) {
+          const diff = Schema.debugDiff(InvariantsSchema)(prev, invariants)
+          return yield* new UnexpectedError({
+            cause: 'Store invariants changed across leader transitions',
+            payload: { diff, previous: prev, next: invariants },
+          })
+        }
+        // First writer records invariants
+        if (prev === undefined) {
+          yield* Ref.set(invariantsRef, invariants)
+        }

         yield* resetCurrentWorkerCtx

         const scope = yield* Scope.make()

         yield* Effect.gen(function* () {
-          const shutdownChannel = yield* makeShutdownChannel(initialMessagePayload.initialMessage.storeId)
+          const shutdownChannel = yield* makeShutdownChannel(initial.storeId)

           yield* shutdownChannel.listen.pipe(
             Stream.flatten(),
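The deferred-based handshake state is replaced by a first-writer-wins invariants cache: the first UpdateMessagePort records the configuration, and every later call must match it or fail. A standalone sketch of that pattern, with the invariants reduced to a plain storeId string for illustration (not package source):

import { Effect, Ref } from 'effect'

// First-writer-wins sketch: the first caller records the invariants;
// later callers must present an equal value or the effect fails.
const program = Effect.gen(function* () {
  const invariantsRef = yield* Ref.make<string | undefined>(undefined)

  const apply = (storeId: string) =>
    Effect.gen(function* () {
      const prev = yield* Ref.get(invariantsRef)
      if (prev !== undefined && prev !== storeId) {
        return yield* Effect.fail(`invariants changed: ${prev} -> ${storeId}`)
      }
      if (prev === undefined) {
        yield* Ref.set(invariantsRef, storeId)
      }
    })

  yield* apply('store-a') // records 'store-a'
  yield* apply('store-a') // accepted, matches the cached value
  yield* apply('store-b') // fails with the mismatch error
})

Effect.runPromise(program).catch(console.error)
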
@@ -225,7 +218,7 @@
           const worker = yield* Worker.makePoolSerialized<WorkerSchema.LeaderWorkerInnerRequest>({
             size: 1,
             concurrency: 100,
-            initialMessage: () => initialMessagePayload.initialMessage,
+            initialMessage: () => initial,
           }).pipe(
             Effect.provide(workerLayer),
             Effect.withSpan('@livestore/adapter-web:shared-worker:makeWorkerProxyFromPort'),
@@ -233,7 +226,7 @@

           // Prepare the web mesh connection for leader worker to be able to connect to the devtools
           const { node } = yield* WebmeshWorker.CacheService
-          const { storeId, clientId } = initialMessagePayload.initialMessage
+          const { storeId, clientId } = initial

           yield* DevtoolsWeb.connectViaWorker({
             node,