@livestore/common 0.3.0-dev.26 → 0.3.0-dev.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/adapter-types.d.ts +13 -12
  3. package/dist/adapter-types.d.ts.map +1 -1
  4. package/dist/adapter-types.js +5 -6
  5. package/dist/adapter-types.js.map +1 -1
  6. package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
  7. package/dist/devtools/devtools-messages-common.d.ts +13 -6
  8. package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
  9. package/dist/devtools/devtools-messages-common.js +6 -0
  10. package/dist/devtools/devtools-messages-common.js.map +1 -1
  11. package/dist/devtools/devtools-messages-leader.d.ts +25 -25
  12. package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
  13. package/dist/devtools/devtools-messages-leader.js +1 -2
  14. package/dist/devtools/devtools-messages-leader.js.map +1 -1
  15. package/dist/leader-thread/LeaderSyncProcessor.d.ts +29 -7
  16. package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
  17. package/dist/leader-thread/LeaderSyncProcessor.js +259 -199
  18. package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
  19. package/dist/leader-thread/apply-mutation.d.ts +14 -9
  20. package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
  21. package/dist/leader-thread/apply-mutation.js +43 -36
  22. package/dist/leader-thread/apply-mutation.js.map +1 -1
  23. package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
  24. package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
  25. package/dist/leader-thread/leader-worker-devtools.js +4 -5
  26. package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
  27. package/dist/leader-thread/make-leader-thread-layer.d.ts +15 -3
  28. package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
  29. package/dist/leader-thread/make-leader-thread-layer.js +29 -34
  30. package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
  31. package/dist/leader-thread/mod.d.ts +1 -1
  32. package/dist/leader-thread/mod.d.ts.map +1 -1
  33. package/dist/leader-thread/mod.js +1 -1
  34. package/dist/leader-thread/mod.js.map +1 -1
  35. package/dist/leader-thread/mutationlog.d.ts +19 -3
  36. package/dist/leader-thread/mutationlog.d.ts.map +1 -1
  37. package/dist/leader-thread/mutationlog.js +105 -12
  38. package/dist/leader-thread/mutationlog.js.map +1 -1
  39. package/dist/leader-thread/pull-queue-set.d.ts +1 -1
  40. package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
  41. package/dist/leader-thread/pull-queue-set.js +6 -16
  42. package/dist/leader-thread/pull-queue-set.js.map +1 -1
  43. package/dist/leader-thread/recreate-db.d.ts.map +1 -1
  44. package/dist/leader-thread/recreate-db.js +4 -3
  45. package/dist/leader-thread/recreate-db.js.map +1 -1
  46. package/dist/leader-thread/types.d.ts +34 -19
  47. package/dist/leader-thread/types.d.ts.map +1 -1
  48. package/dist/leader-thread/types.js.map +1 -1
  49. package/dist/rehydrate-from-mutationlog.d.ts +5 -4
  50. package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
  51. package/dist/rehydrate-from-mutationlog.js +7 -9
  52. package/dist/rehydrate-from-mutationlog.js.map +1 -1
  53. package/dist/schema/EventId.d.ts +9 -0
  54. package/dist/schema/EventId.d.ts.map +1 -1
  55. package/dist/schema/EventId.js +22 -2
  56. package/dist/schema/EventId.js.map +1 -1
  57. package/dist/schema/MutationEvent.d.ts +78 -25
  58. package/dist/schema/MutationEvent.d.ts.map +1 -1
  59. package/dist/schema/MutationEvent.js +25 -12
  60. package/dist/schema/MutationEvent.js.map +1 -1
  61. package/dist/schema/schema.js +1 -1
  62. package/dist/schema/schema.js.map +1 -1
  63. package/dist/schema/system-tables.d.ts +67 -0
  64. package/dist/schema/system-tables.d.ts.map +1 -1
  65. package/dist/schema/system-tables.js +12 -1
  66. package/dist/schema/system-tables.js.map +1 -1
  67. package/dist/sync/ClientSessionSyncProcessor.d.ts +9 -1
  68. package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
  69. package/dist/sync/ClientSessionSyncProcessor.js +25 -19
  70. package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
  71. package/dist/sync/sync.d.ts +6 -5
  72. package/dist/sync/sync.d.ts.map +1 -1
  73. package/dist/sync/sync.js.map +1 -1
  74. package/dist/sync/syncstate.d.ts +47 -71
  75. package/dist/sync/syncstate.d.ts.map +1 -1
  76. package/dist/sync/syncstate.js +136 -139
  77. package/dist/sync/syncstate.js.map +1 -1
  78. package/dist/sync/syncstate.test.js +203 -284
  79. package/dist/sync/syncstate.test.js.map +1 -1
  80. package/dist/version.d.ts +1 -1
  81. package/dist/version.js +1 -1
  82. package/package.json +2 -2
  83. package/src/adapter-types.ts +11 -13
  84. package/src/devtools/devtools-messages-common.ts +9 -0
  85. package/src/devtools/devtools-messages-leader.ts +1 -2
  86. package/src/leader-thread/LeaderSyncProcessor.ts +457 -351
  87. package/src/leader-thread/apply-mutation.ts +81 -71
  88. package/src/leader-thread/leader-worker-devtools.ts +5 -7
  89. package/src/leader-thread/make-leader-thread-layer.ts +60 -53
  90. package/src/leader-thread/mod.ts +1 -1
  91. package/src/leader-thread/mutationlog.ts +166 -13
  92. package/src/leader-thread/recreate-db.ts +4 -3
  93. package/src/leader-thread/types.ts +33 -23
  94. package/src/rehydrate-from-mutationlog.ts +12 -12
  95. package/src/schema/EventId.ts +26 -2
  96. package/src/schema/MutationEvent.ts +32 -16
  97. package/src/schema/schema.ts +1 -1
  98. package/src/schema/system-tables.ts +20 -1
  99. package/src/sync/ClientSessionSyncProcessor.ts +35 -23
  100. package/src/sync/sync.ts +6 -9
  101. package/src/sync/syncstate.test.ts +228 -315
  102. package/src/sync/syncstate.ts +202 -187
  103. package/src/version.ts +1 -1
  104. package/tmp/pack.tgz +0 -0
  105. package/src/leader-thread/pull-queue-set.ts +0 -67
@@ -1,15 +1,14 @@
  import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
- import type { HttpClient, Scope, Tracer } from '@livestore/utils/effect'
+ import type { HttpClient, Runtime, Scope, Tracer } from '@livestore/utils/effect'
  import {
    BucketQueue,
    Deferred,
    Effect,
    Exit,
    FiberHandle,
-   Option,
    OtelTracer,
+   Queue,
    ReadonlyArray,
-   Schema,
    Stream,
    Subscribable,
    SubscriptionRef,
@@ -18,30 +17,26 @@ import type * as otel from '@opentelemetry/api'

  import type { SqliteDb } from '../adapter-types.js'
  import { UnexpectedError } from '../adapter-types.js'
- import type { LiveStoreSchema, SessionChangesetMetaRow } from '../schema/mod.js'
+ import type { LiveStoreSchema } from '../schema/mod.js'
  import {
    EventId,
    getMutationDef,
-   MUTATION_LOG_META_TABLE,
+   LEADER_MERGE_COUNTER_TABLE,
    MutationEvent,
-   mutationLogMetaTable,
    SESSION_CHANGESET_META_TABLE,
  } from '../schema/mod.js'
- import { updateRows } from '../sql-queries/index.js'
  import { LeaderAheadError } from '../sync/sync.js'
  import * as SyncState from '../sync/syncstate.js'
  import { sql } from '../util.js'
- import { makeApplyMutation } from './apply-mutation.js'
- import { execSql } from './connection.js'
- import { getBackendHeadFromDb, getClientHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js'
- import type { InitialBlockingSyncContext, InitialSyncInfo, LeaderSyncProcessor } from './types.js'
+ import { rollback } from './apply-mutation.js'
+ import * as Mutationlog from './mutationlog.js'
+ import type { InitialBlockingSyncContext, LeaderSyncProcessor } from './types.js'
  import { LeaderThreadCtx } from './types.js'

- export const BACKEND_PUSH_BATCH_SIZE = 50
-
  type LocalPushQueueItem = [
    mutationEvent: MutationEvent.EncodedWithMeta,
    deferred: Deferred.Deferred<void, LeaderAheadError> | undefined,
+   /** Used to determine whether the batch has become invalid due to a rejected local push batch */
    generation: number,
  ]

@@ -52,40 +47,68 @@ type LocalPushQueueItem = [
   * In the LeaderSyncProcessor, pulling always has precedence over pushing.
   *
   * Responsibilities:
-  * - Queueing incoming local mutations in a localPushMailbox.
+  * - Queueing incoming local mutations in a localPushesQueue.
   * - Broadcasting mutations to client sessions via pull queues.
   * - Pushing mutations to the sync backend.
   *
   * Notes:
   *
   * local push processing:
-  * - localPushMailbox:
+  * - localPushesQueue:
   *   - Maintains events in ascending order.
   *   - Uses `Deferred` objects to resolve/reject events based on application success.
-  * - Processes events from the mailbox, applying mutations in batches.
+  * - Processes events from the queue, applying mutations in batches.
   * - Controlled by a `Latch` to manage execution flow.
   *   - The latch closes on pull receipt and re-opens post-pull completion.
   * - Processes up to `maxBatchSize` events per cycle.
   *
+  * Currently we're advancing the db read model and mutation log in lockstep, but we could also decouple this in the future.
+  *
+  * Tricky concurrency scenarios:
+  * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+  *   Solution: Introduce a generation number for local push batches which is used to filter out old batch items in case of rejection.
+  *
   */
  export const makeLeaderSyncProcessor = ({
    schema,
-   dbMissing,
+   dbMutationLogMissing,
    dbMutationLog,
-   clientId,
+   dbReadModel,
+   dbReadModelMissing,
    initialBlockingSyncContext,
    onError,
+   params,
+   testing,
  }: {
    schema: LiveStoreSchema
    /** Only used to know whether we can safely query dbMutationLog during setup execution */
-   dbMissing: boolean
+   dbMutationLogMissing: boolean
    dbMutationLog: SqliteDb
-   clientId: string
+   dbReadModel: SqliteDb
+   /** Only used to know whether we can safely query dbReadModel during setup execution */
+   dbReadModelMissing: boolean
    initialBlockingSyncContext: InitialBlockingSyncContext
    onError: 'shutdown' | 'ignore'
+   params: {
+     /**
+      * @default 10
+      */
+     localPushBatchSize?: number
+     /**
+      * @default 50
+      */
+     backendPushBatchSize?: number
+   }
+   testing: {
+     delays?: {
+       localPushProcessing?: Effect.Effect<void>
+     }
+   }
  }): Effect.Effect<LeaderSyncProcessor, UnexpectedError, Scope.Scope> =>
    Effect.gen(function* () {
-     const syncBackendQueue = yield* BucketQueue.make<MutationEvent.EncodedWithMeta>()
+     const syncBackendPushQueue = yield* BucketQueue.make<MutationEvent.EncodedWithMeta>()
+     const localPushBatchSize = params.localPushBatchSize ?? 10
+     const backendPushBatchSize = params.backendPushBatchSize ?? 50

      const syncStateSref = yield* SubscriptionRef.make<SyncState.SyncState | undefined>(undefined)

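Note: the new `params` knobs replace the removed `BACKEND_PUSH_BATCH_SIZE = 50` export, so downstream imports of that constant will break. A minimal TypeScript sketch of the defaulting behaviour visible in this hunk (standalone and illustrative only; `SyncParams` and `resolveBatchSizes` are hypothetical names, not part of the package source):

    // Mirrors the `params.localPushBatchSize ?? 10` / `params.backendPushBatchSize ?? 50` fallbacks above.
    type SyncParams = { localPushBatchSize?: number; backendPushBatchSize?: number }

    const resolveBatchSizes = (params: SyncParams) => ({
      localPushBatchSize: params.localPushBatchSize ?? 10, // @default 10
      backendPushBatchSize: params.backendPushBatchSize ?? 50, // @default 50
    })

    // resolveBatchSizes({}) -> { localPushBatchSize: 10, backendPushBatchSize: 50 }
    // resolveBatchSizes({ backendPushBatchSize: 100 }).backendPushBatchSize -> 100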
@@ -94,13 +117,20 @@ export const makeLeaderSyncProcessor = ({
        return mutationDef.options.clientOnly
      }

+     const connectedClientSessionPullQueues = yield* makePullQueueSet
+
      /**
       * Tracks generations of queued local push events.
-      * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+      * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
       * even if they would be valid on their own.
       */
+     // TODO get rid of this in favour of the `mergeGeneration` event id field
      const currentLocalPushGenerationRef = { current: 0 }

+     type MergeCounter = number
+     const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) }
+     const mergePayloads = new Map<MergeCounter, typeof SyncState.PayloadUpstream.Type>()
+
      // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
      const ctxRef = {
        current: undefined as
@@ -109,6 +139,7 @@ export const makeLeaderSyncProcessor = ({
            otelSpan: otel.Span | undefined
            span: Tracer.Span
            devtoolsLatch: Effect.Latch | undefined
+           runtime: Runtime.Runtime<LeaderThreadCtx>
          },
      }

@@ -116,23 +147,28 @@ export const makeLeaderSyncProcessor = ({
      const localPushesLatch = yield* Effect.makeLatch(true)
      const pullLatch = yield* Effect.makeLatch(true)

+     /**
+      * In addition to the `syncStateSref` we also need the `pushHeadRef` in order to prevent old/duplicate
+      * events from being pushed in a scenario like this:
+      * - client session A pushes e1
+      * - leader sync processor takes a bit and hasn't yet taken e1 from the localPushesQueue
+      * - client session B also pushes e1 (which should be rejected)
+      *
+      * Thus the purpose of the pushHeadRef is to guard the integrity of the local push queue
+      */
+     const pushHeadRef = { current: EventId.ROOT }
+     const advancePushHead = (eventId: EventId.EventId) => {
+       pushHeadRef.current = EventId.max(pushHeadRef.current, eventId)
+     }
+
+     // NOTE: New events are only pushed to the sync backend after successful local push processing
      const push: LeaderSyncProcessor['push'] = (newEvents, options) =>
        Effect.gen(function* () {
-         // TODO validate batch
          if (newEvents.length === 0) return

-         // if (options.generation < currentLocalPushGenerationRef.current) {
-         //   debugger
-         //   // We can safely drop this batch as it's from a previous push generation
-         //   return
-         // }
-
-         if (clientId === 'client-b') {
-           // console.log(
-           //   'push from client session',
-           //   newEvents.map((item) => item.toJSON()),
-           // )
-         }
+         yield* validatePushBatch(newEvents, pushHeadRef.current)
+
+         advancePushHead(newEvents.at(-1)!.id)

          const waitForProcessing = options?.waitForProcessing ?? false
          const generation = currentLocalPushGenerationRef.current
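Note: the new `validatePushBatch`/`advancePushHead` pair (defined at the end of this file) closes the race described in the comment above. A simplified, Effect-free sketch with event ids reduced to plain numbers (illustrative only; the real code compares structured ids via `EventId.isGreaterThanOrEqual` and fails with `LeaderAheadError`):

    let pushHead = 0 // analogous to pushHeadRef.current = EventId.ROOT

    const tryPush = (batch: number[]): boolean => {
      if (batch.length === 0) return true
      // Reject batches that don't strictly advance the push head, even while
      // earlier accepted batches are still sitting unprocessed in the queue.
      if (batch[0]! <= pushHead) return false
      pushHead = Math.max(pushHead, batch[batch.length - 1]!) // advancePushHead
      return true
    }

    tryPush([1]) // session A pushes e1 -> accepted, pushHead = 1
    tryPush([1]) // session B pushes e1 before A's push is processed -> rejected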
@@ -154,7 +190,7 @@ export const makeLeaderSyncProcessor = ({
          yield* BucketQueue.offerAll(localPushesQueue, items)
        }
      }).pipe(
-       Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
+       Effect.withSpan('@livestore/common:LeaderSyncProcessor:push', {
          attributes: {
            batchSize: newEvents.length,
            batch: TRACE_VERBOSE ? newEvents : undefined,
@@ -164,7 +200,7 @@ export const makeLeaderSyncProcessor = ({
      )

    const pushPartial: LeaderSyncProcessor['pushPartial'] = ({
-     mutationEvent: partialMutationEvent,
+     mutationEvent: { mutation, args },
      clientId,
      sessionId,
    }) =>
@@ -172,10 +208,11 @@ export const makeLeaderSyncProcessor = ({
        const syncState = yield* syncStateSref
        if (syncState === undefined) return shouldNeverHappen('Not initialized')

-       const mutationDef = getMutationDef(schema, partialMutationEvent.mutation)
+       const mutationDef = getMutationDef(schema, mutation)

        const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
-         ...partialMutationEvent,
+         mutation,
+         args,
          clientId,
          sessionId,
          ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
@@ -185,134 +222,178 @@ export const makeLeaderSyncProcessor = ({
      }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie))

    // Starts various background loops
-   const boot: LeaderSyncProcessor['boot'] = ({ dbReady }) =>
-     Effect.gen(function* () {
-       const span = yield* Effect.currentSpan.pipe(Effect.orDie)
-       const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
-       const { devtools, shutdownChannel } = yield* LeaderThreadCtx
-
-       ctxRef.current = {
-         otelSpan,
-         span,
-         devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
-       }
+   const boot: LeaderSyncProcessor['boot'] = Effect.gen(function* () {
+     const span = yield* Effect.currentSpan.pipe(Effect.orDie)
+     const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
+     const { devtools, shutdownChannel } = yield* LeaderThreadCtx
+     const runtime = yield* Effect.runtime<LeaderThreadCtx>()
+
+     ctxRef.current = {
+       otelSpan,
+       span,
+       devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+       runtime,
+     }

-       const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbMutationLog)
-       const initialLocalHead = dbMissing ? EventId.ROOT : getClientHeadFromDb(dbMutationLog)
+     const initialLocalHead = dbMutationLogMissing ? EventId.ROOT : Mutationlog.getClientHeadFromDb(dbMutationLog)

-       if (initialBackendHead > initialLocalHead.global) {
-         return shouldNeverHappen(
-           `During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`,
-         )
-       }
+     const initialBackendHead = dbMutationLogMissing
+       ? EventId.ROOT.global
+       : Mutationlog.getBackendHeadFromDb(dbMutationLog)

-       const pendingMutationEvents = yield* getMutationEventsSince({
-         global: initialBackendHead,
-         client: EventId.clientDefault,
-       }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))))
-
-       const initialSyncState = new SyncState.SyncState({
-         pending: pendingMutationEvents,
-         // On the leader we don't need a rollback tail beyond `pending` items
-         rollbackTail: [],
-         upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
-         localHead: initialLocalHead,
-       })
+     if (initialBackendHead > initialLocalHead.global) {
+       return shouldNeverHappen(
+         `During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`,
+       )
+     }
+
+     const pendingMutationEvents = dbMutationLogMissing
+       ? []
+       : yield* Mutationlog.getMutationEventsSince({ global: initialBackendHead, client: EventId.clientDefault })

-       /** State transitions need to happen atomically, so we use a Ref to track the state */
-       yield* SubscriptionRef.set(syncStateSref, initialSyncState)
+     const initialSyncState = new SyncState.SyncState({
+       pending: pendingMutationEvents,
+       upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
+       localHead: initialLocalHead,
+     })

-       // Rehydrate sync queue
-       if (pendingMutationEvents.length > 0) {
-         const filteredBatch = pendingMutationEvents
-           // Don't sync clientOnly mutations
-           .filter((mutationEventEncoded) => {
-             const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation)
-             return mutationDef.options.clientOnly === false
-           })
+     /** State transitions need to happen atomically, so we use a Ref to track the state */
+     yield* SubscriptionRef.set(syncStateSref, initialSyncState)

-         yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
+     // Rehydrate sync queue
+     if (pendingMutationEvents.length > 0) {
+       const globalPendingMutationEvents = pendingMutationEvents
+         // Don't sync clientOnly mutations
+         .filter((mutationEventEncoded) => {
+           const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation)
+           return mutationDef.options.clientOnly === false
+         })
+
+       if (globalPendingMutationEvents.length > 0) {
+         yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingMutationEvents)
        }
+     }

-     const shutdownOnError = (cause: unknown) =>
+     const shutdownOnError = (cause: unknown) =>
+       Effect.gen(function* () {
+         if (onError === 'shutdown') {
+           yield* shutdownChannel.send(UnexpectedError.make({ cause }))
+           yield* Effect.die(cause)
+         }
+       })
+
+     yield* backgroundApplyLocalPushes({
+       localPushesLatch,
+       localPushesQueue,
+       pullLatch,
+       syncStateSref,
+       syncBackendPushQueue,
+       schema,
+       isClientEvent,
+       otelSpan,
+       currentLocalPushGenerationRef,
+       connectedClientSessionPullQueues,
+       mergeCounterRef,
+       mergePayloads,
+       localPushBatchSize,
+       testing: {
+         delay: testing?.delays?.localPushProcessing,
+       },
+     }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+     const backendPushingFiberHandle = yield* FiberHandle.make()
+     const backendPushingEffect = backgroundBackendPushing({
+       syncBackendPushQueue,
+       otelSpan,
+       devtoolsLatch: ctxRef.current?.devtoolsLatch,
+       backendPushBatchSize,
+     }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError))
+
+     yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect)
+
+     yield* backgroundBackendPulling({
+       initialBackendHead,
+       isClientEvent,
+       restartBackendPushing: (filteredRebasedPending) =>
          Effect.gen(function* () {
-           if (onError === 'shutdown') {
-             yield* shutdownChannel.send(UnexpectedError.make({ cause }))
-             yield* Effect.die(cause)
+           // Stop current pushing fiber
+           yield* FiberHandle.clear(backendPushingFiberHandle)
+
+           // Reset the sync backend push queue
+           yield* BucketQueue.clear(syncBackendPushQueue)
+           yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending)
+
+           // Restart pushing fiber
+           yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect)
+         }),
+       syncStateSref,
+       localPushesLatch,
+       pullLatch,
+       otelSpan,
+       initialBlockingSyncContext,
+       devtoolsLatch: ctxRef.current?.devtoolsLatch,
+       connectedClientSessionPullQueues,
+       mergeCounterRef,
+       mergePayloads,
+       advancePushHead,
+     }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+     return { initialLeaderHead: initialLocalHead }
+   }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'))
+
+   const pull: LeaderSyncProcessor['pull'] = ({ cursor }) =>
+     Effect.gen(function* () {
+       const queue = yield* pullQueue({ cursor })
+       return Stream.fromQueue(queue)
+     }).pipe(Stream.unwrapScoped)
+
+   const pullQueue: LeaderSyncProcessor['pullQueue'] = ({ cursor }) => {
+     const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized')
+     return Effect.gen(function* () {
+       const queue = yield* connectedClientSessionPullQueues.makeQueue
+       const payloadsSinceCursor = Array.from(mergePayloads.entries())
+         .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+         .filter(({ mergeCounter }) => mergeCounter > cursor.mergeCounter)
+         .toSorted((a, b) => a.mergeCounter - b.mergeCounter)
+         .map(({ payload, mergeCounter }) => {
+           if (payload._tag === 'upstream-advance') {
+             return {
+               payload: {
+                 _tag: 'upstream-advance' as const,
+                 newEvents: ReadonlyArray.dropWhile(payload.newEvents, (mutationEventEncoded) =>
+                   EventId.isGreaterThanOrEqual(cursor.eventId, mutationEventEncoded.id),
+                 ),
+               },
+               mergeCounter,
+             }
+           } else {
+             return { payload, mergeCounter }
            }
          })

-     yield* backgroundApplyLocalPushes({
-       localPushesLatch,
-       localPushesQueue,
-       pullLatch,
-       syncStateSref,
-       syncBackendQueue,
-       schema,
-       isClientEvent,
-       otelSpan,
-       currentLocalPushGenerationRef,
-     }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
-
-     const backendPushingFiberHandle = yield* FiberHandle.make()
-
-     yield* FiberHandle.run(
-       backendPushingFiberHandle,
-       backgroundBackendPushing({
-         dbReady,
-         syncBackendQueue,
-         otelSpan,
-         devtoolsLatch: ctxRef.current?.devtoolsLatch,
-       }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-     )
+       yield* queue.offerAll(payloadsSinceCursor)

-     yield* backgroundBackendPulling({
-       dbReady,
-       initialBackendHead,
-       isClientEvent,
-       restartBackendPushing: (filteredRebasedPending) =>
-         Effect.gen(function* () {
-           // Stop current pushing fiber
-           yield* FiberHandle.clear(backendPushingFiberHandle)
-
-           // Reset the sync queue
-           yield* BucketQueue.clear(syncBackendQueue)
-           yield* BucketQueue.offerAll(syncBackendQueue, filteredRebasedPending)
-
-           // Restart pushing fiber
-           yield* FiberHandle.run(
-             backendPushingFiberHandle,
-             backgroundBackendPushing({
-               dbReady,
-               syncBackendQueue,
-               otelSpan,
-               devtoolsLatch: ctxRef.current?.devtoolsLatch,
-             }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-           )
-         }),
-       syncStateSref,
-       localPushesLatch,
-       pullLatch,
-       otelSpan,
-       initialBlockingSyncContext,
-       devtoolsLatch: ctxRef.current?.devtoolsLatch,
-     }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+       return queue
+     }).pipe(Effect.provide(runtime))
+   }

-     return { initialLeaderHead: initialLocalHead }
-   }).pipe(Effect.withSpanScoped('@livestore/common:leader-thread:syncing'))
+   const syncState = Subscribable.make({
+     get: Effect.gen(function* () {
+       const syncState = yield* syncStateSref
+       if (syncState === undefined) return shouldNeverHappen('Not initialized')
+       return syncState
+     }),
+     changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+   })

    return {
+     pull,
+     pullQueue,
      push,
      pushPartial,
      boot,
-     syncState: Subscribable.make({
-       get: Effect.gen(function* () {
-         const syncState = yield* syncStateSref
-         if (syncState === undefined) return shouldNeverHappen('Not initialized')
-         return syncState
-       }),
-       changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
-     }),
+     syncState,
+     getMergeCounter: () => mergeCounterRef.current,
    } satisfies LeaderSyncProcessor
  })

@@ -321,30 +402,41 @@ const backgroundApplyLocalPushes = ({
    localPushesQueue,
    pullLatch,
    syncStateSref,
-   syncBackendQueue,
+   syncBackendPushQueue,
    schema,
    isClientEvent,
    otelSpan,
    currentLocalPushGenerationRef,
+   connectedClientSessionPullQueues,
+   mergeCounterRef,
+   mergePayloads,
+   localPushBatchSize,
+   testing,
  }: {
    pullLatch: Effect.Latch
    localPushesLatch: Effect.Latch
    localPushesQueue: BucketQueue.BucketQueue<LocalPushQueueItem>
    syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
-   syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+   syncBackendPushQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
    schema: LiveStoreSchema
    isClientEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
    otelSpan: otel.Span | undefined
    currentLocalPushGenerationRef: { current: number }
+   connectedClientSessionPullQueues: PullQueueSet
+   mergeCounterRef: { current: number }
+   mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
+   localPushBatchSize: number
+   testing: {
+     delay: Effect.Effect<void> | undefined
+   }
  }) =>
    Effect.gen(function* () {
-     const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx
-
-     const applyMutationItems = yield* makeApplyMutationItems
-
      while (true) {
-       // TODO make batch size configurable
-       const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10)
+       if (testing.delay !== undefined) {
+         yield* testing.delay.pipe(Effect.withSpan('localPushProcessingDelay'))
+       }
+
+       const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, localPushBatchSize)

        // Wait for the backend pulling to finish
        yield* localPushesLatch.await
@@ -377,9 +469,11 @@ const backgroundApplyLocalPushes = ({
          isEqualEvent: MutationEvent.isEqualEncoded,
        })

+       const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
        switch (mergeResult._tag) {
          case 'unexpected-error': {
-           otelSpan?.addEvent('local-push:unexpected-error', {
+           otelSpan?.addEvent(`[${mergeCounter}]:push:unexpected-error`, {
              batchSize: newEvents.length,
              newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
            })
@@ -389,15 +483,12 @@ const backgroundApplyLocalPushes = ({
            return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
          }
          case 'reject': {
-           otelSpan?.addEvent('local-push:reject', {
+           otelSpan?.addEvent(`[${mergeCounter}]:push:reject`, {
              batchSize: newEvents.length,
              mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
            })

-           /*
-
-           TODO: how to test this?
-           */
+           // TODO: how to test this?
            currentLocalPushGenerationRef.current++

            const nextGeneration = currentLocalPushGenerationRef.current
@@ -411,7 +502,8 @@ const backgroundApplyLocalPushes = ({
              (item) => item[2] >= nextGeneration,
            )

-           if ((yield* BucketQueue.size(localPushesQueue)) > 0) {
+           // TODO we still need to better understand and handle this scenario
+           if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
              console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue))
              debugger
            }
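Note: the rejection path above bumps `currentLocalPushGenerationRef` and then drops every queued item still carrying an older generation (`item[2]` is the `generation` slot of the `LocalPushQueueItem` tuple). A plain-array sketch of that filter (illustrative only):

    // item = [mutationEvent, deferred, generation], matching LocalPushQueueItem.
    type Item = [event: string, deferred: undefined, generation: number]

    let currentGeneration = 0
    let queue: Item[] = [
      ['e2', undefined, 0],
      ['e3', undefined, 0],
    ]

    // A generation-0 batch was rejected:
    currentGeneration++
    const nextGeneration = currentGeneration

    // Items queued under the old generation are invalid even if individually fine;
    // only items pushed after the rejection (generation >= 1) survive.
    queue = queue.filter((item) => item[2] >= nextGeneration) // -> []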
@@ -449,16 +541,13 @@ const backgroundApplyLocalPushes = ({

        yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)

-       if (clientId === 'client-b') {
-         // yield* Effect.log('offer upstream-advance due to local-push')
-         // debugger
-       }
        yield* connectedClientSessionPullQueues.offer({
-         payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents },
-         remaining: 0,
+         payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+         mergeCounter,
        })
+       mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))

-       otelSpan?.addEvent('local-push', {
+       otelSpan?.addEvent(`[${mergeCounter}]:push:advance`, {
          batchSize: newEvents.length,
          mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
        })
@@ -469,67 +558,65 @@ const backgroundApplyLocalPushes = ({
          return mutationDef.options.clientOnly === false
        })

-       yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
+       yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch)

-       yield* applyMutationItems({ batchItems: newEvents, deferreds })
+       yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds })

        // Allow the backend pulling to start
        yield* pullLatch.open
      }
    })

- type ApplyMutationItems = (_: {
+ type ApplyMutationsBatch = (_: {
    batchItems: ReadonlyArray<MutationEvent.EncodedWithMeta>
-   /** Indexes are aligned with `batchItems` */
+   /**
+    * The deferreds are used by the caller to know when the mutation has been processed.
+    * Indexes are aligned with `batchItems`
+    */
    deferreds: ReadonlyArray<Deferred.Deferred<void, LeaderAheadError> | undefined> | undefined
- }) => Effect.Effect<void, UnexpectedError>
+ }) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx>

  // TODO how to handle errors gracefully
- const makeApplyMutationItems: Effect.Effect<ApplyMutationItems, UnexpectedError, LeaderThreadCtx | Scope.Scope> =
+ const applyMutationsBatch: ApplyMutationsBatch = ({ batchItems, deferreds }) =>
    Effect.gen(function* () {
-     const leaderThreadCtx = yield* LeaderThreadCtx
-     const { dbReadModel: db, dbMutationLog } = leaderThreadCtx
+     const { dbReadModel: db, dbMutationLog, applyMutation } = yield* LeaderThreadCtx

-     const applyMutation = yield* makeApplyMutation
+     // NOTE We always start a transaction to ensure consistency between db and mutation log (even for single-item batches)
+     db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
+     dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction

-     return ({ batchItems, deferreds }) =>
+     yield* Effect.addFinalizer((exit) =>
        Effect.gen(function* () {
-         db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
-         dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
+         if (Exit.isSuccess(exit)) return

-         yield* Effect.addFinalizer((exit) =>
-           Effect.gen(function* () {
-             if (Exit.isSuccess(exit)) return
-
-             // Rollback in case of an error
-             db.execute('ROLLBACK', undefined)
-             dbMutationLog.execute('ROLLBACK', undefined)
-           }),
-         )
+         // Rollback in case of an error
+         db.execute('ROLLBACK', undefined)
+         dbMutationLog.execute('ROLLBACK', undefined)
+       }),
+     )

-         for (let i = 0; i < batchItems.length; i++) {
-           yield* applyMutation(batchItems[i]!)
+     for (let i = 0; i < batchItems.length; i++) {
+       const { sessionChangeset } = yield* applyMutation(batchItems[i]!)
+       batchItems[i]!.meta.sessionChangeset = sessionChangeset

-           if (deferreds?.[i] !== undefined) {
-             yield* Deferred.succeed(deferreds[i]!, void 0)
-           }
-         }
+       if (deferreds?.[i] !== undefined) {
+         yield* Deferred.succeed(deferreds[i]!, void 0)
+       }
+     }

-         db.execute('COMMIT', undefined) // Commit the transaction
-         dbMutationLog.execute('COMMIT', undefined) // Commit the transaction
-       }).pipe(
-         Effect.uninterruptible,
-         Effect.scoped,
-         Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems', {
-           attributes: { count: batchItems.length },
-         }),
-         Effect.tapCauseLogPretty,
-         UnexpectedError.mapToUnexpectedError,
-       )
-   })
+     db.execute('COMMIT', undefined) // Commit the transaction
+     dbMutationLog.execute('COMMIT', undefined) // Commit the transaction
+   }).pipe(
+     Effect.uninterruptible,
+     Effect.scoped,
+     Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyMutationItems', {
+       attributes: { batchSize: batchItems.length },
+     }),
+     Effect.tapCauseLogPretty,
+     UnexpectedError.mapToUnexpectedError,
+   )
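Note: `applyMutationsBatch` now brackets every batch, even a single-item one, in a pair of transactions so the read-model db and the mutation log either both advance or both roll back. The same pattern, sketched against two plain handles with an `execute` method (illustrative only; `applyBatchAtomically` and the handle shapes are hypothetical, not the package's API):

    const applyBatchAtomically = (
      db: { execute: (sql: string) => void }, // stands in for dbReadModel
      log: { execute: (sql: string) => void }, // stands in for dbMutationLog
      applyBatch: () => void,
    ) => {
      db.execute('BEGIN TRANSACTION')
      log.execute('BEGIN TRANSACTION')
      try {
        applyBatch() // apply every mutation of the batch to both databases
        db.execute('COMMIT')
        log.execute('COMMIT')
      } catch (cause) {
        // counterpart of the Effect.addFinalizer branch above: roll both back together
        db.execute('ROLLBACK')
        log.execute('ROLLBACK')
        throw cause
      }
    }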

  const backgroundBackendPulling = ({
-   dbReady,
    initialBackendHead,
    isClientEvent,
    restartBackendPushing,
@@ -539,8 +626,11 @@ const backgroundBackendPulling = ({
    pullLatch,
    devtoolsLatch,
    initialBlockingSyncContext,
+   connectedClientSessionPullQueues,
+   mergeCounterRef,
+   mergePayloads,
+   advancePushHead,
  }: {
-   dbReady: Deferred.Deferred<void>
    initialBackendHead: EventId.GlobalEventId
    isClientEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
    restartBackendPushing: (
@@ -552,23 +642,16 @@ const backgroundBackendPulling = ({
    pullLatch: Effect.Latch
    devtoolsLatch: Effect.Latch | undefined
    initialBlockingSyncContext: InitialBlockingSyncContext
+   connectedClientSessionPullQueues: PullQueueSet
+   mergeCounterRef: { current: number }
+   mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
+   advancePushHead: (eventId: EventId.EventId) => void
  }) =>
    Effect.gen(function* () {
-     const {
-       syncBackend,
-       dbReadModel: db,
-       dbMutationLog,
-       connectedClientSessionPullQueues,
-       schema,
-       clientId,
-     } = yield* LeaderThreadCtx
+     const { syncBackend, dbReadModel: db, dbMutationLog, schema } = yield* LeaderThreadCtx

      if (syncBackend === undefined) return

-     const cursorInfo = yield* getCursorInfo(initialBackendHead)
-
-     const applyMutationItems = yield* makeApplyMutationItems
-
      const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
        Effect.gen(function* () {
          if (newEvents.length === 0) return
@@ -586,20 +669,20 @@ const backgroundBackendPulling = ({
          const syncState = yield* syncStateSref
          if (syncState === undefined) return shouldNeverHappen('Not initialized')

-         const trimRollbackUntil = newEvents.at(-1)!.id
-
          const mergeResult = SyncState.merge({
            syncState,
-           payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
+           payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
            isClientEvent,
            isEqualEvent: MutationEvent.isEqualEncoded,
            ignoreClientEvents: true,
          })

+         const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
          if (mergeResult._tag === 'reject') {
            return shouldNeverHappen('The leader thread should never reject upstream advances')
          } else if (mergeResult._tag === 'unexpected-error') {
-           otelSpan?.addEvent('backend-pull:unexpected-error', {
+           otelSpan?.addEvent(`[${mergeCounter}]:pull:unexpected-error`, {
              newEventsCount: newEvents.length,
              newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
            })
@@ -608,62 +691,81 @@ const backgroundBackendPulling = ({

          const newBackendHead = newEvents.at(-1)!.id

-         updateBackendHead(dbMutationLog, newBackendHead)
+         Mutationlog.updateBackendHead(dbMutationLog, newBackendHead)

          if (mergeResult._tag === 'rebase') {
-           otelSpan?.addEvent('backend-pull:rebase', {
+           otelSpan?.addEvent(`[${mergeCounter}]:pull:rebase`, {
              newEventsCount: newEvents.length,
              newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
-             rollbackCount: mergeResult.eventsToRollback.length,
+             rollbackCount: mergeResult.rollbackEvents.length,
              mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
            })

-           const filteredRebasedPending = mergeResult.newSyncState.pending.filter((mutationEvent) => {
+           const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((mutationEvent) => {
              const mutationDef = getMutationDef(schema, mutationEvent.mutation)
              return mutationDef.options.clientOnly === false
            })
-           yield* restartBackendPushing(filteredRebasedPending)
+           yield* restartBackendPushing(globalRebasedPendingEvents)

-           if (mergeResult.eventsToRollback.length > 0) {
-             yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.eventsToRollback.map((_) => _.id) })
+           if (mergeResult.rollbackEvents.length > 0) {
+             yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) })
            }

            yield* connectedClientSessionPullQueues.offer({
-             payload: {
-               _tag: 'upstream-rebase',
+             payload: SyncState.PayloadUpstreamRebase.make({
                newEvents: mergeResult.newEvents,
-               rollbackUntil: mergeResult.eventsToRollback.at(0)!.id,
-               trimRollbackUntil,
-             },
-             remaining,
+               rollbackEvents: mergeResult.rollbackEvents,
+             }),
+             mergeCounter,
            })
+           mergePayloads.set(
+             mergeCounter,
+             SyncState.PayloadUpstreamRebase.make({
+               newEvents: mergeResult.newEvents,
+               rollbackEvents: mergeResult.rollbackEvents,
+             }),
+           )
          } else {
-           otelSpan?.addEvent('backend-pull:advance', {
+           otelSpan?.addEvent(`[${mergeCounter}]:pull:advance`, {
              newEventsCount: newEvents.length,
              mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
            })

-           if (clientId === 'client-b') {
-             // yield* Effect.log('offer upstream-advance due to pull')
-           }
            yield* connectedClientSessionPullQueues.offer({
-             payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents, trimRollbackUntil },
-             remaining,
+             payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+             mergeCounter,
            })
+           mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))
+
+           if (mergeResult.confirmedEvents.length > 0) {
+             // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
+             // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
+             const confirmedNewEvents = newEvents.filter((mutationEvent) =>
+               mergeResult.confirmedEvents.some((confirmedEvent) =>
+                 EventId.isEqual(mutationEvent.id, confirmedEvent.id),
+               ),
+             )
+             yield* Mutationlog.updateSyncMetadata(confirmedNewEvents)
+           }
          }

+         // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
          trimChangesetRows(db, newBackendHead)

-         yield* applyMutationItems({ batchItems: mergeResult.newEvents, deferreds: undefined })
+         advancePushHead(mergeResult.newSyncState.localHead)
+
+         yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined })

          yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)

+         // Allow local pushes to be processed again
          if (remaining === 0) {
-           // Allow local pushes to be processed again
            yield* localPushesLatch.open
          }
        })

+     const cursorInfo = yield* Mutationlog.getSyncBackendCursorInfo(initialBackendHead)
+
      yield* syncBackend.pull(cursorInfo).pipe(
        // TODO only take from queue while connected
        Stream.tap(({ batch, remaining }) =>
@@ -675,16 +777,13 @@ const backgroundBackendPulling = ({
          //   },
          // })

-         // Wait for the db to be initially created
-         yield* dbReady
-
          // NOTE we only want to process mutations when the sync backend is connected
          // (e.g. needed for simulating being offline)
          // TODO remove when there's a better way to handle this in stream above
          yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

          yield* onNewPullChunk(
-           batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)),
+           batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded, _.metadata)),
            remaining,
          )

@@ -694,102 +793,27 @@ const backgroundBackendPulling = ({
        Stream.runDrain,
        Effect.interruptible,
      )
-   }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pulling'))
-
- const rollback = ({
-   db,
-   dbMutationLog,
-   eventIdsToRollback,
- }: {
-   db: SqliteDb
-   dbMutationLog: SqliteDb
-   eventIdsToRollback: EventId.EventId[]
- }) =>
-   Effect.gen(function* () {
-     const rollbackEvents = db
-       .select<SessionChangesetMetaRow>(
-         sql`SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`,
-       )
-       .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
-       .sort((a, b) => EventId.compare(a.id, b.id))
-     // TODO bring back `.toSorted` once Expo supports it
-     // .toSorted((a, b) => EventId.compare(a.id, b.id))
-
-     // Apply changesets in reverse order
-     for (let i = rollbackEvents.length - 1; i >= 0; i--) {
-       const { changeset } = rollbackEvents[i]!
-       if (changeset !== null) {
-         db.makeChangeset(changeset).invert().apply()
-       }
-     }
-
-     const eventIdPairChunks = ReadonlyArray.chunksOf(100)(
-       eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`),
-     )
-
-     // Delete the changeset rows
-     for (const eventIdPairChunk of eventIdPairChunks) {
-       db.execute(
-         sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-       )
-     }
-
-     // Delete the mutation log rows
-     for (const eventIdPairChunk of eventIdPairChunks) {
-       dbMutationLog.execute(
-         sql`DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-       )
-     }
-   }).pipe(
-     Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
-       attributes: { count: eventIdsToRollback.length },
-     }),
-   )
-
- const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
-   Effect.gen(function* () {
-     const { dbMutationLog } = yield* LeaderThreadCtx
-
-     if (remoteHead === EventId.ROOT.global) return Option.none()
-
-     const MutationlogQuerySchema = Schema.Struct({
-       syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
-     }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head)
-
-     const syncMetadataOption = yield* Effect.sync(() =>
-       dbMutationLog.select<{ syncMetadataJson: string }>(
-         sql`SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`,
-       ),
-     ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)
-
-     return Option.some({
-       cursor: { global: remoteHead, client: EventId.clientDefault },
-       metadata: syncMetadataOption,
-     }) satisfies InitialSyncInfo
-   }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
+   }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'))

  const backgroundBackendPushing = ({
-   dbReady,
-   syncBackendQueue,
+   syncBackendPushQueue,
    otelSpan,
    devtoolsLatch,
+   backendPushBatchSize,
  }: {
-   dbReady: Deferred.Deferred<void>
-   syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+   syncBackendPushQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
    otelSpan: otel.Span | undefined
    devtoolsLatch: Effect.Latch | undefined
+   backendPushBatchSize: number
  }) =>
    Effect.gen(function* () {
-     const { syncBackend, dbMutationLog } = yield* LeaderThreadCtx
+     const { syncBackend } = yield* LeaderThreadCtx
      if (syncBackend === undefined) return

-     yield* dbReady
-
      while (true) {
        yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

-       // TODO make batch size configurable
-       const queueItems = yield* BucketQueue.takeBetween(syncBackendQueue, 1, BACKEND_PUSH_BATCH_SIZE)
+       const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, backendPushBatchSize)

        yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

@@ -813,27 +837,109 @@ const backgroundBackendPushing = ({
          // wait for interrupt caused by background pulling which will then restart pushing
          return yield* Effect.never
        }
-
-       const { metadata } = pushResult.right
-
-       // TODO try to do this in a single query
-       for (let i = 0; i < queueItems.length; i++) {
-         const mutationEventEncoded = queueItems[i]!
-         yield* execSql(
-           dbMutationLog,
-           ...updateRows({
-             tableName: MUTATION_LOG_META_TABLE,
-             columns: mutationLogMetaTable.sqliteDef.columns,
-             where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
-             updateValues: { syncMetadataJson: metadata[i]! },
-           }),
-         )
-       }
      }
- }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'))
+ }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'))

  const trimChangesetRows = (db: SqliteDb, newHead: EventId.EventId) => {
    // Since we're using the session changeset rows to query for the current head,
    // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
    db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
  }
+
+ interface PullQueueSet {
+   makeQueue: Effect.Effect<
+     Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>,
+     UnexpectedError,
+     Scope.Scope | LeaderThreadCtx
+   >
+   offer: (item: {
+     payload: typeof SyncState.PayloadUpstream.Type
+     mergeCounter: number
+   }) => Effect.Effect<void, UnexpectedError>
+ }
+
+ const makePullQueueSet = Effect.gen(function* () {
+   const set = new Set<Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>>()
+
+   yield* Effect.addFinalizer(() =>
+     Effect.gen(function* () {
+       for (const queue of set) {
+         yield* Queue.shutdown(queue)
+       }
+
+       set.clear()
+     }),
+   )
+
+   const makeQueue: PullQueueSet['makeQueue'] = Effect.gen(function* () {
+     const queue = yield* Queue.unbounded<{
+       payload: typeof SyncState.PayloadUpstream.Type
+       mergeCounter: number
+     }>().pipe(Effect.acquireRelease(Queue.shutdown))
+
+     yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)))
+
+     set.add(queue)
+
+     return queue
+   })
+
+   const offer: PullQueueSet['offer'] = (item) =>
+     Effect.gen(function* () {
+       // Short-circuit if the payload is an empty upstream advance
+       if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
+         return
+       }
+
+       for (const queue of set) {
+         yield* Queue.offer(queue, item)
+       }
+     })
+
+   return {
+     makeQueue,
+     offer,
+   }
+ })
+
+ const incrementMergeCounter = (mergeCounterRef: { current: number }) =>
+   Effect.gen(function* () {
+     const { dbReadModel } = yield* LeaderThreadCtx
+     mergeCounterRef.current++
+     dbReadModel.execute(
+       sql`INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`,
+     )
+     return mergeCounterRef.current
+   })
+
+ const getMergeCounterFromDb = (dbReadModel: SqliteDb) =>
+   Effect.gen(function* () {
+     const result = dbReadModel.select<{ mergeCounter: number }>(
+       sql`SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`,
+     )
+     return result[0]?.mergeCounter ?? 0
+   })
+
+ const validatePushBatch = (batch: ReadonlyArray<MutationEvent.EncodedWithMeta>, pushHead: EventId.EventId) =>
+   Effect.gen(function* () {
+     if (batch.length === 0) {
+       return
+     }
+
+     // Make sure batch is monotonically increasing
+     for (let i = 1; i < batch.length; i++) {
+       if (EventId.isGreaterThanOrEqual(batch[i - 1]!.id, batch[i]!.id)) {
+         shouldNeverHappen(
+           `Events must be ordered in monotonically ascending order by eventId. Received: [${batch.map((e) => EventId.toString(e.id)).join(', ')}]`,
+         )
+       }
+     }
+
+     // Make sure smallest event id is > pushHead
+     if (EventId.isGreaterThanOrEqual(pushHead, batch[0]!.id)) {
+       return yield* LeaderAheadError.make({
+         minimumExpectedId: pushHead,
+         providedId: batch[0]!.id,
+       })
+     }
+   })
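Note: `validatePushBatch` enforces two invariants: event ids within a batch must be strictly ascending, and the whole batch must lie beyond the current push head (otherwise the caller gets a `LeaderAheadError`). The same rule restated as a plain predicate over numeric ids (illustrative only; `isValidPushBatch` is a hypothetical name):

    const isValidPushBatch = (ids: number[], pushHead: number): boolean => {
      // 1. Strictly ascending event ids within the batch
      for (let i = 1; i < ids.length; i++) {
        if (ids[i - 1]! >= ids[i]!) return false
      }
      // 2. The whole batch must lie beyond the current push head
      return ids.length === 0 || ids[0]! > pushHead
    }

    isValidPushBatch([3, 4, 5], 2) // true
    isValidPushBatch([3, 3, 4], 2) // false: not strictly ascending
    isValidPushBatch([2, 3], 2)    // false: leader is already at/ahead of id 2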