@livestore/common 0.3.0-dev.26 → 0.3.0-dev.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/adapter-types.d.ts +13 -12
  3. package/dist/adapter-types.d.ts.map +1 -1
  4. package/dist/adapter-types.js +5 -6
  5. package/dist/adapter-types.js.map +1 -1
  6. package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
  7. package/dist/devtools/devtools-messages-common.d.ts +13 -6
  8. package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
  9. package/dist/devtools/devtools-messages-common.js +6 -0
  10. package/dist/devtools/devtools-messages-common.js.map +1 -1
  11. package/dist/devtools/devtools-messages-leader.d.ts +25 -25
  12. package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
  13. package/dist/devtools/devtools-messages-leader.js +1 -2
  14. package/dist/devtools/devtools-messages-leader.js.map +1 -1
  15. package/dist/leader-thread/LeaderSyncProcessor.d.ts +29 -7
  16. package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
  17. package/dist/leader-thread/LeaderSyncProcessor.js +259 -199
  18. package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
  19. package/dist/leader-thread/apply-mutation.d.ts +14 -9
  20. package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
  21. package/dist/leader-thread/apply-mutation.js +43 -36
  22. package/dist/leader-thread/apply-mutation.js.map +1 -1
  23. package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
  24. package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
  25. package/dist/leader-thread/leader-worker-devtools.js +4 -5
  26. package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
  27. package/dist/leader-thread/make-leader-thread-layer.d.ts +15 -3
  28. package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
  29. package/dist/leader-thread/make-leader-thread-layer.js +29 -34
  30. package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
  31. package/dist/leader-thread/mod.d.ts +1 -1
  32. package/dist/leader-thread/mod.d.ts.map +1 -1
  33. package/dist/leader-thread/mod.js +1 -1
  34. package/dist/leader-thread/mod.js.map +1 -1
  35. package/dist/leader-thread/mutationlog.d.ts +19 -3
  36. package/dist/leader-thread/mutationlog.d.ts.map +1 -1
  37. package/dist/leader-thread/mutationlog.js +105 -12
  38. package/dist/leader-thread/mutationlog.js.map +1 -1
  39. package/dist/leader-thread/pull-queue-set.d.ts +1 -1
  40. package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
  41. package/dist/leader-thread/pull-queue-set.js +6 -16
  42. package/dist/leader-thread/pull-queue-set.js.map +1 -1
  43. package/dist/leader-thread/recreate-db.d.ts.map +1 -1
  44. package/dist/leader-thread/recreate-db.js +4 -3
  45. package/dist/leader-thread/recreate-db.js.map +1 -1
  46. package/dist/leader-thread/types.d.ts +34 -19
  47. package/dist/leader-thread/types.d.ts.map +1 -1
  48. package/dist/leader-thread/types.js.map +1 -1
  49. package/dist/rehydrate-from-mutationlog.d.ts +5 -4
  50. package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
  51. package/dist/rehydrate-from-mutationlog.js +7 -9
  52. package/dist/rehydrate-from-mutationlog.js.map +1 -1
  53. package/dist/schema/EventId.d.ts +9 -0
  54. package/dist/schema/EventId.d.ts.map +1 -1
  55. package/dist/schema/EventId.js +22 -2
  56. package/dist/schema/EventId.js.map +1 -1
  57. package/dist/schema/MutationEvent.d.ts +78 -25
  58. package/dist/schema/MutationEvent.d.ts.map +1 -1
  59. package/dist/schema/MutationEvent.js +25 -12
  60. package/dist/schema/MutationEvent.js.map +1 -1
  61. package/dist/schema/schema.js +1 -1
  62. package/dist/schema/schema.js.map +1 -1
  63. package/dist/schema/system-tables.d.ts +67 -0
  64. package/dist/schema/system-tables.d.ts.map +1 -1
  65. package/dist/schema/system-tables.js +12 -1
  66. package/dist/schema/system-tables.js.map +1 -1
  67. package/dist/sync/ClientSessionSyncProcessor.d.ts +9 -1
  68. package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
  69. package/dist/sync/ClientSessionSyncProcessor.js +25 -19
  70. package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
  71. package/dist/sync/sync.d.ts +6 -5
  72. package/dist/sync/sync.d.ts.map +1 -1
  73. package/dist/sync/sync.js.map +1 -1
  74. package/dist/sync/syncstate.d.ts +47 -71
  75. package/dist/sync/syncstate.d.ts.map +1 -1
  76. package/dist/sync/syncstate.js +136 -139
  77. package/dist/sync/syncstate.js.map +1 -1
  78. package/dist/sync/syncstate.test.js +203 -284
  79. package/dist/sync/syncstate.test.js.map +1 -1
  80. package/dist/version.d.ts +1 -1
  81. package/dist/version.js +1 -1
  82. package/package.json +2 -2
  83. package/src/adapter-types.ts +11 -13
  84. package/src/devtools/devtools-messages-common.ts +9 -0
  85. package/src/devtools/devtools-messages-leader.ts +1 -2
  86. package/src/leader-thread/LeaderSyncProcessor.ts +457 -351
  87. package/src/leader-thread/apply-mutation.ts +81 -71
  88. package/src/leader-thread/leader-worker-devtools.ts +5 -7
  89. package/src/leader-thread/make-leader-thread-layer.ts +60 -53
  90. package/src/leader-thread/mod.ts +1 -1
  91. package/src/leader-thread/mutationlog.ts +166 -13
  92. package/src/leader-thread/recreate-db.ts +4 -3
  93. package/src/leader-thread/types.ts +33 -23
  94. package/src/rehydrate-from-mutationlog.ts +12 -12
  95. package/src/schema/EventId.ts +26 -2
  96. package/src/schema/MutationEvent.ts +32 -16
  97. package/src/schema/schema.ts +1 -1
  98. package/src/schema/system-tables.ts +20 -1
  99. package/src/sync/ClientSessionSyncProcessor.ts +35 -23
  100. package/src/sync/sync.ts +6 -9
  101. package/src/sync/syncstate.test.ts +228 -315
  102. package/src/sync/syncstate.ts +202 -187
  103. package/src/version.ts +1 -1
  104. package/tmp/pack.tgz +0 -0
  105. package/src/leader-thread/pull-queue-set.ts +0 -67
package/dist/leader-thread/LeaderSyncProcessor.js
@@ -1,16 +1,13 @@
  import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils';
- import { BucketQueue, Deferred, Effect, Exit, FiberHandle, Option, OtelTracer, ReadonlyArray, Schema, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
+ import { BucketQueue, Deferred, Effect, Exit, FiberHandle, OtelTracer, Queue, ReadonlyArray, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
  import { UnexpectedError } from '../adapter-types.js';
- import { EventId, getMutationDef, MUTATION_LOG_META_TABLE, MutationEvent, mutationLogMetaTable, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
- import { updateRows } from '../sql-queries/index.js';
+ import { EventId, getMutationDef, LEADER_MERGE_COUNTER_TABLE, MutationEvent, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
  import { LeaderAheadError } from '../sync/sync.js';
  import * as SyncState from '../sync/syncstate.js';
  import { sql } from '../util.js';
- import { makeApplyMutation } from './apply-mutation.js';
- import { execSql } from './connection.js';
- import { getBackendHeadFromDb, getClientHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js';
+ import { rollback } from './apply-mutation.js';
+ import * as Mutationlog from './mutationlog.js';
  import { LeaderThreadCtx } from './types.js';
- export const BACKEND_PUSH_BATCH_SIZE = 50;
  /**
  * The LeaderSyncProcessor manages synchronization of mutations between
  * the local state and the sync backend, ensuring efficient and orderly processing.
@@ -18,35 +15,47 @@ export const BACKEND_PUSH_BATCH_SIZE = 50;
  * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
  * Responsibilities:
- * - Queueing incoming local mutations in a localPushMailbox.
+ * - Queueing incoming local mutations in a localPushesQueue.
  * - Broadcasting mutations to client sessions via pull queues.
  * - Pushing mutations to the sync backend.
  *
  * Notes:
  *
  * local push processing:
- * - localPushMailbox:
+ * - localPushesQueue:
  * - Maintains events in ascending order.
  * - Uses `Deferred` objects to resolve/reject events based on application success.
- * - Processes events from the mailbox, applying mutations in batches.
+ * - Processes events from the queue, applying mutations in batches.
  * - Controlled by a `Latch` to manage execution flow.
  * - The latch closes on pull receipt and re-opens post-pull completion.
  * - Processes up to `maxBatchSize` events per cycle.
  *
+ * Currently we're advancing the db read model and mutation log in lockstep, but we could also decouple this in the future.
+ *
+ * Tricky concurrency scenarios:
+ * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+ * Solution: Introduce a generation number for local push batches which is used to filter out old batch items in case of rejection.
+ *
  */
- export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clientId, initialBlockingSyncContext, onError, }) => Effect.gen(function* () {
- const syncBackendQueue = yield* BucketQueue.make();
+ export const makeLeaderSyncProcessor = ({ schema, dbMutationLogMissing, dbMutationLog, dbReadModel, dbReadModelMissing, initialBlockingSyncContext, onError, params, testing, }) => Effect.gen(function* () {
+ const syncBackendPushQueue = yield* BucketQueue.make();
+ const localPushBatchSize = params.localPushBatchSize ?? 10;
+ const backendPushBatchSize = params.backendPushBatchSize ?? 50;
  const syncStateSref = yield* SubscriptionRef.make(undefined);
  const isClientEvent = (mutationEventEncoded) => {
  const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
  return mutationDef.options.clientOnly;
  };
+ const connectedClientSessionPullQueues = yield* makePullQueueSet;
  /**
  * Tracks generations of queued local push events.
- * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+ * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
  * even if they would be valid on their own.
  */
+ // TODO get rid of this in favour of the `mergeGeneration` event id field
  const currentLocalPushGenerationRef = { current: 0 };
+ const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) };
+ const mergePayloads = new Map();
  // This context depends on data from `boot`; we should find a better implementation to avoid this ref indirection.
  const ctxRef = {
  current: undefined,
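
The generation mechanism referenced in the doc comment and in `currentLocalPushGenerationRef` above can be illustrated in isolation. The sketch below is simplified plain TypeScript with invented names (`GenerationQueue`, `takeBatch`); it is not the package's actual API, but it shows how tagging each queued item with the generation it was pushed under lets a rejection invalidate all still-queued items from that generation:

```ts
type QueueItem<E> = { event: E; generation: number }

class GenerationQueue<E> {
  private items: QueueItem<E>[] = []
  private generation = 0

  // Local pushes are enqueued under the generation that is current at push time.
  push(event: E): void {
    this.items.push({ event, generation: this.generation })
  }

  // When a batch is rejected, bump the generation and drop every queued item
  // that still belongs to a rejected (older) generation, even if those items
  // would be valid on their own.
  reject(): void {
    this.generation++
    this.items = this.items.filter((item) => item.generation >= this.generation)
  }

  takeBatch(max: number): E[] {
    return this.items.splice(0, max).map((item) => item.event)
  }
}
```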
@@ -54,21 +63,25 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  const localPushesQueue = yield* BucketQueue.make();
  const localPushesLatch = yield* Effect.makeLatch(true);
  const pullLatch = yield* Effect.makeLatch(true);
+ /**
+ * In addition to the `syncStateSref` we also need the `pushHeadRef` in order to prevent old/duplicate
+ * events from being pushed in a scenario like this:
+ * - client session A pushes e1
+ * - leader sync processor takes a bit and hasn't yet taken e1 from the localPushesQueue
+ * - client session B also pushes e1 (which should be rejected)
+ *
+ * Thus the purpose of the pushHeadRef is to guard the integrity of the local push queue.
+ */
+ const pushHeadRef = { current: EventId.ROOT };
+ const advancePushHead = (eventId) => {
+ pushHeadRef.current = EventId.max(pushHeadRef.current, eventId);
+ };
+ // NOTE: New events are only pushed to the sync backend after successful local push processing
  const push = (newEvents, options) => Effect.gen(function* () {
- // TODO validate batch
  if (newEvents.length === 0)
  return;
- // if (options.generation < currentLocalPushGenerationRef.current) {
- // debugger
- // // We can safely drop this batch as it's from a previous push generation
- // return
- // }
- if (clientId === 'client-b') {
- // console.log(
- // 'push from client session',
- // newEvents.map((item) => item.toJSON()),
- // )
- }
+ yield* validatePushBatch(newEvents, pushHeadRef.current);
+ advancePushHead(newEvents.at(-1).id);
  const waitForProcessing = options?.waitForProcessing ?? false;
  const generation = currentLocalPushGenerationRef.current;
  if (waitForProcessing) {
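
The `pushHeadRef` guard added above can be sketched standalone. Everything below (`compare`, `acceptPush`) is illustrative and not part of the package; the point is that validation happens at push time against a monotonically advancing head, so a duplicate push is rejected even while the original event is still sitting unprocessed in the queue:

```ts
type EventId = { global: number; client: number }

const compare = (a: EventId, b: EventId): number =>
  a.global !== b.global ? a.global - b.global : a.client - b.client

let pushHead: EventId = { global: 0, client: 0 } // stand-in for EventId.ROOT

const advancePushHead = (id: EventId): void => {
  if (compare(id, pushHead) > 0) pushHead = id
}

// Validate at push time, before the event ever reaches the queue. This is what
// catches session B re-pushing e1 while e1 from session A is still queued.
const acceptPush = (batch: ReadonlyArray<{ id: EventId }>): boolean => {
  if (batch.length === 0) return true
  if (compare(pushHead, batch[0].id) >= 0) return false // ~ LeaderAheadError
  advancePushHead(batch[batch.length - 1].id)
  return true
}
```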
@@ -81,20 +94,21 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  const items = newEvents.map((mutationEventEncoded) => [mutationEventEncoded, undefined, generation]);
  yield* BucketQueue.offerAll(localPushesQueue, items);
  }
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
+ }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:push', {
  attributes: {
  batchSize: newEvents.length,
  batch: TRACE_VERBOSE ? newEvents : undefined,
  },
  links: ctxRef.current?.span ? [{ _tag: 'SpanLink', span: ctxRef.current.span, attributes: {} }] : undefined,
  }));
- const pushPartial = ({ mutationEvent: partialMutationEvent, clientId, sessionId, }) => Effect.gen(function* () {
+ const pushPartial = ({ mutationEvent: { mutation, args }, clientId, sessionId, }) => Effect.gen(function* () {
  const syncState = yield* syncStateSref;
  if (syncState === undefined)
  return shouldNeverHappen('Not initialized');
- const mutationDef = getMutationDef(schema, partialMutationEvent.mutation);
+ const mutationDef = getMutationDef(schema, mutation);
  const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
- ...partialMutationEvent,
+ mutation,
+ args,
  clientId,
  sessionId,
  ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
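
The `waitForProcessing` branch above pairs each queued event with a `Deferred` that the processing loop later resolves or rejects. A rough promise-based equivalent (illustrative names only; the real code uses Effect `Deferred`s and a `BucketQueue`):

```ts
type Resolver = { resolve: () => void; reject: (err: unknown) => void }
type Item<E> = { event: E; done?: Resolver }

const queue: Item<unknown>[] = []

// With `waitForProcessing`, the caller only resumes once the processor has
// applied (or rejected) every event in the batch; otherwise events are
// enqueued fire-and-forget.
const push = <E>(events: E[], options?: { waitForProcessing?: boolean }): Promise<void> => {
  if (!options?.waitForProcessing) {
    for (const event of events) queue.push({ event })
    return Promise.resolve()
  }
  const pending = events.map(
    (event) =>
      new Promise<void>((resolve, reject) => {
        queue.push({ event, done: { resolve, reject } })
      }),
  )
  return Promise.all(pending).then(() => undefined)
}

// The processor side settles each deferred as it applies the event:
const processNext = (apply: (event: unknown) => void): void => {
  const item = queue.shift()
  if (item === undefined) return
  try {
    apply(item.event)
    item.done?.resolve()
  } catch (err) {
    item.done?.reject(err)
  }
}
```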
@@ -102,28 +116,29 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  yield* push([mutationEventEncoded]);
  }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie));
  // Starts various background loops
- const boot = ({ dbReady }) => Effect.gen(function* () {
+ const boot = Effect.gen(function* () {
  const span = yield* Effect.currentSpan.pipe(Effect.orDie);
  const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)));
  const { devtools, shutdownChannel } = yield* LeaderThreadCtx;
+ const runtime = yield* Effect.runtime();
  ctxRef.current = {
  otelSpan,
  span,
  devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+ runtime,
  };
- const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbMutationLog);
- const initialLocalHead = dbMissing ? EventId.ROOT : getClientHeadFromDb(dbMutationLog);
+ const initialLocalHead = dbMutationLogMissing ? EventId.ROOT : Mutationlog.getClientHeadFromDb(dbMutationLog);
+ const initialBackendHead = dbMutationLogMissing
+ ? EventId.ROOT.global
+ : Mutationlog.getBackendHeadFromDb(dbMutationLog);
  if (initialBackendHead > initialLocalHead.global) {
  return shouldNeverHappen(`During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`);
  }
- const pendingMutationEvents = yield* getMutationEventsSince({
- global: initialBackendHead,
- client: EventId.clientDefault,
- }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))));
+ const pendingMutationEvents = dbMutationLogMissing
+ ? []
+ : yield* Mutationlog.getMutationEventsSince({ global: initialBackendHead, client: EventId.clientDefault });
  const initialSyncState = new SyncState.SyncState({
  pending: pendingMutationEvents,
- // On the leader we don't need a rollback tail beyond `pending` items
- rollbackTail: [],
  upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
  localHead: initialLocalHead,
  });
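
The boot sequence above derives its initial state from two persisted heads. A condensed sketch of that logic (plain TypeScript; `bootSyncState` and `LogEntry` are invented names, not the package's API):

```ts
interface LogEntry { global: number; client: number }

// The backend head tracks the last event confirmed by the sync backend; the
// local head tracks the last event in the local mutation log. Everything the
// backend has confirmed must already be local, hence the invariant check.
const bootSyncState = (log: LogEntry[], backendHead: number, localHead: LogEntry) => {
  if (backendHead > localHead.global) {
    throw new Error(
      `During boot the backend head (${backendHead}) should never be greater than the local head (${localHead.global})`,
    )
  }
  // Pending = everything after the backend head; these events are re-offered
  // to the backend push queue on boot (minus clientOnly events).
  const pending = log.filter((e) => e.global > backendHead)
  return { pending, upstreamHead: backendHead, localHead }
}
```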
@@ -131,13 +146,15 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  yield* SubscriptionRef.set(syncStateSref, initialSyncState);
  // Rehydrate sync queue
  if (pendingMutationEvents.length > 0) {
- const filteredBatch = pendingMutationEvents
+ const globalPendingMutationEvents = pendingMutationEvents
  // Don't sync clientOnly mutations
  .filter((mutationEventEncoded) => {
  const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
  return mutationDef.options.clientOnly === false;
  });
- yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
+ if (globalPendingMutationEvents.length > 0) {
+ yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingMutationEvents);
+ }
  }
  const shutdownOnError = (cause) => Effect.gen(function* () {
  if (onError === 'shutdown') {
@@ -150,36 +167,38 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  localPushesQueue,
  pullLatch,
  syncStateSref,
- syncBackendQueue,
+ syncBackendPushQueue,
  schema,
  isClientEvent,
  otelSpan,
  currentLocalPushGenerationRef,
+ connectedClientSessionPullQueues,
+ mergeCounterRef,
+ mergePayloads,
+ localPushBatchSize,
+ testing: {
+ delay: testing?.delays?.localPushProcessing,
+ },
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
  const backendPushingFiberHandle = yield* FiberHandle.make();
- yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
- dbReady,
- syncBackendQueue,
+ const backendPushingEffect = backgroundBackendPushing({
+ syncBackendPushQueue,
  otelSpan,
  devtoolsLatch: ctxRef.current?.devtoolsLatch,
- }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
+ backendPushBatchSize,
+ }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError));
+ yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect);
  yield* backgroundBackendPulling({
- dbReady,
  initialBackendHead,
  isClientEvent,
  restartBackendPushing: (filteredRebasedPending) => Effect.gen(function* () {
  // Stop current pushing fiber
  yield* FiberHandle.clear(backendPushingFiberHandle);
- // Reset the sync queue
- yield* BucketQueue.clear(syncBackendQueue);
- yield* BucketQueue.offerAll(syncBackendQueue, filteredRebasedPending);
+ // Reset the sync backend push queue
+ yield* BucketQueue.clear(syncBackendPushQueue);
+ yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending);
  // Restart pushing fiber
- yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
- dbReady,
- syncBackendQueue,
- otelSpan,
- devtoolsLatch: ctxRef.current?.devtoolsLatch,
- }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
+ yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect);
  }),
  syncStateSref,
  localPushesLatch,
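
`restartBackendPushing` above follows a stop/reset/restart pattern around a `FiberHandle`. An approximate sketch using an `AbortController` in place of fiber interruption (all names illustrative):

```ts
let controller: AbortController | undefined
let syncBackendPushQueue: unknown[] = []

const startPushing = (loop: (signal: AbortSignal) => Promise<void>): void => {
  controller = new AbortController()
  void loop(controller.signal)
}

// On a rebase, the push queue's contents are stale: stop the running loop,
// replace the queue with the rebased pending events, then start a fresh loop.
const restartBackendPushing = (
  rebasedPending: unknown[],
  loop: (signal: AbortSignal) => Promise<void>,
): void => {
  controller?.abort() // ~ FiberHandle.clear
  syncBackendPushQueue = [...rebasedPending] // ~ BucketQueue.clear + offerAll
  startPushing(loop) // ~ FiberHandle.run with the same effect
}
```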
@@ -187,30 +206,68 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  otelSpan,
  initialBlockingSyncContext,
  devtoolsLatch: ctxRef.current?.devtoolsLatch,
+ connectedClientSessionPullQueues,
+ mergeCounterRef,
+ mergePayloads,
+ advancePushHead,
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
  return { initialLeaderHead: initialLocalHead };
- }).pipe(Effect.withSpanScoped('@livestore/common:leader-thread:syncing'));
+ }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'));
+ const pull = ({ cursor }) => Effect.gen(function* () {
+ const queue = yield* pullQueue({ cursor });
+ return Stream.fromQueue(queue);
+ }).pipe(Stream.unwrapScoped);
+ const pullQueue = ({ cursor }) => {
+ const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized');
+ return Effect.gen(function* () {
+ const queue = yield* connectedClientSessionPullQueues.makeQueue;
+ const payloadsSinceCursor = Array.from(mergePayloads.entries())
+ .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+ .filter(({ mergeCounter }) => mergeCounter > cursor.mergeCounter)
+ .toSorted((a, b) => a.mergeCounter - b.mergeCounter)
+ .map(({ payload, mergeCounter }) => {
+ if (payload._tag === 'upstream-advance') {
+ return {
+ payload: {
+ _tag: 'upstream-advance',
+ newEvents: ReadonlyArray.dropWhile(payload.newEvents, (mutationEventEncoded) => EventId.isGreaterThanOrEqual(cursor.eventId, mutationEventEncoded.id)),
+ },
+ mergeCounter,
+ };
+ }
+ else {
+ return { payload, mergeCounter };
+ }
+ });
+ yield* queue.offerAll(payloadsSinceCursor);
+ return queue;
+ }).pipe(Effect.provide(runtime));
+ };
+ const syncState = Subscribable.make({
+ get: Effect.gen(function* () {
+ const syncState = yield* syncStateSref;
+ if (syncState === undefined)
+ return shouldNeverHappen('Not initialized');
+ return syncState;
+ }),
+ changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+ });
  return {
+ pull,
+ pullQueue,
  push,
  pushPartial,
  boot,
- syncState: Subscribable.make({
- get: Effect.gen(function* () {
- const syncState = yield* syncStateSref;
- if (syncState === undefined)
- return shouldNeverHappen('Not initialized');
- return syncState;
- }),
- changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
- }),
+ syncState,
+ getMergeCounter: () => mergeCounterRef.current,
  };
  });
- const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, }) => Effect.gen(function* () {
- const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx;
- const applyMutationItems = yield* makeApplyMutationItems;
+ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendPushQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, localPushBatchSize, testing, }) => Effect.gen(function* () {
  while (true) {
- // TODO make batch size configurable
- const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10);
+ if (testing.delay !== undefined) {
+ yield* testing.delay.pipe(Effect.withSpan('localPushProcessingDelay'));
+ }
+ const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, localPushBatchSize);
  // Wait for the backend pulling to finish
  yield* localPushesLatch.await;
  // Prevent backend pull processing until this local push is finished
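
The new `pullQueue` above lets a client session reconnect at a cursor and replay everything merged since then, trimming already-seen events off the front of `upstream-advance` payloads. A simplified standalone version of that filtering (invented `Event`/`Payload` shapes and numeric ids in place of `EventId` pairs):

```ts
type Event = { id: number }
type Payload =
  | { _tag: 'upstream-advance'; newEvents: Event[] }
  | { _tag: 'upstream-rebase'; newEvents: Event[]; rollbackEvents: Event[] }

// A late-connecting session passes the merge counter and event id it has
// already seen; everything merged after that point is replayed, with
// already-seen events trimmed from advance payloads.
const replaySinceCursor = (
  mergePayloads: Map<number, Payload>,
  cursor: { mergeCounter: number; eventId: number },
): { mergeCounter: number; payload: Payload }[] =>
  [...mergePayloads.entries()]
    .filter(([mergeCounter]) => mergeCounter > cursor.mergeCounter)
    .sort(([a], [b]) => a - b)
    .map(([mergeCounter, payload]) =>
      payload._tag === 'upstream-advance'
        ? {
            mergeCounter,
            payload: {
              _tag: 'upstream-advance' as const,
              // Events are ordered, so dropping the seen prefix is a filter here
              newEvents: payload.newEvents.filter((e) => e.id > cursor.eventId),
            },
          }
        : { mergeCounter, payload },
    )
```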
@@ -236,9 +293,10 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  isClientEvent,
  isEqualEvent: MutationEvent.isEqualEncoded,
  });
+ const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
  switch (mergeResult._tag) {
  case 'unexpected-error': {
- otelSpan?.addEvent('local-push:unexpected-error', {
+ otelSpan?.addEvent(`[${mergeCounter}]:push:unexpected-error`, {
  batchSize: newEvents.length,
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
  });
@@ -248,14 +306,11 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  return shouldNeverHappen('The leader thread should never have to rebase due to a local push');
  }
  case 'reject': {
- otelSpan?.addEvent('local-push:reject', {
+ otelSpan?.addEvent(`[${mergeCounter}]:push:reject`, {
  batchSize: newEvents.length,
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
- /*
-
- TODO: how to test this?
- */
+ // TODO: how to test this?
  currentLocalPushGenerationRef.current++;
  const nextGeneration = currentLocalPushGenerationRef.current;
  const providedId = newEvents.at(0).id;
@@ -263,7 +318,8 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  // We're also handling the case where the localPushQueue already contains events
  // from the next generation which we preserve in the queue
  const remainingEventsMatchingGeneration = yield* BucketQueue.takeSplitWhere(localPushesQueue, (item) => item[2] >= nextGeneration);
- if ((yield* BucketQueue.size(localPushesQueue)) > 0) {
+ // TODO we still need to better understand and handle this scenario
+ if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
  console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue));
  debugger;
  }
@@ -290,15 +346,12 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  }
  }
  yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
- if (clientId === 'client-b') {
- // yield* Effect.log('offer upstream-advance due to local-push')
- // debugger
- }
  yield* connectedClientSessionPullQueues.offer({
- payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents },
- remaining: 0,
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+ mergeCounter,
  });
- otelSpan?.addEvent('local-push', {
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
+ otelSpan?.addEvent(`[${mergeCounter}]:push:advance`, {
  batchSize: newEvents.length,
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
@@ -307,45 +360,41 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
  return mutationDef.options.clientOnly === false;
  });
- yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
- yield* applyMutationItems({ batchItems: newEvents, deferreds });
+ yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch);
+ yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds });
  // Allow the backend pulling to start
  yield* pullLatch.open;
  }
  });
  // TODO how to handle errors gracefully
- const makeApplyMutationItems = Effect.gen(function* () {
- const leaderThreadCtx = yield* LeaderThreadCtx;
- const { dbReadModel: db, dbMutationLog } = leaderThreadCtx;
- const applyMutation = yield* makeApplyMutation;
- return ({ batchItems, deferreds }) => Effect.gen(function* () {
- db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
- dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
- yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
- if (Exit.isSuccess(exit))
- return;
- // Rollback in case of an error
- db.execute('ROLLBACK', undefined);
- dbMutationLog.execute('ROLLBACK', undefined);
- }));
- for (let i = 0; i < batchItems.length; i++) {
- yield* applyMutation(batchItems[i]);
- if (deferreds?.[i] !== undefined) {
- yield* Deferred.succeed(deferreds[i], void 0);
- }
+ const applyMutationsBatch = ({ batchItems, deferreds }) => Effect.gen(function* () {
+ const { dbReadModel: db, dbMutationLog, applyMutation } = yield* LeaderThreadCtx;
+ // NOTE We always start a transaction to ensure consistency between db and mutation log (even for single-item batches)
+ db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+ dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+ yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
+ if (Exit.isSuccess(exit))
+ return;
+ // Rollback in case of an error
+ db.execute('ROLLBACK', undefined);
+ dbMutationLog.execute('ROLLBACK', undefined);
+ }));
+ for (let i = 0; i < batchItems.length; i++) {
+ const { sessionChangeset } = yield* applyMutation(batchItems[i]);
+ batchItems[i].meta.sessionChangeset = sessionChangeset;
+ if (deferreds?.[i] !== undefined) {
+ yield* Deferred.succeed(deferreds[i], void 0);
  }
- db.execute('COMMIT', undefined); // Commit the transaction
- dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
- }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems', {
- attributes: { count: batchItems.length },
- }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
- });
- const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, }) => Effect.gen(function* () {
- const { syncBackend, dbReadModel: db, dbMutationLog, connectedClientSessionPullQueues, schema, clientId, } = yield* LeaderThreadCtx;
+ }
+ db.execute('COMMIT', undefined); // Commit the transaction
+ dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
+ }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyMutationItems', {
+ attributes: { batchSize: batchItems.length },
+ }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
+ const backgroundBackendPulling = ({ initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, advancePushHead, }) => Effect.gen(function* () {
+ const { syncBackend, dbReadModel: db, dbMutationLog, schema } = yield* LeaderThreadCtx;
  if (syncBackend === undefined)
  return;
- const cursorInfo = yield* getCursorInfo(initialBackendHead);
- const applyMutationItems = yield* makeApplyMutationItems;
  const onNewPullChunk = (newEvents, remaining) => Effect.gen(function* () {
  if (newEvents.length === 0)
  return;
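
`applyMutationsBatch` above wraps every batch in paired transactions over the read-model database and the mutation log, with a finalizer that rolls both back on failure. The same idea in a minimal synchronous sketch (hypothetical `Db` interface; the real code uses an Effect finalizer rather than try/catch):

```ts
interface Db { execute(sql: string): void }

// Both databases advance in lockstep: either the whole batch lands in the
// read model *and* the mutation log, or in neither.
const applyBatchAtomically = <E>(
  db: Db,
  dbMutationLog: Db,
  batch: E[],
  applyMutation: (event: E) => void,
): void => {
  db.execute('BEGIN TRANSACTION')
  dbMutationLog.execute('BEGIN TRANSACTION')
  try {
    for (const event of batch) applyMutation(event)
    db.execute('COMMIT')
    dbMutationLog.execute('COMMIT')
  } catch (error) {
    // Mirrors the Effect finalizer: on any failure, roll both back
    db.execute('ROLLBACK')
    dbMutationLog.execute('ROLLBACK')
    throw error
  }
}
```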
@@ -359,72 +408,81 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent,
  const syncState = yield* syncStateSref;
  if (syncState === undefined)
  return shouldNeverHappen('Not initialized');
- const trimRollbackUntil = newEvents.at(-1).id;
  const mergeResult = SyncState.merge({
  syncState,
- payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
  isClientEvent,
  isEqualEvent: MutationEvent.isEqualEncoded,
  ignoreClientEvents: true,
  });
+ const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
  if (mergeResult._tag === 'reject') {
  return shouldNeverHappen('The leader thread should never reject upstream advances');
  }
  else if (mergeResult._tag === 'unexpected-error') {
- otelSpan?.addEvent('backend-pull:unexpected-error', {
+ otelSpan?.addEvent(`[${mergeCounter}]:pull:unexpected-error`, {
  newEventsCount: newEvents.length,
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
  });
  return yield* Effect.fail(mergeResult.cause);
  }
  const newBackendHead = newEvents.at(-1).id;
- updateBackendHead(dbMutationLog, newBackendHead);
+ Mutationlog.updateBackendHead(dbMutationLog, newBackendHead);
  if (mergeResult._tag === 'rebase') {
- otelSpan?.addEvent('backend-pull:rebase', {
+ otelSpan?.addEvent(`[${mergeCounter}]:pull:rebase`, {
  newEventsCount: newEvents.length,
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
- rollbackCount: mergeResult.eventsToRollback.length,
+ rollbackCount: mergeResult.rollbackEvents.length,
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
- const filteredRebasedPending = mergeResult.newSyncState.pending.filter((mutationEvent) => {
+ const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((mutationEvent) => {
  const mutationDef = getMutationDef(schema, mutationEvent.mutation);
  return mutationDef.options.clientOnly === false;
  });
- yield* restartBackendPushing(filteredRebasedPending);
- if (mergeResult.eventsToRollback.length > 0) {
- yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.eventsToRollback.map((_) => _.id) });
+ yield* restartBackendPushing(globalRebasedPendingEvents);
+ if (mergeResult.rollbackEvents.length > 0) {
+ yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) });
  }
  yield* connectedClientSessionPullQueues.offer({
- payload: {
- _tag: 'upstream-rebase',
+ payload: SyncState.PayloadUpstreamRebase.make({
  newEvents: mergeResult.newEvents,
- rollbackUntil: mergeResult.eventsToRollback.at(0).id,
- trimRollbackUntil,
- },
- remaining,
+ rollbackEvents: mergeResult.rollbackEvents,
+ }),
+ mergeCounter,
  });
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamRebase.make({
+ newEvents: mergeResult.newEvents,
+ rollbackEvents: mergeResult.rollbackEvents,
+ }));
  }
  else {
- otelSpan?.addEvent('backend-pull:advance', {
+ otelSpan?.addEvent(`[${mergeCounter}]:pull:advance`, {
  newEventsCount: newEvents.length,
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
- if (clientId === 'client-b') {
- // yield* Effect.log('offer upstream-advance due to pull')
- }
  yield* connectedClientSessionPullQueues.offer({
- payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents, trimRollbackUntil },
- remaining,
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+ mergeCounter,
  });
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
+ if (mergeResult.confirmedEvents.length > 0) {
+ // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
+ // `newEvents` instead, which we filter via `mergeResult.confirmedEvents`
+ const confirmedNewEvents = newEvents.filter((mutationEvent) => mergeResult.confirmedEvents.some((confirmedEvent) => EventId.isEqual(mutationEvent.id, confirmedEvent.id)));
+ yield* Mutationlog.updateSyncMetadata(confirmedNewEvents);
+ }
  }
+ // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
  trimChangesetRows(db, newBackendHead);
- yield* applyMutationItems({ batchItems: mergeResult.newEvents, deferreds: undefined });
+ advancePushHead(mergeResult.newSyncState.localHead);
+ yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined });
  yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
+ // Allow local pushes to be processed again
  if (remaining === 0) {
- // Allow local pushes to be processed again
  yield* localPushesLatch.open;
  }
  });
+ const cursorInfo = yield* Mutationlog.getSyncBackendCursorInfo(initialBackendHead);
  yield* syncBackend.pull(cursorInfo).pipe(
  // TODO only take from queue while connected
  Stream.tap(({ batch, remaining }) => Effect.gen(function* () {
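
The control flow of `onNewPullChunk` above reduces to three merge outcomes, two of which are handled here. A schematic sketch of the advance/rebase handling (invented `MergeResult` and `ops` shapes; the real `SyncState.merge` result carries more fields):

```ts
type MergeResult<E> =
  | { _tag: 'advance'; newEvents: E[]; confirmedEvents: E[] }
  | { _tag: 'rebase'; newEvents: E[]; rollbackEvents: E[]; rebasedPending: E[] }

const handlePullMerge = <E>(
  result: MergeResult<E>,
  ops: {
    restartBackendPushing: (pending: E[]) => void
    rollback: (events: E[]) => void
    updateSyncMetadata: (confirmed: E[]) => void
    apply: (events: E[]) => void
  },
): void => {
  if (result._tag === 'rebase') {
    // Pending local events lost the race against upstream: restart pushing
    // with the rebased events and undo the superseded ones first.
    ops.restartBackendPushing(result.rebasedPending)
    if (result.rollbackEvents.length > 0) ops.rollback(result.rollbackEvents)
  } else {
    // A plain advance may confirm events pushed earlier; persist the sync
    // metadata the backend attached to them.
    ops.updateSyncMetadata(result.confirmedEvents)
  }
  // In both cases the merged events are then applied to the read model.
  ops.apply(result.newEvents)
}
```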
@@ -434,64 +492,21 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent,
  // batch: TRACE_VERBOSE ? batch : undefined,
  // },
  // })
- // Wait for the db to be initially created
- yield* dbReady;
  // NOTE we only want to process mutations when the sync backend is connected
  // (e.g. needed for simulating being offline)
  // TODO remove when there's a better way to handle this in stream above
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
- yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)), remaining);
+ yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded, _.metadata)), remaining);
  yield* initialBlockingSyncContext.update({ processed: batch.length, remaining });
  })), Stream.runDrain, Effect.interruptible);
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pulling'));
- const rollback = ({ db, dbMutationLog, eventIdsToRollback, }) => Effect.gen(function* () {
- const rollbackEvents = db
- .select(sql `SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`)
- .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
- .sort((a, b) => EventId.compare(a.id, b.id));
- // TODO bring back `.toSorted` once Expo supports it
- // .toSorted((a, b) => EventId.compare(a.id, b.id))
- // Apply changesets in reverse order
- for (let i = rollbackEvents.length - 1; i >= 0; i--) {
- const { changeset } = rollbackEvents[i];
- if (changeset !== null) {
- db.makeChangeset(changeset).invert().apply();
- }
- }
- const eventIdPairChunks = ReadonlyArray.chunksOf(100)(eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`));
- // Delete the changeset rows
- for (const eventIdPairChunk of eventIdPairChunks) {
- db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
- }
- // Delete the mutation log rows
- for (const eventIdPairChunk of eventIdPairChunks) {
- dbMutationLog.execute(sql `DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
- }
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
- attributes: { count: eventIdsToRollback.length },
- }));
- const getCursorInfo = (remoteHead) => Effect.gen(function* () {
- const { dbMutationLog } = yield* LeaderThreadCtx;
- if (remoteHead === EventId.ROOT.global)
- return Option.none();
- const MutationlogQuerySchema = Schema.Struct({
- syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
- }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head);
- const syncMetadataOption = yield* Effect.sync(() => dbMutationLog.select(sql `SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`)).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie);
- return Option.some({
- cursor: { global: remoteHead, client: EventId.clientDefault },
- metadata: syncMetadataOption,
- });
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }));
- const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
- const { syncBackend, dbMutationLog } = yield* LeaderThreadCtx;
+ }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'));
+ const backgroundBackendPushing = ({ syncBackendPushQueue, otelSpan, devtoolsLatch, backendPushBatchSize, }) => Effect.gen(function* () {
+ const { syncBackend } = yield* LeaderThreadCtx;
  if (syncBackend === undefined)
  return;
- yield* dbReady;
  while (true) {
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
- // TODO make batch size configurable
- const queueItems = yield* BucketQueue.takeBetween(syncBackendQueue, 1, BACKEND_PUSH_BATCH_SIZE);
+ const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, backendPushBatchSize);
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
  if (devtoolsLatch !== undefined) {
  yield* devtoolsLatch.await;
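
The reworked pushing loop above blocks until connected, drains up to `backendPushBatchSize` items, and parks on a rejected push until the pulling side rebases and restarts it. An async sketch of that shape (all parameter names invented):

```ts
const backgroundBackendPushing = async (deps: {
  waitUntilConnected: () => Promise<void>
  takeBatch: (min: number, max: number) => Promise<unknown[]>
  pushToBackend: (batch: unknown[]) => Promise<{ ok: boolean }>
  backendPushBatchSize: number
}): Promise<void> => {
  while (true) {
    await deps.waitUntilConnected()
    // Blocks until at least one item is available, then drains up to a batch
    const batch = await deps.takeBatch(1, deps.backendPushBatchSize)
    await deps.waitUntilConnected() // the connection may have dropped in between
    const result = await deps.pushToBackend(batch)
    if (!result.ok) {
      // A rejected push means the backend is ahead of us; park here and let
      // the pulling side rebase and restart this loop.
      return
    }
  }
}
```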
@@ -510,22 +525,67 @@ const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtool
  // wait for interrupt caused by background pulling which will then restart pushing
  return yield* Effect.never;
  }
- const { metadata } = pushResult.right;
- // TODO try to do this in a single query
- for (let i = 0; i < queueItems.length; i++) {
- const mutationEventEncoded = queueItems[i];
- yield* execSql(dbMutationLog, ...updateRows({
- tableName: MUTATION_LOG_META_TABLE,
- columns: mutationLogMetaTable.sqliteDef.columns,
- where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
- updateValues: { syncMetadataJson: metadata[i] },
- }));
- }
  }
- }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'));
+ }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'));
  const trimChangesetRows = (db, newHead) => {
  // Since we're using the session changeset rows to query for the current head,
  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
  db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`);
  };
+ const makePullQueueSet = Effect.gen(function* () {
+ const set = new Set();
+ yield* Effect.addFinalizer(() => Effect.gen(function* () {
+ for (const queue of set) {
+ yield* Queue.shutdown(queue);
+ }
+ set.clear();
+ }));
+ const makeQueue = Effect.gen(function* () {
+ const queue = yield* Queue.unbounded().pipe(Effect.acquireRelease(Queue.shutdown));
+ yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)));
+ set.add(queue);
+ return queue;
+ });
+ const offer = (item) => Effect.gen(function* () {
+ // Short-circuit if the payload is an empty upstream advance
+ if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
+ return;
+ }
+ for (const queue of set) {
+ yield* Queue.offer(queue, item);
+ }
+ });
+ return {
+ makeQueue,
+ offer,
+ };
+ });
+ const incrementMergeCounter = (mergeCounterRef) => Effect.gen(function* () {
+ const { dbReadModel } = yield* LeaderThreadCtx;
+ mergeCounterRef.current++;
+ dbReadModel.execute(sql `INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`);
+ return mergeCounterRef.current;
+ });
+ const getMergeCounterFromDb = (dbReadModel) => Effect.gen(function* () {
+ const result = dbReadModel.select(sql `SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`);
+ return result[0]?.mergeCounter ?? 0;
+ });
+ const validatePushBatch = (batch, pushHead) => Effect.gen(function* () {
+ if (batch.length === 0) {
+ return;
+ }
+ // Make sure the batch is monotonically increasing
+ for (let i = 1; i < batch.length; i++) {
+ if (EventId.isGreaterThanOrEqual(batch[i - 1].id, batch[i].id)) {
+ shouldNeverHappen(`Events must be ordered in monotonically ascending order by eventId. Received: [${batch.map((e) => EventId.toString(e.id)).join(', ')}]`);
+ }
+ }
+ // Make sure the smallest event id is > pushHead
+ if (EventId.isGreaterThanOrEqual(pushHead, batch[0].id)) {
+ return yield* LeaderAheadError.make({
+ minimumExpectedId: pushHead,
+ providedId: batch[0].id,
+ });
+ }
+ });
  //# sourceMappingURL=LeaderSyncProcessor.js.map
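
Finally, `validatePushBatch` enforces two invariants on every incoming batch. A self-contained sketch of the same checks (simplified `EventId` type, and plain exceptions in place of `shouldNeverHappen`/`LeaderAheadError`):

```ts
type EventId = { global: number; client: number }

const isGreaterThanOrEqual = (a: EventId, b: EventId): boolean =>
  a.global > b.global || (a.global === b.global && a.client >= b.client)

// Two rules: ids strictly ascending within the batch, and the whole batch
// strictly ahead of everything already accepted (the push head).
const validatePushBatch = (batch: { id: EventId }[], pushHead: EventId): void => {
  if (batch.length === 0) return
  for (let i = 1; i < batch.length; i++) {
    if (isGreaterThanOrEqual(batch[i - 1].id, batch[i].id)) {
      throw new Error('Events must be ordered in monotonically ascending order by eventId')
    }
  }
  if (isGreaterThanOrEqual(pushHead, batch[0].id)) {
    throw new Error(
      `LeaderAheadError: minimum expected id > (${pushHead.global},${pushHead.client}), ` +
        `got (${batch[0].id.global},${batch[0].id.client})`,
    )
  }
}
```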