@livestore/common 0.0.0-snapshot-f6ec49b1a18859aad769f0a0d8edf8bae231ed07 → 0.0.0-snapshot-2ef046b02334f52613d31dbe06af53487685edc0

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (102)
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/adapter-types.d.ts +7 -12
  3. package/dist/adapter-types.d.ts.map +1 -1
  4. package/dist/adapter-types.js +1 -7
  5. package/dist/adapter-types.js.map +1 -1
  6. package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
  7. package/dist/devtools/devtools-messages-common.d.ts +13 -6
  8. package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
  9. package/dist/devtools/devtools-messages-common.js +6 -0
  10. package/dist/devtools/devtools-messages-common.js.map +1 -1
  11. package/dist/devtools/devtools-messages-leader.d.ts +25 -25
  12. package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
  13. package/dist/devtools/devtools-messages-leader.js +1 -2
  14. package/dist/devtools/devtools-messages-leader.js.map +1 -1
  15. package/dist/leader-thread/LeaderSyncProcessor.d.ts +16 -6
  16. package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
  17. package/dist/leader-thread/LeaderSyncProcessor.js +227 -215
  18. package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
  19. package/dist/leader-thread/apply-mutation.d.ts +14 -9
  20. package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
  21. package/dist/leader-thread/apply-mutation.js +43 -36
  22. package/dist/leader-thread/apply-mutation.js.map +1 -1
  23. package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
  24. package/dist/leader-thread/leader-worker-devtools.js +2 -5
  25. package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
  26. package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
  27. package/dist/leader-thread/make-leader-thread-layer.js +22 -33
  28. package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
  29. package/dist/leader-thread/mod.d.ts +1 -1
  30. package/dist/leader-thread/mod.d.ts.map +1 -1
  31. package/dist/leader-thread/mod.js +1 -1
  32. package/dist/leader-thread/mod.js.map +1 -1
  33. package/dist/leader-thread/mutationlog.d.ts +20 -3
  34. package/dist/leader-thread/mutationlog.d.ts.map +1 -1
  35. package/dist/leader-thread/mutationlog.js +106 -12
  36. package/dist/leader-thread/mutationlog.js.map +1 -1
  37. package/dist/leader-thread/recreate-db.d.ts.map +1 -1
  38. package/dist/leader-thread/recreate-db.js +4 -3
  39. package/dist/leader-thread/recreate-db.js.map +1 -1
  40. package/dist/leader-thread/types.d.ts +35 -19
  41. package/dist/leader-thread/types.d.ts.map +1 -1
  42. package/dist/leader-thread/types.js.map +1 -1
  43. package/dist/rehydrate-from-mutationlog.d.ts +5 -4
  44. package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
  45. package/dist/rehydrate-from-mutationlog.js +7 -9
  46. package/dist/rehydrate-from-mutationlog.js.map +1 -1
  47. package/dist/schema/EventId.d.ts +4 -0
  48. package/dist/schema/EventId.d.ts.map +1 -1
  49. package/dist/schema/EventId.js +7 -1
  50. package/dist/schema/EventId.js.map +1 -1
  51. package/dist/schema/MutationEvent.d.ts +87 -18
  52. package/dist/schema/MutationEvent.d.ts.map +1 -1
  53. package/dist/schema/MutationEvent.js +35 -6
  54. package/dist/schema/MutationEvent.js.map +1 -1
  55. package/dist/schema/schema.js +1 -1
  56. package/dist/schema/schema.js.map +1 -1
  57. package/dist/schema/system-tables.d.ts +67 -0
  58. package/dist/schema/system-tables.d.ts.map +1 -1
  59. package/dist/schema/system-tables.js +12 -1
  60. package/dist/schema/system-tables.js.map +1 -1
  61. package/dist/sync/ClientSessionSyncProcessor.d.ts +11 -1
  62. package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
  63. package/dist/sync/ClientSessionSyncProcessor.js +54 -47
  64. package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
  65. package/dist/sync/sync.d.ts +16 -5
  66. package/dist/sync/sync.d.ts.map +1 -1
  67. package/dist/sync/sync.js.map +1 -1
  68. package/dist/sync/syncstate.d.ts +81 -83
  69. package/dist/sync/syncstate.d.ts.map +1 -1
  70. package/dist/sync/syncstate.js +159 -125
  71. package/dist/sync/syncstate.js.map +1 -1
  72. package/dist/sync/syncstate.test.js +97 -138
  73. package/dist/sync/syncstate.test.js.map +1 -1
  74. package/dist/version.d.ts +1 -1
  75. package/dist/version.js +1 -1
  76. package/package.json +2 -2
  77. package/src/adapter-types.ts +5 -12
  78. package/src/devtools/devtools-messages-common.ts +9 -0
  79. package/src/devtools/devtools-messages-leader.ts +1 -2
  80. package/src/leader-thread/LeaderSyncProcessor.ts +398 -370
  81. package/src/leader-thread/apply-mutation.ts +81 -71
  82. package/src/leader-thread/leader-worker-devtools.ts +3 -8
  83. package/src/leader-thread/make-leader-thread-layer.ts +27 -41
  84. package/src/leader-thread/mod.ts +1 -1
  85. package/src/leader-thread/mutationlog.ts +167 -13
  86. package/src/leader-thread/recreate-db.ts +4 -3
  87. package/src/leader-thread/types.ts +34 -23
  88. package/src/rehydrate-from-mutationlog.ts +12 -12
  89. package/src/schema/EventId.ts +8 -1
  90. package/src/schema/MutationEvent.ts +42 -10
  91. package/src/schema/schema.ts +1 -1
  92. package/src/schema/system-tables.ts +20 -1
  93. package/src/sync/ClientSessionSyncProcessor.ts +64 -50
  94. package/src/sync/sync.ts +16 -9
  95. package/src/sync/syncstate.test.ts +173 -217
  96. package/src/sync/syncstate.ts +184 -151
  97. package/src/version.ts +1 -1
  98. package/dist/leader-thread/pull-queue-set.d.ts +0 -7
  99. package/dist/leader-thread/pull-queue-set.d.ts.map +0 -1
  100. package/dist/leader-thread/pull-queue-set.js +0 -48
  101. package/dist/leader-thread/pull-queue-set.js.map +0 -1
  102. package/src/leader-thread/pull-queue-set.ts +0 -67
@@ -1,16 +1,15 @@
  import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils';
- import { BucketQueue, Deferred, Effect, Exit, FiberHandle, Option, OtelTracer, ReadonlyArray, Schema, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
+ import { BucketQueue, Deferred, Effect, Exit, FiberHandle, OtelTracer, Queue, ReadonlyArray, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
  import { UnexpectedError } from '../adapter-types.js';
- import { EventId, getMutationDef, MUTATION_LOG_META_TABLE, MutationEvent, mutationLogMetaTable, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
- import { updateRows } from '../sql-queries/index.js';
+ import { EventId, getMutationDef, LEADER_MERGE_COUNTER_TABLE, MutationEvent, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
  import { LeaderAheadError } from '../sync/sync.js';
  import * as SyncState from '../sync/syncstate.js';
  import { sql } from '../util.js';
- import { makeApplyMutation } from './apply-mutation.js';
- import { execSql } from './connection.js';
- import { getBackendHeadFromDb, getClientHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js';
+ import { rollback } from './apply-mutation.js';
+ import * as Mutationlog from './mutationlog.js';
  import { LeaderThreadCtx } from './types.js';
  export const BACKEND_PUSH_BATCH_SIZE = 50;
+ export const LOCAL_PUSH_BATCH_SIZE = 10;
  /**
  * The LeaderSyncProcessor manages synchronization of mutations between
  * the local state and the sync backend, ensuring efficient and orderly processing.
@@ -18,35 +17,44 @@ export const BACKEND_PUSH_BATCH_SIZE = 50;
  * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
  * Responsibilities:
- * - Queueing incoming local mutations in a localPushMailbox.
+ * - Queueing incoming local mutations in a localPushesQueue.
  * - Broadcasting mutations to client sessions via pull queues.
  * - Pushing mutations to the sync backend.
  *
  * Notes:
  *
  * local push processing:
- * - localPushMailbox:
+ * - localPushesQueue:
  * - Maintains events in ascending order.
  * - Uses `Deferred` objects to resolve/reject events based on application success.
- * - Processes events from the mailbox, applying mutations in batches.
+ * - Processes events from the queue, applying mutations in batches.
  * - Controlled by a `Latch` to manage execution flow.
  * - The latch closes on pull receipt and re-opens post-pull completion.
  * - Processes up to `maxBatchSize` events per cycle.
  *
+ * Currently we're advancing the db read model and mutation log in lockstep, but we could also decouple this in the future.
+ *
+ * Tricky concurrency scenarios:
+ * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+ * Solution: Introduce a generation number for local push batches which is used to filter out old batch items in case of rejection.
+ *
  */
- export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clientId, initialBlockingSyncContext, }) => Effect.gen(function* () {
- const syncBackendQueue = yield* BucketQueue.make();
+ export const makeLeaderSyncProcessor = ({ schema, dbMutationLogMissing, dbMutationLog, dbReadModel, dbReadModelMissing, initialBlockingSyncContext, onError, }) => Effect.gen(function* () {
+ const syncBackendPushQueue = yield* BucketQueue.make();
  const syncStateSref = yield* SubscriptionRef.make(undefined);
- const isLocalEvent = (mutationEventEncoded) => {
+ const isClientEvent = (mutationEventEncoded) => {
  const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
  return mutationDef.options.clientOnly;
  };
+ const connectedClientSessionPullQueues = yield* makePullQueueSet;
  /**
  * Tracks generations of queued local push events.
- * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+ * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
  * even if they would be valid on their own.
  */
  const currentLocalPushGenerationRef = { current: 0 };
+ const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) };
+ const mergePayloads = new Map();
  // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
  const ctxRef = {
  current: undefined,
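
The generation scheme described in the header comment above can be sketched independently of Effect and of LiveStore's actual internals; all names and types here are illustrative only:

type QueueItem<E> = { event: E; generation: number }

const generationRef = { current: 0 }
let queue: QueueItem<string>[] = []

// Each push is tagged with the generation that was current at enqueue time.
const push = (event: string) => {
  queue.push({ event, generation: generationRef.current })
}

// On rejection: bump the generation and drop every queued item that was
// pushed under the old generation, even if it would be valid on its own.
const rejectCurrentGeneration = () => {
  generationRef.current += 1
  queue = queue.filter((item) => item.generation >= generationRef.current)
}
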
@@ -54,21 +62,11 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  const localPushesQueue = yield* BucketQueue.make();
  const localPushesLatch = yield* Effect.makeLatch(true);
  const pullLatch = yield* Effect.makeLatch(true);
+ // NOTE: New events are only pushed to sync backend after successful local push processing
  const push = (newEvents, options) => Effect.gen(function* () {
  // TODO validate batch
  if (newEvents.length === 0)
  return;
- // if (options.generation < currentLocalPushGenerationRef.current) {
- // debugger
- // // We can safely drop this batch as it's from a previous push generation
- // return
- // }
- if (clientId === 'client-b') {
- // console.log(
- // 'push from client session',
- // newEvents.map((item) => item.toJSON()),
- // )
- }
  const waitForProcessing = options?.waitForProcessing ?? false;
  const generation = currentLocalPushGenerationRef.current;
  if (waitForProcessing) {
@@ -81,20 +79,21 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  const items = newEvents.map((mutationEventEncoded) => [mutationEventEncoded, undefined, generation]);
  yield* BucketQueue.offerAll(localPushesQueue, items);
  }
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
+ }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:local-push', {
  attributes: {
  batchSize: newEvents.length,
  batch: TRACE_VERBOSE ? newEvents : undefined,
  },
  links: ctxRef.current?.span ? [{ _tag: 'SpanLink', span: ctxRef.current.span, attributes: {} }] : undefined,
  }));
- const pushPartial = ({ mutationEvent: partialMutationEvent, clientId, sessionId, }) => Effect.gen(function* () {
+ const pushPartial = ({ mutationEvent: { mutation, args }, clientId, sessionId, }) => Effect.gen(function* () {
  const syncState = yield* syncStateSref;
  if (syncState === undefined)
  return shouldNeverHappen('Not initialized');
- const mutationDef = getMutationDef(schema, partialMutationEvent.mutation);
+ const mutationDef = getMutationDef(schema, mutation);
  const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
- ...partialMutationEvent,
+ mutation,
+ args,
  clientId,
  sessionId,
  ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
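
The `waitForProcessing` option above pairs each queued event with a `Deferred` that is settled once the batch has been applied (or rejected). A minimal sketch of that handshake, assuming the plain `effect` package (a `Queue` stands in for LiveStore's `BucketQueue`; names are illustrative):

import { Deferred, Effect, Queue } from 'effect'

type Item = readonly [event: string, deferred: Deferred.Deferred<void, Error>]

const program = Effect.gen(function* () {
  const queue = yield* Queue.unbounded<Item>()

  // Producer: offer the event and block until the processor settles it.
  const pushAndWait = (event: string) =>
    Effect.gen(function* () {
      const deferred = yield* Deferred.make<void, Error>()
      yield* Queue.offer(queue, [event, deferred] as const)
      // Resolves once the event has been applied; fails if it was rejected.
      yield* Deferred.await(deferred)
    })

  // Processor: apply the event, then resolve its Deferred.
  const processOne = Effect.gen(function* () {
    const [event, deferred] = yield* Queue.take(queue)
    yield* Effect.log(`applied ${event}`)
    yield* Deferred.succeed(deferred, void 0)
  })

  yield* Effect.fork(processOne)
  yield* pushAndWait('event-1')
})

Effect.runPromise(program)
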
@@ -102,28 +101,29 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  yield* push([mutationEventEncoded]);
  }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie));
  // Starts various background loops
- const boot = ({ dbReady }) => Effect.gen(function* () {
+ const boot = Effect.gen(function* () {
  const span = yield* Effect.currentSpan.pipe(Effect.orDie);
  const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)));
  const { devtools, shutdownChannel } = yield* LeaderThreadCtx;
+ const runtime = yield* Effect.runtime();
  ctxRef.current = {
  otelSpan,
  span,
  devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+ runtime,
  };
- const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbMutationLog);
- const initialLocalHead = dbMissing ? EventId.ROOT : getClientHeadFromDb(dbMutationLog);
+ const initialBackendHead = dbMutationLogMissing
+ ? EventId.ROOT.global
+ : Mutationlog.getBackendHeadFromDb(dbMutationLog);
+ const initialLocalHead = dbMutationLogMissing ? EventId.ROOT : Mutationlog.getClientHeadFromDb(dbMutationLog);
  if (initialBackendHead > initialLocalHead.global) {
  return shouldNeverHappen(`During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`);
  }
- const pendingMutationEvents = yield* getMutationEventsSince({
- global: initialBackendHead,
- client: EventId.clientDefault,
- }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))));
+ const pendingMutationEvents = dbMutationLogMissing
+ ? []
+ : yield* Mutationlog.getMutationEventsSince({ global: initialBackendHead, client: EventId.clientDefault });
  const initialSyncState = new SyncState.SyncState({
  pending: pendingMutationEvents,
- // On the leader we don't need a rollback tail beyond `pending` items
- rollbackTail: [],
  upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
  localHead: initialLocalHead,
  });
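
The pending set computed here is simply everything recorded in the mutation log after the backend head. A rough equivalent of `Mutationlog.getMutationEventsSince` as a plain query, assuming a better-sqlite3-style API (the table name is hypothetical; the `idGlobal`/`idClient` columns mirror the pairs visible elsewhere in this diff):

import Database from 'better-sqlite3'

const dbMutationLog = new Database('mutationlog.db')

// Pending events: everything after the head the sync backend has confirmed,
// ordered by event id (global counter first, then the client counter).
const getMutationEventsSince = (backendHeadGlobal: number) =>
  dbMutationLog
    .prepare('SELECT * FROM mutation_log WHERE idGlobal > ? ORDER BY idGlobal ASC, idClient ASC')
    .all(backendHeadGlobal)
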
@@ -131,50 +131,54 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  yield* SubscriptionRef.set(syncStateSref, initialSyncState);
  // Rehydrate sync queue
  if (pendingMutationEvents.length > 0) {
- const filteredBatch = pendingMutationEvents
+ const globalPendingMutationEvents = pendingMutationEvents
  // Don't sync clientOnly mutations
  .filter((mutationEventEncoded) => {
  const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
  return mutationDef.options.clientOnly === false;
  });
- yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
+ if (globalPendingMutationEvents.length > 0) {
+ yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingMutationEvents);
+ }
  }
  const shutdownOnError = (cause) => Effect.gen(function* () {
- yield* shutdownChannel.send(UnexpectedError.make({ cause }));
- yield* Effect.die(cause);
+ if (onError === 'shutdown') {
+ yield* shutdownChannel.send(UnexpectedError.make({ cause }));
+ yield* Effect.die(cause);
+ }
  });
  yield* backgroundApplyLocalPushes({
  localPushesLatch,
  localPushesQueue,
  pullLatch,
  syncStateSref,
- syncBackendQueue,
+ syncBackendPushQueue,
  schema,
- isLocalEvent,
+ isClientEvent,
  otelSpan,
  currentLocalPushGenerationRef,
+ connectedClientSessionPullQueues,
+ mergeCounterRef,
+ mergePayloads,
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
  const backendPushingFiberHandle = yield* FiberHandle.make();
  yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
- dbReady,
- syncBackendQueue,
+ syncBackendPushQueue,
  otelSpan,
  devtoolsLatch: ctxRef.current?.devtoolsLatch,
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
  yield* backgroundBackendPulling({
- dbReady,
  initialBackendHead,
- isLocalEvent,
+ isClientEvent,
  restartBackendPushing: (filteredRebasedPending) => Effect.gen(function* () {
  // Stop current pushing fiber
  yield* FiberHandle.clear(backendPushingFiberHandle);
- // Reset the sync queue
- yield* BucketQueue.clear(syncBackendQueue);
- yield* BucketQueue.offerAll(syncBackendQueue, filteredRebasedPending);
+ // Reset the sync backend push queue
+ yield* BucketQueue.clear(syncBackendPushQueue);
+ yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending);
  // Restart pushing fiber
  yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
- dbReady,
- syncBackendQueue,
+ syncBackendPushQueue,
  otelSpan,
  devtoolsLatch: ctxRef.current?.devtoolsLatch,
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
@@ -185,30 +189,53 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
  otelSpan,
  initialBlockingSyncContext,
  devtoolsLatch: ctxRef.current?.devtoolsLatch,
+ connectedClientSessionPullQueues,
+ mergeCounterRef,
+ mergePayloads,
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
  return { initialLeaderHead: initialLocalHead };
- }).pipe(Effect.withSpanScoped('@livestore/common:leader-thread:syncing'));
+ }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'));
+ const pull = ({ cursor }) => {
+ return Effect.gen(function* () {
+ const queue = yield* pullQueue({ cursor });
+ return Stream.fromQueue(queue);
+ }).pipe(Stream.unwrapScoped);
+ };
+ const pullQueue = ({ cursor }) => {
+ const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized');
+ return Effect.gen(function* () {
+ const queue = yield* connectedClientSessionPullQueues.makeQueue(cursor);
+ const payloadsSinceCursor = Array.from(mergePayloads.entries())
+ .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+ .filter(({ mergeCounter }) => mergeCounter > cursor)
+ .toSorted((a, b) => a.mergeCounter - b.mergeCounter);
+ yield* queue.offerAll(payloadsSinceCursor);
+ return queue;
+ }).pipe(Effect.provide(runtime));
+ };
+ const syncState = Subscribable.make({
+ get: Effect.gen(function* () {
+ const syncState = yield* syncStateSref;
+ if (syncState === undefined)
+ return shouldNeverHappen('Not initialized');
+ return syncState;
+ }),
+ changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+ });
  return {
+ pull,
+ pullQueue,
  push,
  pushPartial,
  boot,
- syncState: Subscribable.make({
- get: Effect.gen(function* () {
- const syncState = yield* syncStateSref;
- if (syncState === undefined)
- return shouldNeverHappen('Not initialized');
- return syncState;
- }),
- changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
- }),
+ syncState,
+ getMergeCounter: () => mergeCounterRef.current,
  };
  });
- const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendQueue, schema, isLocalEvent, otelSpan, currentLocalPushGenerationRef, }) => Effect.gen(function* () {
- const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx;
- const applyMutationItems = yield* makeApplyMutationItems;
+ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendPushQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, }) => Effect.gen(function* () {
  while (true) {
  // TODO make batch size configurable
- const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10);
+ const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, LOCAL_PUSH_BATCH_SIZE);
  // Wait for the backend pulling to finish
  yield* localPushesLatch.await;
  // Prevent backend pull processing until this local push is finished
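
`pullQueue` above seeds every newly connected client session with the merge payloads recorded after that session's cursor. The replay logic reduces to a filter-and-sort over the `mergePayloads` map (plain TypeScript; the payload type is a simplified placeholder):

type MergePayload = { _tag: 'upstream-advance' | 'upstream-rebase' }

const mergePayloads = new Map<number, MergePayload>()

// Everything merged after the client's cursor, in merge-counter order.
const payloadsSinceCursor = (cursor: number) =>
  Array.from(mergePayloads.entries())
    .map(([mergeCounter, payload]) => ({ mergeCounter, payload }))
    .filter(({ mergeCounter }) => mergeCounter > cursor)
    .sort((a, b) => a.mergeCounter - b.mergeCounter)

`.sort` is safe here because `map` already produced a fresh array; the package code uses `.toSorted` (ES2023) to the same effect.
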
@@ -228,32 +255,30 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  const syncState = yield* syncStateSref;
  if (syncState === undefined)
  return shouldNeverHappen('Not initialized');
- const updateResult = SyncState.updateSyncState({
+ const mergeResult = SyncState.merge({
  syncState,
  payload: { _tag: 'local-push', newEvents },
- isLocalEvent,
+ isClientEvent,
  isEqualEvent: MutationEvent.isEqualEncoded,
  });
- switch (updateResult._tag) {
+ const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
+ switch (mergeResult._tag) {
  case 'unexpected-error': {
- otelSpan?.addEvent('local-push:unexpected-error', {
+ otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:unexpected-error`, {
  batchSize: newEvents.length,
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
  });
- return yield* Effect.fail(updateResult.cause);
+ return yield* Effect.fail(mergeResult.cause);
  }
  case 'rebase': {
  return shouldNeverHappen('The leader thread should never have to rebase due to a local push');
  }
  case 'reject': {
- otelSpan?.addEvent('local-push:reject', {
+ otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:reject`, {
  batchSize: newEvents.length,
- updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+ mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
- /*
-
- TODO: how to test this?
- */
+ // TODO: how to test this?
  currentLocalPushGenerationRef.current++;
  const nextGeneration = currentLocalPushGenerationRef.current;
  const providedId = newEvents.at(0).id;
@@ -261,7 +286,8 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  // We're also handling the case where the localPushQueue already contains events
  // from the next generation which we preserve in the queue
  const remainingEventsMatchingGeneration = yield* BucketQueue.takeSplitWhere(localPushesQueue, (item) => item[2] >= nextGeneration);
- if ((yield* BucketQueue.size(localPushesQueue)) > 0) {
+ // TODO we still need to better understand and handle this scenario
+ if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
  console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue));
  debugger;
  }
@@ -270,7 +296,7 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  ...remainingEventsMatchingGeneration.map(([_, deferred]) => deferred),
  ].filter(isNotUndefined);
  yield* Effect.forEach(allDeferredsToReject, (deferred) => Deferred.fail(deferred, LeaderAheadError.make({
- minimumExpectedId: updateResult.expectedMinimumId,
+ minimumExpectedId: mergeResult.expectedMinimumId,
  providedId,
  // nextGeneration,
  })));
@@ -284,66 +310,59 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
  break;
  }
  default: {
- casesHandled(updateResult);
+ casesHandled(mergeResult);
  }
  }
- yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState);
- if (clientId === 'client-b') {
- // yield* Effect.log('offer upstream-advance due to local-push')
- // debugger
- }
+ yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
  yield* connectedClientSessionPullQueues.offer({
- payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
- remaining: 0,
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+ mergeCounter,
  });
- otelSpan?.addEvent('local-push', {
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
+ otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:advance`, {
  batchSize: newEvents.length,
- updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+ mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
  // Don't sync clientOnly mutations
- const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
+ const filteredBatch = mergeResult.newEvents.filter((mutationEventEncoded) => {
  const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
  return mutationDef.options.clientOnly === false;
  });
- yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
- yield* applyMutationItems({ batchItems: newEvents, deferreds });
+ yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch);
+ yield* applyMutationsBatch({ batchItems: newEvents, deferreds });
  // Allow the backend pulling to start
  yield* pullLatch.open;
  }
  });
  // TODO how to handle errors gracefully
- const makeApplyMutationItems = Effect.gen(function* () {
- const leaderThreadCtx = yield* LeaderThreadCtx;
- const { dbReadModel: db, dbMutationLog } = leaderThreadCtx;
- const applyMutation = yield* makeApplyMutation;
- return ({ batchItems, deferreds }) => Effect.gen(function* () {
- db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
- dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
- yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
- if (Exit.isSuccess(exit))
- return;
- // Rollback in case of an error
- db.execute('ROLLBACK', undefined);
- dbMutationLog.execute('ROLLBACK', undefined);
- }));
- for (let i = 0; i < batchItems.length; i++) {
- yield* applyMutation(batchItems[i]);
- if (deferreds?.[i] !== undefined) {
- yield* Deferred.succeed(deferreds[i], void 0);
- }
+ const applyMutationsBatch = ({ batchItems, deferreds }) => Effect.gen(function* () {
+ const { dbReadModel: db, dbMutationLog, applyMutation } = yield* LeaderThreadCtx;
+ // NOTE We always start a transaction to ensure consistency between db and mutation log (even for single-item batches)
+ db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+ dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+ yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
+ if (Exit.isSuccess(exit))
+ return;
+ // Rollback in case of an error
+ db.execute('ROLLBACK', undefined);
+ dbMutationLog.execute('ROLLBACK', undefined);
+ }));
+ for (let i = 0; i < batchItems.length; i++) {
+ const { sessionChangeset } = yield* applyMutation(batchItems[i]);
+ batchItems[i].meta.sessionChangeset = sessionChangeset;
+ if (deferreds?.[i] !== undefined) {
+ yield* Deferred.succeed(deferreds[i], void 0);
  }
- db.execute('COMMIT', undefined); // Commit the transaction
- dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
- }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems', {
- attributes: { count: batchItems.length },
- }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
- });
- const backgroundBackendPulling = ({ dbReady, initialBackendHead, isLocalEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, }) => Effect.gen(function* () {
- const { syncBackend, dbReadModel: db, dbMutationLog, connectedClientSessionPullQueues, schema, clientId, } = yield* LeaderThreadCtx;
+ }
+ db.execute('COMMIT', undefined); // Commit the transaction
+ dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
+ }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyMutationItems', {
+ attributes: { batchSize: batchItems.length },
+ }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
+ const backgroundBackendPulling = ({ initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, }) => Effect.gen(function* () {
+ const { syncBackend, dbReadModel: db, dbMutationLog, schema } = yield* LeaderThreadCtx;
  if (syncBackend === undefined)
  return;
- const cursorInfo = yield* getCursorInfo(initialBackendHead);
- const applyMutationItems = yield* makeApplyMutationItems;
  const onNewPullChunk = (newEvents, remaining) => Effect.gen(function* () {
  if (newEvents.length === 0)
  return;
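
`applyMutationsBatch` brackets the whole batch in simultaneous transactions on the read-model database and the mutation log, so either both advance or neither does. The same bracket outside of Effect, sketched with better-sqlite3 as an assumed stand-in for the synchronous SQLite handles used here:

import Database from 'better-sqlite3'

const db = new Database('readmodel.db')
const dbMutationLog = new Database('mutationlog.db')

// Apply a batch atomically across both databases: either every item lands in
// the read model *and* the mutation log, or neither does.
const applyBatch = (items: string[], applyOne: (item: string) => void) => {
  db.exec('BEGIN TRANSACTION')
  dbMutationLog.exec('BEGIN TRANSACTION')
  try {
    for (const item of items) applyOne(item)
    db.exec('COMMIT')
    dbMutationLog.exec('COMMIT')
  } catch (error) {
    // Roll back both sides so the two stores never diverge.
    db.exec('ROLLBACK')
    dbMutationLog.exec('ROLLBACK')
    throw error
  }
}
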
@@ -357,72 +376,80 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isLocalEvent, r
  const syncState = yield* syncStateSref;
  if (syncState === undefined)
  return shouldNeverHappen('Not initialized');
- const trimRollbackUntil = newEvents.at(-1).id;
- const updateResult = SyncState.updateSyncState({
+ const mergeResult = SyncState.merge({
  syncState,
- payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
- isLocalEvent,
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
+ isClientEvent,
  isEqualEvent: MutationEvent.isEqualEncoded,
- ignoreLocalEvents: true,
+ ignoreClientEvents: true,
  });
- if (updateResult._tag === 'reject') {
+ const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
+ if (mergeResult._tag === 'reject') {
  return shouldNeverHappen('The leader thread should never reject upstream advances');
  }
- else if (updateResult._tag === 'unexpected-error') {
- otelSpan?.addEvent('backend-pull:unexpected-error', {
+ else if (mergeResult._tag === 'unexpected-error') {
+ otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:unexpected-error`, {
  newEventsCount: newEvents.length,
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
  });
- return yield* Effect.fail(updateResult.cause);
+ return yield* Effect.fail(mergeResult.cause);
  }
  const newBackendHead = newEvents.at(-1).id;
- updateBackendHead(dbMutationLog, newBackendHead);
- if (updateResult._tag === 'rebase') {
- otelSpan?.addEvent('backend-pull:rebase', {
+ Mutationlog.updateBackendHead(dbMutationLog, newBackendHead);
+ if (mergeResult._tag === 'rebase') {
+ otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:rebase`, {
  newEventsCount: newEvents.length,
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
- rollbackCount: updateResult.eventsToRollback.length,
- updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+ rollbackCount: mergeResult.rollbackEvents.length,
+ mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
- const filteredRebasedPending = updateResult.newSyncState.pending.filter((mutationEvent) => {
+ const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((mutationEvent) => {
  const mutationDef = getMutationDef(schema, mutationEvent.mutation);
  return mutationDef.options.clientOnly === false;
  });
- yield* restartBackendPushing(filteredRebasedPending);
- if (updateResult.eventsToRollback.length > 0) {
- yield* rollback({ db, dbMutationLog, eventIdsToRollback: updateResult.eventsToRollback.map((_) => _.id) });
+ yield* restartBackendPushing(globalRebasedPendingEvents);
+ if (mergeResult.rollbackEvents.length > 0) {
+ yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) });
  }
  yield* connectedClientSessionPullQueues.offer({
- payload: {
- _tag: 'upstream-rebase',
- newEvents: updateResult.newEvents,
- rollbackUntil: updateResult.eventsToRollback.at(0).id,
- trimRollbackUntil,
- },
- remaining,
+ payload: SyncState.PayloadUpstreamRebase.make({
+ newEvents: mergeResult.newEvents,
+ rollbackEvents: mergeResult.rollbackEvents,
+ }),
+ mergeCounter,
  });
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamRebase.make({
+ newEvents: mergeResult.newEvents,
+ rollbackEvents: mergeResult.rollbackEvents,
+ }));
  }
  else {
- otelSpan?.addEvent('backend-pull:advance', {
+ otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:advance`, {
  newEventsCount: newEvents.length,
- updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+ mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
  });
- if (clientId === 'client-b') {
- // yield* Effect.log('offer upstream-advance due to pull')
- }
  yield* connectedClientSessionPullQueues.offer({
- payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents, trimRollbackUntil },
- remaining,
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+ mergeCounter,
  });
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
+ if (mergeResult.confirmedEvents.length > 0) {
+ // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
+ // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
+ const confirmedNewEvents = newEvents.filter((mutationEvent) => mergeResult.confirmedEvents.some((confirmedEvent) => EventId.isEqual(mutationEvent.id, confirmedEvent.id)));
+ yield* Mutationlog.updateSyncMetadata(confirmedNewEvents);
+ }
  }
+ // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
  trimChangesetRows(db, newBackendHead);
- yield* applyMutationItems({ batchItems: updateResult.newEvents, deferreds: undefined });
- yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState);
+ yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined });
+ yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
+ // Allow local pushes to be processed again
  if (remaining === 0) {
- // Allow local pushes to be processed again
  yield* localPushesLatch.open;
  }
  });
+ const cursorInfo = yield* Mutationlog.getSyncBackendCursorInfo(initialBackendHead);
  yield* syncBackend.pull(cursorInfo).pipe(
  // TODO only take from queue while connected
  Stream.tap(({ batch, remaining }) => Effect.gen(function* () {
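
`SyncState.merge` returns a tagged result that the pulling code above dispatches on: 'reject' and 'unexpected-error' are fatal for the leader, 'rebase' triggers a rollback plus a restart of backend pushing, and the remaining advance case only confirms pending events. A stripped-down version of that dispatch (the result shape is inferred from this diff; the name of the advance variant is a guess):

type MergeResult =
  | { _tag: 'unexpected-error'; cause: unknown }
  | { _tag: 'reject'; expectedMinimumId: unknown }
  | { _tag: 'rebase'; newEvents: unknown[]; rollbackEvents: unknown[] }
  | { _tag: 'advance'; newEvents: unknown[]; confirmedEvents: unknown[] } // tag name assumed

const handleBackendPullMerge = (mergeResult: MergeResult) => {
  switch (mergeResult._tag) {
    case 'reject':
      // The leader is the authority for local ordering; upstream advances are never rejected here.
      throw new Error('The leader thread should never reject upstream advances')
    case 'unexpected-error':
      throw mergeResult.cause
    case 'rebase':
      // Roll back the superseded local events, then apply the rebased ones.
      return { rollback: mergeResult.rollbackEvents, apply: mergeResult.newEvents }
    case 'advance':
      // Nothing to roll back; pending events may now be confirmed upstream.
      return { rollback: [], apply: mergeResult.newEvents }
  }
}
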
@@ -432,64 +459,22 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isLocalEvent, r
  // batch: TRACE_VERBOSE ? batch : undefined,
  // },
  // })
- // Wait for the db to be initially created
- yield* dbReady;
  // NOTE we only want to process mutations when the sync backend is connected
  // (e.g. needed for simulating being offline)
  // TODO remove when there's a better way to handle this in stream above
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
- yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)), remaining);
+ yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded, _.metadata)), remaining);
  yield* initialBlockingSyncContext.update({ processed: batch.length, remaining });
  })), Stream.runDrain, Effect.interruptible);
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pulling'));
- const rollback = ({ db, dbMutationLog, eventIdsToRollback, }) => Effect.gen(function* () {
- const rollbackEvents = db
- .select(sql `SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`)
- .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
- .sort((a, b) => EventId.compare(a.id, b.id));
- // TODO bring back `.toSorted` once Expo supports it
- // .toSorted((a, b) => EventId.compare(a.id, b.id))
- // Apply changesets in reverse order
- for (let i = rollbackEvents.length - 1; i >= 0; i--) {
- const { changeset } = rollbackEvents[i];
- if (changeset !== null) {
- db.makeChangeset(changeset).invert().apply();
- }
- }
- const eventIdPairChunks = ReadonlyArray.chunksOf(100)(eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`));
- // Delete the changeset rows
- for (const eventIdPairChunk of eventIdPairChunks) {
- db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
- }
- // Delete the mutation log rows
- for (const eventIdPairChunk of eventIdPairChunks) {
- dbMutationLog.execute(sql `DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
- }
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
- attributes: { count: eventIdsToRollback.length },
- }));
- const getCursorInfo = (remoteHead) => Effect.gen(function* () {
- const { dbMutationLog } = yield* LeaderThreadCtx;
- if (remoteHead === EventId.ROOT.global)
- return Option.none();
- const MutationlogQuerySchema = Schema.Struct({
- syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
- }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head);
- const syncMetadataOption = yield* Effect.sync(() => dbMutationLog.select(sql `SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`)).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie);
- return Option.some({
- cursor: { global: remoteHead, client: EventId.clientDefault },
- metadata: syncMetadataOption,
- });
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }));
- const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
- const { syncBackend, dbMutationLog } = yield* LeaderThreadCtx;
+ }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'));
+ const backgroundBackendPushing = ({ syncBackendPushQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
+ const { syncBackend } = yield* LeaderThreadCtx;
  if (syncBackend === undefined)
  return;
- yield* dbReady;
  while (true) {
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
  // TODO make batch size configurable
- const queueItems = yield* BucketQueue.takeBetween(syncBackendQueue, 1, BACKEND_PUSH_BATCH_SIZE);
+ const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, BACKEND_PUSH_BATCH_SIZE);
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
  if (devtoolsLatch !== undefined) {
  yield* devtoolsLatch.await;
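
The pushing loop above boils down to: wait until the backend connection flag is true, take a batch of 1 to BACKEND_PUSH_BATCH_SIZE items, and push it. A condensed sketch with the plain `effect` package (`Queue` standing in for `BucketQueue`; the diff's `SubscriptionRef.waitUntil` appears to be a LiveStore helper, so this waits via the `changes` stream instead):

import { Effect, Queue, Stream, SubscriptionRef } from 'effect'

const BACKEND_PUSH_BATCH_SIZE = 50

// Block until the connection flag becomes (or already is) true.
const waitUntilConnected = (isConnected: SubscriptionRef.SubscriptionRef<boolean>) =>
  isConnected.changes.pipe(
    Stream.filter((connected) => connected),
    Stream.runHead,
  )

const pushLoop = (
  isConnected: SubscriptionRef.SubscriptionRef<boolean>,
  queue: Queue.Queue<string>,
  pushToBackend: (batch: ReadonlyArray<string>) => Effect.Effect<void>,
) =>
  Effect.gen(function* () {
    while (true) {
      yield* waitUntilConnected(isConnected)
      // Take between 1 and BACKEND_PUSH_BATCH_SIZE queued events per cycle.
      const batch = yield* Queue.takeBetween(queue, 1, BACKEND_PUSH_BATCH_SIZE)
      yield* pushToBackend(Array.from(batch))
    }
  })
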
@@ -508,22 +493,49 @@ const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtool
  // wait for interrupt caused by background pulling which will then restart pushing
  return yield* Effect.never;
  }
- const { metadata } = pushResult.right;
- // TODO try to do this in a single query
- for (let i = 0; i < queueItems.length; i++) {
- const mutationEventEncoded = queueItems[i];
- yield* execSql(dbMutationLog, ...updateRows({
- tableName: MUTATION_LOG_META_TABLE,
- columns: mutationLogMetaTable.sqliteDef.columns,
- where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
- updateValues: { syncMetadataJson: metadata[i] },
- }));
- }
  }
- }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'));
+ }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'));
  const trimChangesetRows = (db, newHead) => {
  // Since we're using the session changeset rows to query for the current head,
  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
  db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`);
  };
+ const makePullQueueSet = Effect.gen(function* () {
+ const set = new Set();
+ yield* Effect.addFinalizer(() => Effect.gen(function* () {
+ for (const queue of set) {
+ yield* Queue.shutdown(queue);
+ }
+ set.clear();
+ }));
+ const makeQueue = () => Effect.gen(function* () {
+ const queue = yield* Queue.unbounded().pipe(Effect.acquireRelease(Queue.shutdown));
+ yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)));
+ set.add(queue);
+ return queue;
+ });
+ const offer = (item) => Effect.gen(function* () {
+ // Short-circuit if the payload is an empty upstream advance
+ if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
+ return;
+ }
+ for (const queue of set) {
+ yield* Queue.offer(queue, item);
+ }
+ });
+ return {
+ makeQueue,
+ offer,
+ };
+ });
+ const incrementMergeCounter = (mergeCounterRef) => Effect.gen(function* () {
+ const { dbReadModel } = yield* LeaderThreadCtx;
+ mergeCounterRef.current++;
+ dbReadModel.execute(sql `INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`);
+ return mergeCounterRef.current;
+ });
+ const getMergeCounterFromDb = (dbReadModel) => Effect.gen(function* () {
+ const result = dbReadModel.select(sql `SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`);
+ return result[0]?.mergeCounter ?? 0;
+ });
  //# sourceMappingURL=LeaderSyncProcessor.js.map
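
The merge counter introduced in this version lives both in memory (`mergeCounterRef`) and in a single-row table, so a restarted leader resumes numbering where the previous one stopped. The same round-trip against raw SQLite, assuming better-sqlite3 and the single-row convention used by `incrementMergeCounter`/`getMergeCounterFromDb` above (the table name stands in for LEADER_MERGE_COUNTER_TABLE):

import Database from 'better-sqlite3'

const db = new Database('readmodel.db')
db.exec('CREATE TABLE IF NOT EXISTS leader_merge_counter (id INTEGER PRIMARY KEY, mergeCounter INTEGER)')

// Recover the counter on boot; default to 0 for a fresh database.
const row = db.prepare('SELECT mergeCounter FROM leader_merge_counter WHERE id = 0').get() as
  | { mergeCounter: number }
  | undefined
const mergeCounterRef = { current: row?.mergeCounter ?? 0 }

// Each merge bumps the in-memory counter and persists it in the same row.
const incrementMergeCounter = () => {
  mergeCounterRef.current++
  db.prepare('INSERT OR REPLACE INTO leader_merge_counter (id, mergeCounter) VALUES (0, ?)').run(
    mergeCounterRef.current,
  )
  return mergeCounterRef.current
}
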