@livestore/common 0.0.0-snapshot-2b8a9de3ec1a701aca891ebc2c98eb328274ae9e → 0.0.0-snapshot-2ef046b02334f52613d31dbe06af53487685edc0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/adapter-types.d.ts +4 -11
  3. package/dist/adapter-types.d.ts.map +1 -1
  4. package/dist/adapter-types.js +0 -6
  5. package/dist/adapter-types.js.map +1 -1
  6. package/dist/devtools/devtools-messages-common.d.ts +7 -0
  7. package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
  8. package/dist/devtools/devtools-messages-common.js +6 -0
  9. package/dist/devtools/devtools-messages-common.js.map +1 -1
  10. package/dist/devtools/devtools-messages-leader.d.ts +1 -1
  11. package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
  12. package/dist/devtools/devtools-messages-leader.js +1 -2
  13. package/dist/devtools/devtools-messages-leader.js.map +1 -1
  14. package/dist/leader-thread/LeaderSyncProcessor.d.ts +15 -6
  15. package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
  16. package/dist/leader-thread/LeaderSyncProcessor.js +199 -189
  17. package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
  18. package/dist/leader-thread/apply-mutation.d.ts +14 -9
  19. package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
  20. package/dist/leader-thread/apply-mutation.js +43 -36
  21. package/dist/leader-thread/apply-mutation.js.map +1 -1
  22. package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
  23. package/dist/leader-thread/leader-worker-devtools.js +2 -5
  24. package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
  25. package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
  26. package/dist/leader-thread/make-leader-thread-layer.js +21 -33
  27. package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
  28. package/dist/leader-thread/mod.d.ts +1 -1
  29. package/dist/leader-thread/mod.d.ts.map +1 -1
  30. package/dist/leader-thread/mod.js +1 -1
  31. package/dist/leader-thread/mod.js.map +1 -1
  32. package/dist/leader-thread/mutationlog.d.ts +19 -3
  33. package/dist/leader-thread/mutationlog.d.ts.map +1 -1
  34. package/dist/leader-thread/mutationlog.js +105 -12
  35. package/dist/leader-thread/mutationlog.js.map +1 -1
  36. package/dist/leader-thread/recreate-db.d.ts.map +1 -1
  37. package/dist/leader-thread/recreate-db.js +4 -3
  38. package/dist/leader-thread/recreate-db.js.map +1 -1
  39. package/dist/leader-thread/types.d.ts +35 -19
  40. package/dist/leader-thread/types.d.ts.map +1 -1
  41. package/dist/leader-thread/types.js.map +1 -1
  42. package/dist/rehydrate-from-mutationlog.d.ts +5 -4
  43. package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
  44. package/dist/rehydrate-from-mutationlog.js +7 -9
  45. package/dist/rehydrate-from-mutationlog.js.map +1 -1
  46. package/dist/schema/EventId.d.ts +4 -0
  47. package/dist/schema/EventId.d.ts.map +1 -1
  48. package/dist/schema/EventId.js +7 -1
  49. package/dist/schema/EventId.js.map +1 -1
  50. package/dist/schema/MutationEvent.d.ts +78 -25
  51. package/dist/schema/MutationEvent.d.ts.map +1 -1
  52. package/dist/schema/MutationEvent.js +25 -12
  53. package/dist/schema/MutationEvent.js.map +1 -1
  54. package/dist/schema/schema.js +1 -1
  55. package/dist/schema/schema.js.map +1 -1
  56. package/dist/schema/system-tables.d.ts +67 -0
  57. package/dist/schema/system-tables.d.ts.map +1 -1
  58. package/dist/schema/system-tables.js +12 -1
  59. package/dist/schema/system-tables.js.map +1 -1
  60. package/dist/sync/ClientSessionSyncProcessor.d.ts +9 -1
  61. package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
  62. package/dist/sync/ClientSessionSyncProcessor.js +23 -19
  63. package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
  64. package/dist/sync/sync.d.ts +6 -5
  65. package/dist/sync/sync.d.ts.map +1 -1
  66. package/dist/sync/sync.js.map +1 -1
  67. package/dist/sync/syncstate.d.ts +47 -71
  68. package/dist/sync/syncstate.d.ts.map +1 -1
  69. package/dist/sync/syncstate.js +76 -112
  70. package/dist/sync/syncstate.js.map +1 -1
  71. package/dist/sync/syncstate.test.js +67 -139
  72. package/dist/sync/syncstate.test.js.map +1 -1
  73. package/package.json +2 -2
  74. package/src/adapter-types.ts +3 -12
  75. package/src/devtools/devtools-messages-common.ts +9 -0
  76. package/src/devtools/devtools-messages-leader.ts +1 -2
  77. package/src/leader-thread/LeaderSyncProcessor.ts +372 -348
  78. package/src/leader-thread/apply-mutation.ts +81 -71
  79. package/src/leader-thread/leader-worker-devtools.ts +3 -8
  80. package/src/leader-thread/make-leader-thread-layer.ts +26 -41
  81. package/src/leader-thread/mod.ts +1 -1
  82. package/src/leader-thread/mutationlog.ts +166 -13
  83. package/src/leader-thread/recreate-db.ts +4 -3
  84. package/src/leader-thread/types.ts +34 -23
  85. package/src/rehydrate-from-mutationlog.ts +12 -12
  86. package/src/schema/EventId.ts +8 -1
  87. package/src/schema/MutationEvent.ts +32 -16
  88. package/src/schema/schema.ts +1 -1
  89. package/src/schema/system-tables.ts +20 -1
  90. package/src/sync/ClientSessionSyncProcessor.ts +33 -25
  91. package/src/sync/sync.ts +6 -9
  92. package/src/sync/syncstate.test.ts +130 -208
  93. package/src/sync/syncstate.ts +76 -123
  94. package/dist/leader-thread/pull-queue-set.d.ts +0 -7
  95. package/dist/leader-thread/pull-queue-set.d.ts.map +0 -1
  96. package/dist/leader-thread/pull-queue-set.js +0 -48
  97. package/dist/leader-thread/pull-queue-set.js.map +0 -1
  98. package/src/leader-thread/pull-queue-set.ts +0 -67
@@ -1,16 +1,15 @@
  import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils';
- import { BucketQueue, Deferred, Effect, Exit, FiberHandle, Option, OtelTracer, ReadonlyArray, Schema, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
+ import { BucketQueue, Deferred, Effect, Exit, FiberHandle, OtelTracer, Queue, ReadonlyArray, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
  import { UnexpectedError } from '../adapter-types.js';
- import { EventId, getMutationDef, MUTATION_LOG_META_TABLE, MutationEvent, mutationLogMetaTable, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
- import { updateRows } from '../sql-queries/index.js';
+ import { EventId, getMutationDef, LEADER_MERGE_COUNTER_TABLE, MutationEvent, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
  import { LeaderAheadError } from '../sync/sync.js';
  import * as SyncState from '../sync/syncstate.js';
  import { sql } from '../util.js';
- import { makeApplyMutation } from './apply-mutation.js';
- import { execSql } from './connection.js';
- import { getBackendHeadFromDb, getClientHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js';
+ import { rollback } from './apply-mutation.js';
+ import * as Mutationlog from './mutationlog.js';
  import { LeaderThreadCtx } from './types.js';
  export const BACKEND_PUSH_BATCH_SIZE = 50;
+ export const LOCAL_PUSH_BATCH_SIZE = 10;
  /**
   * The LeaderSyncProcessor manages synchronization of mutations between
   * the local state and the sync backend, ensuring efficient and orderly processing.
@@ -18,35 +17,44 @@ export const BACKEND_PUSH_BATCH_SIZE = 50;
   * In the LeaderSyncProcessor, pulling always has precedence over pushing.
   *
   * Responsibilities:
-  * - Queueing incoming local mutations in a localPushMailbox.
+  * - Queueing incoming local mutations in a localPushesQueue.
   * - Broadcasting mutations to client sessions via pull queues.
   * - Pushing mutations to the sync backend.
   *
   * Notes:
   *
   * local push processing:
-  * - localPushMailbox:
+  * - localPushesQueue:
   *   - Maintains events in ascending order.
   *   - Uses `Deferred` objects to resolve/reject events based on application success.
-  * - Processes events from the mailbox, applying mutations in batches.
+  * - Processes events from the queue, applying mutations in batches.
   * - Controlled by a `Latch` to manage execution flow.
   * - The latch closes on pull receipt and re-opens post-pull completion.
   * - Processes up to `maxBatchSize` events per cycle.
   *
+  * Currently we're advancing the db read model and mutation log in lockstep, but we could also decouple this in the future.
+  *
+  * Tricky concurrency scenarios:
+  * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+  *   Solution: Introduce a generation number for local push batches which is used to filter out old batch items in case of rejection.
+  *
   */
- export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clientId, initialBlockingSyncContext, onError, }) => Effect.gen(function* () {
-     const syncBackendQueue = yield* BucketQueue.make();
+ export const makeLeaderSyncProcessor = ({ schema, dbMutationLogMissing, dbMutationLog, dbReadModel, dbReadModelMissing, initialBlockingSyncContext, onError, }) => Effect.gen(function* () {
+     const syncBackendPushQueue = yield* BucketQueue.make();
      const syncStateSref = yield* SubscriptionRef.make(undefined);
      const isClientEvent = (mutationEventEncoded) => {
          const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
          return mutationDef.options.clientOnly;
      };
+     const connectedClientSessionPullQueues = yield* makePullQueueSet;
      /**
       * Tracks generations of queued local push events.
-      * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+      * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
       * even if they would be valid on their own.
       */
      const currentLocalPushGenerationRef = { current: 0 };
+     const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) };
+     const mergePayloads = new Map();
      // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
      const ctxRef = {
          current: undefined,
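
The generation scheme called out in the JSDoc above can be shown in a few lines. A minimal sketch, assuming simplified item and ref types (the actual filtering lives in backgroundApplyLocalPushes further down, via BucketQueue.takeSplitWhere):

    type QueueItem = [event: unknown, deferred: unknown, generation: number]

    const generationRef = { current: 0 }

    // On rejection: bump the generation, then drop every queued item that
    // still carries the rejected (now stale) generation.
    const rejectStaleItems = (queue: QueueItem[]): QueueItem[] => {
      generationRef.current++
      return queue.filter(([, , generation]) => generation >= generationRef.current)
    }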
@@ -54,21 +62,11 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
      const localPushesQueue = yield* BucketQueue.make();
      const localPushesLatch = yield* Effect.makeLatch(true);
      const pullLatch = yield* Effect.makeLatch(true);
+     // NOTE: New events are only pushed to sync backend after successful local push processing
      const push = (newEvents, options) => Effect.gen(function* () {
          // TODO validate batch
          if (newEvents.length === 0)
              return;
-         // if (options.generation < currentLocalPushGenerationRef.current) {
-         //   debugger
-         //   // We can safely drop this batch as it's from a previous push generation
-         //   return
-         // }
-         if (clientId === 'client-b') {
-             // console.log(
-             //   'push from client session',
-             //   newEvents.map((item) => item.toJSON()),
-             // )
-         }
          const waitForProcessing = options?.waitForProcessing ?? false;
          const generation = currentLocalPushGenerationRef.current;
          if (waitForProcessing) {
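
When waitForProcessing is enabled (the branch above), each event is paired with a Deferred that the apply loop later resolves. A rough sketch of that handshake, assuming the BucketQueue and Deferred APIs imported at the top of this file (parameter types elided):

    import { BucketQueue, Deferred, Effect } from '@livestore/utils/effect'

    // Sketch: pair each event with a Deferred, enqueue, then await them all.
    // The consumer resolves each Deferred once the event has been applied.
    const pushAndWait = <A>(queue: any, events: ReadonlyArray<A>, generation: number) =>
      Effect.gen(function* () {
        const deferreds = yield* Effect.forEach(events, () => Deferred.make<void>())
        const items = events.map((event, i) => [event, deferreds[i], generation] as const)
        yield* BucketQueue.offerAll(queue, items)
        yield* Effect.all(deferreds.map((deferred) => Deferred.await(deferred)))
      })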
@@ -81,20 +79,21 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
              const items = newEvents.map((mutationEventEncoded) => [mutationEventEncoded, undefined, generation]);
              yield* BucketQueue.offerAll(localPushesQueue, items);
          }
-     }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
+     }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:local-push', {
          attributes: {
              batchSize: newEvents.length,
              batch: TRACE_VERBOSE ? newEvents : undefined,
          },
          links: ctxRef.current?.span ? [{ _tag: 'SpanLink', span: ctxRef.current.span, attributes: {} }] : undefined,
      }));
-     const pushPartial = ({ mutationEvent: partialMutationEvent, clientId, sessionId, }) => Effect.gen(function* () {
+     const pushPartial = ({ mutationEvent: { mutation, args }, clientId, sessionId, }) => Effect.gen(function* () {
          const syncState = yield* syncStateSref;
          if (syncState === undefined)
              return shouldNeverHappen('Not initialized');
-         const mutationDef = getMutationDef(schema, partialMutationEvent.mutation);
+         const mutationDef = getMutationDef(schema, mutation);
          const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
-             ...partialMutationEvent,
+             mutation,
+             args,
              clientId,
              sessionId,
              ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
@@ -102,28 +101,29 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
          yield* push([mutationEventEncoded]);
      }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie));
      // Starts various background loops
-     const boot = ({ dbReady }) => Effect.gen(function* () {
+     const boot = Effect.gen(function* () {
          const span = yield* Effect.currentSpan.pipe(Effect.orDie);
          const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)));
          const { devtools, shutdownChannel } = yield* LeaderThreadCtx;
+         const runtime = yield* Effect.runtime();
          ctxRef.current = {
              otelSpan,
              span,
              devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+             runtime,
          };
-         const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbMutationLog);
-         const initialLocalHead = dbMissing ? EventId.ROOT : getClientHeadFromDb(dbMutationLog);
+         const initialBackendHead = dbMutationLogMissing
+             ? EventId.ROOT.global
+             : Mutationlog.getBackendHeadFromDb(dbMutationLog);
+         const initialLocalHead = dbMutationLogMissing ? EventId.ROOT : Mutationlog.getClientHeadFromDb(dbMutationLog);
          if (initialBackendHead > initialLocalHead.global) {
              return shouldNeverHappen(`During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`);
          }
-         const pendingMutationEvents = yield* getMutationEventsSince({
-             global: initialBackendHead,
-             client: EventId.clientDefault,
-         }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))));
+         const pendingMutationEvents = dbMutationLogMissing
+             ? []
+             : yield* Mutationlog.getMutationEventsSince({ global: initialBackendHead, client: EventId.clientDefault });
          const initialSyncState = new SyncState.SyncState({
              pending: pendingMutationEvents,
-             // On the leader we don't need a rollback tail beyond `pending` items
-             rollbackTail: [],
              upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
              localHead: initialLocalHead,
          });
@@ -131,13 +131,15 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
          yield* SubscriptionRef.set(syncStateSref, initialSyncState);
          // Rehydrate sync queue
          if (pendingMutationEvents.length > 0) {
-             const filteredBatch = pendingMutationEvents
+             const globalPendingMutationEvents = pendingMutationEvents
                  // Don't sync clientOnly mutations
                  .filter((mutationEventEncoded) => {
                      const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
                      return mutationDef.options.clientOnly === false;
                  });
-             yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
+             if (globalPendingMutationEvents.length > 0) {
+                 yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingMutationEvents);
+             }
          }
          const shutdownOnError = (cause) => Effect.gen(function* () {
              if (onError === 'shutdown') {
@@ -150,33 +152,33 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
              localPushesQueue,
              pullLatch,
              syncStateSref,
-             syncBackendQueue,
+             syncBackendPushQueue,
              schema,
              isClientEvent,
              otelSpan,
              currentLocalPushGenerationRef,
+             connectedClientSessionPullQueues,
+             mergeCounterRef,
+             mergePayloads,
          }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
          const backendPushingFiberHandle = yield* FiberHandle.make();
          yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
-             dbReady,
-             syncBackendQueue,
+             syncBackendPushQueue,
              otelSpan,
              devtoolsLatch: ctxRef.current?.devtoolsLatch,
          }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
          yield* backgroundBackendPulling({
-             dbReady,
              initialBackendHead,
              isClientEvent,
              restartBackendPushing: (filteredRebasedPending) => Effect.gen(function* () {
                  // Stop current pushing fiber
                  yield* FiberHandle.clear(backendPushingFiberHandle);
-                 // Reset the sync queue
-                 yield* BucketQueue.clear(syncBackendQueue);
-                 yield* BucketQueue.offerAll(syncBackendQueue, filteredRebasedPending);
+                 // Reset the sync backend push queue
+                 yield* BucketQueue.clear(syncBackendPushQueue);
+                 yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending);
                  // Restart pushing fiber
                  yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
-                     dbReady,
-                     syncBackendQueue,
+                     syncBackendPushQueue,
                      otelSpan,
                      devtoolsLatch: ctxRef.current?.devtoolsLatch,
                  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
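
restartBackendPushing above follows a stop/reset/restart pattern around the push fiber. A condensed sketch of just that pattern, reusing the FiberHandle and BucketQueue helpers this file already imports (parameter types elided; `pushLoop` stands in for backgroundBackendPushing):

    import { BucketQueue, Effect, FiberHandle } from '@livestore/utils/effect'

    // Sketch: on a rebase, stop the push fiber, swap out the queue contents,
    // then start a fresh fiber.
    const restartPushing = (handle: any, queue: any, rebasedPending: ReadonlyArray<unknown>, pushLoop: any) =>
      Effect.gen(function* () {
        yield* FiberHandle.clear(handle) // interrupt the currently running push fiber
        yield* BucketQueue.clear(queue) // drop queue items that are now stale
        yield* BucketQueue.offerAll(queue, rebasedPending) // re-seed with the rebased events
        yield* FiberHandle.run(handle, pushLoop) // start pushing again
      })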
@@ -187,30 +189,53 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
              otelSpan,
              initialBlockingSyncContext,
              devtoolsLatch: ctxRef.current?.devtoolsLatch,
+             connectedClientSessionPullQueues,
+             mergeCounterRef,
+             mergePayloads,
          }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
          return { initialLeaderHead: initialLocalHead };
-     }).pipe(Effect.withSpanScoped('@livestore/common:leader-thread:syncing'));
+     }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'));
+     const pull = ({ cursor }) => {
+         return Effect.gen(function* () {
+             const queue = yield* pullQueue({ cursor });
+             return Stream.fromQueue(queue);
+         }).pipe(Stream.unwrapScoped);
+     };
+     const pullQueue = ({ cursor }) => {
+         const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized');
+         return Effect.gen(function* () {
+             const queue = yield* connectedClientSessionPullQueues.makeQueue(cursor);
+             const payloadsSinceCursor = Array.from(mergePayloads.entries())
+                 .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+                 .filter(({ mergeCounter }) => mergeCounter > cursor)
+                 .toSorted((a, b) => a.mergeCounter - b.mergeCounter);
+             yield* queue.offerAll(payloadsSinceCursor);
+             return queue;
+         }).pipe(Effect.provide(runtime));
+     };
+     const syncState = Subscribable.make({
+         get: Effect.gen(function* () {
+             const syncState = yield* syncStateSref;
+             if (syncState === undefined)
+                 return shouldNeverHappen('Not initialized');
+             return syncState;
+         }),
+         changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+     });
      return {
+         pull,
+         pullQueue,
          push,
          pushPartial,
          boot,
-         syncState: Subscribable.make({
-             get: Effect.gen(function* () {
-                 const syncState = yield* syncStateSref;
-                 if (syncState === undefined)
-                     return shouldNeverHappen('Not initialized');
-                 return syncState;
-             }),
-             changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
-         }),
+         syncState,
+         getMergeCounter: () => mergeCounterRef.current,
      };
  });
- const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, }) => Effect.gen(function* () {
-     const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx;
-     const applyMutationItems = yield* makeApplyMutationItems;
+ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendPushQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, }) => Effect.gen(function* () {
      while (true) {
          // TODO make batch size configurable
-         const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10);
+         const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, LOCAL_PUSH_BATCH_SIZE);
          // Wait for the backend pulling to finish
          yield* localPushesLatch.await;
          // Prevent backend pull processing until this local push is finished
@@ -236,9 +261,10 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
              isClientEvent,
              isEqualEvent: MutationEvent.isEqualEncoded,
          });
+         const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
          switch (mergeResult._tag) {
              case 'unexpected-error': {
-                 otelSpan?.addEvent('local-push:unexpected-error', {
+                 otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:unexpected-error`, {
                      batchSize: newEvents.length,
                      newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
                  });
@@ -248,14 +274,11 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
                  return shouldNeverHappen('The leader thread should never have to rebase due to a local push');
              }
              case 'reject': {
-                 otelSpan?.addEvent('local-push:reject', {
+                 otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:reject`, {
                      batchSize: newEvents.length,
                      mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
                  });
-                 /*
-
-                 TODO: how to test this?
-                 */
+                 // TODO: how to test this?
                  currentLocalPushGenerationRef.current++;
                  const nextGeneration = currentLocalPushGenerationRef.current;
                  const providedId = newEvents.at(0).id;
@@ -263,7 +286,8 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
                  // We're also handling the case where the localPushQueue already contains events
                  // from the next generation which we preserve in the queue
                  const remainingEventsMatchingGeneration = yield* BucketQueue.takeSplitWhere(localPushesQueue, (item) => item[2] >= nextGeneration);
-                 if ((yield* BucketQueue.size(localPushesQueue)) > 0) {
+                 // TODO we still need to better understand and handle this scenario
+                 if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
                      console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue));
                      debugger;
                  }
@@ -290,15 +314,12 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
              }
          }
          yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
-         if (clientId === 'client-b') {
-             // yield* Effect.log('offer upstream-advance due to local-push')
-             // debugger
-         }
          yield* connectedClientSessionPullQueues.offer({
-             payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents },
-             remaining: 0,
+             payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+             mergeCounter,
         });
-         otelSpan?.addEvent('local-push', {
+         mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
+         otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:advance`, {
              batchSize: newEvents.length,
              mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          });
@@ -307,45 +328,41 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
              const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
              return mutationDef.options.clientOnly === false;
          });
-         yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
-         yield* applyMutationItems({ batchItems: newEvents, deferreds });
+         yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch);
+         yield* applyMutationsBatch({ batchItems: newEvents, deferreds });
          // Allow the backend pulling to start
          yield* pullLatch.open;
      }
  });
  // TODO how to handle errors gracefully
- const makeApplyMutationItems = Effect.gen(function* () {
-     const leaderThreadCtx = yield* LeaderThreadCtx;
-     const { dbReadModel: db, dbMutationLog } = leaderThreadCtx;
-     const applyMutation = yield* makeApplyMutation;
-     return ({ batchItems, deferreds }) => Effect.gen(function* () {
-         db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
-         dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
-         yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
-             if (Exit.isSuccess(exit))
-                 return;
-             // Rollback in case of an error
-             db.execute('ROLLBACK', undefined);
-             dbMutationLog.execute('ROLLBACK', undefined);
-         }));
-         for (let i = 0; i < batchItems.length; i++) {
-             yield* applyMutation(batchItems[i]);
-             if (deferreds?.[i] !== undefined) {
-                 yield* Deferred.succeed(deferreds[i], void 0);
-             }
+ const applyMutationsBatch = ({ batchItems, deferreds }) => Effect.gen(function* () {
+     const { dbReadModel: db, dbMutationLog, applyMutation } = yield* LeaderThreadCtx;
+     // NOTE We always start a transaction to ensure consistency between db and mutation log (even for single-item batches)
+     db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+     dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+     yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
+         if (Exit.isSuccess(exit))
+             return;
+         // Rollback in case of an error
+         db.execute('ROLLBACK', undefined);
+         dbMutationLog.execute('ROLLBACK', undefined);
+     }));
+     for (let i = 0; i < batchItems.length; i++) {
+         const { sessionChangeset } = yield* applyMutation(batchItems[i]);
+         batchItems[i].meta.sessionChangeset = sessionChangeset;
+         if (deferreds?.[i] !== undefined) {
+             yield* Deferred.succeed(deferreds[i], void 0);
          }
-         db.execute('COMMIT', undefined); // Commit the transaction
-         dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
-     }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems', {
-         attributes: { count: batchItems.length },
-     }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
- });
- const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, }) => Effect.gen(function* () {
-     const { syncBackend, dbReadModel: db, dbMutationLog, connectedClientSessionPullQueues, schema, clientId, } = yield* LeaderThreadCtx;
+     }
+     db.execute('COMMIT', undefined); // Commit the transaction
+     dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
+ }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyMutationItems', {
+     attributes: { batchSize: batchItems.length },
+ }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
+ const backgroundBackendPulling = ({ initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, }) => Effect.gen(function* () {
+     const { syncBackend, dbReadModel: db, dbMutationLog, schema } = yield* LeaderThreadCtx;
      if (syncBackend === undefined)
          return;
-     const cursorInfo = yield* getCursorInfo(initialBackendHead);
-     const applyMutationItems = yield* makeApplyMutationItems;
      const onNewPullChunk = (newEvents, remaining) => Effect.gen(function* () {
          if (newEvents.length === 0)
              return;
359
376
  const syncState = yield* syncStateSref;
360
377
  if (syncState === undefined)
361
378
  return shouldNeverHappen('Not initialized');
362
- const trimRollbackUntil = newEvents.at(-1).id;
363
379
  const mergeResult = SyncState.merge({
364
380
  syncState,
365
- payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
381
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
366
382
  isClientEvent,
367
383
  isEqualEvent: MutationEvent.isEqualEncoded,
368
384
  ignoreClientEvents: true,
369
385
  });
386
+ const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
370
387
  if (mergeResult._tag === 'reject') {
371
388
  return shouldNeverHappen('The leader thread should never reject upstream advances');
372
389
  }
373
390
  else if (mergeResult._tag === 'unexpected-error') {
374
- otelSpan?.addEvent('backend-pull:unexpected-error', {
391
+ otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:unexpected-error`, {
375
392
  newEventsCount: newEvents.length,
376
393
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
377
394
  });
378
395
  return yield* Effect.fail(mergeResult.cause);
379
396
  }
380
397
  const newBackendHead = newEvents.at(-1).id;
381
- updateBackendHead(dbMutationLog, newBackendHead);
398
+ Mutationlog.updateBackendHead(dbMutationLog, newBackendHead);
382
399
  if (mergeResult._tag === 'rebase') {
383
- otelSpan?.addEvent('backend-pull:rebase', {
400
+ otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:rebase`, {
384
401
  newEventsCount: newEvents.length,
385
402
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
386
- rollbackCount: mergeResult.eventsToRollback.length,
403
+ rollbackCount: mergeResult.rollbackEvents.length,
387
404
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
388
405
  });
389
- const filteredRebasedPending = mergeResult.newSyncState.pending.filter((mutationEvent) => {
406
+ const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((mutationEvent) => {
390
407
  const mutationDef = getMutationDef(schema, mutationEvent.mutation);
391
408
  return mutationDef.options.clientOnly === false;
392
409
  });
393
- yield* restartBackendPushing(filteredRebasedPending);
394
- if (mergeResult.eventsToRollback.length > 0) {
395
- yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.eventsToRollback.map((_) => _.id) });
410
+ yield* restartBackendPushing(globalRebasedPendingEvents);
411
+ if (mergeResult.rollbackEvents.length > 0) {
412
+ yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) });
396
413
  }
397
414
  yield* connectedClientSessionPullQueues.offer({
398
- payload: {
399
- _tag: 'upstream-rebase',
415
+ payload: SyncState.PayloadUpstreamRebase.make({
400
416
  newEvents: mergeResult.newEvents,
401
- rollbackUntil: mergeResult.eventsToRollback.at(0).id,
402
- trimRollbackUntil,
403
- },
404
- remaining,
417
+ rollbackEvents: mergeResult.rollbackEvents,
418
+ }),
419
+ mergeCounter,
405
420
  });
421
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamRebase.make({
422
+ newEvents: mergeResult.newEvents,
423
+ rollbackEvents: mergeResult.rollbackEvents,
424
+ }));
406
425
  }
407
426
  else {
408
- otelSpan?.addEvent('backend-pull:advance', {
427
+ otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:advance`, {
409
428
  newEventsCount: newEvents.length,
410
429
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
411
430
  });
412
- if (clientId === 'client-b') {
413
- // yield* Effect.log('offer upstream-advance due to pull')
414
- }
415
431
  yield* connectedClientSessionPullQueues.offer({
416
- payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents, trimRollbackUntil },
417
- remaining,
432
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
433
+ mergeCounter,
418
434
  });
435
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
436
+ if (mergeResult.confirmedEvents.length > 0) {
437
+ // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
438
+ // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
439
+ const confirmedNewEvents = newEvents.filter((mutationEvent) => mergeResult.confirmedEvents.some((confirmedEvent) => EventId.isEqual(mutationEvent.id, confirmedEvent.id)));
440
+ yield* Mutationlog.updateSyncMetadata(confirmedNewEvents);
441
+ }
419
442
  }
443
+ // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
420
444
  trimChangesetRows(db, newBackendHead);
421
- yield* applyMutationItems({ batchItems: mergeResult.newEvents, deferreds: undefined });
445
+ yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined });
422
446
  yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
447
+ // Allow local pushes to be processed again
423
448
  if (remaining === 0) {
424
- // Allow local pushes to be processed again
425
449
  yield* localPushesLatch.open;
426
450
  }
427
451
  });
452
+ const cursorInfo = yield* Mutationlog.getSyncBackendCursorInfo(initialBackendHead);
428
453
  yield* syncBackend.pull(cursorInfo).pipe(
429
454
  // TODO only take from queue while connected
430
455
  Stream.tap(({ batch, remaining }) => Effect.gen(function* () {
@@ -434,64 +459,22 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent,
434
459
  // batch: TRACE_VERBOSE ? batch : undefined,
435
460
  // },
436
461
  // })
437
- // Wait for the db to be initially created
438
- yield* dbReady;
439
462
  // NOTE we only want to take process mutations when the sync backend is connected
440
463
  // (e.g. needed for simulating being offline)
441
464
  // TODO remove when there's a better way to handle this in stream above
442
465
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
443
- yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)), remaining);
466
+ yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded, _.metadata)), remaining);
444
467
  yield* initialBlockingSyncContext.update({ processed: batch.length, remaining });
445
468
  })), Stream.runDrain, Effect.interruptible);
446
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pulling'));
447
- const rollback = ({ db, dbMutationLog, eventIdsToRollback, }) => Effect.gen(function* () {
448
- const rollbackEvents = db
449
- .select(sql `SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`)
450
- .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
451
- .sort((a, b) => EventId.compare(a.id, b.id));
452
- // TODO bring back `.toSorted` once Expo supports it
453
- // .toSorted((a, b) => EventId.compare(a.id, b.id))
454
- // Apply changesets in reverse order
455
- for (let i = rollbackEvents.length - 1; i >= 0; i--) {
456
- const { changeset } = rollbackEvents[i];
457
- if (changeset !== null) {
458
- db.makeChangeset(changeset).invert().apply();
459
- }
460
- }
461
- const eventIdPairChunks = ReadonlyArray.chunksOf(100)(eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`));
462
- // Delete the changeset rows
463
- for (const eventIdPairChunk of eventIdPairChunks) {
464
- db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
465
- }
466
- // Delete the mutation log rows
467
- for (const eventIdPairChunk of eventIdPairChunks) {
468
- dbMutationLog.execute(sql `DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
469
- }
470
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
471
- attributes: { count: eventIdsToRollback.length },
472
- }));
473
- const getCursorInfo = (remoteHead) => Effect.gen(function* () {
474
- const { dbMutationLog } = yield* LeaderThreadCtx;
475
- if (remoteHead === EventId.ROOT.global)
476
- return Option.none();
477
- const MutationlogQuerySchema = Schema.Struct({
478
- syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
479
- }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head);
480
- const syncMetadataOption = yield* Effect.sync(() => dbMutationLog.select(sql `SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`)).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie);
481
- return Option.some({
482
- cursor: { global: remoteHead, client: EventId.clientDefault },
483
- metadata: syncMetadataOption,
484
- });
485
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }));
486
- const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
487
- const { syncBackend, dbMutationLog } = yield* LeaderThreadCtx;
469
+ }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'));
470
+ const backgroundBackendPushing = ({ syncBackendPushQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
471
+ const { syncBackend } = yield* LeaderThreadCtx;
488
472
  if (syncBackend === undefined)
489
473
  return;
490
- yield* dbReady;
491
474
  while (true) {
492
475
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
493
476
  // TODO make batch size configurable
494
- const queueItems = yield* BucketQueue.takeBetween(syncBackendQueue, 1, BACKEND_PUSH_BATCH_SIZE);
477
+ const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, BACKEND_PUSH_BATCH_SIZE);
495
478
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
496
479
  if (devtoolsLatch !== undefined) {
497
480
  yield* devtoolsLatch.await;
@@ -510,22 +493,49 @@ const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtool
510
493
  // wait for interrupt caused by background pulling which will then restart pushing
511
494
  return yield* Effect.never;
512
495
  }
513
- const { metadata } = pushResult.right;
514
- // TODO try to do this in a single query
515
- for (let i = 0; i < queueItems.length; i++) {
516
- const mutationEventEncoded = queueItems[i];
517
- yield* execSql(dbMutationLog, ...updateRows({
518
- tableName: MUTATION_LOG_META_TABLE,
519
- columns: mutationLogMetaTable.sqliteDef.columns,
520
- where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
521
- updateValues: { syncMetadataJson: metadata[i] },
522
- }));
523
- }
524
496
  }
525
- }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'));
497
+ }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'));
526
498
  const trimChangesetRows = (db, newHead) => {
527
499
  // Since we're using the session changeset rows to query for the current head,
528
500
  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
529
501
  db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`);
530
502
  };
503
+ const makePullQueueSet = Effect.gen(function* () {
504
+ const set = new Set();
505
+ yield* Effect.addFinalizer(() => Effect.gen(function* () {
506
+ for (const queue of set) {
507
+ yield* Queue.shutdown(queue);
508
+ }
509
+ set.clear();
510
+ }));
511
+ const makeQueue = () => Effect.gen(function* () {
512
+ const queue = yield* Queue.unbounded().pipe(Effect.acquireRelease(Queue.shutdown));
513
+ yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)));
514
+ set.add(queue);
515
+ return queue;
516
+ });
517
+ const offer = (item) => Effect.gen(function* () {
518
+ // Short-circuit if the payload is an empty upstream advance
519
+ if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
520
+ return;
521
+ }
522
+ for (const queue of set) {
523
+ yield* Queue.offer(queue, item);
524
+ }
525
+ });
526
+ return {
527
+ makeQueue,
528
+ offer,
529
+ };
530
+ });
531
+ const incrementMergeCounter = (mergeCounterRef) => Effect.gen(function* () {
532
+ const { dbReadModel } = yield* LeaderThreadCtx;
533
+ mergeCounterRef.current++;
534
+ dbReadModel.execute(sql `INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`);
535
+ return mergeCounterRef.current;
536
+ });
537
+ const getMergeCounterFromDb = (dbReadModel) => Effect.gen(function* () {
538
+ const result = dbReadModel.select(sql `SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`);
539
+ return result[0]?.mergeCounter ?? 0;
540
+ });
531
541
  //# sourceMappingURL=LeaderSyncProcessor.js.map
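
The merge counter is persisted as a single row in the read-model database (the new LEADER_MERGE_COUNTER_TABLE; see system-tables.ts in the file list), so it survives leader restarts. A sketch of the two helpers' contract against an assumed minimal SQLite interface:

    interface Db {
      select: <T>(query: string) => ReadonlyArray<T>
      execute: (query: string) => void
    }

    // Sketch mirroring getMergeCounterFromDb / incrementMergeCounter above.
    // 'LEADER_MERGE_COUNTER' stands in for the actual LEADER_MERGE_COUNTER_TABLE name.
    const readMergeCounter = (db: Db): number =>
      db.select<{ mergeCounter: number }>('SELECT mergeCounter FROM LEADER_MERGE_COUNTER WHERE id = 0')[0]?.mergeCounter ?? 0

    const bumpMergeCounter = (db: Db, ref: { current: number }): number => {
      ref.current++
      // INSERT OR REPLACE keeps the table at exactly one row (id = 0)
      db.execute(`INSERT OR REPLACE INTO LEADER_MERGE_COUNTER (id, mergeCounter) VALUES (0, ${ref.current})`)
      return ref.current
    }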