@livestore/common 0.3.0-dev.26 → 0.3.0-dev.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +13 -12
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js +5 -6
- package/dist/adapter-types.js.map +1 -1
- package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
- package/dist/devtools/devtools-messages-common.d.ts +13 -6
- package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-common.js +6 -0
- package/dist/devtools/devtools-messages-common.js.map +1 -1
- package/dist/devtools/devtools-messages-leader.d.ts +25 -25
- package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-leader.js +1 -2
- package/dist/devtools/devtools-messages-leader.js.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +15 -6
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.js +211 -189
- package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
- package/dist/leader-thread/apply-mutation.d.ts +14 -9
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +43 -36
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +4 -5
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +21 -33
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mod.d.ts +1 -1
- package/dist/leader-thread/mod.d.ts.map +1 -1
- package/dist/leader-thread/mod.js +1 -1
- package/dist/leader-thread/mod.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +19 -3
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +105 -12
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
- package/dist/leader-thread/pull-queue-set.js +6 -16
- package/dist/leader-thread/pull-queue-set.js.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +4 -3
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +34 -19
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +5 -4
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +7 -9
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +9 -0
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +17 -2
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/MutationEvent.d.ts +78 -25
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +25 -12
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/schema.js +1 -1
- package/dist/schema/schema.js.map +1 -1
- package/dist/schema/system-tables.d.ts +67 -0
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +12 -1
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +9 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.js +25 -19
- package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
- package/dist/sync/sync.d.ts +6 -5
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +47 -71
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +118 -127
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +204 -275
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +2 -2
- package/src/adapter-types.ts +11 -13
- package/src/devtools/devtools-messages-common.ts +9 -0
- package/src/devtools/devtools-messages-leader.ts +1 -2
- package/src/leader-thread/LeaderSyncProcessor.ts +381 -346
- package/src/leader-thread/apply-mutation.ts +81 -71
- package/src/leader-thread/leader-worker-devtools.ts +5 -7
- package/src/leader-thread/make-leader-thread-layer.ts +26 -41
- package/src/leader-thread/mod.ts +1 -1
- package/src/leader-thread/mutationlog.ts +166 -13
- package/src/leader-thread/recreate-db.ts +4 -3
- package/src/leader-thread/types.ts +33 -23
- package/src/rehydrate-from-mutationlog.ts +12 -12
- package/src/schema/EventId.ts +20 -2
- package/src/schema/MutationEvent.ts +32 -16
- package/src/schema/schema.ts +1 -1
- package/src/schema/system-tables.ts +20 -1
- package/src/sync/ClientSessionSyncProcessor.ts +35 -23
- package/src/sync/sync.ts +6 -9
- package/src/sync/syncstate.test.ts +230 -306
- package/src/sync/syncstate.ts +176 -171
- package/src/version.ts +1 -1
- package/src/leader-thread/pull-queue-set.ts +0 -67
--- package/dist/leader-thread/LeaderSyncProcessor.js
+++ package/dist/leader-thread/LeaderSyncProcessor.js
@@ -1,16 +1,15 @@
 import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils';
-import { BucketQueue, Deferred, Effect, Exit, FiberHandle,
+import { BucketQueue, Deferred, Effect, Exit, FiberHandle, OtelTracer, Queue, ReadonlyArray, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
 import { UnexpectedError } from '../adapter-types.js';
-import { EventId, getMutationDef,
-import { updateRows } from '../sql-queries/index.js';
+import { EventId, getMutationDef, LEADER_MERGE_COUNTER_TABLE, MutationEvent, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
 import { LeaderAheadError } from '../sync/sync.js';
 import * as SyncState from '../sync/syncstate.js';
 import { sql } from '../util.js';
-import {
-import
-import { getBackendHeadFromDb, getClientHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js';
+import { rollback } from './apply-mutation.js';
+import * as Mutationlog from './mutationlog.js';
 import { LeaderThreadCtx } from './types.js';
 export const BACKEND_PUSH_BATCH_SIZE = 50;
+export const LOCAL_PUSH_BATCH_SIZE = 10;
 /**
  * The LeaderSyncProcessor manages synchronization of mutations between
  * the local state and the sync backend, ensuring efficient and orderly processing.
@@ -18,35 +17,44 @@ export const BACKEND_PUSH_BATCH_SIZE = 50;
  * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
  * Responsibilities:
- * - Queueing incoming local mutations in a
+ * - Queueing incoming local mutations in a localPushesQueue.
  * - Broadcasting mutations to client sessions via pull queues.
  * - Pushing mutations to the sync backend.
  *
  * Notes:
  *
  * local push processing:
- * -
+ * - localPushesQueue:
 *   - Maintains events in ascending order.
 *   - Uses `Deferred` objects to resolve/reject events based on application success.
- *   - Processes events from the
+ *   - Processes events from the queue, applying mutations in batches.
 *   - Controlled by a `Latch` to manage execution flow.
 *   - The latch closes on pull receipt and re-opens post-pull completion.
 *   - Processes up to `maxBatchSize` events per cycle.
 *
+ * Currently we're advancing the db read model and mutation log in lockstep, but we could also decouple this in the future
+ *
+ * Tricky concurrency scenarios:
+ * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+ *   Solution: Introduce a generation number for local push batches which is used to filter out old batches items in case of rejection.
+ *
 */
-export const makeLeaderSyncProcessor = ({ schema,
-    const
+export const makeLeaderSyncProcessor = ({ schema, dbMutationLogMissing, dbMutationLog, dbReadModel, dbReadModelMissing, initialBlockingSyncContext, onError, }) => Effect.gen(function* () {
+    const syncBackendPushQueue = yield* BucketQueue.make();
     const syncStateSref = yield* SubscriptionRef.make(undefined);
     const isClientEvent = (mutationEventEncoded) => {
         const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
         return mutationDef.options.clientOnly;
     };
+    const connectedClientSessionPullQueues = yield* makePullQueueSet;
     /**
      * Tracks generations of queued local push events.
-     * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+     * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
      * even if they would be valid on their own.
      */
     const currentLocalPushGenerationRef = { current: 0 };
+    const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) };
+    const mergePayloads = new Map();
     // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
     const ctxRef = {
         current: undefined,
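
Note: the doc comment and `currentLocalPushGenerationRef` above describe the new generation mechanism for local pushes. A minimal standalone sketch of the idea (plain TypeScript; `LocalPushQueue` is an invented name — the package's actual implementation uses Effect's `BucketQueue`):

    type QueueItem<E> = { event: E; generation: number }

    class LocalPushQueue<E> {
      private items: QueueItem<E>[] = []
      private generation = 0

      push(event: E): void {
        // Tag each item with the generation that was current at enqueue time
        this.items.push({ event, generation: this.generation })
      }

      // Called when a batch is rejected: bump the generation and drop items that
      // were queued against the now-invalid head, even if valid on their own.
      reject(): void {
        this.generation++
        const next = this.generation
        this.items = this.items.filter((item) => item.generation >= next)
      }

      takeBatch(maxBatchSize: number): E[] {
        return this.items.splice(0, maxBatchSize).map((item) => item.event)
      }
    }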
@@ -54,21 +62,11 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
     const localPushesQueue = yield* BucketQueue.make();
     const localPushesLatch = yield* Effect.makeLatch(true);
     const pullLatch = yield* Effect.makeLatch(true);
+    // NOTE: New events are only pushed to sync backend after successful local push processing
     const push = (newEvents, options) => Effect.gen(function* () {
         // TODO validate batch
         if (newEvents.length === 0)
             return;
-        // if (options.generation < currentLocalPushGenerationRef.current) {
-        //   debugger
-        //   // We can safely drop this batch as it's from a previous push generation
-        //   return
-        // }
-        if (clientId === 'client-b') {
-            // console.log(
-            //   'push from client session',
-            //   newEvents.map((item) => item.toJSON()),
-            // )
-        }
         const waitForProcessing = options?.waitForProcessing ?? false;
         const generation = currentLocalPushGenerationRef.current;
         if (waitForProcessing) {
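
Note: `push` queues items as `[mutationEventEncoded, deferred, generation]` tuples (see the `waitForProcessing` branch above and the `[mutationEventEncoded, undefined, generation]` mapping in the next hunk). A Promise-based sketch of the assumed contract, with Effect's `Deferred` replaced by a hand-rolled Promise deferred:

    type Deferred = { promise: Promise<void>; resolve: () => void; reject: (err: unknown) => void }

    const makeDeferred = (): Deferred => {
      let resolve!: () => void
      let reject!: (err: unknown) => void
      const promise = new Promise<void>((res, rej) => { resolve = res; reject = rej })
      return { promise, resolve, reject }
    }

    const queue: Array<{ event: unknown; deferred: Deferred | undefined }> = []

    const push = async (events: unknown[], options?: { waitForProcessing?: boolean }) => {
      if (options?.waitForProcessing !== true) {
        // Fire-and-forget: no deferred attached
        for (const event of events) queue.push({ event, deferred: undefined })
        return
      }
      // Attach one deferred per event and resolve the call only once all are applied
      const items = events.map((event) => ({ event, deferred: makeDeferred() }))
      queue.push(...items)
      await Promise.all(items.map((item) => item.deferred.promise))
    }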
@@ -81,20 +79,21 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
             const items = newEvents.map((mutationEventEncoded) => [mutationEventEncoded, undefined, generation]);
             yield* BucketQueue.offerAll(localPushesQueue, items);
         }
-    }).pipe(Effect.withSpan('@livestore/common:
+    }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:local-push', {
         attributes: {
             batchSize: newEvents.length,
             batch: TRACE_VERBOSE ? newEvents : undefined,
         },
         links: ctxRef.current?.span ? [{ _tag: 'SpanLink', span: ctxRef.current.span, attributes: {} }] : undefined,
     }));
-    const pushPartial = ({ mutationEvent:
+    const pushPartial = ({ mutationEvent: { mutation, args }, clientId, sessionId, }) => Effect.gen(function* () {
         const syncState = yield* syncStateSref;
         if (syncState === undefined)
             return shouldNeverHappen('Not initialized');
-        const mutationDef = getMutationDef(schema,
+        const mutationDef = getMutationDef(schema, mutation);
         const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
-
+            mutation,
+            args,
             clientId,
             sessionId,
             ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
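
Note: `EventId.nextPair(syncState.localHead, clientOnly)` above spreads the id fields for the new event. A sketch of one plausible semantics, consistent with the `{ global, client }` id pairs used throughout this diff (illustration only — the real implementation lives in package/dist/schema/EventId.js, which also changed in this release):

    type EventId = { global: number; client: number }

    const clientDefault = 0
    const ROOT: EventId = { global: 0, client: clientDefault }

    // Assumed: client-only events advance the client component; global events
    // advance the global component and reset the client component.
    const nextPair = (localHead: EventId, clientOnly: boolean): { id: EventId; parentId: EventId } => {
      const id = clientOnly
        ? { global: localHead.global, client: localHead.client + 1 }
        : { global: localHead.global + 1, client: clientDefault }
      return { id, parentId: localHead }
    }

    const isGreaterThanOrEqual = (a: EventId, b: EventId) =>
      a.global > b.global || (a.global === b.global && a.client >= b.client)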
@@ -102,28 +101,29 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
         yield* push([mutationEventEncoded]);
     }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie));
     // Starts various background loops
-    const boot =
+    const boot = Effect.gen(function* () {
         const span = yield* Effect.currentSpan.pipe(Effect.orDie);
         const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)));
         const { devtools, shutdownChannel } = yield* LeaderThreadCtx;
+        const runtime = yield* Effect.runtime();
         ctxRef.current = {
             otelSpan,
             span,
             devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+            runtime,
         };
-        const initialBackendHead =
-
+        const initialBackendHead = dbMutationLogMissing
+            ? EventId.ROOT.global
+            : Mutationlog.getBackendHeadFromDb(dbMutationLog);
+        const initialLocalHead = dbMutationLogMissing ? EventId.ROOT : Mutationlog.getClientHeadFromDb(dbMutationLog);
         if (initialBackendHead > initialLocalHead.global) {
             return shouldNeverHappen(`During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`);
         }
-        const pendingMutationEvents =
-
-            client: EventId.clientDefault
-        }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))));
+        const pendingMutationEvents = dbMutationLogMissing
+            ? []
+            : yield* Mutationlog.getMutationEventsSince({ global: initialBackendHead, client: EventId.clientDefault });
         const initialSyncState = new SyncState.SyncState({
             pending: pendingMutationEvents,
-            // On the leader we don't need a rollback tail beyond `pending` items
-            rollbackTail: [],
             upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
             localHead: initialLocalHead,
         });
@@ -131,13 +131,15 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
         yield* SubscriptionRef.set(syncStateSref, initialSyncState);
         // Rehydrate sync queue
         if (pendingMutationEvents.length > 0) {
-            const
+            const globalPendingMutationEvents = pendingMutationEvents
                 // Don't sync clientOnly mutations
                 .filter((mutationEventEncoded) => {
                 const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
                 return mutationDef.options.clientOnly === false;
             });
-
+            if (globalPendingMutationEvents.length > 0) {
+                yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingMutationEvents);
+            }
         }
         const shutdownOnError = (cause) => Effect.gen(function* () {
             if (onError === 'shutdown') {
@@ -150,33 +152,33 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
             localPushesQueue,
             pullLatch,
             syncStateSref,
-
+            syncBackendPushQueue,
             schema,
             isClientEvent,
             otelSpan,
             currentLocalPushGenerationRef,
+            connectedClientSessionPullQueues,
+            mergeCounterRef,
+            mergePayloads,
         }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
         const backendPushingFiberHandle = yield* FiberHandle.make();
         yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
-
-            syncBackendQueue,
+            syncBackendPushQueue,
             otelSpan,
             devtoolsLatch: ctxRef.current?.devtoolsLatch,
         }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
         yield* backgroundBackendPulling({
-            dbReady,
             initialBackendHead,
             isClientEvent,
             restartBackendPushing: (filteredRebasedPending) => Effect.gen(function* () {
                 // Stop current pushing fiber
                 yield* FiberHandle.clear(backendPushingFiberHandle);
-                // Reset the sync queue
-                yield* BucketQueue.clear(
-                yield* BucketQueue.offerAll(
+                // Reset the sync backend push queue
+                yield* BucketQueue.clear(syncBackendPushQueue);
+                yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending);
                 // Restart pushing fiber
                 yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
-
-                    syncBackendQueue,
+                    syncBackendPushQueue,
                     otelSpan,
                     devtoolsLatch: ctxRef.current?.devtoolsLatch,
                 }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
@@ -187,30 +189,65 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
             otelSpan,
             initialBlockingSyncContext,
             devtoolsLatch: ctxRef.current?.devtoolsLatch,
+            connectedClientSessionPullQueues,
+            mergeCounterRef,
+            mergePayloads,
         }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
         return { initialLeaderHead: initialLocalHead };
-    }).pipe(Effect.withSpanScoped('@livestore/common:
+    }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'));
+    const pull = ({ cursor }) => Effect.gen(function* () {
+        const queue = yield* pullQueue({ cursor });
+        return Stream.fromQueue(queue);
+    }).pipe(Stream.unwrapScoped);
+    const pullQueue = ({ cursor }) => {
+        const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized');
+        return Effect.gen(function* () {
+            const queue = yield* connectedClientSessionPullQueues.makeQueue;
+            const payloadsSinceCursor = Array.from(mergePayloads.entries())
+                .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+                .filter(({ mergeCounter }) => mergeCounter > cursor.mergeCounter)
+                .toSorted((a, b) => a.mergeCounter - b.mergeCounter)
+                .map(({ payload, mergeCounter }) => {
+                if (payload._tag === 'upstream-advance') {
+                    return {
+                        payload: {
+                            _tag: 'upstream-advance',
+                            newEvents: ReadonlyArray.dropWhile(payload.newEvents, (mutationEventEncoded) => EventId.isGreaterThanOrEqual(cursor.eventId, mutationEventEncoded.id)),
+                        },
+                        mergeCounter,
+                    };
+                }
+                else {
+                    return { payload, mergeCounter };
+                }
+            });
+            yield* queue.offerAll(payloadsSinceCursor);
+            return queue;
+        }).pipe(Effect.provide(runtime));
+    };
+    const syncState = Subscribable.make({
+        get: Effect.gen(function* () {
+            const syncState = yield* syncStateSref;
+            if (syncState === undefined)
+                return shouldNeverHappen('Not initialized');
+            return syncState;
+        }),
+        changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+    });
     return {
+        pull,
+        pullQueue,
         push,
         pushPartial,
         boot,
-        syncState
-
-            const syncState = yield* syncStateSref;
-            if (syncState === undefined)
-                return shouldNeverHappen('Not initialized');
-            return syncState;
-        }),
-        changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
-    }),
+        syncState,
+        getMergeCounter: () => mergeCounterRef.current,
     };
 });
-const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref,
-    const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx;
-    const applyMutationItems = yield* makeApplyMutationItems;
+const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendPushQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, }) => Effect.gen(function* () {
    while (true) {
        // TODO make batch size configurable
-        const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1,
+        const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, LOCAL_PUSH_BATCH_SIZE);
        // Wait for the backend pulling to finish
        yield* localPushesLatch.await;
        // Prevent backend pull processing until this local push is finished
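
Note: `pullQueue` above is what lets a client session reconnect with a cursor (its last seen merge counter plus event id) and catch up from the in-memory `mergePayloads` map. A simplified sketch of the replay filtering (types are stand-ins for the package's SyncState payloads):

    type EventId = { global: number; client: number }
    type Payload =
      | { _tag: 'upstream-advance'; newEvents: { id: EventId }[] }
      | { _tag: 'upstream-rebase'; newEvents: { id: EventId }[]; rollbackEvents: { id: EventId }[] }

    const isGreaterThanOrEqual = (a: EventId, b: EventId) =>
      a.global > b.global || (a.global === b.global && a.client >= b.client)

    const dropWhile = <T>(arr: readonly T[], pred: (t: T) => boolean): T[] => {
      const i = arr.findIndex((t) => !pred(t))
      return i === -1 ? [] : arr.slice(i)
    }

    const payloadsSinceCursor = (
      mergePayloads: Map<number, Payload>,
      cursor: { mergeCounter: number; eventId: EventId },
    ) =>
      [...mergePayloads.entries()]
        .filter(([mergeCounter]) => mergeCounter > cursor.mergeCounter)
        .sort(([a], [b]) => a - b)
        .map(([mergeCounter, payload]) =>
          payload._tag === 'upstream-advance'
            ? {
                mergeCounter,
                payload: {
                  _tag: 'upstream-advance' as const,
                  // Drop events the session has already seen
                  newEvents: dropWhile(payload.newEvents, (e) => isGreaterThanOrEqual(cursor.eventId, e.id)),
                },
              }
            : { mergeCounter, payload },
        )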
@@ -236,9 +273,10 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
             isClientEvent,
             isEqualEvent: MutationEvent.isEqualEncoded,
         });
+        const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
         switch (mergeResult._tag) {
             case 'unexpected-error': {
-                otelSpan?.addEvent(
+                otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:unexpected-error`, {
                     batchSize: newEvents.length,
                     newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
                 });
@@ -248,14 +286,11 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
                 return shouldNeverHappen('The leader thread should never have to rebase due to a local push');
             }
             case 'reject': {
-                otelSpan?.addEvent(
+                otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:reject`, {
                     batchSize: newEvents.length,
                     mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
                 });
-
-
-                TODO: how to test this?
-                */
+                // TODO: how to test this?
                 currentLocalPushGenerationRef.current++;
                 const nextGeneration = currentLocalPushGenerationRef.current;
                 const providedId = newEvents.at(0).id;
@@ -263,7 +298,8 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
                 // We're also handling the case where the localPushQueue already contains events
                 // from the next generation which we preserve in the queue
                 const remainingEventsMatchingGeneration = yield* BucketQueue.takeSplitWhere(localPushesQueue, (item) => item[2] >= nextGeneration);
-
+                // TODO we still need to better understand and handle this scenario
+                if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
                     console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue));
                     debugger;
                 }
@@ -290,15 +326,12 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
             }
         }
         yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
-        if (clientId === 'client-b') {
-            // yield* Effect.log('offer upstream-advance due to local-push')
-            // debugger
-        }
         yield* connectedClientSessionPullQueues.offer({
-            payload: {
-
+            payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+            mergeCounter,
         });
-
+        mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
+        otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:advance`, {
             batchSize: newEvents.length,
             mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
         });
@@ -307,45 +340,41 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
             const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
             return mutationDef.options.clientOnly === false;
         });
-        yield* BucketQueue.offerAll(
-        yield*
+        yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch);
+        yield* applyMutationsBatch({ batchItems: newEvents, deferreds });
         // Allow the backend pulling to start
         yield* pullLatch.open;
     }
 });
 // TODO how to handle errors gracefully
-const
-    const
-
-
-
-
-
-
-
-
-
-
-
-    })
-
-
-
-            yield* Deferred.succeed(deferreds[i], void 0);
-        }
+const applyMutationsBatch = ({ batchItems, deferreds }) => Effect.gen(function* () {
+    const { dbReadModel: db, dbMutationLog, applyMutation } = yield* LeaderThreadCtx;
+    // NOTE We always start a transaction to ensure consistency between db and mutation log (even for single-item batches)
+    db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+    dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
+    yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
+        if (Exit.isSuccess(exit))
+            return;
+        // Rollback in case of an error
+        db.execute('ROLLBACK', undefined);
+        dbMutationLog.execute('ROLLBACK', undefined);
+    }));
+    for (let i = 0; i < batchItems.length; i++) {
+        const { sessionChangeset } = yield* applyMutation(batchItems[i]);
+        batchItems[i].meta.sessionChangeset = sessionChangeset;
+        if (deferreds?.[i] !== undefined) {
+            yield* Deferred.succeed(deferreds[i], void 0);
         }
-
-
-
-
-
-});
-const backgroundBackendPulling = ({
-    const { syncBackend, dbReadModel: db, dbMutationLog,
+    }
+    db.execute('COMMIT', undefined); // Commit the transaction
+    dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
+}).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyMutationItems', {
+    attributes: { batchSize: batchItems.length },
+}), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
+const backgroundBackendPulling = ({ initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, }) => Effect.gen(function* () {
+    const { syncBackend, dbReadModel: db, dbMutationLog, schema } = yield* LeaderThreadCtx;
     if (syncBackend === undefined)
         return;
-    const cursorInfo = yield* getCursorInfo(initialBackendHead);
-    const applyMutationItems = yield* makeApplyMutationItems;
     const onNewPullChunk = (newEvents, remaining) => Effect.gen(function* () {
         if (newEvents.length === 0)
             return;
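
Note: the new `applyMutationsBatch` above wraps the read-model db and the mutation-log db in one BEGIN/COMMIT pair each, rolling both back if applying any event fails, so the two databases always advance in lockstep. The same pattern in plain TypeScript over a minimal `Db` interface:

    interface Db { execute(sql: string): void }

    const applyMutationsBatch = (
      db: Db,
      dbMutationLog: Db,
      batchItems: unknown[],
      applyMutation: (event: unknown) => void,
    ) => {
      // A transaction is always started, even for single-item batches, to keep the
      // read model and the mutation log consistent with each other.
      db.execute('BEGIN TRANSACTION')
      dbMutationLog.execute('BEGIN TRANSACTION')
      try {
        for (const item of batchItems) applyMutation(item)
        db.execute('COMMIT')
        dbMutationLog.execute('COMMIT')
      } catch (err) {
        db.execute('ROLLBACK')
        dbMutationLog.execute('ROLLBACK')
        throw err
      }
    }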
@@ -359,72 +388,80 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent,
         const syncState = yield* syncStateSref;
         if (syncState === undefined)
             return shouldNeverHappen('Not initialized');
-        const trimRollbackUntil = newEvents.at(-1).id;
         const mergeResult = SyncState.merge({
             syncState,
-            payload: {
+            payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
             isClientEvent,
             isEqualEvent: MutationEvent.isEqualEncoded,
             ignoreClientEvents: true,
         });
+        const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
         if (mergeResult._tag === 'reject') {
             return shouldNeverHappen('The leader thread should never reject upstream advances');
         }
         else if (mergeResult._tag === 'unexpected-error') {
-            otelSpan?.addEvent(
+            otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:unexpected-error`, {
                 newEventsCount: newEvents.length,
                 newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
             });
             return yield* Effect.fail(mergeResult.cause);
         }
         const newBackendHead = newEvents.at(-1).id;
-        updateBackendHead(dbMutationLog, newBackendHead);
+        Mutationlog.updateBackendHead(dbMutationLog, newBackendHead);
         if (mergeResult._tag === 'rebase') {
-            otelSpan?.addEvent(
+            otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:rebase`, {
                 newEventsCount: newEvents.length,
                 newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
-                rollbackCount: mergeResult.
+                rollbackCount: mergeResult.rollbackEvents.length,
                 mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
             });
-            const
+            const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((mutationEvent) => {
                 const mutationDef = getMutationDef(schema, mutationEvent.mutation);
                 return mutationDef.options.clientOnly === false;
             });
-            yield* restartBackendPushing(
-            if (mergeResult.
-                yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.
+            yield* restartBackendPushing(globalRebasedPendingEvents);
+            if (mergeResult.rollbackEvents.length > 0) {
+                yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) });
             }
             yield* connectedClientSessionPullQueues.offer({
-                payload: {
-                    _tag: 'upstream-rebase',
+                payload: SyncState.PayloadUpstreamRebase.make({
                     newEvents: mergeResult.newEvents,
-
-
-
-                remaining,
+                    rollbackEvents: mergeResult.rollbackEvents,
+                }),
+                mergeCounter,
             });
+            mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamRebase.make({
+                newEvents: mergeResult.newEvents,
+                rollbackEvents: mergeResult.rollbackEvents,
+            }));
         }
         else {
-            otelSpan?.addEvent(
+            otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:advance`, {
                 newEventsCount: newEvents.length,
                 mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
             });
-            if (clientId === 'client-b') {
-                // yield* Effect.log('offer upstream-advance due to pull')
-            }
             yield* connectedClientSessionPullQueues.offer({
-                payload: {
-
+                payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+                mergeCounter,
             });
+            mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
+            if (mergeResult.confirmedEvents.length > 0) {
+                // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
+                // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
+                const confirmedNewEvents = newEvents.filter((mutationEvent) => mergeResult.confirmedEvents.some((confirmedEvent) => EventId.isEqual(mutationEvent.id, confirmedEvent.id)));
+                yield* Mutationlog.updateSyncMetadata(confirmedNewEvents);
+            }
         }
+        // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
         trimChangesetRows(db, newBackendHead);
-        yield*
+        yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined });
         yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
+        // Allow local pushes to be processed again
         if (remaining === 0) {
-            // Allow local pushes to be processed again
             yield* localPushesLatch.open;
         }
     });
+    const cursorInfo = yield* Mutationlog.getSyncBackendCursorInfo(initialBackendHead);
     yield* syncBackend.pull(cursorInfo).pipe(
     // TODO only take from queue while connected
     Stream.tap(({ batch, remaining }) => Effect.gen(function* () {
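
Note: in the advance branch above, the backend's echo of the events (`newEvents`) carries the authoritative sync metadata, so those — not the locally held `confirmedEvents` — are what gets persisted, matched by event id. The matching step in isolation:

    type EventId = { global: number; client: number }
    type Event = { id: EventId; syncMetadata?: unknown }

    const isEqual = (a: EventId, b: EventId) => a.global === b.global && a.client === b.client

    const selectConfirmedNewEvents = (newEvents: Event[], confirmedEvents: Event[]): Event[] =>
      newEvents.filter((mutationEvent) =>
        confirmedEvents.some((confirmedEvent) => isEqual(mutationEvent.id, confirmedEvent.id)),
      )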
@@ -434,64 +471,22 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent,
     //     batch: TRACE_VERBOSE ? batch : undefined,
     //   },
     // })
-        // Wait for the db to be initially created
-        yield* dbReady;
         // NOTE we only want to take process mutations when the sync backend is connected
         // (e.g. needed for simulating being offline)
         // TODO remove when there's a better way to handle this in stream above
         yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
-        yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)), remaining);
+        yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded, _.metadata)), remaining);
         yield* initialBlockingSyncContext.update({ processed: batch.length, remaining });
     })), Stream.runDrain, Effect.interruptible);
-}).pipe(Effect.withSpan('@livestore/common:
-const
-    const
-        .select(sql `SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`)
-        .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
-        .sort((a, b) => EventId.compare(a.id, b.id));
-    // TODO bring back `.toSorted` once Expo supports it
-    // .toSorted((a, b) => EventId.compare(a.id, b.id))
-    // Apply changesets in reverse order
-    for (let i = rollbackEvents.length - 1; i >= 0; i--) {
-        const { changeset } = rollbackEvents[i];
-        if (changeset !== null) {
-            db.makeChangeset(changeset).invert().apply();
-        }
-    }
-    const eventIdPairChunks = ReadonlyArray.chunksOf(100)(eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`));
-    // Delete the changeset rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-        db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
-    }
-    // Delete the mutation log rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-        dbMutationLog.execute(sql `DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
-    }
-}).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
-    attributes: { count: eventIdsToRollback.length },
-}));
-const getCursorInfo = (remoteHead) => Effect.gen(function* () {
-    const { dbMutationLog } = yield* LeaderThreadCtx;
-    if (remoteHead === EventId.ROOT.global)
-        return Option.none();
-    const MutationlogQuerySchema = Schema.Struct({
-        syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
-    }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head);
-    const syncMetadataOption = yield* Effect.sync(() => dbMutationLog.select(sql `SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`)).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie);
-    return Option.some({
-        cursor: { global: remoteHead, client: EventId.clientDefault },
-        metadata: syncMetadataOption,
-    });
-}).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }));
-const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
-    const { syncBackend, dbMutationLog } = yield* LeaderThreadCtx;
+}).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'));
+const backgroundBackendPushing = ({ syncBackendPushQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
+    const { syncBackend } = yield* LeaderThreadCtx;
     if (syncBackend === undefined)
         return;
-    yield* dbReady;
     while (true) {
         yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
         // TODO make batch size configurable
-        const queueItems = yield* BucketQueue.takeBetween(
+        const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, BACKEND_PUSH_BATCH_SIZE);
         yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
         if (devtoolsLatch !== undefined) {
             yield* devtoolsLatch.await;
@@ -510,22 +505,49 @@ const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtool
             // wait for interrupt caused by background pulling which will then restart pushing
             return yield* Effect.never;
         }
-        const { metadata } = pushResult.right;
-        // TODO try to do this in a single query
-        for (let i = 0; i < queueItems.length; i++) {
-            const mutationEventEncoded = queueItems[i];
-            yield* execSql(dbMutationLog, ...updateRows({
-                tableName: MUTATION_LOG_META_TABLE,
-                columns: mutationLogMetaTable.sqliteDef.columns,
-                where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
-                updateValues: { syncMetadataJson: metadata[i] },
-            }));
-        }
     }
-}).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:
+}).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'));
 const trimChangesetRows = (db, newHead) => {
     // Since we're using the session changeset rows to query for the current head,
     // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
     db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`);
 };
+const makePullQueueSet = Effect.gen(function* () {
+    const set = new Set();
+    yield* Effect.addFinalizer(() => Effect.gen(function* () {
+        for (const queue of set) {
+            yield* Queue.shutdown(queue);
+        }
+        set.clear();
+    }));
+    const makeQueue = Effect.gen(function* () {
+        const queue = yield* Queue.unbounded().pipe(Effect.acquireRelease(Queue.shutdown));
+        yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)));
+        set.add(queue);
+        return queue;
+    });
+    const offer = (item) => Effect.gen(function* () {
+        // Short-circuit if the payload is an empty upstream advance
+        if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
+            return;
+        }
+        for (const queue of set) {
+            yield* Queue.offer(queue, item);
+        }
+    });
+    return {
+        makeQueue,
+        offer,
+    };
+});
+const incrementMergeCounter = (mergeCounterRef) => Effect.gen(function* () {
+    const { dbReadModel } = yield* LeaderThreadCtx;
+    mergeCounterRef.current++;
+    dbReadModel.execute(sql `INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`);
+    return mergeCounterRef.current;
+});
+const getMergeCounterFromDb = (dbReadModel) => Effect.gen(function* () {
+    const result = dbReadModel.select(sql `SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`);
+    return result[0]?.mergeCounter ?? 0;
+});
 //# sourceMappingURL=LeaderSyncProcessor.js.map
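
Note: `makePullQueueSet` above gives every connected client session its own queue and broadcasts each merge payload to all of them, skipping empty upstream advances; the merge counter is persisted into LEADER_MERGE_COUNTER_TABLE so it survives restarts. A plain TypeScript stand-in for the queue set (arrays in place of Effect queues):

    type Payload = { _tag: 'upstream-advance' | 'upstream-rebase'; newEvents: unknown[] }
    type Item = { payload: Payload; mergeCounter: number }

    const makePullQueueSet = () => {
      const set = new Set<Item[]>()
      const makeQueue = () => {
        const queue: Item[] = []
        set.add(queue)
        // Closing a session removes its queue from the set (a finalizer in the real code)
        return { queue, close: () => set.delete(queue) }
      }
      const offer = (item: Item) => {
        // Short-circuit if the payload is an empty upstream advance
        if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) return
        for (const queue of set) queue.push(item)
      }
      return { makeQueue, offer }
    }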