@livestore/common 0.3.0-dev.25 → 0.3.0-dev.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +13 -12
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js +5 -6
- package/dist/adapter-types.js.map +1 -1
- package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
- package/dist/devtools/devtools-messages-common.d.ts +13 -6
- package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-common.js +6 -0
- package/dist/devtools/devtools-messages-common.js.map +1 -1
- package/dist/devtools/devtools-messages-leader.d.ts +25 -25
- package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-leader.js +1 -2
- package/dist/devtools/devtools-messages-leader.js.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +15 -6
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.js +229 -207
- package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
- package/dist/leader-thread/apply-mutation.d.ts +14 -9
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +43 -36
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +4 -5
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +21 -33
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mod.d.ts +1 -1
- package/dist/leader-thread/mod.d.ts.map +1 -1
- package/dist/leader-thread/mod.js +1 -1
- package/dist/leader-thread/mod.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +19 -3
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +105 -12
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
- package/dist/leader-thread/pull-queue-set.js +6 -16
- package/dist/leader-thread/pull-queue-set.js.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +4 -3
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +34 -19
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +5 -4
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +7 -9
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +9 -0
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +17 -2
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/MutationEvent.d.ts +78 -25
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +25 -12
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/schema.js +1 -1
- package/dist/schema/schema.js.map +1 -1
- package/dist/schema/system-tables.d.ts +67 -0
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +12 -1
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +9 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.js +49 -43
- package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
- package/dist/sync/sync.d.ts +6 -5
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +60 -84
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +127 -136
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +205 -276
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +2 -2
- package/src/adapter-types.ts +11 -13
- package/src/devtools/devtools-messages-common.ts +9 -0
- package/src/devtools/devtools-messages-leader.ts +1 -2
- package/src/leader-thread/LeaderSyncProcessor.ts +399 -364
- package/src/leader-thread/apply-mutation.ts +81 -71
- package/src/leader-thread/leader-worker-devtools.ts +5 -7
- package/src/leader-thread/make-leader-thread-layer.ts +26 -41
- package/src/leader-thread/mod.ts +1 -1
- package/src/leader-thread/mutationlog.ts +166 -13
- package/src/leader-thread/recreate-db.ts +4 -3
- package/src/leader-thread/types.ts +33 -23
- package/src/rehydrate-from-mutationlog.ts +12 -12
- package/src/schema/EventId.ts +20 -2
- package/src/schema/MutationEvent.ts +32 -16
- package/src/schema/schema.ts +1 -1
- package/src/schema/system-tables.ts +20 -1
- package/src/sync/ClientSessionSyncProcessor.ts +59 -47
- package/src/sync/sync.ts +6 -9
- package/src/sync/syncstate.test.ts +239 -315
- package/src/sync/syncstate.ts +191 -188
- package/src/version.ts +1 -1
- package/tmp/pack.tgz +0 -0
- package/src/leader-thread/pull-queue-set.ts +0 -67
Diff of package/src/leader-thread/LeaderSyncProcessor.ts (+399 -364):

@@ -1,15 +1,14 @@
 import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
-import type { HttpClient, Scope, Tracer } from '@livestore/utils/effect'
+import type { HttpClient, Runtime, Scope, Tracer } from '@livestore/utils/effect'
 import {
   BucketQueue,
   Deferred,
   Effect,
   Exit,
   FiberHandle,
-  Option,
   OtelTracer,
+  Queue,
   ReadonlyArray,
-  Schema,
   Stream,
   Subscribable,
   SubscriptionRef,
@@ -18,30 +17,29 @@ import type * as otel from '@opentelemetry/api'
 
 import type { SqliteDb } from '../adapter-types.js'
 import { UnexpectedError } from '../adapter-types.js'
-import type { LiveStoreSchema
+import type { LiveStoreSchema } from '../schema/mod.js'
 import {
   EventId,
   getMutationDef,
-
+  LEADER_MERGE_COUNTER_TABLE,
   MutationEvent,
-  mutationLogMetaTable,
   SESSION_CHANGESET_META_TABLE,
 } from '../schema/mod.js'
-import { updateRows } from '../sql-queries/index.js'
 import { LeaderAheadError } from '../sync/sync.js'
 import * as SyncState from '../sync/syncstate.js'
 import { sql } from '../util.js'
-import {
-import
-import {
-import type { InitialBlockingSyncContext, InitialSyncInfo, LeaderSyncProcessor } from './types.js'
+import { rollback } from './apply-mutation.js'
+import * as Mutationlog from './mutationlog.js'
+import type { InitialBlockingSyncContext, LeaderSyncProcessor } from './types.js'
 import { LeaderThreadCtx } from './types.js'
 
 export const BACKEND_PUSH_BATCH_SIZE = 50
+export const LOCAL_PUSH_BATCH_SIZE = 10
 
 type LocalPushQueueItem = [
   mutationEvent: MutationEvent.EncodedWithMeta,
   deferred: Deferred.Deferred<void, LeaderAheadError> | undefined,
+  /** Used to determine whether the batch has become invalid due to a rejected local push batch */
   generation: number,
 ]
 
@@ -52,40 +50,49 @@ type LocalPushQueueItem = [
  * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
  * Responsibilities:
- * - Queueing incoming local mutations in a
+ * - Queueing incoming local mutations in a localPushesQueue.
  * - Broadcasting mutations to client sessions via pull queues.
 * - Pushing mutations to the sync backend.
  *
  * Notes:
  *
  * local push processing:
- * -
+ * - localPushesQueue:
  *   - Maintains events in ascending order.
  *   - Uses `Deferred` objects to resolve/reject events based on application success.
- * - Processes events from the
+ * - Processes events from the queue, applying mutations in batches.
  * - Controlled by a `Latch` to manage execution flow.
 * - The latch closes on pull receipt and re-opens post-pull completion.
  * - Processes up to `maxBatchSize` events per cycle.
  *
+ * Currently we're advancing the db read model and mutation log in lockstep, but we could also decouple this in the future
+ *
+ * Tricky concurrency scenarios:
+ * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+ *   Solution: Introduce a generation number for local push batches which is used to filter out old batches items in case of rejection.
+ *
  */
 export const makeLeaderSyncProcessor = ({
   schema,
-
+  dbMutationLogMissing,
   dbMutationLog,
-
+  dbReadModel,
+  dbReadModelMissing,
   initialBlockingSyncContext,
   onError,
 }: {
   schema: LiveStoreSchema
   /** Only used to know whether we can safely query dbMutationLog during setup execution */
-
+  dbMutationLogMissing: boolean
   dbMutationLog: SqliteDb
-
+  dbReadModel: SqliteDb
+  /** Only used to know whether we can safely query dbReadModel during setup execution */
+  dbReadModelMissing: boolean
   initialBlockingSyncContext: InitialBlockingSyncContext
   onError: 'shutdown' | 'ignore'
 }): Effect.Effect<LeaderSyncProcessor, UnexpectedError, Scope.Scope> =>
   Effect.gen(function* () {
-    const
+    const syncBackendPushQueue = yield* BucketQueue.make<MutationEvent.EncodedWithMeta>()
 
     const syncStateSref = yield* SubscriptionRef.make<SyncState.SyncState | undefined>(undefined)
 
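The "tricky concurrency scenario" documented in this hunk is subtle: once one local-push batch is rejected, every item still queued behind it was built on top of the rejected events and is invalid even if it would validate on its own. Below is a minimal sketch of the generation-number idea in plain TypeScript; the names (`QueueItem`, `onRejectedBatch`) are made up for illustration, and this is not the actual BucketQueue/Deferred-based implementation:

```ts
// Sketch only: illustrates the generation counter described above.
type QueueItem = { event: unknown; generation: number }

const generationRef = { current: 0 }
const queue: QueueItem[] = []

// Items are stamped with the generation that was current at enqueue time.
const push = (event: unknown) => {
  queue.push({ event, generation: generationRef.current })
}

// Called when a batch is rejected (e.g. with a LeaderAheadError).
const onRejectedBatch = () => {
  generationRef.current++
  const nextGeneration = generationRef.current
  // Items stamped with an older generation were built on top of the rejected
  // events and are dropped; anything stamped with `nextGeneration` survives.
  const stale = queue.filter((item) => item.generation < nextGeneration)
  const fresh = queue.filter((item) => item.generation >= nextGeneration)
  queue.length = 0
  queue.push(...fresh)
  return stale // the caller rejects the deferred of each stale item
}
```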
@@ -94,13 +101,18 @@ export const makeLeaderSyncProcessor = ({
       return mutationDef.options.clientOnly
     }
 
+    const connectedClientSessionPullQueues = yield* makePullQueueSet
+
     /**
      * Tracks generations of queued local push events.
-     * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+     * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
      * even if they would be valid on their own.
      */
     const currentLocalPushGenerationRef = { current: 0 }
 
+    const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) }
+    const mergePayloads = new Map<number, typeof SyncState.PayloadUpstream.Type>()
+
     // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
     const ctxRef = {
       current: undefined as
@@ -109,6 +121,7 @@
           otelSpan: otel.Span | undefined
           span: Tracer.Span
           devtoolsLatch: Effect.Latch | undefined
+          runtime: Runtime.Runtime<LeaderThreadCtx>
         },
     }
 
@@ -116,24 +129,12 @@
     const localPushesLatch = yield* Effect.makeLatch(true)
     const pullLatch = yield* Effect.makeLatch(true)
 
+    // NOTE: New events are only pushed to sync backend after successful local push processing
     const push: LeaderSyncProcessor['push'] = (newEvents, options) =>
       Effect.gen(function* () {
         // TODO validate batch
         if (newEvents.length === 0) return
 
-        // if (options.generation < currentLocalPushGenerationRef.current) {
-        // debugger
-        // // We can safely drop this batch as it's from a previous push generation
-        // return
-        // }
-
-        if (clientId === 'client-b') {
-          // console.log(
-          // 'push from client session',
-          // newEvents.map((item) => item.toJSON()),
-          // )
-        }
-
         const waitForProcessing = options?.waitForProcessing ?? false
         const generation = currentLocalPushGenerationRef.current
 
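The `waitForProcessing` option seen above pairs each queued event with an optional `Deferred` (the second slot of `LocalPushQueueItem`) that the processing loop resolves once the mutation is applied, or fails with a `LeaderAheadError`. A sketch of that acknowledgement pattern under simplified types; `pushWithAck` is a hypothetical name, not the actual `push` implementation:

```ts
import { Deferred, Effect } from 'effect'

// Sketch: each event optionally carries an ack Deferred, mirroring the
// `deferred` slot of LocalPushQueueItem.
const pushWithAck = <E>(
  queue: Array<[event: E, ack: Deferred.Deferred<void> | undefined]>,
  events: ReadonlyArray<E>,
  waitForProcessing: boolean,
) =>
  Effect.gen(function* () {
    if (!waitForProcessing) {
      // Fire-and-forget: enqueue without an ack and return immediately
      for (const event of events) queue.push([event, undefined])
      return
    }
    const acks = yield* Effect.forEach(events, () => Deferred.make<void>())
    events.forEach((event, i) => queue.push([event, acks[i]!]))
    // The processing loop succeeds (or fails) each Deferred once applied
    yield* Effect.all(acks.map((ack) => Deferred.await(ack)))
  })
```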
@@ -154,7 +155,7 @@
         yield* BucketQueue.offerAll(localPushesQueue, items)
       }
     }).pipe(
-      Effect.withSpan('@livestore/common:
+      Effect.withSpan('@livestore/common:LeaderSyncProcessor:local-push', {
         attributes: {
           batchSize: newEvents.length,
           batch: TRACE_VERBOSE ? newEvents : undefined,
@@ -164,7 +165,7 @@
     )
 
     const pushPartial: LeaderSyncProcessor['pushPartial'] = ({
-      mutationEvent:
+      mutationEvent: { mutation, args },
       clientId,
       sessionId,
     }) =>
@@ -172,10 +173,11 @@
         const syncState = yield* syncStateSref
         if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
-        const mutationDef = getMutationDef(schema,
+        const mutationDef = getMutationDef(schema, mutation)
 
         const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
-
+          mutation,
+          args,
           clientId,
           sessionId,
           ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
@@ -185,134 +187,180 @@
     }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie))
 
     // Starts various background loops
-    const boot: LeaderSyncProcessor['boot'] = (
-      Effect.
-
-
-
+    const boot: LeaderSyncProcessor['boot'] = Effect.gen(function* () {
+      const span = yield* Effect.currentSpan.pipe(Effect.orDie)
+      const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
+      const { devtools, shutdownChannel } = yield* LeaderThreadCtx
+      const runtime = yield* Effect.runtime<LeaderThreadCtx>()
+
+      ctxRef.current = {
+        otelSpan,
+        span,
+        devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+        runtime,
+      }
 
-
-
-
-
-    }
+      const initialBackendHead = dbMutationLogMissing
+        ? EventId.ROOT.global
+        : Mutationlog.getBackendHeadFromDb(dbMutationLog)
+      const initialLocalHead = dbMutationLogMissing ? EventId.ROOT : Mutationlog.getClientHeadFromDb(dbMutationLog)
 
-
-
+      if (initialBackendHead > initialLocalHead.global) {
+        return shouldNeverHappen(
+          `During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`,
+        )
+      }
 
-
-
-
-      )
-    }
+      const pendingMutationEvents = dbMutationLogMissing
+        ? []
+        : yield* Mutationlog.getMutationEventsSince({ global: initialBackendHead, client: EventId.clientDefault })
 
-
-
-
-
-
-      const initialSyncState = new SyncState.SyncState({
-        pending: pendingMutationEvents,
-        // On the leader we don't need a rollback tail beyond `pending` items
-        rollbackTail: [],
-        upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
-        localHead: initialLocalHead,
-      })
+      const initialSyncState = new SyncState.SyncState({
+        pending: pendingMutationEvents,
+        upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
+        localHead: initialLocalHead,
+      })
 
-
-
+      /** State transitions need to happen atomically, so we use a Ref to track the state */
+      yield* SubscriptionRef.set(syncStateSref, initialSyncState)
 
-
-
-
-
-
-
-
-
+      // Rehydrate sync queue
+      if (pendingMutationEvents.length > 0) {
+        const globalPendingMutationEvents = pendingMutationEvents
+          // Don't sync clientOnly mutations
+          .filter((mutationEventEncoded) => {
+            const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation)
+            return mutationDef.options.clientOnly === false
+          })
 
-
+        if (globalPendingMutationEvents.length > 0) {
+          yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingMutationEvents)
         }
+      }
+
+      const shutdownOnError = (cause: unknown) =>
+        Effect.gen(function* () {
+          if (onError === 'shutdown') {
+            yield* shutdownChannel.send(UnexpectedError.make({ cause }))
+            yield* Effect.die(cause)
+          }
+        })
 
-
+      yield* backgroundApplyLocalPushes({
+        localPushesLatch,
+        localPushesQueue,
+        pullLatch,
+        syncStateSref,
+        syncBackendPushQueue,
+        schema,
+        isClientEvent,
+        otelSpan,
+        currentLocalPushGenerationRef,
+        connectedClientSessionPullQueues,
+        mergeCounterRef,
+        mergePayloads,
+      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+      const backendPushingFiberHandle = yield* FiberHandle.make()
+
+      yield* FiberHandle.run(
+        backendPushingFiberHandle,
+        backgroundBackendPushing({
+          syncBackendPushQueue,
+          otelSpan,
+          devtoolsLatch: ctxRef.current?.devtoolsLatch,
+        }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
+      )
+
+      yield* backgroundBackendPulling({
+        initialBackendHead,
+        isClientEvent,
+        restartBackendPushing: (filteredRebasedPending) =>
           Effect.gen(function* () {
-
-
-
+            // Stop current pushing fiber
+            yield* FiberHandle.clear(backendPushingFiberHandle)
+
+            // Reset the sync backend push queue
+            yield* BucketQueue.clear(syncBackendPushQueue)
+            yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending)
+
+            // Restart pushing fiber
+            yield* FiberHandle.run(
+              backendPushingFiberHandle,
+              backgroundBackendPushing({
+                syncBackendPushQueue,
+                otelSpan,
+                devtoolsLatch: ctxRef.current?.devtoolsLatch,
+              }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
+            )
+          }),
+        syncStateSref,
+        localPushesLatch,
+        pullLatch,
+        otelSpan,
+        initialBlockingSyncContext,
+        devtoolsLatch: ctxRef.current?.devtoolsLatch,
+        connectedClientSessionPullQueues,
+        mergeCounterRef,
+        mergePayloads,
+      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+      return { initialLeaderHead: initialLocalHead }
+    }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'))
+
+    const pull: LeaderSyncProcessor['pull'] = ({ cursor }) =>
+      Effect.gen(function* () {
+        const queue = yield* pullQueue({ cursor })
+        return Stream.fromQueue(queue)
+      }).pipe(Stream.unwrapScoped)
+
+    const pullQueue: LeaderSyncProcessor['pullQueue'] = ({ cursor }) => {
+      const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized')
+      return Effect.gen(function* () {
+        const queue = yield* connectedClientSessionPullQueues.makeQueue
+        const payloadsSinceCursor = Array.from(mergePayloads.entries())
+          .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+          .filter(({ mergeCounter }) => mergeCounter > cursor.mergeCounter)
+          .toSorted((a, b) => a.mergeCounter - b.mergeCounter)
+          .map(({ payload, mergeCounter }) => {
+            if (payload._tag === 'upstream-advance') {
+              return {
+                payload: {
+                  _tag: 'upstream-advance' as const,
+                  newEvents: ReadonlyArray.dropWhile(payload.newEvents, (mutationEventEncoded) =>
+                    EventId.isGreaterThanOrEqual(cursor.eventId, mutationEventEncoded.id),
+                  ),
+                },
+                mergeCounter,
+              }
+            } else {
+              return { payload, mergeCounter }
             }
           })
 
-        yield*
-          localPushesLatch,
-          localPushesQueue,
-          pullLatch,
-          syncStateSref,
-          syncBackendQueue,
-          schema,
-          isClientEvent,
-          otelSpan,
-          currentLocalPushGenerationRef,
-        }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
-
-        const backendPushingFiberHandle = yield* FiberHandle.make()
-
-        yield* FiberHandle.run(
-          backendPushingFiberHandle,
-          backgroundBackendPushing({
-            dbReady,
-            syncBackendQueue,
-            otelSpan,
-            devtoolsLatch: ctxRef.current?.devtoolsLatch,
-          }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-        )
+        yield* queue.offerAll(payloadsSinceCursor)
 
-
-
-
-          isClientEvent,
-          restartBackendPushing: (filteredRebasedPending) =>
-            Effect.gen(function* () {
-              // Stop current pushing fiber
-              yield* FiberHandle.clear(backendPushingFiberHandle)
-
-              // Reset the sync queue
-              yield* BucketQueue.clear(syncBackendQueue)
-              yield* BucketQueue.offerAll(syncBackendQueue, filteredRebasedPending)
-
-              // Restart pushing fiber
-              yield* FiberHandle.run(
-                backendPushingFiberHandle,
-                backgroundBackendPushing({
-                  dbReady,
-                  syncBackendQueue,
-                  otelSpan,
-                  devtoolsLatch: ctxRef.current?.devtoolsLatch,
-                }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-              )
-            }),
-          syncStateSref,
-          localPushesLatch,
-          pullLatch,
-          otelSpan,
-          initialBlockingSyncContext,
-          devtoolsLatch: ctxRef.current?.devtoolsLatch,
-        }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+        return queue
+      }).pipe(Effect.provide(runtime))
+    }
 
-
-
+    const syncState = Subscribable.make({
+      get: Effect.gen(function* () {
+        const syncState = yield* syncStateSref
+        if (syncState === undefined) return shouldNeverHappen('Not initialized')
+        return syncState
+      }),
+      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+    })
 
     return {
+      pull,
+      pullQueue,
       push,
       pushPartial,
       boot,
-      syncState
-
-        const syncState = yield* syncStateSref
-        if (syncState === undefined) return shouldNeverHappen('Not initialized')
-        return syncState
-      }),
-      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
-      }),
+      syncState,
+      getMergeCounter: () => mergeCounterRef.current,
     } satisfies LeaderSyncProcessor
   })
 
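The new `pullQueue` above replays history to late-subscribing client sessions: every merge gets a monotonically increasing counter, the corresponding payload is remembered in `mergePayloads`, and a session that connects with a cursor receives all payloads with a higher counter, with the already-seen prefix of each `upstream-advance` trimmed off. A simplified sketch of that replay filter (types reduced to the essentials; not the actual implementation):

```ts
type EventId = { global: number; client: number }
type Payload =
  | { _tag: 'upstream-advance'; newEvents: ReadonlyArray<{ id: EventId }> }
  | { _tag: 'upstream-rebase' }

const isGreaterThanOrEqual = (a: EventId, b: EventId) =>
  a.global > b.global || (a.global === b.global && a.client >= b.client)

const replaySince = (
  mergePayloads: Map<number, Payload>,
  cursor: { mergeCounter: number; eventId: EventId },
) =>
  [...mergePayloads.entries()]
    .filter(([mergeCounter]) => mergeCounter > cursor.mergeCounter)
    .sort(([a], [b]) => a - b)
    .map(([mergeCounter, payload]) =>
      payload._tag === 'upstream-advance'
        ? {
            mergeCounter,
            payload: {
              ...payload,
              // Events are ordered, so this drops exactly the prefix the
              // client has already seen (the real code uses dropWhile).
              newEvents: payload.newEvents.filter((e) => !isGreaterThanOrEqual(cursor.eventId, e.id)),
            },
          }
        : { mergeCounter, payload },
    )
```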
@@ -321,30 +369,32 @@ const backgroundApplyLocalPushes = ({
   localPushesQueue,
   pullLatch,
   syncStateSref,
-
+  syncBackendPushQueue,
   schema,
   isClientEvent,
   otelSpan,
   currentLocalPushGenerationRef,
+  connectedClientSessionPullQueues,
+  mergeCounterRef,
+  mergePayloads,
 }: {
   pullLatch: Effect.Latch
   localPushesLatch: Effect.Latch
   localPushesQueue: BucketQueue.BucketQueue<LocalPushQueueItem>
   syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
-
+  syncBackendPushQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
   schema: LiveStoreSchema
   isClientEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
   otelSpan: otel.Span | undefined
   currentLocalPushGenerationRef: { current: number }
+  connectedClientSessionPullQueues: PullQueueSet
+  mergeCounterRef: { current: number }
+  mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
 }) =>
   Effect.gen(function* () {
-    const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx
-
-    const applyMutationItems = yield* makeApplyMutationItems
-
     while (true) {
       // TODO make batch size configurable
-      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1,
+      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, LOCAL_PUSH_BATCH_SIZE)
 
       // Wait for the backend pulling to finish
       yield* localPushesLatch.await
@@ -370,34 +420,33 @@
       const syncState = yield* syncStateSref
       if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
-      const
+      const mergeResult = SyncState.merge({
         syncState,
         payload: { _tag: 'local-push', newEvents },
         isClientEvent,
         isEqualEvent: MutationEvent.isEqualEncoded,
       })
 
-
+      const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
+      switch (mergeResult._tag) {
         case 'unexpected-error': {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:unexpected-error`, {
             batchSize: newEvents.length,
             newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
           })
-          return yield* Effect.fail(
+          return yield* Effect.fail(mergeResult.cause)
         }
         case 'rebase': {
           return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
         }
         case 'reject': {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:reject`, {
             batchSize: newEvents.length,
-
+            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
           })
 
-
-
-          TODO: how to test this?
-          */
+          // TODO: how to test this?
          currentLocalPushGenerationRef.current++
 
           const nextGeneration = currentLocalPushGenerationRef.current
@@ -411,7 +460,8 @@
             (item) => item[2] >= nextGeneration,
           )
 
-
+          // TODO we still need to better understand and handle this scenario
+          if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
            console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue))
            debugger
          }
@@ -425,7 +475,7 @@
             Deferred.fail(
               deferred,
               LeaderAheadError.make({
-                minimumExpectedId:
+                minimumExpectedId: mergeResult.expectedMinimumId,
                 providedId,
                 // nextGeneration,
               }),
@@ -443,93 +493,88 @@
           break
         }
         default: {
-          casesHandled(
+          casesHandled(mergeResult)
         }
       }
 
-      yield* SubscriptionRef.set(syncStateSref,
+      yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)
 
-      if (clientId === 'client-b') {
-        // yield* Effect.log('offer upstream-advance due to local-push')
-        // debugger
-      }
       yield* connectedClientSessionPullQueues.offer({
-        payload: {
-
+        payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+        mergeCounter,
       })
+      mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))
 
-      otelSpan?.addEvent(
+      otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:advance`, {
         batchSize: newEvents.length,
-
+        mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
       })
 
       // Don't sync clientOnly mutations
-      const filteredBatch =
+      const filteredBatch = mergeResult.newEvents.filter((mutationEventEncoded) => {
        const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation)
        return mutationDef.options.clientOnly === false
      })
 
-      yield* BucketQueue.offerAll(
+      yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch)
 
-      yield*
+      yield* applyMutationsBatch({ batchItems: newEvents, deferreds })
 
       // Allow the backend pulling to start
       yield* pullLatch.open
     }
   })
 
-type
+type ApplyMutationsBatch = (_: {
   batchItems: ReadonlyArray<MutationEvent.EncodedWithMeta>
-  /**
+  /**
+   * The deferreds are used by the caller to know when the mutation has been processed.
+   * Indexes are aligned with `batchItems`
+   */
   deferreds: ReadonlyArray<Deferred.Deferred<void, LeaderAheadError> | undefined> | undefined
-}) => Effect.Effect<void, UnexpectedError>
+}) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx>
 
 // TODO how to handle errors gracefully
-const
+const applyMutationsBatch: ApplyMutationsBatch = ({ batchItems, deferreds }) =>
   Effect.gen(function* () {
-    const
-    const { dbReadModel: db, dbMutationLog } = leaderThreadCtx
+    const { dbReadModel: db, dbMutationLog, applyMutation } = yield* LeaderThreadCtx
 
-
+    // NOTE We always start a transaction to ensure consistency between db and mutation log (even for single-item batches)
+    db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
+    dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
 
-
+    yield* Effect.addFinalizer((exit) =>
       Effect.gen(function* () {
-
-        dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
-
-        yield* Effect.addFinalizer((exit) =>
-          Effect.gen(function* () {
-            if (Exit.isSuccess(exit)) return
+        if (Exit.isSuccess(exit)) return
 
-
-
-
-
-
+        // Rollback in case of an error
+        db.execute('ROLLBACK', undefined)
+        dbMutationLog.execute('ROLLBACK', undefined)
+      }),
+    )
 
-
-
+    for (let i = 0; i < batchItems.length; i++) {
+      const { sessionChangeset } = yield* applyMutation(batchItems[i]!)
+      batchItems[i]!.meta.sessionChangeset = sessionChangeset
 
-
-
-
-
+      if (deferreds?.[i] !== undefined) {
+        yield* Deferred.succeed(deferreds[i]!, void 0)
+      }
+    }
 
-
-
-
-
-
-
-
-
-
-
-
-    })
+    db.execute('COMMIT', undefined) // Commit the transaction
+    dbMutationLog.execute('COMMIT', undefined) // Commit the transaction
+  }).pipe(
+    Effect.uninterruptible,
+    Effect.scoped,
+    Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyMutationItems', {
+      attributes: { batchSize: batchItems.length },
+    }),
+    Effect.tapCauseLogPretty,
+    UnexpectedError.mapToUnexpectedError,
+  )
 
 const backgroundBackendPulling = ({
-  dbReady,
   initialBackendHead,
   isClientEvent,
   restartBackendPushing,
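`applyMutationsBatch` above keeps the read model and the mutation log consistent by opening a transaction on both databases and registering a finalizer that rolls both back unless the scope exits successfully. A hedged sketch of that pattern in isolation; `Db` is a stand-in for the `SqliteDb` interface and only `execute` is assumed:

```ts
import { Effect, Exit } from 'effect'

interface Db {
  execute: (sql: string) => void
}

// Sketch of the dual-database transaction used by applyMutationsBatch.
const withDualTransaction = <A, E, R>(db: Db, dbLog: Db, body: Effect.Effect<A, E, R>) =>
  Effect.gen(function* () {
    db.execute('BEGIN TRANSACTION')
    dbLog.execute('BEGIN TRANSACTION')

    // Fires on scope close; only rolls back when the exit is a failure.
    yield* Effect.addFinalizer((exit) =>
      Effect.sync(() => {
        if (Exit.isSuccess(exit)) return
        db.execute('ROLLBACK')
        dbLog.execute('ROLLBACK')
      }),
    )

    const result = yield* body
    db.execute('COMMIT')
    dbLog.execute('COMMIT')
    return result
  }).pipe(Effect.uninterruptible, Effect.scoped)
```

Making the whole region uninterruptible, as the real code does, ensures a fiber interrupt cannot leave one database committed and the other open.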
@@ -539,8 +584,10 @@ const backgroundBackendPulling = ({
   pullLatch,
   devtoolsLatch,
   initialBlockingSyncContext,
+  connectedClientSessionPullQueues,
+  mergeCounterRef,
+  mergePayloads,
 }: {
-  dbReady: Deferred.Deferred<void>
   initialBackendHead: EventId.GlobalEventId
   isClientEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
   restartBackendPushing: (
@@ -552,23 +599,15 @@
   pullLatch: Effect.Latch
   devtoolsLatch: Effect.Latch | undefined
   initialBlockingSyncContext: InitialBlockingSyncContext
+  connectedClientSessionPullQueues: PullQueueSet
+  mergeCounterRef: { current: number }
+  mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
 }) =>
   Effect.gen(function* () {
-    const {
-      syncBackend,
-      dbReadModel: db,
-      dbMutationLog,
-      connectedClientSessionPullQueues,
-      schema,
-      clientId,
-    } = yield* LeaderThreadCtx
+    const { syncBackend, dbReadModel: db, dbMutationLog, schema } = yield* LeaderThreadCtx
 
     if (syncBackend === undefined) return
 
-    const cursorInfo = yield* getCursorInfo(initialBackendHead)
-
-    const applyMutationItems = yield* makeApplyMutationItems
-
     const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
       Effect.gen(function* () {
         if (newEvents.length === 0) return
@@ -586,84 +625,101 @@
         const syncState = yield* syncStateSref
         if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
-        const
-
-        const updateResult = SyncState.updateSyncState({
+        const mergeResult = SyncState.merge({
           syncState,
-          payload: {
+          payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
           isClientEvent,
           isEqualEvent: MutationEvent.isEqualEncoded,
           ignoreClientEvents: true,
         })
 
-
+        const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
+        if (mergeResult._tag === 'reject') {
          return shouldNeverHappen('The leader thread should never reject upstream advances')
-        } else if (
-          otelSpan?.addEvent(
+        } else if (mergeResult._tag === 'unexpected-error') {
+          otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:unexpected-error`, {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
          })
-          return yield* Effect.fail(
+          return yield* Effect.fail(mergeResult.cause)
        }
 
        const newBackendHead = newEvents.at(-1)!.id
 
-        updateBackendHead(dbMutationLog, newBackendHead)
+        Mutationlog.updateBackendHead(dbMutationLog, newBackendHead)
 
-        if (
-          otelSpan?.addEvent(
+        if (mergeResult._tag === 'rebase') {
+          otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:rebase`, {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
-            rollbackCount:
-
+            rollbackCount: mergeResult.rollbackEvents.length,
+            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-          const
+          const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((mutationEvent) => {
            const mutationDef = getMutationDef(schema, mutationEvent.mutation)
            return mutationDef.options.clientOnly === false
          })
-          yield* restartBackendPushing(
+          yield* restartBackendPushing(globalRebasedPendingEvents)
 
-          if (
-            yield* rollback({ db, dbMutationLog, eventIdsToRollback:
+          if (mergeResult.rollbackEvents.length > 0) {
+            yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) })
          }
 
          yield* connectedClientSessionPullQueues.offer({
-            payload: {
-
-
-
-
-            },
-            remaining,
+            payload: SyncState.PayloadUpstreamRebase.make({
+              newEvents: mergeResult.newEvents,
+              rollbackEvents: mergeResult.rollbackEvents,
+            }),
+            mergeCounter,
          })
+          mergePayloads.set(
+            mergeCounter,
+            SyncState.PayloadUpstreamRebase.make({
+              newEvents: mergeResult.newEvents,
+              rollbackEvents: mergeResult.rollbackEvents,
+            }),
+          )
        } else {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:advance`, {
            newEventsCount: newEvents.length,
-
+            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-          if (clientId === 'client-b') {
-            // yield* Effect.log('offer upstream-advance due to pull')
-          }
          yield* connectedClientSessionPullQueues.offer({
-            payload: {
-
+            payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+            mergeCounter,
          })
+          mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))
+
+          if (mergeResult.confirmedEvents.length > 0) {
+            // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
+            // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
+            const confirmedNewEvents = newEvents.filter((mutationEvent) =>
+              mergeResult.confirmedEvents.some((confirmedEvent) =>
+                EventId.isEqual(mutationEvent.id, confirmedEvent.id),
+              ),
+            )
+            yield* Mutationlog.updateSyncMetadata(confirmedNewEvents)
+          }
        }
 
+        // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
        trimChangesetRows(db, newBackendHead)
 
-        yield*
+        yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined })
 
-        yield* SubscriptionRef.set(syncStateSref,
+        yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)
 
+        // Allow local pushes to be processed again
        if (remaining === 0) {
-          // Allow local pushes to be processed again
          yield* localPushesLatch.open
        }
      })
 
+    const cursorInfo = yield* Mutationlog.getSyncBackendCursorInfo(initialBackendHead)
+
    yield* syncBackend.pull(cursorInfo).pipe(
      // TODO only take from queue while connected
      Stream.tap(({ batch, remaining }) =>
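The pull path above funnels every chunk through `SyncState.merge` and branches on the result tag: `reject` is impossible for upstream input, `unexpected-error` propagates, `rebase` restarts backend pushing and rolls back local changesets, and `advance` confirms pending events, re-joined against the pulled batch because `confirmedEvents` lack sync metadata. A reduced sketch of that branching, with simplified shapes rather than the real `SyncState` types:

```ts
type MergeResult<E> =
  | { _tag: 'advance'; confirmedEvents: ReadonlyArray<E> }
  | { _tag: 'rebase'; rollbackEvents: ReadonlyArray<E> }
  | { _tag: 'reject' }
  | { _tag: 'unexpected-error'; cause: unknown }

const handlePullMerge = <E extends { id: string }>(result: MergeResult<E>, newEvents: ReadonlyArray<E>) => {
  switch (result._tag) {
    case 'reject':
      // Upstream is authoritative; the leader never rejects what it pulls.
      throw new Error('unreachable: leader must not reject upstream advances')
    case 'unexpected-error':
      throw result.cause
    case 'rebase':
      // Undo unconfirmed local changes before re-applying on top of upstream.
      return { rollbackIds: result.rollbackEvents.map((e) => e.id) }
    case 'advance':
      // confirmedEvents carry no sync metadata; re-join against the batch.
      return { confirmed: newEvents.filter((e) => result.confirmedEvents.some((c) => c.id === e.id)) }
  }
}
```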
@@ -675,16 +731,13 @@
         // },
         // })
 
-        // Wait for the db to be initially created
-        yield* dbReady
-
         // NOTE we only want to take process mutations when the sync backend is connected
         // (e.g. needed for simulating being offline)
         // TODO remove when there's a better way to handle this in stream above
         yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
         yield* onNewPullChunk(
-          batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)),
+          batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded, _.metadata)),
           remaining,
         )
 
@@ -694,102 +747,26 @@
       Stream.runDrain,
       Effect.interruptible,
     )
-  }).pipe(Effect.withSpan('@livestore/common:
-
-const rollback = ({
-  db,
-  dbMutationLog,
-  eventIdsToRollback,
-}: {
-  db: SqliteDb
-  dbMutationLog: SqliteDb
-  eventIdsToRollback: EventId.EventId[]
-}) =>
-  Effect.gen(function* () {
-    const rollbackEvents = db
-      .select<SessionChangesetMetaRow>(
-        sql`SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`,
-      )
-      .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
-      .sort((a, b) => EventId.compare(a.id, b.id))
-    // TODO bring back `.toSorted` once Expo supports it
-    // .toSorted((a, b) => EventId.compare(a.id, b.id))
-
-    // Apply changesets in reverse order
-    for (let i = rollbackEvents.length - 1; i >= 0; i--) {
-      const { changeset } = rollbackEvents[i]!
-      if (changeset !== null) {
-        db.makeChangeset(changeset).invert().apply()
-      }
-    }
-
-    const eventIdPairChunks = ReadonlyArray.chunksOf(100)(
-      eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`),
-    )
-
-    // Delete the changeset rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-      db.execute(
-        sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-      )
-    }
-
-    // Delete the mutation log rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-      dbMutationLog.execute(
-        sql`DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-      )
-    }
-  }).pipe(
-    Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
-      attributes: { count: eventIdsToRollback.length },
-    }),
-  )
-
-const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
-  Effect.gen(function* () {
-    const { dbMutationLog } = yield* LeaderThreadCtx
-
-    if (remoteHead === EventId.ROOT.global) return Option.none()
-
-    const MutationlogQuerySchema = Schema.Struct({
-      syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
-    }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head)
-
-    const syncMetadataOption = yield* Effect.sync(() =>
-      dbMutationLog.select<{ syncMetadataJson: string }>(
-        sql`SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`,
-      ),
-    ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)
-
-    return Option.some({
-      cursor: { global: remoteHead, client: EventId.clientDefault },
-      metadata: syncMetadataOption,
-    }) satisfies InitialSyncInfo
-  }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
+  }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'))
 
 const backgroundBackendPushing = ({
-
-  syncBackendQueue,
+  syncBackendPushQueue,
   otelSpan,
   devtoolsLatch,
 }: {
-
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+  syncBackendPushQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
   otelSpan: otel.Span | undefined
   devtoolsLatch: Effect.Latch | undefined
 }) =>
   Effect.gen(function* () {
-    const { syncBackend
+    const { syncBackend } = yield* LeaderThreadCtx
     if (syncBackend === undefined) return
 
-    yield* dbReady
-
     while (true) {
       yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
       // TODO make batch size configurable
-      const queueItems = yield* BucketQueue.takeBetween(
+      const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, BACKEND_PUSH_BATCH_SIZE)
 
       yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
@@ -813,27 +790,85 @@
       // wait for interrupt caused by background pulling which will then restart pushing
       return yield* Effect.never
     }
-
-      const { metadata } = pushResult.right
-
-      // TODO try to do this in a single query
-      for (let i = 0; i < queueItems.length; i++) {
-        const mutationEventEncoded = queueItems[i]!
-        yield* execSql(
-          dbMutationLog,
-          ...updateRows({
-            tableName: MUTATION_LOG_META_TABLE,
-            columns: mutationLogMetaTable.sqliteDef.columns,
-            where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
-            updateValues: { syncMetadataJson: metadata[i]! },
-          }),
-        )
-      }
     }
-  }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:
+  }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'))
 
 const trimChangesetRows = (db: SqliteDb, newHead: EventId.EventId) => {
   // Since we're using the session changeset rows to query for the current head,
   // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
   db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
 }
+
+interface PullQueueSet {
+  makeQueue: Effect.Effect<
+    Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>,
+    UnexpectedError,
+    Scope.Scope | LeaderThreadCtx
+  >
+  offer: (item: {
+    payload: typeof SyncState.PayloadUpstream.Type
+    mergeCounter: number
+  }) => Effect.Effect<void, UnexpectedError>
+}
+
+const makePullQueueSet = Effect.gen(function* () {
+  const set = new Set<Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>>()
+
+  yield* Effect.addFinalizer(() =>
+    Effect.gen(function* () {
+      for (const queue of set) {
+        yield* Queue.shutdown(queue)
+      }
+
+      set.clear()
+    }),
+  )
+
+  const makeQueue: PullQueueSet['makeQueue'] = Effect.gen(function* () {
+    const queue = yield* Queue.unbounded<{
+      payload: typeof SyncState.PayloadUpstream.Type
+      mergeCounter: number
+    }>().pipe(Effect.acquireRelease(Queue.shutdown))
+
+    yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)))
+
+    set.add(queue)
+
+    return queue
+  })
+
+  const offer: PullQueueSet['offer'] = (item) =>
+    Effect.gen(function* () {
+      // Short-circuit if the payload is an empty upstream advance
+      if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
+        return
+      }
+
+      for (const queue of set) {
+        yield* Queue.offer(queue, item)
+      }
+    })
+
+  return {
+    makeQueue,
+    offer,
+  }
+})
+
+const incrementMergeCounter = (mergeCounterRef: { current: number }) =>
+  Effect.gen(function* () {
+    const { dbReadModel } = yield* LeaderThreadCtx
+    mergeCounterRef.current++
+    dbReadModel.execute(
+      sql`INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`,
+    )
+    return mergeCounterRef.current
+  })
+
+const getMergeCounterFromDb = (dbReadModel: SqliteDb) =>
+  Effect.gen(function* () {
+    const result = dbReadModel.select<{ mergeCounter: number }>(
+      sql`SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`,
+    )
    return result[0]?.mergeCounter ?? 0
  })