@livestore/common 0.0.0-snapshot-2b8a9de3ec1a701aca891ebc2c98eb328274ae9e → 0.0.0-snapshot-2ef046b02334f52613d31dbe06af53487685edc0
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +4 -11
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js +0 -6
- package/dist/adapter-types.js.map +1 -1
- package/dist/devtools/devtools-messages-common.d.ts +7 -0
- package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-common.js +6 -0
- package/dist/devtools/devtools-messages-common.js.map +1 -1
- package/dist/devtools/devtools-messages-leader.d.ts +1 -1
- package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-leader.js +1 -2
- package/dist/devtools/devtools-messages-leader.js.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +15 -6
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.js +199 -189
- package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
- package/dist/leader-thread/apply-mutation.d.ts +14 -9
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +43 -36
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +2 -5
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +21 -33
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mod.d.ts +1 -1
- package/dist/leader-thread/mod.d.ts.map +1 -1
- package/dist/leader-thread/mod.js +1 -1
- package/dist/leader-thread/mod.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +19 -3
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +105 -12
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +4 -3
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +35 -19
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +5 -4
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +7 -9
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +4 -0
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +7 -1
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/MutationEvent.d.ts +78 -25
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +25 -12
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/schema.js +1 -1
- package/dist/schema/schema.js.map +1 -1
- package/dist/schema/system-tables.d.ts +67 -0
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +12 -1
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +9 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.js +23 -19
- package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
- package/dist/sync/sync.d.ts +6 -5
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +47 -71
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +76 -112
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +67 -139
- package/dist/sync/syncstate.test.js.map +1 -1
- package/package.json +2 -2
- package/src/adapter-types.ts +3 -12
- package/src/devtools/devtools-messages-common.ts +9 -0
- package/src/devtools/devtools-messages-leader.ts +1 -2
- package/src/leader-thread/LeaderSyncProcessor.ts +372 -348
- package/src/leader-thread/apply-mutation.ts +81 -71
- package/src/leader-thread/leader-worker-devtools.ts +3 -8
- package/src/leader-thread/make-leader-thread-layer.ts +26 -41
- package/src/leader-thread/mod.ts +1 -1
- package/src/leader-thread/mutationlog.ts +166 -13
- package/src/leader-thread/recreate-db.ts +4 -3
- package/src/leader-thread/types.ts +34 -23
- package/src/rehydrate-from-mutationlog.ts +12 -12
- package/src/schema/EventId.ts +8 -1
- package/src/schema/MutationEvent.ts +32 -16
- package/src/schema/schema.ts +1 -1
- package/src/schema/system-tables.ts +20 -1
- package/src/sync/ClientSessionSyncProcessor.ts +33 -25
- package/src/sync/sync.ts +6 -9
- package/src/sync/syncstate.test.ts +130 -208
- package/src/sync/syncstate.ts +76 -123
- package/dist/leader-thread/pull-queue-set.d.ts +0 -7
- package/dist/leader-thread/pull-queue-set.d.ts.map +0 -1
- package/dist/leader-thread/pull-queue-set.js +0 -48
- package/dist/leader-thread/pull-queue-set.js.map +0 -1
- package/src/leader-thread/pull-queue-set.ts +0 -67
--- a/package/src/leader-thread/LeaderSyncProcessor.ts
+++ b/package/src/leader-thread/LeaderSyncProcessor.ts
@@ -1,15 +1,14 @@
 import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
-import type { HttpClient, Scope, Tracer } from '@livestore/utils/effect'
+import type { HttpClient, Runtime, Scope, Tracer } from '@livestore/utils/effect'
 import {
   BucketQueue,
   Deferred,
   Effect,
   Exit,
   FiberHandle,
-  Option,
   OtelTracer,
+  Queue,
   ReadonlyArray,
-  Schema,
   Stream,
   Subscribable,
   SubscriptionRef,
@@ -18,30 +17,29 @@ import type * as otel from '@opentelemetry/api'
 
 import type { SqliteDb } from '../adapter-types.js'
 import { UnexpectedError } from '../adapter-types.js'
-import type { LiveStoreSchema
+import type { LiveStoreSchema } from '../schema/mod.js'
 import {
   EventId,
   getMutationDef,
-
+  LEADER_MERGE_COUNTER_TABLE,
   MutationEvent,
-  mutationLogMetaTable,
   SESSION_CHANGESET_META_TABLE,
 } from '../schema/mod.js'
-import { updateRows } from '../sql-queries/index.js'
 import { LeaderAheadError } from '../sync/sync.js'
 import * as SyncState from '../sync/syncstate.js'
 import { sql } from '../util.js'
-import {
-import
-import {
-import type { InitialBlockingSyncContext, InitialSyncInfo, LeaderSyncProcessor } from './types.js'
+import { rollback } from './apply-mutation.js'
+import * as Mutationlog from './mutationlog.js'
+import type { InitialBlockingSyncContext, LeaderSyncProcessor } from './types.js'
 import { LeaderThreadCtx } from './types.js'
 
 export const BACKEND_PUSH_BATCH_SIZE = 50
+export const LOCAL_PUSH_BATCH_SIZE = 10
 
 type LocalPushQueueItem = [
   mutationEvent: MutationEvent.EncodedWithMeta,
   deferred: Deferred.Deferred<void, LeaderAheadError> | undefined,
+  /** Used to determine whether the batch has become invalid due to a rejected local push batch */
   generation: number,
 ]
 
@@ -52,40 +50,49 @@ type LocalPushQueueItem = [
  * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
  * Responsibilities:
- * - Queueing incoming local mutations in a
+ * - Queueing incoming local mutations in a localPushesQueue.
  * - Broadcasting mutations to client sessions via pull queues.
  * - Pushing mutations to the sync backend.
  *
  * Notes:
  *
  * local push processing:
- * -
+ * - localPushesQueue:
  *   - Maintains events in ascending order.
  *   - Uses `Deferred` objects to resolve/reject events based on application success.
- *   - Processes events from the
+ *   - Processes events from the queue, applying mutations in batches.
  *   - Controlled by a `Latch` to manage execution flow.
 *   - The latch closes on pull receipt and re-opens post-pull completion.
 *   - Processes up to `maxBatchSize` events per cycle.
 *
+ * Currently we're advancing the db read model and mutation log in lockstep, but we could also decouple this in the future
+ *
+ * Tricky concurrency scenarios:
+ * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+ *   Solution: Introduce a generation number for local push batches which is used to filter out old batches items in case of rejection.
+ *
  */
 export const makeLeaderSyncProcessor = ({
   schema,
-
+  dbMutationLogMissing,
   dbMutationLog,
-
+  dbReadModel,
+  dbReadModelMissing,
   initialBlockingSyncContext,
   onError,
 }: {
   schema: LiveStoreSchema
   /** Only used to know whether we can safely query dbMutationLog during setup execution */
-
+  dbMutationLogMissing: boolean
   dbMutationLog: SqliteDb
-
+  dbReadModel: SqliteDb
+  /** Only used to know whether we can safely query dbReadModel during setup execution */
+  dbReadModelMissing: boolean
   initialBlockingSyncContext: InitialBlockingSyncContext
   onError: 'shutdown' | 'ignore'
 }): Effect.Effect<LeaderSyncProcessor, UnexpectedError, Scope.Scope> =>
   Effect.gen(function* () {
-    const
+    const syncBackendPushQueue = yield* BucketQueue.make<MutationEvent.EncodedWithMeta>()
 
     const syncStateSref = yield* SubscriptionRef.make<SyncState.SyncState | undefined>(undefined)
 
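The doc comment above names the trickiest concurrency scenario: once one local-push item is rejected, everything queued behind it is stale. A minimal, self-contained sketch of the generation-number idea (the `GenerationQueue` shape and names are illustrative, not LiveStore APIs):

```ts
// Illustrative sketch only: queued items carry the generation they were
// enqueued under; a rejection bumps the generation and drops stale items.
type QueueItem<T> = { value: T; generation: number }

class GenerationQueue<T> {
  private items: QueueItem<T>[] = []
  private generation = 0

  offer(value: T): void {
    this.items.push({ value, generation: this.generation })
  }

  // Mirrors `currentLocalPushGenerationRef.current++` plus the queue filter in
  // the rejection path further down: items from older generations are
  // discarded even if they would be valid on their own.
  reject(): void {
    this.generation++
    this.items = this.items.filter((item) => item.generation >= this.generation)
  }

  take(max: number): T[] {
    return this.items.splice(0, max).map((item) => item.value)
  }
}
```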
@@ -94,13 +101,18 @@ export const makeLeaderSyncProcessor = ({
       return mutationDef.options.clientOnly
     }
 
+    const connectedClientSessionPullQueues = yield* makePullQueueSet
+
     /**
      * Tracks generations of queued local push events.
-     * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+     * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
      * even if they would be valid on their own.
      */
     const currentLocalPushGenerationRef = { current: 0 }
 
+    const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) }
+    const mergePayloads = new Map<number, typeof SyncState.PayloadUpstream.Type>()
+
     // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
     const ctxRef = {
       current: undefined as
@@ -109,6 +121,7 @@ export const makeLeaderSyncProcessor = ({
             otelSpan: otel.Span | undefined
             span: Tracer.Span
             devtoolsLatch: Effect.Latch | undefined
+            runtime: Runtime.Runtime<LeaderThreadCtx>
           },
     }
 
@@ -116,24 +129,12 @@ export const makeLeaderSyncProcessor = ({
     const localPushesLatch = yield* Effect.makeLatch(true)
     const pullLatch = yield* Effect.makeLatch(true)
 
+    // NOTE: New events are only pushed to sync backend after successful local push processing
     const push: LeaderSyncProcessor['push'] = (newEvents, options) =>
       Effect.gen(function* () {
         // TODO validate batch
         if (newEvents.length === 0) return
 
-        // if (options.generation < currentLocalPushGenerationRef.current) {
-        //   debugger
-        //   // We can safely drop this batch as it's from a previous push generation
-        //   return
-        // }
-
-        if (clientId === 'client-b') {
-          // console.log(
-          //   'push from client session',
-          //   newEvents.map((item) => item.toJSON()),
-          // )
-        }
-
         const waitForProcessing = options?.waitForProcessing ?? false
         const generation = currentLocalPushGenerationRef.current
 
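With the debug scaffolding removed, `push` is driven purely by `options?.waitForProcessing` and the current generation. A hypothetical call site, typed structurally so the sketch stands alone (the real signature lives on `LeaderSyncProcessor` in `./types.js`):

```ts
import { Effect } from 'effect'

// Structural stand-in for LeaderSyncProcessor['push']; event/error types elided.
type PushFn = (
  newEvents: ReadonlyArray<unknown>,
  options?: { waitForProcessing?: boolean },
) => Effect.Effect<void, unknown>

// With `waitForProcessing: true` the effect resolves only after the leader has
// applied the batch; without it, it resolves as soon as the batch is enqueued.
const pushAndWait = (push: PushFn, events: ReadonlyArray<unknown>) =>
  push(events, { waitForProcessing: true })
```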
@@ -154,7 +155,7 @@ export const makeLeaderSyncProcessor = ({
           yield* BucketQueue.offerAll(localPushesQueue, items)
         }
       }).pipe(
-        Effect.withSpan('@livestore/common:
+        Effect.withSpan('@livestore/common:LeaderSyncProcessor:local-push', {
           attributes: {
             batchSize: newEvents.length,
             batch: TRACE_VERBOSE ? newEvents : undefined,
@@ -164,7 +165,7 @@ export const makeLeaderSyncProcessor = ({
       )
 
     const pushPartial: LeaderSyncProcessor['pushPartial'] = ({
-      mutationEvent:
+      mutationEvent: { mutation, args },
       clientId,
       sessionId,
     }) =>
@@ -172,10 +173,11 @@ export const makeLeaderSyncProcessor = ({
         const syncState = yield* syncStateSref
         if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
-        const mutationDef = getMutationDef(schema,
+        const mutationDef = getMutationDef(schema, mutation)
 
         const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
-
+          mutation,
+          args,
           clientId,
           sessionId,
           ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
@@ -185,134 +187,166 @@ export const makeLeaderSyncProcessor = ({
       }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie))
 
     // Starts various background loops
-    const boot: LeaderSyncProcessor['boot'] = (
-      Effect.
-
-
-
+    const boot: LeaderSyncProcessor['boot'] = Effect.gen(function* () {
+      const span = yield* Effect.currentSpan.pipe(Effect.orDie)
+      const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
+      const { devtools, shutdownChannel } = yield* LeaderThreadCtx
+      const runtime = yield* Effect.runtime<LeaderThreadCtx>()
+
+      ctxRef.current = {
+        otelSpan,
+        span,
+        devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+        runtime,
+      }
 
-
-
-
-
-      }
+      const initialBackendHead = dbMutationLogMissing
+        ? EventId.ROOT.global
+        : Mutationlog.getBackendHeadFromDb(dbMutationLog)
+      const initialLocalHead = dbMutationLogMissing ? EventId.ROOT : Mutationlog.getClientHeadFromDb(dbMutationLog)
 
-
-
+      if (initialBackendHead > initialLocalHead.global) {
+        return shouldNeverHappen(
+          `During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`,
+        )
+      }
 
-
-
-
-      )
-      }
+      const pendingMutationEvents = dbMutationLogMissing
+        ? []
+        : yield* Mutationlog.getMutationEventsSince({ global: initialBackendHead, client: EventId.clientDefault })
 
-
-
-
-
-
-      const initialSyncState = new SyncState.SyncState({
-        pending: pendingMutationEvents,
-        // On the leader we don't need a rollback tail beyond `pending` items
-        rollbackTail: [],
-        upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
-        localHead: initialLocalHead,
-      })
+      const initialSyncState = new SyncState.SyncState({
+        pending: pendingMutationEvents,
+        upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
+        localHead: initialLocalHead,
+      })
 
-
-
+      /** State transitions need to happen atomically, so we use a Ref to track the state */
+      yield* SubscriptionRef.set(syncStateSref, initialSyncState)
 
-
-
-
-
-
-
-
-
+      // Rehydrate sync queue
+      if (pendingMutationEvents.length > 0) {
+        const globalPendingMutationEvents = pendingMutationEvents
+          // Don't sync clientOnly mutations
+          .filter((mutationEventEncoded) => {
+            const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation)
+            return mutationDef.options.clientOnly === false
+          })
 
-
+        if (globalPendingMutationEvents.length > 0) {
+          yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingMutationEvents)
         }
+      }
 
-
-
-
-
-
-
-
-
-      yield* backgroundApplyLocalPushes({
-        localPushesLatch,
-        localPushesQueue,
-        pullLatch,
-        syncStateSref,
-        syncBackendQueue,
-        schema,
-        isClientEvent,
-        otelSpan,
-        currentLocalPushGenerationRef,
-      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
-
-      const backendPushingFiberHandle = yield* FiberHandle.make()
-
-      yield* FiberHandle.run(
-        backendPushingFiberHandle,
-        backgroundBackendPushing({
-          dbReady,
-          syncBackendQueue,
-          otelSpan,
-          devtoolsLatch: ctxRef.current?.devtoolsLatch,
-        }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-      )
+      const shutdownOnError = (cause: unknown) =>
+        Effect.gen(function* () {
+          if (onError === 'shutdown') {
+            yield* shutdownChannel.send(UnexpectedError.make({ cause }))
+            yield* Effect.die(cause)
+          }
+        })
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-      )
-      }),
-      syncStateSref,
-      localPushesLatch,
-      pullLatch,
+      yield* backgroundApplyLocalPushes({
+        localPushesLatch,
+        localPushesQueue,
+        pullLatch,
+        syncStateSref,
+        syncBackendPushQueue,
+        schema,
+        isClientEvent,
+        otelSpan,
+        currentLocalPushGenerationRef,
+        connectedClientSessionPullQueues,
+        mergeCounterRef,
+        mergePayloads,
+      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+      const backendPushingFiberHandle = yield* FiberHandle.make()
+
+      yield* FiberHandle.run(
+        backendPushingFiberHandle,
+        backgroundBackendPushing({
+          syncBackendPushQueue,
           otelSpan,
-          initialBlockingSyncContext,
           devtoolsLatch: ctxRef.current?.devtoolsLatch,
-        }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError),
+        }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
+      )
+
+      yield* backgroundBackendPulling({
+        initialBackendHead,
+        isClientEvent,
+        restartBackendPushing: (filteredRebasedPending) =>
+          Effect.gen(function* () {
+            // Stop current pushing fiber
+            yield* FiberHandle.clear(backendPushingFiberHandle)
+
+            // Reset the sync backend push queue
+            yield* BucketQueue.clear(syncBackendPushQueue)
+            yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending)
+
+            // Restart pushing fiber
+            yield* FiberHandle.run(
+              backendPushingFiberHandle,
+              backgroundBackendPushing({
+                syncBackendPushQueue,
+                otelSpan,
+                devtoolsLatch: ctxRef.current?.devtoolsLatch,
+              }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
+            )
+          }),
+        syncStateSref,
+        localPushesLatch,
+        pullLatch,
+        otelSpan,
+        initialBlockingSyncContext,
+        devtoolsLatch: ctxRef.current?.devtoolsLatch,
+        connectedClientSessionPullQueues,
+        mergeCounterRef,
+        mergePayloads,
+      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+      return { initialLeaderHead: initialLocalHead }
+    }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'))
+
+    const pull: LeaderSyncProcessor['pull'] = ({ cursor }) => {
+      return Effect.gen(function* () {
+        const queue = yield* pullQueue({ cursor })
+        return Stream.fromQueue(queue)
+      }).pipe(Stream.unwrapScoped)
+    }
+
+    const pullQueue: LeaderSyncProcessor['pullQueue'] = ({ cursor }) => {
+      const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized')
+      return Effect.gen(function* () {
+        const queue = yield* connectedClientSessionPullQueues.makeQueue(cursor)
+        const payloadsSinceCursor = Array.from(mergePayloads.entries())
+          .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+          .filter(({ mergeCounter }) => mergeCounter > cursor)
+          .toSorted((a, b) => a.mergeCounter - b.mergeCounter)
 
-
-
+        yield* queue.offerAll(payloadsSinceCursor)
+
+        return queue
+      }).pipe(Effect.provide(runtime))
+    }
+
+    const syncState = Subscribable.make({
+      get: Effect.gen(function* () {
+        const syncState = yield* syncStateSref
+        if (syncState === undefined) return shouldNeverHappen('Not initialized')
+        return syncState
+      }),
+      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+    })
 
     return {
+      pull,
+      pullQueue,
       push,
       pushPartial,
       boot,
-      syncState
-
-        const syncState = yield* syncStateSref
-        if (syncState === undefined) return shouldNeverHappen('Not initialized')
-        return syncState
-      }),
-      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
-    }),
+      syncState,
+      getMergeCounter: () => mergeCounterRef.current,
     } satisfies LeaderSyncProcessor
   })
 
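The new `pull`/`pullQueue` pair above replays every retained merge payload with a counter greater than the caller's cursor before delivering live payloads. A hypothetical consumer (`processor` is a stand-in; payload types are elided):

```ts
import { Effect, Stream } from 'effect'

declare const processor: {
  pull: (args: { cursor: number }) => Stream.Stream<{ payload: unknown; mergeCounter: number }>
}

// Persisting the last seen mergeCounter lets a client session resume the pull
// stream after a reconnect without missing or re-applying merges.
const resumeFrom = (lastSeenMergeCounter: number) =>
  processor.pull({ cursor: lastSeenMergeCounter }).pipe(
    Stream.tap(({ mergeCounter }) => Effect.log(`applied merge #${mergeCounter}`)),
    Stream.runDrain,
  )
```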
@@ -321,30 +355,32 @@ const backgroundApplyLocalPushes = ({
   localPushesQueue,
   pullLatch,
   syncStateSref,
-
+  syncBackendPushQueue,
   schema,
   isClientEvent,
   otelSpan,
   currentLocalPushGenerationRef,
+  connectedClientSessionPullQueues,
+  mergeCounterRef,
+  mergePayloads,
 }: {
   pullLatch: Effect.Latch
   localPushesLatch: Effect.Latch
   localPushesQueue: BucketQueue.BucketQueue<LocalPushQueueItem>
   syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
-
+  syncBackendPushQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
   schema: LiveStoreSchema
   isClientEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
   otelSpan: otel.Span | undefined
   currentLocalPushGenerationRef: { current: number }
+  connectedClientSessionPullQueues: PullQueueSet
+  mergeCounterRef: { current: number }
+  mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
 }) =>
   Effect.gen(function* () {
-    const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx
-
-    const applyMutationItems = yield* makeApplyMutationItems
-
     while (true) {
       // TODO make batch size configurable
-      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1,
+      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, LOCAL_PUSH_BATCH_SIZE)
 
       // Wait for the backend pulling to finish
       yield* localPushesLatch.await
@@ -377,9 +413,11 @@ const backgroundApplyLocalPushes = ({
         isEqualEvent: MutationEvent.isEqualEncoded,
       })
 
+      const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
      switch (mergeResult._tag) {
        case 'unexpected-error': {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:unexpected-error`, {
            batchSize: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
          })
@@ -389,15 +427,12 @@ const backgroundApplyLocalPushes = ({
          return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
        }
        case 'reject': {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:reject`, {
            batchSize: newEvents.length,
            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-
-
-          TODO: how to test this?
-          */
+          // TODO: how to test this?
          currentLocalPushGenerationRef.current++
 
          const nextGeneration = currentLocalPushGenerationRef.current
@@ -411,7 +446,8 @@ const backgroundApplyLocalPushes = ({
            (item) => item[2] >= nextGeneration,
          )
 
-
+          // TODO we still need to better understand and handle this scenario
+          if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
            console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue))
            debugger
          }
@@ -449,16 +485,13 @@ const backgroundApplyLocalPushes = ({
 
      yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)
 
-      if (clientId === 'client-b') {
-        // yield* Effect.log('offer upstream-advance due to local-push')
-        // debugger
-      }
      yield* connectedClientSessionPullQueues.offer({
-        payload: {
-
+        payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+        mergeCounter,
      })
+      mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))
 
-      otelSpan?.addEvent(
+      otelSpan?.addEvent(`merge[${mergeCounter}]:local-push:advance`, {
        batchSize: newEvents.length,
        mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
      })
@@ -469,67 +502,65 @@ const backgroundApplyLocalPushes = ({
        return mutationDef.options.clientOnly === false
      })
 
-      yield* BucketQueue.offerAll(
+      yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch)
 
-      yield*
+      yield* applyMutationsBatch({ batchItems: newEvents, deferreds })
 
      // Allow the backend pulling to start
      yield* pullLatch.open
    }
  })
 
-type
+type ApplyMutationsBatch = (_: {
  batchItems: ReadonlyArray<MutationEvent.EncodedWithMeta>
-  /**
+  /**
+   * The deferreds are used by the caller to know when the mutation has been processed.
+   * Indexes are aligned with `batchItems`
+   */
  deferreds: ReadonlyArray<Deferred.Deferred<void, LeaderAheadError> | undefined> | undefined
-}) => Effect.Effect<void, UnexpectedError>
+}) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx>
 
 // TODO how to handle errors gracefully
-const
+const applyMutationsBatch: ApplyMutationsBatch = ({ batchItems, deferreds }) =>
  Effect.gen(function* () {
-    const
-    const { dbReadModel: db, dbMutationLog } = leaderThreadCtx
+    const { dbReadModel: db, dbMutationLog, applyMutation } = yield* LeaderThreadCtx
 
-
+    // NOTE We always start a transaction to ensure consistency between db and mutation log (even for single-item batches)
+    db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
+    dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
 
-
+    yield* Effect.addFinalizer((exit) =>
      Effect.gen(function* () {
-
-        dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
-
-        yield* Effect.addFinalizer((exit) =>
-          Effect.gen(function* () {
-            if (Exit.isSuccess(exit)) return
+        if (Exit.isSuccess(exit)) return
 
-
-
-
-
-
+        // Rollback in case of an error
+        db.execute('ROLLBACK', undefined)
+        dbMutationLog.execute('ROLLBACK', undefined)
+      }),
+    )
 
-
-
+    for (let i = 0; i < batchItems.length; i++) {
+      const { sessionChangeset } = yield* applyMutation(batchItems[i]!)
+      batchItems[i]!.meta.sessionChangeset = sessionChangeset
 
-
-
-
-
+      if (deferreds?.[i] !== undefined) {
+        yield* Deferred.succeed(deferreds[i]!, void 0)
+      }
+    }
 
-
-
-
-
-
-
-
-
-
-
-
-    })
+    db.execute('COMMIT', undefined) // Commit the transaction
+    dbMutationLog.execute('COMMIT', undefined) // Commit the transaction
+  }).pipe(
+    Effect.uninterruptible,
+    Effect.scoped,
+    Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyMutationItems', {
+      attributes: { batchSize: batchItems.length },
+    }),
+    Effect.tapCauseLogPretty,
+    UnexpectedError.mapToUnexpectedError,
+  )
 
 const backgroundBackendPulling = ({
-  dbReady,
  initialBackendHead,
  isClientEvent,
  restartBackendPushing,
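`applyMutationsBatch` now opens one transaction per database up front and relies on a finalizer to roll both back on failure. The same invariant — read model and mutation log never diverging — in a dependency-free sketch (`Db` is a stand-in interface, not the package's `SqliteDb`):

```ts
interface Db {
  execute(statement: string): void
}

// Both stores commit together or roll back together, even for a single item.
const applyBatchAtomically = (db: Db, mutationLog: Db, applyAll: () => void): void => {
  db.execute('BEGIN TRANSACTION')
  mutationLog.execute('BEGIN TRANSACTION')
  try {
    applyAll()
    db.execute('COMMIT')
    mutationLog.execute('COMMIT')
  } catch (error) {
    // Plays the role of the Effect.addFinalizer branch above.
    db.execute('ROLLBACK')
    mutationLog.execute('ROLLBACK')
    throw error
  }
}
```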
@@ -539,8 +570,10 @@ const backgroundBackendPulling = ({
  pullLatch,
  devtoolsLatch,
  initialBlockingSyncContext,
+  connectedClientSessionPullQueues,
+  mergeCounterRef,
+  mergePayloads,
 }: {
-  dbReady: Deferred.Deferred<void>
  initialBackendHead: EventId.GlobalEventId
  isClientEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
  restartBackendPushing: (
@@ -552,23 +585,15 @@ const backgroundBackendPulling = ({
  pullLatch: Effect.Latch
  devtoolsLatch: Effect.Latch | undefined
  initialBlockingSyncContext: InitialBlockingSyncContext
+  connectedClientSessionPullQueues: PullQueueSet
+  mergeCounterRef: { current: number }
+  mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
 }) =>
  Effect.gen(function* () {
-    const {
-      syncBackend,
-      dbReadModel: db,
-      dbMutationLog,
-      connectedClientSessionPullQueues,
-      schema,
-      clientId,
-    } = yield* LeaderThreadCtx
+    const { syncBackend, dbReadModel: db, dbMutationLog, schema } = yield* LeaderThreadCtx
 
    if (syncBackend === undefined) return
 
-    const cursorInfo = yield* getCursorInfo(initialBackendHead)
-
-    const applyMutationItems = yield* makeApplyMutationItems
-
    const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
      Effect.gen(function* () {
        if (newEvents.length === 0) return
@@ -586,20 +611,20 @@ const backgroundBackendPulling = ({
        const syncState = yield* syncStateSref
        if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
-        const trimRollbackUntil = newEvents.at(-1)!.id
-
        const mergeResult = SyncState.merge({
          syncState,
-          payload: {
+          payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
          isClientEvent,
          isEqualEvent: MutationEvent.isEqualEncoded,
          ignoreClientEvents: true,
        })
 
+        const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
        if (mergeResult._tag === 'reject') {
          return shouldNeverHappen('The leader thread should never reject upstream advances')
        } else if (mergeResult._tag === 'unexpected-error') {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:unexpected-error`, {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
          })
@@ -608,62 +633,79 @@ const backgroundBackendPulling = ({
 
        const newBackendHead = newEvents.at(-1)!.id
 
-        updateBackendHead(dbMutationLog, newBackendHead)
+        Mutationlog.updateBackendHead(dbMutationLog, newBackendHead)
 
        if (mergeResult._tag === 'rebase') {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:rebase`, {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
-            rollbackCount: mergeResult.
+            rollbackCount: mergeResult.rollbackEvents.length,
            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-          const
+          const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((mutationEvent) => {
            const mutationDef = getMutationDef(schema, mutationEvent.mutation)
            return mutationDef.options.clientOnly === false
          })
-          yield* restartBackendPushing(
+          yield* restartBackendPushing(globalRebasedPendingEvents)
 
-          if (mergeResult.
-            yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.
+          if (mergeResult.rollbackEvents.length > 0) {
+            yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) })
          }
 
          yield* connectedClientSessionPullQueues.offer({
-            payload: {
-              _tag: 'upstream-rebase',
+            payload: SyncState.PayloadUpstreamRebase.make({
              newEvents: mergeResult.newEvents,
-
-
-
-            remaining,
+              rollbackEvents: mergeResult.rollbackEvents,
+            }),
+            mergeCounter,
          })
+          mergePayloads.set(
+            mergeCounter,
+            SyncState.PayloadUpstreamRebase.make({
+              newEvents: mergeResult.newEvents,
+              rollbackEvents: mergeResult.rollbackEvents,
+            }),
+          )
        } else {
-          otelSpan?.addEvent(
+          otelSpan?.addEvent(`merge[${mergeCounter}]:backend-pull:advance`, {
            newEventsCount: newEvents.length,
            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-          if (clientId === 'client-b') {
-            // yield* Effect.log('offer upstream-advance due to pull')
-          }
          yield* connectedClientSessionPullQueues.offer({
-            payload: {
-
+            payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+            mergeCounter,
          })
+          mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))
+
+          if (mergeResult.confirmedEvents.length > 0) {
+            // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
+            // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
+            const confirmedNewEvents = newEvents.filter((mutationEvent) =>
+              mergeResult.confirmedEvents.some((confirmedEvent) =>
+                EventId.isEqual(mutationEvent.id, confirmedEvent.id),
+              ),
+            )
+            yield* Mutationlog.updateSyncMetadata(confirmedNewEvents)
+          }
        }
 
+        // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
        trimChangesetRows(db, newBackendHead)
 
-        yield*
+        yield* applyMutationsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined })
 
        yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)
 
+        // Allow local pushes to be processed again
        if (remaining === 0) {
-          // Allow local pushes to be processed again
          yield* localPushesLatch.open
        }
      })
 
+    const cursorInfo = yield* Mutationlog.getSyncBackendCursorInfo(initialBackendHead)
+
    yield* syncBackend.pull(cursorInfo).pipe(
      // TODO only take from queue while connected
      Stream.tap(({ batch, remaining }) =>
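The confirmed-events branch above has to match events by id because the instances in `mergeResult.confirmedEvents` lack the freshly pulled sync metadata. The matching itself, reduced to a standalone sketch (`EventLike` is illustrative; the real type is `MutationEvent.EncodedWithMeta`):

```ts
type EventId = { global: number; client: number }
type EventLike = { id: EventId }

const isEqualId = (a: EventId, b: EventId): boolean =>
  a.global === b.global && a.client === b.client

// Keep only the pulled events that the merge confirmed, by id equality.
// For large batches a Set keyed on `${global}:${client}` would avoid the
// O(n * m) scan, at the cost of building the key strings.
const selectConfirmed = <E extends EventLike>(
  newEvents: ReadonlyArray<E>,
  confirmedEvents: ReadonlyArray<EventLike>,
): E[] => newEvents.filter((event) => confirmedEvents.some((c) => isEqualId(event.id, c.id)))
```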
@@ -675,16 +717,13 @@ const backgroundBackendPulling = ({
          // },
          // })
 
-          // Wait for the db to be initially created
-          yield* dbReady
-
          // NOTE we only want to take process mutations when the sync backend is connected
          // (e.g. needed for simulating being offline)
          // TODO remove when there's a better way to handle this in stream above
          yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
          yield* onNewPullChunk(
-            batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)),
+            batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded, _.metadata)),
            remaining,
          )
 
@@ -694,102 +733,26 @@ const backgroundBackendPulling = ({
      Stream.runDrain,
      Effect.interruptible,
    )
-  }).pipe(Effect.withSpan('@livestore/common:
-
-const rollback = ({
-  db,
-  dbMutationLog,
-  eventIdsToRollback,
-}: {
-  db: SqliteDb
-  dbMutationLog: SqliteDb
-  eventIdsToRollback: EventId.EventId[]
-}) =>
-  Effect.gen(function* () {
-    const rollbackEvents = db
-      .select<SessionChangesetMetaRow>(
-        sql`SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`,
-      )
-      .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
-      .sort((a, b) => EventId.compare(a.id, b.id))
-    // TODO bring back `.toSorted` once Expo supports it
-    // .toSorted((a, b) => EventId.compare(a.id, b.id))
-
-    // Apply changesets in reverse order
-    for (let i = rollbackEvents.length - 1; i >= 0; i--) {
-      const { changeset } = rollbackEvents[i]!
-      if (changeset !== null) {
-        db.makeChangeset(changeset).invert().apply()
-      }
-    }
-
-    const eventIdPairChunks = ReadonlyArray.chunksOf(100)(
-      eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`),
-    )
-
-    // Delete the changeset rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-      db.execute(
-        sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-      )
-    }
-
-    // Delete the mutation log rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-      dbMutationLog.execute(
-        sql`DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-      )
-    }
-  }).pipe(
-    Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
-      attributes: { count: eventIdsToRollback.length },
-    }),
-  )
-
-const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
-  Effect.gen(function* () {
-    const { dbMutationLog } = yield* LeaderThreadCtx
-
-    if (remoteHead === EventId.ROOT.global) return Option.none()
-
-    const MutationlogQuerySchema = Schema.Struct({
-      syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
-    }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head)
-
-    const syncMetadataOption = yield* Effect.sync(() =>
-      dbMutationLog.select<{ syncMetadataJson: string }>(
-        sql`SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`,
-      ),
-    ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)
-
-    return Option.some({
-      cursor: { global: remoteHead, client: EventId.clientDefault },
-      metadata: syncMetadataOption,
-    }) satisfies InitialSyncInfo
-  }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
+  }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'))
 
 const backgroundBackendPushing = ({
-
-  syncBackendQueue,
+  syncBackendPushQueue,
  otelSpan,
  devtoolsLatch,
 }: {
-
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+  syncBackendPushQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
  otelSpan: otel.Span | undefined
  devtoolsLatch: Effect.Latch | undefined
 }) =>
  Effect.gen(function* () {
-    const { syncBackend
+    const { syncBackend } = yield* LeaderThreadCtx
    if (syncBackend === undefined) return
 
-    yield* dbReady
-
    while (true) {
      yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
      // TODO make batch size configurable
-      const queueItems = yield* BucketQueue.takeBetween(
+      const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, BACKEND_PUSH_BATCH_SIZE)
 
      yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
@@ -813,27 +776,88 @@ const backgroundBackendPushing = ({
        // wait for interrupt caused by background pulling which will then restart pushing
        return yield* Effect.never
      }
-
-      const { metadata } = pushResult.right
-
-      // TODO try to do this in a single query
-      for (let i = 0; i < queueItems.length; i++) {
-        const mutationEventEncoded = queueItems[i]!
-        yield* execSql(
-          dbMutationLog,
-          ...updateRows({
-            tableName: MUTATION_LOG_META_TABLE,
-            columns: mutationLogMetaTable.sqliteDef.columns,
-            where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
-            updateValues: { syncMetadataJson: metadata[i]! },
-          }),
-        )
-      }
    }
-  }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:
+  }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'))
 
 const trimChangesetRows = (db: SqliteDb, newHead: EventId.EventId) => {
  // Since we're using the session changeset rows to query for the current head,
  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
  db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
 }
+
+interface PullQueueSet {
+  makeQueue: (
+    cursor: number,
+  ) => Effect.Effect<
+    Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>,
+    UnexpectedError,
+    Scope.Scope | LeaderThreadCtx
+  >
+  offer: (item: {
+    payload: typeof SyncState.PayloadUpstream.Type
+    mergeCounter: number
+  }) => Effect.Effect<void, UnexpectedError>
+}
+
+const makePullQueueSet = Effect.gen(function* () {
+  const set = new Set<Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>>()
+
+  yield* Effect.addFinalizer(() =>
+    Effect.gen(function* () {
+      for (const queue of set) {
+        yield* Queue.shutdown(queue)
+      }
+
+      set.clear()
+    }),
+  )
+
+  const makeQueue: PullQueueSet['makeQueue'] = () =>
+    Effect.gen(function* () {
+      const queue = yield* Queue.unbounded<{
+        payload: typeof SyncState.PayloadUpstream.Type
+        mergeCounter: number
+      }>().pipe(Effect.acquireRelease(Queue.shutdown))
+
+      yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)))
+
+      set.add(queue)
+
+      return queue
+    })
+
+  const offer: PullQueueSet['offer'] = (item) =>
+    Effect.gen(function* () {
+      // Short-circuit if the payload is an empty upstream advance
+      if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
+        return
+      }
+
+      for (const queue of set) {
+        yield* Queue.offer(queue, item)
+      }
+    })
+
+  return {
+    makeQueue,
+    offer,
+  }
+})
+
+const incrementMergeCounter = (mergeCounterRef: { current: number }) =>
+  Effect.gen(function* () {
+    const { dbReadModel } = yield* LeaderThreadCtx
+    mergeCounterRef.current++
+    dbReadModel.execute(
+      sql`INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`,
+    )
+    return mergeCounterRef.current
+  })
+
+const getMergeCounterFromDb = (dbReadModel: SqliteDb) =>
+  Effect.gen(function* () {
+    const result = dbReadModel.select<{ mergeCounter: number }>(
+      sql`SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`,
+    )
+    return result[0]?.mergeCounter ?? 0
+  })