@livestore/common 0.3.0-dev.0 → 0.3.0-dev.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +26 -23
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js.map +1 -1
- package/dist/derived-mutations.d.ts +4 -4
- package/dist/derived-mutations.d.ts.map +1 -1
- package/dist/derived-mutations.test.js.map +1 -1
- package/dist/devtools/devtools-bridge.d.ts +2 -1
- package/dist/devtools/devtools-bridge.d.ts.map +1 -1
- package/dist/devtools/devtools-messages.d.ts +98 -110
- package/dist/devtools/devtools-messages.d.ts.map +1 -1
- package/dist/devtools/devtools-messages.js +9 -6
- package/dist/devtools/devtools-messages.js.map +1 -1
- package/dist/index.d.ts +0 -4
- package/dist/index.d.ts.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +37 -0
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -0
- package/dist/leader-thread/LeaderSyncProcessor.js +417 -0
- package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -0
- package/dist/leader-thread/apply-mutation.d.ts +5 -2
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +38 -26
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/leader-sync-processor.d.ts +2 -2
- package/dist/leader-thread/leader-sync-processor.d.ts.map +1 -1
- package/dist/leader-thread/leader-sync-processor.js +20 -12
- package/dist/leader-thread/leader-sync-processor.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +22 -66
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts +8 -7
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +11 -5
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +4 -17
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +2 -1
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +9 -3
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +17 -9
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/mutation.d.ts +9 -2
- package/dist/mutation.d.ts.map +1 -1
- package/dist/mutation.js +5 -5
- package/dist/mutation.js.map +1 -1
- package/dist/query-builder/impl.d.ts +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +2 -2
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +13 -19
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +16 -14
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +15 -7
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/EventId.test.d.ts +2 -0
- package/dist/schema/EventId.test.d.ts.map +1 -0
- package/dist/schema/EventId.test.js +11 -0
- package/dist/schema/EventId.test.js.map +1 -0
- package/dist/schema/MutationEvent.d.ts +49 -80
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +32 -15
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/MutationEvent.test.d.ts +2 -0
- package/dist/schema/MutationEvent.test.d.ts.map +1 -0
- package/dist/schema/MutationEvent.test.js +2 -0
- package/dist/schema/MutationEvent.test.js.map +1 -0
- package/dist/schema/system-tables.d.ts +26 -26
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +19 -11
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/schema-management/migrations.js +6 -6
- package/dist/schema-management/migrations.js.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +45 -0
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -0
- package/dist/sync/ClientSessionSyncProcessor.js +134 -0
- package/dist/sync/ClientSessionSyncProcessor.js.map +1 -0
- package/dist/sync/client-session-sync-processor.d.ts +4 -4
- package/dist/sync/client-session-sync-processor.d.ts.map +1 -1
- package/dist/sync/index.d.ts +1 -1
- package/dist/sync/index.d.ts.map +1 -1
- package/dist/sync/index.js +1 -1
- package/dist/sync/index.js.map +1 -1
- package/dist/sync/next/history-dag-common.d.ts +1 -4
- package/dist/sync/next/history-dag-common.d.ts.map +1 -1
- package/dist/sync/next/history-dag-common.js +1 -1
- package/dist/sync/next/history-dag-common.js.map +1 -1
- package/dist/sync/next/rebase-events.d.ts +3 -3
- package/dist/sync/next/rebase-events.d.ts.map +1 -1
- package/dist/sync/next/rebase-events.js +3 -2
- package/dist/sync/next/rebase-events.js.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.d.ts.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.js +3 -9
- package/dist/sync/next/test/mutation-fixtures.js.map +1 -1
- package/dist/sync/sync.d.ts +21 -11
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +45 -23
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +56 -12
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +125 -69
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/sync/validate-push-payload.d.ts +2 -2
- package/dist/sync/validate-push-payload.d.ts.map +1 -1
- package/dist/sync/validate-push-payload.js +2 -2
- package/dist/sync/validate-push-payload.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.d.ts.map +1 -1
- package/dist/version.js +1 -1
- package/dist/version.js.map +1 -1
- package/package.json +6 -5
- package/src/adapter-types.ts +22 -24
- package/src/derived-mutations.test.ts +1 -1
- package/src/derived-mutations.ts +9 -5
- package/src/devtools/devtools-bridge.ts +2 -1
- package/src/devtools/devtools-messages.ts +9 -6
- package/src/index.ts +0 -6
- package/src/leader-thread/{leader-sync-processor.ts → LeaderSyncProcessor.ts} +235 -230
- package/src/leader-thread/apply-mutation.ts +49 -31
- package/src/leader-thread/leader-worker-devtools.ts +30 -109
- package/src/leader-thread/make-leader-thread-layer.ts +24 -13
- package/src/leader-thread/mutationlog.ts +9 -5
- package/src/leader-thread/recreate-db.ts +9 -5
- package/src/leader-thread/types.ts +18 -11
- package/src/mutation.ts +17 -7
- package/src/rehydrate-from-mutationlog.ts +15 -23
- package/src/schema/EventId.test.ts +12 -0
- package/src/schema/EventId.ts +23 -9
- package/src/schema/MutationEvent.ts +46 -24
- package/src/schema/system-tables.ts +19 -11
- package/src/schema-management/migrations.ts +6 -6
- package/src/sync/{client-session-sync-processor.ts → ClientSessionSyncProcessor.ts} +11 -9
- package/src/sync/index.ts +1 -1
- package/src/sync/next/history-dag-common.ts +1 -1
- package/src/sync/next/rebase-events.ts +7 -7
- package/src/sync/next/test/mutation-fixtures.ts +3 -10
- package/src/sync/sync.ts +19 -6
- package/src/sync/syncstate.test.ts +127 -67
- package/src/sync/syncstate.ts +21 -19
- package/src/sync/validate-push-payload.ts +7 -4
- package/src/version.ts +1 -1
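
The bulk of this release is the rewrite of the leader-thread sync processor (`leader-sync-processor.ts` → `LeaderSyncProcessor.ts`, diffed below). As a reading aid, here is a rough sketch of the `LeaderSyncProcessor` surface as it can be inferred from that diff; the placeholder aliases and exact option names are assumptions, not the published typings from `leader-thread/types.ts`.

```ts
// Sketch only: inferred from the diff below, not copied from the package.
// `Effect`, `Scope`, and `Stream` are the namespaces re-exported by '@livestore/utils/effect' (as used in the diff);
// the aliases below stand in for MutationEvent.EncodedWithMeta / SyncState.SyncState / InvalidPushError.
import type { Effect, Scope, Stream } from '@livestore/utils/effect'

type EncodedMutationEvent = unknown // MutationEvent.EncodedWithMeta in the real code
type SyncStateValue = unknown // SyncState.SyncState in the real code
type PushError = unknown // InvalidPushError (e.g. reason `LeaderAhead`) in the real code

interface LeaderSyncProcessorSketch {
  /** Enqueues local mutations; with `waitForProcessing: true` the effect only resolves once the batch was applied. */
  push: (
    newEvents: ReadonlyArray<EncodedMutationEvent>,
    options?: { waitForProcessing?: boolean },
  ) => Effect.Effect<void, PushError>
  /** Assigns the next event id pair (via `EventId.nextPair`) and delegates to `push`; push errors are treated as defects. */
  pushPartial: (partialEvent: EncodedMutationEvent) => Effect.Effect<void>
  /** Starts the background loops: local-push applier, backend pulling, and backend pushing. */
  boot: (args: { dbReady: unknown }) => Effect.Effect<void, unknown, Scope.Scope>
  /** Current sync state plus a change stream, backed by a `SubscriptionRef` in the diff. */
  syncState: {
    get: Effect.Effect<SyncStateValue>
    changes: Stream.Stream<SyncStateValue>
  }
}
```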
```diff
--- package/src/leader-thread/leader-sync-processor.ts
+++ package/src/leader-thread/LeaderSyncProcessor.ts
@@ -1,17 +1,17 @@
-import { shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
+import { isNotUndefined, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
 import type { HttpClient, Scope } from '@livestore/utils/effect'
 import {
   BucketQueue,
   Deferred,
   Effect,
   Exit,
-  Fiber,
   FiberHandle,
   Option,
   OtelTracer,
-
+  ReadonlyArray,
   Schema,
   Stream,
+  Subscribable,
   SubscriptionRef,
 } from '@livestore/utils/effect'
 import type * as otel from '@opentelemetry/api'
@@ -33,61 +33,36 @@ import { sql } from '../util.js'
 import { makeApplyMutation } from './apply-mutation.js'
 import { execSql } from './connection.js'
 import { getBackendHeadFromDb, getLocalHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js'
-import type { InitialBlockingSyncContext, InitialSyncInfo,
+import type { InitialBlockingSyncContext, InitialSyncInfo, LeaderSyncProcessor } from './types.js'
 import { LeaderThreadCtx } from './types.js'
 
-type
-
-
-
-type ProcessorStateInSync = {
-  _tag: 'in-sync'
-  syncState: SyncState.SyncState
-}
-
-type ProcessorStateApplyingSyncStateAdvance = {
-  _tag: 'applying-syncstate-advance'
-  origin: 'pull' | 'push'
-  syncState: SyncState.SyncState
-  // TODO re-introduce this
-  // proccesHead: EventId
-  fiber: Fiber.RuntimeFiber<void, UnexpectedError>
-}
-
-type ProcessorState = ProcessorStateInit | ProcessorStateInSync | ProcessorStateApplyingSyncStateAdvance
+type PushQueueItem = [
+  mutationEvent: MutationEvent.EncodedWithMeta,
+  deferred: Deferred.Deferred<void, InvalidPushError> | undefined,
+]
 
 /**
- * The
- *
- * The leader sync processor is also responsible for
- * - broadcasting mutations to client sessions via the pull queues.
- * - pushing mutations to the sync backend
- *
- * In the leader sync processor, pulling always has precedence over pushing.
+ * The LeaderSyncProcessor manages synchronization of mutations between
+ * the local state and the sync backend, ensuring efficient and orderly processing.
  *
- *
- * - Mutation pushed from client session
- * - Mutation pushed from devtools (via pushPartial)
- * - Mutation pulled from sync backend
+ * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
- *
- * -
- * -
+ * Responsibilities:
+ * - Queueing incoming local mutations in a localPushMailbox.
+ * - Broadcasting mutations to client sessions via pull queues.
+ * - Pushing mutations to the sync backend.
  *
- *
- * - in-sync -> applying-syncstate-advance
- * - applying-syncstate-advance -> in-sync
- * - applying-syncstate-advance -> applying-syncstate-advance (need to interrupt previous operation)
+ * Notes:
  *
- *
- * -
- *
- *
- * -
+ * local push processing:
+ * - localPushMailbox:
+ * - Maintains events in ascending order.
+ * - Uses `Deferred` objects to resolve/reject events based on application success.
+ * - Processes events from the mailbox, applying mutations in batches.
+ * - Controlled by a `Latch` to manage execution flow.
+ * - The latch closes on pull receipt and re-opens post-pull completion.
+ * - Processes up to `maxBatchSize` events per cycle.
  *
- * Backend pushing:
- * - continously push to backend
- * - only interrupted and restarted on rebase
 */
 export const makeLeaderSyncProcessor = ({
   schema,
@@ -100,13 +75,11 @@ export const makeLeaderSyncProcessor = ({
   dbMissing: boolean
   dbLog: SynchronousDatabase
   initialBlockingSyncContext: InitialBlockingSyncContext
-}): Effect.Effect<
+}): Effect.Effect<LeaderSyncProcessor, UnexpectedError, Scope.Scope> =>
   Effect.gen(function* () {
-    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.
+    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.EncodedWithMeta>()
 
-    const
-
-    const semaphore = yield* Effect.makeSemaphore(1)
+    const syncStateSref = yield* SubscriptionRef.make<SyncState.SyncState | undefined>(undefined)
 
     const isLocalEvent = (mutationEventEncoded: MutationEvent.EncodedWithMeta) => {
       const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
@@ -114,105 +87,32 @@ export const makeLeaderSyncProcessor = ({
     }
 
     const spanRef = { current: undefined as otel.Span | undefined }
-    const applyMutationItemsRef = { current: undefined as ApplyMutationItems | undefined }
-
-    // TODO get rid of counters once Effect semaphore ordering is fixed
-    let counterRef = 0
-    let expectedCounter = 0
 
-
-
-
-      - Have a worker fiber that takes from the mailbox (wouldn't need a semaphore)
-    */
-
-    const waitForSyncState = (counter: number): Effect.Effect<ProcessorStateInSync> =>
-      Effect.gen(function* () {
-        // console.log('waitForSyncState: waiting for semaphore', counter)
-        yield* semaphore.take(1)
-        // NOTE this is a workaround to ensure the semaphore take-order is respected
-        // TODO this needs to be fixed upstream in Effect
-        if (counter !== expectedCounter) {
-          console.log(
-            `waitForSyncState: counter mismatch (expected: ${expectedCounter}, got: ${counter}), releasing semaphore`,
-          )
-          yield* semaphore.release(1)
-          yield* Effect.yieldNow()
-          // Retrying...
-          return yield* waitForSyncState(counter)
-        }
-        // console.log('waitForSyncState: took semaphore', counter)
-        const state = yield* Ref.get(stateRef)
-        if (state._tag !== 'in-sync') {
-          return shouldNeverHappen('Expected to be in-sync but got ' + state._tag)
-        }
-        expectedCounter = counter + 1
-        return state
-      }).pipe(Effect.withSpan(`@livestore/common:leader-thread:syncing:waitForSyncState(${counter})`))
+    const localPushesQueue = yield* BucketQueue.make<PushQueueItem>()
+    const localPushesLatch = yield* Effect.makeLatch(true)
+    const pullLatch = yield* Effect.makeLatch(true)
 
-    const push = (newEvents
+    const push: LeaderSyncProcessor['push'] = (newEvents, options) =>
       Effect.gen(function* () {
-        const counter = counterRef
-        counterRef++
         // TODO validate batch
         if (newEvents.length === 0) return
 
-        const
+        const waitForProcessing = options?.waitForProcessing ?? false
 
-
-
+        if (waitForProcessing) {
+          const deferreds = yield* Effect.forEach(newEvents, () => Deferred.make<void, InvalidPushError>())
 
-
-
-          payload: { _tag: 'local-push', newEvents },
-          isLocalEvent,
-          isEqualEvent: MutationEvent.isEqualEncoded,
-        })
-
-        if (updateResult._tag === 'rebase') {
-          return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
-        } else if (updateResult._tag === 'reject') {
-          return yield* Effect.fail(
-            InvalidPushError.make({
-              reason: {
-                _tag: 'LeaderAhead',
-                minimumExpectedId: updateResult.expectedMinimumId,
-                providedId: newEvents.at(0)!.id,
-              },
-            }),
+          const items = newEvents.map(
+            (mutationEventEncoded, i) => [mutationEventEncoded, deferreds[i]] as PushQueueItem,
           )
-        }
-
-        const fiber = yield* applyMutationItemsRef.current!({ batchItems: updateResult.newEvents }).pipe(Effect.fork)
 
-
-          _tag: 'applying-syncstate-advance',
-          origin: 'push',
-          syncState: updateResult.newSyncState,
-          fiber,
-        })
-
-        // console.log('setRef:applying-syncstate-advance after push', counter)
-
-        yield* connectedClientSessionPullQueues.offer({
-          payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
-          remaining: 0,
-        })
-
-        spanRef.current?.addEvent('local-push', {
-          batchSize: newEvents.length,
-          updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
-        })
-
-        // Don't sync localOnly mutations
-        const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
-          const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
-          return mutationDef.options.localOnly === false
-        })
-
-        yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
+          yield* BucketQueue.offerAll(localPushesQueue, items)
 
-
+          yield* Effect.all(deferreds)
+        } else {
+          const items = newEvents.map((mutationEventEncoded) => [mutationEventEncoded, undefined] as PushQueueItem)
+          yield* BucketQueue.offerAll(localPushesQueue, items)
+        }
       }).pipe(
         Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
          attributes: {
@@ -225,10 +125,10 @@ export const makeLeaderSyncProcessor = ({
        }),
      )
 
-    const pushPartial:
+    const pushPartial: LeaderSyncProcessor['pushPartial'] = (mutationEventEncoded_) =>
      Effect.gen(function* () {
-        const
-        if (
+        const syncState = yield* syncStateSref
+        if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
        const mutationDef =
          schema.mutations.get(mutationEventEncoded_.mutation) ??
@@ -236,14 +136,14 @@ export const makeLeaderSyncProcessor = ({
 
        const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
          ...mutationEventEncoded_,
-          ...EventId.nextPair(
+          ...EventId.nextPair(syncState.localHead, mutationDef.options.localOnly),
        })
 
        yield* push([mutationEventEncoded])
      }).pipe(Effect.catchTag('InvalidPushError', Effect.orDie))
 
    // Starts various background loops
-    const boot:
+    const boot: LeaderSyncProcessor['boot'] = ({ dbReady }) =>
      Effect.gen(function* () {
        const span = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
        spanRef.current = span
@@ -257,20 +157,21 @@ export const makeLeaderSyncProcessor = ({
          )
        }
 
-        const pendingMutationEvents = yield* getMutationEventsSince({
+        const pendingMutationEvents = yield* getMutationEventsSince({
+          global: initialBackendHead,
+          local: EventId.localDefault,
+        }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))))
 
-        const initialSyncState = {
-          pending: pendingMutationEvents
+        const initialSyncState = new SyncState.SyncState({
+          pending: pendingMutationEvents,
          // On the leader we don't need a rollback tail beyond `pending` items
          rollbackTail: [],
-          upstreamHead: { global: initialBackendHead, local:
+          upstreamHead: { global: initialBackendHead, local: EventId.localDefault },
          localHead: initialLocalHead,
-        }
+        })
 
        /** State transitions need to happen atomically, so we use a Ref to track the state */
-        yield*
-
-        applyMutationItemsRef.current = yield* makeApplyMutationItems({ stateRef, semaphore })
+        yield* SubscriptionRef.set(syncStateSref, initialSyncState)
 
        // Rehydrate sync queue
        if (pendingMutationEvents.length > 0) {
@@ -284,6 +185,17 @@ export const makeLeaderSyncProcessor = ({
          yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
        }
 
+        yield* backgroundApplyLocalPushes({
+          localPushesLatch,
+          localPushesQueue,
+          pullLatch,
+          syncStateSref,
+          syncBackendQueue,
+          schema,
+          isLocalEvent,
+          span,
+        }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
+
        const backendPushingFiberHandle = yield* FiberHandle.make()
 
        yield* FiberHandle.run(
@@ -310,9 +222,9 @@ export const makeLeaderSyncProcessor = ({
            backgroundBackendPushing({ dbReady, syncBackendQueue, span }).pipe(Effect.tapCauseLogPretty),
          )
        }),
-
-
-
+        syncStateSref,
+        localPushesLatch,
+        pullLatch,
        span,
        initialBlockingSyncContext,
      }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
@@ -322,40 +234,139 @@ export const makeLeaderSyncProcessor = ({
      push,
      pushPartial,
      boot,
-      syncState:
-
-
-
+      syncState: Subscribable.make({
+        get: Effect.gen(function* () {
+          const syncState = yield* syncStateSref
+          if (syncState === undefined) return shouldNeverHappen('Not initialized')
+          return syncState
+        }),
+        changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
      }),
-    } satisfies
+    } satisfies LeaderSyncProcessor
+  })
+
+const backgroundApplyLocalPushes = ({
+  localPushesLatch,
+  localPushesQueue,
+  pullLatch,
+  syncStateSref,
+  syncBackendQueue,
+  schema,
+  isLocalEvent,
+  span,
+}: {
+  pullLatch: Effect.Latch
+  localPushesLatch: Effect.Latch
+  localPushesQueue: BucketQueue.BucketQueue<PushQueueItem>
+  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
+  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+  schema: LiveStoreSchema
+  isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
+  span: otel.Span | undefined
+}) =>
+  Effect.gen(function* () {
+    const { connectedClientSessionPullQueues } = yield* LeaderThreadCtx
+
+    const applyMutationItems = yield* makeApplyMutationItems
+
+    while (true) {
+      // TODO make batch size configurable
+      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10)
+      const [newEvents, deferreds] = ReadonlyArray.unzip(batchItems)
+
+      // Wait for the backend pulling to finish
+      yield* localPushesLatch.await
+
+      // Prevent the backend pulling from starting until this local push is finished
+      yield* pullLatch.close
+
+      const syncState = yield* syncStateSref
+      if (syncState === undefined) return shouldNeverHappen('Not initialized')
+
+      const updateResult = SyncState.updateSyncState({
+        syncState,
+        payload: { _tag: 'local-push', newEvents },
+        isLocalEvent,
+        isEqualEvent: MutationEvent.isEqualEncoded,
+      })
+
+      if (updateResult._tag === 'rebase') {
+        return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
+      } else if (updateResult._tag === 'reject') {
+        span?.addEvent('local-push:reject', {
+          batchSize: newEvents.length,
+          updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+        })
+
+        const providedId = newEvents.at(0)!.id
+        const remainingEvents = yield* BucketQueue.takeAll(localPushesQueue)
+        const allDeferreds = [...deferreds, ...remainingEvents.map(([_, deferred]) => deferred)].filter(isNotUndefined)
+        yield* Effect.forEach(allDeferreds, (deferred) =>
+          Deferred.fail(
+            deferred,
+            InvalidPushError.make({
+              // TODO improve error handling so it differentiates between a push being rejected
+              // because of itself or because of another push
+              reason: {
+                _tag: 'LeaderAhead',
+                minimumExpectedId: updateResult.expectedMinimumId,
+                providedId,
+              },
+            }),
+          ),
+        )
+
+        // Allow the backend pulling to start
+        yield* pullLatch.open
+
+        // In this case we're skipping state update and down/upstream processing
+        // We've cleared the local push queue and are now waiting for new local pushes / backend pulls
+        continue
+      }
+
+      yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState)
+
+      yield* connectedClientSessionPullQueues.offer({
+        payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
+        remaining: 0,
+      })
+
+      span?.addEvent('local-push', {
+        batchSize: newEvents.length,
+        updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+      })
+
+      // Don't sync localOnly mutations
+      const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
+        const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
+        return mutationDef.options.localOnly === false
+      })
+
+      yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
+
+      yield* applyMutationItems({ batchItems: newEvents, deferreds })
+
+      // Allow the backend pulling to start
+      yield* pullLatch.open
+    }
  })
 
 type ApplyMutationItems = (_: {
  batchItems: ReadonlyArray<MutationEvent.EncodedWithMeta>
+  /** Indexes are aligned with `batchItems` */
+  deferreds: ReadonlyArray<Deferred.Deferred<void, InvalidPushError> | undefined> | undefined
 }) => Effect.Effect<void, UnexpectedError>
 
 // TODO how to handle errors gracefully
-const makeApplyMutationItems =
-  stateRef,
-  semaphore,
-}: {
-  stateRef: Ref.Ref<ProcessorState>
-  semaphore: Effect.Semaphore
-}): Effect.Effect<ApplyMutationItems, UnexpectedError, LeaderThreadCtx | Scope.Scope> =>
+const makeApplyMutationItems: Effect.Effect<ApplyMutationItems, UnexpectedError, LeaderThreadCtx | Scope.Scope> =
  Effect.gen(function* () {
    const leaderThreadCtx = yield* LeaderThreadCtx
    const { db, dbLog } = leaderThreadCtx
 
    const applyMutation = yield* makeApplyMutation
 
-    return ({ batchItems }) =>
+    return ({ batchItems, deferreds }) =>
      Effect.gen(function* () {
-        const state = yield* Ref.get(stateRef)
-        if (state._tag !== 'applying-syncstate-advance') {
-          // console.log('applyMutationItems: counter', counter)
-          return shouldNeverHappen(`Expected to be applying-syncstate-advance but got ${state._tag}`)
-        }
-
        db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
        dbLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
 
@@ -370,29 +381,21 @@ const makeApplyMutationItems = ({
        )
 
        for (let i = 0; i < batchItems.length; i++) {
-
+          yield* applyMutation(batchItems[i]!)
 
-
-
-          if (meta?.deferred) {
-            yield* Deferred.succeed(meta.deferred, void 0)
+          if (deferreds?.[i] !== undefined) {
+            yield* Deferred.succeed(deferreds[i]!, void 0)
          }
-
-          // TODO re-introduce this
-          // if (i < batchItems.length - 1) {
-          //   yield* Ref.set(stateRef, { ...state, proccesHead: batchItems[i + 1]!.id })
-          // }
        }
 
        db.execute('COMMIT', undefined) // Commit the transaction
        dbLog.execute('COMMIT', undefined) // Commit the transaction
-
-        yield* Ref.set(stateRef, { _tag: 'in-sync', syncState: state.syncState })
-        // console.log('setRef:sync after applyMutationItems', counter)
-        yield* semaphore.release(1)
      }).pipe(
+        Effect.uninterruptible,
        Effect.scoped,
-        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems'
+        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems', {
+          attributes: { count: batchItems.length },
+        }),
        Effect.tapCauseLogPretty,
        UnexpectedError.mapToUnexpectedError,
      )
@@ -404,21 +407,21 @@ const backgroundBackendPulling = ({
  isLocalEvent,
  restartBackendPushing,
  span,
-
-
-
+  syncStateSref,
+  localPushesLatch,
+  pullLatch,
  initialBlockingSyncContext,
 }: {
  dbReady: Deferred.Deferred<void>
-  initialBackendHead:
+  initialBackendHead: EventId.GlobalEventId
  isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
  restartBackendPushing: (
    filteredRebasedPending: ReadonlyArray<MutationEvent.EncodedWithMeta>,
  ) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx | HttpClient.HttpClient>
  span: otel.Span | undefined
-
-
-
+  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
+  localPushesLatch: Effect.Latch
+  pullLatch: Effect.Latch
  initialBlockingSyncContext: InitialBlockingSyncContext
 }) =>
  Effect.gen(function* () {
@@ -428,30 +431,25 @@ const backgroundBackendPulling = ({
 
    const cursorInfo = yield* getCursorInfo(initialBackendHead)
 
+    const applyMutationItems = yield* makeApplyMutationItems
+
    const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
      Effect.gen(function* () {
        if (newEvents.length === 0) return
 
-
-
+        // Prevent more local pushes from being processed until this pull is finished
+        yield* localPushesLatch.close
 
-        //
+        // Wait for pending local pushes to finish
+        yield* pullLatch.await
 
-
-
-          yield* Fiber.interrupt(state.fiber)
-          // In theory we should force-take the semaphore here, but as it's still taken,
-          // it's already in the right state we want it to be in
-        } else {
-          // Wait for previous advance to finish
-          yield* semaphore.take(1)
-        }
-        }
+        const syncState = yield* syncStateSref
+        if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
        const trimRollbackUntil = newEvents.at(-1)!.id
 
        const updateResult = SyncState.updateSyncState({
-          syncState
+          syncState,
          payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
          isLocalEvent,
          isEqualEvent: MutationEvent.isEqualEncoded,
@@ -505,17 +503,16 @@ const backgroundBackendPulling = ({
          })
        }
 
-
-          batchItems: updateResult.newEvents,
-        }).pipe(Effect.fork)
+        trimChangesetRows(db, newBackendHead)
 
-        yield*
-
-
-
-
-
-
+        yield* applyMutationItems({ batchItems: updateResult.newEvents, deferreds: undefined })
+
+        yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState)
+
+        if (remaining === 0) {
+          // Allow local pushes to be processed again
+          yield* localPushesLatch.open
+        }
      })
 
    yield* syncBackend.pull(cursorInfo).pipe(
@@ -538,7 +535,7 @@ const backgroundBackendPulling = ({
        yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
        yield* onNewPullChunk(
-          batch.map((_) =>
+          batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)),
          remaining,
        )
 
@@ -570,7 +567,9 @@ const rollback = ({
    // Apply changesets in reverse order
    for (let i = rollbackEvents.length - 1; i >= 0; i--) {
      const { changeset } = rollbackEvents[i]!
-
+      if (changeset !== null) {
+        db.makeChangeset(changeset).invert().apply()
+      }
    }
 
    // Delete the changeset rows
@@ -588,7 +587,7 @@ const rollback = ({
    }),
  )
 
-const getCursorInfo = (remoteHead:
+const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
  Effect.gen(function* () {
    const { dbLog } = yield* LeaderThreadCtx
 
@@ -605,7 +604,7 @@ const getCursorInfo = (remoteHead: number) =>
    ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)
 
    return Option.some({
-      cursor: { global: remoteHead, local:
+      cursor: { global: remoteHead, local: EventId.localDefault },
      metadata: syncMetadataOption,
    }) satisfies InitialSyncInfo
  }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
@@ -616,7 +615,7 @@ const backgroundBackendPushing = ({
  span,
 }: {
  dbReady: Deferred.Deferred<void>
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.
+  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
  span: otel.Span | undefined
 }) =>
  Effect.gen(function* () {
@@ -639,11 +638,11 @@ const backgroundBackendPushing = ({
    })
 
    // TODO handle push errors (should only happen during concurrent pull+push)
-    const pushResult = yield* syncBackend.push(queueItems).pipe(Effect.either)
+    const pushResult = yield* syncBackend.push(queueItems.map((_) => _.toGlobal())).pipe(Effect.either)
 
    if (pushResult._tag === 'Left') {
      span?.addEvent('backend-push-error', { error: pushResult.left.toString() })
-      // wait for interrupt
+      // wait for interrupt caused by background pulling which will then restart pushing
      return yield* Effect.never
    }
 
@@ -664,3 +663,9 @@ const backgroundBackendPushing = ({
      }
    }
  }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'))
+
+const trimChangesetRows = (db: SynchronousDatabase, newHead: EventId.EventId) => {
+  // Since we're using the session changeset rows to query for the current head,
+  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
+  db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
+}
```