@livestore/common 0.3.0-dev.10 → 0.3.0-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +23 -26
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js.map +1 -1
- package/dist/derived-mutations.d.ts +4 -4
- package/dist/derived-mutations.d.ts.map +1 -1
- package/dist/derived-mutations.test.js.map +1 -1
- package/dist/devtools/devtools-bridge.d.ts +1 -2
- package/dist/devtools/devtools-bridge.d.ts.map +1 -1
- package/dist/devtools/devtools-messages.d.ts +110 -98
- package/dist/devtools/devtools-messages.d.ts.map +1 -1
- package/dist/devtools/devtools-messages.js +6 -9
- package/dist/devtools/devtools-messages.js.map +1 -1
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.d.ts +2 -5
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +26 -38
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/leader-sync-processor.d.ts +2 -2
- package/dist/leader-thread/leader-sync-processor.d.ts.map +1 -1
- package/dist/leader-thread/leader-sync-processor.js +12 -20
- package/dist/leader-thread/leader-sync-processor.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +66 -22
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts +7 -8
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +5 -11
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +17 -4
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +1 -2
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +3 -9
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +9 -17
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/mutation.d.ts +2 -9
- package/dist/mutation.d.ts.map +1 -1
- package/dist/mutation.js +5 -5
- package/dist/mutation.js.map +1 -1
- package/dist/query-builder/impl.d.ts +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +2 -2
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +19 -13
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +14 -16
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +7 -15
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/MutationEvent.d.ts +80 -49
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +15 -32
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/system-tables.d.ts +26 -26
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +11 -19
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/schema-management/migrations.js +6 -6
- package/dist/schema-management/migrations.js.map +1 -1
- package/dist/sync/client-session-sync-processor.d.ts +4 -4
- package/dist/sync/client-session-sync-processor.d.ts.map +1 -1
- package/dist/sync/index.d.ts +1 -1
- package/dist/sync/index.d.ts.map +1 -1
- package/dist/sync/index.js +1 -1
- package/dist/sync/index.js.map +1 -1
- package/dist/sync/next/history-dag-common.d.ts +4 -1
- package/dist/sync/next/history-dag-common.d.ts.map +1 -1
- package/dist/sync/next/history-dag-common.js +1 -1
- package/dist/sync/next/history-dag-common.js.map +1 -1
- package/dist/sync/next/rebase-events.d.ts +3 -3
- package/dist/sync/next/rebase-events.d.ts.map +1 -1
- package/dist/sync/next/rebase-events.js +2 -3
- package/dist/sync/next/rebase-events.js.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.d.ts.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.js +9 -3
- package/dist/sync/next/test/mutation-fixtures.js.map +1 -1
- package/dist/sync/sync.d.ts +11 -21
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +23 -45
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +12 -56
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +69 -125
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/sync/validate-push-payload.d.ts +2 -2
- package/dist/sync/validate-push-payload.d.ts.map +1 -1
- package/dist/sync/validate-push-payload.js +2 -2
- package/dist/sync/validate-push-payload.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.d.ts.map +1 -1
- package/dist/version.js +1 -1
- package/dist/version.js.map +1 -1
- package/package.json +5 -6
- package/src/adapter-types.ts +24 -22
- package/src/derived-mutations.test.ts +1 -1
- package/src/derived-mutations.ts +5 -9
- package/src/devtools/devtools-bridge.ts +1 -2
- package/src/devtools/devtools-messages.ts +6 -9
- package/src/index.ts +6 -0
- package/src/leader-thread/apply-mutation.ts +31 -49
- package/src/leader-thread/{LeaderSyncProcessor.ts → leader-sync-processor.ts} +230 -235
- package/src/leader-thread/leader-worker-devtools.ts +109 -30
- package/src/leader-thread/make-leader-thread-layer.ts +13 -24
- package/src/leader-thread/mutationlog.ts +5 -9
- package/src/leader-thread/recreate-db.ts +5 -9
- package/src/leader-thread/types.ts +11 -18
- package/src/mutation.ts +7 -17
- package/src/rehydrate-from-mutationlog.ts +23 -15
- package/src/schema/EventId.ts +9 -23
- package/src/schema/MutationEvent.ts +24 -46
- package/src/schema/system-tables.ts +11 -19
- package/src/schema-management/migrations.ts +6 -6
- package/src/sync/{ClientSessionSyncProcessor.ts → client-session-sync-processor.ts} +9 -11
- package/src/sync/index.ts +1 -1
- package/src/sync/next/history-dag-common.ts +1 -1
- package/src/sync/next/rebase-events.ts +7 -7
- package/src/sync/next/test/mutation-fixtures.ts +10 -3
- package/src/sync/sync.ts +6 -19
- package/src/sync/syncstate.test.ts +67 -127
- package/src/sync/syncstate.ts +19 -21
- package/src/sync/validate-push-payload.ts +4 -7
- package/src/version.ts +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +0 -37
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +0 -1
- package/dist/leader-thread/LeaderSyncProcessor.js +0 -417
- package/dist/leader-thread/LeaderSyncProcessor.js.map +0 -1
- package/dist/schema/EventId.test.d.ts +0 -2
- package/dist/schema/EventId.test.d.ts.map +0 -1
- package/dist/schema/EventId.test.js +0 -11
- package/dist/schema/EventId.test.js.map +0 -1
- package/dist/schema/MutationEvent.test.d.ts +0 -2
- package/dist/schema/MutationEvent.test.d.ts.map +0 -1
- package/dist/schema/MutationEvent.test.js +0 -2
- package/dist/schema/MutationEvent.test.js.map +0 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +0 -45
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +0 -1
- package/dist/sync/ClientSessionSyncProcessor.js +0 -134
- package/dist/sync/ClientSessionSyncProcessor.js.map +0 -1
- package/src/schema/EventId.test.ts +0 -12
package/src/leader-thread/{LeaderSyncProcessor.ts → leader-sync-processor.ts}
@@ -1,17 +1,17 @@
-import {
+import { shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
 import type { HttpClient, Scope } from '@livestore/utils/effect'
 import {
   BucketQueue,
   Deferred,
   Effect,
   Exit,
+  Fiber,
   FiberHandle,
   Option,
   OtelTracer,
-
+  Ref,
   Schema,
   Stream,
-  Subscribable,
   SubscriptionRef,
 } from '@livestore/utils/effect'
 import type * as otel from '@opentelemetry/api'
@@ -33,36 +33,61 @@ import { sql } from '../util.js'
 import { makeApplyMutation } from './apply-mutation.js'
 import { execSql } from './connection.js'
 import { getBackendHeadFromDb, getLocalHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js'
-import type { InitialBlockingSyncContext, InitialSyncInfo,
+import type { InitialBlockingSyncContext, InitialSyncInfo, SyncProcessor } from './types.js'
 import { LeaderThreadCtx } from './types.js'
 
-type
-
-
-
+type ProcessorStateInit = {
+  _tag: 'init'
+}
+
+type ProcessorStateInSync = {
+  _tag: 'in-sync'
+  syncState: SyncState.SyncState
+}
+
+type ProcessorStateApplyingSyncStateAdvance = {
+  _tag: 'applying-syncstate-advance'
+  origin: 'pull' | 'push'
+  syncState: SyncState.SyncState
+  // TODO re-introduce this
+  // proccesHead: EventId
+  fiber: Fiber.RuntimeFiber<void, UnexpectedError>
+}
+
+type ProcessorState = ProcessorStateInit | ProcessorStateInSync | ProcessorStateApplyingSyncStateAdvance
 
 /**
- * The
- *
+ * The general idea of the sync processor is to "follow the sync state"
+ * and apply/rollback mutations as needed to the read model and mutation log.
+ * The leader sync processor is also responsible for
+ * - broadcasting mutations to client sessions via the pull queues.
+ * - pushing mutations to the sync backend
+ *
+ * In the leader sync processor, pulling always has precedence over pushing.
  *
- *
+ * External events:
+ * - Mutation pushed from client session
+ * - Mutation pushed from devtools (via pushPartial)
+ * - Mutation pulled from sync backend
  *
- *
- * -
- * -
- * - Pushing mutations to the sync backend.
+ * The machine can be in the following states:
+ * - in-sync: fully synced with remote, now idling
+ * - applying-syncstate-advance (with pointer to current progress in case of rebase interrupt)
  *
- *
+ * Transitions:
+ * - in-sync -> applying-syncstate-advance
+ * - applying-syncstate-advance -> in-sync
+ * - applying-syncstate-advance -> applying-syncstate-advance (need to interrupt previous operation)
  *
- *
- * -
- *
- *
- * -
- * - Controlled by a `Latch` to manage execution flow.
- * - The latch closes on pull receipt and re-opens post-pull completion.
- * - Processes up to `maxBatchSize` events per cycle.
+ * Queuing vs interrupting behaviour:
+ * - Operations caused by pull can never be interrupted
+ * - Incoming pull can interrupt current push
+ * - Incoming pull needs to wait to previous pull to finish
+ * - Incoming push needs to wait to previous push to finish
  *
+ * Backend pushing:
+ * - continously push to backend
+ * - only interrupted and restarted on rebase
  */
 export const makeLeaderSyncProcessor = ({
   schema,
@@ -75,11 +100,13 @@ export const makeLeaderSyncProcessor = ({
   dbMissing: boolean
   dbLog: SynchronousDatabase
   initialBlockingSyncContext: InitialBlockingSyncContext
-}): Effect.Effect<
+}): Effect.Effect<SyncProcessor, UnexpectedError, Scope.Scope> =>
   Effect.gen(function* () {
-    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.
+    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.AnyEncoded>()
 
-    const
+    const stateRef = yield* Ref.make<ProcessorState>({ _tag: 'init' })
+
+    const semaphore = yield* Effect.makeSemaphore(1)
 
     const isLocalEvent = (mutationEventEncoded: MutationEvent.EncodedWithMeta) => {
       const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
@@ -87,32 +114,105 @@ export const makeLeaderSyncProcessor = ({
     }
 
     const spanRef = { current: undefined as otel.Span | undefined }
+    const applyMutationItemsRef = { current: undefined as ApplyMutationItems | undefined }
+
+    // TODO get rid of counters once Effect semaphore ordering is fixed
+    let counterRef = 0
+    let expectedCounter = 0
 
-
-
-
+    /*
+    TODO: refactor
+    - Pushes go directly into a Mailbox
+    - Have a worker fiber that takes from the mailbox (wouldn't need a semaphore)
+    */
+
+    const waitForSyncState = (counter: number): Effect.Effect<ProcessorStateInSync> =>
+      Effect.gen(function* () {
+        // console.log('waitForSyncState: waiting for semaphore', counter)
+        yield* semaphore.take(1)
+        // NOTE this is a workaround to ensure the semaphore take-order is respected
+        // TODO this needs to be fixed upstream in Effect
+        if (counter !== expectedCounter) {
+          console.log(
+            `waitForSyncState: counter mismatch (expected: ${expectedCounter}, got: ${counter}), releasing semaphore`,
+          )
+          yield* semaphore.release(1)
+          yield* Effect.yieldNow()
+          // Retrying...
+          return yield* waitForSyncState(counter)
+        }
+        // console.log('waitForSyncState: took semaphore', counter)
+        const state = yield* Ref.get(stateRef)
+        if (state._tag !== 'in-sync') {
+          return shouldNeverHappen('Expected to be in-sync but got ' + state._tag)
+        }
+        expectedCounter = counter + 1
+        return state
+      }).pipe(Effect.withSpan(`@livestore/common:leader-thread:syncing:waitForSyncState(${counter})`))
 
-    const push
+    const push = (newEvents: ReadonlyArray<MutationEvent.EncodedWithMeta>) =>
       Effect.gen(function* () {
+        const counter = counterRef
+        counterRef++
         // TODO validate batch
         if (newEvents.length === 0) return
 
-        const
+        const { connectedClientSessionPullQueues } = yield* LeaderThreadCtx
 
-        if
-
+        // TODO if there are multiple pending pushes, we should batch them together
+        const state = yield* waitForSyncState(counter)
 
-
-
+        const updateResult = SyncState.updateSyncState({
+          syncState: state.syncState,
+          payload: { _tag: 'local-push', newEvents },
+          isLocalEvent,
+          isEqualEvent: MutationEvent.isEqualEncoded,
+        })
+
+        if (updateResult._tag === 'rebase') {
+          return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
+        } else if (updateResult._tag === 'reject') {
+          return yield* Effect.fail(
+            InvalidPushError.make({
+              reason: {
+                _tag: 'LeaderAhead',
+                minimumExpectedId: updateResult.expectedMinimumId,
+                providedId: newEvents.at(0)!.id,
+              },
+            }),
           )
+        }
 
-
+        const fiber = yield* applyMutationItemsRef.current!({ batchItems: updateResult.newEvents }).pipe(Effect.fork)
 
-
-
-
-
-
+        yield* Ref.set(stateRef, {
+          _tag: 'applying-syncstate-advance',
+          origin: 'push',
+          syncState: updateResult.newSyncState,
+          fiber,
+        })
+
+        // console.log('setRef:applying-syncstate-advance after push', counter)
+
+        yield* connectedClientSessionPullQueues.offer({
+          payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
+          remaining: 0,
+        })
+
+        spanRef.current?.addEvent('local-push', {
+          batchSize: newEvents.length,
+          updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+        })
+
+        // Don't sync localOnly mutations
+        const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
+          const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
+          return mutationDef.options.localOnly === false
+        })
+
+        yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
+
+        yield* fiber // Waiting for the mutation to be applied
       }).pipe(
         Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
           attributes: {
@@ -125,10 +225,10 @@ export const makeLeaderSyncProcessor = ({
         }),
       )
 
-    const pushPartial:
+    const pushPartial: SyncProcessor['pushPartial'] = (mutationEventEncoded_) =>
       Effect.gen(function* () {
-        const
-        if (
+        const state = yield* Ref.get(stateRef)
+        if (state._tag === 'init') return shouldNeverHappen('Not initialized')
 
         const mutationDef =
           schema.mutations.get(mutationEventEncoded_.mutation) ??
@@ -136,14 +236,14 @@ export const makeLeaderSyncProcessor = ({
 
         const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
           ...mutationEventEncoded_,
-          ...EventId.nextPair(syncState.localHead, mutationDef.options.localOnly),
+          ...EventId.nextPair(state.syncState.localHead, mutationDef.options.localOnly),
         })
 
         yield* push([mutationEventEncoded])
       }).pipe(Effect.catchTag('InvalidPushError', Effect.orDie))
 
     // Starts various background loops
-    const boot:
+    const boot: SyncProcessor['boot'] = ({ dbReady }) =>
       Effect.gen(function* () {
         const span = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
         spanRef.current = span
@@ -157,21 +257,20 @@ export const makeLeaderSyncProcessor = ({
           )
         }
 
-        const pendingMutationEvents = yield* getMutationEventsSince({
-          global: initialBackendHead,
-          local: EventId.localDefault,
-        }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))))
+        const pendingMutationEvents = yield* getMutationEventsSince({ global: initialBackendHead, local: 0 })
 
-        const initialSyncState =
-          pending: pendingMutationEvents,
+        const initialSyncState = {
+          pending: pendingMutationEvents.map((_) => new MutationEvent.EncodedWithMeta(_)),
           // On the leader we don't need a rollback tail beyond `pending` items
           rollbackTail: [],
-          upstreamHead: { global: initialBackendHead, local:
+          upstreamHead: { global: initialBackendHead, local: 0 },
           localHead: initialLocalHead,
-        }
+        } as SyncState.SyncState
 
         /** State transitions need to happen atomically, so we use a Ref to track the state */
-        yield*
+        yield* Ref.set(stateRef, { _tag: 'in-sync', syncState: initialSyncState })
+
+        applyMutationItemsRef.current = yield* makeApplyMutationItems({ stateRef, semaphore })
 
         // Rehydrate sync queue
         if (pendingMutationEvents.length > 0) {
@@ -185,17 +284,6 @@ export const makeLeaderSyncProcessor = ({
           yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
         }
 
-        yield* backgroundApplyLocalPushes({
-          localPushesLatch,
-          localPushesQueue,
-          pullLatch,
-          syncStateSref,
-          syncBackendQueue,
-          schema,
-          isLocalEvent,
-          span,
-        }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
-
         const backendPushingFiberHandle = yield* FiberHandle.make()
 
         yield* FiberHandle.run(
@@ -222,9 +310,9 @@ export const makeLeaderSyncProcessor = ({
                 backgroundBackendPushing({ dbReady, syncBackendQueue, span }).pipe(Effect.tapCauseLogPretty),
               )
             }),
-
-
-
+          applyMutationItemsRef,
+          stateRef,
+          semaphore,
           span,
           initialBlockingSyncContext,
         }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
@@ -234,139 +322,40 @@ export const makeLeaderSyncProcessor = ({
       push,
       pushPartial,
       boot,
-      syncState:
-
-
-
-        return syncState
-      }),
-      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+      syncState: Effect.gen(function* () {
+        const state = yield* Ref.get(stateRef)
+        if (state._tag === 'init') return shouldNeverHappen('Not initialized')
+        return state.syncState
       }),
-    } satisfies
-  })
-
-const backgroundApplyLocalPushes = ({
-  localPushesLatch,
-  localPushesQueue,
-  pullLatch,
-  syncStateSref,
-  syncBackendQueue,
-  schema,
-  isLocalEvent,
-  span,
-}: {
-  pullLatch: Effect.Latch
-  localPushesLatch: Effect.Latch
-  localPushesQueue: BucketQueue.BucketQueue<PushQueueItem>
-  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
-  schema: LiveStoreSchema
-  isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
-  span: otel.Span | undefined
-}) =>
-  Effect.gen(function* () {
-    const { connectedClientSessionPullQueues } = yield* LeaderThreadCtx
-
-    const applyMutationItems = yield* makeApplyMutationItems
-
-    while (true) {
-      // TODO make batch size configurable
-      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10)
-      const [newEvents, deferreds] = ReadonlyArray.unzip(batchItems)
-
-      // Wait for the backend pulling to finish
-      yield* localPushesLatch.await
-
-      // Prevent the backend pulling from starting until this local push is finished
-      yield* pullLatch.close
-
-      const syncState = yield* syncStateSref
-      if (syncState === undefined) return shouldNeverHappen('Not initialized')
-
-      const updateResult = SyncState.updateSyncState({
-        syncState,
-        payload: { _tag: 'local-push', newEvents },
-        isLocalEvent,
-        isEqualEvent: MutationEvent.isEqualEncoded,
-      })
-
-      if (updateResult._tag === 'rebase') {
-        return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
-      } else if (updateResult._tag === 'reject') {
-        span?.addEvent('local-push:reject', {
-          batchSize: newEvents.length,
-          updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
-        })
-
-        const providedId = newEvents.at(0)!.id
-        const remainingEvents = yield* BucketQueue.takeAll(localPushesQueue)
-        const allDeferreds = [...deferreds, ...remainingEvents.map(([_, deferred]) => deferred)].filter(isNotUndefined)
-        yield* Effect.forEach(allDeferreds, (deferred) =>
-          Deferred.fail(
-            deferred,
-            InvalidPushError.make({
-              // TODO improve error handling so it differentiates between a push being rejected
-              // because of itself or because of another push
-              reason: {
-                _tag: 'LeaderAhead',
-                minimumExpectedId: updateResult.expectedMinimumId,
-                providedId,
-              },
-            }),
-          ),
-        )
-
-        // Allow the backend pulling to start
-        yield* pullLatch.open
-
-        // In this case we're skipping state update and down/upstream processing
-        // We've cleared the local push queue and are now waiting for new local pushes / backend pulls
-        continue
-      }
-
-      yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState)
-
-      yield* connectedClientSessionPullQueues.offer({
-        payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
-        remaining: 0,
-      })
-
-      span?.addEvent('local-push', {
-        batchSize: newEvents.length,
-        updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
-      })
-
-      // Don't sync localOnly mutations
-      const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
-        const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
-        return mutationDef.options.localOnly === false
-      })
-
-      yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
-
-      yield* applyMutationItems({ batchItems: newEvents, deferreds })
-
-      // Allow the backend pulling to start
-      yield* pullLatch.open
-    }
+    } satisfies SyncProcessor
   })
 
 type ApplyMutationItems = (_: {
   batchItems: ReadonlyArray<MutationEvent.EncodedWithMeta>
-  /** Indexes are aligned with `batchItems` */
-  deferreds: ReadonlyArray<Deferred.Deferred<void, InvalidPushError> | undefined> | undefined
 }) => Effect.Effect<void, UnexpectedError>
 
 // TODO how to handle errors gracefully
-const makeApplyMutationItems
+const makeApplyMutationItems = ({
+  stateRef,
+  semaphore,
+}: {
+  stateRef: Ref.Ref<ProcessorState>
+  semaphore: Effect.Semaphore
+}): Effect.Effect<ApplyMutationItems, UnexpectedError, LeaderThreadCtx | Scope.Scope> =>
   Effect.gen(function* () {
     const leaderThreadCtx = yield* LeaderThreadCtx
     const { db, dbLog } = leaderThreadCtx
 
     const applyMutation = yield* makeApplyMutation
 
-    return ({ batchItems
+    return ({ batchItems }) =>
      Effect.gen(function* () {
+        const state = yield* Ref.get(stateRef)
+        if (state._tag !== 'applying-syncstate-advance') {
+          // console.log('applyMutationItems: counter', counter)
+          return shouldNeverHappen(`Expected to be applying-syncstate-advance but got ${state._tag}`)
+        }
+
        db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
        dbLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
 
@@ -381,21 +370,29 @@ const makeApplyMutationItems: Effect.Effect<ApplyMutationItems, UnexpectedError,
         )
 
         for (let i = 0; i < batchItems.length; i++) {
-
+          const { meta, ...mutationEventEncoded } = batchItems[i]!
 
-
-
+          yield* applyMutation(mutationEventEncoded)
+
+          if (meta?.deferred) {
+            yield* Deferred.succeed(meta.deferred, void 0)
           }
+
+          // TODO re-introduce this
+          // if (i < batchItems.length - 1) {
+          // yield* Ref.set(stateRef, { ...state, proccesHead: batchItems[i + 1]!.id })
+          // }
         }
 
         db.execute('COMMIT', undefined) // Commit the transaction
         dbLog.execute('COMMIT', undefined) // Commit the transaction
+
+        yield* Ref.set(stateRef, { _tag: 'in-sync', syncState: state.syncState })
+        // console.log('setRef:sync after applyMutationItems', counter)
+        yield* semaphore.release(1)
       }).pipe(
-        Effect.uninterruptible,
         Effect.scoped,
-        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems',
-          attributes: { count: batchItems.length },
-        }),
+        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems'),
         Effect.tapCauseLogPretty,
         UnexpectedError.mapToUnexpectedError,
       )
@@ -407,21 +404,21 @@ const backgroundBackendPulling = ({
   isLocalEvent,
   restartBackendPushing,
   span,
-
-
-
+  stateRef,
+  applyMutationItemsRef,
+  semaphore,
   initialBlockingSyncContext,
 }: {
   dbReady: Deferred.Deferred<void>
-  initialBackendHead:
+  initialBackendHead: number
   isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
   restartBackendPushing: (
     filteredRebasedPending: ReadonlyArray<MutationEvent.EncodedWithMeta>,
   ) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx | HttpClient.HttpClient>
   span: otel.Span | undefined
-
-
-
+  stateRef: Ref.Ref<ProcessorState>
+  applyMutationItemsRef: { current: ApplyMutationItems | undefined }
+  semaphore: Effect.Semaphore
   initialBlockingSyncContext: InitialBlockingSyncContext
 }) =>
   Effect.gen(function* () {
@@ -431,25 +428,30 @@ const backgroundBackendPulling = ({
 
     const cursorInfo = yield* getCursorInfo(initialBackendHead)
 
-    const applyMutationItems = yield* makeApplyMutationItems
-
     const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
       Effect.gen(function* () {
         if (newEvents.length === 0) return
 
-
-
+        const state = yield* Ref.get(stateRef)
+        if (state._tag === 'init') return shouldNeverHappen('Not initialized')
 
-        //
-        yield* pullLatch.await
+        // const counter = state.counter + 1
 
-
-
+        if (state._tag === 'applying-syncstate-advance') {
+          if (state.origin === 'push') {
+            yield* Fiber.interrupt(state.fiber)
+            // In theory we should force-take the semaphore here, but as it's still taken,
+            // it's already in the right state we want it to be in
+          } else {
+            // Wait for previous advance to finish
+            yield* semaphore.take(1)
+          }
+        }
 
         const trimRollbackUntil = newEvents.at(-1)!.id
 
         const updateResult = SyncState.updateSyncState({
-          syncState,
+          syncState: state.syncState,
           payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
           isLocalEvent,
           isEqualEvent: MutationEvent.isEqualEncoded,
@@ -503,16 +505,17 @@ const backgroundBackendPulling = ({
           })
         }
 
-
+        const fiber = yield* applyMutationItemsRef.current!({
+          batchItems: updateResult.newEvents,
+        }).pipe(Effect.fork)
 
-        yield*
-
-
-
-
-
-
-        }
+        yield* Ref.set(stateRef, {
+          _tag: 'applying-syncstate-advance',
+          origin: 'pull',
+          syncState: updateResult.newSyncState,
+          fiber,
+        })
+        // console.log('setRef:applying-syncstate-advance after backgroundBackendPulling', -1)
       })
 
     yield* syncBackend.pull(cursorInfo).pipe(
@@ -535,7 +538,7 @@ const backgroundBackendPulling = ({
         yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
         yield* onNewPullChunk(
-          batch.map((_) => MutationEvent.EncodedWithMeta
+          batch.map((_) => new MutationEvent.EncodedWithMeta(_.mutationEventEncoded)),
           remaining,
         )
 
@@ -567,9 +570,7 @@ const rollback = ({
     // Apply changesets in reverse order
     for (let i = rollbackEvents.length - 1; i >= 0; i--) {
       const { changeset } = rollbackEvents[i]!
-
-        db.makeChangeset(changeset).invert().apply()
-      }
+      db.makeChangeset(changeset).invert().apply()
     }
 
     // Delete the changeset rows
@@ -587,7 +588,7 @@ const rollback = ({
     }),
   )
 
-const getCursorInfo = (remoteHead:
+const getCursorInfo = (remoteHead: number) =>
   Effect.gen(function* () {
     const { dbLog } = yield* LeaderThreadCtx
 
@@ -604,7 +605,7 @@ const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
     ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)
 
     return Option.some({
-      cursor: { global: remoteHead, local:
+      cursor: { global: remoteHead, local: 0 },
       metadata: syncMetadataOption,
     }) satisfies InitialSyncInfo
   }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
@@ -615,7 +616,7 @@ const backgroundBackendPushing = ({
   span,
 }: {
   dbReady: Deferred.Deferred<void>
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.
+  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.AnyEncoded>
   span: otel.Span | undefined
 }) =>
   Effect.gen(function* () {
@@ -638,11 +639,11 @@ const backgroundBackendPushing = ({
       })
 
       // TODO handle push errors (should only happen during concurrent pull+push)
-      const pushResult = yield* syncBackend.push(queueItems
+      const pushResult = yield* syncBackend.push(queueItems).pipe(Effect.either)
 
       if (pushResult._tag === 'Left') {
         span?.addEvent('backend-push-error', { error: pushResult.left.toString() })
-        // wait for interrupt
+        // wait for interrupt and restarting of pushing
        return yield* Effect.never
      }
 
@@ -663,9 +664,3 @@ const backgroundBackendPushing = ({
       }
     }
   }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'))
-
-const trimChangesetRows = (db: SynchronousDatabase, newHead: EventId.EventId) => {
-  // Since we're using the session changeset rows to query for the current head,
-  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
-  db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
-}
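
For orientation, the doc comment added in the new leader-sync-processor.ts describes the processor as a small state machine (init -> in-sync <-> applying-syncstate-advance) in which an advance caused by a pull may interrupt an advance caused by a push, but never the reverse. The standalone TypeScript sketch below only illustrates those transition rules under that reading; the names `Advance` and `LeaderSyncMachine` are invented for this example, and the actual implementation shown in the diff is built on Effect fibers, a `Ref`, and a semaphore rather than a class.

    // Illustrative sketch of the transition rules from the diff's doc comment; not LiveStore code.
    // `Advance` stands in for a forked fiber applying a batch of mutations: `run` is assumed to
    // start asynchronous work, return an abort function, and call `onDone` once the batch is applied.
    type ProcessorState =
      | { _tag: 'init' }
      | { _tag: 'in-sync' }
      | { _tag: 'applying-syncstate-advance'; origin: 'pull' | 'push'; abort: () => void }

    interface Advance {
      origin: 'pull' | 'push'
      run: (onDone: () => void) => () => void
    }

    class LeaderSyncMachine {
      private state: ProcessorState = { _tag: 'init' }

      boot(): void {
        // init -> in-sync
        this.state = { _tag: 'in-sync' }
      }

      startAdvance(advance: Advance): void {
        if (this.state._tag === 'init') throw new Error('Not initialized')
        if (this.state._tag === 'applying-syncstate-advance') {
          if (advance.origin === 'pull' && this.state.origin === 'push') {
            // Incoming pull interrupts the current push-driven advance.
            this.state.abort()
          } else {
            // Pull-driven advances are never interrupted; a pull waits for a previous pull
            // and a push waits for a previous push, so the caller must retry later.
            throw new Error('previous advance still in progress')
          }
        }
        // in-sync (or interrupted push) -> applying-syncstate-advance
        const abort = advance.run(() => {
          // applying-syncstate-advance -> in-sync once the batch has been applied
          this.state = { _tag: 'in-sync' }
        })
        this.state = { _tag: 'applying-syncstate-advance', origin: advance.origin, abort }
      }
    }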