@livestore/common 0.3.0-dev.1 → 0.3.0-dev.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +47 -35
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js.map +1 -1
- package/dist/derived-mutations.d.ts +4 -4
- package/dist/derived-mutations.d.ts.map +1 -1
- package/dist/derived-mutations.test.js.map +1 -1
- package/dist/devtools/devtool-message-leader.d.ts +2 -0
- package/dist/devtools/devtool-message-leader.d.ts.map +1 -0
- package/dist/devtools/devtool-message-leader.js +2 -0
- package/dist/devtools/devtool-message-leader.js.map +1 -0
- package/dist/devtools/devtools-bridge.d.ts +2 -1
- package/dist/devtools/devtools-bridge.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-client-session.d.ts +297 -0
- package/dist/devtools/devtools-messages-client-session.d.ts.map +1 -0
- package/dist/devtools/devtools-messages-client-session.js +61 -0
- package/dist/devtools/devtools-messages-client-session.js.map +1 -0
- package/dist/devtools/devtools-messages-common.d.ts +65 -0
- package/dist/devtools/devtools-messages-common.d.ts.map +1 -0
- package/dist/devtools/devtools-messages-common.js +35 -0
- package/dist/devtools/devtools-messages-common.js.map +1 -0
- package/dist/devtools/devtools-messages-leader.d.ts +261 -0
- package/dist/devtools/devtools-messages-leader.d.ts.map +1 -0
- package/dist/devtools/devtools-messages-leader.js +85 -0
- package/dist/devtools/devtools-messages-leader.js.map +1 -0
- package/dist/devtools/devtools-messages.d.ts +3 -592
- package/dist/devtools/devtools-messages.d.ts.map +1 -1
- package/dist/devtools/devtools-messages.js +3 -171
- package/dist/devtools/devtools-messages.js.map +1 -1
- package/dist/index.d.ts +0 -4
- package/dist/index.d.ts.map +1 -1
- package/dist/init-singleton-tables.d.ts +2 -2
- package/dist/init-singleton-tables.d.ts.map +1 -1
- package/dist/init-singleton-tables.js.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +37 -0
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -0
- package/dist/leader-thread/LeaderSyncProcessor.js +432 -0
- package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -0
- package/dist/leader-thread/apply-mutation.d.ts +5 -2
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +41 -29
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/connection.d.ts +4 -4
- package/dist/leader-thread/connection.d.ts.map +1 -1
- package/dist/leader-thread/connection.js +5 -5
- package/dist/leader-thread/connection.js.map +1 -1
- package/dist/leader-thread/leader-sync-processor.d.ts +2 -2
- package/dist/leader-thread/leader-sync-processor.d.ts.map +1 -1
- package/dist/leader-thread/leader-sync-processor.js +20 -12
- package/dist/leader-thread/leader-sync-processor.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +37 -81
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts +12 -11
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +33 -14
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +6 -19
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +7 -6
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +24 -18
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +36 -16
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/mutation.d.ts +9 -2
- package/dist/mutation.d.ts.map +1 -1
- package/dist/mutation.js +5 -5
- package/dist/mutation.js.map +1 -1
- package/dist/query-builder/impl.d.ts +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +5 -5
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +13 -19
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +16 -14
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +15 -7
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/EventId.test.d.ts +2 -0
- package/dist/schema/EventId.test.d.ts.map +1 -0
- package/dist/schema/EventId.test.js +11 -0
- package/dist/schema/EventId.test.js.map +1 -0
- package/dist/schema/MutationEvent.d.ts +49 -80
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +32 -15
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/MutationEvent.test.d.ts +2 -0
- package/dist/schema/MutationEvent.test.d.ts.map +1 -0
- package/dist/schema/MutationEvent.test.js +2 -0
- package/dist/schema/MutationEvent.test.js.map +1 -0
- package/dist/schema/system-tables.d.ts +26 -26
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +19 -11
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/schema-management/common.d.ts +3 -3
- package/dist/schema-management/common.d.ts.map +1 -1
- package/dist/schema-management/common.js.map +1 -1
- package/dist/schema-management/migrations.d.ts +4 -4
- package/dist/schema-management/migrations.d.ts.map +1 -1
- package/dist/schema-management/migrations.js +6 -6
- package/dist/schema-management/migrations.js.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +43 -0
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -0
- package/dist/sync/ClientSessionSyncProcessor.js +141 -0
- package/dist/sync/ClientSessionSyncProcessor.js.map +1 -0
- package/dist/sync/client-session-sync-processor.d.ts +4 -4
- package/dist/sync/client-session-sync-processor.d.ts.map +1 -1
- package/dist/sync/index.d.ts +1 -1
- package/dist/sync/index.d.ts.map +1 -1
- package/dist/sync/index.js +1 -1
- package/dist/sync/index.js.map +1 -1
- package/dist/sync/next/history-dag-common.d.ts +1 -4
- package/dist/sync/next/history-dag-common.d.ts.map +1 -1
- package/dist/sync/next/history-dag-common.js +1 -1
- package/dist/sync/next/history-dag-common.js.map +1 -1
- package/dist/sync/next/rebase-events.d.ts +3 -3
- package/dist/sync/next/rebase-events.d.ts.map +1 -1
- package/dist/sync/next/rebase-events.js +3 -2
- package/dist/sync/next/rebase-events.js.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.d.ts +7 -7
- package/dist/sync/next/test/mutation-fixtures.d.ts.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.js +3 -9
- package/dist/sync/next/test/mutation-fixtures.js.map +1 -1
- package/dist/sync/sync.d.ts +21 -11
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +45 -23
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +56 -12
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +125 -69
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/sync/validate-push-payload.d.ts +2 -2
- package/dist/sync/validate-push-payload.d.ts.map +1 -1
- package/dist/sync/validate-push-payload.js +2 -2
- package/dist/sync/validate-push-payload.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.d.ts.map +1 -1
- package/dist/version.js +1 -1
- package/dist/version.js.map +1 -1
- package/package.json +6 -5
- package/src/adapter-types.ts +39 -40
- package/src/derived-mutations.test.ts +1 -1
- package/src/derived-mutations.ts +9 -5
- package/src/devtools/devtools-bridge.ts +2 -1
- package/src/devtools/devtools-messages-client-session.ts +109 -0
- package/src/devtools/devtools-messages-common.ts +52 -0
- package/src/devtools/devtools-messages-leader.ts +115 -0
- package/src/devtools/devtools-messages.ts +3 -243
- package/src/index.ts +0 -6
- package/src/init-singleton-tables.ts +2 -2
- package/src/leader-thread/{leader-sync-processor.ts → LeaderSyncProcessor.ts} +306 -268
- package/src/leader-thread/apply-mutation.ts +53 -35
- package/src/leader-thread/connection.ts +7 -7
- package/src/leader-thread/leader-worker-devtools.ts +52 -124
- package/src/leader-thread/make-leader-thread-layer.ts +62 -30
- package/src/leader-thread/mutationlog.ts +14 -10
- package/src/leader-thread/recreate-db.ts +24 -20
- package/src/leader-thread/types.ts +41 -20
- package/src/mutation.ts +17 -7
- package/src/rehydrate-from-mutationlog.ts +18 -26
- package/src/schema/EventId.test.ts +12 -0
- package/src/schema/EventId.ts +23 -9
- package/src/schema/MutationEvent.ts +46 -24
- package/src/schema/system-tables.ts +19 -11
- package/src/schema-management/common.ts +3 -3
- package/src/schema-management/migrations.ts +10 -10
- package/src/sync/{client-session-sync-processor.ts → ClientSessionSyncProcessor.ts} +26 -19
- package/src/sync/index.ts +1 -1
- package/src/sync/next/history-dag-common.ts +1 -1
- package/src/sync/next/rebase-events.ts +7 -7
- package/src/sync/next/test/mutation-fixtures.ts +3 -10
- package/src/sync/sync.ts +19 -6
- package/src/sync/syncstate.test.ts +127 -67
- package/src/sync/syncstate.ts +21 -19
- package/src/sync/validate-push-payload.ts +7 -4
- package/src/version.ts +1 -1
@@ -1,22 +1,22 @@
-import { shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
-import type { HttpClient, Scope } from '@livestore/utils/effect'
+import { isNotUndefined, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
+import type { HttpClient, Scope, Tracer } from '@livestore/utils/effect'
 import {
   BucketQueue,
   Deferred,
   Effect,
   Exit,
-  Fiber,
   FiberHandle,
   Option,
   OtelTracer,
-
+  ReadonlyArray,
   Schema,
   Stream,
+  Subscribable,
   SubscriptionRef,
 } from '@livestore/utils/effect'
 import type * as otel from '@opentelemetry/api'

-import type {
+import type { SqliteDb } from '../adapter-types.js'
 import { UnexpectedError } from '../adapter-types.js'
 import type { LiveStoreSchema, SessionChangesetMetaRow } from '../schema/mod.js'
 import {
@@ -33,202 +33,114 @@ import { sql } from '../util.js'
 import { makeApplyMutation } from './apply-mutation.js'
 import { execSql } from './connection.js'
 import { getBackendHeadFromDb, getLocalHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js'
-import type { InitialBlockingSyncContext, InitialSyncInfo,
+import type { InitialBlockingSyncContext, InitialSyncInfo, LeaderSyncProcessor } from './types.js'
 import { LeaderThreadCtx } from './types.js'

-type
-
-
-
-type ProcessorStateInSync = {
-  _tag: 'in-sync'
-  syncState: SyncState.SyncState
-}
-
-type ProcessorStateApplyingSyncStateAdvance = {
-  _tag: 'applying-syncstate-advance'
-  origin: 'pull' | 'push'
-  syncState: SyncState.SyncState
-  // TODO re-introduce this
-  // proccesHead: EventId
-  fiber: Fiber.RuntimeFiber<void, UnexpectedError>
-}
-
-type ProcessorState = ProcessorStateInit | ProcessorStateInSync | ProcessorStateApplyingSyncStateAdvance
+type PushQueueItem = [
+  mutationEvent: MutationEvent.EncodedWithMeta,
+  deferred: Deferred.Deferred<void, InvalidPushError> | undefined,
+]

 /**
- * The
- *
- * The leader sync processor is also responsible for
- * - broadcasting mutations to client sessions via the pull queues.
- * - pushing mutations to the sync backend
+ * The LeaderSyncProcessor manages synchronization of mutations between
+ * the local state and the sync backend, ensuring efficient and orderly processing.
  *
- * In the
+ * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
- *
- * -
- * -
- * -
+ * Responsibilities:
+ * - Queueing incoming local mutations in a localPushMailbox.
+ * - Broadcasting mutations to client sessions via pull queues.
+ * - Pushing mutations to the sync backend.
  *
- *
- * - in-sync: fully synced with remote, now idling
- * - applying-syncstate-advance (with pointer to current progress in case of rebase interrupt)
+ * Notes:
  *
- *
- * -
- *
- *
+ * local push processing:
+ * - localPushMailbox:
+ * - Maintains events in ascending order.
+ * - Uses `Deferred` objects to resolve/reject events based on application success.
+ * - Processes events from the mailbox, applying mutations in batches.
+ * - Controlled by a `Latch` to manage execution flow.
+ * - The latch closes on pull receipt and re-opens post-pull completion.
+ * - Processes up to `maxBatchSize` events per cycle.
  *
- * Queuing vs interrupting behaviour:
- * - Operations caused by pull can never be interrupted
- * - Incoming pull can interrupt current push
- * - Incoming pull needs to wait to previous pull to finish
- * - Incoming push needs to wait to previous push to finish
- *
- * Backend pushing:
- * - continously push to backend
- * - only interrupted and restarted on rebase
  */
 export const makeLeaderSyncProcessor = ({
   schema,
   dbMissing,
-
+  dbMutationLog,
   initialBlockingSyncContext,
 }: {
   schema: LiveStoreSchema
-  /** Only used to know whether we can safely query
+  /** Only used to know whether we can safely query dbMutationLog during setup execution */
   dbMissing: boolean
-
+  dbMutationLog: SqliteDb
   initialBlockingSyncContext: InitialBlockingSyncContext
-}): Effect.Effect<
+}): Effect.Effect<LeaderSyncProcessor, UnexpectedError, Scope.Scope> =>
   Effect.gen(function* () {
-    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.
-
-    const stateRef = yield* Ref.make<ProcessorState>({ _tag: 'init' })
+    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.EncodedWithMeta>()

-    const
+    const syncStateSref = yield* SubscriptionRef.make<SyncState.SyncState | undefined>(undefined)

     const isLocalEvent = (mutationEventEncoded: MutationEvent.EncodedWithMeta) => {
       const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
       return mutationDef.options.localOnly
     }

-
-    const
-
-
-
-
-
-
-
-
-    */
+    // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
+    const ctxRef = {
+      current: undefined as
+        | undefined
+        | {
+            otelSpan: otel.Span | undefined
+            span: Tracer.Span
+            devtoolsPullLatch: Effect.Latch | undefined
+            devtoolsPushLatch: Effect.Latch | undefined
+          },
+    }

-    const
-
-
-    yield* semaphore.take(1)
-    // NOTE this is a workaround to ensure the semaphore take-order is respected
-    // TODO this needs to be fixed upstream in Effect
-    if (counter !== expectedCounter) {
-      console.log(
-        `waitForSyncState: counter mismatch (expected: ${expectedCounter}, got: ${counter}), releasing semaphore`,
-      )
-      yield* semaphore.release(1)
-      yield* Effect.yieldNow()
-      // Retrying...
-      return yield* waitForSyncState(counter)
-    }
-    // console.log('waitForSyncState: took semaphore', counter)
-    const state = yield* Ref.get(stateRef)
-    if (state._tag !== 'in-sync') {
-      return shouldNeverHappen('Expected to be in-sync but got ' + state._tag)
-    }
-    expectedCounter = counter + 1
-    return state
-  }).pipe(Effect.withSpan(`@livestore/common:leader-thread:syncing:waitForSyncState(${counter})`))
+    const localPushesQueue = yield* BucketQueue.make<PushQueueItem>()
+    const localPushesLatch = yield* Effect.makeLatch(true)
+    const pullLatch = yield* Effect.makeLatch(true)

-    const push = (newEvents
+    const push: LeaderSyncProcessor['push'] = (newEvents, options) =>
       Effect.gen(function* () {
-        const counter = counterRef
-        counterRef++
         // TODO validate batch
         if (newEvents.length === 0) return

-
-
-        // TODO if there are multiple pending pushes, we should batch them together
-        const state = yield* waitForSyncState(counter)
-
-        const updateResult = SyncState.updateSyncState({
-          syncState: state.syncState,
-          payload: { _tag: 'local-push', newEvents },
-          isLocalEvent,
-          isEqualEvent: MutationEvent.isEqualEncoded,
-        })
-
-        if (updateResult._tag === 'rebase') {
-          return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
-        } else if (updateResult._tag === 'reject') {
-          return yield* Effect.fail(
-            InvalidPushError.make({
-              reason: {
-                _tag: 'LeaderAhead',
-                minimumExpectedId: updateResult.expectedMinimumId,
-                providedId: newEvents.at(0)!.id,
-              },
-            }),
-          )
+        if (ctxRef.current?.devtoolsPushLatch !== undefined) {
+          yield* ctxRef.current.devtoolsPushLatch.await
         }

-        const
-
-        yield* Ref.set(stateRef, {
-          _tag: 'applying-syncstate-advance',
-          origin: 'push',
-          syncState: updateResult.newSyncState,
-          fiber,
-        })
-
-        // console.log('setRef:applying-syncstate-advance after push', counter)
+        const waitForProcessing = options?.waitForProcessing ?? false

-
-
-          remaining: 0,
-        })
+        if (waitForProcessing) {
+          const deferreds = yield* Effect.forEach(newEvents, () => Deferred.make<void, InvalidPushError>())

-
-
-
-        })
-
-        // Don't sync localOnly mutations
-        const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
-          const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
-          return mutationDef.options.localOnly === false
-        })
+          const items = newEvents.map(
+            (mutationEventEncoded, i) => [mutationEventEncoded, deferreds[i]] as PushQueueItem,
+          )

-
+          yield* BucketQueue.offerAll(localPushesQueue, items)

-
+          yield* Effect.all(deferreds)
+        } else {
+          const items = newEvents.map((mutationEventEncoded) => [mutationEventEncoded, undefined] as PushQueueItem)
+          yield* BucketQueue.offerAll(localPushesQueue, items)
+        }
       }).pipe(
         Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
           attributes: {
             batchSize: newEvents.length,
             batch: TRACE_VERBOSE ? newEvents : undefined,
           },
-          links:
-            ? [{ _tag: 'SpanLink', span: OtelTracer.makeExternalSpan(spanRef.current.spanContext()), attributes: {} }]
-            : undefined,
+          links: ctxRef.current?.span ? [{ _tag: 'SpanLink', span: ctxRef.current.span, attributes: {} }] : undefined,
         }),
       )

-    const pushPartial:
+    const pushPartial: LeaderSyncProcessor['pushPartial'] = (mutationEventEncoded_) =>
       Effect.gen(function* () {
-        const
-        if (
+        const syncState = yield* syncStateSref
+        if (syncState === undefined) return shouldNeverHappen('Not initialized')

         const mutationDef =
           schema.mutations.get(mutationEventEncoded_.mutation) ??
@@ -236,20 +148,28 @@ export const makeLeaderSyncProcessor = ({

         const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
           ...mutationEventEncoded_,
-          ...EventId.nextPair(
+          ...EventId.nextPair(syncState.localHead, mutationDef.options.localOnly),
         })

         yield* push([mutationEventEncoded])
       }).pipe(Effect.catchTag('InvalidPushError', Effect.orDie))

     // Starts various background loops
-    const boot:
+    const boot: LeaderSyncProcessor['boot'] = ({ dbReady }) =>
       Effect.gen(function* () {
-        const span = yield*
-
+        const span = yield* Effect.currentSpan.pipe(Effect.orDie)
+        const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
+        const { devtools } = yield* LeaderThreadCtx

-
-
+        ctxRef.current = {
+          otelSpan,
+          span,
+          devtoolsPullLatch: devtools.enabled ? devtools.syncBackendPullLatch : undefined,
+          devtoolsPushLatch: devtools.enabled ? devtools.syncBackendPushLatch : undefined,
+        }
+
+        const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbMutationLog)
+        const initialLocalHead = dbMissing ? EventId.ROOT : getLocalHeadFromDb(dbMutationLog)

         if (initialBackendHead > initialLocalHead.global) {
           return shouldNeverHappen(
@@ -257,20 +177,21 @@
           )
         }

-        const pendingMutationEvents = yield* getMutationEventsSince({
+        const pendingMutationEvents = yield* getMutationEventsSince({
+          global: initialBackendHead,
+          local: EventId.localDefault,
+        }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))))

-        const initialSyncState = {
-          pending: pendingMutationEvents
+        const initialSyncState = new SyncState.SyncState({
+          pending: pendingMutationEvents,
           // On the leader we don't need a rollback tail beyond `pending` items
           rollbackTail: [],
-          upstreamHead: { global: initialBackendHead, local:
+          upstreamHead: { global: initialBackendHead, local: EventId.localDefault },
           localHead: initialLocalHead,
-        }
+        })

         /** State transitions need to happen atomically, so we use a Ref to track the state */
-        yield*
-
-        applyMutationItemsRef.current = yield* makeApplyMutationItems({ stateRef, semaphore })
+        yield* SubscriptionRef.set(syncStateSref, initialSyncState)

         // Rehydrate sync queue
         if (pendingMutationEvents.length > 0) {
@@ -284,11 +205,22 @@
           yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
         }

+        yield* backgroundApplyLocalPushes({
+          localPushesLatch,
+          localPushesQueue,
+          pullLatch,
+          syncStateSref,
+          syncBackendQueue,
+          schema,
+          isLocalEvent,
+          otelSpan,
+        }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
+
         const backendPushingFiberHandle = yield* FiberHandle.make()

         yield* FiberHandle.run(
           backendPushingFiberHandle,
-          backgroundBackendPushing({ dbReady, syncBackendQueue,
+          backgroundBackendPushing({ dbReady, syncBackendQueue, otelSpan }).pipe(Effect.tapCauseLogPretty),
         )

         yield* backgroundBackendPulling({
@@ -307,14 +239,15 @@
             // Restart pushing fiber
             yield* FiberHandle.run(
               backendPushingFiberHandle,
-              backgroundBackendPushing({ dbReady, syncBackendQueue,
+              backgroundBackendPushing({ dbReady, syncBackendQueue, otelSpan }).pipe(Effect.tapCauseLogPretty),
             )
           }),
-
-
-
-
+          syncStateSref,
+          localPushesLatch,
+          pullLatch,
+          otelSpan,
           initialBlockingSyncContext,
+          devtoolsPullLatch: ctxRef.current?.devtoolsPullLatch,
         }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
       }).pipe(Effect.withSpanScoped('@livestore/common:leader-thread:syncing'))

@@ -322,42 +255,141 @@ export const makeLeaderSyncProcessor = ({
       push,
       pushPartial,
       boot,
-      syncState:
-
-
-
+      syncState: Subscribable.make({
+        get: Effect.gen(function* () {
+          const syncState = yield* syncStateSref
+          if (syncState === undefined) return shouldNeverHappen('Not initialized')
+          return syncState
+        }),
+        changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
       }),
-    } satisfies
+    } satisfies LeaderSyncProcessor
+  })
+
+const backgroundApplyLocalPushes = ({
+  localPushesLatch,
+  localPushesQueue,
+  pullLatch,
+  syncStateSref,
+  syncBackendQueue,
+  schema,
+  isLocalEvent,
+  otelSpan,
+}: {
+  pullLatch: Effect.Latch
+  localPushesLatch: Effect.Latch
+  localPushesQueue: BucketQueue.BucketQueue<PushQueueItem>
+  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
+  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+  schema: LiveStoreSchema
+  isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
+  otelSpan: otel.Span | undefined
+}) =>
+  Effect.gen(function* () {
+    const { connectedClientSessionPullQueues } = yield* LeaderThreadCtx
+
+    const applyMutationItems = yield* makeApplyMutationItems
+
+    while (true) {
+      // TODO make batch size configurable
+      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10)
+      const [newEvents, deferreds] = ReadonlyArray.unzip(batchItems)
+
+      // Wait for the backend pulling to finish
+      yield* localPushesLatch.await
+
+      // Prevent the backend pulling from starting until this local push is finished
+      yield* pullLatch.close
+
+      const syncState = yield* syncStateSref
+      if (syncState === undefined) return shouldNeverHappen('Not initialized')
+
+      const updateResult = SyncState.updateSyncState({
+        syncState,
+        payload: { _tag: 'local-push', newEvents },
+        isLocalEvent,
+        isEqualEvent: MutationEvent.isEqualEncoded,
+      })
+
+      if (updateResult._tag === 'rebase') {
+        return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
+      } else if (updateResult._tag === 'reject') {
+        otelSpan?.addEvent('local-push:reject', {
+          batchSize: newEvents.length,
+          updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+        })
+
+        const providedId = newEvents.at(0)!.id
+        const remainingEvents = yield* BucketQueue.takeAll(localPushesQueue)
+        const allDeferreds = [...deferreds, ...remainingEvents.map(([_, deferred]) => deferred)].filter(isNotUndefined)
+        yield* Effect.forEach(allDeferreds, (deferred) =>
+          Deferred.fail(
+            deferred,
+            InvalidPushError.make({
+              // TODO improve error handling so it differentiates between a push being rejected
+              // because of itself or because of another push
+              reason: {
+                _tag: 'LeaderAhead',
+                minimumExpectedId: updateResult.expectedMinimumId,
+                providedId,
+              },
+            }),
+          ),
+        )
+
+        // Allow the backend pulling to start
+        yield* pullLatch.open
+
+        // In this case we're skipping state update and down/upstream processing
+        // We've cleared the local push queue and are now waiting for new local pushes / backend pulls
+        continue
+      }
+
+      yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState)
+
+      yield* connectedClientSessionPullQueues.offer({
+        payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
+        remaining: 0,
+      })
+
+      otelSpan?.addEvent('local-push', {
+        batchSize: newEvents.length,
+        updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+      })
+
+      // Don't sync localOnly mutations
+      const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
+        const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
+        return mutationDef.options.localOnly === false
+      })
+
+      yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
+
+      yield* applyMutationItems({ batchItems: newEvents, deferreds })
+
+      // Allow the backend pulling to start
+      yield* pullLatch.open
+    }
   })

 type ApplyMutationItems = (_: {
   batchItems: ReadonlyArray<MutationEvent.EncodedWithMeta>
+  /** Indexes are aligned with `batchItems` */
+  deferreds: ReadonlyArray<Deferred.Deferred<void, InvalidPushError> | undefined> | undefined
 }) => Effect.Effect<void, UnexpectedError>

 // TODO how to handle errors gracefully
-const makeApplyMutationItems =
-  stateRef,
-  semaphore,
-}: {
-  stateRef: Ref.Ref<ProcessorState>
-  semaphore: Effect.Semaphore
-}): Effect.Effect<ApplyMutationItems, UnexpectedError, LeaderThreadCtx | Scope.Scope> =>
+const makeApplyMutationItems: Effect.Effect<ApplyMutationItems, UnexpectedError, LeaderThreadCtx | Scope.Scope> =
   Effect.gen(function* () {
     const leaderThreadCtx = yield* LeaderThreadCtx
-    const { db,
+    const { dbReadModel: db, dbMutationLog } = leaderThreadCtx

     const applyMutation = yield* makeApplyMutation

-    return ({ batchItems }) =>
+    return ({ batchItems, deferreds }) =>
       Effect.gen(function* () {
-        const state = yield* Ref.get(stateRef)
-        if (state._tag !== 'applying-syncstate-advance') {
-          // console.log('applyMutationItems: counter', counter)
-          return shouldNeverHappen(`Expected to be applying-syncstate-advance but got ${state._tag}`)
-        }
-
         db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
-
+        dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction

         yield* Effect.addFinalizer((exit) =>
           Effect.gen(function* () {
@@ -365,34 +397,26 @@ const makeApplyMutationItems = ({

             // Rollback in case of an error
             db.execute('ROLLBACK', undefined)
-
+            dbMutationLog.execute('ROLLBACK', undefined)
           }),
         )

         for (let i = 0; i < batchItems.length; i++) {
-
-
-          yield* applyMutation(mutationEventEncoded)
+          yield* applyMutation(batchItems[i]!)

-          if (
-            yield* Deferred.succeed(
+          if (deferreds?.[i] !== undefined) {
+            yield* Deferred.succeed(deferreds[i]!, void 0)
           }
-
-          // TODO re-introduce this
-          // if (i < batchItems.length - 1) {
-          //   yield* Ref.set(stateRef, { ...state, proccesHead: batchItems[i + 1]!.id })
-          // }
         }

         db.execute('COMMIT', undefined) // Commit the transaction
-
-
-        yield* Ref.set(stateRef, { _tag: 'in-sync', syncState: state.syncState })
-        // console.log('setRef:sync after applyMutationItems', counter)
-        yield* semaphore.release(1)
+        dbMutationLog.execute('COMMIT', undefined) // Commit the transaction
       }).pipe(
+        Effect.uninterruptible,
         Effect.scoped,
-        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems'
+        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems', {
+          attributes: { count: batchItems.length },
+        }),
         Effect.tapCauseLogPretty,
         UnexpectedError.mapToUnexpectedError,
       )
@@ -403,55 +427,62 @@ const backgroundBackendPulling = ({
   initialBackendHead,
   isLocalEvent,
   restartBackendPushing,
-
-
-
-
+  otelSpan,
+  syncStateSref,
+  localPushesLatch,
+  pullLatch,
+  devtoolsPullLatch,
   initialBlockingSyncContext,
 }: {
   dbReady: Deferred.Deferred<void>
-  initialBackendHead:
+  initialBackendHead: EventId.GlobalEventId
   isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
   restartBackendPushing: (
     filteredRebasedPending: ReadonlyArray<MutationEvent.EncodedWithMeta>,
   ) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx | HttpClient.HttpClient>
-
-
-
-
+  otelSpan: otel.Span | undefined
+  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
+  localPushesLatch: Effect.Latch
+  pullLatch: Effect.Latch
+  devtoolsPullLatch: Effect.Latch | undefined
   initialBlockingSyncContext: InitialBlockingSyncContext
 }) =>
   Effect.gen(function* () {
-    const {
+    const {
+      syncBackend,
+      dbReadModel: db,
+      dbMutationLog,
+      connectedClientSessionPullQueues,
+      schema,
+    } = yield* LeaderThreadCtx

     if (syncBackend === undefined) return

     const cursorInfo = yield* getCursorInfo(initialBackendHead)

+    const applyMutationItems = yield* makeApplyMutationItems
+
     const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
       Effect.gen(function* () {
         if (newEvents.length === 0) return

-
-
+        if (devtoolsPullLatch !== undefined) {
+          yield* devtoolsPullLatch.await
+        }

-        //
+        // Prevent more local pushes from being processed until this pull is finished
+        yield* localPushesLatch.close

-
-
-
-
-
-        } else {
-          // Wait for previous advance to finish
-          yield* semaphore.take(1)
-        }
-      }
+        // Wait for pending local pushes to finish
+        yield* pullLatch.await
+
+        const syncState = yield* syncStateSref
+        if (syncState === undefined) return shouldNeverHappen('Not initialized')

         const trimRollbackUntil = newEvents.at(-1)!.id

         const updateResult = SyncState.updateSyncState({
-          syncState
+          syncState,
           payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
           isLocalEvent,
           isEqualEvent: MutationEvent.isEqualEncoded,
@@ -464,10 +495,10 @@ const backgroundBackendPulling = ({

         const newBackendHead = newEvents.at(-1)!.id

-        updateBackendHead(
+        updateBackendHead(dbMutationLog, newBackendHead)

         if (updateResult._tag === 'rebase') {
-
+          otelSpan?.addEvent('backend-pull:rebase', {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
            rollbackCount: updateResult.eventsToRollback.length,
@@ -481,7 +512,7 @@ const backgroundBackendPulling = ({
           yield* restartBackendPushing(filteredRebasedPending)

           if (updateResult.eventsToRollback.length > 0) {
-            yield* rollback({ db,
+            yield* rollback({ db, dbMutationLog, eventIdsToRollback: updateResult.eventsToRollback.map((_) => _.id) })
           }

           yield* connectedClientSessionPullQueues.offer({
@@ -494,7 +525,7 @@ const backgroundBackendPulling = ({
             remaining,
           })
         } else {
-
+          otelSpan?.addEvent('backend-pull:advance', {
            newEventsCount: newEvents.length,
            updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
           })
@@ -505,17 +536,16 @@ const backgroundBackendPulling = ({
           })
         }

-
-          batchItems: updateResult.newEvents,
-        }).pipe(Effect.fork)
+        trimChangesetRows(db, newBackendHead)

-        yield*
-
-
-
-
-
-
+        yield* applyMutationItems({ batchItems: updateResult.newEvents, deferreds: undefined })
+
+        yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState)
+
+        if (remaining === 0) {
+          // Allow local pushes to be processed again
+          yield* localPushesLatch.open
+        }
       })

     yield* syncBackend.pull(cursorInfo).pipe(
@@ -538,7 +568,7 @@ const backgroundBackendPulling = ({
         yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

         yield* onNewPullChunk(
-          batch.map((_) =>
+          batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)),
           remaining,
         )

@@ -552,11 +582,11 @@ const backgroundBackendPulling = ({

 const rollback = ({
   db,
-
+  dbMutationLog,
   eventIdsToRollback,
 }: {
-  db:
-
+  db: SqliteDb
+  dbMutationLog: SqliteDb
   eventIdsToRollback: EventId.EventId[]
 }) =>
   Effect.gen(function* () {
@@ -570,7 +600,9 @@ const rollback = ({
     // Apply changesets in reverse order
     for (let i = rollbackEvents.length - 1; i >= 0; i--) {
       const { changeset } = rollbackEvents[i]!
-
+      if (changeset !== null) {
+        db.makeChangeset(changeset).invert().apply()
+      }
     }

     // Delete the changeset rows
@@ -579,7 +611,7 @@ const rollback = ({
     )

     // Delete the mutation log rows
-
+    dbMutationLog.execute(
       sql`DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idLocal) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.local})`).join(', ')})`,
     )
   }).pipe(
@@ -588,9 +620,9 @@ const rollback = ({
     }),
   )

-const getCursorInfo = (remoteHead:
+const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
   Effect.gen(function* () {
-    const {
+    const { dbMutationLog } = yield* LeaderThreadCtx

     if (remoteHead === EventId.ROOT.global) return Option.none()

@@ -599,13 +631,13 @@ const getCursorInfo = (remoteHead: number) =>
     }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head)

     const syncMetadataOption = yield* Effect.sync(() =>
-
+      dbMutationLog.select<{ syncMetadataJson: string }>(
        sql`SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idLocal ASC LIMIT 1`,
       ),
     ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)

     return Option.some({
-      cursor: { global: remoteHead, local:
+      cursor: { global: remoteHead, local: EventId.localDefault },
       metadata: syncMetadataOption,
     }) satisfies InitialSyncInfo
   }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
@@ -613,14 +645,14 @@ const getCursorInfo = (remoteHead: number) =>
 const backgroundBackendPushing = ({
   dbReady,
   syncBackendQueue,
-
+  otelSpan,
 }: {
   dbReady: Deferred.Deferred<void>
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.
-
+  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+  otelSpan: otel.Span | undefined
 }) =>
   Effect.gen(function* () {
-    const { syncBackend,
+    const { syncBackend, dbMutationLog } = yield* LeaderThreadCtx
     if (syncBackend === undefined) return

     yield* dbReady
@@ -633,17 +665,17 @@ const backgroundBackendPushing = ({

       yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

-
+      otelSpan?.addEvent('backend-push', {
         batchSize: queueItems.length,
         batch: TRACE_VERBOSE ? JSON.stringify(queueItems) : undefined,
       })

       // TODO handle push errors (should only happen during concurrent pull+push)
-      const pushResult = yield* syncBackend.push(queueItems).pipe(Effect.either)
+      const pushResult = yield* syncBackend.push(queueItems.map((_) => _.toGlobal())).pipe(Effect.either)

       if (pushResult._tag === 'Left') {
-
-        // wait for interrupt
+        otelSpan?.addEvent('backend-push-error', { error: pushResult.left.toString() })
+        // wait for interrupt caused by background pulling which will then restart pushing
         return yield* Effect.never
       }

@@ -653,7 +685,7 @@ const backgroundBackendPushing = ({
       for (let i = 0; i < queueItems.length; i++) {
         const mutationEventEncoded = queueItems[i]!
         yield* execSql(
-
+          dbMutationLog,
           ...updateRows({
             tableName: MUTATION_LOG_META_TABLE,
             columns: mutationLogMetaTable.sqliteDef.columns,
@@ -664,3 +696,9 @@ const backgroundBackendPushing = ({
       }
     }
   }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'))
+
+const trimChangesetRows = (db: SqliteDb, newHead: EventId.EventId) => {
+  // Since we're using the session changeset rows to query for the current head,
+  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
+  db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
+}