@livestore/common 0.3.0-dev.11 → 0.3.0-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +35 -47
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js.map +1 -1
- package/dist/derived-mutations.d.ts +4 -4
- package/dist/derived-mutations.d.ts.map +1 -1
- package/dist/derived-mutations.test.js.map +1 -1
- package/dist/devtools/devtools-bridge.d.ts +1 -2
- package/dist/devtools/devtools-bridge.d.ts.map +1 -1
- package/dist/devtools/devtools-messages.d.ts +592 -3
- package/dist/devtools/devtools-messages.d.ts.map +1 -1
- package/dist/devtools/devtools-messages.js +171 -3
- package/dist/devtools/devtools-messages.js.map +1 -1
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/init-singleton-tables.d.ts +2 -2
- package/dist/init-singleton-tables.d.ts.map +1 -1
- package/dist/init-singleton-tables.js.map +1 -1
- package/dist/leader-thread/apply-mutation.d.ts +2 -5
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +29 -41
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/connection.d.ts +4 -4
- package/dist/leader-thread/connection.d.ts.map +1 -1
- package/dist/leader-thread/connection.js +5 -5
- package/dist/leader-thread/connection.js.map +1 -1
- package/dist/leader-thread/leader-sync-processor.d.ts +2 -2
- package/dist/leader-thread/leader-sync-processor.d.ts.map +1 -1
- package/dist/leader-thread/leader-sync-processor.js +12 -20
- package/dist/leader-thread/leader-sync-processor.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +81 -37
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts +11 -12
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +14 -33
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +19 -6
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +6 -7
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +18 -24
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +16 -36
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/mutation.d.ts +2 -9
- package/dist/mutation.d.ts.map +1 -1
- package/dist/mutation.js +5 -5
- package/dist/mutation.js.map +1 -1
- package/dist/query-builder/impl.d.ts +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +5 -5
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +19 -13
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +14 -16
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +7 -15
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/MutationEvent.d.ts +80 -49
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +15 -32
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/system-tables.d.ts +26 -26
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +11 -19
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/schema-management/common.d.ts +3 -3
- package/dist/schema-management/common.d.ts.map +1 -1
- package/dist/schema-management/common.js.map +1 -1
- package/dist/schema-management/migrations.d.ts +4 -4
- package/dist/schema-management/migrations.d.ts.map +1 -1
- package/dist/schema-management/migrations.js +6 -6
- package/dist/schema-management/migrations.js.map +1 -1
- package/dist/sync/client-session-sync-processor.d.ts +4 -4
- package/dist/sync/client-session-sync-processor.d.ts.map +1 -1
- package/dist/sync/index.d.ts +1 -1
- package/dist/sync/index.d.ts.map +1 -1
- package/dist/sync/index.js +1 -1
- package/dist/sync/index.js.map +1 -1
- package/dist/sync/next/history-dag-common.d.ts +4 -1
- package/dist/sync/next/history-dag-common.d.ts.map +1 -1
- package/dist/sync/next/history-dag-common.js +1 -1
- package/dist/sync/next/history-dag-common.js.map +1 -1
- package/dist/sync/next/rebase-events.d.ts +3 -3
- package/dist/sync/next/rebase-events.d.ts.map +1 -1
- package/dist/sync/next/rebase-events.js +2 -3
- package/dist/sync/next/rebase-events.js.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.d.ts +7 -7
- package/dist/sync/next/test/mutation-fixtures.d.ts.map +1 -1
- package/dist/sync/next/test/mutation-fixtures.js +9 -3
- package/dist/sync/next/test/mutation-fixtures.js.map +1 -1
- package/dist/sync/sync.d.ts +11 -21
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +23 -45
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +12 -56
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +69 -125
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/sync/validate-push-payload.d.ts +2 -2
- package/dist/sync/validate-push-payload.d.ts.map +1 -1
- package/dist/sync/validate-push-payload.js +2 -2
- package/dist/sync/validate-push-payload.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.d.ts.map +1 -1
- package/dist/version.js +1 -1
- package/dist/version.js.map +1 -1
- package/package.json +5 -6
- package/src/adapter-types.ts +40 -39
- package/src/derived-mutations.test.ts +1 -1
- package/src/derived-mutations.ts +5 -9
- package/src/devtools/devtools-bridge.ts +1 -2
- package/src/devtools/devtools-messages.ts +243 -3
- package/src/index.ts +6 -0
- package/src/init-singleton-tables.ts +2 -2
- package/src/leader-thread/apply-mutation.ts +35 -53
- package/src/leader-thread/connection.ts +7 -7
- package/src/leader-thread/{LeaderSyncProcessor.ts → leader-sync-processor.ts} +268 -306
- package/src/leader-thread/leader-worker-devtools.ts +124 -52
- package/src/leader-thread/make-leader-thread-layer.ts +30 -62
- package/src/leader-thread/mutationlog.ts +10 -14
- package/src/leader-thread/recreate-db.ts +20 -24
- package/src/leader-thread/types.ts +20 -41
- package/src/mutation.ts +7 -17
- package/src/rehydrate-from-mutationlog.ts +26 -18
- package/src/schema/EventId.ts +9 -23
- package/src/schema/MutationEvent.ts +24 -46
- package/src/schema/system-tables.ts +11 -19
- package/src/schema-management/common.ts +3 -3
- package/src/schema-management/migrations.ts +10 -10
- package/src/sync/{ClientSessionSyncProcessor.ts → client-session-sync-processor.ts} +19 -26
- package/src/sync/index.ts +1 -1
- package/src/sync/next/history-dag-common.ts +1 -1
- package/src/sync/next/rebase-events.ts +7 -7
- package/src/sync/next/test/mutation-fixtures.ts +10 -3
- package/src/sync/sync.ts +6 -19
- package/src/sync/syncstate.test.ts +67 -127
- package/src/sync/syncstate.ts +19 -21
- package/src/sync/validate-push-payload.ts +4 -7
- package/src/version.ts +1 -1
- package/dist/devtools/devtool-message-leader.d.ts +0 -2
- package/dist/devtools/devtool-message-leader.d.ts.map +0 -1
- package/dist/devtools/devtool-message-leader.js +0 -2
- package/dist/devtools/devtool-message-leader.js.map +0 -1
- package/dist/devtools/devtools-messages-client-session.d.ts +0 -297
- package/dist/devtools/devtools-messages-client-session.d.ts.map +0 -1
- package/dist/devtools/devtools-messages-client-session.js +0 -61
- package/dist/devtools/devtools-messages-client-session.js.map +0 -1
- package/dist/devtools/devtools-messages-common.d.ts +0 -65
- package/dist/devtools/devtools-messages-common.d.ts.map +0 -1
- package/dist/devtools/devtools-messages-common.js +0 -35
- package/dist/devtools/devtools-messages-common.js.map +0 -1
- package/dist/devtools/devtools-messages-leader.d.ts +0 -261
- package/dist/devtools/devtools-messages-leader.d.ts.map +0 -1
- package/dist/devtools/devtools-messages-leader.js +0 -85
- package/dist/devtools/devtools-messages-leader.js.map +0 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +0 -37
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +0 -1
- package/dist/leader-thread/LeaderSyncProcessor.js +0 -432
- package/dist/leader-thread/LeaderSyncProcessor.js.map +0 -1
- package/dist/schema/EventId.test.d.ts +0 -2
- package/dist/schema/EventId.test.d.ts.map +0 -1
- package/dist/schema/EventId.test.js +0 -11
- package/dist/schema/EventId.test.js.map +0 -1
- package/dist/schema/MutationEvent.test.d.ts +0 -2
- package/dist/schema/MutationEvent.test.d.ts.map +0 -1
- package/dist/schema/MutationEvent.test.js +0 -2
- package/dist/schema/MutationEvent.test.js.map +0 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +0 -43
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +0 -1
- package/dist/sync/ClientSessionSyncProcessor.js +0 -141
- package/dist/sync/ClientSessionSyncProcessor.js.map +0 -1
- package/src/devtools/devtools-messages-client-session.ts +0 -109
- package/src/devtools/devtools-messages-common.ts +0 -52
- package/src/devtools/devtools-messages-leader.ts +0 -115
- package/src/schema/EventId.test.ts +0 -12
--- package/src/leader-thread/LeaderSyncProcessor.ts
+++ package/src/leader-thread/leader-sync-processor.ts
@@ -1,22 +1,22 @@
-import {
-import type { HttpClient, Scope
+import { shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
+import type { HttpClient, Scope } from '@livestore/utils/effect'
 import {
   BucketQueue,
   Deferred,
   Effect,
   Exit,
+  Fiber,
   FiberHandle,
   Option,
   OtelTracer,
-
+  Ref,
   Schema,
   Stream,
-  Subscribable,
   SubscriptionRef,
 } from '@livestore/utils/effect'
 import type * as otel from '@opentelemetry/api'

-import type {
+import type { SynchronousDatabase } from '../adapter-types.js'
 import { UnexpectedError } from '../adapter-types.js'
 import type { LiveStoreSchema, SessionChangesetMetaRow } from '../schema/mod.js'
 import {
@@ -33,114 +33,202 @@ import { sql } from '../util.js'
 import { makeApplyMutation } from './apply-mutation.js'
 import { execSql } from './connection.js'
 import { getBackendHeadFromDb, getLocalHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js'
-import type { InitialBlockingSyncContext, InitialSyncInfo,
+import type { InitialBlockingSyncContext, InitialSyncInfo, SyncProcessor } from './types.js'
 import { LeaderThreadCtx } from './types.js'

-type
-
-
-
+type ProcessorStateInit = {
+  _tag: 'init'
+}
+
+type ProcessorStateInSync = {
+  _tag: 'in-sync'
+  syncState: SyncState.SyncState
+}
+
+type ProcessorStateApplyingSyncStateAdvance = {
+  _tag: 'applying-syncstate-advance'
+  origin: 'pull' | 'push'
+  syncState: SyncState.SyncState
+  // TODO re-introduce this
+  // proccesHead: EventId
+  fiber: Fiber.RuntimeFiber<void, UnexpectedError>
+}
+
+type ProcessorState = ProcessorStateInit | ProcessorStateInSync | ProcessorStateApplyingSyncStateAdvance

 /**
- * The
- *
+ * The general idea of the sync processor is to "follow the sync state"
+ * and apply/rollback mutations as needed to the read model and mutation log.
+ * The leader sync processor is also responsible for
+ * - broadcasting mutations to client sessions via the pull queues.
+ * - pushing mutations to the sync backend
  *
- * In the
+ * In the leader sync processor, pulling always has precedence over pushing.
  *
- *
- * -
- * -
- * -
+ * External events:
+ * - Mutation pushed from client session
+ * - Mutation pushed from devtools (via pushPartial)
+ * - Mutation pulled from sync backend
  *
- *
+ * The machine can be in the following states:
+ * - in-sync: fully synced with remote, now idling
+ * - applying-syncstate-advance (with pointer to current progress in case of rebase interrupt)
  *
- *
- * -
- *
- *
- * - Processes events from the mailbox, applying mutations in batches.
- * - Controlled by a `Latch` to manage execution flow.
- * - The latch closes on pull receipt and re-opens post-pull completion.
- * - Processes up to `maxBatchSize` events per cycle.
+ * Transitions:
+ * - in-sync -> applying-syncstate-advance
+ * - applying-syncstate-advance -> in-sync
+ * - applying-syncstate-advance -> applying-syncstate-advance (need to interrupt previous operation)
  *
+ * Queuing vs interrupting behaviour:
+ * - Operations caused by pull can never be interrupted
+ * - Incoming pull can interrupt current push
+ * - Incoming pull needs to wait to previous pull to finish
+ * - Incoming push needs to wait to previous push to finish
+ *
+ * Backend pushing:
+ * - continously push to backend
+ * - only interrupted and restarted on rebase
  */
 export const makeLeaderSyncProcessor = ({
   schema,
   dbMissing,
-
+  dbLog,
   initialBlockingSyncContext,
 }: {
   schema: LiveStoreSchema
-  /** Only used to know whether we can safely query
+  /** Only used to know whether we can safely query dbLog during setup execution */
   dbMissing: boolean
-
+  dbLog: SynchronousDatabase
   initialBlockingSyncContext: InitialBlockingSyncContext
-}): Effect.Effect<
+}): Effect.Effect<SyncProcessor, UnexpectedError, Scope.Scope> =>
   Effect.gen(function* () {
-    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.
+    const syncBackendQueue = yield* BucketQueue.make<MutationEvent.AnyEncoded>()
+
+    const stateRef = yield* Ref.make<ProcessorState>({ _tag: 'init' })

-    const
+    const semaphore = yield* Effect.makeSemaphore(1)

     const isLocalEvent = (mutationEventEncoded: MutationEvent.EncodedWithMeta) => {
       const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
       return mutationDef.options.localOnly
     }

-
-    const
-
-
-
-
-
-
-
-
+    const spanRef = { current: undefined as otel.Span | undefined }
+    const applyMutationItemsRef = { current: undefined as ApplyMutationItems | undefined }
+
+    // TODO get rid of counters once Effect semaphore ordering is fixed
+    let counterRef = 0
+    let expectedCounter = 0
+
+    /*
+    TODO: refactor
+    - Pushes go directly into a Mailbox
+    - Have a worker fiber that takes from the mailbox (wouldn't need a semaphore)
+    */

-    const
-
-
+    const waitForSyncState = (counter: number): Effect.Effect<ProcessorStateInSync> =>
+      Effect.gen(function* () {
+        // console.log('waitForSyncState: waiting for semaphore', counter)
+        yield* semaphore.take(1)
+        // NOTE this is a workaround to ensure the semaphore take-order is respected
+        // TODO this needs to be fixed upstream in Effect
+        if (counter !== expectedCounter) {
+          console.log(
+            `waitForSyncState: counter mismatch (expected: ${expectedCounter}, got: ${counter}), releasing semaphore`,
+          )
+          yield* semaphore.release(1)
+          yield* Effect.yieldNow()
+          // Retrying...
+          return yield* waitForSyncState(counter)
+        }
+        // console.log('waitForSyncState: took semaphore', counter)
+        const state = yield* Ref.get(stateRef)
+        if (state._tag !== 'in-sync') {
+          return shouldNeverHappen('Expected to be in-sync but got ' + state._tag)
+        }
+        expectedCounter = counter + 1
+        return state
+      }).pipe(Effect.withSpan(`@livestore/common:leader-thread:syncing:waitForSyncState(${counter})`))

-    const push
+    const push = (newEvents: ReadonlyArray<MutationEvent.EncodedWithMeta>) =>
       Effect.gen(function* () {
+        const counter = counterRef
+        counterRef++
         // TODO validate batch
         if (newEvents.length === 0) return

-
-        yield* ctxRef.current.devtoolsPushLatch.await
-        }
+        const { connectedClientSessionPullQueues } = yield* LeaderThreadCtx

-
+        // TODO if there are multiple pending pushes, we should batch them together
+        const state = yield* waitForSyncState(counter)

-
-
+        const updateResult = SyncState.updateSyncState({
+          syncState: state.syncState,
+          payload: { _tag: 'local-push', newEvents },
+          isLocalEvent,
+          isEqualEvent: MutationEvent.isEqualEncoded,
+        })

-
-
+        if (updateResult._tag === 'rebase') {
+          return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
+        } else if (updateResult._tag === 'reject') {
+          return yield* Effect.fail(
+            InvalidPushError.make({
+              reason: {
+                _tag: 'LeaderAhead',
+                minimumExpectedId: updateResult.expectedMinimumId,
+                providedId: newEvents.at(0)!.id,
+              },
+            }),
           )
+        }

-
+        const fiber = yield* applyMutationItemsRef.current!({ batchItems: updateResult.newEvents }).pipe(Effect.fork)

-
-
-
-
-
+        yield* Ref.set(stateRef, {
+          _tag: 'applying-syncstate-advance',
+          origin: 'push',
+          syncState: updateResult.newSyncState,
+          fiber,
+        })
+
+        // console.log('setRef:applying-syncstate-advance after push', counter)
+
+        yield* connectedClientSessionPullQueues.offer({
+          payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
+          remaining: 0,
+        })
+
+        spanRef.current?.addEvent('local-push', {
+          batchSize: newEvents.length,
+          updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
+        })
+
+        // Don't sync localOnly mutations
+        const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
+          const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
+          return mutationDef.options.localOnly === false
+        })
+
+        yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
+
+        yield* fiber // Waiting for the mutation to be applied
       }).pipe(
         Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
           attributes: {
             batchSize: newEvents.length,
             batch: TRACE_VERBOSE ? newEvents : undefined,
           },
-          links:
+          links: spanRef.current
+            ? [{ _tag: 'SpanLink', span: OtelTracer.makeExternalSpan(spanRef.current.spanContext()), attributes: {} }]
+            : undefined,
         }),
       )

-    const pushPartial:
+    const pushPartial: SyncProcessor['pushPartial'] = (mutationEventEncoded_) =>
       Effect.gen(function* () {
-        const
-        if (
+        const state = yield* Ref.get(stateRef)
+        if (state._tag === 'init') return shouldNeverHappen('Not initialized')

         const mutationDef =
           schema.mutations.get(mutationEventEncoded_.mutation) ??
@@ -148,28 +236,20 @@ export const makeLeaderSyncProcessor = ({

         const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
           ...mutationEventEncoded_,
-          ...EventId.nextPair(syncState.localHead, mutationDef.options.localOnly),
+          ...EventId.nextPair(state.syncState.localHead, mutationDef.options.localOnly),
         })

         yield* push([mutationEventEncoded])
       }).pipe(Effect.catchTag('InvalidPushError', Effect.orDie))

     // Starts various background loops
-    const boot:
+    const boot: SyncProcessor['boot'] = ({ dbReady }) =>
       Effect.gen(function* () {
-        const span = yield*
-
-        const { devtools } = yield* LeaderThreadCtx
+        const span = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
+        spanRef.current = span

-
-
-          span,
-          devtoolsPullLatch: devtools.enabled ? devtools.syncBackendPullLatch : undefined,
-          devtoolsPushLatch: devtools.enabled ? devtools.syncBackendPushLatch : undefined,
-        }
-
-        const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbMutationLog)
-        const initialLocalHead = dbMissing ? EventId.ROOT : getLocalHeadFromDb(dbMutationLog)
+        const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbLog)
+        const initialLocalHead = dbMissing ? EventId.ROOT : getLocalHeadFromDb(dbLog)

         if (initialBackendHead > initialLocalHead.global) {
           return shouldNeverHappen(
@@ -177,21 +257,20 @@ export const makeLeaderSyncProcessor = ({
           )
         }

-        const pendingMutationEvents = yield* getMutationEventsSince({
-          global: initialBackendHead,
-          local: EventId.localDefault,
-        }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))))
+        const pendingMutationEvents = yield* getMutationEventsSince({ global: initialBackendHead, local: 0 })

-        const initialSyncState =
-          pending: pendingMutationEvents,
+        const initialSyncState = {
+          pending: pendingMutationEvents.map((_) => new MutationEvent.EncodedWithMeta(_)),
           // On the leader we don't need a rollback tail beyond `pending` items
           rollbackTail: [],
-          upstreamHead: { global: initialBackendHead, local:
+          upstreamHead: { global: initialBackendHead, local: 0 },
           localHead: initialLocalHead,
-        }
+        } as SyncState.SyncState

         /** State transitions need to happen atomically, so we use a Ref to track the state */
-        yield*
+        yield* Ref.set(stateRef, { _tag: 'in-sync', syncState: initialSyncState })
+
+        applyMutationItemsRef.current = yield* makeApplyMutationItems({ stateRef, semaphore })

         // Rehydrate sync queue
         if (pendingMutationEvents.length > 0) {
@@ -205,22 +284,11 @@ export const makeLeaderSyncProcessor = ({
           yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
         }

-        yield* backgroundApplyLocalPushes({
-          localPushesLatch,
-          localPushesQueue,
-          pullLatch,
-          syncStateSref,
-          syncBackendQueue,
-          schema,
-          isLocalEvent,
-          otelSpan,
-        }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
-
         const backendPushingFiberHandle = yield* FiberHandle.make()

         yield* FiberHandle.run(
           backendPushingFiberHandle,
-          backgroundBackendPushing({ dbReady, syncBackendQueue,
+          backgroundBackendPushing({ dbReady, syncBackendQueue, span }).pipe(Effect.tapCauseLogPretty),
         )

         yield* backgroundBackendPulling({
@@ -239,15 +307,14 @@ export const makeLeaderSyncProcessor = ({
             // Restart pushing fiber
             yield* FiberHandle.run(
               backendPushingFiberHandle,
-              backgroundBackendPushing({ dbReady, syncBackendQueue,
+              backgroundBackendPushing({ dbReady, syncBackendQueue, span }).pipe(Effect.tapCauseLogPretty),
             )
           }),
-
-
-
-
+          applyMutationItemsRef,
+          stateRef,
+          semaphore,
+          span,
           initialBlockingSyncContext,
-          devtoolsPullLatch: ctxRef.current?.devtoolsPullLatch,
         }).pipe(Effect.tapCauseLogPretty, Effect.forkScoped)
       }).pipe(Effect.withSpanScoped('@livestore/common:leader-thread:syncing'))

@@ -255,141 +322,42 @@ export const makeLeaderSyncProcessor = ({
       push,
       pushPartial,
       boot,
-      syncState:
-
-
-
-        return syncState
-      }),
-      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+      syncState: Effect.gen(function* () {
+        const state = yield* Ref.get(stateRef)
+        if (state._tag === 'init') return shouldNeverHappen('Not initialized')
+        return state.syncState
       }),
-    } satisfies
-  })
-
-const backgroundApplyLocalPushes = ({
-  localPushesLatch,
-  localPushesQueue,
-  pullLatch,
-  syncStateSref,
-  syncBackendQueue,
-  schema,
-  isLocalEvent,
-  otelSpan,
-}: {
-  pullLatch: Effect.Latch
-  localPushesLatch: Effect.Latch
-  localPushesQueue: BucketQueue.BucketQueue<PushQueueItem>
-  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
-  schema: LiveStoreSchema
-  isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
-  otelSpan: otel.Span | undefined
-}) =>
-  Effect.gen(function* () {
-    const { connectedClientSessionPullQueues } = yield* LeaderThreadCtx
-
-    const applyMutationItems = yield* makeApplyMutationItems
-
-    while (true) {
-      // TODO make batch size configurable
-      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10)
-      const [newEvents, deferreds] = ReadonlyArray.unzip(batchItems)
-
-      // Wait for the backend pulling to finish
-      yield* localPushesLatch.await
-
-      // Prevent the backend pulling from starting until this local push is finished
-      yield* pullLatch.close
-
-      const syncState = yield* syncStateSref
-      if (syncState === undefined) return shouldNeverHappen('Not initialized')
-
-      const updateResult = SyncState.updateSyncState({
-        syncState,
-        payload: { _tag: 'local-push', newEvents },
-        isLocalEvent,
-        isEqualEvent: MutationEvent.isEqualEncoded,
-      })
-
-      if (updateResult._tag === 'rebase') {
-        return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
-      } else if (updateResult._tag === 'reject') {
-        otelSpan?.addEvent('local-push:reject', {
-          batchSize: newEvents.length,
-          updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
-        })
-
-        const providedId = newEvents.at(0)!.id
-        const remainingEvents = yield* BucketQueue.takeAll(localPushesQueue)
-        const allDeferreds = [...deferreds, ...remainingEvents.map(([_, deferred]) => deferred)].filter(isNotUndefined)
-        yield* Effect.forEach(allDeferreds, (deferred) =>
-          Deferred.fail(
-            deferred,
-            InvalidPushError.make({
-              // TODO improve error handling so it differentiates between a push being rejected
-              // because of itself or because of another push
-              reason: {
-                _tag: 'LeaderAhead',
-                minimumExpectedId: updateResult.expectedMinimumId,
-                providedId,
-              },
-            }),
-          ),
-        )
-
-        // Allow the backend pulling to start
-        yield* pullLatch.open
-
-        // In this case we're skipping state update and down/upstream processing
-        // We've cleared the local push queue and are now waiting for new local pushes / backend pulls
-        continue
-      }
-
-      yield* SubscriptionRef.set(syncStateSref, updateResult.newSyncState)
-
-      yield* connectedClientSessionPullQueues.offer({
-        payload: { _tag: 'upstream-advance', newEvents: updateResult.newEvents },
-        remaining: 0,
-      })
-
-      otelSpan?.addEvent('local-push', {
-        batchSize: newEvents.length,
-        updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
-      })
-
-      // Don't sync localOnly mutations
-      const filteredBatch = updateResult.newEvents.filter((mutationEventEncoded) => {
-        const mutationDef = schema.mutations.get(mutationEventEncoded.mutation)!
-        return mutationDef.options.localOnly === false
-      })
-
-      yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch)
-
-      yield* applyMutationItems({ batchItems: newEvents, deferreds })
-
-      // Allow the backend pulling to start
-      yield* pullLatch.open
-    }
+    } satisfies SyncProcessor
   })

 type ApplyMutationItems = (_: {
   batchItems: ReadonlyArray<MutationEvent.EncodedWithMeta>
-  /** Indexes are aligned with `batchItems` */
-  deferreds: ReadonlyArray<Deferred.Deferred<void, InvalidPushError> | undefined> | undefined
 }) => Effect.Effect<void, UnexpectedError>

 // TODO how to handle errors gracefully
-const makeApplyMutationItems
+const makeApplyMutationItems = ({
+  stateRef,
+  semaphore,
+}: {
+  stateRef: Ref.Ref<ProcessorState>
+  semaphore: Effect.Semaphore
+}): Effect.Effect<ApplyMutationItems, UnexpectedError, LeaderThreadCtx | Scope.Scope> =>
   Effect.gen(function* () {
     const leaderThreadCtx = yield* LeaderThreadCtx
-    const {
+    const { db, dbLog } = leaderThreadCtx

     const applyMutation = yield* makeApplyMutation

-    return ({ batchItems
+    return ({ batchItems }) =>
       Effect.gen(function* () {
+        const state = yield* Ref.get(stateRef)
+        if (state._tag !== 'applying-syncstate-advance') {
+          // console.log('applyMutationItems: counter', counter)
+          return shouldNeverHappen(`Expected to be applying-syncstate-advance but got ${state._tag}`)
+        }
+
         db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
-
+        dbLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction

         yield* Effect.addFinalizer((exit) =>
           Effect.gen(function* () {
@@ -397,26 +365,34 @@ const makeApplyMutationItems: Effect.Effect<ApplyMutationItems, UnexpectedError,

             // Rollback in case of an error
             db.execute('ROLLBACK', undefined)
-
+            dbLog.execute('ROLLBACK', undefined)
           }),
         )

         for (let i = 0; i < batchItems.length; i++) {
-
+          const { meta, ...mutationEventEncoded } = batchItems[i]!
+
+          yield* applyMutation(mutationEventEncoded)

-          if (
-            yield* Deferred.succeed(
+          if (meta?.deferred) {
+            yield* Deferred.succeed(meta.deferred, void 0)
           }
+
+          // TODO re-introduce this
+          // if (i < batchItems.length - 1) {
+          //   yield* Ref.set(stateRef, { ...state, proccesHead: batchItems[i + 1]!.id })
+          // }
         }

         db.execute('COMMIT', undefined) // Commit the transaction
-
+        dbLog.execute('COMMIT', undefined) // Commit the transaction
+
+        yield* Ref.set(stateRef, { _tag: 'in-sync', syncState: state.syncState })
+        // console.log('setRef:sync after applyMutationItems', counter)
+        yield* semaphore.release(1)
       }).pipe(
-        Effect.uninterruptible,
         Effect.scoped,
-        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems',
-          attributes: { count: batchItems.length },
-        }),
+        Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems'),
         Effect.tapCauseLogPretty,
         UnexpectedError.mapToUnexpectedError,
       )
@@ -427,62 +403,55 @@ const backgroundBackendPulling = ({
   initialBackendHead,
   isLocalEvent,
   restartBackendPushing,
-
-
-
-
-  devtoolsPullLatch,
+  span,
+  stateRef,
+  applyMutationItemsRef,
+  semaphore,
   initialBlockingSyncContext,
 }: {
   dbReady: Deferred.Deferred<void>
-  initialBackendHead:
+  initialBackendHead: number
   isLocalEvent: (mutationEventEncoded: MutationEvent.EncodedWithMeta) => boolean
   restartBackendPushing: (
     filteredRebasedPending: ReadonlyArray<MutationEvent.EncodedWithMeta>,
   ) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx | HttpClient.HttpClient>
-
-
-
-
-  devtoolsPullLatch: Effect.Latch | undefined
+  span: otel.Span | undefined
+  stateRef: Ref.Ref<ProcessorState>
+  applyMutationItemsRef: { current: ApplyMutationItems | undefined }
+  semaphore: Effect.Semaphore
   initialBlockingSyncContext: InitialBlockingSyncContext
 }) =>
   Effect.gen(function* () {
-    const {
-      syncBackend,
-      dbReadModel: db,
-      dbMutationLog,
-      connectedClientSessionPullQueues,
-      schema,
-    } = yield* LeaderThreadCtx
+    const { syncBackend, db, dbLog, connectedClientSessionPullQueues, schema } = yield* LeaderThreadCtx

     if (syncBackend === undefined) return

     const cursorInfo = yield* getCursorInfo(initialBackendHead)

-    const applyMutationItems = yield* makeApplyMutationItems
-
     const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
       Effect.gen(function* () {
         if (newEvents.length === 0) return

-
-
-        }
+        const state = yield* Ref.get(stateRef)
+        if (state._tag === 'init') return shouldNeverHappen('Not initialized')

-        //
-        yield* localPushesLatch.close
+        // const counter = state.counter + 1

-
-
-
-
-
+        if (state._tag === 'applying-syncstate-advance') {
+          if (state.origin === 'push') {
+            yield* Fiber.interrupt(state.fiber)
+            // In theory we should force-take the semaphore here, but as it's still taken,
+            // it's already in the right state we want it to be in
+          } else {
+            // Wait for previous advance to finish
+            yield* semaphore.take(1)
+          }
+        }

         const trimRollbackUntil = newEvents.at(-1)!.id

         const updateResult = SyncState.updateSyncState({
-          syncState,
+          syncState: state.syncState,
           payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
           isLocalEvent,
           isEqualEvent: MutationEvent.isEqualEncoded,
@@ -495,10 +464,10 @@ const backgroundBackendPulling = ({

         const newBackendHead = newEvents.at(-1)!.id

-        updateBackendHead(
+        updateBackendHead(dbLog, newBackendHead)

         if (updateResult._tag === 'rebase') {
-
+          span?.addEvent('backend-pull:rebase', {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
            rollbackCount: updateResult.eventsToRollback.length,
@@ -512,7 +481,7 @@ const backgroundBackendPulling = ({
           yield* restartBackendPushing(filteredRebasedPending)

           if (updateResult.eventsToRollback.length > 0) {
-            yield* rollback({ db,
+            yield* rollback({ db, dbLog, eventIdsToRollback: updateResult.eventsToRollback.map((_) => _.id) })
           }

           yield* connectedClientSessionPullQueues.offer({
@@ -525,7 +494,7 @@ const backgroundBackendPulling = ({
             remaining,
           })
         } else {
-
+          span?.addEvent('backend-pull:advance', {
            newEventsCount: newEvents.length,
            updateResult: TRACE_VERBOSE ? JSON.stringify(updateResult) : undefined,
          })
@@ -536,16 +505,17 @@ const backgroundBackendPulling = ({
           })
         }

-
+        const fiber = yield* applyMutationItemsRef.current!({
+          batchItems: updateResult.newEvents,
+        }).pipe(Effect.fork)

-        yield*
-
-
-
-
-
-
-        }
+        yield* Ref.set(stateRef, {
+          _tag: 'applying-syncstate-advance',
+          origin: 'pull',
+          syncState: updateResult.newSyncState,
+          fiber,
+        })
+        // console.log('setRef:applying-syncstate-advance after backgroundBackendPulling', -1)
       })

     yield* syncBackend.pull(cursorInfo).pipe(
@@ -568,7 +538,7 @@ const backgroundBackendPulling = ({
           yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

           yield* onNewPullChunk(
-            batch.map((_) => MutationEvent.EncodedWithMeta
+            batch.map((_) => new MutationEvent.EncodedWithMeta(_.mutationEventEncoded)),
             remaining,
           )

@@ -582,11 +552,11 @@ const backgroundBackendPulling = ({

 const rollback = ({
   db,
-
+  dbLog,
   eventIdsToRollback,
 }: {
-  db:
-
+  db: SynchronousDatabase
+  dbLog: SynchronousDatabase
   eventIdsToRollback: EventId.EventId[]
 }) =>
   Effect.gen(function* () {
@@ -600,9 +570,7 @@ const rollback = ({
     // Apply changesets in reverse order
     for (let i = rollbackEvents.length - 1; i >= 0; i--) {
       const { changeset } = rollbackEvents[i]!
-
-        db.makeChangeset(changeset).invert().apply()
-      }
+      db.makeChangeset(changeset).invert().apply()
     }

     // Delete the changeset rows
@@ -611,7 +579,7 @@ const rollback = ({
     )

     // Delete the mutation log rows
-
+    dbLog.execute(
       sql`DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idLocal) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.local})`).join(', ')})`,
     )
   }).pipe(
@@ -620,9 +588,9 @@ const rollback = ({
     }),
   )

-const getCursorInfo = (remoteHead:
+const getCursorInfo = (remoteHead: number) =>
   Effect.gen(function* () {
-    const {
+    const { dbLog } = yield* LeaderThreadCtx

     if (remoteHead === EventId.ROOT.global) return Option.none()

@@ -631,13 +599,13 @@ const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
     }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head)

     const syncMetadataOption = yield* Effect.sync(() =>
-
+      dbLog.select<{ syncMetadataJson: string }>(
        sql`SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idLocal ASC LIMIT 1`,
      ),
    ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)

     return Option.some({
-      cursor: { global: remoteHead, local:
+      cursor: { global: remoteHead, local: 0 },
       metadata: syncMetadataOption,
     }) satisfies InitialSyncInfo
   }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
@@ -645,14 +613,14 @@ const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
 const backgroundBackendPushing = ({
   dbReady,
   syncBackendQueue,
-
+  span,
 }: {
   dbReady: Deferred.Deferred<void>
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.
-
+  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.AnyEncoded>
+  span: otel.Span | undefined
 }) =>
   Effect.gen(function* () {
-    const { syncBackend,
+    const { syncBackend, dbLog } = yield* LeaderThreadCtx
     if (syncBackend === undefined) return

     yield* dbReady
@@ -665,17 +633,17 @@ const backgroundBackendPushing = ({

       yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

-
+      span?.addEvent('backend-push', {
        batchSize: queueItems.length,
        batch: TRACE_VERBOSE ? JSON.stringify(queueItems) : undefined,
      })

       // TODO handle push errors (should only happen during concurrent pull+push)
-      const pushResult = yield* syncBackend.push(queueItems
+      const pushResult = yield* syncBackend.push(queueItems).pipe(Effect.either)

       if (pushResult._tag === 'Left') {
-
-        // wait for interrupt
+        span?.addEvent('backend-push-error', { error: pushResult.left.toString() })
+        // wait for interrupt and restarting of pushing
         return yield* Effect.never
       }

@@ -685,7 +653,7 @@ const backgroundBackendPushing = ({
       for (let i = 0; i < queueItems.length; i++) {
         const mutationEventEncoded = queueItems[i]!
         yield* execSql(
-
+          dbLog,
           ...updateRows({
             tableName: MUTATION_LOG_META_TABLE,
             columns: mutationLogMetaTable.sqliteDef.columns,
@@ -696,9 +664,3 @@ const backgroundBackendPushing = ({
       }
     }
   }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'))
-
-const trimChangesetRows = (db: SqliteDb, newHead: EventId.EventId) => {
-  // Since we're using the session changeset rows to query for the current head,
-  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
-  db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
-}
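As a reading aid for the hunks above: the rewritten leader-sync-processor.ts replaces the latch/queue based backgroundApplyLocalPushes loop with the explicit ProcessorState machine documented in its doc comment (init -> in-sync <-> applying-syncstate-advance). Below is a minimal, dependency-free TypeScript sketch of that state machine. It is not the Effect-based implementation from the diff: the fiber is modelled as a plain Promise, the Effect semaphore as a FIFO mutex, and SyncState is reduced to two counters; the names LeaderSyncSketch and Mutex are illustrative only.

// Hypothetical sketch only; mirrors the state machine described in the diff's doc comment.
type SyncState = { upstreamHead: number; localHead: number }

type ProcessorState =
  | { _tag: 'init' }
  | { _tag: 'in-sync'; syncState: SyncState }
  | { _tag: 'applying-syncstate-advance'; origin: 'pull' | 'push'; syncState: SyncState; work: Promise<void> }

// FIFO mutex standing in for the Effect semaphore that serialises pushes/pulls.
class Mutex {
  private tail: Promise<void> = Promise.resolve()
  async lock(): Promise<() => void> {
    let release!: () => void
    const next = new Promise<void>((resolve) => (release = resolve))
    const prev = this.tail
    this.tail = next
    await prev
    return release
  }
}

class LeaderSyncSketch {
  private state: ProcessorState = { _tag: 'init' }
  private readonly mutex = new Mutex()

  boot(initial: SyncState): void {
    this.state = { _tag: 'in-sync', syncState: initial }
  }

  // Local push: queues behind any in-flight advance (mutex is FIFO), moves the
  // machine to 'applying-syncstate-advance', and returns to 'in-sync' when done.
  async push(newEventCount: number): Promise<void> {
    const release = await this.mutex.lock()
    const state = this.state
    if (state._tag !== 'in-sync') {
      release()
      throw new Error(`expected in-sync but got ${state._tag}`)
    }
    const advanced: SyncState = { ...state.syncState, localHead: state.syncState.localHead + newEventCount }
    const work = this.applyBatch(advanced, release)
    this.state = { _tag: 'applying-syncstate-advance', origin: 'push', syncState: advanced, work }
    await work
  }

  private async applyBatch(next: SyncState, release: () => void): Promise<void> {
    await Promise.resolve() // stands in for applying the batch to the read model + mutation log
    this.state = { _tag: 'in-sync', syncState: next }
    release()
  }
}

A pull handler would additionally be allowed to interrupt a push-origin advance instead of queuing behind it (see onNewPullChunk in the diff, which interrupts the fiber when state.origin === 'push'); that branch is left out to keep the sketch short.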