@livestore/common 0.0.0-snapshot-2b8a9de3ec1a701aca891ebc2c98eb328274ae9e → 0.0.0-snapshot-2c861249e50661661613204300b1fc0d902c2e46
This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/.tsbuildinfo +1 -1
- package/dist/__tests__/fixture.d.ts +83 -221
- package/dist/__tests__/fixture.d.ts.map +1 -1
- package/dist/__tests__/fixture.js +33 -11
- package/dist/__tests__/fixture.js.map +1 -1
- package/dist/adapter-types.d.ts +36 -22
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js +20 -8
- package/dist/adapter-types.js.map +1 -1
- package/dist/debug-info.d.ts.map +1 -1
- package/dist/debug-info.js +1 -0
- package/dist/debug-info.js.map +1 -1
- package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
- package/dist/devtools/devtools-messages-common.d.ts +13 -6
- package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-common.js +6 -0
- package/dist/devtools/devtools-messages-common.js.map +1 -1
- package/dist/devtools/devtools-messages-leader.d.ts +46 -46
- package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-leader.js +12 -13
- package/dist/devtools/devtools-messages-leader.js.map +1 -1
- package/dist/index.d.ts +2 -5
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -5
- package/dist/index.js.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +34 -12
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.js +284 -226
- package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
- package/dist/leader-thread/apply-event.d.ts +16 -0
- package/dist/leader-thread/apply-event.d.ts.map +1 -0
- package/dist/leader-thread/apply-event.js +122 -0
- package/dist/leader-thread/apply-event.js.map +1 -0
- package/dist/leader-thread/eventlog.d.ts +27 -0
- package/dist/leader-thread/eventlog.d.ts.map +1 -0
- package/dist/leader-thread/eventlog.js +123 -0
- package/dist/leader-thread/eventlog.js.map +1 -0
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +22 -23
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts +16 -4
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +36 -41
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mod.d.ts +1 -1
- package/dist/leader-thread/mod.d.ts.map +1 -1
- package/dist/leader-thread/mod.js +1 -1
- package/dist/leader-thread/mod.js.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +7 -7
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +40 -25
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/materializer-helper.d.ts +23 -0
- package/dist/materializer-helper.d.ts.map +1 -0
- package/dist/materializer-helper.js +70 -0
- package/dist/materializer-helper.js.map +1 -0
- package/dist/query-builder/api.d.ts +55 -50
- package/dist/query-builder/api.d.ts.map +1 -1
- package/dist/query-builder/api.js +3 -5
- package/dist/query-builder/api.js.map +1 -1
- package/dist/query-builder/astToSql.d.ts.map +1 -1
- package/dist/query-builder/astToSql.js +59 -37
- package/dist/query-builder/astToSql.js.map +1 -1
- package/dist/query-builder/impl.d.ts +2 -3
- package/dist/query-builder/impl.d.ts.map +1 -1
- package/dist/query-builder/impl.js +47 -43
- package/dist/query-builder/impl.js.map +1 -1
- package/dist/query-builder/impl.test.d.ts +86 -1
- package/dist/query-builder/impl.test.d.ts.map +1 -1
- package/dist/query-builder/impl.test.js +223 -36
- package/dist/query-builder/impl.test.js.map +1 -1
- package/dist/rehydrate-from-eventlog.d.ts +15 -0
- package/dist/rehydrate-from-eventlog.d.ts.map +1 -0
- package/dist/{rehydrate-from-mutationlog.js → rehydrate-from-eventlog.js} +27 -28
- package/dist/rehydrate-from-eventlog.js.map +1 -0
- package/dist/schema/EventDef.d.ts +136 -0
- package/dist/schema/EventDef.d.ts.map +1 -0
- package/dist/schema/EventDef.js +58 -0
- package/dist/schema/EventDef.js.map +1 -0
- package/dist/schema/EventId.d.ts +10 -1
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +24 -3
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/LiveStoreEvent.d.ts +255 -0
- package/dist/schema/LiveStoreEvent.d.ts.map +1 -0
- package/dist/schema/LiveStoreEvent.js +118 -0
- package/dist/schema/LiveStoreEvent.js.map +1 -0
- package/dist/schema/client-document-def.d.ts +223 -0
- package/dist/schema/client-document-def.d.ts.map +1 -0
- package/dist/schema/client-document-def.js +164 -0
- package/dist/schema/client-document-def.js.map +1 -0
- package/dist/schema/client-document-def.test.d.ts +2 -0
- package/dist/schema/client-document-def.test.d.ts.map +1 -0
- package/dist/schema/client-document-def.test.js +161 -0
- package/dist/schema/client-document-def.test.js.map +1 -0
- package/dist/schema/db-schema/dsl/mod.d.ts.map +1 -1
- package/dist/schema/events.d.ts +2 -0
- package/dist/schema/events.d.ts.map +1 -0
- package/dist/schema/events.js +2 -0
- package/dist/schema/events.js.map +1 -0
- package/dist/schema/mod.d.ts +4 -3
- package/dist/schema/mod.d.ts.map +1 -1
- package/dist/schema/mod.js +4 -3
- package/dist/schema/mod.js.map +1 -1
- package/dist/schema/schema.d.ts +26 -22
- package/dist/schema/schema.d.ts.map +1 -1
- package/dist/schema/schema.js +45 -43
- package/dist/schema/schema.js.map +1 -1
- package/dist/schema/sqlite-state.d.ts +12 -0
- package/dist/schema/sqlite-state.d.ts.map +1 -0
- package/dist/schema/sqlite-state.js +36 -0
- package/dist/schema/sqlite-state.js.map +1 -0
- package/dist/schema/system-tables.d.ts +121 -85
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +68 -43
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/schema/table-def.d.ts +26 -96
- package/dist/schema/table-def.d.ts.map +1 -1
- package/dist/schema/table-def.js +14 -64
- package/dist/schema/table-def.js.map +1 -1
- package/dist/schema/view.d.ts +3 -0
- package/dist/schema/view.d.ts.map +1 -0
- package/dist/schema/view.js +3 -0
- package/dist/schema/view.js.map +1 -0
- package/dist/schema-management/common.d.ts +4 -4
- package/dist/schema-management/common.d.ts.map +1 -1
- package/dist/schema-management/migrations.d.ts.map +1 -1
- package/dist/schema-management/migrations.js +6 -6
- package/dist/schema-management/migrations.js.map +1 -1
- package/dist/schema-management/validate-mutation-defs.d.ts +3 -3
- package/dist/schema-management/validate-mutation-defs.d.ts.map +1 -1
- package/dist/schema-management/validate-mutation-defs.js +17 -17
- package/dist/schema-management/validate-mutation-defs.js.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +16 -8
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.js +50 -43
- package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
- package/dist/sync/next/facts.d.ts +19 -19
- package/dist/sync/next/facts.d.ts.map +1 -1
- package/dist/sync/next/facts.js +2 -2
- package/dist/sync/next/facts.js.map +1 -1
- package/dist/sync/next/history-dag-common.d.ts +3 -3
- package/dist/sync/next/history-dag-common.d.ts.map +1 -1
- package/dist/sync/next/history-dag-common.js +1 -1
- package/dist/sync/next/history-dag-common.js.map +1 -1
- package/dist/sync/next/history-dag.js +1 -1
- package/dist/sync/next/history-dag.js.map +1 -1
- package/dist/sync/next/rebase-events.d.ts +7 -7
- package/dist/sync/next/rebase-events.d.ts.map +1 -1
- package/dist/sync/next/rebase-events.js +5 -5
- package/dist/sync/next/rebase-events.js.map +1 -1
- package/dist/sync/next/test/compact-events.calculator.test.js +38 -33
- package/dist/sync/next/test/compact-events.calculator.test.js.map +1 -1
- package/dist/sync/next/test/compact-events.test.js +71 -71
- package/dist/sync/next/test/compact-events.test.js.map +1 -1
- package/dist/sync/next/test/{mutation-fixtures.d.ts → event-fixtures.d.ts} +25 -25
- package/dist/sync/next/test/event-fixtures.d.ts.map +1 -0
- package/dist/sync/next/test/{mutation-fixtures.js → event-fixtures.js} +60 -25
- package/dist/sync/next/test/event-fixtures.js.map +1 -0
- package/dist/sync/next/test/mod.d.ts +1 -1
- package/dist/sync/next/test/mod.d.ts.map +1 -1
- package/dist/sync/next/test/mod.js +1 -1
- package/dist/sync/next/test/mod.js.map +1 -1
- package/dist/sync/sync.d.ts +8 -7
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +69 -93
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +143 -146
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +208 -289
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/sync/validate-push-payload.d.ts +2 -2
- package/dist/sync/validate-push-payload.d.ts.map +1 -1
- package/dist/sync/validate-push-payload.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +2 -2
- package/src/__tests__/fixture.ts +36 -15
- package/src/adapter-types.ts +34 -23
- package/src/debug-info.ts +1 -0
- package/src/devtools/devtools-messages-common.ts +9 -0
- package/src/devtools/devtools-messages-leader.ts +14 -15
- package/src/index.ts +2 -5
- package/src/leader-thread/LeaderSyncProcessor.ts +485 -389
- package/src/leader-thread/apply-event.ts +197 -0
- package/src/leader-thread/eventlog.ts +199 -0
- package/src/leader-thread/leader-worker-devtools.ts +23 -25
- package/src/leader-thread/make-leader-thread-layer.ts +68 -61
- package/src/leader-thread/mod.ts +1 -1
- package/src/leader-thread/recreate-db.ts +7 -8
- package/src/leader-thread/types.ts +39 -29
- package/src/materializer-helper.ts +110 -0
- package/src/query-builder/api.ts +76 -102
- package/src/query-builder/astToSql.ts +68 -39
- package/src/query-builder/impl.test.ts +239 -42
- package/src/query-builder/impl.ts +66 -54
- package/src/{rehydrate-from-mutationlog.ts → rehydrate-from-eventlog.ts} +37 -40
- package/src/schema/EventDef.ts +216 -0
- package/src/schema/EventId.ts +30 -4
- package/src/schema/LiveStoreEvent.ts +239 -0
- package/src/schema/client-document-def.test.ts +188 -0
- package/src/schema/client-document-def.ts +436 -0
- package/src/schema/db-schema/dsl/mod.ts +0 -1
- package/src/schema/events.ts +1 -0
- package/src/schema/mod.ts +4 -3
- package/src/schema/schema.ts +78 -68
- package/src/schema/sqlite-state.ts +62 -0
- package/src/schema/system-tables.ts +54 -46
- package/src/schema/table-def.ts +51 -209
- package/src/schema/view.ts +2 -0
- package/src/schema-management/common.ts +4 -4
- package/src/schema-management/migrations.ts +8 -9
- package/src/schema-management/validate-mutation-defs.ts +22 -24
- package/src/sync/ClientSessionSyncProcessor.ts +66 -53
- package/src/sync/next/facts.ts +31 -32
- package/src/sync/next/history-dag-common.ts +4 -4
- package/src/sync/next/history-dag.ts +1 -1
- package/src/sync/next/rebase-events.ts +13 -13
- package/src/sync/next/test/compact-events.calculator.test.ts +45 -45
- package/src/sync/next/test/compact-events.test.ts +73 -73
- package/src/sync/next/test/event-fixtures.ts +219 -0
- package/src/sync/next/test/mod.ts +1 -1
- package/src/sync/sync.ts +9 -12
- package/src/sync/syncstate.test.ts +236 -323
- package/src/sync/syncstate.ts +218 -203
- package/src/sync/validate-push-payload.ts +2 -2
- package/src/version.ts +1 -1
- package/tsconfig.json +1 -0
- package/dist/derived-mutations.d.ts +0 -109
- package/dist/derived-mutations.d.ts.map +0 -1
- package/dist/derived-mutations.js +0 -54
- package/dist/derived-mutations.js.map +0 -1
- package/dist/derived-mutations.test.d.ts +0 -2
- package/dist/derived-mutations.test.d.ts.map +0 -1
- package/dist/derived-mutations.test.js +0 -93
- package/dist/derived-mutations.test.js.map +0 -1
- package/dist/init-singleton-tables.d.ts +0 -4
- package/dist/init-singleton-tables.d.ts.map +0 -1
- package/dist/init-singleton-tables.js +0 -16
- package/dist/init-singleton-tables.js.map +0 -1
- package/dist/leader-thread/apply-mutation.d.ts +0 -11
- package/dist/leader-thread/apply-mutation.d.ts.map +0 -1
- package/dist/leader-thread/apply-mutation.js +0 -115
- package/dist/leader-thread/apply-mutation.js.map +0 -1
- package/dist/leader-thread/mutationlog.d.ts +0 -11
- package/dist/leader-thread/mutationlog.d.ts.map +0 -1
- package/dist/leader-thread/mutationlog.js +0 -31
- package/dist/leader-thread/mutationlog.js.map +0 -1
- package/dist/leader-thread/pull-queue-set.d.ts +0 -7
- package/dist/leader-thread/pull-queue-set.d.ts.map +0 -1
- package/dist/leader-thread/pull-queue-set.js +0 -48
- package/dist/leader-thread/pull-queue-set.js.map +0 -1
- package/dist/mutation.d.ts +0 -20
- package/dist/mutation.d.ts.map +0 -1
- package/dist/mutation.js +0 -68
- package/dist/mutation.js.map +0 -1
- package/dist/query-info.d.ts +0 -41
- package/dist/query-info.d.ts.map +0 -1
- package/dist/query-info.js +0 -7
- package/dist/query-info.js.map +0 -1
- package/dist/rehydrate-from-mutationlog.d.ts +0 -14
- package/dist/rehydrate-from-mutationlog.d.ts.map +0 -1
- package/dist/rehydrate-from-mutationlog.js.map +0 -1
- package/dist/schema/MutationEvent.d.ts +0 -202
- package/dist/schema/MutationEvent.d.ts.map +0 -1
- package/dist/schema/MutationEvent.js +0 -105
- package/dist/schema/MutationEvent.js.map +0 -1
- package/dist/schema/mutations.d.ts +0 -115
- package/dist/schema/mutations.d.ts.map +0 -1
- package/dist/schema/mutations.js +0 -42
- package/dist/schema/mutations.js.map +0 -1
- package/dist/sync/next/test/mutation-fixtures.d.ts.map +0 -1
- package/dist/sync/next/test/mutation-fixtures.js.map +0 -1
- package/src/derived-mutations.test.ts +0 -101
- package/src/derived-mutations.ts +0 -170
- package/src/init-singleton-tables.ts +0 -24
- package/src/leader-thread/apply-mutation.ts +0 -187
- package/src/leader-thread/mutationlog.ts +0 -49
- package/src/leader-thread/pull-queue-set.ts +0 -67
- package/src/mutation.ts +0 -108
- package/src/query-info.ts +0 -83
- package/src/schema/MutationEvent.ts +0 -224
- package/src/schema/mutations.ts +0 -193
- package/src/sync/next/test/mutation-fixtures.ts +0 -228
Source diff: package/src/leader-thread/LeaderSyncProcessor.ts (+485 -389)

@@ -1,15 +1,14 @@
 import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils'
-import type { HttpClient, Scope, Tracer } from '@livestore/utils/effect'
+import type { HttpClient, Runtime, Scope, Tracer } from '@livestore/utils/effect'
 import {
   BucketQueue,
   Deferred,
   Effect,
   Exit,
   FiberHandle,
-  Option,
   OtelTracer,
+  Queue,
   ReadonlyArray,
-  Schema,
   Stream,
   Subscribable,
   SubscriptionRef,
@@ -18,89 +17,120 @@ import type * as otel from '@opentelemetry/api'
 
 import type { SqliteDb } from '../adapter-types.js'
 import { UnexpectedError } from '../adapter-types.js'
-import type { LiveStoreSchema …
+import type { LiveStoreSchema } from '../schema/mod.js'
 import {
   EventId,
-…
-…
-…
-  mutationLogMetaTable,
+  getEventDef,
+  LEADER_MERGE_COUNTER_TABLE,
+  LiveStoreEvent,
   SESSION_CHANGESET_META_TABLE,
 } from '../schema/mod.js'
-import { updateRows } from '../sql-queries/index.js'
 import { LeaderAheadError } from '../sync/sync.js'
 import * as SyncState from '../sync/syncstate.js'
 import { sql } from '../util.js'
-import { …
-import …
-import { …
-import type { InitialBlockingSyncContext, InitialSyncInfo, LeaderSyncProcessor } from './types.js'
+import { rollback } from './apply-event.js'
+import * as Eventlog from './eventlog.js'
+import type { InitialBlockingSyncContext, LeaderSyncProcessor } from './types.js'
 import { LeaderThreadCtx } from './types.js'
 
-export const BACKEND_PUSH_BATCH_SIZE = 50
-…
 type LocalPushQueueItem = [
-…
+  event: LiveStoreEvent.EncodedWithMeta,
   deferred: Deferred.Deferred<void, LeaderAheadError> | undefined,
+  /** Used to determine whether the batch has become invalid due to a rejected local push batch */
   generation: number,
 ]
 
 /**
- * The LeaderSyncProcessor manages synchronization of …
+ * The LeaderSyncProcessor manages synchronization of events between
  * the local state and the sync backend, ensuring efficient and orderly processing.
  *
  * In the LeaderSyncProcessor, pulling always has precedence over pushing.
  *
  * Responsibilities:
- * - Queueing incoming local …
- * - Broadcasting …
- * - Pushing …
+ * - Queueing incoming local events in a localPushesQueue.
+ * - Broadcasting events to client sessions via pull queues.
+ * - Pushing events to the sync backend.
 *
 * Notes:
 *
 * local push processing:
- * - …
+ * - localPushesQueue:
 *   - Maintains events in ascending order.
 *   - Uses `Deferred` objects to resolve/reject events based on application success.
- *   - Processes events from the …
+ *   - Processes events from the queue, applying events in batches.
 *   - Controlled by a `Latch` to manage execution flow.
 *   - The latch closes on pull receipt and re-opens post-pull completion.
 *   - Processes up to `maxBatchSize` events per cycle.
 *
+ * Currently we're advancing the db read model and eventlog in lockstep, but we could also decouple this in the future
+ *
+ * Tricky concurrency scenarios:
+ * - Queued local push batches becoming invalid due to a prior local push item being rejected.
+ *   Solution: Introduce a generation number for local push batches which is used to filter out old batches items in case of rejection.
+ *
 */
 export const makeLeaderSyncProcessor = ({
   schema,
-…
-…
-…
+  dbEventlogMissing,
+  dbEventlog,
+  dbReadModel,
+  dbReadModelMissing,
   initialBlockingSyncContext,
   onError,
+  params,
+  testing,
 }: {
   schema: LiveStoreSchema
-  /** Only used to know whether we can safely query …
-…
-…
-…
+  /** Only used to know whether we can safely query dbEventlog during setup execution */
+  dbEventlogMissing: boolean
+  dbEventlog: SqliteDb
+  dbReadModel: SqliteDb
+  /** Only used to know whether we can safely query dbReadModel during setup execution */
+  dbReadModelMissing: boolean
   initialBlockingSyncContext: InitialBlockingSyncContext
   onError: 'shutdown' | 'ignore'
+  params: {
+    /**
+     * @default 10
+     */
+    localPushBatchSize?: number
+    /**
+     * @default 50
+     */
+    backendPushBatchSize?: number
+  }
+  testing: {
+    delays?: {
+      localPushProcessing?: Effect.Effect<void>
+    }
+  }
 }): Effect.Effect<LeaderSyncProcessor, UnexpectedError, Scope.Scope> =>
   Effect.gen(function* () {
-    const …
+    const syncBackendPushQueue = yield* BucketQueue.make<LiveStoreEvent.EncodedWithMeta>()
+    const localPushBatchSize = params.localPushBatchSize ?? 10
+    const backendPushBatchSize = params.backendPushBatchSize ?? 50
 
     const syncStateSref = yield* SubscriptionRef.make<SyncState.SyncState | undefined>(undefined)
 
-    const isClientEvent = (…
-    const …
-      return …
+    const isClientEvent = (eventEncoded: LiveStoreEvent.EncodedWithMeta) => {
+      const eventDef = getEventDef(schema, eventEncoded.name)
+      return eventDef.eventDef.options.clientOnly
    }
 
+    const connectedClientSessionPullQueues = yield* makePullQueueSet
+
     /**
      * Tracks generations of queued local push events.
-     * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
+     * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
      * even if they would be valid on their own.
      */
+    // TODO get rid of this in favour of the `mergeGeneration` event id field
    const currentLocalPushGenerationRef = { current: 0 }
 
+    type MergeCounter = number
+    const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) }
+    const mergePayloads = new Map<MergeCounter, typeof SyncState.PayloadUpstream.Type>()
+
    // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
    const ctxRef = {
      current: undefined as
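Several of the rewritten code paths hinge on the `clientOnly` flag that `getEventDef` resolves from the schema: client-only events feed the local read model but are never pushed to the sync backend. A standalone sketch of that partitioning (the `EventDef` shape and the `Map`-based schema here are simplifications for illustration, not the package's actual types):

```ts
// Sketch: clientOnly filtering, modeled with a plain Map instead of LiveStoreSchema.
type EventDef = { name: string; clientOnly: boolean }

const defs = new Map<string, EventDef>([
  ['todoCreated', { name: 'todoCreated', clientOnly: false }],
  ['uiStateSet', { name: 'uiStateSet', clientOnly: true }],
])

const getEventDef = (name: string): EventDef => {
  const def = defs.get(name)
  if (def === undefined) throw new Error(`Unknown event: ${name}`)
  return def
}

// Mirrors `isClientEvent` above.
const isClientEvent = (event: { name: string }) => getEventDef(event.name).clientOnly

// Mirrors the "Don't sync clientOnly events" filters: only global events reach the backend push queue.
const pending = [{ name: 'todoCreated' }, { name: 'uiStateSet' }]
const globalPendingEvents = pending.filter((event) => isClientEvent(event) === false)
console.log(globalPendingEvents) // → [{ name: 'todoCreated' }]
```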
@@ -109,6 +139,7 @@ export const makeLeaderSyncProcessor = ({
       otelSpan: otel.Span | undefined
       span: Tracer.Span
       devtoolsLatch: Effect.Latch | undefined
+      runtime: Runtime.Runtime<LeaderThreadCtx>
     },
   }
 
@@ -116,23 +147,28 @@ export const makeLeaderSyncProcessor = ({
     const localPushesLatch = yield* Effect.makeLatch(true)
     const pullLatch = yield* Effect.makeLatch(true)
 
+    /**
+     * Additionally to the `syncStateSref` we also need the `pushHeadRef` in order to prevent old/duplicate
+     * events from being pushed in a scenario like this:
+     * - client session A pushes e1
+     * - leader sync processor takes a bit and hasn't yet taken e1 from the localPushesQueue
+     * - client session B also pushes e1 (which should be rejected)
+     *
+     * Thus the purpoe of the pushHeadRef is the guard the integrity of the local push queue
+     */
+    const pushHeadRef = { current: EventId.ROOT }
+    const advancePushHead = (eventId: EventId.EventId) => {
+      pushHeadRef.current = EventId.max(pushHeadRef.current, eventId)
+    }
+
+    // NOTE: New events are only pushed to sync backend after successful local push processing
     const push: LeaderSyncProcessor['push'] = (newEvents, options) =>
       Effect.gen(function* () {
-        // TODO validate batch
         if (newEvents.length === 0) return
 
-…
-…
-…
-        // return
-        // }
-…
-        if (clientId === 'client-b') {
-          // console.log(
-          //   'push from client session',
-          //   newEvents.map((item) => item.toJSON()),
-          // )
-        }
+        yield* validatePushBatch(newEvents, pushHeadRef.current)
+
+        advancePushHead(newEvents.at(-1)!.id)
 
         const waitForProcessing = options?.waitForProcessing ?? false
         const generation = currentLocalPushGenerationRef.current
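The comment above motivates the new `pushHeadRef`; the guard itself is small enough to model in isolation. A sketch with scalar event ids in place of the `{ global, client }` pairs, showing why advancing the head at enqueue time (rather than at processing time) is what rejects the duplicate push from session B:

```ts
// Sketch: push-head guard with simplified numeric event ids.
let pushHead = 0

const advancePushHead = (eventId: number) => {
  pushHead = Math.max(pushHead, eventId)
}

// Stand-in for `validatePushBatch`: a batch must start strictly after the current push head.
const push = (batch: number[], session: string) => {
  if (batch.length === 0) return
  if (batch[0]! <= pushHead) {
    console.log(`${session}: rejected (LeaderAheadError), push head is already at ${pushHead}`)
    return
  }
  advancePushHead(batch[batch.length - 1]!)
  console.log(`${session}: accepted, push head now ${pushHead}`)
}

push([1], 'session A') // accepted immediately, before the leader processes e1
push([1], 'session B') // rejected: the head advanced when A's batch was enqueued
```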
@@ -141,20 +177,18 @@ export const makeLeaderSyncProcessor = ({
          const deferreds = yield* Effect.forEach(newEvents, () => Deferred.make<void, LeaderAheadError>())
 
          const items = newEvents.map(
-            (…
+            (eventEncoded, i) => [eventEncoded, deferreds[i], generation] as LocalPushQueueItem,
          )
 
          yield* BucketQueue.offerAll(localPushesQueue, items)
 
          yield* Effect.all(deferreds)
        } else {
-          const items = newEvents.map(
-            (mutationEventEncoded) => [mutationEventEncoded, undefined, generation] as LocalPushQueueItem,
-          )
+          const items = newEvents.map((eventEncoded) => [eventEncoded, undefined, generation] as LocalPushQueueItem)
          yield* BucketQueue.offerAll(localPushesQueue, items)
        }
      }).pipe(
-        Effect.withSpan('@livestore/common:…
+        Effect.withSpan('@livestore/common:LeaderSyncProcessor:push', {
          attributes: {
            batchSize: newEvents.length,
            batch: TRACE_VERBOSE ? newEvents : undefined,
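When `waitForProcessing` is set, `push` pairs every event with a `Deferred` that the processing loop resolves (or fails with `LeaderAheadError`) once the event has been applied. The same shape with plain Promises, as a rough non-Effect sketch:

```ts
// Sketch: waitForProcessing via per-event deferreds, using plain Promises.
type Deferred<T> = { promise: Promise<T>; resolve: (value: T) => void; reject: (error: unknown) => void }

const makeDeferred = <T>(): Deferred<T> => {
  let resolve!: (value: T) => void
  let reject!: (error: unknown) => void
  const promise = new Promise<T>((res, rej) => ((resolve = res), (reject = rej)))
  return { promise, resolve, reject }
}

const queue: { event: string; deferred: Deferred<void> | undefined }[] = []

const push = async (events: string[], options?: { waitForProcessing?: boolean }) => {
  if (options?.waitForProcessing ?? false) {
    const deferreds = events.map(() => makeDeferred<void>())
    events.forEach((event, i) => queue.push({ event, deferred: deferreds[i] }))
    await Promise.all(deferreds.map((d) => d.promise)) // resolved later by the processing loop
  } else {
    for (const event of events) queue.push({ event, deferred: undefined })
  }
}
```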
@@ -163,156 +197,195 @@ export const makeLeaderSyncProcessor = ({
        }),
      )
 
-    const pushPartial: LeaderSyncProcessor['pushPartial'] = ({
-      mutationEvent: partialMutationEvent,
-      clientId,
-      sessionId,
-    }) =>
+    const pushPartial: LeaderSyncProcessor['pushPartial'] = ({ event: { name, args }, clientId, sessionId }) =>
      Effect.gen(function* () {
        const syncState = yield* syncStateSref
        if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
-        const …
+        const eventDef = getEventDef(schema, name)
 
-        const …
-…
+        const eventEncoded = new LiveStoreEvent.EncodedWithMeta({
+          name,
+          args,
          clientId,
          sessionId,
-          ...EventId.nextPair(syncState.localHead, …
+          ...EventId.nextPair(syncState.localHead, eventDef.eventDef.options.clientOnly),
        })
 
-        yield* push([…
+        yield* push([eventEncoded])
      }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie))
 
    // Starts various background loops
-    const boot: LeaderSyncProcessor['boot'] = (…
-      Effect.…
-…
-…
-…
-…
-…
-…
-…
-…
+    const boot: LeaderSyncProcessor['boot'] = Effect.gen(function* () {
+      const span = yield* Effect.currentSpan.pipe(Effect.orDie)
+      const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
+      const { devtools, shutdownChannel } = yield* LeaderThreadCtx
+      const runtime = yield* Effect.runtime<LeaderThreadCtx>()
+
+      ctxRef.current = {
+        otelSpan,
+        span,
+        devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
+        runtime,
+      }
 
-…
-      const initialLocalHead = dbMissing ? EventId.ROOT : getClientHeadFromDb(dbMutationLog)
+      const initialLocalHead = dbEventlogMissing ? EventId.ROOT : Eventlog.getClientHeadFromDb(dbEventlog)
 
-…
-        return shouldNeverHappen(
-          `During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`,
-        )
-      }
+      const initialBackendHead = dbEventlogMissing ? EventId.ROOT.global : Eventlog.getBackendHeadFromDb(dbEventlog)
 
-…
-…
-…
-…
-…
-…
-…
-…
-…
-        upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
-        localHead: initialLocalHead,
-      })
+      if (initialBackendHead > initialLocalHead.global) {
+        return shouldNeverHappen(
+          `During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`,
+        )
+      }
+
+      const pendingEvents = dbEventlogMissing
+        ? []
+        : yield* Eventlog.getEventsSince({ global: initialBackendHead, client: EventId.clientDefault })
 
-…
-…
+      const initialSyncState = new SyncState.SyncState({
+        pending: pendingEvents,
+        upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
+        localHead: initialLocalHead,
+      })
 
-…
-…
-…
-…
-…
-…
-…
-…
+      /** State transitions need to happen atomically, so we use a Ref to track the state */
+      yield* SubscriptionRef.set(syncStateSref, initialSyncState)
+
+      // Rehydrate sync queue
+      if (pendingEvents.length > 0) {
+        const globalPendingEvents = pendingEvents
+          // Don't sync clientOnly events
+          .filter((eventEncoded) => {
+            const eventDef = getEventDef(schema, eventEncoded.name)
+            return eventDef.eventDef.options.clientOnly === false
+          })
 
-…
+        if (globalPendingEvents.length > 0) {
+          yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingEvents)
        }
+      }
+
+      const shutdownOnError = (cause: unknown) =>
+        Effect.gen(function* () {
+          if (onError === 'shutdown') {
+            yield* shutdownChannel.send(UnexpectedError.make({ cause }))
+            yield* Effect.die(cause)
+          }
+        })
 
-…
+      yield* backgroundApplyLocalPushes({
+        localPushesLatch,
+        localPushesQueue,
+        pullLatch,
+        syncStateSref,
+        syncBackendPushQueue,
+        schema,
+        isClientEvent,
+        otelSpan,
+        currentLocalPushGenerationRef,
+        connectedClientSessionPullQueues,
+        mergeCounterRef,
+        mergePayloads,
+        localPushBatchSize,
+        testing: {
+          delay: testing?.delays?.localPushProcessing,
+        },
+      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+      const backendPushingFiberHandle = yield* FiberHandle.make()
+      const backendPushingEffect = backgroundBackendPushing({
+        syncBackendPushQueue,
+        otelSpan,
+        devtoolsLatch: ctxRef.current?.devtoolsLatch,
+        backendPushBatchSize,
+      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError))
+
+      yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect)
+
+      yield* backgroundBackendPulling({
+        initialBackendHead,
+        isClientEvent,
+        restartBackendPushing: (filteredRebasedPending) =>
          Effect.gen(function* () {
-…
-…
-…
+            // Stop current pushing fiber
+            yield* FiberHandle.clear(backendPushingFiberHandle)
+
+            // Reset the sync backend push queue
+            yield* BucketQueue.clear(syncBackendPushQueue)
+            yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending)
+
+            // Restart pushing fiber
+            yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect)
+          }),
+        syncStateSref,
+        localPushesLatch,
+        pullLatch,
+        otelSpan,
+        initialBlockingSyncContext,
+        devtoolsLatch: ctxRef.current?.devtoolsLatch,
+        connectedClientSessionPullQueues,
+        mergeCounterRef,
+        mergePayloads,
+        advancePushHead,
+      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+
+      return { initialLeaderHead: initialLocalHead }
+    }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'))
+
+    const pull: LeaderSyncProcessor['pull'] = ({ cursor }) =>
+      Effect.gen(function* () {
+        const queue = yield* pullQueue({ cursor })
+        return Stream.fromQueue(queue)
+      }).pipe(Stream.unwrapScoped)
+
+    const pullQueue: LeaderSyncProcessor['pullQueue'] = ({ cursor }) => {
+      const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized')
+      return Effect.gen(function* () {
+        const queue = yield* connectedClientSessionPullQueues.makeQueue
+        const payloadsSinceCursor = Array.from(mergePayloads.entries())
+          .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
+          .filter(({ mergeCounter }) => mergeCounter > cursor.mergeCounter)
+          .toSorted((a, b) => a.mergeCounter - b.mergeCounter)
+          .map(({ payload, mergeCounter }) => {
+            if (payload._tag === 'upstream-advance') {
+              return {
+                payload: {
+                  _tag: 'upstream-advance' as const,
+                  newEvents: ReadonlyArray.dropWhile(payload.newEvents, (eventEncoded) =>
+                    EventId.isGreaterThanOrEqual(cursor.eventId, eventEncoded.id),
+                  ),
+                },
+                mergeCounter,
+              }
+            } else {
+              return { payload, mergeCounter }
            }
          })
 
-      yield* …
-        localPushesLatch,
-        localPushesQueue,
-        pullLatch,
-        syncStateSref,
-        syncBackendQueue,
-        schema,
-        isClientEvent,
-        otelSpan,
-        currentLocalPushGenerationRef,
-      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
-
-      const backendPushingFiberHandle = yield* FiberHandle.make()
-
-      yield* FiberHandle.run(
-        backendPushingFiberHandle,
-        backgroundBackendPushing({
-          dbReady,
-          syncBackendQueue,
-          otelSpan,
-          devtoolsLatch: ctxRef.current?.devtoolsLatch,
-        }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-      )
+        yield* queue.offerAll(payloadsSinceCursor)
 
-…
-…
-…
-        isClientEvent,
-        restartBackendPushing: (filteredRebasedPending) =>
-          Effect.gen(function* () {
-            // Stop current pushing fiber
-            yield* FiberHandle.clear(backendPushingFiberHandle)
-
-            // Reset the sync queue
-            yield* BucketQueue.clear(syncBackendQueue)
-            yield* BucketQueue.offerAll(syncBackendQueue, filteredRebasedPending)
-
-            // Restart pushing fiber
-            yield* FiberHandle.run(
-              backendPushingFiberHandle,
-              backgroundBackendPushing({
-                dbReady,
-                syncBackendQueue,
-                otelSpan,
-                devtoolsLatch: ctxRef.current?.devtoolsLatch,
-              }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)),
-            )
-          }),
-        syncStateSref,
-        localPushesLatch,
-        pullLatch,
-        otelSpan,
-        initialBlockingSyncContext,
-        devtoolsLatch: ctxRef.current?.devtoolsLatch,
-      }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped)
+        return queue
+      }).pipe(Effect.provide(runtime))
+    }
 
-…
-…
+    const syncState = Subscribable.make({
+      get: Effect.gen(function* () {
+        const syncState = yield* syncStateSref
+        if (syncState === undefined) return shouldNeverHappen('Not initialized')
+        return syncState
+      }),
+      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
+    })
 
    return {
+      pull,
+      pullQueue,
      push,
      pushPartial,
      boot,
-      syncState…
-…
-        const syncState = yield* syncStateSref
-        if (syncState === undefined) return shouldNeverHappen('Not initialized')
-        return syncState
-      }),
-      changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
-      }),
+      syncState,
+      getMergeCounter: () => mergeCounterRef.current,
    } satisfies LeaderSyncProcessor
  })
 
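The new `pullQueue` lets a late-connecting client session catch up: every merge result is recorded in `mergePayloads` under its merge counter, and payloads past the client's cursor are replayed, with already-seen events trimmed from `upstream-advance` payloads. A standalone sketch of that replay, assuming numeric merge counters and event ids (and a plain `filter` where the original uses `dropWhile` over an ordered list):

```ts
// Sketch: replaying recorded merge payloads past a client session's cursor.
type Payload =
  | { _tag: 'upstream-advance'; newEvents: { id: number }[] }
  | { _tag: 'upstream-rebase'; newEvents: { id: number }[]; rollbackEvents: { id: number }[] }

const mergePayloads = new Map<number, Payload>([
  [1, { _tag: 'upstream-advance', newEvents: [{ id: 1 }, { id: 2 }] }],
  [2, { _tag: 'upstream-advance', newEvents: [{ id: 3 }] }],
])

const catchUp = (cursor: { mergeCounter: number; eventId: number }) =>
  [...mergePayloads.entries()]
    .filter(([mergeCounter]) => mergeCounter > cursor.mergeCounter)
    .sort(([a], [b]) => a - b)
    .map(([mergeCounter, payload]) =>
      payload._tag === 'upstream-advance'
        ? // Trim events the session has already seen (its head is at cursor.eventId).
          { mergeCounter, payload: { ...payload, newEvents: payload.newEvents.filter((e) => e.id > cursor.eventId) } }
        : // Rebases are replayed as-is.
          { mergeCounter, payload },
    )

console.log(catchUp({ mergeCounter: 0, eventId: 1 }))
// → merge 1 contributes only event 2; merge 2 contributes event 3
```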
@@ -321,30 +394,41 @@ const backgroundApplyLocalPushes = ({
  localPushesQueue,
  pullLatch,
  syncStateSref,
-…
+  syncBackendPushQueue,
  schema,
  isClientEvent,
  otelSpan,
  currentLocalPushGenerationRef,
+  connectedClientSessionPullQueues,
+  mergeCounterRef,
+  mergePayloads,
+  localPushBatchSize,
+  testing,
 }: {
  pullLatch: Effect.Latch
  localPushesLatch: Effect.Latch
  localPushesQueue: BucketQueue.BucketQueue<LocalPushQueueItem>
  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
-…
+  syncBackendPushQueue: BucketQueue.BucketQueue<LiveStoreEvent.EncodedWithMeta>
  schema: LiveStoreSchema
-  isClientEvent: (…
+  isClientEvent: (eventEncoded: LiveStoreEvent.EncodedWithMeta) => boolean
  otelSpan: otel.Span | undefined
  currentLocalPushGenerationRef: { current: number }
+  connectedClientSessionPullQueues: PullQueueSet
+  mergeCounterRef: { current: number }
+  mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
+  localPushBatchSize: number
+  testing: {
+    delay: Effect.Effect<void> | undefined
+  }
 }) =>
  Effect.gen(function* () {
-    const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx
-
-    const applyMutationItems = yield* makeApplyMutationItems
-
    while (true) {
-…
-…
+      if (testing.delay !== undefined) {
+        yield* testing.delay.pipe(Effect.withSpan('localPushProcessingDelay'))
+      }
+
+      const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, localPushBatchSize)
 
      // Wait for the backend pulling to finish
      yield* localPushesLatch.await
@@ -356,7 +440,7 @@ const backgroundApplyLocalPushes = ({
      // It's important that we filter after we got localPushesLatch, otherwise we might filter with the old generation
      const filteredBatchItems = batchItems
        .filter(([_1, _2, generation]) => generation === currentLocalPushGenerationRef.current)
-        .map(([…
+        .map(([eventEncoded, deferred]) => [eventEncoded, deferred] as const)
 
      if (filteredBatchItems.length === 0) {
        // console.log('dropping old-gen batch', currentLocalPushGenerationRef.current)
@@ -374,12 +458,14 @@ const backgroundApplyLocalPushes = ({
        syncState,
        payload: { _tag: 'local-push', newEvents },
        isClientEvent,
-        isEqualEvent: …
+        isEqualEvent: LiveStoreEvent.isEqualEncoded,
      })
 
+      const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
      switch (mergeResult._tag) {
        case 'unexpected-error': {
-          otelSpan?.addEvent(…
+          otelSpan?.addEvent(`[${mergeCounter}]:push:unexpected-error`, {
            batchSize: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
          })
@@ -389,15 +475,12 @@ const backgroundApplyLocalPushes = ({
          return shouldNeverHappen('The leader thread should never have to rebase due to a local push')
        }
        case 'reject': {
-          otelSpan?.addEvent(…
+          otelSpan?.addEvent(`[${mergeCounter}]:push:reject`, {
            batchSize: newEvents.length,
            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-…
-…
-          TODO: how to test this?
-          */
+          // TODO: how to test this?
          currentLocalPushGenerationRef.current++
 
          const nextGeneration = currentLocalPushGenerationRef.current
@@ -411,7 +494,8 @@ const backgroundApplyLocalPushes = ({
            (item) => item[2] >= nextGeneration,
          )
 
-…
+          // TODO we still need to better understand and handle this scenario
+          if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
            console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue))
            debugger
          }
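The `reject` branch above is where the generation counter earns its keep: every queued item carries the generation current at enqueue time, a rejection bumps the counter, and the loop then drops all items from older generations even if they would be individually valid. A minimal sketch:

```ts
// Sketch: generation-based invalidation of queued local pushes.
type Item = { event: string; generation: number }

const generationRef = { current: 0 }
const queue: Item[] = []

const enqueue = (event: string) => queue.push({ event, generation: generationRef.current })

const rejectBatch = () => {
  // Mirrors `currentLocalPushGenerationRef.current++`: everything queued before this
  // point belongs to a stale generation and will be filtered out, not applied.
  generationRef.current++
}

enqueue('e1')
enqueue('e2')
rejectBatch() // e1 was rejected, so e2 (queued under the same generation) must be dropped too
enqueue('e3')

const batch = queue.splice(0).filter((item) => item.generation === generationRef.current)
console.log(batch) // → only e3 survives
```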
@@ -449,87 +533,82 @@ const backgroundApplyLocalPushes = ({
 
      yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)
 
-      if (clientId === 'client-b') {
-        // yield* Effect.log('offer upstream-advance due to local-push')
-        // debugger
-      }
      yield* connectedClientSessionPullQueues.offer({
-        payload: {
-…
+        payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+        mergeCounter,
      })
+      mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))
 
-      otelSpan?.addEvent(…
+      otelSpan?.addEvent(`[${mergeCounter}]:push:advance`, {
        batchSize: newEvents.length,
        mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
      })
 
-      // Don't sync clientOnly …
-      const filteredBatch = mergeResult.newEvents.filter((…
-        const …
-        return …
+      // Don't sync clientOnly events
+      const filteredBatch = mergeResult.newEvents.filter((eventEncoded) => {
+        const eventDef = getEventDef(schema, eventEncoded.name)
+        return eventDef.eventDef.options.clientOnly === false
      })
 
-      yield* BucketQueue.offerAll(…
+      yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch)
 
-      yield* …
+      yield* applyEventsBatch({ batchItems: mergeResult.newEvents, deferreds })
 
      // Allow the backend pulling to start
      yield* pullLatch.open
    }
  })
 
-type …
-  batchItems: ReadonlyArray<…
-  /**
+type ApplyEventsBatch = (_: {
+  batchItems: ReadonlyArray<LiveStoreEvent.EncodedWithMeta>
+  /**
+   * The deferreds are used by the caller to know when the mutation has been processed.
+   * Indexes are aligned with `batchItems`
+   */
  deferreds: ReadonlyArray<Deferred.Deferred<void, LeaderAheadError> | undefined> | undefined
-}) => Effect.Effect<void, UnexpectedError>
+}) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx>
 
 // TODO how to handle errors gracefully
-const …
+const applyEventsBatch: ApplyEventsBatch = ({ batchItems, deferreds }) =>
  Effect.gen(function* () {
-    const …
-    const { dbReadModel: db, dbMutationLog } = leaderThreadCtx
+    const { dbReadModel: db, dbEventlog, applyEvent } = yield* LeaderThreadCtx
 
-…
+    // NOTE We always start a transaction to ensure consistency between db and eventlog (even for single-item batches)
+    db.execute('BEGIN TRANSACTION', undefined) // Start the transaction
+    dbEventlog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
 
-…
+    yield* Effect.addFinalizer((exit) =>
      Effect.gen(function* () {
-…
-        dbMutationLog.execute('BEGIN TRANSACTION', undefined) // Start the transaction
-…
-        yield* Effect.addFinalizer((exit) =>
-          Effect.gen(function* () {
-            if (Exit.isSuccess(exit)) return
+        if (Exit.isSuccess(exit)) return
 
-…
-…
-…
-…
-…
+        // Rollback in case of an error
+        db.execute('ROLLBACK', undefined)
+        dbEventlog.execute('ROLLBACK', undefined)
+      }),
+    )
 
-…
-…
+    for (let i = 0; i < batchItems.length; i++) {
+      const { sessionChangeset } = yield* applyEvent(batchItems[i]!)
+      batchItems[i]!.meta.sessionChangeset = sessionChangeset
 
-…
-…
-…
-…
+      if (deferreds?.[i] !== undefined) {
+        yield* Deferred.succeed(deferreds[i]!, void 0)
+      }
+    }
 
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-    })
+    db.execute('COMMIT', undefined) // Commit the transaction
+    dbEventlog.execute('COMMIT', undefined) // Commit the transaction
+  }).pipe(
+    Effect.uninterruptible,
+    Effect.scoped,
+    Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyEventItems', {
+      attributes: { batchSize: batchItems.length },
+    }),
+    Effect.tapCauseLogPretty,
+    UnexpectedError.mapToUnexpectedError,
+  )
 
 const backgroundBackendPulling = ({
-  dbReady,
  initialBackendHead,
  isClientEvent,
  restartBackendPushing,
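`applyEventsBatch` keeps the read-model database and the eventlog database in lockstep by opening a transaction on each and rolling both back from a finalizer when anything in the batch fails. Stripped of Effect, the same shape is a try/catch over two handles; a sketch assuming a minimal `execute`-style interface (note the two COMMITs remain two separate SQLite transactions, so this protects against in-process errors, not a crash landing between the commits):

```ts
// Sketch: applying a batch consistently across two SQLite handles.
interface Db {
  execute: (sql: string) => void
}

const applyBatch = (db: Db, dbEventlog: Db, batch: string[], applyEvent: (event: string) => void): void => {
  // Always use transactions, even for single-item batches, so the read model
  // and the eventlog cannot diverge on error.
  db.execute('BEGIN TRANSACTION')
  dbEventlog.execute('BEGIN TRANSACTION')
  try {
    for (const event of batch) applyEvent(event)
    db.execute('COMMIT')
    dbEventlog.execute('COMMIT')
  } catch (cause) {
    // Mirrors the `Effect.addFinalizer` above: roll back both handles together.
    db.execute('ROLLBACK')
    dbEventlog.execute('ROLLBACK')
    throw cause
  }
}
```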
@@ -539,12 +618,15 @@ const backgroundBackendPulling = ({
  pullLatch,
  devtoolsLatch,
  initialBlockingSyncContext,
+  connectedClientSessionPullQueues,
+  mergeCounterRef,
+  mergePayloads,
+  advancePushHead,
 }: {
-  dbReady: Deferred.Deferred<void>
  initialBackendHead: EventId.GlobalEventId
-  isClientEvent: (…
+  isClientEvent: (eventEncoded: LiveStoreEvent.EncodedWithMeta) => boolean
  restartBackendPushing: (
-    filteredRebasedPending: ReadonlyArray<…
+    filteredRebasedPending: ReadonlyArray<LiveStoreEvent.EncodedWithMeta>,
  ) => Effect.Effect<void, UnexpectedError, LeaderThreadCtx | HttpClient.HttpClient>
  otelSpan: otel.Span | undefined
  syncStateSref: SubscriptionRef.SubscriptionRef<SyncState.SyncState | undefined>
@@ -552,24 +634,17 @@ const backgroundBackendPulling = ({
  pullLatch: Effect.Latch
  devtoolsLatch: Effect.Latch | undefined
  initialBlockingSyncContext: InitialBlockingSyncContext
+  connectedClientSessionPullQueues: PullQueueSet
+  mergeCounterRef: { current: number }
+  mergePayloads: Map<number, typeof SyncState.PayloadUpstream.Type>
+  advancePushHead: (eventId: EventId.EventId) => void
 }) =>
  Effect.gen(function* () {
-    const {
-      syncBackend,
-      dbReadModel: db,
-      dbMutationLog,
-      connectedClientSessionPullQueues,
-      schema,
-      clientId,
-    } = yield* LeaderThreadCtx
+    const { syncBackend, dbReadModel: db, dbEventlog, schema } = yield* LeaderThreadCtx
 
    if (syncBackend === undefined) return
 
-    const …
-…
-    const applyMutationItems = yield* makeApplyMutationItems
-…
-    const onNewPullChunk = (newEvents: MutationEvent.EncodedWithMeta[], remaining: number) =>
+    const onNewPullChunk = (newEvents: LiveStoreEvent.EncodedWithMeta[], remaining: number) =>
      Effect.gen(function* () {
        if (newEvents.length === 0) return
 
@@ -586,20 +661,20 @@ const backgroundBackendPulling = ({
        const syncState = yield* syncStateSref
        if (syncState === undefined) return shouldNeverHappen('Not initialized')
 
-        const trimRollbackUntil = newEvents.at(-1)!.id
-…
        const mergeResult = SyncState.merge({
          syncState,
-          payload: {…
+          payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
          isClientEvent,
-          isEqualEvent: …
+          isEqualEvent: LiveStoreEvent.isEqualEncoded,
          ignoreClientEvents: true,
        })
 
+        const mergeCounter = yield* incrementMergeCounter(mergeCounterRef)
+
        if (mergeResult._tag === 'reject') {
          return shouldNeverHappen('The leader thread should never reject upstream advances')
        } else if (mergeResult._tag === 'unexpected-error') {
-          otelSpan?.addEvent(…
+          otelSpan?.addEvent(`[${mergeCounter}]:pull:unexpected-error`, {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
          })
@@ -608,62 +683,79 @@ const backgroundBackendPulling = ({
 
        const newBackendHead = newEvents.at(-1)!.id
 
-        updateBackendHead(…
+        Eventlog.updateBackendHead(dbEventlog, newBackendHead)
 
        if (mergeResult._tag === 'rebase') {
-          otelSpan?.addEvent(…
+          otelSpan?.addEvent(`[${mergeCounter}]:pull:rebase`, {
            newEventsCount: newEvents.length,
            newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
-            rollbackCount: mergeResult.…
+            rollbackCount: mergeResult.rollbackEvents.length,
            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-          const …
-          const …
-            return …
+          const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((event) => {
+            const eventDef = getEventDef(schema, event.name)
+            return eventDef.eventDef.options.clientOnly === false
          })
-          yield* restartBackendPushing(…
+          yield* restartBackendPushing(globalRebasedPendingEvents)
 
-          if (mergeResult.…
-            yield* rollback({ db, …
+          if (mergeResult.rollbackEvents.length > 0) {
+            yield* rollback({ db, dbEventlog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) })
          }
 
          yield* connectedClientSessionPullQueues.offer({
-            payload: {
-              _tag: 'upstream-rebase',
+            payload: SyncState.PayloadUpstreamRebase.make({
              newEvents: mergeResult.newEvents,
-…
-…
-…
-              remaining,
+              rollbackEvents: mergeResult.rollbackEvents,
+            }),
+            mergeCounter,
          })
+          mergePayloads.set(
+            mergeCounter,
+            SyncState.PayloadUpstreamRebase.make({
+              newEvents: mergeResult.newEvents,
+              rollbackEvents: mergeResult.rollbackEvents,
+            }),
+          )
        } else {
-          otelSpan?.addEvent(…
+          otelSpan?.addEvent(`[${mergeCounter}]:pull:advance`, {
            newEventsCount: newEvents.length,
            mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
          })
 
-          if (clientId === 'client-b') {
-            // yield* Effect.log('offer upstream-advance due to pull')
-          }
          yield* connectedClientSessionPullQueues.offer({
-            payload: {
-…
+            payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
+            mergeCounter,
          })
+          mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }))
+
+          if (mergeResult.confirmedEvents.length > 0) {
+            // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
+            // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
+            const confirmedNewEvents = newEvents.filter((event) =>
+              mergeResult.confirmedEvents.some((confirmedEvent) => EventId.isEqual(event.id, confirmedEvent.id)),
+            )
+            yield* Eventlog.updateSyncMetadata(confirmedNewEvents)
+          }
        }
 
+        // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
        trimChangesetRows(db, newBackendHead)
 
-…
+        advancePushHead(mergeResult.newSyncState.localHead)
+
+        yield* applyEventsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined })
 
        yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState)
 
+        // Allow local pushes to be processed again
        if (remaining === 0) {
-          // Allow local pushes to be processed again
          yield* localPushesLatch.open
        }
      })
 
+    const cursorInfo = yield* Eventlog.getSyncBackendCursorInfo(initialBackendHead)
+
    yield* syncBackend.pull(cursorInfo).pipe(
      // TODO only take from queue while connected
      Stream.tap(({ batch, remaining }) =>
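One subtle step in the advance branch: `mergeResult.confirmedEvents` identifies which locally pending events the backend just confirmed, but only the freshly pulled `newEvents` carry the backend's sync metadata, so the code joins the two lists by event id before persisting the metadata. A sketch of that join, with simplified types:

```ts
// Sketch: selecting pulled events that correspond to confirmed pending events.
type EventId = { global: number; client: number }
type PulledEvent = { id: EventId; syncMetadata: string }

const isEqualId = (a: EventId, b: EventId) => a.global === b.global && a.client === b.client

const confirmedNewEvents = (newEvents: PulledEvent[], confirmed: { id: EventId }[]): PulledEvent[] =>
  newEvents.filter((event) => confirmed.some((confirmedEvent) => isEqualId(event.id, confirmedEvent.id)))

const pulled = [{ id: { global: 1, client: 0 }, syncMetadata: 'backend-cursor@1' }]
console.log(confirmedNewEvents(pulled, [{ id: { global: 1, client: 0 } }]))
// → the pulled event, whose metadata is then persisted (cf. Eventlog.updateSyncMetadata)
```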
@@ -675,16 +767,13 @@ const backgroundBackendPulling = ({
        //   },
        // })
 
-        // …
-        yield* dbReady
-…
-        // NOTE we only want to take process mutations when the sync backend is connected
+        // NOTE we only want to take process events when the sync backend is connected
        // (e.g. needed for simulating being offline)
        // TODO remove when there's a better way to handle this in stream above
        yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)
 
        yield* onNewPullChunk(
-          batch.map((_) => …
+          batch.map((_) => LiveStoreEvent.EncodedWithMeta.fromGlobal(_.eventEncoded, _.metadata)),
          remaining,
        )
 
@@ -694,102 +783,27 @@ const backgroundBackendPulling = ({
|
|
|
694
783
|
Stream.runDrain,
|
|
695
784
|
Effect.interruptible,
|
|
696
785
|
)
|
|
-}).pipe(Effect.withSpan('@livestore/common:
-
-const rollback = ({
-  db,
-  dbMutationLog,
-  eventIdsToRollback,
-}: {
-  db: SqliteDb
-  dbMutationLog: SqliteDb
-  eventIdsToRollback: EventId.EventId[]
-}) =>
-  Effect.gen(function* () {
-    const rollbackEvents = db
-      .select<SessionChangesetMetaRow>(
-        sql`SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`,
-      )
-      .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
-      .sort((a, b) => EventId.compare(a.id, b.id))
-    // TODO bring back `.toSorted` once Expo supports it
-    // .toSorted((a, b) => EventId.compare(a.id, b.id))
-
-    // Apply changesets in reverse order
-    for (let i = rollbackEvents.length - 1; i >= 0; i--) {
-      const { changeset } = rollbackEvents[i]!
-      if (changeset !== null) {
-        db.makeChangeset(changeset).invert().apply()
-      }
-    }
-
-    const eventIdPairChunks = ReadonlyArray.chunksOf(100)(
-      eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`),
-    )
-
-    // Delete the changeset rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-      db.execute(
-        sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-      )
-    }
-
-    // Delete the mutation log rows
-    for (const eventIdPairChunk of eventIdPairChunks) {
-      dbMutationLog.execute(
-        sql`DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`,
-      )
-    }
-  }).pipe(
-    Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
-      attributes: { count: eventIdsToRollback.length },
-    }),
-  )
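
The removed `rollback` chunks its DELETEs into groups of 100 because SQLite caps statement size and the number of bound values per statement; a self-contained sketch of that pattern (`Db` and the table name are hypothetical stand-ins):

type Db = { execute: (sql: string) => void }

const chunksOf = <T>(items: ReadonlyArray<T>, size: number): T[][] => {
  const out: T[][] = []
  for (let i = 0; i < items.length; i += size) out.push(items.slice(i, i + size))
  return out
}

// Delete event rows keyed by (idGlobal, idClient) pairs, 100 pairs per statement.
const deleteEventRows = (db: Db, ids: ReadonlyArray<{ global: number; client: number }>) => {
  const pairs = ids.map((id) => `(${id.global}, ${id.client})`)
  for (const chunk of chunksOf(pairs, 100)) {
    db.execute(`DELETE FROM session_changeset_meta WHERE (idGlobal, idClient) IN (${chunk.join(', ')})`)
  }
}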
-
-const getCursorInfo = (remoteHead: EventId.GlobalEventId) =>
-  Effect.gen(function* () {
-    const { dbMutationLog } = yield* LeaderThreadCtx
-
-    if (remoteHead === EventId.ROOT.global) return Option.none()
-
-    const MutationlogQuerySchema = Schema.Struct({
-      syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
-    }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head)
-
-    const syncMetadataOption = yield* Effect.sync(() =>
-      dbMutationLog.select<{ syncMetadataJson: string }>(
-        sql`SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`,
-      ),
-    ).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie)
-
-    return Option.some({
-      cursor: { global: remoteHead, client: EventId.clientDefault },
-      metadata: syncMetadataOption,
-    }) satisfies InitialSyncInfo
-  }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }))
+}).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'))
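
The removed `getCursorInfo` (superseded by `Eventlog.getSyncBackendCursorInfo`) decodes a single JSON column out of a row array via `Schema.pluck`/`Schema.head`; a minimal sketch of that decoding shape, substituting `Schema.Unknown` for the package's `Schema.JsonValue`:

import { Option, Schema } from 'effect'

// Rows -> pluck one column -> Option of the first row's value.
const FirstSyncMetadata = Schema.Struct({
  syncMetadataJson: Schema.parseJson(Schema.Option(Schema.Unknown)),
}).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head)

// Outer Option: "was there a row at all?"; inner Option: the stored metadata.
const first = Schema.decodeUnknownSync(FirstSyncMetadata)([{ syncMetadataJson: '{"_tag":"None"}' }])
const metadata = Option.flatten(first) // Option.none() here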

 const backgroundBackendPushing = ({
-
-  syncBackendQueue,
+  syncBackendPushQueue,
   otelSpan,
   devtoolsLatch,
+  backendPushBatchSize,
 }: {
-
-  syncBackendQueue: BucketQueue.BucketQueue<MutationEvent.EncodedWithMeta>
+  syncBackendPushQueue: BucketQueue.BucketQueue<LiveStoreEvent.EncodedWithMeta>
   otelSpan: otel.Span | undefined
   devtoolsLatch: Effect.Latch | undefined
+  backendPushBatchSize: number
 }) =>
   Effect.gen(function* () {
-    const { syncBackend
+    const { syncBackend } = yield* LeaderThreadCtx
     if (syncBackend === undefined) return

-    yield* dbReady
-
     while (true) {
       yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

-
-      const queueItems = yield* BucketQueue.takeBetween(syncBackendQueue, 1, BACKEND_PUSH_BATCH_SIZE)
+      const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, backendPushBatchSize)

       yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true)

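
`BucketQueue` is a LiveStore-internal structure; the take-at-least-one, up-to-batch-size loop above has the same shape as Effect's `Queue.takeBetween`. A generic sketch (assumption: `takeBetween` suspends until the minimum is available):

import { Chunk, Effect, Queue } from 'effect'

// Block until at least one item is queued, then drain up to `batchSize`
// items so each backend push carries as large a batch as is ready.
const pushLoop = <A>(
  queue: Queue.Queue<A>,
  batchSize: number,
  push: (batch: ReadonlyArray<A>) => Effect.Effect<void>,
) =>
  Effect.gen(function* () {
    while (true) {
      const batch = yield* Queue.takeBetween(queue, 1, batchSize)
      yield* push(Chunk.toReadonlyArray(batch))
    }
  })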
@@ -813,27 +827,109 @@ const backgroundBackendPushing = ({
         // wait for interrupt caused by background pulling which will then restart pushing
         return yield* Effect.never
       }
-
-      const { metadata } = pushResult.right
-
-      // TODO try to do this in a single query
-      for (let i = 0; i < queueItems.length; i++) {
-        const mutationEventEncoded = queueItems[i]!
-        yield* execSql(
-          dbMutationLog,
-          ...updateRows({
-            tableName: MUTATION_LOG_META_TABLE,
-            columns: mutationLogMetaTable.sqliteDef.columns,
-            where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
-            updateValues: { syncMetadataJson: metadata[i]! },
-          }),
-        )
-      }
     }
-  }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:
+  }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'))

 const trimChangesetRows = (db: SqliteDb, newHead: EventId.EventId) => {
   // Since we're using the session changeset rows to query for the current head,
   // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
   db.execute(sql`DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`)
 }
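
A tiny worked example of the `<` boundary above, with hypothetical helpers: after trimming with head `{ global: 42 }`, rows 40 and 41 are gone but the row(s) at 42 remain, so the head can still be read back from the table:

type Db = {
  execute: (sql: string) => void
  select: <T>(sql: string) => T[]
}

const trim = (db: Db, headGlobal: number) =>
  db.execute(`DELETE FROM session_changeset_meta WHERE idGlobal < ${headGlobal}`)

// Still answers 42 after `trim(db, 42)`; a `<=` trim would empty the table and lose the head.
const currentHeadGlobal = (db: Db) =>
  db.select<{ idGlobal: number | null }>(`SELECT MAX(idGlobal) AS idGlobal FROM session_changeset_meta`)[0]?.idGlobal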
+
+interface PullQueueSet {
+  makeQueue: Effect.Effect<
+    Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>,
+    UnexpectedError,
+    Scope.Scope | LeaderThreadCtx
+  >
+  offer: (item: {
+    payload: typeof SyncState.PayloadUpstream.Type
+    mergeCounter: number
+  }) => Effect.Effect<void, UnexpectedError>
+}
+
+const makePullQueueSet = Effect.gen(function* () {
+  const set = new Set<Queue.Queue<{ payload: typeof SyncState.PayloadUpstream.Type; mergeCounter: number }>>()
+
+  yield* Effect.addFinalizer(() =>
+    Effect.gen(function* () {
+      for (const queue of set) {
+        yield* Queue.shutdown(queue)
+      }
+
+      set.clear()
+    }),
+  )
+
+  const makeQueue: PullQueueSet['makeQueue'] = Effect.gen(function* () {
+    const queue = yield* Queue.unbounded<{
+      payload: typeof SyncState.PayloadUpstream.Type
+      mergeCounter: number
+    }>().pipe(Effect.acquireRelease(Queue.shutdown))
+
+    yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)))
+
+    set.add(queue)
+
+    return queue
+  })
+
+  const offer: PullQueueSet['offer'] = (item) =>
+    Effect.gen(function* () {
+      // Short-circuit if the payload is an empty upstream advance
+      if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
+        return
+      }
+
+      for (const queue of set) {
+        yield* Queue.offer(queue, item)
+      }
+    })
+
+  return {
+    makeQueue,
+    offer,
+  }
+})
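
`makePullQueueSet` is a small pub/sub fan-out: each subscriber owns a queue, `offer` broadcasts to all of them, and scope teardown shuts queues down. The same shape in miniature against Effect's public `Queue` API (names are illustrative):

import { Effect, Queue } from 'effect'

const makeFanOut = <A>() =>
  Effect.gen(function* () {
    const subscribers = new Set<Queue.Queue<A>>()

    // Scoped: when a subscriber's scope closes, its queue is shut down and removed.
    const subscribe = Effect.gen(function* () {
      const queue = yield* Effect.acquireRelease(Queue.unbounded<A>(), (q) =>
        Queue.shutdown(q).pipe(Effect.zipRight(Effect.sync(() => subscribers.delete(q)))),
      )
      subscribers.add(queue)
      return queue
    })

    // Broadcast one item to every live subscriber.
    const publish = (item: A) => Effect.forEach(subscribers, (queue) => Queue.offer(queue, item))

    return { subscribe, publish }
  })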
+
+const incrementMergeCounter = (mergeCounterRef: { current: number }) =>
+  Effect.gen(function* () {
+    const { dbReadModel } = yield* LeaderThreadCtx
+    mergeCounterRef.current++
+    dbReadModel.execute(
+      sql`INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`,
+    )
+    return mergeCounterRef.current
+  })
+
+const getMergeCounterFromDb = (dbReadModel: SqliteDb) =>
+  Effect.gen(function* () {
+    const result = dbReadModel.select<{ mergeCounter: number }>(
+      sql`SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`,
+    )
+    return result[0]?.mergeCounter ?? 0
+  })
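
The merge-counter pattern above keeps the authoritative value in memory and mirrors it into a one-row table (fixed `id = 0`) so it survives a leader restart. A plain sketch with stand-in names:

type Db = {
  execute: (sql: string) => void
  select: <T>(sql: string) => T[]
}

// On leader startup: restore the last persisted value (0 for a fresh database).
const restoreMergeCounter = (db: Db): { current: number } => ({
  current:
    db.select<{ mergeCounter: number }>(`SELECT mergeCounter FROM leader_merge_counter WHERE id = 0`)[0]
      ?.mergeCounter ?? 0,
})

// On every merge: bump in memory, then upsert the single row.
const incrementMergeCounter = (db: Db, ref: { current: number }) => {
  ref.current++
  db.execute(`INSERT OR REPLACE INTO leader_merge_counter (id, mergeCounter) VALUES (0, ${ref.current})`)
  return ref.current
}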
+
+const validatePushBatch = (batch: ReadonlyArray<LiveStoreEvent.EncodedWithMeta>, pushHead: EventId.EventId) =>
+  Effect.gen(function* () {
+    if (batch.length === 0) {
+      return
+    }
+
+    // Make sure batch is monotonically increasing
+    for (let i = 1; i < batch.length; i++) {
+      if (EventId.isGreaterThanOrEqual(batch[i - 1]!.id, batch[i]!.id)) {
+        shouldNeverHappen(
+          `Events must be ordered in monotonically ascending order by eventId. Received: [${batch.map((e) => EventId.toString(e.id)).join(', ')}]`,
+        )
+      }
+    }
+
+    // Make sure smallest event id is > pushHead
+    if (EventId.isGreaterThanOrEqual(pushHead, batch[0]!.id)) {
+      return yield* LeaderAheadError.make({
+        minimumExpectedId: pushHead,
+        providedId: batch[0]!.id,
+      })
+    }
+  })
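
The two invariants `validatePushBatch` enforces, isolated into a pure predicate (the comparison helper is a stand-in for `EventId.isGreaterThanOrEqual`, assuming ids order lexicographically by `(global, client)`):

type EventId = { global: number; client: number }

const isGreaterThanOrEqual = (a: EventId, b: EventId) =>
  a.global > b.global || (a.global === b.global && a.client >= b.client)

// True iff every id strictly exceeds its predecessor and the whole batch sits after pushHead.
const isValidPushBatch = (ids: ReadonlyArray<EventId>, pushHead: EventId): boolean => {
  if (ids.length === 0) return true
  if (isGreaterThanOrEqual(pushHead, ids[0]!)) return false
  for (let i = 1; i < ids.length; i++) {
    if (isGreaterThanOrEqual(ids[i - 1]!, ids[i]!)) return false
  }
  return true
}

// isValidPushBatch([{ global: 1, client: 0 }, { global: 1, client: 1 }], { global: 0, client: 0 }) === true
// isValidPushBatch([{ global: 1, client: 1 }, { global: 1, client: 0 }], { global: 0, client: 0 }) === false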