@livestore/common 0.0.0-snapshot-f6ec49b1a18859aad769f0a0d8edf8bae231ed07 → 0.0.0-snapshot-2ef046b02334f52613d31dbe06af53487685edc0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/adapter-types.d.ts +7 -12
- package/dist/adapter-types.d.ts.map +1 -1
- package/dist/adapter-types.js +1 -7
- package/dist/adapter-types.js.map +1 -1
- package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
- package/dist/devtools/devtools-messages-common.d.ts +13 -6
- package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-common.js +6 -0
- package/dist/devtools/devtools-messages-common.js.map +1 -1
- package/dist/devtools/devtools-messages-leader.d.ts +25 -25
- package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
- package/dist/devtools/devtools-messages-leader.js +1 -2
- package/dist/devtools/devtools-messages-leader.js.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.d.ts +16 -6
- package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
- package/dist/leader-thread/LeaderSyncProcessor.js +227 -215
- package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
- package/dist/leader-thread/apply-mutation.d.ts +14 -9
- package/dist/leader-thread/apply-mutation.d.ts.map +1 -1
- package/dist/leader-thread/apply-mutation.js +43 -36
- package/dist/leader-thread/apply-mutation.js.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
- package/dist/leader-thread/leader-worker-devtools.js +2 -5
- package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
- package/dist/leader-thread/make-leader-thread-layer.js +22 -33
- package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
- package/dist/leader-thread/mod.d.ts +1 -1
- package/dist/leader-thread/mod.d.ts.map +1 -1
- package/dist/leader-thread/mod.js +1 -1
- package/dist/leader-thread/mod.js.map +1 -1
- package/dist/leader-thread/mutationlog.d.ts +20 -3
- package/dist/leader-thread/mutationlog.d.ts.map +1 -1
- package/dist/leader-thread/mutationlog.js +106 -12
- package/dist/leader-thread/mutationlog.js.map +1 -1
- package/dist/leader-thread/recreate-db.d.ts.map +1 -1
- package/dist/leader-thread/recreate-db.js +4 -3
- package/dist/leader-thread/recreate-db.js.map +1 -1
- package/dist/leader-thread/types.d.ts +35 -19
- package/dist/leader-thread/types.d.ts.map +1 -1
- package/dist/leader-thread/types.js.map +1 -1
- package/dist/rehydrate-from-mutationlog.d.ts +5 -4
- package/dist/rehydrate-from-mutationlog.d.ts.map +1 -1
- package/dist/rehydrate-from-mutationlog.js +7 -9
- package/dist/rehydrate-from-mutationlog.js.map +1 -1
- package/dist/schema/EventId.d.ts +4 -0
- package/dist/schema/EventId.d.ts.map +1 -1
- package/dist/schema/EventId.js +7 -1
- package/dist/schema/EventId.js.map +1 -1
- package/dist/schema/MutationEvent.d.ts +87 -18
- package/dist/schema/MutationEvent.d.ts.map +1 -1
- package/dist/schema/MutationEvent.js +35 -6
- package/dist/schema/MutationEvent.js.map +1 -1
- package/dist/schema/schema.js +1 -1
- package/dist/schema/schema.js.map +1 -1
- package/dist/schema/system-tables.d.ts +67 -0
- package/dist/schema/system-tables.d.ts.map +1 -1
- package/dist/schema/system-tables.js +12 -1
- package/dist/schema/system-tables.js.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts +11 -1
- package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
- package/dist/sync/ClientSessionSyncProcessor.js +54 -47
- package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
- package/dist/sync/sync.d.ts +16 -5
- package/dist/sync/sync.d.ts.map +1 -1
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/syncstate.d.ts +81 -83
- package/dist/sync/syncstate.d.ts.map +1 -1
- package/dist/sync/syncstate.js +159 -125
- package/dist/sync/syncstate.js.map +1 -1
- package/dist/sync/syncstate.test.js +97 -138
- package/dist/sync/syncstate.test.js.map +1 -1
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +2 -2
- package/src/adapter-types.ts +5 -12
- package/src/devtools/devtools-messages-common.ts +9 -0
- package/src/devtools/devtools-messages-leader.ts +1 -2
- package/src/leader-thread/LeaderSyncProcessor.ts +398 -370
- package/src/leader-thread/apply-mutation.ts +81 -71
- package/src/leader-thread/leader-worker-devtools.ts +3 -8
- package/src/leader-thread/make-leader-thread-layer.ts +27 -41
- package/src/leader-thread/mod.ts +1 -1
- package/src/leader-thread/mutationlog.ts +167 -13
- package/src/leader-thread/recreate-db.ts +4 -3
- package/src/leader-thread/types.ts +34 -23
- package/src/rehydrate-from-mutationlog.ts +12 -12
- package/src/schema/EventId.ts +8 -1
- package/src/schema/MutationEvent.ts +42 -10
- package/src/schema/schema.ts +1 -1
- package/src/schema/system-tables.ts +20 -1
- package/src/sync/ClientSessionSyncProcessor.ts +64 -50
- package/src/sync/sync.ts +16 -9
- package/src/sync/syncstate.test.ts +173 -217
- package/src/sync/syncstate.ts +184 -151
- package/src/version.ts +1 -1
- package/dist/leader-thread/pull-queue-set.d.ts +0 -7
- package/dist/leader-thread/pull-queue-set.d.ts.map +0 -1
- package/dist/leader-thread/pull-queue-set.js +0 -48
- package/dist/leader-thread/pull-queue-set.js.map +0 -1
- package/src/leader-thread/pull-queue-set.ts +0 -67
package/src/sync/syncstate.ts
CHANGED
@@ -1,4 +1,4 @@
-import { casesHandled } from '@livestore/utils'
+import { casesHandled, shouldNeverHappen } from '@livestore/utils'
 import { Match, ReadonlyArray, Schema } from '@livestore/utils/effect'
 
 import { UnexpectedError } from '../adapter-types.js'
@@ -9,65 +9,65 @@ import * as MutationEvent from '../schema/MutationEvent.js'
  * SyncState represents the current sync state of a sync node relative to an upstream node.
  * Events flow from local to upstream, with each state maintaining its own event head.
  *
- *
+ * Example:
  * ```
- *
- * |
- *
- *
- *
- *
+ * +------------------------+
+ * |     PENDING EVENTS     |
+ * +------------------------+
+ *     ▼                  ▼
+ * Upstream Head      Local Head
+ *     (1,0)          (1,1), (1,2), (2,0)
+ * ```
  *
- *
- * -
- *
- * - Subject to rebase if rejected.
- * - **Rollback Tail**: Events that are kept around temporarily for potential rollback until confirmed by upstream.
+ * **Pending Events**: Events awaiting acknowledgment from the upstream.
+ * - Can be confirmed or rejected by the upstream.
+ * - Subject to rebase if rejected.
  *
  * Payloads:
  * - `PayloadUpstreamRebase`: Upstream has performed a rebase, so downstream must roll back to the specified event
  *   and rebase the pending events on top of the new events.
  * - `PayloadUpstreamAdvance`: Upstream has advanced, so downstream must rebase the pending events on top of the new events.
- * - `PayloadUpstreamTrimRollbackTail`: Upstream has advanced, so downstream can trim the rollback tail.
  * - `PayloadLocalPush`: Local push payload
  *
  * Invariants:
  * 1. **Chain Continuity**: Each event must reference its immediate parent.
  * 2. **Head Ordering**: Upstream Head ≤ Local Head.
- * 3. **
+ * 3. **Event number sequence**: Must follow the pattern (1,0)→(1,1)→(1,2)→(2,0).
  *
- *
- *
+ * A few further notes to help form an intuition:
+ * - The goal is to keep the pending events as small as possible (i.e. to have synced with the next upstream node)
+ * - There are 2 cases for rebasing:
+ *   - The conflicting event only conflicts with the pending events -> only (some of) the pending events need to be rolled back
+ *
+ * The `merge` function processes updates to the sync state based on incoming payloads,
+ * handling cases such as upstream rebase, advance and local push.
  */
 export class SyncState extends Schema.Class<SyncState>('SyncState')({
   pending: Schema.Array(MutationEvent.EncodedWithMeta),
-
+  /** What this node expects the next upstream node to have as its own local head */
   upstreamHead: EventId.EventId,
+  /** Equivalent to `pending.at(-1)?.id` if there are pending events */
   localHead: EventId.EventId,
 }) {
-  toJSON = (): any => {
-
-
-
-
-      localHead: `(${this.localHead.global},${this.localHead.client})`,
-    }
-  }
+  toJSON = (): any => ({
+    pending: this.pending.map((e) => e.toJSON()),
+    upstreamHead: EventId.toString(this.upstreamHead),
+    localHead: EventId.toString(this.localHead),
+  })
 }
 
+/**
+ * This payload propagates a rebase from the upstream node
+ */
 export class PayloadUpstreamRebase extends Schema.TaggedStruct('upstream-rebase', {
-  /**
-
+  /** Events which need to be rolled back */
+  rollbackEvents: Schema.Array(MutationEvent.EncodedWithMeta),
+  /** Events which need to be applied after the rollback (already rebased by the upstream node) */
   newEvents: Schema.Array(MutationEvent.EncodedWithMeta),
-  /** Trim rollback tail up to this event (inclusive). */
-  trimRollbackUntil: Schema.optional(EventId.EventId),
 }) {}
 
 export class PayloadUpstreamAdvance extends Schema.TaggedStruct('upstream-advance', {
   newEvents: Schema.Array(MutationEvent.EncodedWithMeta),
-  /** Trim rollback tail up to this event (inclusive). */
-  trimRollbackUntil: Schema.optional(EventId.EventId),
 }) {}
 
 export class PayloadLocalPush extends Schema.TaggedStruct('local-push', {
@@ -76,12 +76,10 @@ export class PayloadLocalPush extends Schema.TaggedStruct('local-push', {
 
 export class Payload extends Schema.Union(PayloadUpstreamRebase, PayloadUpstreamAdvance, PayloadLocalPush) {}
 
-export
-
-export type PayloadUpstream = typeof PayloadUpstream.Type
+export class PayloadUpstream extends Schema.Union(PayloadUpstreamRebase, PayloadUpstreamAdvance) {}
 
 /** Only used for debugging purposes */
-export class
+export class MergeContext extends Schema.Class<MergeContext>('MergeContext')({
   payload: Payload,
   syncState: SyncState,
 }) {
@@ -95,9 +93,10 @@ export class UpdateContext extends Schema.Class<UpdateContext>('UpdateContext')(
       _tag: 'upstream-advance',
       newEvents: this.payload.newEvents.map((e) => e.toJSON()),
     })),
-    Match.tag('upstream-rebase', () => ({
+    Match.tag('upstream-rebase', (payload) => ({
       _tag: 'upstream-rebase',
-      newEvents:
+      newEvents: payload.newEvents.map((e) => e.toJSON()),
+      rollbackEvents: payload.rollbackEvents.map((e) => e.toJSON()),
     })),
     Match.exhaustive,
   )
@@ -108,116 +107,105 @@ export class UpdateContext extends Schema.Class<UpdateContext>('UpdateContext')(
   }
 }
 
-export class
+export class MergeResultAdvance extends Schema.Class<MergeResultAdvance>('MergeResultAdvance')({
   _tag: Schema.Literal('advance'),
   newSyncState: SyncState,
-  /** Events which weren't pending before the update */
   newEvents: Schema.Array(MutationEvent.EncodedWithMeta),
-
+  /** Events which were previously pending but are now confirmed */
+  confirmedEvents: Schema.Array(MutationEvent.EncodedWithMeta),
+  mergeContext: MergeContext,
 }) {
   toJSON = (): any => {
     return {
       _tag: this._tag,
       newSyncState: this.newSyncState.toJSON(),
       newEvents: this.newEvents.map((e) => e.toJSON()),
-
+      confirmedEvents: this.confirmedEvents.map((e) => e.toJSON()),
+      mergeContext: this.mergeContext.toJSON(),
     }
   }
 }
 
-export class
+export class MergeResultRebase extends Schema.Class<MergeResultRebase>('MergeResultRebase')({
   _tag: Schema.Literal('rebase'),
   newSyncState: SyncState,
-  /** Events which weren't pending before the update */
   newEvents: Schema.Array(MutationEvent.EncodedWithMeta),
-
-
+  /** Events which need to be rolled back */
+  rollbackEvents: Schema.Array(MutationEvent.EncodedWithMeta),
+  mergeContext: MergeContext,
 }) {
   toJSON = (): any => {
     return {
       _tag: this._tag,
       newSyncState: this.newSyncState.toJSON(),
       newEvents: this.newEvents.map((e) => e.toJSON()),
-
-
+      rollbackEvents: this.rollbackEvents.map((e) => e.toJSON()),
+      mergeContext: this.mergeContext.toJSON(),
    }
   }
 }
 
-export class
+export class MergeResultReject extends Schema.Class<MergeResultReject>('MergeResultReject')({
   _tag: Schema.Literal('reject'),
   /** The minimum id that the new events must have */
   expectedMinimumId: EventId.EventId,
-
+  mergeContext: MergeContext,
 }) {
   toJSON = (): any => {
     return {
       _tag: this._tag,
-      expectedMinimumId:
-
+      expectedMinimumId: EventId.toString(this.expectedMinimumId),
+      mergeContext: this.mergeContext.toJSON(),
    }
   }
 }
 
-export class
-'UpdateResultUnexpectedError',
-)({
+export class MergeResultUnexpectedError extends Schema.Class<MergeResultUnexpectedError>('MergeResultUnexpectedError')({
   _tag: Schema.Literal('unexpected-error'),
   cause: UnexpectedError,
 }) {}
 
-export class
-
-
-
-
+export class MergeResult extends Schema.Union(
+  MergeResultAdvance,
+  MergeResultRebase,
+  MergeResultReject,
+  MergeResultUnexpectedError,
 ) {}
 
-const unexpectedError = (cause: unknown):
-
+const unexpectedError = (cause: unknown): MergeResultUnexpectedError =>
+  MergeResultUnexpectedError.make({
    _tag: 'unexpected-error',
    cause: new UnexpectedError({ cause }),
  })
 
-
+// TODO Idea: call merge recursively through hierarchy levels
+/*
+Idea: have a map that maps from `globalEventId` to Array<ClientEvents>
+The same applies to even further hierarchy levels
+
+TODO: possibly even keep the client events in a separate table in the client leader
+*/
+export const merge = ({
   syncState,
   payload,
-
+  isClientEvent,
   isEqualEvent,
-
+  ignoreClientEvents = false,
 }: {
   syncState: SyncState
   payload: typeof Payload.Type
-
+  isClientEvent: (event: MutationEvent.EncodedWithMeta) => boolean
   isEqualEvent: (a: MutationEvent.EncodedWithMeta, b: MutationEvent.EncodedWithMeta) => boolean
-  /** This is used in the leader which should ignore
-
-}): typeof
-
-  rollbackTail: ReadonlyArray<MutationEvent.EncodedWithMeta>,
-): ReadonlyArray<MutationEvent.EncodedWithMeta> => {
-  const trimRollbackUntil = payload._tag === 'local-push' ? undefined : payload.trimRollbackUntil
-  if (trimRollbackUntil === undefined) return rollbackTail
-  const index = rollbackTail.findIndex((event) => EventId.isEqual(event.id, trimRollbackUntil))
-  if (index === -1) return []
-  return rollbackTail.slice(index + 1)
-}
+  /** This is used in the leader which should ignore client events when receiving an upstream-advance payload */
+  ignoreClientEvents?: boolean
+}): typeof MergeResult.Type => {
+  validateSyncState(syncState)
 
-const
+  const mergeContext = MergeContext.make({ payload, syncState })
 
   switch (payload._tag) {
     case 'upstream-rebase': {
-
-      const rollbackIndex = syncState.rollbackTail.findIndex((event) =>
-        EventId.isEqual(event.id, payload.rollbackUntil),
-      )
-      if (rollbackIndex === -1) {
-        return unexpectedError(
-          `Rollback event not found in rollback tail. Rollback until: [${payload.rollbackUntil.global},${payload.rollbackUntil.client}]. Rollback tail: [${syncState.rollbackTail.map((e) => e.toString()).join(', ')}]`,
-        )
-      }
-
-      const eventsToRollback = [...syncState.rollbackTail.slice(rollbackIndex), ...syncState.pending]
+      const rollbackEvents = [...payload.rollbackEvents, ...syncState.pending]
 
       // Get the last new event's ID as the new upstream head
       const newUpstreamHead = payload.newEvents.at(-1)?.id ?? syncState.upstreamHead
@@ -226,35 +214,35 @@ export const updateSyncState = ({
      const rebasedPending = rebaseEvents({
        events: syncState.pending,
        baseEventId: newUpstreamHead,
-
+        isClientEvent,
      })
 
-      return
+      return MergeResultRebase.make({
        _tag: 'rebase',
        newSyncState: new SyncState({
          pending: rebasedPending,
-          rollbackTail: trimRollbackTail([...syncState.rollbackTail.slice(0, rollbackIndex), ...payload.newEvents]),
          upstreamHead: newUpstreamHead,
          localHead: rebasedPending.at(-1)?.id ?? newUpstreamHead,
        }),
        newEvents: [...payload.newEvents, ...rebasedPending],
-
-
+        rollbackEvents,
+        mergeContext,
      })
    }
 
+    // #region upstream-advance
    case 'upstream-advance': {
      if (payload.newEvents.length === 0) {
-        return
+        return MergeResultAdvance.make({
          _tag: 'advance',
          newSyncState: new SyncState({
            pending: syncState.pending,
-            rollbackTail: trimRollbackTail(syncState.rollbackTail),
            upstreamHead: syncState.upstreamHead,
            localHead: syncState.localHead,
          }),
          newEvents: [],
-
+          confirmedEvents: [],
+          mergeContext: mergeContext,
        })
      }
 
@@ -262,15 +250,29 @@ export const updateSyncState = ({
      for (let i = 1; i < payload.newEvents.length; i++) {
        if (EventId.isGreaterThan(payload.newEvents[i - 1]!.id, payload.newEvents[i]!.id)) {
          return unexpectedError(
-            `Events must be sorted in ascending order by eventId. Received: [${payload.newEvents.map((e) =>
+            `Events must be sorted in ascending order by eventId. Received: [${payload.newEvents.map((e) => EventId.toString(e.id)).join(', ')}]`,
          )
        }
      }
 
      // Validate that incoming events are larger than upstream head
-      if (
+      if (
+        EventId.isGreaterThan(syncState.upstreamHead, payload.newEvents[0]!.id) ||
+        EventId.isEqual(syncState.upstreamHead, payload.newEvents[0]!.id)
+      ) {
+        return unexpectedError(
+          `Incoming events must be greater than upstream head. Expected greater than: ${EventId.toString(syncState.upstreamHead)}. Received: [${payload.newEvents.map((e) => EventId.toString(e.id)).join(', ')}]`,
+        )
+      }
+
+      // Validate that the parent id of the first incoming event is known
+      const knownEventGlobalIds = [...syncState.pending].flatMap((e) => [e.id.global, e.parentId.global])
+      knownEventGlobalIds.push(syncState.upstreamHead.global)
+      const firstNewEvent = payload.newEvents[0]!
+      const hasUnknownParentId = knownEventGlobalIds.includes(firstNewEvent.parentId.global) === false
+      if (hasUnknownParentId) {
        return unexpectedError(
-          `Incoming events must
+          `Incoming events must have a known parent id. Received: [${payload.newEvents.map((e) => EventId.toString(e.id)).join(', ')}]`,
        )
      }
 
@@ -280,10 +282,11 @@ export const updateSyncState = ({
        existingEvents: syncState.pending,
        incomingEvents: payload.newEvents,
        isEqualEvent,
-
-
+        isClientEvent,
+        ignoreClientEvents,
      })
 
+      // No divergent pending events, thus we can just advance (some of) the pending events
      if (divergentPendingIndex === -1) {
        const pendingEventIds = new Set(syncState.pending.map((e) => `${e.id.global},${e.id.client}`))
        const newEvents = payload.newEvents.filter((e) => !pendingEventIds.has(`${e.id.global},${e.id.client}`))
@@ -292,17 +295,17 @@ export const updateSyncState = ({
        // we need to split the pending events into two groups:
        // - pendingMatching: The pending events up to point where they match the incoming events
        // - pendingRemaining: The pending events after the point where they match the incoming events
-        // The `
-        let
+        // The `clientIndexOffset` is used to account for the client events that are being ignored
+        let clientIndexOffset = 0
        const [pendingMatching, pendingRemaining] = ReadonlyArray.splitWhere(
          syncState.pending,
          (pendingEvent, index) => {
-            if (
-
+            if (ignoreClientEvents && isClientEvent(pendingEvent)) {
+              clientIndexOffset++
              return false
            }
 
-            const newEvent = payload.newEvents.at(index -
+            const newEvent = payload.newEvents.at(index - clientIndexOffset)
            if (!newEvent) {
              return true
            }
@@ -310,65 +313,57 @@ export const updateSyncState = ({
          },
        )
 
-
-        const pendingAndNewEvents = [...pendingMatching, ...payload.newEvents].filter((event) => {
-          const eventIdStr = `${event.id.global},${event.id.client}`
-          if (seenEventIds.has(eventIdStr)) {
-            return false
-          }
-          seenEventIds.add(eventIdStr)
-          return true
-        })
-
-        return UpdateResultAdvance.make({
+        return MergeResultAdvance.make({
          _tag: 'advance',
          newSyncState: new SyncState({
            pending: pendingRemaining,
-            rollbackTail: trimRollbackTail([...syncState.rollbackTail, ...pendingAndNewEvents]),
            upstreamHead: newUpstreamHead,
            localHead: pendingRemaining.at(-1)?.id ?? newUpstreamHead,
          }),
          newEvents,
-
+          confirmedEvents: pendingMatching,
+          mergeContext: mergeContext,
        })
      } else {
        const divergentPending = syncState.pending.slice(divergentPendingIndex)
        const rebasedPending = rebaseEvents({
          events: divergentPending,
          baseEventId: newUpstreamHead,
-
+          isClientEvent,
        })
 
        const divergentNewEventsIndex = findDivergencePoint({
          existingEvents: payload.newEvents,
          incomingEvents: syncState.pending,
          isEqualEvent,
-
-
+          isClientEvent,
+          ignoreClientEvents,
        })
 
-        return
+        return MergeResultRebase.make({
          _tag: 'rebase',
          newSyncState: new SyncState({
            pending: rebasedPending,
-            rollbackTail: trimRollbackTail([...syncState.rollbackTail, ...payload.newEvents]),
            upstreamHead: newUpstreamHead,
            localHead: rebasedPending.at(-1)!.id,
          }),
          newEvents: [...payload.newEvents.slice(divergentNewEventsIndex), ...rebasedPending],
-
-
+          rollbackEvents: divergentPending,
+          mergeContext,
        })
      }
    }
+    // #endregion
 
+    // This is the same as what's running in the sync backend
    case 'local-push': {
      if (payload.newEvents.length === 0) {
-        return
+        return MergeResultAdvance.make({
          _tag: 'advance',
          newSyncState: syncState,
          newEvents: [],
-
+          confirmedEvents: [],
+          mergeContext: mergeContext,
        })
      }
 
@@ -377,22 +372,22 @@ export const updateSyncState = ({
 
      if (invalidEventId) {
        const expectedMinimumId = EventId.nextPair(syncState.localHead, true).id
-        return
+        return MergeResultReject.make({
          _tag: 'reject',
          expectedMinimumId,
-
+          mergeContext,
        })
      } else {
-        return
+        return MergeResultAdvance.make({
          _tag: 'advance',
          newSyncState: new SyncState({
            pending: [...syncState.pending, ...payload.newEvents],
-            rollbackTail: syncState.rollbackTail,
            upstreamHead: syncState.upstreamHead,
            localHead: payload.newEvents.at(-1)!.id,
          }),
          newEvents: payload.newEvents,
-
+          confirmedEvents: [],
+          mergeContext: mergeContext,
        })
      }
    }
@@ -407,32 +402,32 @@ export const updateSyncState = ({
  * Gets the index relative to `existingEvents` where the divergence point is
  * by comparing each event in `existingEvents` to the corresponding event in `incomingEvents`
  */
-const findDivergencePoint = ({
+export const findDivergencePoint = ({
   existingEvents,
   incomingEvents,
   isEqualEvent,
-
-
+  isClientEvent,
+  ignoreClientEvents,
 }: {
   existingEvents: ReadonlyArray<MutationEvent.EncodedWithMeta>
   incomingEvents: ReadonlyArray<MutationEvent.EncodedWithMeta>
   isEqualEvent: (a: MutationEvent.EncodedWithMeta, b: MutationEvent.EncodedWithMeta) => boolean
-
-
+  isClientEvent: (event: MutationEvent.EncodedWithMeta) => boolean
+  ignoreClientEvents: boolean
 }): number => {
-  if (
-  const filteredExistingEvents = existingEvents.filter((event) => !
-  const
+  if (ignoreClientEvents) {
+    const filteredExistingEvents = existingEvents.filter((event) => !isClientEvent(event))
+    const divergencePointWithoutClientEvents = findDivergencePoint({
      existingEvents: filteredExistingEvents,
      incomingEvents,
      isEqualEvent,
-
-
+      isClientEvent,
+      ignoreClientEvents: false,
    })
 
-    if (
+    if (divergencePointWithoutClientEvents === -1) return -1
 
-    const divergencePointEventId = existingEvents[
+    const divergencePointEventId = existingEvents[divergencePointWithoutClientEvents]!.id
    // Now find the divergence point in the original array
    return existingEvents.findIndex((event) => EventId.isEqual(event.id, divergencePointEventId))
  }
@@ -447,15 +442,15 @@ const findDivergencePoint = ({
 const rebaseEvents = ({
   events,
   baseEventId,
-
+  isClientEvent,
 }: {
   events: ReadonlyArray<MutationEvent.EncodedWithMeta>
   baseEventId: EventId.EventId
-
+  isClientEvent: (event: MutationEvent.EncodedWithMeta) => boolean
 }): ReadonlyArray<MutationEvent.EncodedWithMeta> => {
   let prevEventId = baseEventId
   return events.map((event) => {
-    const isLocal =
+    const isLocal = isClientEvent(event)
     const newEvent = event.rebase(prevEventId, isLocal)
     prevEventId = newEvent.id
     return newEvent
@@ -469,4 +464,42 @@ const rebaseEvents = ({
  * it could make sense to "flatten" update results into a single update result which the client session
  * can process more efficiently which avoids push-threshing
  */
-const
+const _flattenMergeResults = (_updateResults: ReadonlyArray<MergeResult>) => {}
+
+const validateSyncState = (syncState: SyncState) => {
+  for (let i = 0; i < syncState.pending.length; i++) {
+    const event = syncState.pending[i]!
+    const nextEvent = syncState.pending[i + 1]
+    if (nextEvent === undefined) break // Reached end of chain
+
+    if (EventId.isGreaterThan(event.id, nextEvent.id)) {
+      shouldNeverHappen('Events must be sorted in ascending order by eventId', {
+        event,
+        nextEvent,
+      })
+    }
+
+    // If the global id has increased, then the client id must be 0
+    const globalIdHasIncreased = nextEvent.id.global > event.id.global
+    if (globalIdHasIncreased) {
+      if (nextEvent.id.client !== 0) {
+        shouldNeverHappen(
+          `New global events must point to clientId 0 in the parentId. Received: (${EventId.toString(nextEvent.id)})`,
+          syncState.pending,
+          {
+            event,
+            nextEvent,
+          },
+        )
+      }
+    } else {
+      // Otherwise, the parentId must be the same as the previous event's id
+      if (EventId.isEqual(nextEvent.parentId, event.id) === false) {
+        shouldNeverHappen('Events must be linked in a continuous chain via the parentId', syncState.pending, {
+          event,
+          nextEvent,
+        })
+      }
+    }
+  }
+}
package/src/version.ts
CHANGED
@@ -2,7 +2,7 @@
 // import packageJson from '../package.json' with { type: 'json' }
 // export const liveStoreVersion = packageJson.version
 
-export const liveStoreVersion = '0.3.0-dev.
+export const liveStoreVersion = '0.3.0-dev.26' as const
 
 /**
  * This version number is incremented whenever the internal storage format changes in a breaking way.

package/dist/leader-thread/pull-queue-set.d.ts
DELETED

@@ -1,7 +0,0 @@
-import { Effect, Queue } from '@livestore/utils/effect';
-import { LeaderThreadCtx, type PullQueueItem } from './types.js';
-export declare const makePullQueueSet: Effect.Effect<{
-    makeQueue: (since: import("../schema/EventId.js").EventId) => Effect.Effect<Queue.Queue<PullQueueItem>, import("../adapter-types.js").UnexpectedError, import("effect/Scope").Scope | LeaderThreadCtx>;
-    offer: (item: PullQueueItem) => Effect.Effect<void, import("../adapter-types.js").UnexpectedError, LeaderThreadCtx>;
-}, never, import("effect/Scope").Scope>;
-//# sourceMappingURL=pull-queue-set.d.ts.map

package/dist/leader-thread/pull-queue-set.d.ts.map
DELETED

@@ -1 +0,0 @@
-{"version":3,"file":"pull-queue-set.d.ts","sourceRoot":"","sources":["../../src/leader-thread/pull-queue-set.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,yBAAyB,CAAA;AAIvD,OAAO,EAAE,eAAe,EAAE,KAAK,aAAa,EAAqB,MAAM,YAAY,CAAA;AAEnF,eAAO,MAAM,gBAAgB;kEA6D+8F,OAAQ,MAAM,CAAC,MAAM,KAAK;oCAAkG,OAAQ,MAAM;uCADpnG,CAAA"}
|