@livestore/common 0.0.0-snapshot-2b8a9de3ec1a701aca891ebc2c98eb328274ae9e → 0.0.0-snapshot-2c861249e50661661613204300b1fc0d902c2e46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (287) hide show
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/__tests__/fixture.d.ts +83 -221
  3. package/dist/__tests__/fixture.d.ts.map +1 -1
  4. package/dist/__tests__/fixture.js +33 -11
  5. package/dist/__tests__/fixture.js.map +1 -1
  6. package/dist/adapter-types.d.ts +36 -22
  7. package/dist/adapter-types.d.ts.map +1 -1
  8. package/dist/adapter-types.js +20 -8
  9. package/dist/adapter-types.js.map +1 -1
  10. package/dist/debug-info.d.ts.map +1 -1
  11. package/dist/debug-info.js +1 -0
  12. package/dist/debug-info.js.map +1 -1
  13. package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
  14. package/dist/devtools/devtools-messages-common.d.ts +13 -6
  15. package/dist/devtools/devtools-messages-common.d.ts.map +1 -1
  16. package/dist/devtools/devtools-messages-common.js +6 -0
  17. package/dist/devtools/devtools-messages-common.js.map +1 -1
  18. package/dist/devtools/devtools-messages-leader.d.ts +46 -46
  19. package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
  20. package/dist/devtools/devtools-messages-leader.js +12 -13
  21. package/dist/devtools/devtools-messages-leader.js.map +1 -1
  22. package/dist/index.d.ts +2 -5
  23. package/dist/index.d.ts.map +1 -1
  24. package/dist/index.js +2 -5
  25. package/dist/index.js.map +1 -1
  26. package/dist/leader-thread/LeaderSyncProcessor.d.ts +34 -12
  27. package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
  28. package/dist/leader-thread/LeaderSyncProcessor.js +284 -226
  29. package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
  30. package/dist/leader-thread/apply-event.d.ts +16 -0
  31. package/dist/leader-thread/apply-event.d.ts.map +1 -0
  32. package/dist/leader-thread/apply-event.js +122 -0
  33. package/dist/leader-thread/apply-event.js.map +1 -0
  34. package/dist/leader-thread/eventlog.d.ts +27 -0
  35. package/dist/leader-thread/eventlog.d.ts.map +1 -0
  36. package/dist/leader-thread/eventlog.js +123 -0
  37. package/dist/leader-thread/eventlog.js.map +1 -0
  38. package/dist/leader-thread/leader-worker-devtools.d.ts.map +1 -1
  39. package/dist/leader-thread/leader-worker-devtools.js +22 -23
  40. package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
  41. package/dist/leader-thread/make-leader-thread-layer.d.ts +16 -4
  42. package/dist/leader-thread/make-leader-thread-layer.d.ts.map +1 -1
  43. package/dist/leader-thread/make-leader-thread-layer.js +36 -41
  44. package/dist/leader-thread/make-leader-thread-layer.js.map +1 -1
  45. package/dist/leader-thread/mod.d.ts +1 -1
  46. package/dist/leader-thread/mod.d.ts.map +1 -1
  47. package/dist/leader-thread/mod.js +1 -1
  48. package/dist/leader-thread/mod.js.map +1 -1
  49. package/dist/leader-thread/recreate-db.d.ts.map +1 -1
  50. package/dist/leader-thread/recreate-db.js +7 -7
  51. package/dist/leader-thread/recreate-db.js.map +1 -1
  52. package/dist/leader-thread/types.d.ts +40 -25
  53. package/dist/leader-thread/types.d.ts.map +1 -1
  54. package/dist/leader-thread/types.js.map +1 -1
  55. package/dist/materializer-helper.d.ts +23 -0
  56. package/dist/materializer-helper.d.ts.map +1 -0
  57. package/dist/materializer-helper.js +70 -0
  58. package/dist/materializer-helper.js.map +1 -0
  59. package/dist/query-builder/api.d.ts +55 -50
  60. package/dist/query-builder/api.d.ts.map +1 -1
  61. package/dist/query-builder/api.js +3 -5
  62. package/dist/query-builder/api.js.map +1 -1
  63. package/dist/query-builder/astToSql.d.ts.map +1 -1
  64. package/dist/query-builder/astToSql.js +59 -37
  65. package/dist/query-builder/astToSql.js.map +1 -1
  66. package/dist/query-builder/impl.d.ts +2 -3
  67. package/dist/query-builder/impl.d.ts.map +1 -1
  68. package/dist/query-builder/impl.js +47 -43
  69. package/dist/query-builder/impl.js.map +1 -1
  70. package/dist/query-builder/impl.test.d.ts +86 -1
  71. package/dist/query-builder/impl.test.d.ts.map +1 -1
  72. package/dist/query-builder/impl.test.js +223 -36
  73. package/dist/query-builder/impl.test.js.map +1 -1
  74. package/dist/rehydrate-from-eventlog.d.ts +15 -0
  75. package/dist/rehydrate-from-eventlog.d.ts.map +1 -0
  76. package/dist/{rehydrate-from-mutationlog.js → rehydrate-from-eventlog.js} +27 -28
  77. package/dist/rehydrate-from-eventlog.js.map +1 -0
  78. package/dist/schema/EventDef.d.ts +136 -0
  79. package/dist/schema/EventDef.d.ts.map +1 -0
  80. package/dist/schema/EventDef.js +58 -0
  81. package/dist/schema/EventDef.js.map +1 -0
  82. package/dist/schema/EventId.d.ts +10 -1
  83. package/dist/schema/EventId.d.ts.map +1 -1
  84. package/dist/schema/EventId.js +24 -3
  85. package/dist/schema/EventId.js.map +1 -1
  86. package/dist/schema/LiveStoreEvent.d.ts +255 -0
  87. package/dist/schema/LiveStoreEvent.d.ts.map +1 -0
  88. package/dist/schema/LiveStoreEvent.js +118 -0
  89. package/dist/schema/LiveStoreEvent.js.map +1 -0
  90. package/dist/schema/client-document-def.d.ts +223 -0
  91. package/dist/schema/client-document-def.d.ts.map +1 -0
  92. package/dist/schema/client-document-def.js +164 -0
  93. package/dist/schema/client-document-def.js.map +1 -0
  94. package/dist/schema/client-document-def.test.d.ts +2 -0
  95. package/dist/schema/client-document-def.test.d.ts.map +1 -0
  96. package/dist/schema/client-document-def.test.js +161 -0
  97. package/dist/schema/client-document-def.test.js.map +1 -0
  98. package/dist/schema/db-schema/dsl/mod.d.ts.map +1 -1
  99. package/dist/schema/events.d.ts +2 -0
  100. package/dist/schema/events.d.ts.map +1 -0
  101. package/dist/schema/events.js +2 -0
  102. package/dist/schema/events.js.map +1 -0
  103. package/dist/schema/mod.d.ts +4 -3
  104. package/dist/schema/mod.d.ts.map +1 -1
  105. package/dist/schema/mod.js +4 -3
  106. package/dist/schema/mod.js.map +1 -1
  107. package/dist/schema/schema.d.ts +26 -22
  108. package/dist/schema/schema.d.ts.map +1 -1
  109. package/dist/schema/schema.js +45 -43
  110. package/dist/schema/schema.js.map +1 -1
  111. package/dist/schema/sqlite-state.d.ts +12 -0
  112. package/dist/schema/sqlite-state.d.ts.map +1 -0
  113. package/dist/schema/sqlite-state.js +36 -0
  114. package/dist/schema/sqlite-state.js.map +1 -0
  115. package/dist/schema/system-tables.d.ts +121 -85
  116. package/dist/schema/system-tables.d.ts.map +1 -1
  117. package/dist/schema/system-tables.js +68 -43
  118. package/dist/schema/system-tables.js.map +1 -1
  119. package/dist/schema/table-def.d.ts +26 -96
  120. package/dist/schema/table-def.d.ts.map +1 -1
  121. package/dist/schema/table-def.js +14 -64
  122. package/dist/schema/table-def.js.map +1 -1
  123. package/dist/schema/view.d.ts +3 -0
  124. package/dist/schema/view.d.ts.map +1 -0
  125. package/dist/schema/view.js +3 -0
  126. package/dist/schema/view.js.map +1 -0
  127. package/dist/schema-management/common.d.ts +4 -4
  128. package/dist/schema-management/common.d.ts.map +1 -1
  129. package/dist/schema-management/migrations.d.ts.map +1 -1
  130. package/dist/schema-management/migrations.js +6 -6
  131. package/dist/schema-management/migrations.js.map +1 -1
  132. package/dist/schema-management/validate-mutation-defs.d.ts +3 -3
  133. package/dist/schema-management/validate-mutation-defs.d.ts.map +1 -1
  134. package/dist/schema-management/validate-mutation-defs.js +17 -17
  135. package/dist/schema-management/validate-mutation-defs.js.map +1 -1
  136. package/dist/sync/ClientSessionSyncProcessor.d.ts +16 -8
  137. package/dist/sync/ClientSessionSyncProcessor.d.ts.map +1 -1
  138. package/dist/sync/ClientSessionSyncProcessor.js +50 -43
  139. package/dist/sync/ClientSessionSyncProcessor.js.map +1 -1
  140. package/dist/sync/next/facts.d.ts +19 -19
  141. package/dist/sync/next/facts.d.ts.map +1 -1
  142. package/dist/sync/next/facts.js +2 -2
  143. package/dist/sync/next/facts.js.map +1 -1
  144. package/dist/sync/next/history-dag-common.d.ts +3 -3
  145. package/dist/sync/next/history-dag-common.d.ts.map +1 -1
  146. package/dist/sync/next/history-dag-common.js +1 -1
  147. package/dist/sync/next/history-dag-common.js.map +1 -1
  148. package/dist/sync/next/history-dag.js +1 -1
  149. package/dist/sync/next/history-dag.js.map +1 -1
  150. package/dist/sync/next/rebase-events.d.ts +7 -7
  151. package/dist/sync/next/rebase-events.d.ts.map +1 -1
  152. package/dist/sync/next/rebase-events.js +5 -5
  153. package/dist/sync/next/rebase-events.js.map +1 -1
  154. package/dist/sync/next/test/compact-events.calculator.test.js +38 -33
  155. package/dist/sync/next/test/compact-events.calculator.test.js.map +1 -1
  156. package/dist/sync/next/test/compact-events.test.js +71 -71
  157. package/dist/sync/next/test/compact-events.test.js.map +1 -1
  158. package/dist/sync/next/test/{mutation-fixtures.d.ts → event-fixtures.d.ts} +25 -25
  159. package/dist/sync/next/test/event-fixtures.d.ts.map +1 -0
  160. package/dist/sync/next/test/{mutation-fixtures.js → event-fixtures.js} +60 -25
  161. package/dist/sync/next/test/event-fixtures.js.map +1 -0
  162. package/dist/sync/next/test/mod.d.ts +1 -1
  163. package/dist/sync/next/test/mod.d.ts.map +1 -1
  164. package/dist/sync/next/test/mod.js +1 -1
  165. package/dist/sync/next/test/mod.js.map +1 -1
  166. package/dist/sync/sync.d.ts +8 -7
  167. package/dist/sync/sync.d.ts.map +1 -1
  168. package/dist/sync/sync.js.map +1 -1
  169. package/dist/sync/syncstate.d.ts +69 -93
  170. package/dist/sync/syncstate.d.ts.map +1 -1
  171. package/dist/sync/syncstate.js +143 -146
  172. package/dist/sync/syncstate.js.map +1 -1
  173. package/dist/sync/syncstate.test.js +208 -289
  174. package/dist/sync/syncstate.test.js.map +1 -1
  175. package/dist/sync/validate-push-payload.d.ts +2 -2
  176. package/dist/sync/validate-push-payload.d.ts.map +1 -1
  177. package/dist/sync/validate-push-payload.js.map +1 -1
  178. package/dist/version.d.ts +1 -1
  179. package/dist/version.js +1 -1
  180. package/package.json +2 -2
  181. package/src/__tests__/fixture.ts +36 -15
  182. package/src/adapter-types.ts +34 -23
  183. package/src/debug-info.ts +1 -0
  184. package/src/devtools/devtools-messages-common.ts +9 -0
  185. package/src/devtools/devtools-messages-leader.ts +14 -15
  186. package/src/index.ts +2 -5
  187. package/src/leader-thread/LeaderSyncProcessor.ts +485 -389
  188. package/src/leader-thread/apply-event.ts +197 -0
  189. package/src/leader-thread/eventlog.ts +199 -0
  190. package/src/leader-thread/leader-worker-devtools.ts +23 -25
  191. package/src/leader-thread/make-leader-thread-layer.ts +68 -61
  192. package/src/leader-thread/mod.ts +1 -1
  193. package/src/leader-thread/recreate-db.ts +7 -8
  194. package/src/leader-thread/types.ts +39 -29
  195. package/src/materializer-helper.ts +110 -0
  196. package/src/query-builder/api.ts +76 -102
  197. package/src/query-builder/astToSql.ts +68 -39
  198. package/src/query-builder/impl.test.ts +239 -42
  199. package/src/query-builder/impl.ts +66 -54
  200. package/src/{rehydrate-from-mutationlog.ts → rehydrate-from-eventlog.ts} +37 -40
  201. package/src/schema/EventDef.ts +216 -0
  202. package/src/schema/EventId.ts +30 -4
  203. package/src/schema/LiveStoreEvent.ts +239 -0
  204. package/src/schema/client-document-def.test.ts +188 -0
  205. package/src/schema/client-document-def.ts +436 -0
  206. package/src/schema/db-schema/dsl/mod.ts +0 -1
  207. package/src/schema/events.ts +1 -0
  208. package/src/schema/mod.ts +4 -3
  209. package/src/schema/schema.ts +78 -68
  210. package/src/schema/sqlite-state.ts +62 -0
  211. package/src/schema/system-tables.ts +54 -46
  212. package/src/schema/table-def.ts +51 -209
  213. package/src/schema/view.ts +2 -0
  214. package/src/schema-management/common.ts +4 -4
  215. package/src/schema-management/migrations.ts +8 -9
  216. package/src/schema-management/validate-mutation-defs.ts +22 -24
  217. package/src/sync/ClientSessionSyncProcessor.ts +66 -53
  218. package/src/sync/next/facts.ts +31 -32
  219. package/src/sync/next/history-dag-common.ts +4 -4
  220. package/src/sync/next/history-dag.ts +1 -1
  221. package/src/sync/next/rebase-events.ts +13 -13
  222. package/src/sync/next/test/compact-events.calculator.test.ts +45 -45
  223. package/src/sync/next/test/compact-events.test.ts +73 -73
  224. package/src/sync/next/test/event-fixtures.ts +219 -0
  225. package/src/sync/next/test/mod.ts +1 -1
  226. package/src/sync/sync.ts +9 -12
  227. package/src/sync/syncstate.test.ts +236 -323
  228. package/src/sync/syncstate.ts +218 -203
  229. package/src/sync/validate-push-payload.ts +2 -2
  230. package/src/version.ts +1 -1
  231. package/tsconfig.json +1 -0
  232. package/dist/derived-mutations.d.ts +0 -109
  233. package/dist/derived-mutations.d.ts.map +0 -1
  234. package/dist/derived-mutations.js +0 -54
  235. package/dist/derived-mutations.js.map +0 -1
  236. package/dist/derived-mutations.test.d.ts +0 -2
  237. package/dist/derived-mutations.test.d.ts.map +0 -1
  238. package/dist/derived-mutations.test.js +0 -93
  239. package/dist/derived-mutations.test.js.map +0 -1
  240. package/dist/init-singleton-tables.d.ts +0 -4
  241. package/dist/init-singleton-tables.d.ts.map +0 -1
  242. package/dist/init-singleton-tables.js +0 -16
  243. package/dist/init-singleton-tables.js.map +0 -1
  244. package/dist/leader-thread/apply-mutation.d.ts +0 -11
  245. package/dist/leader-thread/apply-mutation.d.ts.map +0 -1
  246. package/dist/leader-thread/apply-mutation.js +0 -115
  247. package/dist/leader-thread/apply-mutation.js.map +0 -1
  248. package/dist/leader-thread/mutationlog.d.ts +0 -11
  249. package/dist/leader-thread/mutationlog.d.ts.map +0 -1
  250. package/dist/leader-thread/mutationlog.js +0 -31
  251. package/dist/leader-thread/mutationlog.js.map +0 -1
  252. package/dist/leader-thread/pull-queue-set.d.ts +0 -7
  253. package/dist/leader-thread/pull-queue-set.d.ts.map +0 -1
  254. package/dist/leader-thread/pull-queue-set.js +0 -48
  255. package/dist/leader-thread/pull-queue-set.js.map +0 -1
  256. package/dist/mutation.d.ts +0 -20
  257. package/dist/mutation.d.ts.map +0 -1
  258. package/dist/mutation.js +0 -68
  259. package/dist/mutation.js.map +0 -1
  260. package/dist/query-info.d.ts +0 -41
  261. package/dist/query-info.d.ts.map +0 -1
  262. package/dist/query-info.js +0 -7
  263. package/dist/query-info.js.map +0 -1
  264. package/dist/rehydrate-from-mutationlog.d.ts +0 -14
  265. package/dist/rehydrate-from-mutationlog.d.ts.map +0 -1
  266. package/dist/rehydrate-from-mutationlog.js.map +0 -1
  267. package/dist/schema/MutationEvent.d.ts +0 -202
  268. package/dist/schema/MutationEvent.d.ts.map +0 -1
  269. package/dist/schema/MutationEvent.js +0 -105
  270. package/dist/schema/MutationEvent.js.map +0 -1
  271. package/dist/schema/mutations.d.ts +0 -115
  272. package/dist/schema/mutations.d.ts.map +0 -1
  273. package/dist/schema/mutations.js +0 -42
  274. package/dist/schema/mutations.js.map +0 -1
  275. package/dist/sync/next/test/mutation-fixtures.d.ts.map +0 -1
  276. package/dist/sync/next/test/mutation-fixtures.js.map +0 -1
  277. package/src/derived-mutations.test.ts +0 -101
  278. package/src/derived-mutations.ts +0 -170
  279. package/src/init-singleton-tables.ts +0 -24
  280. package/src/leader-thread/apply-mutation.ts +0 -187
  281. package/src/leader-thread/mutationlog.ts +0 -49
  282. package/src/leader-thread/pull-queue-set.ts +0 -67
  283. package/src/mutation.ts +0 -108
  284. package/src/query-info.ts +0 -83
  285. package/src/schema/MutationEvent.ts +0 -224
  286. package/src/schema/mutations.ts +0 -193
  287. package/src/sync/next/test/mutation-fixtures.ts +0 -228
@@ -1,52 +1,61 @@
1
1
  import { casesHandled, isNotUndefined, LS_DEV, shouldNeverHappen, TRACE_VERBOSE } from '@livestore/utils';
2
- import { BucketQueue, Deferred, Effect, Exit, FiberHandle, Option, OtelTracer, ReadonlyArray, Schema, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
2
+ import { BucketQueue, Deferred, Effect, Exit, FiberHandle, OtelTracer, Queue, ReadonlyArray, Stream, Subscribable, SubscriptionRef, } from '@livestore/utils/effect';
3
3
  import { UnexpectedError } from '../adapter-types.js';
4
- import { EventId, getMutationDef, MUTATION_LOG_META_TABLE, MutationEvent, mutationLogMetaTable, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
5
- import { updateRows } from '../sql-queries/index.js';
4
+ import { EventId, getEventDef, LEADER_MERGE_COUNTER_TABLE, LiveStoreEvent, SESSION_CHANGESET_META_TABLE, } from '../schema/mod.js';
6
5
  import { LeaderAheadError } from '../sync/sync.js';
7
6
  import * as SyncState from '../sync/syncstate.js';
8
7
  import { sql } from '../util.js';
9
- import { makeApplyMutation } from './apply-mutation.js';
10
- import { execSql } from './connection.js';
11
- import { getBackendHeadFromDb, getClientHeadFromDb, getMutationEventsSince, updateBackendHead } from './mutationlog.js';
8
+ import { rollback } from './apply-event.js';
9
+ import * as Eventlog from './eventlog.js';
12
10
  import { LeaderThreadCtx } from './types.js';
13
- export const BACKEND_PUSH_BATCH_SIZE = 50;
14
11
  /**
15
- * The LeaderSyncProcessor manages synchronization of mutations between
12
+ * The LeaderSyncProcessor manages synchronization of events between
16
13
  * the local state and the sync backend, ensuring efficient and orderly processing.
17
14
  *
18
15
  * In the LeaderSyncProcessor, pulling always has precedence over pushing.
19
16
  *
20
17
  * Responsibilities:
21
- * - Queueing incoming local mutations in a localPushMailbox.
22
- * - Broadcasting mutations to client sessions via pull queues.
23
- * - Pushing mutations to the sync backend.
18
+ * - Queueing incoming local events in a localPushesQueue.
19
+ * - Broadcasting events to client sessions via pull queues.
20
+ * - Pushing events to the sync backend.
24
21
  *
25
22
  * Notes:
26
23
  *
27
24
  * local push processing:
28
- * - localPushMailbox:
25
+ * - localPushesQueue:
29
26
  * - Maintains events in ascending order.
30
27
  * - Uses `Deferred` objects to resolve/reject events based on application success.
31
- * - Processes events from the mailbox, applying mutations in batches.
28
+ * - Processes events from the queue, applying events in batches.
32
29
  * - Controlled by a `Latch` to manage execution flow.
33
30
  * - The latch closes on pull receipt and re-opens post-pull completion.
34
31
  * - Processes up to `maxBatchSize` events per cycle.
35
32
  *
33
+ * Currently we're advancing the db read model and eventlog in lockstep, but we could also decouple this in the future
34
+ *
35
+ * Tricky concurrency scenarios:
36
+ * - Queued local push batches becoming invalid due to a prior local push item being rejected.
37
+ * Solution: Introduce a generation number for local push batches which is used to filter out old batch items in case of rejection.
38
+ *
36
39
  */
37
- export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clientId, initialBlockingSyncContext, onError, }) => Effect.gen(function* () {
38
- const syncBackendQueue = yield* BucketQueue.make();
40
+ export const makeLeaderSyncProcessor = ({ schema, dbEventlogMissing, dbEventlog, dbReadModel, dbReadModelMissing, initialBlockingSyncContext, onError, params, testing, }) => Effect.gen(function* () {
41
+ const syncBackendPushQueue = yield* BucketQueue.make();
42
+ const localPushBatchSize = params.localPushBatchSize ?? 10;
43
+ const backendPushBatchSize = params.backendPushBatchSize ?? 50;
39
44
  const syncStateSref = yield* SubscriptionRef.make(undefined);
40
- const isClientEvent = (mutationEventEncoded) => {
41
- const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
42
- return mutationDef.options.clientOnly;
45
+ const isClientEvent = (eventEncoded) => {
46
+ const eventDef = getEventDef(schema, eventEncoded.name);
47
+ return eventDef.eventDef.options.clientOnly;
43
48
  };
49
+ const connectedClientSessionPullQueues = yield* makePullQueueSet;
44
50
  /**
45
51
  * Tracks generations of queued local push events.
46
- * If a batch is rejected, all subsequent push queue items with the same generation are also rejected,
52
+ * If a local-push batch is rejected, all subsequent push queue items with the same generation are also rejected,
47
53
  * even if they would be valid on their own.
48
54
  */
55
+ // TODO get rid of this in favour of the `mergeGeneration` event id field
49
56
  const currentLocalPushGenerationRef = { current: 0 };
57
+ const mergeCounterRef = { current: dbReadModelMissing ? 0 : yield* getMergeCounterFromDb(dbReadModel) };
58
+ const mergePayloads = new Map();
50
59
  // This context depends on data from `boot`, we should find a better implementation to avoid this ref indirection.
51
60
  const ctxRef = {
52
61
  current: undefined,
@@ -54,90 +63,96 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
54
63
  const localPushesQueue = yield* BucketQueue.make();
55
64
  const localPushesLatch = yield* Effect.makeLatch(true);
56
65
  const pullLatch = yield* Effect.makeLatch(true);
66
+ /**
67
+ * Additionally to the `syncStateSref` we also need the `pushHeadRef` in order to prevent old/duplicate
68
+ * events from being pushed in a scenario like this:
69
+ * - client session A pushes e1
70
+ * - leader sync processor takes a bit and hasn't yet taken e1 from the localPushesQueue
71
+ * - client session B also pushes e1 (which should be rejected)
72
+ *
73
+ * Thus the purpose of the pushHeadRef is to guard the integrity of the local push queue
74
+ */
75
+ const pushHeadRef = { current: EventId.ROOT };
76
+ const advancePushHead = (eventId) => {
77
+ pushHeadRef.current = EventId.max(pushHeadRef.current, eventId);
78
+ };
79
+ // NOTE: New events are only pushed to sync backend after successful local push processing
57
80
  const push = (newEvents, options) => Effect.gen(function* () {
58
- // TODO validate batch
59
81
  if (newEvents.length === 0)
60
82
  return;
61
- // if (options.generation < currentLocalPushGenerationRef.current) {
62
- // debugger
63
- // // We can safely drop this batch as it's from a previous push generation
64
- // return
65
- // }
66
- if (clientId === 'client-b') {
67
- // console.log(
68
- // 'push from client session',
69
- // newEvents.map((item) => item.toJSON()),
70
- // )
71
- }
83
+ yield* validatePushBatch(newEvents, pushHeadRef.current);
84
+ advancePushHead(newEvents.at(-1).id);
72
85
  const waitForProcessing = options?.waitForProcessing ?? false;
73
86
  const generation = currentLocalPushGenerationRef.current;
74
87
  if (waitForProcessing) {
75
88
  const deferreds = yield* Effect.forEach(newEvents, () => Deferred.make());
76
- const items = newEvents.map((mutationEventEncoded, i) => [mutationEventEncoded, deferreds[i], generation]);
89
+ const items = newEvents.map((eventEncoded, i) => [eventEncoded, deferreds[i], generation]);
77
90
  yield* BucketQueue.offerAll(localPushesQueue, items);
78
91
  yield* Effect.all(deferreds);
79
92
  }
80
93
  else {
81
- const items = newEvents.map((mutationEventEncoded) => [mutationEventEncoded, undefined, generation]);
94
+ const items = newEvents.map((eventEncoded) => [eventEncoded, undefined, generation]);
82
95
  yield* BucketQueue.offerAll(localPushesQueue, items);
83
96
  }
84
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:local-push', {
97
+ }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:push', {
85
98
  attributes: {
86
99
  batchSize: newEvents.length,
87
100
  batch: TRACE_VERBOSE ? newEvents : undefined,
88
101
  },
89
102
  links: ctxRef.current?.span ? [{ _tag: 'SpanLink', span: ctxRef.current.span, attributes: {} }] : undefined,
90
103
  }));
91
- const pushPartial = ({ mutationEvent: partialMutationEvent, clientId, sessionId, }) => Effect.gen(function* () {
104
+ const pushPartial = ({ event: { name, args }, clientId, sessionId }) => Effect.gen(function* () {
92
105
  const syncState = yield* syncStateSref;
93
106
  if (syncState === undefined)
94
107
  return shouldNeverHappen('Not initialized');
95
- const mutationDef = getMutationDef(schema, partialMutationEvent.mutation);
96
- const mutationEventEncoded = new MutationEvent.EncodedWithMeta({
97
- ...partialMutationEvent,
108
+ const eventDef = getEventDef(schema, name);
109
+ const eventEncoded = new LiveStoreEvent.EncodedWithMeta({
110
+ name,
111
+ args,
98
112
  clientId,
99
113
  sessionId,
100
- ...EventId.nextPair(syncState.localHead, mutationDef.options.clientOnly),
114
+ ...EventId.nextPair(syncState.localHead, eventDef.eventDef.options.clientOnly),
101
115
  });
102
- yield* push([mutationEventEncoded]);
116
+ yield* push([eventEncoded]);
103
117
  }).pipe(Effect.catchTag('LeaderAheadError', Effect.orDie));
104
118
  // Starts various background loops
105
- const boot = ({ dbReady }) => Effect.gen(function* () {
119
+ const boot = Effect.gen(function* () {
106
120
  const span = yield* Effect.currentSpan.pipe(Effect.orDie);
107
121
  const otelSpan = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)));
108
122
  const { devtools, shutdownChannel } = yield* LeaderThreadCtx;
123
+ const runtime = yield* Effect.runtime();
109
124
  ctxRef.current = {
110
125
  otelSpan,
111
126
  span,
112
127
  devtoolsLatch: devtools.enabled ? devtools.syncBackendLatch : undefined,
128
+ runtime,
113
129
  };
114
- const initialBackendHead = dbMissing ? EventId.ROOT.global : getBackendHeadFromDb(dbMutationLog);
115
- const initialLocalHead = dbMissing ? EventId.ROOT : getClientHeadFromDb(dbMutationLog);
130
+ const initialLocalHead = dbEventlogMissing ? EventId.ROOT : Eventlog.getClientHeadFromDb(dbEventlog);
131
+ const initialBackendHead = dbEventlogMissing ? EventId.ROOT.global : Eventlog.getBackendHeadFromDb(dbEventlog);
116
132
  if (initialBackendHead > initialLocalHead.global) {
117
133
  return shouldNeverHappen(`During boot the backend head (${initialBackendHead}) should never be greater than the local head (${initialLocalHead.global})`);
118
134
  }
119
- const pendingMutationEvents = yield* getMutationEventsSince({
120
- global: initialBackendHead,
121
- client: EventId.clientDefault,
122
- }).pipe(Effect.map(ReadonlyArray.map((_) => new MutationEvent.EncodedWithMeta(_))));
135
+ const pendingEvents = dbEventlogMissing
136
+ ? []
137
+ : yield* Eventlog.getEventsSince({ global: initialBackendHead, client: EventId.clientDefault });
123
138
  const initialSyncState = new SyncState.SyncState({
124
- pending: pendingMutationEvents,
125
- // On the leader we don't need a rollback tail beyond `pending` items
126
- rollbackTail: [],
139
+ pending: pendingEvents,
127
140
  upstreamHead: { global: initialBackendHead, client: EventId.clientDefault },
128
141
  localHead: initialLocalHead,
129
142
  });
130
143
  /** State transitions need to happen atomically, so we use a Ref to track the state */
131
144
  yield* SubscriptionRef.set(syncStateSref, initialSyncState);
132
145
  // Rehydrate sync queue
133
- if (pendingMutationEvents.length > 0) {
134
- const filteredBatch = pendingMutationEvents
135
- // Don't sync clientOnly mutations
136
- .filter((mutationEventEncoded) => {
137
- const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
138
- return mutationDef.options.clientOnly === false;
146
+ if (pendingEvents.length > 0) {
147
+ const globalPendingEvents = pendingEvents
148
+ // Don't sync clientOnly events
149
+ .filter((eventEncoded) => {
150
+ const eventDef = getEventDef(schema, eventEncoded.name);
151
+ return eventDef.eventDef.options.clientOnly === false;
139
152
  });
140
- yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
153
+ if (globalPendingEvents.length > 0) {
154
+ yield* BucketQueue.offerAll(syncBackendPushQueue, globalPendingEvents);
155
+ }
141
156
  }
142
157
  const shutdownOnError = (cause) => Effect.gen(function* () {
143
158
  if (onError === 'shutdown') {
@@ -150,36 +165,38 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
150
165
  localPushesQueue,
151
166
  pullLatch,
152
167
  syncStateSref,
153
- syncBackendQueue,
168
+ syncBackendPushQueue,
154
169
  schema,
155
170
  isClientEvent,
156
171
  otelSpan,
157
172
  currentLocalPushGenerationRef,
173
+ connectedClientSessionPullQueues,
174
+ mergeCounterRef,
175
+ mergePayloads,
176
+ localPushBatchSize,
177
+ testing: {
178
+ delay: testing?.delays?.localPushProcessing,
179
+ },
158
180
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
159
181
  const backendPushingFiberHandle = yield* FiberHandle.make();
160
- yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
161
- dbReady,
162
- syncBackendQueue,
182
+ const backendPushingEffect = backgroundBackendPushing({
183
+ syncBackendPushQueue,
163
184
  otelSpan,
164
185
  devtoolsLatch: ctxRef.current?.devtoolsLatch,
165
- }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
186
+ backendPushBatchSize,
187
+ }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError));
188
+ yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect);
166
189
  yield* backgroundBackendPulling({
167
- dbReady,
168
190
  initialBackendHead,
169
191
  isClientEvent,
170
192
  restartBackendPushing: (filteredRebasedPending) => Effect.gen(function* () {
171
193
  // Stop current pushing fiber
172
194
  yield* FiberHandle.clear(backendPushingFiberHandle);
173
- // Reset the sync queue
174
- yield* BucketQueue.clear(syncBackendQueue);
175
- yield* BucketQueue.offerAll(syncBackendQueue, filteredRebasedPending);
195
+ // Reset the sync backend push queue
196
+ yield* BucketQueue.clear(syncBackendPushQueue);
197
+ yield* BucketQueue.offerAll(syncBackendPushQueue, filteredRebasedPending);
176
198
  // Restart pushing fiber
177
- yield* FiberHandle.run(backendPushingFiberHandle, backgroundBackendPushing({
178
- dbReady,
179
- syncBackendQueue,
180
- otelSpan,
181
- devtoolsLatch: ctxRef.current?.devtoolsLatch,
182
- }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError)));
199
+ yield* FiberHandle.run(backendPushingFiberHandle, backendPushingEffect);
183
200
  }),
184
201
  syncStateSref,
185
202
  localPushesLatch,
@@ -187,30 +204,68 @@ export const makeLeaderSyncProcessor = ({ schema, dbMissing, dbMutationLog, clie
187
204
  otelSpan,
188
205
  initialBlockingSyncContext,
189
206
  devtoolsLatch: ctxRef.current?.devtoolsLatch,
207
+ connectedClientSessionPullQueues,
208
+ mergeCounterRef,
209
+ mergePayloads,
210
+ advancePushHead,
190
211
  }).pipe(Effect.tapCauseLogPretty, Effect.catchAllCause(shutdownOnError), Effect.forkScoped);
191
212
  return { initialLeaderHead: initialLocalHead };
192
- }).pipe(Effect.withSpanScoped('@livestore/common:leader-thread:syncing'));
213
+ }).pipe(Effect.withSpanScoped('@livestore/common:LeaderSyncProcessor:boot'));
214
+ const pull = ({ cursor }) => Effect.gen(function* () {
215
+ const queue = yield* pullQueue({ cursor });
216
+ return Stream.fromQueue(queue);
217
+ }).pipe(Stream.unwrapScoped);
218
+ const pullQueue = ({ cursor }) => {
219
+ const runtime = ctxRef.current?.runtime ?? shouldNeverHappen('Not initialized');
220
+ return Effect.gen(function* () {
221
+ const queue = yield* connectedClientSessionPullQueues.makeQueue;
222
+ const payloadsSinceCursor = Array.from(mergePayloads.entries())
223
+ .map(([mergeCounter, payload]) => ({ payload, mergeCounter }))
224
+ .filter(({ mergeCounter }) => mergeCounter > cursor.mergeCounter)
225
+ .toSorted((a, b) => a.mergeCounter - b.mergeCounter)
226
+ .map(({ payload, mergeCounter }) => {
227
+ if (payload._tag === 'upstream-advance') {
228
+ return {
229
+ payload: {
230
+ _tag: 'upstream-advance',
231
+ newEvents: ReadonlyArray.dropWhile(payload.newEvents, (eventEncoded) => EventId.isGreaterThanOrEqual(cursor.eventId, eventEncoded.id)),
232
+ },
233
+ mergeCounter,
234
+ };
235
+ }
236
+ else {
237
+ return { payload, mergeCounter };
238
+ }
239
+ });
240
+ yield* queue.offerAll(payloadsSinceCursor);
241
+ return queue;
242
+ }).pipe(Effect.provide(runtime));
243
+ };
244
+ const syncState = Subscribable.make({
245
+ get: Effect.gen(function* () {
246
+ const syncState = yield* syncStateSref;
247
+ if (syncState === undefined)
248
+ return shouldNeverHappen('Not initialized');
249
+ return syncState;
250
+ }),
251
+ changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
252
+ });
193
253
  return {
254
+ pull,
255
+ pullQueue,
194
256
  push,
195
257
  pushPartial,
196
258
  boot,
197
- syncState: Subscribable.make({
198
- get: Effect.gen(function* () {
199
- const syncState = yield* syncStateSref;
200
- if (syncState === undefined)
201
- return shouldNeverHappen('Not initialized');
202
- return syncState;
203
- }),
204
- changes: syncStateSref.changes.pipe(Stream.filter(isNotUndefined)),
205
- }),
259
+ syncState,
260
+ getMergeCounter: () => mergeCounterRef.current,
206
261
  };
207
262
  });
208
- const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, }) => Effect.gen(function* () {
209
- const { connectedClientSessionPullQueues, clientId } = yield* LeaderThreadCtx;
210
- const applyMutationItems = yield* makeApplyMutationItems;
263
+ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLatch, syncStateSref, syncBackendPushQueue, schema, isClientEvent, otelSpan, currentLocalPushGenerationRef, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, localPushBatchSize, testing, }) => Effect.gen(function* () {
211
264
  while (true) {
212
- // TODO make batch size configurable
213
- const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, 10);
265
+ if (testing.delay !== undefined) {
266
+ yield* testing.delay.pipe(Effect.withSpan('localPushProcessingDelay'));
267
+ }
268
+ const batchItems = yield* BucketQueue.takeBetween(localPushesQueue, 1, localPushBatchSize);
214
269
  // Wait for the backend pulling to finish
215
270
  yield* localPushesLatch.await;
216
271
  // Prevent backend pull processing until this local push is finished
@@ -219,7 +274,7 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
219
274
  // It's important that we filter after we got localPushesLatch, otherwise we might filter with the old generation
220
275
  const filteredBatchItems = batchItems
221
276
  .filter(([_1, _2, generation]) => generation === currentLocalPushGenerationRef.current)
222
- .map(([mutationEventEncoded, deferred]) => [mutationEventEncoded, deferred]);
277
+ .map(([eventEncoded, deferred]) => [eventEncoded, deferred]);
223
278
  if (filteredBatchItems.length === 0) {
224
279
  // console.log('dropping old-gen batch', currentLocalPushGenerationRef.current)
225
280
  // Allow the backend pulling to start
@@ -234,11 +289,12 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
234
289
  syncState,
235
290
  payload: { _tag: 'local-push', newEvents },
236
291
  isClientEvent,
237
- isEqualEvent: MutationEvent.isEqualEncoded,
292
+ isEqualEvent: LiveStoreEvent.isEqualEncoded,
238
293
  });
294
+ const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
239
295
  switch (mergeResult._tag) {
240
296
  case 'unexpected-error': {
241
- otelSpan?.addEvent('local-push:unexpected-error', {
297
+ otelSpan?.addEvent(`[${mergeCounter}]:push:unexpected-error`, {
242
298
  batchSize: newEvents.length,
243
299
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
244
300
  });
@@ -248,14 +304,11 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
248
304
  return shouldNeverHappen('The leader thread should never have to rebase due to a local push');
249
305
  }
250
306
  case 'reject': {
251
- otelSpan?.addEvent('local-push:reject', {
307
+ otelSpan?.addEvent(`[${mergeCounter}]:push:reject`, {
252
308
  batchSize: newEvents.length,
253
309
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
254
310
  });
255
- /*
256
-
257
- TODO: how to test this?
258
- */
311
+ // TODO: how to test this?
259
312
  currentLocalPushGenerationRef.current++;
260
313
  const nextGeneration = currentLocalPushGenerationRef.current;
261
314
  const providedId = newEvents.at(0).id;
@@ -263,7 +316,8 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
263
316
  // We're also handling the case where the localPushQueue already contains events
264
317
  // from the next generation which we preserve in the queue
265
318
  const remainingEventsMatchingGeneration = yield* BucketQueue.takeSplitWhere(localPushesQueue, (item) => item[2] >= nextGeneration);
266
- if ((yield* BucketQueue.size(localPushesQueue)) > 0) {
319
+ // TODO we still need to better understand and handle this scenario
320
+ if (LS_DEV && (yield* BucketQueue.size(localPushesQueue)) > 0) {
267
321
  console.log('localPushesQueue is not empty', yield* BucketQueue.size(localPushesQueue));
268
322
  debugger;
269
323
  }
@@ -290,62 +344,55 @@ const backgroundApplyLocalPushes = ({ localPushesLatch, localPushesQueue, pullLa
290
344
  }
291
345
  }
292
346
  yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
293
- if (clientId === 'client-b') {
294
- // yield* Effect.log('offer upstream-advance due to local-push')
295
- // debugger
296
- }
297
347
  yield* connectedClientSessionPullQueues.offer({
298
- payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents },
299
- remaining: 0,
348
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
349
+ mergeCounter,
300
350
  });
301
- otelSpan?.addEvent('local-push', {
351
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
352
+ otelSpan?.addEvent(`[${mergeCounter}]:push:advance`, {
302
353
  batchSize: newEvents.length,
303
354
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
304
355
  });
305
- // Don't sync clientOnly mutations
306
- const filteredBatch = mergeResult.newEvents.filter((mutationEventEncoded) => {
307
- const mutationDef = getMutationDef(schema, mutationEventEncoded.mutation);
308
- return mutationDef.options.clientOnly === false;
356
+ // Don't sync clientOnly events
357
+ const filteredBatch = mergeResult.newEvents.filter((eventEncoded) => {
358
+ const eventDef = getEventDef(schema, eventEncoded.name);
359
+ return eventDef.eventDef.options.clientOnly === false;
309
360
  });
310
- yield* BucketQueue.offerAll(syncBackendQueue, filteredBatch);
311
- yield* applyMutationItems({ batchItems: newEvents, deferreds });
361
+ yield* BucketQueue.offerAll(syncBackendPushQueue, filteredBatch);
362
+ yield* applyEventsBatch({ batchItems: mergeResult.newEvents, deferreds });
312
363
  // Allow the backend pulling to start
313
364
  yield* pullLatch.open;
314
365
  }
315
366
  });
316
367
  // TODO how to handle errors gracefully
317
- const makeApplyMutationItems = Effect.gen(function* () {
318
- const leaderThreadCtx = yield* LeaderThreadCtx;
319
- const { dbReadModel: db, dbMutationLog } = leaderThreadCtx;
320
- const applyMutation = yield* makeApplyMutation;
321
- return ({ batchItems, deferreds }) => Effect.gen(function* () {
322
- db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
323
- dbMutationLog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
324
- yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
325
- if (Exit.isSuccess(exit))
326
- return;
327
- // Rollback in case of an error
328
- db.execute('ROLLBACK', undefined);
329
- dbMutationLog.execute('ROLLBACK', undefined);
330
- }));
331
- for (let i = 0; i < batchItems.length; i++) {
332
- yield* applyMutation(batchItems[i]);
333
- if (deferreds?.[i] !== undefined) {
334
- yield* Deferred.succeed(deferreds[i], void 0);
335
- }
368
+ const applyEventsBatch = ({ batchItems, deferreds }) => Effect.gen(function* () {
369
+ const { dbReadModel: db, dbEventlog, applyEvent } = yield* LeaderThreadCtx;
370
+ // NOTE We always start a transaction to ensure consistency between db and eventlog (even for single-item batches)
371
+ db.execute('BEGIN TRANSACTION', undefined); // Start the transaction
372
+ dbEventlog.execute('BEGIN TRANSACTION', undefined); // Start the transaction
373
+ yield* Effect.addFinalizer((exit) => Effect.gen(function* () {
374
+ if (Exit.isSuccess(exit))
375
+ return;
376
+ // Rollback in case of an error
377
+ db.execute('ROLLBACK', undefined);
378
+ dbEventlog.execute('ROLLBACK', undefined);
379
+ }));
380
+ for (let i = 0; i < batchItems.length; i++) {
381
+ const { sessionChangeset } = yield* applyEvent(batchItems[i]);
382
+ batchItems[i].meta.sessionChangeset = sessionChangeset;
383
+ if (deferreds?.[i] !== undefined) {
384
+ yield* Deferred.succeed(deferreds[i], void 0);
336
385
  }
337
- db.execute('COMMIT', undefined); // Commit the transaction
338
- dbMutationLog.execute('COMMIT', undefined); // Commit the transaction
339
- }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:leader-thread:syncing:applyMutationItems', {
340
- attributes: { count: batchItems.length },
341
- }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
342
- });
343
- const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, }) => Effect.gen(function* () {
344
- const { syncBackend, dbReadModel: db, dbMutationLog, connectedClientSessionPullQueues, schema, clientId, } = yield* LeaderThreadCtx;
386
+ }
387
+ db.execute('COMMIT', undefined); // Commit the transaction
388
+ dbEventlog.execute('COMMIT', undefined); // Commit the transaction
389
+ }).pipe(Effect.uninterruptible, Effect.scoped, Effect.withSpan('@livestore/common:LeaderSyncProcessor:applyEventItems', {
390
+ attributes: { batchSize: batchItems.length },
391
+ }), Effect.tapCauseLogPretty, UnexpectedError.mapToUnexpectedError);
392
+ const backgroundBackendPulling = ({ initialBackendHead, isClientEvent, restartBackendPushing, otelSpan, syncStateSref, localPushesLatch, pullLatch, devtoolsLatch, initialBlockingSyncContext, connectedClientSessionPullQueues, mergeCounterRef, mergePayloads, advancePushHead, }) => Effect.gen(function* () {
393
+ const { syncBackend, dbReadModel: db, dbEventlog, schema } = yield* LeaderThreadCtx;
345
394
  if (syncBackend === undefined)
346
395
  return;
347
- const cursorInfo = yield* getCursorInfo(initialBackendHead);
348
- const applyMutationItems = yield* makeApplyMutationItems;
349
396
  const onNewPullChunk = (newEvents, remaining) => Effect.gen(function* () {
350
397
  if (newEvents.length === 0)
351
398
  return;
@@ -359,72 +406,81 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent,
359
406
  const syncState = yield* syncStateSref;
360
407
  if (syncState === undefined)
361
408
  return shouldNeverHappen('Not initialized');
362
- const trimRollbackUntil = newEvents.at(-1).id;
363
409
  const mergeResult = SyncState.merge({
364
410
  syncState,
365
- payload: { _tag: 'upstream-advance', newEvents, trimRollbackUntil },
411
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents }),
366
412
  isClientEvent,
367
- isEqualEvent: MutationEvent.isEqualEncoded,
413
+ isEqualEvent: LiveStoreEvent.isEqualEncoded,
368
414
  ignoreClientEvents: true,
369
415
  });
416
+ const mergeCounter = yield* incrementMergeCounter(mergeCounterRef);
370
417
  if (mergeResult._tag === 'reject') {
371
418
  return shouldNeverHappen('The leader thread should never reject upstream advances');
372
419
  }
373
420
  else if (mergeResult._tag === 'unexpected-error') {
374
- otelSpan?.addEvent('backend-pull:unexpected-error', {
421
+ otelSpan?.addEvent(`[${mergeCounter}]:pull:unexpected-error`, {
375
422
  newEventsCount: newEvents.length,
376
423
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
377
424
  });
378
425
  return yield* Effect.fail(mergeResult.cause);
379
426
  }
380
427
  const newBackendHead = newEvents.at(-1).id;
381
- updateBackendHead(dbMutationLog, newBackendHead);
428
+ Eventlog.updateBackendHead(dbEventlog, newBackendHead);
382
429
  if (mergeResult._tag === 'rebase') {
383
- otelSpan?.addEvent('backend-pull:rebase', {
430
+ otelSpan?.addEvent(`[${mergeCounter}]:pull:rebase`, {
384
431
  newEventsCount: newEvents.length,
385
432
  newEvents: TRACE_VERBOSE ? JSON.stringify(newEvents) : undefined,
386
- rollbackCount: mergeResult.eventsToRollback.length,
433
+ rollbackCount: mergeResult.rollbackEvents.length,
387
434
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
388
435
  });
389
- const filteredRebasedPending = mergeResult.newSyncState.pending.filter((mutationEvent) => {
390
- const mutationDef = getMutationDef(schema, mutationEvent.mutation);
391
- return mutationDef.options.clientOnly === false;
436
+ const globalRebasedPendingEvents = mergeResult.newSyncState.pending.filter((event) => {
437
+ const eventDef = getEventDef(schema, event.name);
438
+ return eventDef.eventDef.options.clientOnly === false;
392
439
  });
393
- yield* restartBackendPushing(filteredRebasedPending);
394
- if (mergeResult.eventsToRollback.length > 0) {
395
- yield* rollback({ db, dbMutationLog, eventIdsToRollback: mergeResult.eventsToRollback.map((_) => _.id) });
440
+ yield* restartBackendPushing(globalRebasedPendingEvents);
441
+ if (mergeResult.rollbackEvents.length > 0) {
442
+ yield* rollback({ db, dbEventlog, eventIdsToRollback: mergeResult.rollbackEvents.map((_) => _.id) });
396
443
  }
397
444
  yield* connectedClientSessionPullQueues.offer({
398
- payload: {
399
- _tag: 'upstream-rebase',
445
+ payload: SyncState.PayloadUpstreamRebase.make({
400
446
  newEvents: mergeResult.newEvents,
401
- rollbackUntil: mergeResult.eventsToRollback.at(0).id,
402
- trimRollbackUntil,
403
- },
404
- remaining,
447
+ rollbackEvents: mergeResult.rollbackEvents,
448
+ }),
449
+ mergeCounter,
405
450
  });
451
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamRebase.make({
452
+ newEvents: mergeResult.newEvents,
453
+ rollbackEvents: mergeResult.rollbackEvents,
454
+ }));
406
455
  }
407
456
  else {
408
- otelSpan?.addEvent('backend-pull:advance', {
457
+ otelSpan?.addEvent(`[${mergeCounter}]:pull:advance`, {
409
458
  newEventsCount: newEvents.length,
410
459
  mergeResult: TRACE_VERBOSE ? JSON.stringify(mergeResult) : undefined,
411
460
  });
412
- if (clientId === 'client-b') {
413
- // yield* Effect.log('offer upstream-advance due to pull')
414
- }
415
461
  yield* connectedClientSessionPullQueues.offer({
416
- payload: { _tag: 'upstream-advance', newEvents: mergeResult.newEvents, trimRollbackUntil },
417
- remaining,
462
+ payload: SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }),
463
+ mergeCounter,
418
464
  });
465
+ mergePayloads.set(mergeCounter, SyncState.PayloadUpstreamAdvance.make({ newEvents: mergeResult.newEvents }));
466
+ if (mergeResult.confirmedEvents.length > 0) {
467
+ // `mergeResult.confirmedEvents` don't contain the correct sync metadata, so we need to use
468
+ // `newEvents` instead which we filter via `mergeResult.confirmedEvents`
469
+ const confirmedNewEvents = newEvents.filter((event) => mergeResult.confirmedEvents.some((confirmedEvent) => EventId.isEqual(event.id, confirmedEvent.id)));
470
+ yield* Eventlog.updateSyncMetadata(confirmedNewEvents);
471
+ }
419
472
  }
473
+ // Removes the changeset rows which are no longer needed as we'll never have to rollback beyond this point
420
474
  trimChangesetRows(db, newBackendHead);
421
- yield* applyMutationItems({ batchItems: mergeResult.newEvents, deferreds: undefined });
475
+ advancePushHead(mergeResult.newSyncState.localHead);
476
+ yield* applyEventsBatch({ batchItems: mergeResult.newEvents, deferreds: undefined });
422
477
  yield* SubscriptionRef.set(syncStateSref, mergeResult.newSyncState);
478
+ // Allow local pushes to be processed again
423
479
  if (remaining === 0) {
424
- // Allow local pushes to be processed again
425
480
  yield* localPushesLatch.open;
426
481
  }
427
482
  });
483
+ const cursorInfo = yield* Eventlog.getSyncBackendCursorInfo(initialBackendHead);
428
484
  yield* syncBackend.pull(cursorInfo).pipe(
429
485
  // TODO only take from queue while connected
430
486
  Stream.tap(({ batch, remaining }) => Effect.gen(function* () {
@@ -434,64 +490,21 @@ const backgroundBackendPulling = ({ dbReady, initialBackendHead, isClientEvent,
434
490
  // batch: TRACE_VERBOSE ? batch : undefined,
435
491
  // },
436
492
  // })
437
- // Wait for the db to be initially created
438
- yield* dbReady;
439
- // NOTE we only want to take process mutations when the sync backend is connected
493
+ // NOTE we only want to take process events when the sync backend is connected
440
494
  // (e.g. needed for simulating being offline)
441
495
  // TODO remove when there's a better way to handle this in stream above
442
496
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
443
- yield* onNewPullChunk(batch.map((_) => MutationEvent.EncodedWithMeta.fromGlobal(_.mutationEventEncoded)), remaining);
497
+ yield* onNewPullChunk(batch.map((_) => LiveStoreEvent.EncodedWithMeta.fromGlobal(_.eventEncoded, _.metadata)), remaining);
444
498
  yield* initialBlockingSyncContext.update({ processed: batch.length, remaining });
445
499
  })), Stream.runDrain, Effect.interruptible);
446
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pulling'));
447
- const rollback = ({ db, dbMutationLog, eventIdsToRollback, }) => Effect.gen(function* () {
448
- const rollbackEvents = db
449
- .select(sql `SELECT * FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`).join(', ')})`)
450
- .map((_) => ({ id: { global: _.idGlobal, client: _.idClient }, changeset: _.changeset, debug: _.debug }))
451
- .sort((a, b) => EventId.compare(a.id, b.id));
452
- // TODO bring back `.toSorted` once Expo supports it
453
- // .toSorted((a, b) => EventId.compare(a.id, b.id))
454
- // Apply changesets in reverse order
455
- for (let i = rollbackEvents.length - 1; i >= 0; i--) {
456
- const { changeset } = rollbackEvents[i];
457
- if (changeset !== null) {
458
- db.makeChangeset(changeset).invert().apply();
459
- }
460
- }
461
- const eventIdPairChunks = ReadonlyArray.chunksOf(100)(eventIdsToRollback.map((id) => `(${id.global}, ${id.client})`));
462
- // Delete the changeset rows
463
- for (const eventIdPairChunk of eventIdPairChunks) {
464
- db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
465
- }
466
- // Delete the mutation log rows
467
- for (const eventIdPairChunk of eventIdPairChunks) {
468
- dbMutationLog.execute(sql `DELETE FROM ${MUTATION_LOG_META_TABLE} WHERE (idGlobal, idClient) IN (${eventIdPairChunk.join(', ')})`);
469
- }
470
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:rollback', {
471
- attributes: { count: eventIdsToRollback.length },
472
- }));
473
- const getCursorInfo = (remoteHead) => Effect.gen(function* () {
474
- const { dbMutationLog } = yield* LeaderThreadCtx;
475
- if (remoteHead === EventId.ROOT.global)
476
- return Option.none();
477
- const MutationlogQuerySchema = Schema.Struct({
478
- syncMetadataJson: Schema.parseJson(Schema.Option(Schema.JsonValue)),
479
- }).pipe(Schema.pluck('syncMetadataJson'), Schema.Array, Schema.head);
480
- const syncMetadataOption = yield* Effect.sync(() => dbMutationLog.select(sql `SELECT syncMetadataJson FROM ${MUTATION_LOG_META_TABLE} WHERE idGlobal = ${remoteHead} ORDER BY idClient ASC LIMIT 1`)).pipe(Effect.andThen(Schema.decode(MutationlogQuerySchema)), Effect.map(Option.flatten), Effect.orDie);
481
- return Option.some({
482
- cursor: { global: remoteHead, client: EventId.clientDefault },
483
- metadata: syncMetadataOption,
484
- });
485
- }).pipe(Effect.withSpan('@livestore/common:leader-thread:syncing:getCursorInfo', { attributes: { remoteHead } }));
486
- const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtoolsLatch, }) => Effect.gen(function* () {
487
- const { syncBackend, dbMutationLog } = yield* LeaderThreadCtx;
500
+ }).pipe(Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pulling'));
501
+ const backgroundBackendPushing = ({ syncBackendPushQueue, otelSpan, devtoolsLatch, backendPushBatchSize, }) => Effect.gen(function* () {
502
+ const { syncBackend } = yield* LeaderThreadCtx;
488
503
  if (syncBackend === undefined)
489
504
  return;
490
- yield* dbReady;
491
505
  while (true) {
492
506
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
493
- // TODO make batch size configurable
494
- const queueItems = yield* BucketQueue.takeBetween(syncBackendQueue, 1, BACKEND_PUSH_BATCH_SIZE);
507
+ const queueItems = yield* BucketQueue.takeBetween(syncBackendPushQueue, 1, backendPushBatchSize);
495
508
  yield* SubscriptionRef.waitUntil(syncBackend.isConnected, (isConnected) => isConnected === true);
496
509
  if (devtoolsLatch !== undefined) {
497
510
  yield* devtoolsLatch.await;
@@ -510,22 +523,67 @@ const backgroundBackendPushing = ({ dbReady, syncBackendQueue, otelSpan, devtool
510
523
  // wait for interrupt caused by background pulling which will then restart pushing
511
524
  return yield* Effect.never;
512
525
  }
513
- const { metadata } = pushResult.right;
514
- // TODO try to do this in a single query
515
- for (let i = 0; i < queueItems.length; i++) {
516
- const mutationEventEncoded = queueItems[i];
517
- yield* execSql(dbMutationLog, ...updateRows({
518
- tableName: MUTATION_LOG_META_TABLE,
519
- columns: mutationLogMetaTable.sqliteDef.columns,
520
- where: { idGlobal: mutationEventEncoded.id.global, idClient: mutationEventEncoded.id.client },
521
- updateValues: { syncMetadataJson: metadata[i] },
522
- }));
523
- }
524
526
  }
525
- }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:leader-thread:syncing:backend-pushing'));
527
+ }).pipe(Effect.interruptible, Effect.withSpan('@livestore/common:LeaderSyncProcessor:backend-pushing'));
526
528
  const trimChangesetRows = (db, newHead) => {
527
529
  // Since we're using the session changeset rows to query for the current head,
528
530
  // we're keeping at least one row for the current head, and thus are using `<` instead of `<=`
529
531
  db.execute(sql `DELETE FROM ${SESSION_CHANGESET_META_TABLE} WHERE idGlobal < ${newHead.global}`);
530
532
  };
533
+ const makePullQueueSet = Effect.gen(function* () {
534
+ const set = new Set();
535
+ yield* Effect.addFinalizer(() => Effect.gen(function* () {
536
+ for (const queue of set) {
537
+ yield* Queue.shutdown(queue);
538
+ }
539
+ set.clear();
540
+ }));
541
+ const makeQueue = Effect.gen(function* () {
542
+ const queue = yield* Queue.unbounded().pipe(Effect.acquireRelease(Queue.shutdown));
543
+ yield* Effect.addFinalizer(() => Effect.sync(() => set.delete(queue)));
544
+ set.add(queue);
545
+ return queue;
546
+ });
547
+ const offer = (item) => Effect.gen(function* () {
548
+ // Short-circuit if the payload is an empty upstream advance
549
+ if (item.payload._tag === 'upstream-advance' && item.payload.newEvents.length === 0) {
550
+ return;
551
+ }
552
+ for (const queue of set) {
553
+ yield* Queue.offer(queue, item);
554
+ }
555
+ });
556
+ return {
557
+ makeQueue,
558
+ offer,
559
+ };
560
+ });
561
+ const incrementMergeCounter = (mergeCounterRef) => Effect.gen(function* () {
562
+ const { dbReadModel } = yield* LeaderThreadCtx;
563
+ mergeCounterRef.current++;
564
+ dbReadModel.execute(sql `INSERT OR REPLACE INTO ${LEADER_MERGE_COUNTER_TABLE} (id, mergeCounter) VALUES (0, ${mergeCounterRef.current})`);
565
+ return mergeCounterRef.current;
566
+ });
567
+ const getMergeCounterFromDb = (dbReadModel) => Effect.gen(function* () {
568
+ const result = dbReadModel.select(sql `SELECT mergeCounter FROM ${LEADER_MERGE_COUNTER_TABLE} WHERE id = 0`);
569
+ return result[0]?.mergeCounter ?? 0;
570
+ });
571
+ const validatePushBatch = (batch, pushHead) => Effect.gen(function* () {
572
+ if (batch.length === 0) {
573
+ return;
574
+ }
575
+ // Make sure batch is monotonically increasing
576
+ for (let i = 1; i < batch.length; i++) {
577
+ if (EventId.isGreaterThanOrEqual(batch[i - 1].id, batch[i].id)) {
578
+ shouldNeverHappen(`Events must be ordered in monotonically ascending order by eventId. Received: [${batch.map((e) => EventId.toString(e.id)).join(', ')}]`);
579
+ }
580
+ }
581
+ // Make sure smallest event id is > pushHead
582
+ if (EventId.isGreaterThanOrEqual(pushHead, batch[0].id)) {
583
+ return yield* LeaderAheadError.make({
584
+ minimumExpectedId: pushHead,
585
+ providedId: batch[0].id,
586
+ });
587
+ }
588
+ });
531
589
  //# sourceMappingURL=LeaderSyncProcessor.js.map