@wataruoguchi/emmett-event-store-kysely 2.2.7 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -32,14 +32,16 @@ import { Kysely } from "kysely";
  // Required tables: messages, streams, subscriptions
  ```

- A read model table expects to have the following columns:
+ **Legacy approach:** A read model table expects to have the following columns:

- - stream_id (uuid)
+ - stream_id (text)
  - last_stream_position (bigint)
  - last_global_position (bigint)
  - partition (text)
  - snapshot (jsonb)

+ **New approach (recommended):** Use `createSnapshotProjectionWithSnapshotTable` to store snapshots in a separate centralized table, keeping read model tables clean with only keys and denormalized columns.
+
  ### 2. Create Event Store

  ```typescript
@@ -69,12 +71,69 @@ Please read <https://event-driven-io.github.io/emmett/getting-started.html>

  ### 4. Build Read Models

- This package supports "Snapshot Projections".
+ This package supports "Snapshot Projections" with two approaches:
+
+ #### Option A: Separate Snapshot Table (Recommended) ⭐
+
+ Use `createSnapshotProjectionRegistryWithSnapshotTable` to store snapshots in a centralized `snapshots` table:
+
+ ```typescript
+ import {
+   createSnapshotProjectionRegistryWithSnapshotTable
+ } from "@wataruoguchi/emmett-event-store-kysely";
+
+ // First, create the snapshots table:
+ // CREATE TABLE snapshots (
+ //   readmodel_table_name TEXT NOT NULL,
+ //   stream_id TEXT NOT NULL,
+ //   last_stream_position BIGINT NOT NULL,
+ //   last_global_position BIGINT NOT NULL,
+ //   snapshot JSONB NOT NULL,
+ //   PRIMARY KEY (readmodel_table_name, stream_id)
+ // );
+
+ // Reuse your write model's evolve function!
+ const registry = createSnapshotProjectionRegistryWithSnapshotTable(
+   ["CartCreated", "ItemAdded", "CartCheckedOut"],
+   {
+     tableName: "carts",
+     extractKeys: (event, partition) => ({
+       tenant_id: event.data.eventMeta.tenantId,
+       cart_id: event.data.eventMeta.cartId,
+       partition,
+     }),
+     evolve: domainEvolve, // Reuse from write model!
+     initialState,
+     mapToColumns: (state) => ({ // Optional: denormalize for queries
+       currency: state.currency,
+       total: state.status === "checkedOut" ? state.total : null,
+     }),
+   }
+ );
+ ```
+
+ **Benefits:**
+
+ - ✅ Cleaner read model tables (no event-sourcing columns)
+ - ✅ Easier to create new read models (no schema migrations for event-sourcing columns)
+ - ✅ Centralized snapshot management
+ - ✅ Race condition protection with `FOR UPDATE` locking
+ - ✅ Operations wrapped in transactions for stronger race condition protection
+ - ✅ Automatic idempotency (skips already-processed events)
+ - ✅ Primary key validation (ensures consistent `extractKeys`)
+
+ **Important:** The `extractKeys` function must return the same set of keys for all events. The projection validates this at runtime.
+
+ #### Option B: Legacy Approach (Backward Compatible)
+
+ Use `createSnapshotProjectionRegistry` to store everything in the read model table:
+
+ **Note:** This approach stores event-sourcing columns (`stream_id`, `last_stream_position`, etc.) directly in the read model table. Consider using Option A for new projects.

  ```typescript
  import {
    createSnapshotProjectionRegistry
- } from "@wataruoguchi/emmett-event-store-kysely/projections";
+ } from "@wataruoguchi/emmett-event-store-kysely";

  // Reuse your write model's evolve function!
  const registry = createSnapshotProjectionRegistry(
@@ -99,7 +158,7 @@ const registry = createSnapshotProjectionRegistry(
  ### 5. Process Events and Update Read Model

  ```typescript
- import { createProjectionRunner } from "@wataruoguchi/emmett-event-store-kysely/projections";
+ import { createProjectionRunner } from "@wataruoguchi/emmett-event-store-kysely";

  const runner = createProjectionRunner({
    db,
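The README's Option A shows the `snapshots` DDL as raw SQL inside a comment. For projects that manage schema with Kysely migrations, an equivalent migration could look like the following sketch (the migration scaffolding and constraint name are assumptions; the columns and primary key mirror the `CREATE TABLE` statement above):

```typescript
import { Kysely } from "kysely";

export async function up(db: Kysely<any>): Promise<void> {
  // Centralized snapshot storage used by the *WithSnapshotTable projections.
  await db.schema
    .createTable("snapshots")
    .addColumn("readmodel_table_name", "text", (col) => col.notNull())
    .addColumn("stream_id", "text", (col) => col.notNull())
    .addColumn("last_stream_position", "bigint", (col) => col.notNull())
    .addColumn("last_global_position", "bigint", (col) => col.notNull())
    .addColumn("snapshot", "jsonb", (col) => col.notNull())
    .addPrimaryKeyConstraint("snapshots_pkey", ["readmodel_table_name", "stream_id"])
    .execute();
}

export async function down(db: Kysely<any>): Promise<void> {
  await db.schema.dropTable("snapshots").execute();
}
```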
package/dist/index.cjs CHANGED
@@ -25,6 +25,8 @@ __export(index_exports, {
25
25
  createProjectionRunner: () => createProjectionRunner,
26
26
  createSnapshotProjection: () => createSnapshotProjection,
27
27
  createSnapshotProjectionRegistry: () => createSnapshotProjectionRegistry,
28
+ createSnapshotProjectionRegistryWithSnapshotTable: () => createSnapshotProjectionRegistryWithSnapshotTable,
29
+ createSnapshotProjectionWithSnapshotTable: () => createSnapshotProjectionWithSnapshotTable,
28
30
  getKyselyEventStore: () => getKyselyEventStore
29
31
  });
30
32
  module.exports = __toCommonJS(index_exports);
@@ -451,8 +453,8 @@ function createProjectionRunner({
451
453
  readStream,
452
454
  registry
453
455
  }) {
454
- async function getOrCreateCheckpoint(subscriptionId, partition) {
455
- const existing = await db.selectFrom("subscriptions").select([
456
+ async function getOrCreateCheckpoint(executor, subscriptionId, partition) {
457
+ const existing = await executor.selectFrom("subscriptions").select([
456
458
  "subscription_id as subscriptionId",
457
459
  "partition",
458
460
  "last_processed_position as lastProcessedPosition"
@@ -469,7 +471,7 @@ function createProjectionRunner({
469
471
  lastProcessedPosition: last
470
472
  };
471
473
  }
472
- await db.insertInto("subscriptions").values({
474
+ await executor.insertInto("subscriptions").values({
473
475
  subscription_id: subscriptionId,
474
476
  partition,
475
477
  version: 1,
@@ -485,73 +487,144 @@ function createProjectionRunner({
485
487
  lastProcessedPosition: 0n
486
488
  };
487
489
  }
488
- async function updateCheckpoint(subscriptionId, partition, lastProcessedPosition) {
489
- await db.updateTable("subscriptions").set({ last_processed_position: lastProcessedPosition }).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).execute();
490
+ async function updateCheckpoint(executor, subscriptionId, partition, lastProcessedPosition) {
491
+ await executor.updateTable("subscriptions").set({ last_processed_position: lastProcessedPosition }).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).execute();
490
492
  }
491
493
  async function projectEvents(subscriptionId, streamId, opts) {
492
494
  const partition = opts?.partition ?? "default_partition";
493
495
  const batchSize = BigInt(opts?.batchSize ?? 500);
494
- const checkpoint = await getOrCreateCheckpoint(subscriptionId, partition);
496
+ const checkpoint = await getOrCreateCheckpoint(
497
+ db,
498
+ subscriptionId,
499
+ partition
500
+ );
495
501
  const { events, currentStreamVersion } = await readStream(streamId, {
496
502
  from: checkpoint.lastProcessedPosition + 1n,
497
503
  to: checkpoint.lastProcessedPosition + batchSize,
498
504
  partition
499
505
  });
506
+ let processed = 0;
500
507
  for (const ev of events) {
501
508
  if (!ev) continue;
502
- const handlers = registry[ev.type] ?? [];
503
- if (handlers.length === 0) {
509
+ await db.transaction().execute(async (trx) => {
510
+ const handlers = registry[ev.type] ?? [];
511
+ if (handlers.length === 0) {
512
+ await updateCheckpoint(
513
+ trx,
514
+ subscriptionId,
515
+ partition,
516
+ ev.metadata.streamPosition
517
+ );
518
+ return;
519
+ }
520
+ const projectionEvent = {
521
+ type: ev.type,
522
+ data: ev.data,
523
+ metadata: {
524
+ streamId: ev.metadata.streamId,
525
+ streamPosition: ev.metadata.streamPosition,
526
+ globalPosition: ev.metadata.globalPosition
527
+ }
528
+ };
529
+ for (const handler of handlers) {
530
+ await handler({ db: trx, partition }, projectionEvent);
531
+ }
504
532
  await updateCheckpoint(
533
+ trx,
505
534
  subscriptionId,
506
535
  partition,
507
- ev.metadata.streamPosition
536
+ projectionEvent.metadata.streamPosition
508
537
  );
509
- continue;
510
- }
511
- const projectionEvent = {
512
- type: ev.type,
513
- data: ev.data,
514
- metadata: {
515
- streamId: ev.metadata.streamId,
516
- streamPosition: ev.metadata.streamPosition,
517
- globalPosition: ev.metadata.globalPosition
518
- }
519
- };
520
- for (const handler of handlers) {
521
- await handler({ db, partition }, projectionEvent);
522
- }
523
- await updateCheckpoint(
524
- subscriptionId,
525
- partition,
526
- projectionEvent.metadata.streamPosition
527
- );
538
+ });
539
+ processed++;
528
540
  }
529
- return { processed: events.length, currentStreamVersion };
541
+ return { processed, currentStreamVersion };
530
542
  }
531
543
  return { projectEvents };
532
544
  }
533
545
 
534
546
  // src/projections/snapshot-projection.ts
547
+ function constructStreamId(keys) {
548
+ const sortedEntries = Object.entries(keys).sort(([a], [b]) => {
549
+ if (a < b) return -1;
550
+ if (a > b) return 1;
551
+ return 0;
552
+ });
553
+ return sortedEntries.map(([key, value]) => {
554
+ const encodedKey = encodeURIComponent(key);
555
+ const encodedValue = encodeURIComponent(value);
556
+ return `${encodedKey}:${encodedValue}`;
557
+ }).join("|");
558
+ }
559
+ function validateAndCachePrimaryKeys(keys, tableName, cachedKeys) {
560
+ const currentKeys = Object.keys(keys);
561
+ const sortedCurrentKeys = [...currentKeys].sort();
562
+ if (!cachedKeys) {
563
+ return sortedCurrentKeys;
564
+ }
565
+ if (cachedKeys.length !== sortedCurrentKeys.length || !cachedKeys.every((key, index) => key === sortedCurrentKeys[index])) {
566
+ throw new Error(
567
+ `Snapshot projection "${tableName}" received inconsistent primary keys from extractKeys. Expected keys: ${cachedKeys.join(", ")}, but received: ${sortedCurrentKeys.join(", ")}. Ensure extractKeys returns a consistent set of keys for all events.`
568
+ );
569
+ }
570
+ return cachedKeys;
571
+ }
572
+ function shouldSkipEvent(eventPosition, lastProcessedPosition) {
573
+ return eventPosition <= lastProcessedPosition;
574
+ }
575
+ function loadStateFromSnapshot(snapshot, initialState, tableName) {
576
+ if (!snapshot) {
577
+ return initialState();
578
+ }
579
+ if (typeof snapshot === "string") {
580
+ try {
581
+ return JSON.parse(snapshot);
582
+ } catch (error) {
583
+ const tableContext = tableName ? ` for table "${tableName}"` : "";
584
+ const errorMessage = error instanceof Error ? error.message : String(error);
585
+ throw new Error(
586
+ `Failed to parse snapshot${tableContext}: ${errorMessage}. Snapshot value: ${snapshot.substring(0, 200)}${snapshot.length > 200 ? "..." : ""}`
587
+ );
588
+ }
589
+ }
590
+ return snapshot;
591
+ }
592
+ function buildDenormalizedUpdateSet(newState, mapToColumns) {
593
+ const updateSet = {};
594
+ if (mapToColumns) {
595
+ const columns = mapToColumns(newState);
596
+ for (const columnName of Object.keys(columns)) {
597
+ updateSet[columnName] = (eb) => eb.ref(`excluded.${columnName}`);
598
+ }
599
+ }
600
+ return updateSet;
601
+ }
535
602
  function createSnapshotProjection(config) {
536
603
  const { tableName, extractKeys, evolve, initialState, mapToColumns } = config;
537
604
  let inferredPrimaryKeys;
538
605
  return async ({ db, partition }, event) => {
539
606
  const keys = extractKeys(event, partition);
540
- if (!inferredPrimaryKeys) {
541
- inferredPrimaryKeys = Object.keys(keys);
542
- }
607
+ inferredPrimaryKeys = validateAndCachePrimaryKeys(
608
+ keys,
609
+ tableName,
610
+ inferredPrimaryKeys
611
+ );
543
612
  const primaryKeys = inferredPrimaryKeys;
544
613
  const existing = await db.selectFrom(tableName).select(["last_stream_position", "snapshot"]).where((eb) => {
545
614
  const conditions = Object.entries(keys).map(
546
615
  ([key, value]) => eb(key, "=", value)
547
616
  );
548
617
  return eb.and(conditions);
549
- }).executeTakeFirst();
618
+ }).forUpdate().executeTakeFirst();
550
619
  const lastPos = existing?.last_stream_position ? BigInt(String(existing.last_stream_position)) : -1n;
551
- if (event.metadata.streamPosition <= lastPos) {
620
+ if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
552
621
  return;
553
622
  }
554
- const currentState = existing?.snapshot ? existing.snapshot : initialState();
623
+ const currentState = loadStateFromSnapshot(
624
+ existing?.snapshot,
625
+ initialState,
626
+ tableName
627
+ );
555
628
  const newState = evolve(currentState, event);
556
629
  const rowData = {
557
630
  ...keys,
@@ -571,18 +644,75 @@ function createSnapshotProjection(config) {
571
644
  last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
572
645
  last_global_position: (eb) => eb.ref("excluded.last_global_position")
573
646
  };
574
- if (mapToColumns) {
575
- const columns = mapToColumns(newState);
576
- for (const columnName of Object.keys(columns)) {
577
- updateSet[columnName] = (eb) => eb.ref(`excluded.${columnName}`);
578
- }
579
- }
647
+ const denormalizedUpdateSet = buildDenormalizedUpdateSet(
648
+ newState,
649
+ mapToColumns
650
+ );
651
+ Object.assign(updateSet, denormalizedUpdateSet);
580
652
  await insertQuery.onConflict((oc) => {
581
653
  const conflictBuilder = oc.columns(primaryKeys);
582
654
  return conflictBuilder.doUpdateSet(updateSet);
583
655
  }).execute();
584
656
  };
585
657
  }
658
+ function createSnapshotProjectionWithSnapshotTable(config) {
659
+ const { tableName, extractKeys, evolve, initialState, mapToColumns } = config;
660
+ let inferredPrimaryKeys;
661
+ return async ({ db, partition }, event) => {
662
+ const keys = extractKeys(event, partition);
663
+ inferredPrimaryKeys = validateAndCachePrimaryKeys(
664
+ keys,
665
+ tableName,
666
+ inferredPrimaryKeys
667
+ );
668
+ const primaryKeys = inferredPrimaryKeys;
669
+ const streamId = constructStreamId(keys);
670
+ const existing = await db.selectFrom("snapshots").select(["last_stream_position", "snapshot"]).where("readmodel_table_name", "=", tableName).where("stream_id", "=", streamId).forUpdate().executeTakeFirst();
671
+ const lastPos = existing?.last_stream_position ? BigInt(String(existing.last_stream_position)) : -1n;
672
+ if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
673
+ return;
674
+ }
675
+ const currentState = loadStateFromSnapshot(
676
+ existing?.snapshot,
677
+ initialState,
678
+ tableName
679
+ );
680
+ const newState = evolve(currentState, event);
681
+ await db.insertInto("snapshots").values({
682
+ readmodel_table_name: tableName,
683
+ stream_id: streamId,
684
+ snapshot: JSON.stringify(newState),
685
+ last_stream_position: event.metadata.streamPosition.toString(),
686
+ last_global_position: event.metadata.globalPosition.toString()
687
+ }).onConflict((oc) => {
688
+ return oc.columns(["readmodel_table_name", "stream_id"]).doUpdateSet({
689
+ snapshot: (eb) => eb.ref("excluded.snapshot"),
690
+ last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
691
+ last_global_position: (eb) => eb.ref("excluded.last_global_position")
692
+ });
693
+ }).execute();
694
+ const readModelData = { ...keys };
695
+ if (mapToColumns) {
696
+ const columns = mapToColumns(newState);
697
+ Object.assign(readModelData, columns);
698
+ }
699
+ const readModelInsertQuery = db.insertInto(tableName).values(readModelData);
700
+ const readModelUpdateSet = buildDenormalizedUpdateSet(
701
+ newState,
702
+ mapToColumns
703
+ );
704
+ if (Object.keys(readModelUpdateSet).length > 0) {
705
+ await readModelInsertQuery.onConflict((oc) => {
706
+ const conflictBuilder = oc.columns(primaryKeys);
707
+ return conflictBuilder.doUpdateSet(readModelUpdateSet);
708
+ }).execute();
709
+ } else {
710
+ await readModelInsertQuery.onConflict((oc) => {
711
+ return oc.columns(primaryKeys).doNothing();
712
+ }).execute();
713
+ }
714
+ };
715
+ }
586
716
  function createSnapshotProjectionRegistry(eventTypes, config) {
587
717
  const handler = createSnapshotProjection(config);
588
718
  const registry = {};
@@ -591,6 +721,14 @@ function createSnapshotProjectionRegistry(eventTypes, config) {
591
721
  }
592
722
  return registry;
593
723
  }
724
+ function createSnapshotProjectionRegistryWithSnapshotTable(eventTypes, config) {
725
+ const handler = createSnapshotProjectionWithSnapshotTable(config);
726
+ const registry = {};
727
+ for (const eventType of eventTypes) {
728
+ registry[eventType] = [handler];
729
+ }
730
+ return registry;
731
+ }
594
732
  // Annotate the CommonJS export names for ESM import in node:
595
733
  0 && (module.exports = {
596
734
  createKyselyEventStoreConsumer,
@@ -598,5 +736,7 @@ function createSnapshotProjectionRegistry(eventTypes, config) {
598
736
  createProjectionRunner,
599
737
  createSnapshotProjection,
600
738
  createSnapshotProjectionRegistry,
739
+ createSnapshotProjectionRegistryWithSnapshotTable,
740
+ createSnapshotProjectionWithSnapshotTable,
601
741
  getKyselyEventStore
602
742
  });
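For orientation, the new `constructStreamId` helper above derives the snapshot row's `stream_id` deterministically from the keys returned by `extractKeys`: entries are sorted by key name, key and value are URL-encoded, and the pairs are joined with `|`. A small sketch of the resulting value, using the carts example's key names (the helper is exported only as an `@internal` testing aid, so the snippet restates its logic rather than importing it):

```typescript
// Illustrative only: restates the constructStreamId logic shown above.
const keys: Record<string, string> = {
  tenant_id: "tenant-1",
  cart_id: "cart-42",
  partition: "default_partition",
};

const streamId = Object.entries(keys)
  .sort(([a], [b]) => (a < b ? -1 : a > b ? 1 : 0))
  .map(([key, value]) => `${encodeURIComponent(key)}:${encodeURIComponent(value)}`)
  .join("|");

console.log(streamId);
// => "cart_id:cart-42|partition:default_partition|tenant_id:tenant-1"
```

If a later event's `extractKeys` returns a different key set for the same projection, `validateAndCachePrimaryKeys` throws the "received inconsistent primary keys" error shown above.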
package/dist/index.d.ts CHANGED
@@ -4,7 +4,7 @@ export { getKyselyEventStore } from "./event-store/kysely-event-store.js";
4
4
  export type { KyselyEventStore, KyselyEventStoreOptions, ProjectionReadStreamOptions, } from "./event-store/kysely-event-store.js";
5
5
  export { createProjectionRunner } from "./projections/runner.js";
6
6
  export type { ProjectEvents } from "./projections/runner.js";
7
- export { createSnapshotProjection, createSnapshotProjectionRegistry, } from "./projections/snapshot-projection.js";
7
+ export { createSnapshotProjection, createSnapshotProjectionRegistry, createSnapshotProjectionWithSnapshotTable, createSnapshotProjectionRegistryWithSnapshotTable, } from "./projections/snapshot-projection.js";
8
8
  export type { SnapshotProjectionConfig } from "./projections/snapshot-projection.js";
9
9
  export { createProjectionRegistry } from "./types.js";
10
10
  export type { DatabaseExecutor, Dependencies, ExtendedOptions, ProjectionContext, ProjectionEvent, ProjectionEventMetadata, ProjectionHandler, ProjectionRegistry, } from "./types.js";
package/dist/index.d.ts.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,8BAA8B,EAAE,MAAM,4BAA4B,CAAC;AAC5E,YAAY,EACV,wBAAwB,EACxB,8BAA8B,GAC/B,MAAM,4BAA4B,CAAC;AACpC,OAAO,EAAE,mBAAmB,EAAE,MAAM,qCAAqC,CAAC;AAC1E,YAAY,EACV,gBAAgB,EAChB,uBAAuB,EACvB,2BAA2B,GAC5B,MAAM,qCAAqC,CAAC;AAC7C,OAAO,EAAE,sBAAsB,EAAE,MAAM,yBAAyB,CAAC;AACjE,YAAY,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AAC7D,OAAO,EACL,wBAAwB,EACxB,gCAAgC,GACjC,MAAM,sCAAsC,CAAC;AAC9C,YAAY,EAAE,wBAAwB,EAAE,MAAM,sCAAsC,CAAC;AACrF,OAAO,EAAE,wBAAwB,EAAE,MAAM,YAAY,CAAC;AACtD,YAAY,EACV,gBAAgB,EAChB,YAAY,EACZ,eAAe,EACf,iBAAiB,EACjB,eAAe,EACf,uBAAuB,EACvB,iBAAiB,EACjB,kBAAkB,GACnB,MAAM,YAAY,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,8BAA8B,EAAE,MAAM,4BAA4B,CAAC;AAC5E,YAAY,EACV,wBAAwB,EACxB,8BAA8B,GAC/B,MAAM,4BAA4B,CAAC;AACpC,OAAO,EAAE,mBAAmB,EAAE,MAAM,qCAAqC,CAAC;AAC1E,YAAY,EACV,gBAAgB,EAChB,uBAAuB,EACvB,2BAA2B,GAC5B,MAAM,qCAAqC,CAAC;AAC7C,OAAO,EAAE,sBAAsB,EAAE,MAAM,yBAAyB,CAAC;AACjE,YAAY,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AAC7D,OAAO,EACL,wBAAwB,EACxB,gCAAgC,EAChC,yCAAyC,EACzC,iDAAiD,GAClD,MAAM,sCAAsC,CAAC;AAC9C,YAAY,EAAE,wBAAwB,EAAE,MAAM,sCAAsC,CAAC;AACrF,OAAO,EAAE,wBAAwB,EAAE,MAAM,YAAY,CAAC;AACtD,YAAY,EACV,gBAAgB,EAChB,YAAY,EACZ,eAAe,EACf,iBAAiB,EACjB,eAAe,EACf,uBAAuB,EACvB,iBAAiB,EACjB,kBAAkB,GACnB,MAAM,YAAY,CAAC"}
package/dist/index.js CHANGED
@@ -1,5 +1,5 @@
1
1
  export { createKyselyEventStoreConsumer } from "./event-store/consumers.js";
2
2
  export { getKyselyEventStore } from "./event-store/kysely-event-store.js";
3
3
  export { createProjectionRunner } from "./projections/runner.js";
4
- export { createSnapshotProjection, createSnapshotProjectionRegistry, } from "./projections/snapshot-projection.js";
4
+ export { createSnapshotProjection, createSnapshotProjectionRegistry, createSnapshotProjectionWithSnapshotTable, createSnapshotProjectionRegistryWithSnapshotTable, } from "./projections/snapshot-projection.js";
5
5
  export { createProjectionRegistry } from "./types.js";
package/dist/projections/runner.d.ts.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"runner.d.ts","sourceRoot":"","sources":["../../src/projections/runner.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,MAAM,EAAqB,MAAM,QAAQ,CAAC;AAExD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,sCAAsC,CAAC;AAC7E,OAAO,KAAK,EAAmB,kBAAkB,EAAE,MAAM,aAAa,CAAC;AAEvE,MAAM,MAAM,sBAAsB,GAAG;IACnC,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,MAAM,CAAC;IAClB,qBAAqB,EAAE,MAAM,CAAC;CAC/B,CAAC;AAEF;;;GAGG;AACH,MAAM,MAAM,oBAAoB,GAAG;IACjC,EAAE,EAAE,MAAM,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC;IACtB,UAAU,EAAE,gBAAgB,CAAC,YAAY,CAAC,CAAC;IAC3C,QAAQ,EAAE,kBAAkB,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,aAAa,GAAG,CAC1B,cAAc,EAAE,MAAM,EACtB,QAAQ,EAAE,MAAM,EAChB,IAAI,CAAC,EAAE;IAAE,SAAS,CAAC,EAAE,MAAM,CAAC;IAAC,SAAS,CAAC,EAAE,MAAM,CAAA;CAAE,KAC9C,OAAO,CAAC;IAAE,SAAS,EAAE,MAAM,CAAC;IAAC,oBAAoB,EAAE,MAAM,CAAA;CAAE,CAAC,CAAC;AAElE,wBAAgB,sBAAsB,CAAC,EACrC,EAAE,EACF,UAAU,EACV,QAAQ,GACT,EAAE,oBAAoB,GAAG;IAAE,aAAa,EAAE,aAAa,CAAA;CAAE,CA+HzD"}
1
+ {"version":3,"file":"runner.d.ts","sourceRoot":"","sources":["../../src/projections/runner.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,MAAM,EAAqB,MAAM,QAAQ,CAAC;AAExD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,sCAAsC,CAAC;AAC7E,OAAO,KAAK,EAAmB,kBAAkB,EAAE,MAAM,aAAa,CAAC;AAEvE,MAAM,MAAM,sBAAsB,GAAG;IACnC,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,MAAM,CAAC;IAClB,qBAAqB,EAAE,MAAM,CAAC;CAC/B,CAAC;AAEF;;;GAGG;AACH,MAAM,MAAM,oBAAoB,GAAG;IACjC,EAAE,EAAE,MAAM,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC;IACtB,UAAU,EAAE,gBAAgB,CAAC,YAAY,CAAC,CAAC;IAC3C,QAAQ,EAAE,kBAAkB,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,aAAa,GAAG,CAC1B,cAAc,EAAE,MAAM,EACtB,QAAQ,EAAE,MAAM,EAChB,IAAI,CAAC,EAAE;IAAE,SAAS,CAAC,EAAE,MAAM,CAAC;IAAC,SAAS,CAAC,EAAE,MAAM,CAAA;CAAE,KAC9C,OAAO,CAAC;IAAE,SAAS,EAAE,MAAM,CAAC;IAAC,oBAAoB,EAAE,MAAM,CAAA;CAAE,CAAC,CAAC;AAElE,wBAAgB,sBAAsB,CAAC,EACrC,EAAE,EACF,UAAU,EACV,QAAQ,GACT,EAAE,oBAAoB,GAAG;IAAE,aAAa,EAAE,aAAa,CAAA;CAAE,CA6JzD"}
package/dist/projections/runner.js CHANGED
@@ -1,6 +1,6 @@
1
1
  export function createProjectionRunner({ db, readStream, registry, }) {
2
- async function getOrCreateCheckpoint(subscriptionId, partition) {
3
- const existing = await db
2
+ async function getOrCreateCheckpoint(executor, subscriptionId, partition) {
3
+ const existing = await executor
4
4
  .selectFrom("subscriptions")
5
5
  .select([
6
6
  "subscription_id as subscriptionId",
@@ -19,7 +19,7 @@ export function createProjectionRunner({ db, readStream, registry, }) {
19
19
  lastProcessedPosition: last,
20
20
  };
21
21
  }
22
- await db
22
+ await executor
23
23
  .insertInto("subscriptions")
24
24
  .values({
25
25
  subscription_id: subscriptionId,
@@ -37,8 +37,8 @@ export function createProjectionRunner({ db, readStream, registry, }) {
37
37
  lastProcessedPosition: 0n,
38
38
  };
39
39
  }
40
- async function updateCheckpoint(subscriptionId, partition, lastProcessedPosition) {
41
- await db
40
+ async function updateCheckpoint(executor, subscriptionId, partition, lastProcessedPosition) {
41
+ await executor
42
42
  .updateTable("subscriptions")
43
43
  .set({ last_processed_position: lastProcessedPosition })
44
44
  .where("subscription_id", "=", subscriptionId)
@@ -48,35 +48,49 @@ export function createProjectionRunner({ db, readStream, registry, }) {
48
48
  async function projectEvents(subscriptionId, streamId, opts) {
49
49
  const partition = opts?.partition ?? "default_partition";
50
50
  const batchSize = BigInt(opts?.batchSize ?? 500);
51
- const checkpoint = await getOrCreateCheckpoint(subscriptionId, partition);
51
+ // Read checkpoint outside transaction to avoid holding locks during event reading
52
+ const checkpoint = await getOrCreateCheckpoint(db, subscriptionId, partition);
53
+ // Read events outside transaction - this is just a read operation
52
54
  const { events, currentStreamVersion } = await readStream(streamId, {
53
55
  from: checkpoint.lastProcessedPosition + 1n,
54
56
  to: checkpoint.lastProcessedPosition + batchSize,
55
57
  partition,
56
58
  });
59
+ let processed = 0;
60
+ // Process each event in its own transaction
61
+ // This keeps transactions short and reduces lock contention
57
62
  for (const ev of events) {
58
63
  if (!ev)
59
64
  continue;
60
- const handlers = registry[ev.type] ?? [];
61
- if (handlers.length === 0) {
62
- await updateCheckpoint(subscriptionId, partition, ev.metadata.streamPosition);
63
- continue;
64
- }
65
- const projectionEvent = {
66
- type: ev.type,
67
- data: ev.data,
68
- metadata: {
69
- streamId: ev.metadata.streamId,
70
- streamPosition: ev.metadata.streamPosition,
71
- globalPosition: ev.metadata.globalPosition,
72
- },
73
- };
74
- for (const handler of handlers) {
75
- await handler({ db, partition }, projectionEvent);
76
- }
77
- await updateCheckpoint(subscriptionId, partition, projectionEvent.metadata.streamPosition);
65
+ // Each event gets its own transaction
66
+ // This ensures atomicity per event while keeping transactions short
67
+ await db.transaction().execute(async (trx) => {
68
+ const handlers = registry[ev.type] ?? [];
69
+ if (handlers.length === 0) {
70
+ // No handlers, just update checkpoint
71
+ await updateCheckpoint(trx, subscriptionId, partition, ev.metadata.streamPosition);
72
+ return;
73
+ }
74
+ const projectionEvent = {
75
+ type: ev.type,
76
+ data: ev.data,
77
+ metadata: {
78
+ streamId: ev.metadata.streamId,
79
+ streamPosition: ev.metadata.streamPosition,
80
+ globalPosition: ev.metadata.globalPosition,
81
+ },
82
+ };
83
+ // All handlers for this event run in the same transaction
84
+ // This ensures they see each other's changes and maintain consistency
85
+ for (const handler of handlers) {
86
+ await handler({ db: trx, partition }, projectionEvent);
87
+ }
88
+ // Update checkpoint after all handlers succeed
89
+ await updateCheckpoint(trx, subscriptionId, partition, projectionEvent.metadata.streamPosition);
90
+ });
91
+ processed++;
78
92
  }
79
- return { processed: events.length, currentStreamVersion };
93
+ return { processed, currentStreamVersion };
80
94
  }
81
95
  return { projectEvents };
82
96
  }
package/dist/projections/snapshot-projection.d.ts CHANGED
@@ -54,6 +54,26 @@ export type SnapshotProjectionConfig<TState, TTable extends string, E extends {
54
54
  */
55
55
  mapToColumns?: (state: TState) => Record<string, unknown>;
56
56
  };
57
+ /**
58
+ * Constructs a deterministic stream_id from the keys.
59
+ * The stream_id is created by sorting the keys and concatenating them with a delimiter.
60
+ * This ensures the same keys always produce the same stream_id.
61
+ *
62
+ * URL encoding is used to handle special characters (like `|` and `:`) in key names or values
63
+ * that could otherwise cause collisions or parsing issues when used as delimiters.
64
+ *
65
+ * @internal
66
+ * Exported for testing purposes only.
67
+ */
68
+ export declare function constructStreamId(keys: Record<string, string>): string;
69
+ /**
70
+ * Loads the current state from a snapshot, handling both string and parsed JSON formats.
71
+ * Falls back to initial state if no snapshot exists.
72
+ *
73
+ * @internal
74
+ * Exported for testing purposes only.
75
+ */
76
+ export declare function loadStateFromSnapshot<TState>(snapshot: unknown, initialState: () => TState, tableName?: string): TState;
57
77
  /**
58
78
  * Creates a projection handler that stores the aggregate state as a snapshot.
59
79
  *
@@ -91,6 +111,62 @@ export declare function createSnapshotProjection<TState, TTable extends string,
91
111
  type: string;
92
112
  data: unknown;
93
113
  }>(config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionHandler<DatabaseExecutor, E>;
114
+ /**
115
+ * Creates a projection handler that stores snapshots in a separate centralized table.
116
+ *
117
+ * This is similar to `createSnapshotProjection`, but uses a separate `snapshots` table
118
+ * to store event-sourcing-related columns. This approach makes read model tables cleaner
119
+ * and more scalable, as they don't need to include event-sourcing columns.
120
+ *
121
+ * **Key differences from `createSnapshotProjection`:**
122
+ * - Snapshots are stored in a centralized `snapshots` table
123
+ * - Read model tables only contain keys from `extractKeys` and columns from `mapToColumns`
124
+ * - The `stream_id` is deterministically constructed from the keys (not from event metadata)
125
+ *
126
+ * **Database schema required:**
127
+ * ```sql
128
+ * CREATE TABLE snapshots (
129
+ * readmodel_table_name TEXT NOT NULL,
130
+ * stream_id TEXT NOT NULL,
131
+ * last_stream_position BIGINT NOT NULL,
132
+ * last_global_position BIGINT NOT NULL,
133
+ * snapshot JSONB NOT NULL,
134
+ * PRIMARY KEY (readmodel_table_name, stream_id)
135
+ * );
136
+ * ```
137
+ *
138
+ * @example
139
+ * ```typescript
140
+ * const cartProjection = createSnapshotProjectionWithSnapshotTable({
141
+ * tableName: 'carts',
142
+ * extractKeys: (event, partition) => ({
143
+ * tenant_id: event.data.eventMeta.tenantId,
144
+ * cart_id: event.data.eventMeta.cartId,
145
+ * partition
146
+ * }),
147
+ * evolve: cartEvolve,
148
+ * initialState: () => ({ status: 'init', items: [] }),
149
+ * mapToColumns: (state) => ({
150
+ * currency: state.currency,
151
+ * is_checked_out: state.status === 'checkedOut'
152
+ * })
153
+ * });
154
+ *
155
+ * // Use it in a projection registry
156
+ * const registry: ProjectionRegistry = {
157
+ * CartCreated: [cartProjection],
158
+ * ItemAddedToCart: [cartProjection],
159
+ * // ... other events
160
+ * };
161
+ * ```
162
+ */
163
+ export declare function createSnapshotProjectionWithSnapshotTable<TState, TTable extends string, E extends {
164
+ type: string;
165
+ data: unknown;
166
+ } = {
167
+ type: string;
168
+ data: unknown;
169
+ }>(config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionHandler<DatabaseExecutor, E>;
94
170
  /**
95
171
  * Creates multiple projection handlers that all use the same snapshot projection logic.
96
172
  * This is a convenience function to avoid repeating the same handler for multiple event types.
@@ -119,4 +195,37 @@ export declare function createSnapshotProjectionRegistry<TState, TTable extends
119
195
  type: string;
120
196
  data: unknown;
121
197
  }>(eventTypes: E["type"][], config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionRegistry;
198
+ /**
199
+ * Creates multiple projection handlers that all use the same snapshot projection logic
200
+ * with a separate snapshots table. This is a convenience function to avoid repeating
201
+ * the same handler for multiple event types.
202
+ *
203
+ * @example
204
+ * ```typescript
205
+ * const registry = createSnapshotProjectionRegistryWithSnapshotTable(
206
+ * ['CartCreated', 'ItemAddedToCart', 'ItemRemovedFromCart'],
207
+ * {
208
+ * tableName: 'carts',
209
+ * extractKeys: (event, partition) => ({
210
+ * tenant_id: event.data.eventMeta.tenantId,
211
+ * cart_id: event.data.eventMeta.cartId,
212
+ * partition
213
+ * }),
214
+ * evolve: cartEvolve,
215
+ * initialState: () => ({ status: 'init', items: [] }),
216
+ * mapToColumns: (state) => ({
217
+ * currency: state.currency,
218
+ * is_checked_out: state.status === 'checkedOut'
219
+ * })
220
+ * }
221
+ * );
222
+ * ```
223
+ */
224
+ export declare function createSnapshotProjectionRegistryWithSnapshotTable<TState, TTable extends string, E extends {
225
+ type: string;
226
+ data: unknown;
227
+ } = {
228
+ type: string;
229
+ data: unknown;
230
+ }>(eventTypes: E["type"][], config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionRegistry;
122
231
  //# sourceMappingURL=snapshot-projection.d.ts.map
package/dist/projections/snapshot-projection.d.ts.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"snapshot-projection.d.ts","sourceRoot":"","sources":["../../src/projections/snapshot-projection.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EACV,gBAAgB,EAEhB,eAAe,EACf,iBAAiB,EACjB,kBAAkB,EACnB,MAAM,aAAa,CAAC;AAErB;;;;;;GAMG;AACH,MAAM,MAAM,wBAAwB,CAClC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,IACzE;IACF;;OAEG;IACH,SAAS,EAAE,MAAM,CAAC;IAElB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,EAAE,CAAC;IAEvB;;;OAGG;IACH,WAAW,EAAE,CACX,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,EACzB,SAAS,EAAE,MAAM,KACd,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAE5B;;;OAGG;IACH,MAAM,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,KAAK,MAAM,CAAC;IAE7D;;OAEG;IACH,YAAY,EAAE,MAAM,MAAM,CAAC;IAE3B;;;;;;;;;;;;OAYG;IACH,YAAY,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC3D,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,wBAAgB,wBAAwB,CACtC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,iBAAiB,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAqGxC;AAED;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,wBAAgB,gCAAgC,CAC9C,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,UAAU,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,EACvB,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,kBAAkB,CAYpB"}
1
+ {"version":3,"file":"snapshot-projection.d.ts","sourceRoot":"","sources":["../../src/projections/snapshot-projection.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EACV,gBAAgB,EAEhB,eAAe,EACf,iBAAiB,EACjB,kBAAkB,EACnB,MAAM,aAAa,CAAC;AAErB;;;;;;GAMG;AACH,MAAM,MAAM,wBAAwB,CAClC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,IACzE;IACF;;OAEG;IACH,SAAS,EAAE,MAAM,CAAC;IAElB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,EAAE,CAAC;IAEvB;;;OAGG;IACH,WAAW,EAAE,CACX,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,EACzB,SAAS,EAAE,MAAM,KACd,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAE5B;;;OAGG;IACH,MAAM,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,KAAK,MAAM,CAAC;IAE7D;;OAEG;IACH,YAAY,EAAE,MAAM,MAAM,CAAC;IAE3B;;;;;;;;;;;;OAYG;IACH,YAAY,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC3D,CAAC;AAEF;;;;;;;;;;GAUG;AACH,wBAAgB,iBAAiB,CAAC,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,MAAM,CAatE;AAgDD;;;;;;GAMG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAC1C,QAAQ,EAAE,OAAO,EACjB,YAAY,EAAE,MAAM,MAAM,EAC1B,SAAS,CAAC,EAAE,MAAM,GACjB,MAAM,CAqBR;AAyBD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,wBAAgB,wBAAwB,CACtC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,iBAAiB,CAAC,gBAAgB,EAAE,CAAC,CAAC,CA4GxC;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAgDG;AACH,wBAAgB,yCAAyC,CACvD,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,iBAAiB,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAyHxC;AAED;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,wBAAgB,gCAAgC,CAC9C,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,UAAU,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,EACvB,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,kBAAkB,CAYpB;AAED;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AACH,wBAAgB,iDAAiD,CAC/D,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,UAAU,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,EACvB,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,kBAAkB,CAYpB"}
package/dist/projections/snapshot-projection.js CHANGED
@@ -1,3 +1,99 @@
1
+ /**
2
+ * Constructs a deterministic stream_id from the keys.
3
+ * The stream_id is created by sorting the keys and concatenating them with a delimiter.
4
+ * This ensures the same keys always produce the same stream_id.
5
+ *
6
+ * URL encoding is used to handle special characters (like `|` and `:`) in key names or values
7
+ * that could otherwise cause collisions or parsing issues when used as delimiters.
8
+ *
9
+ * @internal
10
+ * Exported for testing purposes only.
11
+ */
12
+ export function constructStreamId(keys) {
13
+ const sortedEntries = Object.entries(keys).sort(([a], [b]) => {
14
+ if (a < b)
15
+ return -1;
16
+ if (a > b)
17
+ return 1;
18
+ return 0;
19
+ });
20
+ return sortedEntries
21
+ .map(([key, value]) => {
22
+ const encodedKey = encodeURIComponent(key);
23
+ const encodedValue = encodeURIComponent(value);
24
+ return `${encodedKey}:${encodedValue}`;
25
+ })
26
+ .join("|");
27
+ }
28
+ /**
29
+ * Validates and caches primary keys from extractKeys.
30
+ * Ensures that extractKeys returns a consistent set of keys across all events.
31
+ */
32
+ function validateAndCachePrimaryKeys(keys, tableName, cachedKeys) {
33
+ const currentKeys = Object.keys(keys);
34
+ const sortedCurrentKeys = [...currentKeys].sort();
35
+ if (!cachedKeys) {
36
+ // Cache the initially inferred primary keys in a deterministic order
37
+ return sortedCurrentKeys;
38
+ }
39
+ // Validate that subsequent calls to extractKeys return the same key set
40
+ if (cachedKeys.length !== sortedCurrentKeys.length ||
41
+ !cachedKeys.every((key, index) => key === sortedCurrentKeys[index])) {
42
+ throw new Error(`Snapshot projection "${tableName}" received inconsistent primary keys from extractKeys. ` +
43
+ `Expected keys: ${cachedKeys.join(", ")}, ` +
44
+ `but received: ${sortedCurrentKeys.join(", ")}. ` +
45
+ `Ensure extractKeys returns a consistent set of keys for all events.`);
46
+ }
47
+ return cachedKeys;
48
+ }
49
+ /**
50
+ * Checks if the event should be processed based on the last processed position.
51
+ * Returns true if the event should be skipped (already processed or older).
52
+ * Callers should pass -1n for lastProcessedPosition when there is no previous position
53
+ * so that events are processed from the beginning.
54
+ */
55
+ function shouldSkipEvent(eventPosition, lastProcessedPosition) {
56
+ return eventPosition <= lastProcessedPosition;
57
+ }
58
+ /**
59
+ * Loads the current state from a snapshot, handling both string and parsed JSON formats.
60
+ * Falls back to initial state if no snapshot exists.
61
+ *
62
+ * @internal
63
+ * Exported for testing purposes only.
64
+ */
65
+ export function loadStateFromSnapshot(snapshot, initialState, tableName) {
66
+ if (!snapshot) {
67
+ return initialState();
68
+ }
69
+ // Some database drivers return JSONB as strings, others as parsed objects
70
+ if (typeof snapshot === "string") {
71
+ try {
72
+ return JSON.parse(snapshot);
73
+ }
74
+ catch (error) {
75
+ const tableContext = tableName ? ` for table "${tableName}"` : "";
76
+ const errorMessage = error instanceof Error ? error.message : String(error);
77
+ throw new Error(`Failed to parse snapshot${tableContext}: ${errorMessage}. ` +
78
+ `Snapshot value: ${snapshot.substring(0, 200)}${snapshot.length > 200 ? "..." : ""}`);
79
+ }
80
+ }
81
+ return snapshot;
82
+ }
83
+ /**
84
+ * Builds the update set for denormalized columns from mapToColumns.
85
+ * Returns an empty object if mapToColumns is not provided.
86
+ */
87
+ function buildDenormalizedUpdateSet(newState, mapToColumns) {
88
+ const updateSet = {};
89
+ if (mapToColumns) {
90
+ const columns = mapToColumns(newState);
91
+ for (const columnName of Object.keys(columns)) {
92
+ updateSet[columnName] = (eb) => eb.ref(`excluded.${columnName}`);
93
+ }
94
+ }
95
+ return updateSet;
96
+ }
1
97
  /**
2
98
  * Creates a projection handler that stores the aggregate state as a snapshot.
3
99
  *
@@ -34,12 +130,11 @@ export function createSnapshotProjection(config) {
34
130
  let inferredPrimaryKeys;
35
131
  return async ({ db, partition }, event) => {
36
132
  const keys = extractKeys(event, partition);
37
- // Infer primary keys from extractKeys on first call
38
- if (!inferredPrimaryKeys) {
39
- inferredPrimaryKeys = Object.keys(keys);
40
- }
133
+ // Validate and cache primary keys
134
+ inferredPrimaryKeys = validateAndCachePrimaryKeys(keys, tableName, inferredPrimaryKeys);
41
135
  const primaryKeys = inferredPrimaryKeys;
42
136
  // Check if event is newer than what we've already processed
137
+ // Use FOR UPDATE to lock the row and prevent race conditions with concurrent transactions
43
138
  // Note: Casting to `any` is necessary because Kysely cannot infer types for dynamic table names.
44
139
  // The table name is provided at runtime, so TypeScript cannot verify the table structure at compile time.
45
140
  // This is a known limitation when working with dynamic table names in Kysely.
@@ -52,19 +147,17 @@ export function createSnapshotProjection(config) {
52
147
  const conditions = Object.entries(keys).map(([key, value]) => eb(key, "=", value));
53
148
  return eb.and(conditions);
54
149
  })
150
+ .forUpdate()
55
151
  .executeTakeFirst();
56
152
  const lastPos = existing?.last_stream_position
57
153
  ? BigInt(String(existing.last_stream_position))
58
154
  : -1n;
59
155
  // Skip if we've already processed a newer event
60
- if (event.metadata.streamPosition <= lastPos) {
156
+ if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
61
157
  return;
62
158
  }
63
159
  // Load current state from snapshot or use initial state
64
- // Note: snapshot is stored as JSONB and Kysely returns it as parsed JSON
65
- const currentState = existing?.snapshot
66
- ? existing.snapshot
67
- : initialState();
160
+ const currentState = loadStateFromSnapshot(existing?.snapshot, initialState, tableName);
68
161
  // Apply the event to get new state
69
162
  const newState = evolve(currentState, event);
70
163
  // Prepare the row data with snapshot
@@ -88,24 +181,162 @@ export function createSnapshotProjection(config) {
88
181
  last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
89
182
  last_global_position: (eb) => eb.ref("excluded.last_global_position"),
90
183
  };
91
- // If mapToColumns is provided, also update the denormalized columns
92
- if (mapToColumns) {
93
- const columns = mapToColumns(newState);
94
- for (const columnName of Object.keys(columns)) {
95
- updateSet[columnName] = (eb) => eb.ref(`excluded.${columnName}`);
96
- }
97
- }
184
+ // Add denormalized columns to update set if provided
185
+ const denormalizedUpdateSet = buildDenormalizedUpdateSet(newState, mapToColumns);
186
+ Object.assign(updateSet, denormalizedUpdateSet);
98
187
  await insertQuery
99
188
  // Note: `any` is used here because the conflict builder needs to work with any table schema.
100
189
  // The actual schema is validated at runtime through Kysely's query builder.
190
+ // The FOR UPDATE lock above ensures that concurrent transactions wait, preventing race conditions.
101
191
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
102
192
  .onConflict((oc) => {
103
193
  const conflictBuilder = oc.columns(primaryKeys);
194
+ // Note: We could add a WHERE clause here (via doUpdateSet's `where` option) to only update
195
+ // if excluded.last_stream_position > table.last_stream_position, but the FOR UPDATE lock above
196
+ // already provides the primary protection, so we intentionally rely on that for concurrency control.
104
197
  return conflictBuilder.doUpdateSet(updateSet);
105
198
  })
106
199
  .execute();
107
200
  };
108
201
  }
202
+ /**
203
+ * Creates a projection handler that stores snapshots in a separate centralized table.
204
+ *
205
+ * This is similar to `createSnapshotProjection`, but uses a separate `snapshots` table
206
+ * to store event-sourcing-related columns. This approach makes read model tables cleaner
207
+ * and more scalable, as they don't need to include event-sourcing columns.
208
+ *
209
+ * **Key differences from `createSnapshotProjection`:**
210
+ * - Snapshots are stored in a centralized `snapshots` table
211
+ * - Read model tables only contain keys from `extractKeys` and columns from `mapToColumns`
212
+ * - The `stream_id` is deterministically constructed from the keys (not from event metadata)
213
+ *
214
+ * **Database schema required:**
215
+ * ```sql
216
+ * CREATE TABLE snapshots (
217
+ * readmodel_table_name TEXT NOT NULL,
218
+ * stream_id TEXT NOT NULL,
219
+ * last_stream_position BIGINT NOT NULL,
220
+ * last_global_position BIGINT NOT NULL,
221
+ * snapshot JSONB NOT NULL,
222
+ * PRIMARY KEY (readmodel_table_name, stream_id)
223
+ * );
224
+ * ```
225
+ *
226
+ * @example
227
+ * ```typescript
228
+ * const cartProjection = createSnapshotProjectionWithSnapshotTable({
229
+ * tableName: 'carts',
230
+ * extractKeys: (event, partition) => ({
231
+ * tenant_id: event.data.eventMeta.tenantId,
232
+ * cart_id: event.data.eventMeta.cartId,
233
+ * partition
234
+ * }),
235
+ * evolve: cartEvolve,
236
+ * initialState: () => ({ status: 'init', items: [] }),
237
+ * mapToColumns: (state) => ({
238
+ * currency: state.currency,
239
+ * is_checked_out: state.status === 'checkedOut'
240
+ * })
241
+ * });
242
+ *
243
+ * // Use it in a projection registry
244
+ * const registry: ProjectionRegistry = {
245
+ * CartCreated: [cartProjection],
246
+ * ItemAddedToCart: [cartProjection],
247
+ * // ... other events
248
+ * };
249
+ * ```
250
+ */
251
+ export function createSnapshotProjectionWithSnapshotTable(config) {
252
+ const { tableName, extractKeys, evolve, initialState, mapToColumns } = config;
253
+ // Cache the inferred primary keys after the first call
254
+ let inferredPrimaryKeys;
255
+ return async ({ db, partition }, event) => {
256
+ const keys = extractKeys(event, partition);
257
+ // Validate and cache primary keys
258
+ inferredPrimaryKeys = validateAndCachePrimaryKeys(keys, tableName, inferredPrimaryKeys);
259
+ const primaryKeys = inferredPrimaryKeys;
260
+ // Construct deterministic stream_id from keys
261
+ const streamId = constructStreamId(keys);
262
+ // Check if event is newer than what we've already processed
263
+ // Use FOR UPDATE to lock the row and prevent race conditions with concurrent transactions
264
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
265
+ const existing = await db
266
+ .selectFrom("snapshots")
267
+ .select(["last_stream_position", "snapshot"])
268
+ .where("readmodel_table_name", "=", tableName)
269
+ .where("stream_id", "=", streamId)
270
+ .forUpdate()
271
+ .executeTakeFirst();
272
+ const lastPos = existing?.last_stream_position
273
+ ? BigInt(String(existing.last_stream_position))
274
+ : -1n;
275
+ // Skip if we've already processed a newer event
276
+ if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
277
+ return;
278
+ }
279
+ // Load current state from snapshot or use initial state
280
+ const currentState = loadStateFromSnapshot(existing?.snapshot, initialState, tableName);
281
+ // Apply the event to get new state
282
+ const newState = evolve(currentState, event);
283
+ // Upsert the snapshot in the snapshots table
284
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
285
+ await db
286
+ .insertInto("snapshots")
287
+ .values({
288
+ readmodel_table_name: tableName,
289
+ stream_id: streamId,
290
+ snapshot: JSON.stringify(newState),
291
+ last_stream_position: event.metadata.streamPosition.toString(),
292
+ last_global_position: event.metadata.globalPosition.toString(),
293
+ })
294
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
295
+ .onConflict((oc) => {
296
+ // The FOR UPDATE lock above ensures that concurrent transactions wait, preventing race conditions.
297
+ // Note: We could add a WHERE clause here to only update if excluded.last_stream_position > snapshots.last_stream_position,
298
+ // but this would be redundant for correctness: the FOR UPDATE lock, combined with the shouldSkipEvent check,
299
+ // already prevents stale or out-of-order events from overwriting newer snapshots.
300
+ return oc.columns(["readmodel_table_name", "stream_id"]).doUpdateSet({
301
+ snapshot: (eb) => eb.ref("excluded.snapshot"),
302
+ last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
303
+ last_global_position: (eb) => eb.ref("excluded.last_global_position"),
304
+ });
305
+ })
306
+ .execute();
307
+ // Upsert the read model table with keys and denormalized columns only
308
+ const readModelData = { ...keys };
309
+ if (mapToColumns) {
310
+ const columns = mapToColumns(newState);
311
+ Object.assign(readModelData, columns);
312
+ }
313
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
314
+ const readModelInsertQuery = db
315
+ .insertInto(tableName)
316
+ .values(readModelData);
317
+ // Build the update set for conflict resolution (only for denormalized columns)
318
+ const readModelUpdateSet = buildDenormalizedUpdateSet(newState, mapToColumns);
319
+ // Only update if there are denormalized columns, otherwise just insert (no-op on conflict)
320
+ if (Object.keys(readModelUpdateSet).length > 0) {
321
+ await readModelInsertQuery
322
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
323
+ .onConflict((oc) => {
324
+ const conflictBuilder = oc.columns(primaryKeys);
325
+ return conflictBuilder.doUpdateSet(readModelUpdateSet);
326
+ })
327
+ .execute();
328
+ }
329
+ else {
330
+ // If no denormalized columns, use insert with on conflict do nothing
331
+ await readModelInsertQuery
332
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
333
+ .onConflict((oc) => {
334
+ return oc.columns(primaryKeys).doNothing();
335
+ })
336
+ .execute();
337
+ }
338
+ };
339
+ }
109
340
  /**
110
341
  * Creates multiple projection handlers that all use the same snapshot projection logic.
111
342
  * This is a convenience function to avoid repeating the same handler for multiple event types.
@@ -138,3 +369,40 @@ export function createSnapshotProjectionRegistry(eventTypes, config) {
138
369
  }
139
370
  return registry;
140
371
  }
372
+ /**
373
+ * Creates multiple projection handlers that all use the same snapshot projection logic
374
+ * with a separate snapshots table. This is a convenience function to avoid repeating
375
+ * the same handler for multiple event types.
376
+ *
377
+ * @example
378
+ * ```typescript
379
+ * const registry = createSnapshotProjectionRegistryWithSnapshotTable(
380
+ * ['CartCreated', 'ItemAddedToCart', 'ItemRemovedFromCart'],
381
+ * {
382
+ * tableName: 'carts',
383
+ * extractKeys: (event, partition) => ({
384
+ * tenant_id: event.data.eventMeta.tenantId,
385
+ * cart_id: event.data.eventMeta.cartId,
386
+ * partition
387
+ * }),
388
+ * evolve: cartEvolve,
389
+ * initialState: () => ({ status: 'init', items: [] }),
390
+ * mapToColumns: (state) => ({
391
+ * currency: state.currency,
392
+ * is_checked_out: state.status === 'checkedOut'
393
+ * })
394
+ * }
395
+ * );
396
+ * ```
397
+ */
398
+ export function createSnapshotProjectionRegistryWithSnapshotTable(eventTypes, config) {
399
+ const handler = createSnapshotProjectionWithSnapshotTable(config);
400
+ const registry = {};
401
+ for (const eventType of eventTypes) {
402
+ // Type cast is safe here because ProjectionHandler is contravariant in its event type parameter.
403
+ // A handler for a specific event type E can safely handle any event that matches E's structure.
404
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
405
+ registry[eventType] = [handler];
406
+ }
407
+ return registry;
408
+ }
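The runner change above is behavioral as well as structural: `projectEvents` now wraps each event's handlers and its checkpoint update in one short transaction and reports how many events actually committed. A usage sketch under stated assumptions (the `declare const` placeholders stand in for your own database, registry, and the event store's `readStream`; the stream id is illustrative):

```typescript
import {
  createProjectionRunner,
  type ProjectionRegistry,
} from "@wataruoguchi/emmett-event-store-kysely";

// Placeholders for values the application already has.
declare const db: Parameters<typeof createProjectionRunner>[0]["db"];
declare const readStream: Parameters<typeof createProjectionRunner>[0]["readStream"];
declare const registry: ProjectionRegistry;

const runner = createProjectionRunner({ db, readStream, registry });

// Each event runs in its own transaction (all handlers plus the checkpoint
// update commit together), so a failure mid-batch leaves earlier events
// committed and `processed` counts only the events that went through.
const { processed, currentStreamVersion } = await runner.projectEvents(
  "carts-read-model",  // subscriptionId: checkpoint row in the subscriptions table
  "cart-123",          // streamId (illustrative)
  { partition: "default_partition", batchSize: 500 },
);
console.log({ processed, currentStreamVersion });
```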
package/package.json CHANGED
@@ -3,7 +3,7 @@
   "publishConfig": {
     "access": "public"
   },
-  "version": "2.2.7",
+  "version": "2.3.0",
   "description": "Emmett Event Store with Kysely",
   "author": "Wataru Oguchi",
   "license": "MIT",