@wataruoguchi/emmett-event-store-kysely 2.2.7 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -32,14 +32,16 @@ import { Kysely } from "kysely";
32
32
  // Required tables: messages, streams, subscriptions
33
33
  ```
34
34
 
35
- A read model table expects to have the following columns:
35
+ **Legacy approach:** A read model table expects to have the following columns:
36
36
 
37
- - stream_id (uuid)
37
+ - stream_id (text)
38
38
  - last_stream_position (bigint)
39
39
  - last_global_position (bigint)
40
40
  - partition (text)
41
41
  - snapshot (jsonb)
42
42
 
43
+ **New approach (recommended):** Use `createSnapshotProjectionWithSnapshotTable` to store snapshots in a separate centralized table, keeping read model tables clean with only keys and denormalized columns.
44
+
43
45
  ### 2. Create Event Store
44
46
 
45
47
  ```typescript
@@ -69,12 +71,69 @@ Please read <https://event-driven-io.github.io/emmett/getting-started.html>
69
71
 
70
72
  ### 4. Build Read Models
71
73
 
72
- This package supports "Snapshot Projections".
74
+ This package supports "Snapshot Projections" with two approaches:
75
+
76
+ #### Option A: Separate Snapshot Table (Recommended) ⭐
77
+
78
+ Use `createSnapshotProjectionRegistryWithSnapshotTable` (shown below) or the single-handler `createSnapshotProjectionWithSnapshotTable` to store snapshots in a centralized table:
79
+
80
+ ```typescript
81
+ import {
82
+ createSnapshotProjectionRegistryWithSnapshotTable
83
+ } from "@wataruoguchi/emmett-event-store-kysely";
84
+
85
+ // First, create the snapshots table:
86
+ // CREATE TABLE snapshots (
87
+ // readmodel_table_name TEXT NOT NULL,
88
+ // stream_id TEXT NOT NULL,
89
+ // last_stream_position BIGINT NOT NULL,
90
+ // last_global_position BIGINT NOT NULL,
91
+ // snapshot JSONB NOT NULL,
92
+ // PRIMARY KEY (readmodel_table_name, stream_id)
93
+ // );
94
+
95
+ // Reuse your write model's evolve function!
96
+ const registry = createSnapshotProjectionRegistryWithSnapshotTable(
97
+ ["CartCreated", "ItemAdded", "CartCheckedOut"],
98
+ {
99
+ tableName: "carts",
100
+ extractKeys: (event, partition) => ({
101
+ tenant_id: event.data.eventMeta.tenantId,
102
+ cart_id: event.data.eventMeta.cartId,
103
+ partition,
104
+ }),
105
+ evolve: domainEvolve, // Reuse from write model!
106
+ initialState,
107
+ mapToColumns: (state) => ({ // Optional: denormalize for queries
108
+ currency: state.currency,
109
+ total: state.status === "checkedOut" ? state.total : null,
110
+ }),
111
+ }
112
+ );
113
+ ```
114
+
115
+ **Benefits:**
116
+
117
+ - ✅ Cleaner read model tables (no event-sourcing columns)
118
+ - ✅ Easier to create new read models (no schema migrations for event-sourcing columns)
119
+ - ✅ Centralized snapshot management
120
+ - ✅ Race condition protection with `FOR UPDATE` locking
121
+ - ✅ Operations wrapped in transactions for stronger race condition protection
122
+ - ✅ Automatic idempotency (skips already-processed events)
123
+ - ✅ Primary key validation (ensures consistent `extractKeys`)
124
+
125
+ **Important:** The `extractKeys` function must return the same set of keys for all events. The projection validates this at runtime.
126
+
127
+ #### Option B: Legacy Approach (Backward Compatible)
128
+
129
+ Use `createSnapshotProjectionRegistry` to store everything in the read model table:
130
+
131
+ **Note:** This approach stores event-sourcing columns (`stream_id`, `last_stream_position`, etc.) directly in the read model table. Consider using Option A for new projects.
73
132
 
74
133
  ```typescript
75
134
  import {
76
135
  createSnapshotProjectionRegistry
77
- } from "@wataruoguchi/emmett-event-store-kysely/projections";
136
+ } from "@wataruoguchi/emmett-event-store-kysely";
78
137
 
79
138
  // Reuse your write model's evolve function!
80
139
  const registry = createSnapshotProjectionRegistry(
@@ -99,7 +158,7 @@ const registry = createSnapshotProjectionRegistry(
99
158
  ### 5. Process Events and Update Read Model
100
159
 
101
160
  ```typescript
102
- import { createProjectionRunner } from "@wataruoguchi/emmett-event-store-kysely/projections";
161
+ import { createProjectionRunner } from "@wataruoguchi/emmett-event-store-kysely";
103
162
 
104
163
  const runner = createProjectionRunner({
105
164
  db,
package/dist/index.cjs CHANGED
@@ -25,6 +25,8 @@ __export(index_exports, {
25
25
  createProjectionRunner: () => createProjectionRunner,
26
26
  createSnapshotProjection: () => createSnapshotProjection,
27
27
  createSnapshotProjectionRegistry: () => createSnapshotProjectionRegistry,
28
+ createSnapshotProjectionRegistryWithSnapshotTable: () => createSnapshotProjectionRegistryWithSnapshotTable,
29
+ createSnapshotProjectionWithSnapshotTable: () => createSnapshotProjectionWithSnapshotTable,
28
30
  getKyselyEventStore: () => getKyselyEventStore
29
31
  });
30
32
  module.exports = __toCommonJS(index_exports);
@@ -451,8 +453,8 @@ function createProjectionRunner({
451
453
  readStream,
452
454
  registry
453
455
  }) {
454
- async function getOrCreateCheckpoint(subscriptionId, partition) {
455
- const existing = await db.selectFrom("subscriptions").select([
456
+ async function getOrCreateCheckpoint(executor, subscriptionId, partition) {
457
+ const existing = await executor.selectFrom("subscriptions").select([
456
458
  "subscription_id as subscriptionId",
457
459
  "partition",
458
460
  "last_processed_position as lastProcessedPosition"
@@ -469,7 +471,7 @@ function createProjectionRunner({
469
471
  lastProcessedPosition: last
470
472
  };
471
473
  }
472
- await db.insertInto("subscriptions").values({
474
+ await executor.insertInto("subscriptions").values({
473
475
  subscription_id: subscriptionId,
474
476
  partition,
475
477
  version: 1,
@@ -485,74 +487,145 @@ function createProjectionRunner({
485
487
  lastProcessedPosition: 0n
486
488
  };
487
489
  }
488
- async function updateCheckpoint(subscriptionId, partition, lastProcessedPosition) {
489
- await db.updateTable("subscriptions").set({ last_processed_position: lastProcessedPosition }).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).execute();
490
+ async function updateCheckpoint(executor, subscriptionId, partition, lastProcessedPosition) {
491
+ await executor.updateTable("subscriptions").set({ last_processed_position: lastProcessedPosition }).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).execute();
490
492
  }
491
493
  async function projectEvents(subscriptionId, streamId, opts) {
492
494
  const partition = opts?.partition ?? "default_partition";
493
495
  const batchSize = BigInt(opts?.batchSize ?? 500);
494
- const checkpoint = await getOrCreateCheckpoint(subscriptionId, partition);
496
+ const checkpoint = await getOrCreateCheckpoint(
497
+ db,
498
+ subscriptionId,
499
+ partition
500
+ );
495
501
  const { events, currentStreamVersion } = await readStream(streamId, {
496
502
  from: checkpoint.lastProcessedPosition + 1n,
497
503
  to: checkpoint.lastProcessedPosition + batchSize,
498
504
  partition
499
505
  });
506
+ let processed = 0;
500
507
  for (const ev of events) {
501
508
  if (!ev) continue;
502
- const handlers = registry[ev.type] ?? [];
503
- if (handlers.length === 0) {
509
+ await db.transaction().execute(async (trx) => {
510
+ const handlers = registry[ev.type] ?? [];
511
+ if (handlers.length === 0) {
512
+ await updateCheckpoint(
513
+ trx,
514
+ subscriptionId,
515
+ partition,
516
+ ev.metadata.streamPosition
517
+ );
518
+ return;
519
+ }
520
+ const projectionEvent = {
521
+ type: ev.type,
522
+ data: ev.data,
523
+ metadata: {
524
+ streamId: ev.metadata.streamId,
525
+ streamPosition: ev.metadata.streamPosition,
526
+ globalPosition: ev.metadata.globalPosition
527
+ }
528
+ };
529
+ for (const handler of handlers) {
530
+ await handler({ db: trx, partition }, projectionEvent);
531
+ }
504
532
  await updateCheckpoint(
533
+ trx,
505
534
  subscriptionId,
506
535
  partition,
507
- ev.metadata.streamPosition
536
+ projectionEvent.metadata.streamPosition
508
537
  );
509
- continue;
510
- }
511
- const projectionEvent = {
512
- type: ev.type,
513
- data: ev.data,
514
- metadata: {
515
- streamId: ev.metadata.streamId,
516
- streamPosition: ev.metadata.streamPosition,
517
- globalPosition: ev.metadata.globalPosition
518
- }
519
- };
520
- for (const handler of handlers) {
521
- await handler({ db, partition }, projectionEvent);
522
- }
523
- await updateCheckpoint(
524
- subscriptionId,
525
- partition,
526
- projectionEvent.metadata.streamPosition
527
- );
538
+ });
539
+ processed++;
528
540
  }
529
- return { processed: events.length, currentStreamVersion };
541
+ return { processed, currentStreamVersion };
530
542
  }
531
543
  return { projectEvents };
532
544
  }
533
545
 
534
546
  // src/projections/snapshot-projection.ts
547
+ function constructStreamId(keys) {
548
+ const sortedEntries = Object.entries(keys).sort(([a], [b]) => {
549
+ if (a < b) return -1;
550
+ if (a > b) return 1;
551
+ return 0;
552
+ });
553
+ return sortedEntries.map(([key, value]) => {
554
+ const encodedKey = encodeURIComponent(key);
555
+ const encodedValue = encodeURIComponent(value);
556
+ return `${encodedKey}:${encodedValue}`;
557
+ }).join("|");
558
+ }
559
+ function validateAndCachePrimaryKeys(keys, tableName, cachedKeys) {
560
+ const currentKeys = Object.keys(keys);
561
+ const sortedCurrentKeys = [...currentKeys].sort();
562
+ if (!cachedKeys) {
563
+ return sortedCurrentKeys;
564
+ }
565
+ if (cachedKeys.length !== sortedCurrentKeys.length || !cachedKeys.every((key, index) => key === sortedCurrentKeys[index])) {
566
+ throw new Error(
567
+ `Snapshot projection "${tableName}" received inconsistent primary keys from extractKeys. Expected keys: ${cachedKeys.join(", ")}, but received: ${sortedCurrentKeys.join(", ")}. Ensure extractKeys returns a consistent set of keys for all events.`
568
+ );
569
+ }
570
+ return cachedKeys;
571
+ }
572
+ function shouldSkipEvent(eventPosition, lastProcessedPosition) {
573
+ return eventPosition <= lastProcessedPosition;
574
+ }
575
+ function loadStateFromSnapshot(snapshot, initialState, tableName) {
576
+ if (!snapshot) {
577
+ return initialState();
578
+ }
579
+ if (typeof snapshot === "string") {
580
+ try {
581
+ return JSON.parse(snapshot);
582
+ } catch (error) {
583
+ const tableContext = tableName ? ` for table "${tableName}"` : "";
584
+ const errorMessage = error instanceof Error ? error.message : String(error);
585
+ throw new Error(
586
+ `Failed to parse snapshot${tableContext}: ${errorMessage}. Snapshot value: ${snapshot.substring(0, 200)}${snapshot.length > 200 ? "..." : ""}`
587
+ );
588
+ }
589
+ }
590
+ return snapshot;
591
+ }
592
+ function buildDenormalizedUpdateSet(columns) {
593
+ const updateSet = {};
594
+ if (columns) {
595
+ for (const columnName of Object.keys(columns)) {
596
+ updateSet[columnName] = (eb) => eb.ref(`excluded.${columnName}`);
597
+ }
598
+ }
599
+ return updateSet;
600
+ }
535
601
  function createSnapshotProjection(config) {
536
602
  const { tableName, extractKeys, evolve, initialState, mapToColumns } = config;
537
603
  let inferredPrimaryKeys;
538
604
  return async ({ db, partition }, event) => {
539
605
  const keys = extractKeys(event, partition);
540
- if (!inferredPrimaryKeys) {
541
- inferredPrimaryKeys = Object.keys(keys);
542
- }
606
+ inferredPrimaryKeys = validateAndCachePrimaryKeys(
607
+ keys,
608
+ tableName,
609
+ inferredPrimaryKeys
610
+ );
543
611
  const primaryKeys = inferredPrimaryKeys;
544
612
  const existing = await db.selectFrom(tableName).select(["last_stream_position", "snapshot"]).where((eb) => {
545
613
  const conditions = Object.entries(keys).map(
546
614
  ([key, value]) => eb(key, "=", value)
547
615
  );
548
616
  return eb.and(conditions);
549
- }).executeTakeFirst();
617
+ }).forUpdate().executeTakeFirst();
550
618
  const lastPos = existing?.last_stream_position ? BigInt(String(existing.last_stream_position)) : -1n;
551
- if (event.metadata.streamPosition <= lastPos) {
619
+ if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
552
620
  return;
553
621
  }
554
- const currentState = existing?.snapshot ? existing.snapshot : initialState();
622
+ const currentState = loadStateFromSnapshot(
623
+ existing?.snapshot,
624
+ initialState,
625
+ tableName
626
+ );
555
627
  const newState = evolve(currentState, event);
628
+ const denormalizedColumns = mapToColumns ? mapToColumns(newState) : void 0;
556
629
  const rowData = {
557
630
  ...keys,
558
631
  snapshot: JSON.stringify(newState),
@@ -560,9 +633,8 @@ function createSnapshotProjection(config) {
560
633
  last_stream_position: event.metadata.streamPosition.toString(),
561
634
  last_global_position: event.metadata.globalPosition.toString()
562
635
  };
563
- if (mapToColumns) {
564
- const columns = mapToColumns(newState);
565
- Object.assign(rowData, columns);
636
+ if (denormalizedColumns) {
637
+ Object.assign(rowData, denormalizedColumns);
566
638
  }
567
639
  const insertQuery = db.insertInto(tableName).values(rowData);
568
640
  const updateSet = {
@@ -571,18 +643,69 @@ function createSnapshotProjection(config) {
571
643
  last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
572
644
  last_global_position: (eb) => eb.ref("excluded.last_global_position")
573
645
  };
574
- if (mapToColumns) {
575
- const columns = mapToColumns(newState);
576
- for (const columnName of Object.keys(columns)) {
577
- updateSet[columnName] = (eb) => eb.ref(`excluded.${columnName}`);
578
- }
579
- }
646
+ const denormalizedUpdateSet = buildDenormalizedUpdateSet(denormalizedColumns);
647
+ Object.assign(updateSet, denormalizedUpdateSet);
580
648
  await insertQuery.onConflict((oc) => {
581
649
  const conflictBuilder = oc.columns(primaryKeys);
582
650
  return conflictBuilder.doUpdateSet(updateSet);
583
651
  }).execute();
584
652
  };
585
653
  }
654
+ function createSnapshotProjectionWithSnapshotTable(config) {
655
+ const { tableName, extractKeys, evolve, initialState, mapToColumns } = config;
656
+ let inferredPrimaryKeys;
657
+ return async ({ db, partition }, event) => {
658
+ const keys = extractKeys(event, partition);
659
+ inferredPrimaryKeys = validateAndCachePrimaryKeys(
660
+ keys,
661
+ tableName,
662
+ inferredPrimaryKeys
663
+ );
664
+ const primaryKeys = inferredPrimaryKeys;
665
+ const streamId = constructStreamId(keys);
666
+ const existing = await db.selectFrom("snapshots").select(["last_stream_position", "snapshot"]).where("readmodel_table_name", "=", tableName).where("stream_id", "=", streamId).forUpdate().executeTakeFirst();
667
+ const lastPos = existing?.last_stream_position ? BigInt(String(existing.last_stream_position)) : -1n;
668
+ if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
669
+ return;
670
+ }
671
+ const currentState = loadStateFromSnapshot(
672
+ existing?.snapshot,
673
+ initialState,
674
+ tableName
675
+ );
676
+ const newState = evolve(currentState, event);
677
+ const denormalizedColumns = mapToColumns ? mapToColumns(newState) : void 0;
678
+ await db.insertInto("snapshots").values({
679
+ readmodel_table_name: tableName,
680
+ stream_id: streamId,
681
+ snapshot: JSON.stringify(newState),
682
+ last_stream_position: event.metadata.streamPosition.toString(),
683
+ last_global_position: event.metadata.globalPosition.toString()
684
+ }).onConflict((oc) => {
685
+ return oc.columns(["readmodel_table_name", "stream_id"]).doUpdateSet({
686
+ snapshot: (eb) => eb.ref("excluded.snapshot"),
687
+ last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
688
+ last_global_position: (eb) => eb.ref("excluded.last_global_position")
689
+ });
690
+ }).execute();
691
+ const readModelData = { ...keys };
692
+ if (denormalizedColumns) {
693
+ Object.assign(readModelData, denormalizedColumns);
694
+ }
695
+ const readModelInsertQuery = db.insertInto(tableName).values(readModelData);
696
+ const readModelUpdateSet = buildDenormalizedUpdateSet(denormalizedColumns);
697
+ if (Object.keys(readModelUpdateSet).length > 0) {
698
+ await readModelInsertQuery.onConflict((oc) => {
699
+ const conflictBuilder = oc.columns(primaryKeys);
700
+ return conflictBuilder.doUpdateSet(readModelUpdateSet);
701
+ }).execute();
702
+ } else {
703
+ await readModelInsertQuery.onConflict((oc) => {
704
+ return oc.columns(primaryKeys).doNothing();
705
+ }).execute();
706
+ }
707
+ };
708
+ }
586
709
  function createSnapshotProjectionRegistry(eventTypes, config) {
587
710
  const handler = createSnapshotProjection(config);
588
711
  const registry = {};
@@ -591,6 +714,14 @@ function createSnapshotProjectionRegistry(eventTypes, config) {
591
714
  }
592
715
  return registry;
593
716
  }
717
+ function createSnapshotProjectionRegistryWithSnapshotTable(eventTypes, config) {
718
+ const handler = createSnapshotProjectionWithSnapshotTable(config);
719
+ const registry = {};
720
+ for (const eventType of eventTypes) {
721
+ registry[eventType] = [handler];
722
+ }
723
+ return registry;
724
+ }
594
725
  // Annotate the CommonJS export names for ESM import in node:
595
726
  0 && (module.exports = {
596
727
  createKyselyEventStoreConsumer,
@@ -598,5 +729,7 @@ function createSnapshotProjectionRegistry(eventTypes, config) {
598
729
  createProjectionRunner,
599
730
  createSnapshotProjection,
600
731
  createSnapshotProjectionRegistry,
732
+ createSnapshotProjectionRegistryWithSnapshotTable,
733
+ createSnapshotProjectionWithSnapshotTable,
601
734
  getKyselyEventStore
602
735
  });
package/dist/index.d.ts CHANGED
@@ -4,7 +4,7 @@ export { getKyselyEventStore } from "./event-store/kysely-event-store.js";
4
4
  export type { KyselyEventStore, KyselyEventStoreOptions, ProjectionReadStreamOptions, } from "./event-store/kysely-event-store.js";
5
5
  export { createProjectionRunner } from "./projections/runner.js";
6
6
  export type { ProjectEvents } from "./projections/runner.js";
7
- export { createSnapshotProjection, createSnapshotProjectionRegistry, } from "./projections/snapshot-projection.js";
7
+ export { createSnapshotProjection, createSnapshotProjectionRegistry, createSnapshotProjectionWithSnapshotTable, createSnapshotProjectionRegistryWithSnapshotTable, } from "./projections/snapshot-projection.js";
8
8
  export type { SnapshotProjectionConfig } from "./projections/snapshot-projection.js";
9
9
  export { createProjectionRegistry } from "./types.js";
10
10
  export type { DatabaseExecutor, Dependencies, ExtendedOptions, ProjectionContext, ProjectionEvent, ProjectionEventMetadata, ProjectionHandler, ProjectionRegistry, } from "./types.js";
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,8BAA8B,EAAE,MAAM,4BAA4B,CAAC;AAC5E,YAAY,EACV,wBAAwB,EACxB,8BAA8B,GAC/B,MAAM,4BAA4B,CAAC;AACpC,OAAO,EAAE,mBAAmB,EAAE,MAAM,qCAAqC,CAAC;AAC1E,YAAY,EACV,gBAAgB,EAChB,uBAAuB,EACvB,2BAA2B,GAC5B,MAAM,qCAAqC,CAAC;AAC7C,OAAO,EAAE,sBAAsB,EAAE,MAAM,yBAAyB,CAAC;AACjE,YAAY,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AAC7D,OAAO,EACL,wBAAwB,EACxB,gCAAgC,GACjC,MAAM,sCAAsC,CAAC;AAC9C,YAAY,EAAE,wBAAwB,EAAE,MAAM,sCAAsC,CAAC;AACrF,OAAO,EAAE,wBAAwB,EAAE,MAAM,YAAY,CAAC;AACtD,YAAY,EACV,gBAAgB,EAChB,YAAY,EACZ,eAAe,EACf,iBAAiB,EACjB,eAAe,EACf,uBAAuB,EACvB,iBAAiB,EACjB,kBAAkB,GACnB,MAAM,YAAY,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,8BAA8B,EAAE,MAAM,4BAA4B,CAAC;AAC5E,YAAY,EACV,wBAAwB,EACxB,8BAA8B,GAC/B,MAAM,4BAA4B,CAAC;AACpC,OAAO,EAAE,mBAAmB,EAAE,MAAM,qCAAqC,CAAC;AAC1E,YAAY,EACV,gBAAgB,EAChB,uBAAuB,EACvB,2BAA2B,GAC5B,MAAM,qCAAqC,CAAC;AAC7C,OAAO,EAAE,sBAAsB,EAAE,MAAM,yBAAyB,CAAC;AACjE,YAAY,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AAC7D,OAAO,EACL,wBAAwB,EACxB,gCAAgC,EAChC,yCAAyC,EACzC,iDAAiD,GAClD,MAAM,sCAAsC,CAAC;AAC9C,YAAY,EAAE,wBAAwB,EAAE,MAAM,sCAAsC,CAAC;AACrF,OAAO,EAAE,wBAAwB,EAAE,MAAM,YAAY,CAAC;AACtD,YAAY,EACV,gBAAgB,EAChB,YAAY,EACZ,eAAe,EACf,iBAAiB,EACjB,eAAe,EACf,uBAAuB,EACvB,iBAAiB,EACjB,kBAAkB,GACnB,MAAM,YAAY,CAAC"}
package/dist/index.js CHANGED
@@ -1,5 +1,5 @@
1
1
  export { createKyselyEventStoreConsumer } from "./event-store/consumers.js";
2
2
  export { getKyselyEventStore } from "./event-store/kysely-event-store.js";
3
3
  export { createProjectionRunner } from "./projections/runner.js";
4
- export { createSnapshotProjection, createSnapshotProjectionRegistry, } from "./projections/snapshot-projection.js";
4
+ export { createSnapshotProjection, createSnapshotProjectionRegistry, createSnapshotProjectionWithSnapshotTable, createSnapshotProjectionRegistryWithSnapshotTable, } from "./projections/snapshot-projection.js";
5
5
  export { createProjectionRegistry } from "./types.js";
@@ -1 +1 @@
1
- {"version":3,"file":"runner.d.ts","sourceRoot":"","sources":["../../src/projections/runner.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,MAAM,EAAqB,MAAM,QAAQ,CAAC;AAExD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,sCAAsC,CAAC;AAC7E,OAAO,KAAK,EAAmB,kBAAkB,EAAE,MAAM,aAAa,CAAC;AAEvE,MAAM,MAAM,sBAAsB,GAAG;IACnC,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,MAAM,CAAC;IAClB,qBAAqB,EAAE,MAAM,CAAC;CAC/B,CAAC;AAEF;;;GAGG;AACH,MAAM,MAAM,oBAAoB,GAAG;IACjC,EAAE,EAAE,MAAM,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC;IACtB,UAAU,EAAE,gBAAgB,CAAC,YAAY,CAAC,CAAC;IAC3C,QAAQ,EAAE,kBAAkB,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,aAAa,GAAG,CAC1B,cAAc,EAAE,MAAM,EACtB,QAAQ,EAAE,MAAM,EAChB,IAAI,CAAC,EAAE;IAAE,SAAS,CAAC,EAAE,MAAM,CAAC;IAAC,SAAS,CAAC,EAAE,MAAM,CAAA;CAAE,KAC9C,OAAO,CAAC;IAAE,SAAS,EAAE,MAAM,CAAC;IAAC,oBAAoB,EAAE,MAAM,CAAA;CAAE,CAAC,CAAC;AAElE,wBAAgB,sBAAsB,CAAC,EACrC,EAAE,EACF,UAAU,EACV,QAAQ,GACT,EAAE,oBAAoB,GAAG;IAAE,aAAa,EAAE,aAAa,CAAA;CAAE,CA+HzD"}
1
+ {"version":3,"file":"runner.d.ts","sourceRoot":"","sources":["../../src/projections/runner.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,MAAM,EAAqB,MAAM,QAAQ,CAAC;AAExD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,sCAAsC,CAAC;AAC7E,OAAO,KAAK,EAAmB,kBAAkB,EAAE,MAAM,aAAa,CAAC;AAEvE,MAAM,MAAM,sBAAsB,GAAG;IACnC,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,MAAM,CAAC;IAClB,qBAAqB,EAAE,MAAM,CAAC;CAC/B,CAAC;AAEF;;;GAGG;AACH,MAAM,MAAM,oBAAoB,GAAG;IACjC,EAAE,EAAE,MAAM,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC;IACtB,UAAU,EAAE,gBAAgB,CAAC,YAAY,CAAC,CAAC;IAC3C,QAAQ,EAAE,kBAAkB,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,aAAa,GAAG,CAC1B,cAAc,EAAE,MAAM,EACtB,QAAQ,EAAE,MAAM,EAChB,IAAI,CAAC,EAAE;IAAE,SAAS,CAAC,EAAE,MAAM,CAAC;IAAC,SAAS,CAAC,EAAE,MAAM,CAAA;CAAE,KAC9C,OAAO,CAAC;IAAE,SAAS,EAAE,MAAM,CAAC;IAAC,oBAAoB,EAAE,MAAM,CAAA;CAAE,CAAC,CAAC;AAElE,wBAAgB,sBAAsB,CAAC,EACrC,EAAE,EACF,UAAU,EACV,QAAQ,GACT,EAAE,oBAAoB,GAAG;IAAE,aAAa,EAAE,aAAa,CAAA;CAAE,CA6JzD"}
@@ -1,6 +1,6 @@
1
1
  export function createProjectionRunner({ db, readStream, registry, }) {
2
- async function getOrCreateCheckpoint(subscriptionId, partition) {
3
- const existing = await db
2
+ async function getOrCreateCheckpoint(executor, subscriptionId, partition) {
3
+ const existing = await executor
4
4
  .selectFrom("subscriptions")
5
5
  .select([
6
6
  "subscription_id as subscriptionId",
@@ -19,7 +19,7 @@ export function createProjectionRunner({ db, readStream, registry, }) {
19
19
  lastProcessedPosition: last,
20
20
  };
21
21
  }
22
- await db
22
+ await executor
23
23
  .insertInto("subscriptions")
24
24
  .values({
25
25
  subscription_id: subscriptionId,
@@ -37,8 +37,8 @@ export function createProjectionRunner({ db, readStream, registry, }) {
37
37
  lastProcessedPosition: 0n,
38
38
  };
39
39
  }
40
- async function updateCheckpoint(subscriptionId, partition, lastProcessedPosition) {
41
- await db
40
+ async function updateCheckpoint(executor, subscriptionId, partition, lastProcessedPosition) {
41
+ await executor
42
42
  .updateTable("subscriptions")
43
43
  .set({ last_processed_position: lastProcessedPosition })
44
44
  .where("subscription_id", "=", subscriptionId)
@@ -48,35 +48,49 @@ export function createProjectionRunner({ db, readStream, registry, }) {
48
48
  async function projectEvents(subscriptionId, streamId, opts) {
49
49
  const partition = opts?.partition ?? "default_partition";
50
50
  const batchSize = BigInt(opts?.batchSize ?? 500);
51
- const checkpoint = await getOrCreateCheckpoint(subscriptionId, partition);
51
+ // Read checkpoint outside transaction to avoid holding locks during event reading
52
+ const checkpoint = await getOrCreateCheckpoint(db, subscriptionId, partition);
53
+ // Read events outside transaction - this is just a read operation
52
54
  const { events, currentStreamVersion } = await readStream(streamId, {
53
55
  from: checkpoint.lastProcessedPosition + 1n,
54
56
  to: checkpoint.lastProcessedPosition + batchSize,
55
57
  partition,
56
58
  });
59
+ let processed = 0;
60
+ // Process each event in its own transaction
61
+ // This keeps transactions short and reduces lock contention
57
62
  for (const ev of events) {
58
63
  if (!ev)
59
64
  continue;
60
- const handlers = registry[ev.type] ?? [];
61
- if (handlers.length === 0) {
62
- await updateCheckpoint(subscriptionId, partition, ev.metadata.streamPosition);
63
- continue;
64
- }
65
- const projectionEvent = {
66
- type: ev.type,
67
- data: ev.data,
68
- metadata: {
69
- streamId: ev.metadata.streamId,
70
- streamPosition: ev.metadata.streamPosition,
71
- globalPosition: ev.metadata.globalPosition,
72
- },
73
- };
74
- for (const handler of handlers) {
75
- await handler({ db, partition }, projectionEvent);
76
- }
77
- await updateCheckpoint(subscriptionId, partition, projectionEvent.metadata.streamPosition);
65
+ // Each event gets its own transaction
66
+ // This ensures atomicity per event while keeping transactions short
67
+ await db.transaction().execute(async (trx) => {
68
+ const handlers = registry[ev.type] ?? [];
69
+ if (handlers.length === 0) {
70
+ // No handlers, just update checkpoint
71
+ await updateCheckpoint(trx, subscriptionId, partition, ev.metadata.streamPosition);
72
+ return;
73
+ }
74
+ const projectionEvent = {
75
+ type: ev.type,
76
+ data: ev.data,
77
+ metadata: {
78
+ streamId: ev.metadata.streamId,
79
+ streamPosition: ev.metadata.streamPosition,
80
+ globalPosition: ev.metadata.globalPosition,
81
+ },
82
+ };
83
+ // All handlers for this event run in the same transaction
84
+ // This ensures they see each other's changes and maintain consistency
85
+ for (const handler of handlers) {
86
+ await handler({ db: trx, partition }, projectionEvent);
87
+ }
88
+ // Update checkpoint after all handlers succeed
89
+ await updateCheckpoint(trx, subscriptionId, partition, projectionEvent.metadata.streamPosition);
90
+ });
91
+ processed++;
78
92
  }
79
- return { processed: events.length, currentStreamVersion };
93
+ return { processed, currentStreamVersion };
80
94
  }
81
95
  return { projectEvents };
82
96
  }
@@ -54,6 +54,26 @@ export type SnapshotProjectionConfig<TState, TTable extends string, E extends {
54
54
  */
55
55
  mapToColumns?: (state: TState) => Record<string, unknown>;
56
56
  };
57
+ /**
58
+ * Constructs a deterministic stream_id from the keys.
59
+ * The stream_id is created by sorting the keys and concatenating them with a delimiter.
60
+ * This ensures the same keys always produce the same stream_id.
61
+ *
62
+ * URL encoding is used to handle special characters (like `|` and `:`) in key names or values
63
+ * that could otherwise cause collisions or parsing issues when used as delimiters.
64
+ *
65
+ * @internal
66
+ * Exported for testing purposes only.
67
+ */
68
+ export declare function constructStreamId(keys: Record<string, string>): string;
69
+ /**
70
+ * Loads the current state from a snapshot, handling both string and parsed JSON formats.
71
+ * Falls back to initial state if no snapshot exists.
72
+ *
73
+ * @internal
74
+ * Exported for testing purposes only.
75
+ */
76
+ export declare function loadStateFromSnapshot<TState>(snapshot: unknown, initialState: () => TState, tableName?: string): TState;
57
77
  /**
58
78
  * Creates a projection handler that stores the aggregate state as a snapshot.
59
79
  *
@@ -91,6 +111,62 @@ export declare function createSnapshotProjection<TState, TTable extends string,
91
111
  type: string;
92
112
  data: unknown;
93
113
  }>(config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionHandler<DatabaseExecutor, E>;
114
+ /**
115
+ * Creates a projection handler that stores snapshots in a separate centralized table.
116
+ *
117
+ * This is similar to `createSnapshotProjection`, but uses a separate `snapshots` table
118
+ * to store event-sourcing-related columns. This approach makes read model tables cleaner
119
+ * and more scalable, as they don't need to include event-sourcing columns.
120
+ *
121
+ * **Key differences from `createSnapshotProjection`:**
122
+ * - Snapshots are stored in a centralized `snapshots` table
123
+ * - Read model tables only contain keys from `extractKeys` and columns from `mapToColumns`
124
+ * - The `stream_id` is deterministically constructed from the keys (not from event metadata)
125
+ *
126
+ * **Database schema required:**
127
+ * ```sql
128
+ * CREATE TABLE snapshots (
129
+ * readmodel_table_name TEXT NOT NULL,
130
+ * stream_id TEXT NOT NULL,
131
+ * last_stream_position BIGINT NOT NULL,
132
+ * last_global_position BIGINT NOT NULL,
133
+ * snapshot JSONB NOT NULL,
134
+ * PRIMARY KEY (readmodel_table_name, stream_id)
135
+ * );
136
+ * ```
137
+ *
138
+ * @example
139
+ * ```typescript
140
+ * const cartProjection = createSnapshotProjectionWithSnapshotTable({
141
+ * tableName: 'carts',
142
+ * extractKeys: (event, partition) => ({
143
+ * tenant_id: event.data.eventMeta.tenantId,
144
+ * cart_id: event.data.eventMeta.cartId,
145
+ * partition
146
+ * }),
147
+ * evolve: cartEvolve,
148
+ * initialState: () => ({ status: 'init', items: [] }),
149
+ * mapToColumns: (state) => ({
150
+ * currency: state.currency,
151
+ * is_checked_out: state.status === 'checkedOut'
152
+ * })
153
+ * });
154
+ *
155
+ * // Use it in a projection registry
156
+ * const registry: ProjectionRegistry = {
157
+ * CartCreated: [cartProjection],
158
+ * ItemAddedToCart: [cartProjection],
159
+ * // ... other events
160
+ * };
161
+ * ```
162
+ */
163
+ export declare function createSnapshotProjectionWithSnapshotTable<TState, TTable extends string, E extends {
164
+ type: string;
165
+ data: unknown;
166
+ } = {
167
+ type: string;
168
+ data: unknown;
169
+ }>(config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionHandler<DatabaseExecutor, E>;
94
170
  /**
95
171
  * Creates multiple projection handlers that all use the same snapshot projection logic.
96
172
  * This is a convenience function to avoid repeating the same handler for multiple event types.
@@ -119,4 +195,37 @@ export declare function createSnapshotProjectionRegistry<TState, TTable extends
119
195
  type: string;
120
196
  data: unknown;
121
197
  }>(eventTypes: E["type"][], config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionRegistry;
198
+ /**
199
+ * Creates multiple projection handlers that all use the same snapshot projection logic
200
+ * with a separate snapshots table. This is a convenience function to avoid repeating
201
+ * the same handler for multiple event types.
202
+ *
203
+ * @example
204
+ * ```typescript
205
+ * const registry = createSnapshotProjectionRegistryWithSnapshotTable(
206
+ * ['CartCreated', 'ItemAddedToCart', 'ItemRemovedFromCart'],
207
+ * {
208
+ * tableName: 'carts',
209
+ * extractKeys: (event, partition) => ({
210
+ * tenant_id: event.data.eventMeta.tenantId,
211
+ * cart_id: event.data.eventMeta.cartId,
212
+ * partition
213
+ * }),
214
+ * evolve: cartEvolve,
215
+ * initialState: () => ({ status: 'init', items: [] }),
216
+ * mapToColumns: (state) => ({
217
+ * currency: state.currency,
218
+ * is_checked_out: state.status === 'checkedOut'
219
+ * })
220
+ * }
221
+ * );
222
+ * ```
223
+ */
224
+ export declare function createSnapshotProjectionRegistryWithSnapshotTable<TState, TTable extends string, E extends {
225
+ type: string;
226
+ data: unknown;
227
+ } = {
228
+ type: string;
229
+ data: unknown;
230
+ }>(eventTypes: E["type"][], config: SnapshotProjectionConfig<TState, TTable, E>): ProjectionRegistry;
122
231
  //# sourceMappingURL=snapshot-projection.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"snapshot-projection.d.ts","sourceRoot":"","sources":["../../src/projections/snapshot-projection.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EACV,gBAAgB,EAEhB,eAAe,EACf,iBAAiB,EACjB,kBAAkB,EACnB,MAAM,aAAa,CAAC;AAErB;;;;;;GAMG;AACH,MAAM,MAAM,wBAAwB,CAClC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,IACzE;IACF;;OAEG;IACH,SAAS,EAAE,MAAM,CAAC;IAElB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,EAAE,CAAC;IAEvB;;;OAGG;IACH,WAAW,EAAE,CACX,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,EACzB,SAAS,EAAE,MAAM,KACd,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAE5B;;;OAGG;IACH,MAAM,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,KAAK,MAAM,CAAC;IAE7D;;OAEG;IACH,YAAY,EAAE,MAAM,MAAM,CAAC;IAE3B;;;;;;;;;;;;OAYG;IACH,YAAY,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC3D,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,wBAAgB,wBAAwB,CACtC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,iBAAiB,CAAC,gBAAgB,EAAE,CAAC,CAAC,CAqGxC;AAED;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,wBAAgB,gCAAgC,CAC9C,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,UAAU,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,EACvB,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,kBAAkB,CAYpB"}
1
+ {"version":3,"file":"snapshot-projection.d.ts","sourceRoot":"","sources":["../../src/projections/snapshot-projection.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EACV,gBAAgB,EAEhB,eAAe,EACf,iBAAiB,EACjB,kBAAkB,EACnB,MAAM,aAAa,CAAC;AAErB;;;;;;GAMG;AACH,MAAM,MAAM,wBAAwB,CAClC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,IACzE;IACF;;OAEG;IACH,SAAS,EAAE,MAAM,CAAC;IAElB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,EAAE,CAAC;IAEvB;;;OAGG;IACH,WAAW,EAAE,CACX,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,EACzB,SAAS,EAAE,MAAM,KACd,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAE5B;;;OAGG;IACH,MAAM,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,eAAe,CAAC,CAAC,CAAC,KAAK,MAAM,CAAC;IAE7D;;OAEG;IACH,YAAY,EAAE,MAAM,MAAM,CAAC;IAE3B;;;;;;;;;;;;OAYG;IACH,YAAY,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC3D,CAAC;AAEF;;;;;;;;;;GAUG;AACH,wBAAgB,iBAAiB,CAAC,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,MAAM,CAatE;AAgDD;;;;;;GAMG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAC1C,QAAQ,EAAE,OAAO,EACjB,YAAY,EAAE,MAAM,MAAM,EAC1B,SAAS,CAAC,EAAE,MAAM,GACjB,MAAM,CAqBR;AAuBD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,wBAAgB,wBAAwB,CACtC,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,iBAAiB,CAAC,gBAAgB,EAAE,CAAC,CAAC,CA8GxC;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAgDG;AACH,wBAAgB,yCAAyC,CACvD,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,iBAAiB,CAAC,gBAAgB,EAAE,CAAC,CAAC,CA0HxC;AAED;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,wBAAgB,gCAAgC,CAC9C,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE
,OAAO,CAAA;CAAE,EAE3E,UAAU,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,EACvB,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,kBAAkB,CAYpB;AAED;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AACH,wBAAgB,iDAAiD,CAC/D,MAAM,EACN,MAAM,SAAS,MAAM,EACrB,CAAC,SAAS;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,EAE3E,UAAU,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,EACvB,MAAM,EAAE,wBAAwB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC,GAClD,kBAAkB,CAYpB"}
@@ -1,3 +1,98 @@
1
/**
 * Builds a deterministic stream_id from a set of key/value pairs.
 *
 * Keys are sorted so that the same key set always yields the same id, and both
 * key and value are URL-encoded so the `:` and `|` delimiters cannot collide
 * with user-supplied characters.
 *
 * @internal
 * Exported for testing purposes only.
 */
export function constructStreamId(keys) {
    const encodedPairs = [];
    for (const name of Object.keys(keys).sort()) {
        encodedPairs.push(`${encodeURIComponent(name)}:${encodeURIComponent(keys[name])}`);
    }
    return encodedPairs.join("|");
}
28
/**
 * Ensures `extractKeys` yields the same key set for every event of a projection.
 * On the first call the sorted key list is returned (the caller caches it);
 * on later calls the cached list is validated against the freshly extracted one.
 *
 * @throws {Error} when the key sets differ between calls.
 */
function validateAndCachePrimaryKeys(keys, tableName, cachedKeys) {
    const sortedKeys = Object.keys(keys).sort();
    if (!cachedKeys) {
        // First event: remember the keys in a deterministic order.
        return sortedKeys;
    }
    const sameKeySet = cachedKeys.length === sortedKeys.length &&
        sortedKeys.every((key, i) => cachedKeys[i] === key);
    if (!sameKeySet) {
        throw new Error(`Snapshot projection "${tableName}" received inconsistent primary keys from extractKeys. ` +
            `Expected keys: ${cachedKeys.join(", ")}, ` +
            `but received: ${sortedKeys.join(", ")}. ` +
            `Ensure extractKeys returns a consistent set of keys for all events.`);
    }
    return cachedKeys;
}
49
/**
 * Decides whether an event has already been applied to the read model.
 * Returns true when `eventPosition` is at or before `lastProcessedPosition`.
 * Callers pass -1n for `lastProcessedPosition` when no position has been
 * recorded yet, so every event from the beginning is processed.
 */
function shouldSkipEvent(eventPosition, lastProcessedPosition) {
    return !(eventPosition > lastProcessedPosition);
}
58
/**
 * Loads the current aggregate state from a stored snapshot, handling both
 * string and already-parsed JSON formats (database drivers differ in how they
 * return JSONB). Falls back to `initialState()` when no snapshot exists.
 *
 * @internal
 * Exported for testing purposes only.
 */
export function loadStateFromSnapshot(snapshot, initialState, tableName) {
    if (!snapshot) {
        return initialState();
    }
    if (typeof snapshot !== "string") {
        // The driver already handed us parsed JSON.
        return snapshot;
    }
    try {
        return JSON.parse(snapshot);
    }
    catch (cause) {
        const tableContext = tableName ? ` for table "${tableName}"` : "";
        const reason = cause instanceof Error ? cause.message : String(cause);
        const preview = `${snapshot.substring(0, 200)}${snapshot.length > 200 ? "..." : ""}`;
        throw new Error(`Failed to parse snapshot${tableContext}: ${reason}. ` +
            `Snapshot value: ${preview}`);
    }
}
83
/**
 * Builds the ON CONFLICT update set for denormalized columns: each column is
 * mapped to an expression referencing `excluded.<column>` so the upsert takes
 * the incoming row's value. Returns an empty object when no columns are given.
 */
function buildDenormalizedUpdateSet(columns) {
    if (!columns) {
        return {};
    }
    return Object.fromEntries(Object.keys(columns).map((name) => [
        name,
        (eb) => eb.ref(`excluded.${name}`),
    ]));
}
1
96
  /**
2
97
  * Creates a projection handler that stores the aggregate state as a snapshot.
3
98
  *
@@ -34,12 +129,11 @@ export function createSnapshotProjection(config) {
34
129
  let inferredPrimaryKeys;
35
130
  return async ({ db, partition }, event) => {
36
131
  const keys = extractKeys(event, partition);
37
- // Infer primary keys from extractKeys on first call
38
- if (!inferredPrimaryKeys) {
39
- inferredPrimaryKeys = Object.keys(keys);
40
- }
132
+ // Validate and cache primary keys
133
+ inferredPrimaryKeys = validateAndCachePrimaryKeys(keys, tableName, inferredPrimaryKeys);
41
134
  const primaryKeys = inferredPrimaryKeys;
42
135
  // Check if event is newer than what we've already processed
136
+ // Use FOR UPDATE to lock the row and prevent race conditions with concurrent transactions
43
137
  // Note: Casting to `any` is necessary because Kysely cannot infer types for dynamic table names.
44
138
  // The table name is provided at runtime, so TypeScript cannot verify the table structure at compile time.
45
139
  // This is a known limitation when working with dynamic table names in Kysely.
@@ -52,21 +146,23 @@ export function createSnapshotProjection(config) {
52
146
  const conditions = Object.entries(keys).map(([key, value]) => eb(key, "=", value));
53
147
  return eb.and(conditions);
54
148
  })
149
+ .forUpdate()
55
150
  .executeTakeFirst();
56
151
  const lastPos = existing?.last_stream_position
57
152
  ? BigInt(String(existing.last_stream_position))
58
153
  : -1n;
59
154
  // Skip if we've already processed a newer event
60
- if (event.metadata.streamPosition <= lastPos) {
155
+ if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
61
156
  return;
62
157
  }
63
158
  // Load current state from snapshot or use initial state
64
- // Note: snapshot is stored as JSONB and Kysely returns it as parsed JSON
65
- const currentState = existing?.snapshot
66
- ? existing.snapshot
67
- : initialState();
159
+ const currentState = loadStateFromSnapshot(existing?.snapshot, initialState, tableName);
68
160
  // Apply the event to get new state
69
161
  const newState = evolve(currentState, event);
162
+ // Call mapToColumns once after evolve (only if provided)
163
+ const denormalizedColumns = mapToColumns
164
+ ? mapToColumns(newState)
165
+ : undefined;
70
166
  // Prepare the row data with snapshot
71
167
  const rowData = {
72
168
  ...keys,
@@ -75,10 +171,9 @@ export function createSnapshotProjection(config) {
75
171
  last_stream_position: event.metadata.streamPosition.toString(),
76
172
  last_global_position: event.metadata.globalPosition.toString(),
77
173
  };
78
- // If mapToColumns is provided, add the denormalized columns
79
- if (mapToColumns) {
80
- const columns = mapToColumns(newState);
81
- Object.assign(rowData, columns);
174
+ // If denormalized columns exist, add them to row data
175
+ if (denormalizedColumns) {
176
+ Object.assign(rowData, denormalizedColumns);
82
177
  }
83
178
  // Upsert the snapshot
84
179
  const insertQuery = db.insertInto(tableName).values(rowData);
@@ -88,24 +183,165 @@ export function createSnapshotProjection(config) {
88
183
  last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
89
184
  last_global_position: (eb) => eb.ref("excluded.last_global_position"),
90
185
  };
91
- // If mapToColumns is provided, also update the denormalized columns
92
- if (mapToColumns) {
93
- const columns = mapToColumns(newState);
94
- for (const columnName of Object.keys(columns)) {
95
- updateSet[columnName] = (eb) => eb.ref(`excluded.${columnName}`);
96
- }
97
- }
186
+ // Add denormalized columns to update set if provided
187
+ const denormalizedUpdateSet = buildDenormalizedUpdateSet(denormalizedColumns);
188
+ Object.assign(updateSet, denormalizedUpdateSet);
98
189
  await insertQuery
99
190
  // Note: `any` is used here because the conflict builder needs to work with any table schema.
100
191
  // The actual schema is validated at runtime through Kysely's query builder.
192
+ // The FOR UPDATE lock above ensures that concurrent transactions wait, preventing race conditions.
101
193
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
102
194
  .onConflict((oc) => {
103
195
  const conflictBuilder = oc.columns(primaryKeys);
196
+ // Note: We could add a WHERE clause here (via doUpdateSet's `where` option) to only update
197
+ // if excluded.last_stream_position > table.last_stream_position, but the FOR UPDATE lock above
198
+ // already provides the primary protection, so we intentionally rely on that for concurrency control.
104
199
  return conflictBuilder.doUpdateSet(updateSet);
105
200
  })
106
201
  .execute();
107
202
  };
108
203
  }
204
/**
 * Creates a projection handler that stores snapshots in a separate centralized table.
 *
 * This is similar to `createSnapshotProjection`, but uses a separate `snapshots` table
 * to store event-sourcing-related columns. This approach makes read model tables cleaner
 * and more scalable, as they don't need to include event-sourcing columns.
 *
 * **Key differences from `createSnapshotProjection`:**
 * - Snapshots are stored in a centralized `snapshots` table
 * - Read model tables only contain keys from `extractKeys` and columns from `mapToColumns`
 * - The `stream_id` is deterministically constructed from the keys (not from event metadata)
 *
 * **Database schema required:**
 * ```sql
 * CREATE TABLE snapshots (
 *   readmodel_table_name TEXT NOT NULL,
 *   stream_id TEXT NOT NULL,
 *   last_stream_position BIGINT NOT NULL,
 *   last_global_position BIGINT NOT NULL,
 *   snapshot JSONB NOT NULL,
 *   PRIMARY KEY (readmodel_table_name, stream_id)
 * );
 * ```
 *
 * @param config - Snapshot projection configuration: `tableName`, `extractKeys`,
 *   `evolve`, `initialState`, and optional `mapToColumns`.
 * @returns An async projection handler `({ db, partition }, event) => Promise<void>`.
 * @throws {Error} when `extractKeys` returns an inconsistent key set across events,
 *   or when a stored string snapshot fails to parse as JSON.
 *
 * @example
 * ```typescript
 * const cartProjection = createSnapshotProjectionWithSnapshotTable({
 *   tableName: 'carts',
 *   extractKeys: (event, partition) => ({
 *     tenant_id: event.data.eventMeta.tenantId,
 *     cart_id: event.data.eventMeta.cartId,
 *     partition
 *   }),
 *   evolve: cartEvolve,
 *   initialState: () => ({ status: 'init', items: [] }),
 *   mapToColumns: (state) => ({
 *     currency: state.currency,
 *     is_checked_out: state.status === 'checkedOut'
 *   })
 * });
 *
 * // Use it in a projection registry
 * const registry: ProjectionRegistry = {
 *   CartCreated: [cartProjection],
 *   ItemAddedToCart: [cartProjection],
 *   // ... other events
 * };
 * ```
 */
export function createSnapshotProjectionWithSnapshotTable(config) {
    const { tableName, extractKeys, evolve, initialState, mapToColumns } = config;
    // Cache the inferred primary keys after the first call; the same cache is
    // shared across every event handled by this closure.
    let inferredPrimaryKeys;
    return async ({ db, partition }, event) => {
        const keys = extractKeys(event, partition);
        // Validate and cache primary keys (throws on an inconsistent key set).
        inferredPrimaryKeys = validateAndCachePrimaryKeys(keys, tableName, inferredPrimaryKeys);
        const primaryKeys = inferredPrimaryKeys;
        // Construct deterministic stream_id from keys (sorted + URL-encoded).
        const streamId = constructStreamId(keys);
        // Check if event is newer than what we've already processed.
        // FOR UPDATE locks the snapshot row so concurrent transactions serialize
        // on it, preventing lost updates between the read below and the upserts.
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const existing = await db
            .selectFrom("snapshots")
            .select(["last_stream_position", "snapshot"])
            .where("readmodel_table_name", "=", tableName)
            .where("stream_id", "=", streamId)
            .forUpdate()
            .executeTakeFirst();
        // -1n means "no previous position", so the first event is never skipped.
        const lastPos = existing?.last_stream_position
            ? BigInt(String(existing.last_stream_position))
            : -1n;
        // Skip if we've already processed this event or a newer one (idempotent replay).
        if (shouldSkipEvent(event.metadata.streamPosition, lastPos)) {
            return;
        }
        // Load current state from snapshot or use initial state.
        const currentState = loadStateFromSnapshot(existing?.snapshot, initialState, tableName);
        // Apply the event to get new state.
        const newState = evolve(currentState, event);
        // Call mapToColumns once after evolve (only if provided).
        const denormalizedColumns = mapToColumns
            ? mapToColumns(newState)
            : undefined;
        // Upsert the snapshot in the snapshots table. This runs BEFORE the read
        // model upsert so the snapshot never lags behind the visible read model.
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        await db
            .insertInto("snapshots")
            .values({
                readmodel_table_name: tableName,
                stream_id: streamId,
                snapshot: JSON.stringify(newState),
                last_stream_position: event.metadata.streamPosition.toString(),
                last_global_position: event.metadata.globalPosition.toString(),
            })
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            .onConflict((oc) => {
                // The FOR UPDATE lock above ensures that concurrent transactions wait, preventing race conditions.
                // Note: We could add a WHERE clause here to only update if excluded.last_stream_position > snapshots.last_stream_position,
                // but this would be redundant for correctness: the FOR UPDATE lock, combined with the shouldSkipEvent check,
                // already prevents stale or out-of-order events from overwriting newer snapshots.
                return oc.columns(["readmodel_table_name", "stream_id"]).doUpdateSet({
                    snapshot: (eb) => eb.ref("excluded.snapshot"),
                    last_stream_position: (eb) => eb.ref("excluded.last_stream_position"),
                    last_global_position: (eb) => eb.ref("excluded.last_global_position"),
                });
            })
            .execute();
        // Upsert the read model table with keys and denormalized columns only.
        const readModelData = { ...keys };
        if (denormalizedColumns) {
            Object.assign(readModelData, denormalizedColumns);
        }
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const readModelInsertQuery = db
            .insertInto(tableName)
            .values(readModelData);
        // Build the update set for conflict resolution (only for denormalized columns).
        const readModelUpdateSet = buildDenormalizedUpdateSet(denormalizedColumns);
        // Only update if there are denormalized columns, otherwise just insert (no-op on conflict).
        if (Object.keys(readModelUpdateSet).length > 0) {
            await readModelInsertQuery
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                .onConflict((oc) => {
                    const conflictBuilder = oc.columns(primaryKeys);
                    return conflictBuilder.doUpdateSet(readModelUpdateSet);
                })
                .execute();
        }
        else {
            // If no denormalized columns, insert with ON CONFLICT DO NOTHING so the
            // key-only row still exists in the read model table.
            await readModelInsertQuery
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                .onConflict((oc) => {
                    return oc.columns(primaryKeys).doNothing();
                })
                .execute();
        }
    };
}
109
345
  /**
110
346
  * Creates multiple projection handlers that all use the same snapshot projection logic.
111
347
  * This is a convenience function to avoid repeating the same handler for multiple event types.
@@ -138,3 +374,40 @@ export function createSnapshotProjectionRegistry(eventTypes, config) {
138
374
  }
139
375
  return registry;
140
376
  }
377
/**
 * Creates multiple projection handlers that all use the same snapshot projection logic
 * with a separate snapshots table. This is a convenience function to avoid repeating
 * the same handler for multiple event types.
 *
 * @example
 * ```typescript
 * const registry = createSnapshotProjectionRegistryWithSnapshotTable(
 *   ['CartCreated', 'ItemAddedToCart', 'ItemRemovedFromCart'],
 *   {
 *     tableName: 'carts',
 *     extractKeys: (event, partition) => ({
 *       tenant_id: event.data.eventMeta.tenantId,
 *       cart_id: event.data.eventMeta.cartId,
 *       partition
 *     }),
 *     evolve: cartEvolve,
 *     initialState: () => ({ status: 'init', items: [] }),
 *     mapToColumns: (state) => ({
 *       currency: state.currency,
 *       is_checked_out: state.status === 'checkedOut'
 *     })
 *   }
 * );
 * ```
 */
export function createSnapshotProjectionRegistryWithSnapshotTable(eventTypes, config) {
    // One handler instance is shared by every event type, so the projection's
    // internal primary-key cache is shared as well.
    const handler = createSnapshotProjectionWithSnapshotTable(config);
    return Object.fromEntries(eventTypes.map((eventType) => [eventType, [handler]]));
}
package/package.json CHANGED
@@ -3,7 +3,7 @@
3
3
  "publishConfig": {
4
4
  "access": "public"
5
5
  },
6
- "version": "2.2.7",
6
+ "version": "2.3.1",
7
7
  "description": "Emmett Event Store with Kysely",
8
8
  "author": "Wataru Oguchi",
9
9
  "license": "MIT",