@wataruoguchi/emmett-event-store-kysely 1.1.3 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +76 -164
  2. package/dist/event-store/consumers.d.ts +23 -0
  3. package/dist/event-store/consumers.d.ts.map +1 -0
  4. package/dist/event-store/consumers.js +155 -0
  5. package/dist/event-store/kysely-event-store.d.ts +42 -0
  6. package/dist/event-store/kysely-event-store.d.ts.map +1 -0
  7. package/dist/event-store/kysely-event-store.js +256 -0
  8. package/dist/index.cjs +584 -0
  9. package/dist/index.d.ts +10 -0
  10. package/dist/index.d.ts.map +1 -0
  11. package/dist/index.js +5 -0
  12. package/dist/projections/runner.d.ts +3 -2
  13. package/dist/projections/runner.d.ts.map +1 -1
  14. package/dist/projections/snapshot-projection.d.ts +120 -0
  15. package/dist/projections/snapshot-projection.d.ts.map +1 -0
  16. package/dist/projections/snapshot-projection.js +125 -0
  17. package/dist/types.d.ts +33 -4
  18. package/dist/types.d.ts.map +1 -1
  19. package/package.json +9 -14
  20. package/dist/event-store/aggregate-stream.d.ts +0 -10
  21. package/dist/event-store/aggregate-stream.d.ts.map +0 -1
  22. package/dist/event-store/aggregate-stream.js +0 -18
  23. package/dist/event-store/append-to-stream.d.ts +0 -7
  24. package/dist/event-store/append-to-stream.d.ts.map +0 -1
  25. package/dist/event-store/append-to-stream.js +0 -143
  26. package/dist/event-store/index.cjs +0 -294
  27. package/dist/event-store/index.d.ts +0 -13
  28. package/dist/event-store/index.d.ts.map +0 -1
  29. package/dist/event-store/index.js +0 -10
  30. package/dist/event-store/read-stream.d.ts +0 -14
  31. package/dist/event-store/read-stream.d.ts.map +0 -1
  32. package/dist/event-store/read-stream.js +0 -88
  33. package/dist/projections/index.cjs +0 -124
  34. package/dist/projections/index.d.ts +0 -4
  35. package/dist/projections/index.d.ts.map +0 -1
  36. package/dist/projections/index.js +0 -2
@@ -0,0 +1,256 @@
1
+ // biome-ignore assist/source/organizeImports: retain import order similar to app code
2
+ import { assertExpectedVersionMatchesCurrent, } from "@event-driven-io/emmett";
3
+ import { DEFAULT_PARTITION, PostgreSQLEventStoreDefaultStreamVersion, } from "../types.js";
4
/**
 * Default configuration for the Kysely-backed event store.
 * Schema auto-migration defaults to "CreateOrUpdate".
 */
export const defaultKyselyOptions = {
  schema: {
    autoMigration: "CreateOrUpdate",
  },
};
9
/**
 * Creates an Emmett-compatible event store backed by a Kysely database.
 *
 * @param deps - `{ db, logger }`: a Kysely instance and a structured logger.
 * @returns An event store exposing `aggregateStream`, `readStream` and
 *   `appendToStream`, with no-op schema and session management.
 */
export const getKyselyEventStore = (deps) => {
  const { db, logger } = deps;
  const eventStore = {
    /**
     * @description We do not use schema management in this package.
     */
    schema: {
      sql: () => "",
      print: () => console.log(""),
      migrate: async () => Promise.resolve(),
    },
    /**
     * @description We do not use session management in this package.
     */
    async withSession(callback) {
      const session = {
        eventStore,
        close: () => Promise.resolve(),
      };
      return await callback(session);
    },
    async aggregateStream(streamName, options) {
      const { evolve, initialState, read } = options;
      logger.info({ streamName, options }, "aggregateStream");
      const expectedStreamVersion = read?.expectedStreamVersion;
      const readResult = await eventStore.readStream(streamName, read);
      assertExpectedVersionMatchesCurrent(readResult.currentStreamVersion, expectedStreamVersion, PostgreSQLEventStoreDefaultStreamVersion);
      // Fold every stored event into the aggregate state.
      let state = initialState();
      for (const event of readResult.events) {
        if (event) {
          state = evolve(state, event);
        }
      }
      return {
        state,
        currentStreamVersion: readResult.currentStreamVersion,
        streamExists: readResult.streamExists,
      };
    },
    async readStream(streamName, options) {
      const partition = getPartition(options);
      logger.info({ streamName, options, partition }, "readStream");
      const { currentStreamVersion, streamExists } = await fetchStreamInfo(db, streamName, partition);
      const range = parseRangeOptions(options);
      const rows = await buildEventsQuery({ db, logger }, streamName, partition, range).execute();
      return {
        events: rows.map((row) => mapRowToEvent(row, streamName)),
        currentStreamVersion,
        streamExists,
      };
    },
    async appendToStream(streamName, events, options) {
      const streamType = getStreamType(options);
      const partition = getPartition(options);
      const expected = options?.expectedStreamVersion;
      logger.info({ streamName, events, options, partition }, "appendToStream");
      ensureEventsNotEmpty(events, expected);
      // All writes happen atomically inside a single transaction so that
      // the optimistic-concurrency check and the inserts cannot interleave
      // with a concurrent append.
      return await db.transaction().execute(async (trx) => {
        const { currentStreamVersion, streamExists } = await fetchStreamInfo(trx, streamName, partition);
        assertExpectedVersion(expected, currentStreamVersion, streamExists);
        const basePos = currentStreamVersion;
        const nextStreamPosition = computeNextStreamPosition(basePos, events.length);
        await upsertStreamRow(trx, streamName, partition, streamType, basePos, nextStreamPosition, expected, streamExists);
        const messagesToInsert = buildMessagesToInsert(events, basePos, streamName, partition);
        const lastEventGlobalPosition = await insertMessagesAndGetLastGlobalPosition(trx, messagesToInsert);
        return {
          nextExpectedStreamVersion: nextStreamPosition,
          lastEventGlobalPosition,
          createdNewStream: !streamExists,
        };
      });
    },
    close: async () => {
      // Kysely doesn't require explicit closing for most cases
      // but we can add cleanup logic here if needed
      await Promise.resolve();
    },
  };
  return eventStore;
};
87
+ // Helper functions (consolidated from the optimized implementation)
88
// Resolve the stream type from append options, defaulting to "unknown".
function getStreamType(options) {
  const streamType = options?.streamType;
  return streamType ?? "unknown";
}
91
// Resolve the target partition, falling back to the package default.
function getPartition(options) {
  const partition = options?.partition;
  return partition ?? DEFAULT_PARTITION;
}
94
// Guard: appending zero events is always an error, regardless of the
// expected-version option (the second parameter is intentionally unused).
function ensureEventsNotEmpty(events, _expected) {
  if (events.length > 0) {
    return;
  }
  throw new Error("Cannot append empty events array");
}
99
// Enforce the caller's optimistic-concurrency expectation against the
// stream's current position. Throws when the expectation is violated;
// an undefined expectation always passes.
function assertExpectedVersion(expected, currentPos, streamExistsNow) {
  switch (expected) {
    case "STREAM_EXISTS":
      if (!streamExistsNow) {
        throw new Error("Stream does not exist but expected to exist");
      }
      return;
    case "STREAM_DOES_NOT_EXIST":
      if (streamExistsNow) {
        throw new Error("Stream exists but expected not to exist");
      }
      return;
    default:
      if (typeof expected === "bigint" && expected !== currentPos) {
        throw new Error(`Expected version ${expected} but current is ${currentPos}`);
      }
  }
}
110
// Stream positions advance by exactly one per appended event.
function computeNextStreamPosition(basePos, eventCount) {
  const delta = BigInt(eventCount);
  return basePos + delta;
}
113
// Create or advance the `streams` bookkeeping row inside the append
// transaction. Three cases:
//  - new stream: insert a fresh row at the post-append position;
//  - bigint expectation: conditional update guarded by the current
//    position (optimistic concurrency — throws when another writer won);
//  - otherwise: unconditional position bump.
async function upsertStreamRow(executor, streamId, partition, streamType, basePos, nextStreamPosition, expected, streamExistsNow) {
  if (!streamExistsNow) {
    await executor
      .insertInto("streams")
      .values({
        stream_id: streamId,
        stream_position: nextStreamPosition,
        partition,
        stream_type: streamType,
        stream_metadata: {},
        is_archived: false,
      })
      .execute();
  } else if (typeof expected === "bigint") {
    // The WHERE on stream_position makes this a compare-and-set: if a
    // concurrent append moved the stream, no row matches and we fail.
    const updatedRow = await executor
      .updateTable("streams")
      .set({ stream_position: nextStreamPosition })
      .where("stream_id", "=", streamId)
      .where("partition", "=", partition)
      .where("is_archived", "=", false)
      .where("stream_position", "=", basePos)
      .returning("stream_position")
      .executeTakeFirst();
    if (!updatedRow) {
      throw new Error(`Expected version ${expected} but current is ${basePos}`);
    }
  } else {
    await executor
      .updateTable("streams")
      .set({ stream_position: nextStreamPosition })
      .where("stream_id", "=", streamId)
      .where("partition", "=", partition)
      .where("is_archived", "=", false)
      .execute();
  }
}
151
+ function buildMessagesToInsert(events, basePos, streamId, partition) {
152
+ return events.map((e, index) => {
153
+ const messageId = crypto.randomUUID();
154
+ const streamPosition = basePos + BigInt(index + 1);
155
+ const rawMeta = "metadata" in e ? e.metadata : undefined;
156
+ const eventMeta = rawMeta && typeof rawMeta === "object" ? rawMeta : {};
157
+ const messageMetadata = {
158
+ messageId,
159
+ ...eventMeta,
160
+ };
161
+ return {
162
+ stream_id: streamId,
163
+ stream_position: streamPosition,
164
+ partition,
165
+ message_data: e.data,
166
+ message_metadata: messageMetadata,
167
+ message_schema_version: index.toString(),
168
+ message_type: e.type,
169
+ message_kind: "E",
170
+ message_id: messageId,
171
+ is_archived: false,
172
+ created: new Date(),
173
+ };
174
+ });
175
+ }
176
// Insert all message rows and report the global position assigned to the
// last one; returns 0n when the driver yields no RETURNING rows.
async function insertMessagesAndGetLastGlobalPosition(executor, messagesToInsert) {
  const insertedRows = await executor
    .insertInto("messages")
    .values(messagesToInsert)
    .returning("global_position")
    .execute();
  const isEmpty = !insertedRows || (Array.isArray(insertedRows) && insertedRows.length === 0);
  if (isEmpty) {
    return 0n;
  }
  const lastRow = insertedRows[insertedRows.length - 1];
  return BigInt(String(lastRow.global_position));
}
188
// Extract the optional from/to/maxCount read-range fields; any field not
// present on an object-typed options value comes back as undefined.
function parseRangeOptions(options) {
  const isObject = Boolean(options) && typeof options === "object";
  return {
    from: isObject && "from" in options ? options.from : undefined,
    to: isObject && "to" in options ? options.to : undefined,
    maxCount: isObject && "maxCount" in options ? options.maxCount : undefined,
  };
}
200
// Build (but do not execute) the Kysely query that fetches a stream's
// non-archived messages in stream-position order, narrowed by the
// optional from/to/maxCount range.
function buildEventsQuery(deps, streamId, partition, range) {
  const { db } = deps;
  let query = db
    .selectFrom("messages")
    .select([
      "message_type",
      "message_data",
      "message_metadata",
      "stream_position",
      "global_position",
      "message_id",
    ])
    .where("stream_id", "=", streamId)
    .where("partition", "=", partition)
    .where("is_archived", "=", false)
    .orderBy("stream_position");
  if (range.from !== undefined) {
    query = query.where("stream_position", ">=", range.from);
  }
  if (range.to !== undefined) {
    query = query.where("stream_position", "<=", range.to);
  }
  if (range.maxCount !== undefined) {
    query = query.limit(Number(range.maxCount));
  }
  return query;
}
227
// Convert a raw `messages` row into an Emmett event envelope, coercing
// the DB positions (which may arrive as strings) to BigInt and merging
// the stored metadata with the positional fields.
function mapRowToEvent(row, streamId) {
  const storedMetadata = row.message_metadata ?? {};
  const metadata = {
    ...storedMetadata,
    messageId: row.message_id,
    streamId: streamId,
    streamPosition: BigInt(String(row.stream_position)),
    globalPosition: BigInt(String(row.global_position ?? 0)),
  };
  return {
    kind: "Event",
    type: row.message_type,
    data: row.message_data,
    metadata,
  };
}
244
// Look up a stream's current head position within a partition. A missing
// (or archived) row means the stream does not exist yet, in which case
// the store's default initial version is reported.
async function fetchStreamInfo(executor, streamId, partition) {
  const row = await executor
    .selectFrom("streams")
    .select(["stream_position"])
    .where("stream_id", "=", streamId)
    .where("partition", "=", partition)
    .where("is_archived", "=", false)
    .executeTakeFirst();
  if (!row) {
    return {
      currentStreamVersion: PostgreSQLEventStoreDefaultStreamVersion,
      streamExists: false,
    };
  }
  return {
    currentStreamVersion: BigInt(String(row.stream_position)),
    streamExists: true,
  };
}