@wataruoguchi/emmett-event-store-kysely 1.1.2 → 2.0.0
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
- package/README.md +76 -164
- package/dist/event-store/consumers.d.ts +23 -0
- package/dist/event-store/consumers.d.ts.map +1 -0
- package/dist/event-store/consumers.js +155 -0
- package/dist/event-store/kysely-event-store.d.ts +42 -0
- package/dist/event-store/kysely-event-store.d.ts.map +1 -0
- package/dist/event-store/kysely-event-store.js +256 -0
- package/dist/index.cjs +584 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +5 -0
- package/dist/projections/runner.d.ts +3 -2
- package/dist/projections/runner.d.ts.map +1 -1
- package/dist/projections/snapshot-projection.d.ts +120 -0
- package/dist/projections/snapshot-projection.d.ts.map +1 -0
- package/dist/projections/snapshot-projection.js +125 -0
- package/dist/types.d.ts +39 -11
- package/dist/types.d.ts.map +1 -1
- package/package.json +9 -14
- package/dist/event-store/aggregate-stream.d.ts +0 -10
- package/dist/event-store/aggregate-stream.d.ts.map +0 -1
- package/dist/event-store/aggregate-stream.js +0 -18
- package/dist/event-store/append-to-stream.d.ts +0 -7
- package/dist/event-store/append-to-stream.d.ts.map +0 -1
- package/dist/event-store/append-to-stream.js +0 -143
- package/dist/event-store/index.cjs +0 -291
- package/dist/event-store/index.d.ts +0 -13
- package/dist/event-store/index.d.ts.map +0 -1
- package/dist/event-store/index.js +0 -10
- package/dist/event-store/read-stream.d.ts +0 -14
- package/dist/event-store/read-stream.d.ts.map +0 -1
- package/dist/event-store/read-stream.js +0 -88
- package/dist/projections/index.cjs +0 -124
- package/dist/projections/index.d.ts +0 -4
- package/dist/projections/index.d.ts.map +0 -1
- package/dist/projections/index.js +0 -2
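In 2.0.0 the separately built `event-store/` and `projections/` modules are removed in favor of a single bundled entry point (`dist/index.cjs` / `dist/index.js`) plus the new `consumers`, `kysely-event-store`, and `snapshot-projection` modules. The deleted 1.x build output follows. For orientation, here is a minimal sketch of how the removed 1.x factories were wired together, inferred from the deleted files below; the subpath import, the `Database` shape, and the pino-style logger are assumptions for illustration, not part of this diff.

// Sketch only: wiring of the removed 1.x factories, inferred from the
// deleted dist/event-store build below. The import path is an assumption.
import { Kysely } from "kysely";
import pino from "pino";
import { createEventStore } from "@wataruoguchi/emmett-event-store-kysely/event-store";

// Hypothetical schema naming the tables the deleted code queries.
interface Database {
  streams: unknown;
  messages: unknown;
  subscriptions: unknown;
}

declare const db: Kysely<Database>; // construction elided
const logger = pino(); // any logger with .info(obj, msg) works

// Returns { readStream, appendToStream, aggregateStream }, all sharing
// the same { db, logger } dependencies.
const eventStore = createEventStore({ db, logger });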
--- package/dist/event-store/index.cjs
+++ /dev/null
@@ -1,291 +0,0 @@
-"use strict";
-var __defProp = Object.defineProperty;
-var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
-var __getOwnPropNames = Object.getOwnPropertyNames;
-var __hasOwnProp = Object.prototype.hasOwnProperty;
-var __export = (target, all) => {
-  for (var name in all)
-    __defProp(target, name, { get: all[name], enumerable: true });
-};
-var __copyProps = (to, from, except, desc) => {
-  if (from && typeof from === "object" || typeof from === "function") {
-    for (let key of __getOwnPropNames(from))
-      if (!__hasOwnProp.call(to, key) && key !== except)
-        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
-  }
-  return to;
-};
-var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
-
-// src/event-store/index.ts
-var event_store_exports = {};
-__export(event_store_exports, {
-  createEventStore: () => createEventStore,
-  createReadStream: () => createReadStream
-});
-module.exports = __toCommonJS(event_store_exports);
-
-// src/event-store/aggregate-stream.ts
-var import_emmett = require("@event-driven-io/emmett");
-
-// src/types.ts
-var PostgreSQLEventStoreDefaultStreamVersion = 0n;
-var DEFAULT_PARTITION = "default_partition";
-
-// src/event-store/aggregate-stream.ts
-function createAggregateStream({ readStream }, { logger }) {
-  return async function aggregateStream(streamId, options) {
-    const { evolve, initialState, read } = options;
-    logger.info({ streamId, options }, "aggregateStream");
-    const expectedStreamVersion = read?.expectedStreamVersion;
-    const result = await readStream(streamId, options.read);
-    (0, import_emmett.assertExpectedVersionMatchesCurrent)(
-      result.currentStreamVersion,
-      expectedStreamVersion,
-      PostgreSQLEventStoreDefaultStreamVersion
-    );
-    const state = result.events.reduce(
-      (state2, event) => event ? evolve(state2, event) : state2,
-      initialState()
-    );
-    return {
-      state,
-      currentStreamVersion: result.currentStreamVersion,
-      streamExists: result.streamExists
-    };
-  };
-}
-
-// src/event-store/append-to-stream.ts
-var import_emmett2 = require("@event-driven-io/emmett");
-var PostgreSQLEventStoreDefaultGlobalPosition = 0n;
-function createAppendToStream({
-  db,
-  logger
-}) {
-  return async function appendToStream(streamId, events, options) {
-    const streamType = getStreamType(options);
-    const partition = getPartition(options);
-    const expected = options?.expectedStreamVersion;
-    logger.info({ streamId, events, options, partition }, "appendToStream");
-    ensureEventsNotEmpty(events, expected);
-    const result = await db.transaction().execute(async (trx) => {
-      const { currentStreamVersion, streamExists } = await fetchStreamInfo(
-        trx,
-        streamId,
-        partition
-      );
-      assertExpectedVersion(expected, currentStreamVersion, streamExists);
-      const basePos = currentStreamVersion;
-      const nextStreamPosition = computeNextStreamPosition(
-        basePos,
-        events.length
-      );
-      await upsertStreamRow(
-        trx,
-        streamId,
-        partition,
-        streamType,
-        basePos,
-        nextStreamPosition,
-        expected,
-        streamExists
-      );
-      const messagesToInsert = buildMessagesToInsert(
-        events,
-        basePos,
-        streamId,
-        partition
-      );
-      const lastEventGlobalPosition = await insertMessagesAndGetLastGlobalPosition(trx, messagesToInsert);
-      return {
-        nextExpectedStreamVersion: nextStreamPosition,
-        lastEventGlobalPosition,
-        createdNewStream: !streamExists
-      };
-    });
-    return result;
-  };
-}
-function getStreamType(options) {
-  return options?.streamType ?? "unknown";
-}
-function ensureEventsNotEmpty(events, expected) {
-  if (events.length === 0) {
-    throw new import_emmett2.ExpectedVersionConflictError(
-      -1n,
-      expected ?? import_emmett2.NO_CONCURRENCY_CHECK
-    );
-  }
-}
-function assertExpectedVersion(expected, currentPos, streamExistsNow) {
-  if (expected === import_emmett2.STREAM_EXISTS && !streamExistsNow) {
-    throw new import_emmett2.ExpectedVersionConflictError(-1n, import_emmett2.STREAM_EXISTS);
-  }
-  if (expected === import_emmett2.STREAM_DOES_NOT_EXIST && streamExistsNow) {
-    throw new import_emmett2.ExpectedVersionConflictError(-1n, import_emmett2.STREAM_DOES_NOT_EXIST);
-  }
-  if (typeof expected === "bigint" && expected !== currentPos) {
-    throw new import_emmett2.ExpectedVersionConflictError(currentPos, expected);
-  }
-}
-function computeNextStreamPosition(basePos, eventCount) {
-  return basePos + BigInt(eventCount);
-}
-async function upsertStreamRow(executor, streamId, partition, streamType, basePos, nextStreamPosition, expected, streamExistsNow) {
-  if (!streamExistsNow) {
-    await executor.insertInto("streams").values({
-      stream_id: streamId,
-      stream_position: nextStreamPosition,
-      partition,
-      stream_type: streamType,
-      stream_metadata: {},
-      is_archived: false
-    }).execute();
-    return;
-  }
-  if (typeof expected === "bigint") {
-    const updatedRow = await executor.updateTable("streams").set({ stream_position: nextStreamPosition }).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).where("stream_position", "=", basePos).returning("stream_position").executeTakeFirst();
-    if (!updatedRow) {
-      throw new import_emmett2.ExpectedVersionConflictError(basePos, expected);
-    }
-    return;
-  }
-  await executor.updateTable("streams").set({ stream_position: nextStreamPosition }).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).execute();
-}
-function buildMessagesToInsert(events, basePos, streamId, partition) {
-  return events.map((e, index) => {
-    const messageId = crypto.randomUUID();
-    const streamPosition = basePos + BigInt(index + 1);
-    const rawMeta = "metadata" in e ? e.metadata : void 0;
-    const eventMeta = rawMeta && typeof rawMeta === "object" ? rawMeta : {};
-    const messageMetadata = {
-      messageId,
-      ...eventMeta
-    };
-    return {
-      stream_id: streamId,
-      stream_position: streamPosition,
-      partition,
-      message_data: e.data,
-      message_metadata: messageMetadata,
-      message_schema_version: index.toString(),
-      message_type: e.type,
-      message_kind: "E",
-      message_id: messageId,
-      is_archived: false,
-      created: /* @__PURE__ */ new Date()
-    };
-  });
-}
-async function insertMessagesAndGetLastGlobalPosition(executor, messagesToInsert) {
-  const inserted = await executor.insertInto("messages").values(messagesToInsert).returning("global_position").execute();
-  if (!inserted || Array.isArray(inserted) && inserted.length === 0) {
-    return PostgreSQLEventStoreDefaultGlobalPosition;
-  }
-  const globalPositions = inserted.map(
-    (r) => BigInt(String(r.global_position))
-  );
-  return globalPositions[globalPositions.length - 1];
-}
-function getPartition(options) {
-  return options?.partition ?? DEFAULT_PARTITION;
-}
-async function fetchStreamInfo(executor, streamId, partition) {
-  const streamRow = await executor.selectFrom("streams").select(["stream_position"]).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).executeTakeFirst();
-  const currentStreamVersion = streamRow ? BigInt(
-    String(streamRow.stream_position)
-  ) : PostgreSQLEventStoreDefaultStreamVersion;
-  return { currentStreamVersion, streamExists: !!streamRow };
-}
-
-// src/event-store/read-stream.ts
-function createReadStream({ db, logger }) {
-  return async function readStream(streamId, options) {
-    const partition = getPartition2(options);
-    logger.info({ streamId, options, partition }, "readStream");
-    const { currentStreamVersion, streamExists } = await fetchStreamInfo2(
-      db,
-      streamId,
-      partition
-    );
-    const range = parseRangeOptions(options);
-    const rows = await buildEventsQuery(
-      { db, logger },
-      streamId,
-      partition,
-      range
-    ).execute();
-    const events = rows.map((row) => mapRowToEvent(row, streamId));
-    return {
-      events,
-      currentStreamVersion,
-      streamExists
-    };
-  };
-}
-function parseRangeOptions(options) {
-  const from = options && typeof options === "object" && "from" in options ? options.from : void 0;
-  const to = options && typeof options === "object" && "to" in options ? options.to : void 0;
-  const maxCount = options && typeof options === "object" && "maxCount" in options ? options.maxCount : void 0;
-  return { from, to, maxCount };
-}
-function buildEventsQuery(deps, streamId, partition, range) {
-  const { db } = deps;
-  let q = db.selectFrom("messages").select([
-    "message_type",
-    "message_data",
-    "message_metadata",
-    "stream_position",
-    "global_position",
-    "message_id"
-  ]).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).orderBy("stream_position");
-  if (range.from !== void 0) {
-    q = q.where("stream_position", ">=", BigInt(range.from));
-  }
-  if (range.to !== void 0) {
-    q = q.where("stream_position", "<=", BigInt(range.to));
-  }
-  if (range.maxCount !== void 0) {
-    q = q.limit(Number(range.maxCount));
-  }
-  return q;
-}
-function mapRowToEvent(row, streamId) {
-  const streamPosition = BigInt(String(row.stream_position));
-  const globalPosition = BigInt(String(row.global_position ?? 0));
-  const baseMetadata = row.message_metadata ?? {};
-  return {
-    kind: "Event",
-    type: row.message_type,
-    data: row.message_data,
-    metadata: {
-      ...baseMetadata,
-      messageId: row.message_id,
-      streamId,
-      streamPosition,
-      globalPosition
-    }
-  };
-}
-function getPartition2(options) {
-  return options?.partition ?? DEFAULT_PARTITION;
-}
-async function fetchStreamInfo2(executor, streamId, partition) {
-  const streamRow = await executor.selectFrom("streams").select(["stream_position"]).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).executeTakeFirst();
-  const currentStreamVersion = streamRow ? streamRow.stream_position : PostgreSQLEventStoreDefaultStreamVersion;
-  return { currentStreamVersion, streamExists: !!streamRow };
-}
-
-// src/event-store/index.ts
-function createEventStore(deps) {
-  const readStream = createReadStream(deps);
-  const appendToStream = createAppendToStream(deps);
-  const aggregateStream = createAggregateStream({ readStream }, deps);
-  return { readStream, appendToStream, aggregateStream };
-}
-// Annotate the CommonJS export names for ESM import in node:
-0 && (module.exports = {
-  createEventStore,
-  createReadStream
-});
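The bundle above concentrates the 1.x optimistic-concurrency rules in `assertExpectedVersion` and `upsertStreamRow`: a `bigint` `expectedStreamVersion` must equal the stored `stream_position`, and the guarded `UPDATE ... WHERE stream_position = basePos` converts lost-update races into `ExpectedVersionConflictError`. A caller-side sketch follows; the stream id, event payload, and retry policy are illustrative assumptions.

// Sketch: retrying an append on version conflicts against the 1.x
// appendToStream above. StoreLike is a minimal structural stand-in for
// the store created by createEventStore({ db, logger }).
import { ExpectedVersionConflictError } from "@event-driven-io/emmett";

type StoreLike = {
  readStream: (streamId: string) => Promise<{ currentStreamVersion: bigint }>;
  appendToStream: (
    streamId: string,
    events: { type: string; data: unknown }[],
    options?: {
      streamType?: string;
      partition?: string;
      expectedStreamVersion?: bigint;
    },
  ) => Promise<{ nextExpectedStreamVersion: bigint }>;
};
declare const eventStore: StoreLike; // e.g. createEventStore({ db, logger })

async function appendWithRetry(maxAttempts = 3): Promise<void> {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    // Read the current version, then append conditionally on it.
    const { currentStreamVersion } = await eventStore.readStream("order-123");
    try {
      await eventStore.appendToStream(
        "order-123",
        [{ type: "OrderConfirmed", data: { orderId: "order-123" } }],
        { streamType: "order", expectedStreamVersion: currentStreamVersion },
      );
      return;
    } catch (error) {
      if (!(error instanceof ExpectedVersionConflictError) || attempt === maxAttempts) {
        throw error;
      }
      // Another writer advanced the stream; loop to re-read and retry.
    }
  }
}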
--- package/dist/event-store/index.d.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import type { Dependencies } from "../types.js";
-export type { DatabaseExecutor, Dependencies, ExtendedOptions, } from "../types.js";
-export type { AggregateStream } from "./aggregate-stream.js";
-export type { AppendToStream } from "./append-to-stream.js";
-export type { ReadStream } from "./read-stream.js";
-export type EventStore = ReturnType<typeof createEventStore>;
-export { createReadStream } from "./read-stream.js";
-export declare function createEventStore(deps: Dependencies): {
-    readStream: import("./read-stream.js").ReadStream;
-    appendToStream: import("./append-to-stream.js").AppendToStream;
-    aggregateStream: import("./aggregate-stream.js").AggregateStream;
-};
-//# sourceMappingURL=index.d.ts.map
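The declaration file above types the 1.x store as a whole; its `aggregateStream` (see the bundle earlier) folds a stream through an `evolve` reducer starting from `initialState`. A brief sketch, with illustrative state and event shapes:

// Sketch: folding a stream into state with the 1.x aggregateStream.
// CartState/CartEvent are assumptions; the declaration mirrors the
// deleted aggregate-stream code, simplified.
type CartState = { items: number };
type CartEvent = { type: "ItemAdded"; data: { sku: string } };

declare const aggregateStream: <State, E extends { type: string; data: unknown }>(
  streamId: string,
  options: {
    evolve: (state: State, event: E) => State;
    initialState: () => State;
    read?: { expectedStreamVersion?: bigint };
  },
) => Promise<{ state: State; currentStreamVersion: bigint; streamExists: boolean }>;

const { state } = await aggregateStream<CartState, CartEvent>("cart-123", {
  initialState: () => ({ items: 0 }),
  // Unknown event types leave the state unchanged.
  evolve: (state, event) =>
    event.type === "ItemAdded" ? { items: state.items + 1 } : state,
});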
--- package/dist/event-store/index.d.ts.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/event-store/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAKhD,YAAY,EACV,gBAAgB,EAChB,YAAY,EACZ,eAAe,GAChB,MAAM,aAAa,CAAC;AACrB,YAAY,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAC;AAC7D,YAAY,EAAE,cAAc,EAAE,MAAM,uBAAuB,CAAC;AAC5D,YAAY,EAAE,UAAU,EAAE,MAAM,kBAAkB,CAAC;AACnD,MAAM,MAAM,UAAU,GAAG,UAAU,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAE7D,OAAO,EAAE,gBAAgB,EAAE,MAAM,kBAAkB,CAAC;AACpD,wBAAgB,gBAAgB,CAAC,IAAI,EAAE,YAAY;;;;EAKlD"}
--- package/dist/event-store/index.js
+++ /dev/null
@@ -1,10 +0,0 @@
-import { createAggregateStream } from "./aggregate-stream.js";
-import { createAppendToStream } from "./append-to-stream.js";
-import { createReadStream } from "./read-stream.js";
-export { createReadStream } from "./read-stream.js";
-export function createEventStore(deps) {
-    const readStream = createReadStream(deps);
-    const appendToStream = createAppendToStream(deps);
-    const aggregateStream = createAggregateStream({ readStream }, deps);
-    return { readStream, appendToStream, aggregateStream };
-}
--- package/dist/event-store/read-stream.d.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-import type { Event, ReadEventMetadataWithGlobalPosition, ReadStreamOptions, ReadStreamResult } from "@event-driven-io/emmett";
-import { type Dependencies, type ExtendedOptions } from "../types.js";
-type PostgresReadEventMetadata = ReadEventMetadataWithGlobalPosition;
-type ExtendedReadStreamOptions = ReadStreamOptions & ExtendedOptions;
-type ProjectionReadStreamOptions = {
-    from?: bigint;
-    to?: bigint;
-    partition?: string;
-    maxCount?: bigint;
-};
-export type ReadStream = <EventType extends Event>(stream: string, options?: ExtendedReadStreamOptions | ProjectionReadStreamOptions) => Promise<ReadStreamResult<EventType, PostgresReadEventMetadata>>;
-export declare function createReadStream({ db, logger }: Dependencies): ReadStream;
-export {};
-//# sourceMappingURL=read-stream.d.ts.map
--- package/dist/event-store/read-stream.d.ts.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"read-stream.d.ts","sourceRoot":"","sources":["../../src/event-store/read-stream.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EACV,KAAK,EAEL,mCAAmC,EACnC,iBAAiB,EACjB,gBAAgB,EACjB,MAAM,yBAAyB,CAAC;AACjC,OAAO,EAGL,KAAK,YAAY,EACjB,KAAK,eAAe,EACrB,MAAM,aAAa,CAAC;AAErB,KAAK,yBAAyB,GAAG,mCAAmC,CAAC;AACrE,KAAK,yBAAyB,GAAG,iBAAiB,GAAG,eAAe,CAAC;AAGrE,KAAK,2BAA2B,GAAG;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,EAAE,CAAC,EAAE,MAAM,CAAC;IACZ,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,UAAU,GAAG,CAAC,SAAS,SAAS,KAAK,EAC/C,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,yBAAyB,GAAG,2BAA2B,KAC9D,OAAO,CAAC,gBAAgB,CAAC,SAAS,EAAE,yBAAyB,CAAC,CAAC,CAAC;AAErE,wBAAgB,gBAAgB,CAAC,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,YAAY,GAAG,UAAU,CAiCzE"}
--- package/dist/event-store/read-stream.js
+++ /dev/null
@@ -1,88 +0,0 @@
-import { DEFAULT_PARTITION, PostgreSQLEventStoreDefaultStreamVersion, } from "../types.js";
-export function createReadStream({ db, logger }) {
-    return async function readStream(streamId, options) {
-        const partition = getPartition(options);
-        logger.info({ streamId, options, partition }, "readStream");
-        const { currentStreamVersion, streamExists } = await fetchStreamInfo(db, streamId, partition);
-        const range = parseRangeOptions(options);
-        const rows = await buildEventsQuery({ db, logger }, streamId, partition, range).execute();
-        const events = rows.map((row) => mapRowToEvent(row, streamId));
-        return {
-            events,
-            currentStreamVersion,
-            streamExists,
-        };
-    };
-}
-function parseRangeOptions(options) {
-    const from = options && typeof options === "object" && "from" in options
-        ? options.from
-        : undefined;
-    const to = options && typeof options === "object" && "to" in options
-        ? options.to
-        : undefined;
-    const maxCount = options && typeof options === "object" && "maxCount" in options
-        ? options.maxCount
-        : undefined;
-    return { from, to, maxCount };
-}
-function buildEventsQuery(deps, streamId, partition, range) {
-    const { db } = deps;
-    let q = db
-        .selectFrom("messages")
-        .select([
-        "message_type",
-        "message_data",
-        "message_metadata",
-        "stream_position",
-        "global_position",
-        "message_id",
-    ])
-        .where("stream_id", "=", streamId)
-        .where("partition", "=", partition)
-        .where("is_archived", "=", false)
-        .orderBy("stream_position");
-    if (range.from !== undefined) {
-        q = q.where("stream_position", ">=", BigInt(range.from));
-    }
-    if (range.to !== undefined) {
-        q = q.where("stream_position", "<=", BigInt(range.to));
-    }
-    if (range.maxCount !== undefined) {
-        q = q.limit(Number(range.maxCount));
-    }
-    return q;
-}
-function mapRowToEvent(row, streamId) {
-    const streamPosition = BigInt(String(row.stream_position));
-    const globalPosition = BigInt(String(row.global_position ?? 0));
-    const baseMetadata = (row.message_metadata ?? {});
-    return {
-        kind: "Event",
-        type: row.message_type,
-        data: row.message_data,
-        metadata: {
-            ...baseMetadata,
-            messageId: row.message_id,
-            streamId: streamId,
-            streamPosition: streamPosition,
-            globalPosition: globalPosition,
-        },
-    };
-}
-function getPartition(options) {
-    return options?.partition ?? DEFAULT_PARTITION;
-}
-async function fetchStreamInfo(executor, streamId, partition) {
-    const streamRow = await executor
-        .selectFrom("streams")
-        .select(["stream_position"])
-        .where("stream_id", "=", streamId)
-        .where("partition", "=", partition)
-        .where("is_archived", "=", false)
-        .executeTakeFirst();
-    const currentStreamVersion = streamRow
-        ? streamRow.stream_position
-        : PostgreSQLEventStoreDefaultStreamVersion;
-    return { currentStreamVersion, streamExists: !!streamRow };
-}
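As the deleted `read-stream.js` above shows, the 1.x `readStream` accepted either Emmett's standard `ReadStreamOptions` or a projection-oriented `{ from, to, maxCount, partition }` shape, translated directly into `WHERE` and `LIMIT` clauses. A short usage sketch; the stream id and range values are illustrative, and the declaration is a simplified stand-in for the deleted `read-stream.d.ts`:

// Sketch: reading a slice of a stream with the 1.x readStream above.
declare const readStream: (
  streamId: string,
  options?: { from?: bigint; to?: bigint; maxCount?: bigint; partition?: string },
) => Promise<{ events: unknown[]; currentStreamVersion: bigint; streamExists: boolean }>;

const slice = await readStream("order-123", {
  from: 1n,      // WHERE stream_position >= 1
  to: 100n,      // AND stream_position <= 100
  maxCount: 50n, // LIMIT 50
  partition: "default_partition",
});
console.log(slice.streamExists, slice.currentStreamVersion, slice.events.length);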
--- package/dist/projections/index.cjs
+++ /dev/null
@@ -1,124 +0,0 @@
-"use strict";
-var __defProp = Object.defineProperty;
-var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
-var __getOwnPropNames = Object.getOwnPropertyNames;
-var __hasOwnProp = Object.prototype.hasOwnProperty;
-var __export = (target, all) => {
-  for (var name in all)
-    __defProp(target, name, { get: all[name], enumerable: true });
-};
-var __copyProps = (to, from, except, desc) => {
-  if (from && typeof from === "object" || typeof from === "function") {
-    for (let key of __getOwnPropNames(from))
-      if (!__hasOwnProp.call(to, key) && key !== except)
-        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
-  }
-  return to;
-};
-var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
-
-// src/projections/index.ts
-var projections_exports = {};
-__export(projections_exports, {
-  createProjectionRegistry: () => createProjectionRegistry,
-  createProjectionRunner: () => createProjectionRunner
-});
-module.exports = __toCommonJS(projections_exports);
-
-// src/types.ts
-function createProjectionRegistry(...registries) {
-  const combined = {};
-  for (const reg of registries) {
-    for (const [eventType, handlers] of Object.entries(reg)) {
-      combined[eventType] = [...combined[eventType] ?? [], ...handlers];
-    }
-  }
-  return combined;
-}
-
-// src/projections/runner.ts
-function createProjectionRunner({ db, readStream, registry }) {
-  async function getOrCreateCheckpoint(subscriptionId, partition) {
-    const existing = await db.selectFrom("subscriptions").select([
-      "subscription_id as subscriptionId",
-      "partition",
-      "last_processed_position as lastProcessedPosition"
-    ]).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).executeTakeFirst();
-    if (existing) {
-      const last = BigInt(
-        String(
-          existing.lastProcessedPosition
-        )
-      );
-      return {
-        subscriptionId,
-        partition,
-        lastProcessedPosition: last
-      };
-    }
-    await db.insertInto("subscriptions").values({
-      subscription_id: subscriptionId,
-      partition,
-      version: 1,
-      last_processed_position: 0n
-    }).onConflict(
-      (oc) => oc.columns(["subscription_id", "partition", "version"]).doUpdateSet({
-        last_processed_position: (eb) => eb.ref("excluded.last_processed_position")
-      })
-    ).execute();
-    return {
-      subscriptionId,
-      partition,
-      lastProcessedPosition: 0n
-    };
-  }
-  async function updateCheckpoint(subscriptionId, partition, lastProcessedPosition) {
-    await db.updateTable("subscriptions").set({ last_processed_position: lastProcessedPosition }).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).execute();
-  }
-  async function projectEvents(subscriptionId, streamId, opts) {
-    const partition = opts?.partition ?? "default_partition";
-    const batchSize = BigInt(opts?.batchSize ?? 500);
-    const checkpoint = await getOrCreateCheckpoint(subscriptionId, partition);
-    const { events, currentStreamVersion } = await readStream(streamId, {
-      from: checkpoint.lastProcessedPosition + 1n,
-      to: checkpoint.lastProcessedPosition + batchSize,
-      partition
-    });
-    for (const ev of events) {
-      if (!ev) continue;
-      const handlers = registry[ev.type] ?? [];
-      if (handlers.length === 0) {
-        await updateCheckpoint(
-          subscriptionId,
-          partition,
-          ev.metadata.streamPosition
-        );
-        continue;
-      }
-      const projectionEvent = {
-        type: ev.type,
-        data: ev.data,
-        metadata: {
-          streamId: ev.metadata.streamId,
-          streamPosition: ev.metadata.streamPosition,
-          globalPosition: ev.metadata.globalPosition
-        }
-      };
-      for (const handler of handlers) {
-        await handler({ db, partition }, projectionEvent);
-      }
-      await updateCheckpoint(
-        subscriptionId,
-        partition,
-        projectionEvent.metadata.streamPosition
-      );
-    }
-    return { processed: events.length, currentStreamVersion };
-  }
-  return { projectEvents };
-}
-// Annotate the CommonJS export names for ESM import in node:
-0 && (module.exports = {
-  createProjectionRegistry,
-  createProjectionRunner
-});
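The projections bundle above pairs a registry (event type mapped to an ordered handler list) with a checkpointed runner: `projectEvents` reads at most `batchSize` events past `subscriptions.last_processed_position` and advances the checkpoint after each event, so a restart resumes from the last fully processed event. A wiring sketch follows; the import path, read-model table, and handler body are illustrative assumptions, not part of this diff.

// Sketch: wiring the removed 1.x projection runner. Import path assumed;
// db and readStream come from the event-store wiring shown earlier.
import type { Kysely } from "kysely";
import {
  createProjectionRegistry,
  createProjectionRunner,
} from "@wataruoguchi/emmett-event-store-kysely/projections";

declare const db: Kysely<any>;
declare const readStream: any; // from createReadStream({ db, logger })

const registry = createProjectionRegistry({
  // One ordered handler list per event type.
  ShoppingCartOpened: [
    async ({ db, partition }, event) => {
      await db
        .insertInto("cart_summaries") // hypothetical read-model table
        .values({
          cart_id: event.metadata.streamId,
          partition,
          opened_at_position: event.metadata.streamPosition,
        })
        .execute();
    },
  ],
});

const runner = createProjectionRunner({ db, readStream, registry });

// Process up to 500 events past the stored checkpoint for this stream.
await runner.projectEvents("cart-summaries", "cart-123", {
  partition: "default_partition",
  batchSize: 500,
});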
--- package/dist/projections/index.d.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-export { createProjectionRegistry } from "../types.js";
-export type { ProjectionContext, ProjectionEvent, ProjectionEventMetadata, ProjectionHandler, ProjectionRegistry, } from "../types.js";
-export { createProjectionRunner } from "./runner.js";
-//# sourceMappingURL=index.d.ts.map
--- package/dist/projections/index.d.ts.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/projections/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,wBAAwB,EAAE,MAAM,aAAa,CAAC;AACvD,YAAY,EACV,iBAAiB,EACjB,eAAe,EACf,uBAAuB,EACvB,iBAAiB,EACjB,kBAAkB,GACnB,MAAM,aAAa,CAAC;AACrB,OAAO,EAAE,sBAAsB,EAAE,MAAM,aAAa,CAAC"}