@wataruoguchi/emmett-event-store-kysely 1.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/db-schema.d.ts +34 -0
- package/dist/db-schema.d.ts.map +1 -0
- package/dist/db-schema.js +3 -0
- package/dist/event-store/aggregate-stream.d.ts +10 -0
- package/dist/event-store/aggregate-stream.d.ts.map +1 -0
- package/dist/event-store/aggregate-stream.js +18 -0
- package/dist/event-store/append-to-stream.d.ts +7 -0
- package/dist/event-store/append-to-stream.d.ts.map +1 -0
- package/dist/event-store/append-to-stream.js +143 -0
- package/dist/event-store/index.cjs +291 -0
- package/dist/event-store/index.d.ts +13 -0
- package/dist/event-store/index.d.ts.map +1 -0
- package/dist/event-store/index.js +10 -0
- package/dist/event-store/read-stream.d.ts +14 -0
- package/dist/event-store/read-stream.d.ts.map +1 -0
- package/dist/event-store/read-stream.js +88 -0
- package/dist/projections/index.cjs +124 -0
- package/dist/projections/index.d.ts +4 -0
- package/dist/projections/index.d.ts.map +1 -0
- package/dist/projections/index.js +2 -0
- package/dist/projections/runner.d.ts +21 -0
- package/dist/projections/runner.d.ts.map +1 -0
- package/dist/projections/runner.js +82 -0
- package/dist/types.d.ts +38 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +15 -0
- package/package.json +78 -0
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
 * Row shape of the `messages` table: one row per appended event/message.
 * Column names/types mirror the database schema for use with the query builder.
 */
export interface MessagesTable {
    // Timestamp of when the row was written (set at append time).
    created: Date;
    // Store-wide monotonic position; null until assigned — TODO confirm when the DB assigns it.
    global_position: bigint | null;
    // Soft-delete flag; archived rows are excluded from reads and appends.
    is_archived: boolean;
    // Event payload; shape is event-specific.
    message_data: unknown;
    message_id: string;
    // Message-kind discriminator (append path writes "E" for events).
    message_kind: string;
    // Caller metadata merged with store-assigned fields (e.g. messageId).
    message_metadata: unknown;
    // NOTE(review): append path stores the event's index here, not a schema version — confirm semantics.
    message_schema_version: string;
    message_type: string;
    // Logical tenant/partition key.
    partition: string;
    stream_id: string;
    // 1-based position of the message within its stream.
    stream_position: bigint;
}
|
|
15
|
+
/**
 * Row shape of the `streams` table: one row per (stream_id, partition),
 * tracking the stream's current head position.
 */
export interface StreamsTable {
    // Soft-delete flag; archived streams are ignored by reads/appends.
    is_archived: boolean;
    partition: string;
    stream_id: string;
    // Arbitrary per-stream metadata; initialized to {} on creation.
    stream_metadata: unknown;
    // Highest stream_position appended so far (the stream "version").
    stream_position: bigint;
    // Caller-supplied category; defaults to "unknown" at append time.
    stream_type: string;
}
|
|
23
|
+
/**
 * Row shape of the `subscriptions` table: checkpoints for consumers/projections.
 */
export interface SubscriptionsTable {
    // Last global_position this subscription has processed — resume point.
    last_processed_position: bigint;
    partition: string;
    subscription_id: string;
    // NOTE(review): presumably a subscription/schema version counter — confirm semantics against the projections runner.
    version: number;
}
|
|
29
|
+
/**
 * Database schema for the event store: maps table name -> row type,
 * in the shape the Kysely query builder expects.
 */
export interface EventStoreDBSchema {
    messages: MessagesTable;
    streams: StreamsTable;
    subscriptions: SubscriptionsTable;
}
//# sourceMappingURL=db-schema.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"db-schema.d.ts","sourceRoot":"","sources":["../src/db-schema.ts"],"names":[],"mappings":"AAGA,MAAM,WAAW,aAAa;IAC5B,OAAO,EAAE,IAAI,CAAC;IACd,eAAe,EAAE,MAAM,GAAG,IAAI,CAAC;IAC/B,WAAW,EAAE,OAAO,CAAC;IACrB,YAAY,EAAE,OAAO,CAAC;IACtB,UAAU,EAAE,MAAM,CAAC;IACnB,YAAY,EAAE,MAAM,CAAC;IACrB,gBAAgB,EAAE,OAAO,CAAC;IAC1B,sBAAsB,EAAE,MAAM,CAAC;IAC/B,YAAY,EAAE,MAAM,CAAC;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,eAAe,EAAE,MAAM,CAAC;CACzB;AAED,MAAM,WAAW,YAAY;IAC3B,WAAW,EAAE,OAAO,CAAC;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,eAAe,EAAE,OAAO,CAAC;IACzB,eAAe,EAAE,MAAM,CAAC;IACxB,WAAW,EAAE,MAAM,CAAC;CACrB;AAED,MAAM,WAAW,kBAAkB;IACjC,uBAAuB,EAAE,MAAM,CAAC;IAChC,SAAS,EAAE,MAAM,CAAC;IAClB,eAAe,EAAE,MAAM,CAAC;IACxB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,kBAAkB;IACjC,QAAQ,EAAE,aAAa,CAAC;IACxB,OAAO,EAAE,YAAY,CAAC;IACtB,aAAa,EAAE,kBAAkB,CAAC;CACnC"}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { type AggregateStreamOptions, type AggregateStreamResult, type Event, type ReadEventMetadataWithGlobalPosition } from "@event-driven-io/emmett";
import { type Dependencies } from "../types.js";
import type { ReadStream } from "./read-stream.js";
// Metadata attached to events read back from this store (includes global position).
type PostgresReadEventMetadata = ReadEventMetadataWithGlobalPosition;
/**
 * Reads a stream and folds its events into a single state value using the
 * caller-supplied evolve/initialState (see Emmett's AggregateStreamOptions).
 */
export type AggregateStream = <State, EventType extends Event>(streamId: string, options: AggregateStreamOptions<State, EventType, PostgresReadEventMetadata>) => Promise<AggregateStreamResult<State>>;
/**
 * Builds an AggregateStream on top of an existing ReadStream.
 * @param readStream - stream reader used to fetch the events to fold.
 * @param logger - logger taken from the shared Dependencies.
 */
export declare function createAggregateStream({ readStream }: {
    readStream: ReadStream;
}, { logger }: Dependencies): AggregateStream;
export {};
//# sourceMappingURL=aggregate-stream.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"aggregate-stream.d.ts","sourceRoot":"","sources":["../../src/event-store/aggregate-stream.ts"],"names":[],"mappings":"AACA,OAAO,EAEL,KAAK,sBAAsB,EAC3B,KAAK,qBAAqB,EAC1B,KAAK,KAAK,EACV,KAAK,mCAAmC,EACzC,MAAM,yBAAyB,CAAC;AACjC,OAAO,EAEL,KAAK,YAAY,EAClB,MAAM,aAAa,CAAC;AACrB,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,kBAAkB,CAAC;AAEnD,KAAK,yBAAyB,GAAG,mCAAmC,CAAC;AAErE,MAAM,MAAM,eAAe,GAAG,CAAC,KAAK,EAAE,SAAS,SAAS,KAAK,EAC3D,QAAQ,EAAE,MAAM,EAChB,OAAO,EAAE,sBAAsB,CAAC,KAAK,EAAE,SAAS,EAAE,yBAAyB,CAAC,KACzE,OAAO,CAAC,qBAAqB,CAAC,KAAK,CAAC,CAAC,CAAC;AAE3C,wBAAgB,qBAAqB,CACnC,EAAE,UAAU,EAAE,EAAE;IAAE,UAAU,EAAE,UAAU,CAAA;CAAE,EAC1C,EAAE,MAAM,EAAE,EAAE,YAAY,GACvB,eAAe,CAgCjB"}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
// biome-ignore assist/source/organizeImports: retain import order similar to app code
|
|
2
|
+
import { assertExpectedVersionMatchesCurrent, } from "@event-driven-io/emmett";
|
|
3
|
+
import { PostgreSQLEventStoreDefaultStreamVersion, } from "../types.js";
|
|
4
|
+
/**
 * Builds the aggregateStream operation from a readStream and a logger.
 *
 * aggregateStream reads the requested slice of a stream and folds its events
 * into a state value via the caller's evolve/initialState. Throws (through
 * assertExpectedVersionMatchesCurrent) when the stream's current version does
 * not match read.expectedStreamVersion.
 */
export function createAggregateStream({ readStream }, { logger }) {
    return async function aggregateStream(streamId, options) {
        const { evolve, initialState, read } = options;
        logger.info({ streamId, options }, "aggregateStream");
        const stream = await readStream(streamId, read);
        // Optimistic-concurrency guard against the version the caller expected.
        assertExpectedVersionMatchesCurrent(stream.currentStreamVersion, read?.expectedStreamVersion, PostgreSQLEventStoreDefaultStreamVersion);
        // Fold events oldest-to-newest, skipping falsy entries.
        let state = initialState();
        for (const event of stream.events) {
            if (event) {
                state = evolve(state, event);
            }
        }
        return {
            state,
            currentStreamVersion: stream.currentStreamVersion,
            streamExists: stream.streamExists,
        };
    };
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import { type AppendToStreamOptions, type AppendToStreamResultWithGlobalPosition, type Event } from "@event-driven-io/emmett";
import { type Dependencies, type ExtendedOptions } from "../types.js";
// Emmett append options extended with store-specific options (partition, streamType, ...).
type ExtendedAppendToStreamOptions = AppendToStreamOptions & ExtendedOptions;
/**
 * Appends events to a stream, enforcing the optional expected-version check,
 * and resolves with the new stream version plus the last global position.
 */
export type AppendToStream = <EventType extends Event>(streamId: string, events: EventType[], options?: ExtendedAppendToStreamOptions) => Promise<AppendToStreamResultWithGlobalPosition>;
/** Builds an AppendToStream bound to the given database and logger. */
export declare function createAppendToStream({ db, logger, }: Dependencies): AppendToStream;
export {};
//# sourceMappingURL=append-to-stream.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"append-to-stream.d.ts","sourceRoot":"","sources":["../../src/event-store/append-to-stream.ts"],"names":[],"mappings":"AACA,OAAO,EAKL,KAAK,qBAAqB,EAC1B,KAAK,sCAAsC,EAC3C,KAAK,KAAK,EACX,MAAM,yBAAyB,CAAC;AACjC,OAAO,EAGL,KAAK,YAAY,EACjB,KAAK,eAAe,EACrB,MAAM,aAAa,CAAC;AAIrB,KAAK,6BAA6B,GAAG,qBAAqB,GAAG,eAAe,CAAC;AAC7E,MAAM,MAAM,cAAc,GAAG,CAAC,SAAS,SAAS,KAAK,EACnD,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,SAAS,EAAE,EACnB,OAAO,CAAC,EAAE,6BAA6B,KACpC,OAAO,CAAC,sCAAsC,CAAC,CAAC;AAErD,wBAAgB,oBAAoB,CAAC,EACnC,EAAE,EACF,MAAM,GACP,EAAE,YAAY,GAAG,cAAc,CA6D/B"}
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
// biome-ignore assist/source/organizeImports: retain import order similar to app code
|
|
2
|
+
import { ExpectedVersionConflictError, NO_CONCURRENCY_CHECK, STREAM_DOES_NOT_EXIST, STREAM_EXISTS, } from "@event-driven-io/emmett";
|
|
3
|
+
import { DEFAULT_PARTITION, PostgreSQLEventStoreDefaultStreamVersion, } from "../types.js";
|
|
4
|
+
const PostgreSQLEventStoreDefaultGlobalPosition = 0n;
/**
 * Builds the appendToStream operation bound to the given db and logger.
 *
 * appendToStream runs one transaction that: reads the stream head, checks the
 * caller's expected version, advances the stream row, and inserts one
 * `messages` row per event. Resolves with the new stream version, the last
 * assigned global position, and whether the stream was created.
 */
export function createAppendToStream({ db, logger, }) {
    return async function appendToStream(streamId, events, options) {
        const streamType = getStreamType(options);
        const partition = getPartition(options);
        const expected = options?.expectedStreamVersion;
        logger.info({ streamId, events, options, partition }, "appendToStream");
        // Appending zero events is rejected up front.
        ensureEventsNotEmpty(events, expected);
        return db.transaction().execute(async (trx) => {
            const info = await fetchStreamInfo(trx, streamId, partition);
            assertExpectedVersion(expected, info.currentStreamVersion, info.streamExists);
            const nextPosition = computeNextStreamPosition(info.currentStreamVersion, events.length);
            // Advance (or create) the stream head before inserting messages.
            await upsertStreamRow(trx, streamId, partition, streamType, info.currentStreamVersion, nextPosition, expected, info.streamExists);
            const rows = buildMessagesToInsert(events, info.currentStreamVersion, streamId, partition);
            const lastEventGlobalPosition = await insertMessagesAndGetLastGlobalPosition(trx, rows);
            return {
                nextExpectedStreamVersion: nextPosition,
                lastEventGlobalPosition,
                createdNewStream: !info.streamExists,
            };
        });
    };
}
|
|
31
|
+
// Resolves the stream's type/category from the append options,
// falling back to "unknown" when the caller did not provide one.
function getStreamType(options) {
    if (options && options.streamType != null) {
        return options.streamType;
    }
    return "unknown";
}
|
|
34
|
+
// Rejects empty appends: zero events is reported as a version conflict,
// using the caller's expectation (or NO_CONCURRENCY_CHECK) in the error.
function ensureEventsNotEmpty(events, expected) {
    if (events.length > 0) {
        return;
    }
    throw new ExpectedVersionConflictError(-1n, expected ?? NO_CONCURRENCY_CHECK);
}
|
|
39
|
+
// Optimistic-concurrency check.
// Supports three expectation kinds: STREAM_EXISTS, STREAM_DOES_NOT_EXIST,
// and an exact bigint version; any other value (or undefined) passes.
function assertExpectedVersion(expected, currentPos, streamExistsNow) {
    if (expected === STREAM_EXISTS) {
        if (!streamExistsNow) {
            throw new ExpectedVersionConflictError(-1n, STREAM_EXISTS);
        }
        return;
    }
    if (expected === STREAM_DOES_NOT_EXIST) {
        if (streamExistsNow) {
            throw new ExpectedVersionConflictError(-1n, STREAM_DOES_NOT_EXIST);
        }
        return;
    }
    if (typeof expected === "bigint" && expected !== currentPos) {
        throw new ExpectedVersionConflictError(currentPos, expected);
    }
}
|
|
50
|
+
// New head position after appending `eventCount` events on top of `basePos`.
function computeNextStreamPosition(basePos, eventCount) {
    const delta = BigInt(eventCount);
    return basePos + delta;
}
|
|
53
|
+
// Creates or advances the stream head row.
// When `expected` is a bigint, the UPDATE is guarded by the old position so a
// concurrent writer makes it match zero rows, which we surface as a conflict.
async function upsertStreamRow(executor, streamId, partition, streamType, basePos, nextStreamPosition, expected, streamExistsNow) {
    if (!streamExistsNow) {
        // Brand-new stream: insert the head row directly at its final position.
        await executor.insertInto("streams").values({
            stream_id: streamId,
            stream_position: nextStreamPosition,
            partition,
            stream_type: streamType,
            stream_metadata: {},
            is_archived: false,
        }).execute();
        return;
    }
    if (typeof expected !== "bigint") {
        // No optimistic check requested: unconditionally advance the head.
        await executor.updateTable("streams")
            .set({ stream_position: nextStreamPosition })
            .where("stream_id", "=", streamId)
            .where("partition", "=", partition)
            .where("is_archived", "=", false)
            .execute();
        return;
    }
    // Guarded advance: only succeeds if the head is still at basePos.
    const row = await executor.updateTable("streams")
        .set({ stream_position: nextStreamPosition })
        .where("stream_id", "=", streamId)
        .where("partition", "=", partition)
        .where("is_archived", "=", false)
        .where("stream_position", "=", basePos)
        .returning("stream_position")
        .executeTakeFirst();
    if (!row) {
        throw new ExpectedVersionConflictError(basePos, expected);
    }
}
|
|
91
|
+
function buildMessagesToInsert(events, basePos, streamId, partition) {
|
|
92
|
+
return events.map((e, index) => {
|
|
93
|
+
const messageId = crypto.randomUUID();
|
|
94
|
+
const streamPosition = basePos + BigInt(index + 1);
|
|
95
|
+
const rawMeta = "metadata" in e ? e.metadata : undefined;
|
|
96
|
+
const eventMeta = rawMeta && typeof rawMeta === "object" ? rawMeta : {};
|
|
97
|
+
const messageMetadata = {
|
|
98
|
+
messageId,
|
|
99
|
+
...eventMeta,
|
|
100
|
+
};
|
|
101
|
+
return {
|
|
102
|
+
stream_id: streamId,
|
|
103
|
+
stream_position: streamPosition,
|
|
104
|
+
partition,
|
|
105
|
+
message_data: e.data,
|
|
106
|
+
message_metadata: messageMetadata,
|
|
107
|
+
message_schema_version: index.toString(),
|
|
108
|
+
message_type: e.type,
|
|
109
|
+
message_kind: "E",
|
|
110
|
+
message_id: messageId,
|
|
111
|
+
is_archived: false,
|
|
112
|
+
created: new Date(),
|
|
113
|
+
};
|
|
114
|
+
});
|
|
115
|
+
}
|
|
116
|
+
// Inserts the message rows and returns the last assigned global_position,
// or the default 0n when the driver returned nothing.
async function insertMessagesAndGetLastGlobalPosition(executor, messagesToInsert) {
    const returned = await executor
        .insertInto("messages")
        .values(messagesToInsert)
        .returning("global_position")
        .execute();
    if (!returned || (Array.isArray(returned) && returned.length === 0)) {
        return PostgreSQLEventStoreDefaultGlobalPosition;
    }
    // Normalize every returned position to bigint; keep the last one.
    let last = PostgreSQLEventStoreDefaultGlobalPosition;
    for (const row of returned) {
        last = BigInt(String(row.global_position));
    }
    return last;
}
|
|
128
|
+
// Resolves the partition from the options, defaulting when absent.
function getPartition(options) {
    const partition = options?.partition;
    return partition == null ? DEFAULT_PARTITION : partition;
}
|
|
131
|
+
// Reads the stream head row for (streamId, partition), ignoring archived rows.
// Returns the current version (0n when the stream does not exist) and an
// existence flag; the position is normalized to bigint via String().
async function fetchStreamInfo(executor, streamId, partition) {
    const row = await executor
        .selectFrom("streams")
        .select(["stream_position"])
        .where("stream_id", "=", streamId)
        .where("partition", "=", partition)
        .where("is_archived", "=", false)
        .executeTakeFirst();
    if (!row) {
        return {
            currentStreamVersion: PostgreSQLEventStoreDefaultStreamVersion,
            streamExists: false,
        };
    }
    return {
        currentStreamVersion: BigInt(String(row.stream_position)),
        streamExists: true,
    };
}
|
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
"use strict";
// Bundler-generated CommonJS interop helpers (do not edit by hand).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines each export as an enumerable getter on the target object.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties from `from` to `to` (skipping `except`),
// preserving each property's enumerability via lazy getters.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Marks the module object as an ES module for interop, then copies exports onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/event-store/index.ts
// Public CJS surface: createEventStore and createReadStream.
var event_store_exports = {};
__export(event_store_exports, {
  createEventStore: () => createEventStore,
  createReadStream: () => createReadStream
});
module.exports = __toCommonJS(event_store_exports);

// src/event-store/aggregate-stream.ts
var import_emmett = require("@event-driven-io/emmett");

// src/types.ts
// Default stream version for streams that do not exist yet.
var PostgreSQLEventStoreDefaultStreamVersion = 0n;
// Partition used when callers do not specify one.
var DEFAULT_PARTITION = "default_partition";
|
|
34
|
+
|
|
35
|
+
// src/event-store/aggregate-stream.ts
// Builds aggregateStream: reads a stream and folds its events into state
// via the caller's evolve/initialState, after an expected-version check.
function createAggregateStream({ readStream }, { logger }) {
  return async function aggregateStream(streamId, options) {
    const { evolve, initialState, read } = options;
    logger.info({ streamId, options }, "aggregateStream");
    const expectedStreamVersion = read?.expectedStreamVersion;
    const result = await readStream(streamId, options.read);
    // Optimistic-concurrency guard against the version the caller expected.
    (0, import_emmett.assertExpectedVersionMatchesCurrent)(
      result.currentStreamVersion,
      expectedStreamVersion,
      PostgreSQLEventStoreDefaultStreamVersion
    );
    // Fold oldest-to-newest; falsy entries are skipped.
    const state = result.events.reduce(
      (state2, event) => event ? evolve(state2, event) : state2,
      initialState()
    );
    return {
      state,
      currentStreamVersion: result.currentStreamVersion,
      streamExists: result.streamExists
    };
  };
}
|
|
58
|
+
|
|
59
|
+
// src/event-store/append-to-stream.ts
var import_emmett2 = require("@event-driven-io/emmett");
// Global position returned when the insert yields no rows.
var PostgreSQLEventStoreDefaultGlobalPosition = 0n;
// Builds appendToStream: one transaction that reads the stream head, checks
// the expected version, advances the stream row, and inserts message rows.
function createAppendToStream({
  db,
  logger
}) {
  return async function appendToStream(streamId, events, options) {
    const streamType = getStreamType(options);
    const partition = getPartition(options);
    const expected = options?.expectedStreamVersion;
    logger.info({ streamId, events, options, partition }, "appendToStream");
    // Zero-event appends are rejected before opening a transaction.
    ensureEventsNotEmpty(events, expected);
    const result = await db.transaction().execute(async (trx) => {
      const { currentStreamVersion, streamExists } = await fetchStreamInfo(
        trx,
        streamId,
        partition
      );
      assertExpectedVersion(expected, currentStreamVersion, streamExists);
      const basePos = currentStreamVersion;
      const nextStreamPosition = computeNextStreamPosition(
        basePos,
        events.length
      );
      // Advance (or create) the stream head before inserting messages.
      await upsertStreamRow(
        trx,
        streamId,
        partition,
        streamType,
        basePos,
        nextStreamPosition,
        expected,
        streamExists
      );
      const messagesToInsert = buildMessagesToInsert(
        events,
        basePos,
        streamId,
        partition
      );
      const lastEventGlobalPosition = await insertMessagesAndGetLastGlobalPosition(trx, messagesToInsert);
      return {
        nextExpectedStreamVersion: nextStreamPosition,
        lastEventGlobalPosition,
        createdNewStream: !streamExists
      };
    });
    return result;
  };
}
|
|
110
|
+
// Resolves the stream type from options, defaulting to "unknown".
function getStreamType(options) {
  return options?.streamType ?? "unknown";
}
// Rejects empty appends as a version conflict.
function ensureEventsNotEmpty(events, expected) {
  if (events.length === 0) {
    throw new import_emmett2.ExpectedVersionConflictError(
      -1n,
      expected ?? import_emmett2.NO_CONCURRENCY_CHECK
    );
  }
}
// Optimistic-concurrency check: existence constraints and exact bigint match.
function assertExpectedVersion(expected, currentPos, streamExistsNow) {
  if (expected === import_emmett2.STREAM_EXISTS && !streamExistsNow) {
    throw new import_emmett2.ExpectedVersionConflictError(-1n, import_emmett2.STREAM_EXISTS);
  }
  if (expected === import_emmett2.STREAM_DOES_NOT_EXIST && streamExistsNow) {
    throw new import_emmett2.ExpectedVersionConflictError(-1n, import_emmett2.STREAM_DOES_NOT_EXIST);
  }
  if (typeof expected === "bigint" && expected !== currentPos) {
    throw new import_emmett2.ExpectedVersionConflictError(currentPos, expected);
  }
}
// New head position after appending `eventCount` events.
function computeNextStreamPosition(basePos, eventCount) {
  return basePos + BigInt(eventCount);
}
|
|
135
|
+
// Creates or advances the stream head row; with a bigint expectation the
// UPDATE is guarded by the old position, so a concurrent writer makes it
// match zero rows, which is surfaced as an ExpectedVersionConflictError.
async function upsertStreamRow(executor, streamId, partition, streamType, basePos, nextStreamPosition, expected, streamExistsNow) {
  if (!streamExistsNow) {
    // Brand-new stream: insert its head row at the final position.
    await executor.insertInto("streams").values({
      stream_id: streamId,
      stream_position: nextStreamPosition,
      partition,
      stream_type: streamType,
      stream_metadata: {},
      is_archived: false
    }).execute();
    return;
  }
  if (typeof expected === "bigint") {
    // Guarded advance: succeeds only if the head is still at basePos.
    const updatedRow = await executor.updateTable("streams").set({ stream_position: nextStreamPosition }).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).where("stream_position", "=", basePos).returning("stream_position").executeTakeFirst();
    if (!updatedRow) {
      throw new import_emmett2.ExpectedVersionConflictError(basePos, expected);
    }
    return;
  }
  // No optimistic check: unconditionally advance the head.
  await executor.updateTable("streams").set({ stream_position: nextStreamPosition }).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).execute();
}
|
|
156
|
+
// Maps domain events to `messages` rows; positions are basePos+1..basePos+n.
function buildMessagesToInsert(events, basePos, streamId, partition) {
  return events.map((e, index) => {
    const messageId = crypto.randomUUID();
    const streamPosition = basePos + BigInt(index + 1);
    const rawMeta = "metadata" in e ? e.metadata : void 0;
    const eventMeta = rawMeta && typeof rawMeta === "object" ? rawMeta : {};
    // Event metadata is spread after messageId, so it may override it.
    const messageMetadata = {
      messageId,
      ...eventMeta
    };
    return {
      stream_id: streamId,
      stream_position: streamPosition,
      partition,
      message_data: e.data,
      message_metadata: messageMetadata,
      // NOTE(review): stores the event's index, not a real schema version — confirm intent.
      message_schema_version: index.toString(),
      message_type: e.type,
      message_kind: "E",
      message_id: messageId,
      is_archived: false,
      created: /* @__PURE__ */ new Date()
    };
  });
}
|
|
181
|
+
// Inserts the rows and returns the last assigned global_position
// (or the default 0n when the driver returned nothing).
async function insertMessagesAndGetLastGlobalPosition(executor, messagesToInsert) {
  const inserted = await executor.insertInto("messages").values(messagesToInsert).returning("global_position").execute();
  if (!inserted || Array.isArray(inserted) && inserted.length === 0) {
    return PostgreSQLEventStoreDefaultGlobalPosition;
  }
  // Normalize every returned position to bigint; take the last.
  const globalPositions = inserted.map(
    (r) => BigInt(String(r.global_position))
  );
  return globalPositions[globalPositions.length - 1];
}
// Resolves the partition from options, defaulting when absent.
function getPartition(options) {
  return options?.partition ?? DEFAULT_PARTITION;
}
// Reads the stream head (append path); position normalized to bigint,
// version defaults to 0n when the row is absent.
async function fetchStreamInfo(executor, streamId, partition) {
  const streamRow = await executor.selectFrom("streams").select(["stream_position"]).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).executeTakeFirst();
  const currentStreamVersion = streamRow ? BigInt(
    String(streamRow.stream_position)
  ) : PostgreSQLEventStoreDefaultStreamVersion;
  return { currentStreamVersion, streamExists: !!streamRow };
}
|
|
201
|
+
|
|
202
|
+
// src/event-store/read-stream.ts
// Builds readStream: fetches the stream head, then the (optionally bounded)
// page of messages, and maps rows to read events.
function createReadStream({ db, logger }) {
  return async function readStream(streamId, options) {
    const partition = getPartition2(options);
    logger.info({ streamId, options, partition }, "readStream");
    const { currentStreamVersion, streamExists } = await fetchStreamInfo2(
      db,
      streamId,
      partition
    );
    const range = parseRangeOptions(options);
    const rows = await buildEventsQuery(
      { db, logger },
      streamId,
      partition,
      range
    ).execute();
    const events = rows.map((row) => mapRowToEvent(row, streamId));
    return {
      events,
      currentStreamVersion,
      streamExists
    };
  };
}
|
|
227
|
+
// Extracts optional from/to/maxCount range bounds from the options object.
function parseRangeOptions(options) {
  const from = options && typeof options === "object" && "from" in options ? options.from : void 0;
  const to = options && typeof options === "object" && "to" in options ? options.to : void 0;
  const maxCount = options && typeof options === "object" && "maxCount" in options ? options.maxCount : void 0;
  return { from, to, maxCount };
}
// Builds the SELECT over `messages` for one stream/partition, ordered by
// stream_position, applying the optional range bounds and row limit.
function buildEventsQuery(deps, streamId, partition, range) {
  const { db } = deps;
  let q = db.selectFrom("messages").select([
    "message_type",
    "message_data",
    "message_metadata",
    "stream_position",
    "global_position",
    "message_id"
  ]).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).orderBy("stream_position");
  if (range.from !== void 0) {
    q = q.where("stream_position", ">=", BigInt(range.from));
  }
  if (range.to !== void 0) {
    q = q.where("stream_position", "<=", BigInt(range.to));
  }
  if (range.maxCount !== void 0) {
    q = q.limit(Number(range.maxCount));
  }
  return q;
}
|
|
254
|
+
// Converts a `messages` row into a read event with merged metadata.
function mapRowToEvent(row, streamId) {
  const streamPosition = BigInt(String(row.stream_position));
  const globalPosition = BigInt(String(row.global_position ?? 0));
  const baseMetadata = row.message_metadata ?? {};
  return {
    kind: "Event",
    type: row.message_type,
    data: row.message_data,
    metadata: {
      ...baseMetadata,
      // Store-assigned fields win over stored metadata (spread comes first).
      messageId: row.message_id,
      streamId,
      streamPosition,
      globalPosition
    }
  };
}
// Resolves the partition from options, defaulting when absent (read path).
function getPartition2(options) {
  return options?.partition ?? DEFAULT_PARTITION;
}
// Reads the stream head (read path).
// NOTE(review): unlike the append path's fetchStreamInfo, the row's
// stream_position is returned as-is — no BigInt(String(...)) normalization.
// Confirm the driver yields bigint here, otherwise the two paths disagree.
async function fetchStreamInfo2(executor, streamId, partition) {
  const streamRow = await executor.selectFrom("streams").select(["stream_position"]).where("stream_id", "=", streamId).where("partition", "=", partition).where("is_archived", "=", false).executeTakeFirst();
  const currentStreamVersion = streamRow ? streamRow.stream_position : PostgreSQLEventStoreDefaultStreamVersion;
  return { currentStreamVersion, streamExists: !!streamRow };
}
|
|
279
|
+
|
|
280
|
+
// src/event-store/index.ts
// Wires read/append/aggregate to a single Dependencies instance.
function createEventStore(deps) {
  const readStream = createReadStream(deps);
  const appendToStream = createAppendToStream(deps);
  const aggregateStream = createAggregateStream({ readStream }, deps);
  return { readStream, appendToStream, aggregateStream };
}
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  createEventStore,
  createReadStream
});
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { Dependencies } from "../types.js";
// Re-exported shared types for consumers of this package.
export type { DatabaseExecutor, Dependencies, ExtendedOptions, } from "../types.js";
export type { AggregateStream } from "./aggregate-stream.js";
export type { AppendToStream } from "./append-to-stream.js";
export type { ReadStream } from "./read-stream.js";
// Shape of the object returned by createEventStore.
export type EventStore = ReturnType<typeof createEventStore>;
export { createReadStream } from "./read-stream.js";
/**
 * Wires the three store operations (read, append, aggregate) to one set
 * of Dependencies (database + logger).
 */
export declare function createEventStore(deps: Dependencies): {
    readStream: import("./read-stream.js").ReadStream;
    appendToStream: import("./append-to-stream.js").AppendToStream;
    aggregateStream: import("./aggregate-stream.js").AggregateStream;
};
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/event-store/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAKhD,YAAY,EACV,gBAAgB,EAChB,YAAY,EACZ,eAAe,GAChB,MAAM,aAAa,CAAC;AACrB,YAAY,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAC;AAC7D,YAAY,EAAE,cAAc,EAAE,MAAM,uBAAuB,CAAC;AAC5D,YAAY,EAAE,UAAU,EAAE,MAAM,kBAAkB,CAAC;AACnD,MAAM,MAAM,UAAU,GAAG,UAAU,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAE7D,OAAO,EAAE,gBAAgB,EAAE,MAAM,kBAAkB,CAAC;AACpD,wBAAgB,gBAAgB,CAAC,IAAI,EAAE,YAAY;;;;EAKlD"}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { createAggregateStream } from "./aggregate-stream.js";
|
|
2
|
+
import { createAppendToStream } from "./append-to-stream.js";
|
|
3
|
+
import { createReadStream } from "./read-stream.js";
|
|
4
|
+
export { createReadStream } from "./read-stream.js";
|
|
5
|
+
/**
 * Wires the three store operations to one Dependencies instance.
 * aggregateStream is built on top of the same readStream that is returned.
 */
export function createEventStore(deps) {
    const readStream = createReadStream(deps);
    return {
        readStream,
        appendToStream: createAppendToStream(deps),
        aggregateStream: createAggregateStream({ readStream }, deps),
    };
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import type { Event, ReadEventMetadataWithGlobalPosition, ReadStreamOptions, ReadStreamResult } from "@event-driven-io/emmett";
import { type Dependencies, type ExtendedOptions } from "../types.js";
// Metadata attached to events read back from this store.
type PostgresReadEventMetadata = ReadEventMetadataWithGlobalPosition;
// Emmett read options extended with store-specific options (e.g. partition).
type ExtendedReadStreamOptions = ReadStreamOptions & ExtendedOptions;
// Simplified range options (from/to/maxCount bounds plus partition).
type ProjectionReadStreamOptions = {
    from?: bigint;
    to?: bigint;
    partition?: string;
    maxCount?: bigint;
};
/**
 * Reads a stream's events (optionally bounded by from/to/maxCount) and
 * resolves with the events plus the stream's current version and existence.
 */
export type ReadStream = <EventType extends Event>(stream: string, options?: ExtendedReadStreamOptions | ProjectionReadStreamOptions) => Promise<ReadStreamResult<EventType, PostgresReadEventMetadata>>;
/** Builds a ReadStream bound to the given database and logger. */
export declare function createReadStream({ db, logger }: Dependencies): ReadStream;
export {};
//# sourceMappingURL=read-stream.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"read-stream.d.ts","sourceRoot":"","sources":["../../src/event-store/read-stream.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EACV,KAAK,EAEL,mCAAmC,EACnC,iBAAiB,EACjB,gBAAgB,EACjB,MAAM,yBAAyB,CAAC;AACjC,OAAO,EAGL,KAAK,YAAY,EACjB,KAAK,eAAe,EACrB,MAAM,aAAa,CAAC;AAErB,KAAK,yBAAyB,GAAG,mCAAmC,CAAC;AACrE,KAAK,yBAAyB,GAAG,iBAAiB,GAAG,eAAe,CAAC;AAGrE,KAAK,2BAA2B,GAAG;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,EAAE,CAAC,EAAE,MAAM,CAAC;IACZ,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,UAAU,GAAG,CAAC,SAAS,SAAS,KAAK,EAC/C,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE,yBAAyB,GAAG,2BAA2B,KAC9D,OAAO,CAAC,gBAAgB,CAAC,SAAS,EAAE,yBAAyB,CAAC,CAAC,CAAC;AAErE,wBAAgB,gBAAgB,CAAC,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,YAAY,GAAG,UAAU,CAiCzE"}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import { DEFAULT_PARTITION, PostgreSQLEventStoreDefaultStreamVersion, } from "../types.js";
|
|
2
|
+
/**
 * Builds the readStream operation bound to the given db and logger.
 * Resolves with { events, currentStreamVersion, streamExists }.
 */
export function createReadStream({ db, logger }) {
    return async function readStream(streamId, options) {
        const partition = getPartition(options);
        logger.info({ streamId, options, partition }, "readStream");
        // Head position first, then the (optionally bounded) page of messages.
        const head = await fetchStreamInfo(db, streamId, partition);
        const query = buildEventsQuery({ db, logger }, streamId, partition, parseRangeOptions(options));
        const rows = await query.execute();
        return {
            events: rows.map((row) => mapRowToEvent(row, streamId)),
            currentStreamVersion: head.currentStreamVersion,
            streamExists: head.streamExists,
        };
    };
}
|
|
17
|
+
/**
 * Extracts the optional from/to/maxCount range fields from a read-options
 * value. A missing or non-object argument yields all-undefined bounds; keys
 * that are absent from the object are likewise reported as undefined.
 */
function parseRangeOptions(options) {
    const isObject = Boolean(options) && typeof options === "object";
    const pick = (key) => (isObject && key in options ? options[key] : undefined);
    return {
        from: pick("from"),
        to: pick("to"),
        maxCount: pick("maxCount"),
    };
}
|
|
29
|
+
function buildEventsQuery(deps, streamId, partition, range) {
|
|
30
|
+
const { db } = deps;
|
|
31
|
+
let q = db
|
|
32
|
+
.selectFrom("messages")
|
|
33
|
+
.select([
|
|
34
|
+
"message_type",
|
|
35
|
+
"message_data",
|
|
36
|
+
"message_metadata",
|
|
37
|
+
"stream_position",
|
|
38
|
+
"global_position",
|
|
39
|
+
"message_id",
|
|
40
|
+
])
|
|
41
|
+
.where("stream_id", "=", streamId)
|
|
42
|
+
.where("partition", "=", partition)
|
|
43
|
+
.where("is_archived", "=", false)
|
|
44
|
+
.orderBy("stream_position");
|
|
45
|
+
if (range.from !== undefined) {
|
|
46
|
+
q = q.where("stream_position", ">=", BigInt(range.from));
|
|
47
|
+
}
|
|
48
|
+
if (range.to !== undefined) {
|
|
49
|
+
q = q.where("stream_position", "<=", BigInt(range.to));
|
|
50
|
+
}
|
|
51
|
+
if (range.maxCount !== undefined) {
|
|
52
|
+
q = q.limit(Number(range.maxCount));
|
|
53
|
+
}
|
|
54
|
+
return q;
|
|
55
|
+
}
|
|
56
|
+
/**
 * Converts a raw messages-table row into the event shape returned by
 * readStream. Positions are normalized to bigint (drivers may hand them back
 * as strings or numbers); a null global_position is reported as 0n. Stored
 * metadata is merged in first so the positional fields always win.
 */
function mapRowToEvent(row, streamId) {
    const metadata = {
        ...(row.message_metadata ?? {}),
        messageId: row.message_id,
        streamId,
        streamPosition: BigInt(String(row.stream_position)),
        globalPosition: BigInt(String(row.global_position ?? 0)),
    };
    return {
        kind: "Event",
        type: row.message_type,
        data: row.message_data,
        metadata,
    };
}
|
|
73
|
+
/** Resolves the partition to read from, defaulting when none is provided. */
function getPartition(options) {
    const requested = options?.partition;
    return requested ?? DEFAULT_PARTITION;
}
|
|
76
|
+
async function fetchStreamInfo(executor, streamId, partition) {
|
|
77
|
+
const streamRow = await executor
|
|
78
|
+
.selectFrom("streams")
|
|
79
|
+
.select(["stream_position"])
|
|
80
|
+
.where("stream_id", "=", streamId)
|
|
81
|
+
.where("partition", "=", partition)
|
|
82
|
+
.where("is_archived", "=", false)
|
|
83
|
+
.executeTakeFirst();
|
|
84
|
+
const currentStreamVersion = streamRow
|
|
85
|
+
? streamRow.stream_position
|
|
86
|
+
: PostgreSQLEventStoreDefaultStreamVersion;
|
|
87
|
+
return { currentStreamVersion, streamExists: !!streamRow };
|
|
88
|
+
}
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/projections/index.ts
|
|
21
|
+
var projections_exports = {};
|
|
22
|
+
__export(projections_exports, {
|
|
23
|
+
createProjectionRegistry: () => createProjectionRegistry,
|
|
24
|
+
createProjectionRunner: () => createProjectionRunner
|
|
25
|
+
});
|
|
26
|
+
module.exports = __toCommonJS(projections_exports);
|
|
27
|
+
|
|
28
|
+
// src/types.ts
|
|
29
|
+
/**
 * Merges several projection registries into one. Handlers registered for the
 * same event type are concatenated in argument order, so multiple modules can
 * contribute handlers for the same event.
 */
function createProjectionRegistry(...registries) {
  return registries.reduce((merged, registry) => {
    for (const [eventType, handlers] of Object.entries(registry)) {
      merged[eventType] = (merged[eventType] ?? []).concat(handlers);
    }
    return merged;
  }, {});
}
|
|
38
|
+
|
|
39
|
+
// src/projections/runner.ts
|
|
40
|
+
/**
 * Creates a projection runner that reads a stream in batches and applies the
 * registered handlers, tracking progress per (subscription, partition) in the
 * "subscriptions" table. (Bundled CJS copy of src/projections/runner.ts.)
 */
function createProjectionRunner({ db, readStream, registry }) {
  // Loads the checkpoint row for the subscription, creating it at position 0
  // when absent.
  async function getOrCreateCheckpoint(subscriptionId, partition) {
    const existing = await db.selectFrom("subscriptions").select([
      "subscription_id as subscriptionId",
      "partition",
      "last_processed_position as lastProcessedPosition"
    ]).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).executeTakeFirst();
    if (existing) {
      // Drivers may return bigint columns as strings; normalize to bigint.
      const last = BigInt(
        String(
          existing.lastProcessedPosition
        )
      );
      return {
        subscriptionId,
        partition,
        lastProcessedPosition: last
      };
    }
    // NOTE(review): on a concurrent insert, the conflict clause rewrites
    // last_processed_position with the excluded value (0n), which would reset
    // an existing checkpoint — confirm projection handlers are idempotent.
    await db.insertInto("subscriptions").values({
      subscription_id: subscriptionId,
      partition,
      version: 1,
      last_processed_position: 0n
    }).onConflict(
      (oc) => oc.columns(["subscription_id", "partition", "version"]).doUpdateSet({
        last_processed_position: (eb) => eb.ref("excluded.last_processed_position")
      })
    ).execute();
    return {
      subscriptionId,
      partition,
      lastProcessedPosition: 0n
    };
  }
  // Persists the latest processed stream position for the subscription.
  async function updateCheckpoint(subscriptionId, partition, lastProcessedPosition) {
    await db.updateTable("subscriptions").set({ last_processed_position: lastProcessedPosition }).where("subscription_id", "=", subscriptionId).where("partition", "=", partition).execute();
  }
  // Reads up to batchSize events past the checkpoint and applies every
  // registered handler for each event, advancing the checkpoint per event.
  async function projectEvents(subscriptionId, streamId, opts) {
    const partition = opts?.partition ?? "default_partition";
    const batchSize = BigInt(opts?.batchSize ?? 500);
    const checkpoint = await getOrCreateCheckpoint(subscriptionId, partition);
    // Inclusive range: resume one position past the checkpoint.
    const { events, currentStreamVersion } = await readStream(streamId, {
      from: checkpoint.lastProcessedPosition + 1n,
      to: checkpoint.lastProcessedPosition + batchSize,
      partition
    });
    for (const ev of events) {
      if (!ev) continue;
      const handlers = registry[ev.type] ?? [];
      if (handlers.length === 0) {
        // Unhandled event types still advance the checkpoint so they are not
        // re-read on the next batch.
        await updateCheckpoint(
          subscriptionId,
          partition,
          ev.metadata.streamPosition
        );
        continue;
      }
      const projectionEvent = {
        type: ev.type,
        data: ev.data,
        metadata: {
          streamId: ev.metadata.streamId,
          streamPosition: ev.metadata.streamPosition,
          globalPosition: ev.metadata.globalPosition
        }
      };
      // Handlers run sequentially; the checkpoint advances only after every
      // handler for this event has completed.
      for (const handler of handlers) {
        await handler({ db, partition }, projectionEvent);
      }
      await updateCheckpoint(
        subscriptionId,
        partition,
        projectionEvent.metadata.streamPosition
      );
    }
    return { processed: events.length, currentStreamVersion };
  }
  return { projectEvents };
}
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  createProjectionRegistry,
  createProjectionRunner
});
|
|
@@ -0,0 +1,4 @@
|
|
|
1
|
+
// Public entry point for the projections subpackage: the registry helper,
// the projection-related types, and the projection runner.
export { createProjectionRegistry } from "../types.js";
export type { ProjectionContext, ProjectionEvent, ProjectionEventMetadata, ProjectionHandler, ProjectionRegistry, } from "../types.js";
export { createProjectionRunner } from "./runner.js";
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/projections/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,wBAAwB,EAAE,MAAM,aAAa,CAAC;AACvD,YAAY,EACV,iBAAiB,EACjB,eAAe,EACf,uBAAuB,EACvB,iBAAiB,EACjB,kBAAkB,GACnB,MAAM,aAAa,CAAC;AACrB,OAAO,EAAE,sBAAsB,EAAE,MAAM,aAAa,CAAC"}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import type { DatabaseExecutor, ProjectionRegistry, ReadStream } from "../types.js";
/** Persisted progress marker for one (subscription, partition) pair. */
export type SubscriptionCheckpoint = {
    subscriptionId: string;
    partition: string;
    /** Last stream position whose handlers completed successfully. */
    lastProcessedPosition: bigint;
};
/** Dependencies required to build a projection runner. */
export type ProjectionRunnerDeps<T extends DatabaseExecutor = DatabaseExecutor> = {
    /** Database executor passed to handlers and used for checkpoint bookkeeping. */
    db: T;
    /** Event reader used to fetch batches of stream events. */
    readStream: ReadStream;
    /** Maps event types to the handlers that project them. */
    registry: ProjectionRegistry<T>;
};
/**
 * Creates a runner whose `projectEvents` reads events past the stored
 * checkpoint (up to `batchSize` events, default 500) and applies the
 * registered handlers, returning how many events were processed and the
 * stream's current version.
 */
export declare function createProjectionRunner<T extends DatabaseExecutor = DatabaseExecutor>({ db, readStream, registry }: ProjectionRunnerDeps<T>): {
    projectEvents: (subscriptionId: string, streamId: string, opts?: {
        partition?: string;
        batchSize?: number;
    }) => Promise<{
        processed: number;
        currentStreamVersion: bigint;
    }>;
};
|
|
21
|
+
//# sourceMappingURL=runner.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"runner.d.ts","sourceRoot":"","sources":["../../src/projections/runner.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EACV,gBAAgB,EAEhB,kBAAkB,EAClB,UAAU,EACX,MAAM,aAAa,CAAC;AAErB,MAAM,MAAM,sBAAsB,GAAG;IACnC,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,MAAM,CAAC;IAClB,qBAAqB,EAAE,MAAM,CAAC;CAC/B,CAAC;AAEF,MAAM,MAAM,oBAAoB,CAC9B,CAAC,SAAS,gBAAgB,GAAG,gBAAgB,IAC3C;IACF,EAAE,EAAE,CAAC,CAAC;IACN,UAAU,EAAE,UAAU,CAAC;IACvB,QAAQ,EAAE,kBAAkB,CAAC,CAAC,CAAC,CAAC;CACjC,CAAC;AAEF,wBAAgB,sBAAsB,CACpC,CAAC,SAAS,gBAAgB,GAAG,gBAAgB,EAC7C,EAAE,EAAE,EAAE,UAAU,EAAE,QAAQ,EAAE,EAAE,oBAAoB,CAAC,CAAC,CAAC;oCA4EnC,MAAM,YACZ,MAAM,SACT;QAAE,SAAS,CAAC,EAAE,MAAM,CAAC;QAAC,SAAS,CAAC,EAAE,MAAM,CAAA;KAAE;;;;EAgDpD"}
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
export function createProjectionRunner({ db, readStream, registry }) {
|
|
2
|
+
async function getOrCreateCheckpoint(subscriptionId, partition) {
|
|
3
|
+
const existing = await db
|
|
4
|
+
.selectFrom("subscriptions")
|
|
5
|
+
.select([
|
|
6
|
+
"subscription_id as subscriptionId",
|
|
7
|
+
"partition",
|
|
8
|
+
"last_processed_position as lastProcessedPosition",
|
|
9
|
+
])
|
|
10
|
+
.where("subscription_id", "=", subscriptionId)
|
|
11
|
+
.where("partition", "=", partition)
|
|
12
|
+
.executeTakeFirst();
|
|
13
|
+
if (existing) {
|
|
14
|
+
const last = BigInt(String(existing
|
|
15
|
+
.lastProcessedPosition));
|
|
16
|
+
return {
|
|
17
|
+
subscriptionId,
|
|
18
|
+
partition,
|
|
19
|
+
lastProcessedPosition: last,
|
|
20
|
+
};
|
|
21
|
+
}
|
|
22
|
+
await db
|
|
23
|
+
.insertInto("subscriptions")
|
|
24
|
+
.values({
|
|
25
|
+
subscription_id: subscriptionId,
|
|
26
|
+
partition,
|
|
27
|
+
version: 1,
|
|
28
|
+
last_processed_position: 0n,
|
|
29
|
+
})
|
|
30
|
+
.onConflict((oc) => oc.columns(["subscription_id", "partition", "version"]).doUpdateSet({
|
|
31
|
+
last_processed_position: (eb) => eb.ref("excluded.last_processed_position"),
|
|
32
|
+
}))
|
|
33
|
+
.execute();
|
|
34
|
+
return {
|
|
35
|
+
subscriptionId,
|
|
36
|
+
partition,
|
|
37
|
+
lastProcessedPosition: 0n,
|
|
38
|
+
};
|
|
39
|
+
}
|
|
40
|
+
async function updateCheckpoint(subscriptionId, partition, lastProcessedPosition) {
|
|
41
|
+
await db
|
|
42
|
+
.updateTable("subscriptions")
|
|
43
|
+
.set({ last_processed_position: lastProcessedPosition })
|
|
44
|
+
.where("subscription_id", "=", subscriptionId)
|
|
45
|
+
.where("partition", "=", partition)
|
|
46
|
+
.execute();
|
|
47
|
+
}
|
|
48
|
+
async function projectEvents(subscriptionId, streamId, opts) {
|
|
49
|
+
const partition = opts?.partition ?? "default_partition";
|
|
50
|
+
const batchSize = BigInt(opts?.batchSize ?? 500);
|
|
51
|
+
const checkpoint = await getOrCreateCheckpoint(subscriptionId, partition);
|
|
52
|
+
const { events, currentStreamVersion } = await readStream(streamId, {
|
|
53
|
+
from: checkpoint.lastProcessedPosition + 1n,
|
|
54
|
+
to: checkpoint.lastProcessedPosition + batchSize,
|
|
55
|
+
partition,
|
|
56
|
+
});
|
|
57
|
+
for (const ev of events) {
|
|
58
|
+
if (!ev)
|
|
59
|
+
continue;
|
|
60
|
+
const handlers = registry[ev.type] ?? [];
|
|
61
|
+
if (handlers.length === 0) {
|
|
62
|
+
await updateCheckpoint(subscriptionId, partition, ev.metadata.streamPosition);
|
|
63
|
+
continue;
|
|
64
|
+
}
|
|
65
|
+
const projectionEvent = {
|
|
66
|
+
type: ev.type,
|
|
67
|
+
data: ev.data,
|
|
68
|
+
metadata: {
|
|
69
|
+
streamId: ev.metadata.streamId,
|
|
70
|
+
streamPosition: ev.metadata.streamPosition,
|
|
71
|
+
globalPosition: ev.metadata.globalPosition,
|
|
72
|
+
},
|
|
73
|
+
};
|
|
74
|
+
for (const handler of handlers) {
|
|
75
|
+
await handler({ db, partition }, projectionEvent);
|
|
76
|
+
}
|
|
77
|
+
await updateCheckpoint(subscriptionId, partition, projectionEvent.metadata.streamPosition);
|
|
78
|
+
}
|
|
79
|
+
return { processed: events.length, currentStreamVersion };
|
|
80
|
+
}
|
|
81
|
+
return { projectEvents };
|
|
82
|
+
}
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import type { Kysely, Transaction } from "kysely";
import type { EventStoreDBSchema } from "./db-schema.js";
/** A kysely connection or transaction bound to the event-store schema. */
export type DatabaseExecutor = Kysely<EventStoreDBSchema> | Transaction<EventStoreDBSchema>;
/** Minimal structured-logging contract: an object payload plus an optional message. */
export type Logger = {
    info: (obj: unknown, msg?: string) => void;
    error: (obj: unknown, msg?: string) => void;
    warn?: (obj: unknown, msg?: string) => void;
    debug?: (obj: unknown, msg?: string) => void;
};
/** Shared dependencies injected into the event-store functions. */
export type Dependencies = {
    db: DatabaseExecutor;
    logger: Logger;
};
/** Store-specific options combined with the base read/append option types. */
export type ExtendedOptions = {
    partition?: string;
    streamType?: string;
};
/** Version reported for a stream that does not exist yet. */
export declare const PostgreSQLEventStoreDefaultStreamVersion = 0n;
/** Partition used when a caller does not specify one. */
export declare const DEFAULT_PARTITION: "default_partition";
/** Position information attached to each event handed to a projection handler. */
export type ProjectionEventMetadata = {
    streamId: string;
    streamPosition: bigint;
    globalPosition: bigint;
};
/** Event shape consumed by projection handlers. */
export type ProjectionEvent = {
    type: string;
    data: unknown;
    metadata: ProjectionEventMetadata;
};
/** Per-invocation context given to a projection handler. */
export type ProjectionContext<T = DatabaseExecutor> = {
    db: T;
    partition: string;
};
/** A single projection handler; may be synchronous or asynchronous. */
export type ProjectionHandler<T = DatabaseExecutor> = (ctx: ProjectionContext<T>, event: ProjectionEvent) => void | Promise<void>;
/** Maps an event type to the list of handlers that project it. */
export type ProjectionRegistry<T = DatabaseExecutor> = Record<string, ProjectionHandler<T>[]>;
/** Merges registries; handlers for the same event type are concatenated in argument order. */
export declare function createProjectionRegistry<T = DatabaseExecutor>(...registries: ProjectionRegistry<T>[]): ProjectionRegistry<T>;
export type { ReadStream } from "./event-store/read-stream.js";
|
|
38
|
+
//# sourceMappingURL=types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,QAAQ,CAAC;AAClD,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,gBAAgB,CAAC;AAGzD,MAAM,MAAM,gBAAgB,GACxB,MAAM,CAAC,kBAAkB,CAAC,GAC1B,WAAW,CAAC,kBAAkB,CAAC,CAAC;AAEpC,MAAM,MAAM,MAAM,GAAG;IACnB,IAAI,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,KAAK,IAAI,CAAC;IAC3C,KAAK,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,KAAK,IAAI,CAAC;IAC5C,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,KAAK,IAAI,CAAC;IAC5C,KAAK,CAAC,EAAE,CAAC,GAAG,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,KAAK,IAAI,CAAC;CAC9C,CAAC;AAEF,MAAM,MAAM,YAAY,GAAG;IACzB,EAAE,EAAE,gBAAgB,CAAC;IACrB,MAAM,EAAE,MAAM,CAAC;CAChB,CAAC;AAEF,MAAM,MAAM,eAAe,GAAG;IAC5B,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,UAAU,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC;AAEF,eAAO,MAAM,wCAAwC,KAAK,CAAC;AAC3D,eAAO,MAAM,iBAAiB,EAAG,mBAA4B,CAAC;AAG9D,MAAM,MAAM,uBAAuB,GAAG;IACpC,QAAQ,EAAE,MAAM,CAAC;IACjB,cAAc,EAAE,MAAM,CAAC;IACvB,cAAc,EAAE,MAAM,CAAC;CACxB,CAAC;AAEF,MAAM,MAAM,eAAe,GAAG;IAC5B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,OAAO,CAAC;IACd,QAAQ,EAAE,uBAAuB,CAAC;CACnC,CAAC;AAEF,MAAM,MAAM,iBAAiB,CAAC,CAAC,GAAG,gBAAgB,IAAI;IACpD,EAAE,EAAE,CAAC,CAAC;IACN,SAAS,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,iBAAiB,CAAC,CAAC,GAAG,gBAAgB,IAAI,CACpD,GAAG,EAAE,iBAAiB,CAAC,CAAC,CAAC,EACzB,KAAK,EAAE,eAAe,KACnB,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;AAE1B,MAAM,MAAM,kBAAkB,CAAC,CAAC,GAAG,gBAAgB,IAAI,MAAM,CAC3D,MAAM,EACN,iBAAiB,CAAC,CAAC,CAAC,EAAE,CACvB,CAAC;AAEF,wBAAgB,wBAAwB,CAAC,CAAC,GAAG,gBAAgB,EAC3D,GAAG,UAAU,EAAE,kBAAkB,CAAC,CAAC,CAAC,EAAE,GACrC,kBAAkB,CAAC,CAAC,CAAC,CAYvB;AAGD,YAAY,EAAE,UAAU,EAAE,MAAM,8BAA8B,CAAC"}
|
package/dist/types.js
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
/** Version reported for a stream that does not exist yet. */
export const PostgreSQLEventStoreDefaultStreamVersion = 0n;
/** Partition used when callers do not specify one. */
export const DEFAULT_PARTITION = "default_partition";
/**
 * Merges several projection registries into a single registry. Handlers
 * registered for the same event type are concatenated in argument order, so
 * one projection runner can serve handlers contributed by several modules
 * (e.g. the same read-model projected across multiple partitions).
 */
export function createProjectionRegistry(...registries) {
    return registries.reduce((merged, registry) => {
        for (const [eventType, handlers] of Object.entries(registry)) {
            merged[eventType] = (merged[eventType] ?? []).concat(handlers);
        }
        return merged;
    }, {});
}
|
package/package.json
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@wataruoguchi/emmett-event-store-kysely",
|
|
3
|
+
"publishConfig": {
|
|
4
|
+
"access": "public"
|
|
5
|
+
},
|
|
6
|
+
"version": "1.1.1",
|
|
7
|
+
"description": "Emmett Event Store with Kysely",
|
|
8
|
+
"author": "Wataru Oguchi",
|
|
9
|
+
"license": "MIT",
|
|
10
|
+
"repository": {
|
|
11
|
+
"type": "git",
|
|
12
|
+
"url": "https://github.com/wataruoguchi/poc-emmett.git"
|
|
13
|
+
},
|
|
14
|
+
"homepage": "https://github.com/wataruoguchi/poc-emmett",
|
|
15
|
+
"bugs": "https://github.com/wataruoguchi/poc-emmett/issues",
|
|
16
|
+
"type": "module",
|
|
17
|
+
"main": "dist/event-store/index.js",
|
|
18
|
+
"module": "dist/event-store/index.js",
|
|
19
|
+
"types": "dist/types.d.ts",
|
|
20
|
+
"files": [
|
|
21
|
+
"dist"
|
|
22
|
+
],
|
|
23
|
+
"exports": {
|
|
24
|
+
".": {
|
|
25
|
+
"types": "./dist/event-store/index.d.ts",
|
|
26
|
+
"import": "./dist/event-store/index.js",
|
|
27
|
+
"require": "./dist/event-store/index.cjs"
|
|
28
|
+
},
|
|
29
|
+
"./event-store": {
|
|
30
|
+
"types": "./dist/event-store/index.d.ts",
|
|
31
|
+
"import": "./dist/event-store/index.js",
|
|
32
|
+
"require": "./dist/event-store/index.cjs"
|
|
33
|
+
},
|
|
34
|
+
"./projections": {
|
|
35
|
+
"types": "./dist/projections/index.d.ts",
|
|
36
|
+
"import": "./dist/projections/index.js",
|
|
37
|
+
"require": "./dist/projections/index.cjs"
|
|
38
|
+
}
|
|
39
|
+
},
|
|
40
|
+
"scripts": {
|
|
41
|
+
"build": "rm -rf dist && tsc -p tsconfig.build.json && tsup src/event-store/index.ts src/projections/index.ts",
|
|
42
|
+
"type-check": "tsc --noEmit",
|
|
43
|
+
"test": "vitest run",
|
|
44
|
+
"release": "semantic-release",
|
|
45
|
+
"release:dry-run": "semantic-release --dry-run"
|
|
46
|
+
},
|
|
47
|
+
"devDependencies": {
|
|
48
|
+
"@semantic-release/commit-analyzer": "^13.0.1",
|
|
49
|
+
"@semantic-release/github": "^11.0.6",
|
|
50
|
+
"@semantic-release/npm": "^12.0.2",
|
|
51
|
+
"@semantic-release/release-notes-generator": "^14.1.0",
|
|
52
|
+
"@vitest/coverage-v8": "^3.2.4",
|
|
53
|
+
"semantic-release": "^24.2.9",
|
|
54
|
+
"tsup": "^8.5.0",
|
|
55
|
+
"typescript": "^5.8.3",
|
|
56
|
+
"vitest": "^3.2.4"
|
|
57
|
+
},
|
|
58
|
+
"peerDependencies": {
|
|
59
|
+
"@event-driven-io/emmett": "^0.38.5",
|
|
60
|
+
"kysely": "^0.28.7"
|
|
61
|
+
},
|
|
62
|
+
"optionalDependencies": {
|
|
63
|
+
"@rollup/rollup-linux-x64-gnu": "4.9.5"
|
|
64
|
+
},
|
|
65
|
+
"keywords": [
|
|
66
|
+
"emmett",
|
|
67
|
+
"event-store",
|
|
68
|
+
"kysely",
|
|
69
|
+
"postgres",
|
|
70
|
+
"postgresql",
|
|
71
|
+
"Event Sourcing",
|
|
72
|
+
"event-sourcing",
|
|
73
|
+
"read-model",
|
|
74
|
+
"read-models",
|
|
75
|
+
"read-models-projection",
|
|
76
|
+
"read-models-projections"
|
|
77
|
+
]
|
|
78
|
+
}
|