@wataruoguchi/emmett-event-store-kysely 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +127 -0
- package/dist/db-schema.d.ts +34 -0
- package/dist/db-schema.d.ts.map +1 -0
- package/dist/db-schema.js +3 -0
- package/dist/event-store/consumers.d.ts +23 -0
- package/dist/event-store/consumers.d.ts.map +1 -0
- package/dist/event-store/consumers.js +155 -0
- package/dist/event-store/kysely-event-store.d.ts +42 -0
- package/dist/event-store/kysely-event-store.d.ts.map +1 -0
- package/dist/event-store/kysely-event-store.js +265 -0
- package/dist/index.cjs +600 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +5 -0
- package/dist/projections/runner.d.ts +22 -0
- package/dist/projections/runner.d.ts.map +1 -0
- package/dist/projections/runner.js +82 -0
- package/dist/projections/snapshot-projection.d.ts +120 -0
- package/dist/projections/snapshot-projection.d.ts.map +1 -0
- package/dist/projections/snapshot-projection.js +135 -0
- package/dist/types.d.ts +77 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +15 -0
- package/package.json +72 -0
package/README.md
ADDED
@@ -0,0 +1,127 @@
# @wataruoguchi/emmett-event-store-kysely

A Kysely-based event store implementation for [Emmett](https://github.com/event-driven-io/emmett), providing event sourcing capabilities with PostgreSQL.

## 📚 Documentation

**👉 [View Complete Documentation →](https://wataruoguchi.github.io/emmett-libs/emmett-event-store-kysely)**

## Features

- **Event Store** - Full event sourcing with Kysely and PostgreSQL
- **Snapshot Projections** - Recommended approach for read models
- **Event Consumer** - Continuous background event processing
- **Type Safety** - Full TypeScript support with discriminated unions
- **Multi-Tenancy** - Built-in partition support

## Installation

```bash
npm install @wataruoguchi/emmett-event-store-kysely @event-driven-io/emmett kysely pg
```

## Quick Start

### 1. Database Setup

Set up the required PostgreSQL tables using [our migration example](https://github.com/wataruoguchi/emmett-libs/blob/main/packages/emmett-event-store-kysely/database/migrations/1758758113676_event_sourcing_migration_example.ts):

```typescript
import { Kysely } from "kysely";

// Required tables: messages, streams, subscriptions
```

A read model table is expected to have the following columns (a migration sketch follows the list):

- stream_id (uuid)
- last_stream_position (bigint)
- last_global_position (bigint)
- partition (text)
- snapshot (jsonb)

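For instance, a read-model table for the `carts` example used in step 4 might be created with a Kysely migration like this. This is a minimal sketch: the key columns `tenant_id`/`cart_id` and the denormalized `currency`/`total` columns are taken from the step 4 registry example, and everything else beyond the column list above is illustrative.

```typescript
import { Kysely } from "kysely";

// Hypothetical migration for a `carts` read model. The five columns from the
// list above are required; the key and denormalized columns mirror step 4.
export async function up(db: Kysely<unknown>): Promise<void> {
  await db.schema
    .createTable("carts")
    .addColumn("tenant_id", "text", (col) => col.notNull())
    .addColumn("cart_id", "text", (col) => col.notNull())
    .addColumn("stream_id", "uuid", (col) => col.notNull())
    .addColumn("last_stream_position", "bigint", (col) => col.notNull())
    .addColumn("last_global_position", "bigint", (col) => col.notNull())
    .addColumn("partition", "text", (col) => col.notNull())
    .addColumn("snapshot", "jsonb", (col) => col.notNull())
    // Optional denormalized columns populated by mapToColumns (step 4).
    .addColumn("currency", "text")
    .addColumn("total", "numeric")
    .addPrimaryKeyConstraint("carts_pkey", ["tenant_id", "cart_id", "partition"])
    .execute();
}

export async function down(db: Kysely<unknown>): Promise<void> {
  await db.schema.dropTable("carts").execute();
}
```
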
### 2. Create Event Store

```typescript
import { getKyselyEventStore } from "@wataruoguchi/emmett-event-store-kysely";
import { Kysely, PostgresDialect } from "kysely";
import { Pool } from "pg";

const db = new Kysely({
  dialect: new PostgresDialect({
    pool: new Pool({ connectionString: process.env.DATABASE_URL }),
  }),
});

const eventStore = getKyselyEventStore({
  db,
  logger: console,
});
```

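With the store in hand you can append and read events. A minimal sketch follows; the event shape, stream name, and partition are illustrative, while the `partition` and `streamType` options and the append result fields come from this package's type declarations (shown later in this diff).

```typescript
// Append an illustrative event to a tenant-scoped stream.
const appendResult = await eventStore.appendToStream(
  "cart-123",
  [{ type: "CartCreated", data: { currency: "CAD" } }],
  { partition: "tenant-456", streamType: "cart" },
);
// appendResult.nextExpectedStreamVersion, appendResult.lastEventGlobalPosition,
// and appendResult.createdNewStream are available for optimistic concurrency.

// Read the stream back from the same partition.
const { events } = await eventStore.readStream("cart-123", {
  partition: "tenant-456",
});
```
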
### 3. Write Events & Commands & Business Logic & State

Please read <https://event-driven-io.github.io/emmett/getting-started.html>. A minimal sketch follows the list below.

- [Events](https://event-driven-io.github.io/emmett/getting-started.html#events)
- [Commands](https://event-driven-io.github.io/emmett/getting-started.html#commands)
- [Business logic and decisions](https://event-driven-io.github.io/emmett/getting-started.html#business-logic-and-decisions)
- [Building state from events](https://event-driven-io.github.io/emmett/getting-started.html#building-state-from-events)

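As a concrete sketch, the `domainEvolve` and `initialState` reused in step 4 could come from a write model like this, written in the style of Emmett's getting-started guide. The cart event shapes and state fields are illustrative, not part of this package.

```typescript
import type { Event } from "@event-driven-io/emmett";

// Illustrative cart events; real projects derive these from their domain.
type CartCreated = Event<"CartCreated", { currency: string }>;
type ItemAdded = Event<"ItemAdded", { price: number }>;
type CartCheckedOut = Event<"CartCheckedOut", Record<string, never>>;
type CartEvent = CartCreated | ItemAdded | CartCheckedOut;

type CartState = {
  status: "empty" | "open" | "checkedOut";
  currency: string | null;
  total: number;
};

export function initialState(): CartState {
  return { status: "empty", currency: null, total: 0 };
}

// The same evolve function can be reused by the snapshot projection in step 4.
export function domainEvolve(state: CartState, event: CartEvent): CartState {
  switch (event.type) {
    case "CartCreated":
      return { ...state, status: "open", currency: event.data.currency };
    case "ItemAdded":
      return { ...state, total: state.total + event.data.price };
    case "CartCheckedOut":
      return { ...state, status: "checkedOut" };
    default:
      return state;
  }
}
```
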
### 4. Build Read Models

This package supports "Snapshot Projections".

```typescript
import {
  createSnapshotProjectionRegistry
} from "@wataruoguchi/emmett-event-store-kysely/projections";

// Reuse your write model's evolve function!
const registry = createSnapshotProjectionRegistry(
  ["CartCreated", "ItemAdded", "CartCheckedOut"],
  {
    tableName: "carts",
    primaryKeys: ["tenant_id", "cart_id", "partition"],
    extractKeys: (event, partition) => ({
      tenant_id: event.data.eventMeta.tenantId,
      cart_id: event.data.eventMeta.cartId,
      partition,
    }),
    evolve: domainEvolve, // Reuse from write model!
    initialState,
    mapToColumns: (state) => ({ // Optional: denormalize for queries
      currency: state.currency,
      total: state.status === "checkedOut" ? state.total : null,
    }),
  }
);
```

### 5. Process Events and Update Read Model

```typescript
import { createProjectionRunner } from "@wataruoguchi/emmett-event-store-kysely/projections";

const runner = createProjectionRunner({
  db,
  readStream: eventStore.readStream,
  registry,
});

await runner.projectEvents("subscription-id", "cart-123", {
  partition: "tenant-456"
});
```

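For continuous background processing (the Event Consumer feature above), the package also ships a polling consumer. A minimal sketch follows, assuming `createKyselyEventStoreConsumer` is importable from the package root; its actual shape is defined in `package/dist/event-store/consumers.d.ts` later in this diff.

```typescript
// Import path is an assumption; see dist/event-store/consumers.d.ts.
import { createKyselyEventStoreConsumer } from "@wataruoguchi/emmett-event-store-kysely";

// A polling consumer that projects events as they arrive.
const consumer = createKyselyEventStoreConsumer({
  db,
  logger: console,
  consumerName: "cart-projections",
  batchSize: 100,        // events per poll
  pollingInterval: 1000, // ms between polls
});

// Handler first, event type second (per the declaration file).
consumer.subscribe(async (event) => {
  await runner.projectEvents("subscription-id", event.metadata.streamName, {
    partition: "tenant-456",
  });
}, "CartCreated");

await consumer.start();
// ... later, on shutdown:
await consumer.stop();
```
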
## Examples

- [Working Example](https://github.com/wataruoguchi/emmett-libs/tree/main/example/) - Complete application with carts and generators
- [Migration Example](https://github.com/wataruoguchi/emmett-libs/blob/main/packages/emmett-event-store-kysely/database/migrations/1758758113676_event_sourcing_migration_example.ts) - Database setup

## License

MIT

## Contributing

Contributions are welcome! Please see our [GitHub repository](https://github.com/wataruoguchi/emmett-libs) for issues and PRs.

package/dist/db-schema.d.ts
ADDED
@@ -0,0 +1,34 @@
export interface MessagesTable {
    created: Date;
    global_position: bigint | null;
    is_archived: boolean;
    message_data: unknown;
    message_id: string;
    message_kind: string;
    message_metadata: unknown;
    message_schema_version: string;
    message_type: string;
    partition: string;
    stream_id: string;
    stream_position: bigint;
}
export interface StreamsTable {
    is_archived: boolean;
    partition: string;
    stream_id: string;
    stream_metadata: unknown;
    stream_position: bigint;
    stream_type: string;
}
export interface SubscriptionsTable {
    last_processed_position: bigint;
    partition: string;
    subscription_id: string;
    version: number;
}
export interface EventStoreDBSchema {
    messages: MessagesTable;
    streams: StreamsTable;
    subscriptions: SubscriptionsTable;
}
//# sourceMappingURL=db-schema.d.ts.map
package/dist/db-schema.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"db-schema.d.ts","sourceRoot":"","sources":["../src/db-schema.ts"],"names":[],"mappings":"AAGA,MAAM,WAAW,aAAa;IAC5B,OAAO,EAAE,IAAI,CAAC;IACd,eAAe,EAAE,MAAM,GAAG,IAAI,CAAC;IAC/B,WAAW,EAAE,OAAO,CAAC;IACrB,YAAY,EAAE,OAAO,CAAC;IACtB,UAAU,EAAE,MAAM,CAAC;IACnB,YAAY,EAAE,MAAM,CAAC;IACrB,gBAAgB,EAAE,OAAO,CAAC;IAC1B,sBAAsB,EAAE,MAAM,CAAC;IAC/B,YAAY,EAAE,MAAM,CAAC;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,eAAe,EAAE,MAAM,CAAC;CACzB;AAED,MAAM,WAAW,YAAY;IAC3B,WAAW,EAAE,OAAO,CAAC;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,eAAe,EAAE,OAAO,CAAC;IACzB,eAAe,EAAE,MAAM,CAAC;IACxB,WAAW,EAAE,MAAM,CAAC;CACrB;AAED,MAAM,WAAW,kBAAkB;IACjC,uBAAuB,EAAE,MAAM,CAAC;IAChC,SAAS,EAAE,MAAM,CAAC;IAClB,eAAe,EAAE,MAAM,CAAC;IACxB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,kBAAkB;IACjC,QAAQ,EAAE,aAAa,CAAC;IACxB,OAAO,EAAE,YAAY,CAAC;IACtB,aAAa,EAAE,kBAAkB,CAAC;CACnC"}
package/dist/event-store/consumers.d.ts
ADDED
@@ -0,0 +1,23 @@
import type { Event, ReadEvent, ReadEventMetadataWithGlobalPosition } from "@event-driven-io/emmett";
import type { Dependencies } from "../types.js";
export type KyselyEventStoreConsumerConfig = {
    /** Consumer name for tracking subscription state */
    consumerName?: string;
    /** Batch size for processing events */
    batchSize?: number;
    /** Polling interval in milliseconds */
    pollingInterval?: number;
};
export type KyselyEventStoreConsumer = {
    /** Start consuming events */
    start(): Promise<void>;
    /** Stop consuming events */
    stop(): Promise<void>;
    /** Subscribe to specific event types */
    subscribe<EventType extends Event>(handler: (event: ReadEvent<EventType, ReadEventMetadataWithGlobalPosition>) => Promise<void> | void, eventType: string): void;
    /** Subscribe to all events */
    subscribeToAll(handler: (event: ReadEvent<Event, ReadEventMetadataWithGlobalPosition>) => Promise<void> | void): void;
};
export declare function createKyselyEventStoreConsumer({ db, logger, consumerName, batchSize, pollingInterval, }: Dependencies & KyselyEventStoreConsumerConfig): KyselyEventStoreConsumer;
export declare function createKyselyEventStoreConsumerWithDefaults(deps: Dependencies, config?: KyselyEventStoreConsumerConfig): KyselyEventStoreConsumer;
//# sourceMappingURL=consumers.d.ts.map
package/dist/event-store/consumers.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"consumers.d.ts","sourceRoot":"","sources":["../../src/event-store/consumers.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACV,KAAK,EACL,SAAS,EACT,mCAAmC,EACpC,MAAM,yBAAyB,CAAC;AACjC,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,MAAM,8BAA8B,GAAG;IAC3C,oDAAoD;IACpD,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,uCAAuC;IACvC,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,uCAAuC;IACvC,eAAe,CAAC,EAAE,MAAM,CAAC;CAC1B,CAAC;AAEF,MAAM,MAAM,wBAAwB,GAAG;IACrC,6BAA6B;IAC7B,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IACvB,4BAA4B;IAC5B,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IACtB,wCAAwC;IACxC,SAAS,CAAC,SAAS,SAAS,KAAK,EAC/B,OAAO,EAAE,CACP,KAAK,EAAE,SAAS,CAAC,SAAS,EAAE,mCAAmC,CAAC,KAC7D,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,EACzB,SAAS,EAAE,MAAM,GAChB,IAAI,CAAC;IACR,8BAA8B;IAC9B,cAAc,CACZ,OAAO,EAAE,CACP,KAAK,EAAE,SAAS,CAAC,KAAK,EAAE,mCAAmC,CAAC,KACzD,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,GACxB,IAAI,CAAC;CACT,CAAC;AAEF,wBAAgB,8BAA8B,CAAC,EAC7C,EAAE,EACF,MAAM,EACN,YAAiC,EACjC,SAAe,EACf,eAAsB,GACvB,EAAE,YAAY,GAAG,8BAA8B,GAAG,wBAAwB,CAkM1E;AAGD,wBAAgB,0CAA0C,CACxD,IAAI,EAAE,YAAY,EAClB,MAAM,GAAE,8BAAmC,GAC1C,wBAAwB,CAK1B"}
package/dist/event-store/consumers.js
ADDED
@@ -0,0 +1,155 @@
export function createKyselyEventStoreConsumer({ db, logger, consumerName = "default-consumer", batchSize = 100, pollingInterval = 1000, }) {
    let isRunning = false;
    let lastProcessedPosition = 0n;
    const eventHandlers = new Map();
    const allEventHandlers = [];
    let pollingTimer = null;
    const processEvents = async () => {
        if (!isRunning)
            return;
        try {
            // Get events from the last processed position
            const events = await db
                .selectFrom("messages")
                .select([
                    "message_type",
                    "message_data",
                    "message_metadata",
                    "stream_position",
                    "global_position",
                    "message_id",
                    "stream_id",
                ])
                .where("global_position", ">", lastProcessedPosition)
                .where("is_archived", "=", false)
                .orderBy("global_position")
                .limit(batchSize)
                .execute();
            if (events.length === 0) {
                return;
            }
            // Process each event
            for (const row of events) {
                const event = {
                    kind: "Event",
                    type: row.message_type,
                    data: row.message_data,
                    metadata: {
                        ...row.message_metadata,
                        messageId: row.message_id,
                        streamName: row.stream_id,
                        streamPosition: BigInt(String(row.stream_position)),
                        globalPosition: BigInt(String(row.global_position)),
                    },
                };
                // Call type-specific handlers
                const typeHandlers = eventHandlers.get(row.message_type) || [];
                for (const handler of typeHandlers) {
                    try {
                        await handler(event);
                    }
                    catch (error) {
                        logger.error({ error, event }, `Error processing event ${row.message_type}`);
                    }
                }
                // Call all-event handlers
                for (const handler of allEventHandlers) {
                    try {
                        await handler(event);
                    }
                    catch (error) {
                        logger.error({ error, event }, "Error processing event in all-event handler");
                    }
                }
                // Update last processed position
                const globalPos = row.global_position;
                if (globalPos !== null) {
                    lastProcessedPosition = BigInt(String(globalPos));
                }
            }
            // Update subscription tracking
            await updateSubscriptionPosition();
        }
        catch (error) {
            logger.error({ error }, "Error processing events");
        }
    };
    const updateSubscriptionPosition = async () => {
        try {
            await db
                .insertInto("subscriptions")
                .values({
                    consumer_name: consumerName,
                    last_processed_position: lastProcessedPosition,
                    last_processed_transaction_id: lastProcessedPosition,
                    created_at: new Date(),
                    updated_at: new Date(),
                })
                .onConflict((oc) => oc.column("consumer_name").doUpdateSet({
                    last_processed_position: lastProcessedPosition,
                    last_processed_transaction_id: lastProcessedPosition,
                    updated_at: new Date(),
                }))
                .execute();
        }
        catch (error) {
            logger.error({ error }, "Error updating subscription position");
        }
    };
    const loadLastProcessedPosition = async () => {
        try {
            const subscription = await db
                .selectFrom("subscriptions")
                .select(["last_processed_position"])
                .where("consumer_name", "=", consumerName)
                .executeTakeFirst();
            if (subscription) {
                lastProcessedPosition = BigInt(String(subscription.last_processed_position));
            }
        }
        catch (error) {
            logger.error({ error }, "Error loading last processed position");
        }
    };
    return {
        async start() {
            if (isRunning)
                return;
            isRunning = true;
            await loadLastProcessedPosition();
            logger.info({ consumerName }, "Starting event store consumer");
            pollingTimer = setInterval(processEvents, pollingInterval);
        },
        async stop() {
            if (!isRunning)
                return;
            isRunning = false;
            if (pollingTimer) {
                clearInterval(pollingTimer);
                pollingTimer = null;
            }
            logger.info({ consumerName }, "Stopped event store consumer");
        },
        subscribe(handler, eventType) {
            if (!eventHandlers.has(eventType)) {
                eventHandlers.set(eventType, []);
            }
            const handlers = eventHandlers.get(eventType);
            if (handlers) {
                // Type assertion needed because we're storing handlers for specific event types
                // in a generic Map that accepts Event handlers
                handlers.push(handler);
            }
        },
        subscribeToAll(handler) {
            allEventHandlers.push(handler);
        },
    };
}
// Helper function to create consumer with default options
export function createKyselyEventStoreConsumerWithDefaults(deps, config = {}) {
    return createKyselyEventStoreConsumer({
        ...deps,
        ...config,
    });
}
package/dist/event-store/kysely-event-store.d.ts
ADDED
@@ -0,0 +1,42 @@
import { type AppendToStreamOptions, type AppendToStreamResultWithGlobalPosition, type Event, type EventStore, type EventStoreSessionFactory, type ReadEventMetadataWithGlobalPosition, type ReadStreamOptions, type ReadStreamResult } from "@event-driven-io/emmett";
import type { Kysely } from "kysely";
import { type Dependencies, type ExtendedOptions } from "../types.js";
type KyselyReadEventMetadata = ReadEventMetadataWithGlobalPosition;
type ExtendedAppendToStreamOptions = AppendToStreamOptions & ExtendedOptions;
export type ProjectionReadStreamOptions = {
    from?: bigint;
    to?: bigint;
    partition?: string;
    maxCount?: bigint;
};
export interface KyselyEventStore extends EventStore<KyselyReadEventMetadata>, EventStoreSessionFactory<KyselyEventStore> {
    readStream<EventType extends Event>(streamName: string, options?: ReadStreamOptions<bigint> | ProjectionReadStreamOptions): Promise<ReadStreamResult<EventType, KyselyReadEventMetadata>>;
    appendToStream<EventType extends Event>(streamName: string, events: EventType[], options?: ExtendedAppendToStreamOptions): Promise<AppendToStreamResultWithGlobalPosition>;
    close(): Promise<void>;
    schema: {
        sql(): string;
        print(): void;
        migrate(): Promise<void>;
    };
}
export type KyselyEventStoreOptions = {
    /** Database connection options */
    connectionOptions?: {
        /** Custom database executor (Kysely instance) */
        db?: Kysely<unknown>;
    };
    /** Schema management options */
    schema?: {
        /** Auto-migration strategy */
        autoMigration?: "CreateOrUpdate" | "None";
    };
    /** Hooks for lifecycle events */
    hooks?: {
        /** Called after schema is created */
        onAfterSchemaCreated?: () => Promise<void> | void;
    };
};
export declare const defaultKyselyOptions: KyselyEventStoreOptions;
export declare const getKyselyEventStore: (deps: Dependencies) => KyselyEventStore;
export {};
//# sourceMappingURL=kysely-event-store.d.ts.map
package/dist/event-store/kysely-event-store.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"kysely-event-store.d.ts","sourceRoot":"","sources":["../../src/event-store/kysely-event-store.ts"],"names":[],"mappings":"AACA,OAAO,EAIL,KAAK,qBAAqB,EAC1B,KAAK,sCAAsC,EAC3C,KAAK,KAAK,EACV,KAAK,UAAU,EAEf,KAAK,wBAAwB,EAE7B,KAAK,mCAAmC,EACxC,KAAK,iBAAiB,EACtB,KAAK,gBAAgB,EACtB,MAAM,yBAAyB,CAAC;AACjC,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,QAAQ,CAAC;AACrC,OAAO,EAGL,KAAK,YAAY,EACjB,KAAK,eAAe,EACrB,MAAM,aAAa,CAAC;AAErB,KAAK,uBAAuB,GAAG,mCAAmC,CAAC;AACnE,KAAK,6BAA6B,GAAG,qBAAqB,GAAG,eAAe,CAAC;AAI7E,MAAM,MAAM,2BAA2B,GAAG;IACxC,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,EAAE,CAAC,EAAE,MAAM,CAAC;IACZ,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF,MAAM,WAAW,gBACf,SAAQ,UAAU,CAAC,uBAAuB,CAAC,EACzC,wBAAwB,CAAC,gBAAgB,CAAC;IAE5C,UAAU,CAAC,SAAS,SAAS,KAAK,EAChC,UAAU,EAAE,MAAM,EAClB,OAAO,CAAC,EAAE,iBAAiB,CAAC,MAAM,CAAC,GAAG,2BAA2B,GAChE,OAAO,CAAC,gBAAgB,CAAC,SAAS,EAAE,uBAAuB,CAAC,CAAC,CAAC;IACjE,cAAc,CAAC,SAAS,SAAS,KAAK,EACpC,UAAU,EAAE,MAAM,EAClB,MAAM,EAAE,SAAS,EAAE,EACnB,OAAO,CAAC,EAAE,6BAA6B,GACtC,OAAO,CAAC,sCAAsC,CAAC,CAAC;IACnD,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IACvB,MAAM,EAAE;QACN,GAAG,IAAI,MAAM,CAAC;QACd,KAAK,IAAI,IAAI,CAAC;QACd,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;KAC1B,CAAC;CACH;AAED,MAAM,MAAM,uBAAuB,GAAG;IACpC,kCAAkC;IAClC,iBAAiB,CAAC,EAAE;QAClB,iDAAiD;QACjD,EAAE,CAAC,EAAE,MAAM,CAAC,OAAO,CAAC,CAAC;KACtB,CAAC;IACF,gCAAgC;IAChC,MAAM,CAAC,EAAE;QACP,8BAA8B;QAC9B,aAAa,CAAC,EAAE,gBAAgB,GAAG,MAAM,CAAC;KAC3C,CAAC;IACF,iCAAiC;IACjC,KAAK,CAAC,EAAE;QACN,qCAAqC;QACrC,oBAAoB,CAAC,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;KACnD,CAAC;CACH,CAAC;AAEF,eAAO,MAAM,oBAAoB,EAAE,uBAIlC,CAAC;AAEF,eAAO,MAAM,mBAAmB,GAAI,MAAM,YAAY,KAAG,gBAgLxD,CAAC"}
package/dist/event-store/kysely-event-store.js
ADDED
@@ -0,0 +1,265 @@
// biome-ignore assist/source/organizeImports: retain import order similar to app code
import { assertExpectedVersionMatchesCurrent, } from "@event-driven-io/emmett";
import { DEFAULT_PARTITION, PostgreSQLEventStoreDefaultStreamVersion, } from "../types.js";
export const defaultKyselyOptions = {
    schema: {
        autoMigration: "CreateOrUpdate",
    },
};
export const getKyselyEventStore = (deps) => {
    const { db, logger, inTransaction = false } = deps;
    const eventStore = {
        /**
         * @description We do not use schema management in this package.
         */
        schema: {
            sql: () => "",
            print: () => console.log(""),
            migrate: async () => Promise.resolve(),
        },
        /**
         * Provide a session-bound event store using a Kysely transaction.
         * All operations within the callback will share the same DB transaction.
         */
        async withSession(callback) {
            return await db.transaction().execute(async (trx) => {
                const sessionEventStore = getKyselyEventStore({
                    db: trx,
                    logger,
                    inTransaction: true,
                });
                return await callback({
                    eventStore: sessionEventStore,
                    close: () => Promise.resolve(),
                });
            });
        },
        async aggregateStream(streamName, options) {
            const { evolve, initialState, read } = options;
            logger.debug?.({ streamName, options }, "aggregateStream");
            const expectedStreamVersion = read?.expectedStreamVersion;
            const result = await eventStore.readStream(streamName, read);
            assertExpectedVersionMatchesCurrent(result.currentStreamVersion, expectedStreamVersion, PostgreSQLEventStoreDefaultStreamVersion);
            const state = result.events.reduce((state, event) => (event ? evolve(state, event) : state), initialState());
            return {
                state,
                currentStreamVersion: result.currentStreamVersion,
                streamExists: result.streamExists,
            };
        },
        async readStream(streamName, options) {
            const partition = getPartition(options);
            logger.debug?.({ streamName, options, partition }, "readStream");
            const { currentStreamVersion, streamExists } = await fetchStreamInfo(db, streamName, partition);
            const range = parseRangeOptions(options);
            const rows = await buildEventsQuery({ db, logger }, streamName, partition, range).execute();
            const events = rows.map((row) => mapRowToEvent(row, streamName));
            return {
                events,
                currentStreamVersion,
                streamExists,
            };
        },
        async appendToStream(streamName, events, options) {
            const streamType = getStreamType(options);
            const partition = getPartition(options);
            const expected = options?.expectedStreamVersion;
            logger.debug?.({ streamName, events, options, partition }, "appendToStream");
            ensureEventsNotEmpty(events, expected);
            // It may be called within a transaction via withSession.
            const executeOn = async (executor) => {
                const { currentStreamVersion, streamExists } = await fetchStreamInfo(executor, streamName, partition);
                assertExpectedVersion(expected, currentStreamVersion, streamExists);
                const basePos = currentStreamVersion;
                const nextStreamPosition = computeNextStreamPosition(basePos, events.length);
                await upsertStreamRow(executor, streamName, partition, streamType, basePos, nextStreamPosition, expected, streamExists);
                const messagesToInsert = buildMessagesToInsert(events, basePos, streamName, partition);
                const lastEventGlobalPosition = await insertMessagesAndGetLastGlobalPosition(executor, messagesToInsert);
                return {
                    nextExpectedStreamVersion: nextStreamPosition,
                    lastEventGlobalPosition,
                    createdNewStream: !streamExists,
                };
            };
            if (inTransaction) {
                return executeOn(db);
            }
            return db.transaction().execute(async (trx) => executeOn(trx));
        },
        close: async () => {
            // Kysely doesn't require explicit closing for most cases
            // but we can add cleanup logic here if needed
            await Promise.resolve();
        },
    };
    return eventStore;
};
// Helper functions (consolidated from the optimized implementation)
function getStreamType(options) {
    return options?.streamType ?? "unknown";
}
function getPartition(options) {
    return options?.partition ?? DEFAULT_PARTITION;
}
function ensureEventsNotEmpty(events, _expected) {
    if (events.length === 0) {
        throw new Error("Cannot append empty events array");
    }
}
function assertExpectedVersion(expected, currentPos, streamExistsNow) {
    if (expected === "STREAM_EXISTS" && !streamExistsNow) {
        throw new Error("Stream does not exist but expected to exist");
    }
    if (expected === "STREAM_DOES_NOT_EXIST" && streamExistsNow) {
        throw new Error("Stream exists but expected not to exist");
    }
    if (typeof expected === "bigint" && expected !== currentPos) {
        throw new Error(`Expected version ${expected} but current is ${currentPos}`);
    }
}
function computeNextStreamPosition(basePos, eventCount) {
    return basePos + BigInt(eventCount);
}
async function upsertStreamRow(executor, streamId, partition, streamType, basePos, nextStreamPosition, expected, streamExistsNow) {
    if (!streamExistsNow) {
        await executor
            .insertInto("streams")
            .values({
                stream_id: streamId,
                stream_position: nextStreamPosition,
                partition,
                stream_type: streamType,
                stream_metadata: {},
                is_archived: false,
            })
            .execute();
        return;
    }
    if (typeof expected === "bigint") {
        const updatedRow = await executor
            .updateTable("streams")
            .set({ stream_position: nextStreamPosition })
            .where("stream_id", "=", streamId)
            .where("partition", "=", partition)
            .where("is_archived", "=", false)
            .where("stream_position", "=", basePos)
            .returning("stream_position")
            .executeTakeFirst();
        if (!updatedRow) {
            throw new Error(`Expected version ${expected} but current is ${basePos}`);
        }
        return;
    }
    await executor
        .updateTable("streams")
        .set({ stream_position: nextStreamPosition })
        .where("stream_id", "=", streamId)
        .where("partition", "=", partition)
        .where("is_archived", "=", false)
        .execute();
}
function buildMessagesToInsert(events, basePos, streamId, partition) {
    return events.map((e, index) => {
        const messageId = crypto.randomUUID();
        const streamPosition = basePos + BigInt(index + 1);
        const rawMeta = "metadata" in e ? e.metadata : undefined;
        const eventMeta = rawMeta && typeof rawMeta === "object" ? rawMeta : {};
        const messageMetadata = {
            ...eventMeta,
        };
        return {
            stream_id: streamId,
            stream_position: streamPosition,
            partition,
            message_data: e.data,
            message_metadata: messageMetadata,
            message_schema_version: index.toString(),
            message_type: e.type,
            message_kind: "E",
            message_id: messageId,
            is_archived: false,
            created: new Date(),
        };
    });
}
async function insertMessagesAndGetLastGlobalPosition(executor, messagesToInsert) {
    const inserted = await executor
        .insertInto("messages")
        .values(messagesToInsert)
        .returning("global_position")
        .execute();
    if (!inserted || (Array.isArray(inserted) && inserted.length === 0)) {
        return 0n;
    }
    const globalPositions = inserted.map((r) => BigInt(String(r.global_position)));
    return globalPositions[globalPositions.length - 1];
}
function parseRangeOptions(options) {
    const from = options && typeof options === "object" && "from" in options
        ? options.from
        : undefined;
    const to = options && typeof options === "object" && "to" in options
        ? options.to
        : undefined;
    const maxCount = options && typeof options === "object" && "maxCount" in options
        ? options.maxCount
        : undefined;
    return { from, to, maxCount };
}
function buildEventsQuery(deps, streamId, partition, range) {
    const { db } = deps;
    let q = db
        .selectFrom("messages")
        .select([
            "message_type",
            "message_data",
            "message_metadata",
            "stream_position",
            "global_position",
            "message_id",
        ])
        .where("stream_id", "=", streamId)
        .where("partition", "=", partition)
        .where("is_archived", "=", false)
        .orderBy("stream_position");
    if (range.from !== undefined) {
        q = q.where("stream_position", ">=", range.from);
    }
    if (range.to !== undefined) {
        q = q.where("stream_position", "<=", range.to);
    }
    if (range.maxCount !== undefined) {
        q = q.limit(Number(range.maxCount));
    }
    return q;
}
function mapRowToEvent(row, streamId) {
    const streamPosition = BigInt(String(row.stream_position));
    const globalPosition = BigInt(String(row.global_position ?? 0));
    const baseMetadata = (row.message_metadata ?? {});
    return {
        kind: "Event",
        type: row.message_type,
        data: row.message_data,
        metadata: {
            ...baseMetadata,
            messageId: row.message_id,
            streamId: streamId,
            streamPosition: streamPosition,
            globalPosition: globalPosition,
        },
    };
}
async function fetchStreamInfo(executor, streamId, partition) {
    const streamRow = await executor
        .selectFrom("streams")
        .select(["stream_position"])
        .where("stream_id", "=", streamId)
        .where("partition", "=", partition)
        .where("is_archived", "=", false)
        .executeTakeFirst();
    const currentStreamVersion = streamRow
        ? BigInt(String(streamRow.stream_position))
        : PostgreSQLEventStoreDefaultStreamVersion;
    return { currentStreamVersion, streamExists: !!streamRow };
}