@kernl-sdk/pg 0.1.9

package/dist/thread/store.d.ts ADDED
@@ -0,0 +1,64 @@
+ import type { Pool, PoolClient } from "pg";
+ import { type ThreadRecord } from "@kernl-sdk/storage";
+ import { Thread, type ThreadEvent } from "kernl/internal";
+ import { type AgentRegistry, type ModelRegistry, type ThreadStore, type NewThread, type ThreadUpdate, type ThreadInclude, type ThreadListOptions, type ThreadHistoryOptions } from "kernl";
+ /**
+  * PostgreSQL Thread store implementation.
+  */
+ export declare class PGThreadStore implements ThreadStore {
+     private db;
+     private registries;
+     constructor(db: Pool | PoolClient);
+     /**
+      * Bind runtime registries for hydrating Thread instances.
+      *
+      * (TODO): move into abstract ThreadStore class
+      */
+     bind(registries: {
+         agents: AgentRegistry;
+         models: ModelRegistry;
+     }): void;
+     /**
+      * Get a thread by id.
+      */
+     get(tid: string, include?: ThreadInclude): Promise<Thread | null>;
+     /**
+      * List threads matching the filter.
+      */
+     list(options?: ThreadListOptions): Promise<Thread[]>;
+     /**
+      * Insert a new thread into the store.
+      */
+     insert(thread: NewThread): Promise<Thread>;
+     /**
+      * Update thread runtime state.
+      */
+     update(tid: string, patch: ThreadUpdate): Promise<Thread>;
+     /**
+      * Delete a thread and cascade to thread_events.
+      */
+     delete(tid: string): Promise<void>;
+     /**
+      * Get the event history for a thread.
+      */
+     history(tid: string, opts?: ThreadHistoryOptions): Promise<ThreadEvent[]>;
+     /**
+      * Append events to the thread history.
+      *
+      * Semantics:
+      * - Guaranteed per-thread ordering via monotonically increasing `seq`
+      * - Idempotent on `(tid, event.id)`: duplicate ids MUST NOT create duplicate rows
+      * - Events maintain insertion order
+      *
+      * NOTE: The Thread class manages monotonic seq and timestamp assignment and is the only entrypoint.
+      */
+     append(events: ThreadEvent[]): Promise<void>;
+     /**
+      * Hydrate a Thread instance from a database record.
+      */
+     hydrate(thread: {
+         record: ThreadRecord;
+         events?: ThreadEvent[];
+     }): Thread;
+ }
+ //# sourceMappingURL=store.d.ts.map
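
The declaration file above is the store's entire public surface. The following is a minimal usage sketch in TypeScript; it assumes the package root re-exports PGThreadStore (the index module is not part of this diff) and that the host application supplies AgentRegistry and ModelRegistry instances, which are only declared here to keep the sketch self-contained. The thread id and history options are illustrative.

import { Pool } from "pg";
import type { AgentRegistry, ModelRegistry } from "kernl";
import { PGThreadStore } from "@kernl-sdk/pg"; // assumed root re-export; not shown in this diff

// Registries are provided by the host application (e.g. a Kernl instance);
// declared here only so the sketch type-checks on its own.
declare const agents: AgentRegistry;
declare const models: ModelRegistry;

const pool = new Pool({ connectionString: process.env.DATABASE_URL });
const store = new PGThreadStore(pool);

// bind() must happen before any call that hydrates a Thread, otherwise
// hydrate() fails its registries assertion.
store.bind({ agents, models });

// Fetch a thread together with its event history; per the implementation,
// include.history may be `true` or a ThreadHistoryOptions object.
const thread = await store.get("thread-123", {
    history: { order: "asc", limit: 100 },
});
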
package/dist/thread/store.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"store.d.ts","sourceRoot":"","sources":["../../src/thread/store.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,IAAI,EAAE,UAAU,EAAE,MAAM,IAAI,CAAC;AAE3C,OAAO,EAIL,KAAK,YAAY,EAElB,MAAM,oBAAoB,CAAC;AAC5B,OAAO,EAAE,MAAM,EAAE,KAAK,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAC1D,OAAO,EAEL,KAAK,aAAa,EAClB,KAAK,aAAa,EAClB,KAAK,WAAW,EAChB,KAAK,SAAS,EACd,KAAK,YAAY,EACjB,KAAK,aAAa,EAClB,KAAK,iBAAiB,EACtB,KAAK,oBAAoB,EAC1B,MAAM,OAAO,CAAC;AAEf;;GAEG;AACH,qBAAa,aAAc,YAAW,WAAW;IAC/C,OAAO,CAAC,EAAE,CAAoB;IAC9B,OAAO,CAAC,UAAU,CAA0D;gBAEhE,EAAE,EAAE,IAAI,GAAG,UAAU;IAKjC;;;;OAIG;IACH,IAAI,CAAC,UAAU,EAAE;QAAE,MAAM,EAAE,aAAa,CAAC;QAAC,MAAM,EAAE,aAAa,CAAA;KAAE,GAAG,IAAI;IAIxE;;OAEG;IACG,GAAG,CAAC,GAAG,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,aAAa,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAqGvE;;OAEG;IACG,IAAI,CAAC,OAAO,CAAC,EAAE,iBAAiB,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC;IAiG1D;;OAEG;IACG,MAAM,CAAC,MAAM,EAAE,SAAS,GAAG,OAAO,CAAC,MAAM,CAAC;IA0BhD;;OAEG;IACG,MAAM,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,YAAY,GAAG,OAAO,CAAC,MAAM,CAAC;IA6C/D;;OAEG;IACG,MAAM,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAMxC;;OAEG;IACG,OAAO,CACX,GAAG,EAAE,MAAM,EACX,IAAI,CAAC,EAAE,oBAAoB,GAC1B,OAAO,CAAC,WAAW,EAAE,CAAC;IAsCzB;;;;;;;;;OASG;IACG,MAAM,CAAC,MAAM,EAAE,WAAW,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC;IAkClD;;OAEG;IACH,OAAO,CAAC,MAAM,EAAE;QAAE,MAAM,EAAE,YAAY,CAAC;QAAC,MAAM,CAAC,EAAE,WAAW,EAAE,CAAA;KAAE,GAAG,MAAM;CAkC1E"}
package/dist/thread/store.js ADDED
@@ -0,0 +1,346 @@
+ import assert from "assert";
+ import { SCHEMA_NAME, NewThreadCodec, ThreadEventRecordCodec, } from "@kernl-sdk/storage";
+ import { Thread } from "kernl/internal";
+ import { Context, } from "kernl";
+ /**
+  * PostgreSQL Thread store implementation.
+  */
+ export class PGThreadStore {
+     db;
+     registries;
+     constructor(db) {
+         this.db = db;
+         this.registries = null;
+     }
+     /**
+      * Bind runtime registries for hydrating Thread instances.
+      *
+      * (TODO): move into abstract ThreadStore class
+      */
+     bind(registries) {
+         this.registries = registries;
+     }
+     /**
+      * Get a thread by id.
+      */
+     async get(tid, include) {
+         // JOIN with thread_events if include.history
+         if (include?.history) {
+             const opts = typeof include.history === "object" ? include.history : undefined;
+             const params = [tid];
+             let index = 2;
+             let eventFilter = "";
+             if (opts?.after !== undefined) {
+                 eventFilter += ` AND e.seq > $${index++}`;
+                 params.push(opts.after);
+             }
+             if (opts?.kinds && opts.kinds.length > 0) {
+                 eventFilter += ` AND e.kind = ANY($${index++})`;
+                 params.push(opts.kinds);
+             }
+             const order = opts?.order ?? "asc";
+             const limit = opts?.limit ? ` LIMIT ${opts.limit}` : "";
+             const query = `
+                 SELECT
+                     t.*,
+                     e.id as event_id,
+                     e.tid as event_tid,
+                     e.seq,
+                     e.kind as event_kind,
+                     e.timestamp,
+                     e.data,
+                     e.metadata as event_metadata
+                 FROM ${SCHEMA_NAME}.threads t
+                 LEFT JOIN ${SCHEMA_NAME}.thread_events e ON t.id = e.tid${eventFilter}
+                 WHERE t.id = $1
+                 ORDER BY e.seq ${order.toUpperCase()}
+                 ${limit}
+             `;
+             const result = await this.db.query(query, params);
+             if (result.rows.length === 0) {
+                 return null;
+             }
+             // first row has thread data (all rows have same thread data)
+             const first = result.rows[0];
+             const record = {
+                 id: first.id,
+                 namespace: first.namespace,
+                 agent_id: first.agent_id,
+                 model: first.model,
+                 context: first.context,
+                 tick: first.tick,
+                 state: first.state,
+                 parent_task_id: first.parent_task_id,
+                 metadata: first.metadata,
+                 created_at: first.created_at,
+                 updated_at: first.updated_at,
+             };
+             // collect events from all rows (skip rows where event_id is null)
+             const events = result.rows
+                 .filter((row) => row.event_id !== null)
+                 .map((row) => ThreadEventRecordCodec.decode({
+                     id: row.event_id,
+                     tid: row.event_tid,
+                     seq: row.seq,
+                     kind: row.event_kind,
+                     timestamp: Number(row.timestamp), // pg returns BIGINT as string by default, normalize to number
+                     data: row.data,
+                     metadata: row.event_metadata,
+                 }));
+             try {
+                 return this.hydrate({ record, events });
+             }
+             catch (error) {
+                 return null;
+             }
+         }
+         // simple query without events
+         const result = await this.db.query(`SELECT * FROM ${SCHEMA_NAME}.threads WHERE id = $1`, [tid]);
+         if (result.rows.length === 0) {
+             return null;
+         }
+         try {
+             return this.hydrate({ record: result.rows[0] });
+         }
+         catch (error) {
+             return null;
+         }
+     }
+     /**
+      * List threads matching the filter.
+      */
+     async list(options) {
+         let query = `SELECT * FROM ${SCHEMA_NAME}.threads`;
+         const values = [];
+         let paramIndex = 1;
+         // build WHERE clause
+         const conditions = [];
+         if (options?.filter) {
+             const { state, agentId, parentTaskId, createdAfter, createdBefore, namespace, } = options.filter;
+             if (namespace) {
+                 conditions.push(`namespace = $${paramIndex++}`);
+                 values.push(namespace);
+             }
+             if (state) {
+                 if (Array.isArray(state)) {
+                     conditions.push(`state = ANY($${paramIndex++})`);
+                     values.push(state);
+                 }
+                 else {
+                     conditions.push(`state = $${paramIndex++}`);
+                     values.push(state);
+                 }
+             }
+             if (agentId) {
+                 conditions.push(`agent_id = $${paramIndex++}`);
+                 values.push(agentId);
+             }
+             if (parentTaskId) {
+                 conditions.push(`parent_task_id = $${paramIndex++}`);
+                 values.push(parentTaskId);
+             }
+             if (createdAfter) {
+                 conditions.push(`created_at > $${paramIndex++}`);
+                 values.push(createdAfter.getTime());
+             }
+             if (createdBefore) {
+                 conditions.push(`created_at < $${paramIndex++}`);
+                 values.push(createdBefore.getTime());
+             }
+         }
+         if (conditions.length > 0) {
+             query += ` WHERE ${conditions.join(" AND ")}`;
+         }
+         // build ORDER BY clause
+         const orderClauses = [];
+         if (options?.order?.createdAt) {
+             orderClauses.push(`created_at ${options.order.createdAt.toUpperCase()}`);
+         }
+         if (options?.order?.updatedAt) {
+             orderClauses.push(`updated_at ${options.order.updatedAt.toUpperCase()}`);
+         }
+         if (orderClauses.length > 0) {
+             query += ` ORDER BY ${orderClauses.join(", ")}`;
+         }
+         else {
+             // default: most recent first
+             query += ` ORDER BY created_at DESC`;
+         }
+         if (options?.limit) {
+             query += ` LIMIT $${paramIndex++}`;
+             values.push(options.limit);
+         }
+         if (options?.offset) {
+             query += ` OFFSET $${paramIndex++}`;
+             values.push(options.offset);
+         }
+         const result = await this.db.query(query, values);
+         return result.rows
+             .map((record) => {
+                 try {
+                     return this.hydrate({ record });
+                 }
+                 catch (error) {
+                     // Skip threads with non-existent agent/model (graceful degradation)
+                     //
+                     // (TODO): what do we want to do with this?
+                     return null;
+                 }
+             })
+             .filter((thread) => thread !== null);
+     }
+     /**
+      * Insert a new thread into the store.
+      */
+     async insert(thread) {
+         const record = NewThreadCodec.encode(thread);
+         const result = await this.db.query(`INSERT INTO ${SCHEMA_NAME}.threads
+             (id, namespace, agent_id, model, context, tick, state, parent_task_id, metadata, created_at, updated_at)
+             VALUES ($1, $2, $3, $4, $5::jsonb, $6, $7, $8, $9::jsonb, $10, $11)
+             RETURNING *`, [
+             record.id,
+             record.namespace,
+             record.agent_id,
+             record.model,
+             record.context,
+             record.tick,
+             record.state,
+             record.parent_task_id,
+             record.metadata,
+             record.created_at,
+             record.updated_at,
+         ]);
+         return this.hydrate({ record: result.rows[0] });
+     }
+     /**
+      * Update thread runtime state.
+      */
+     async update(tid, patch) {
+         const updates = [];
+         const values = [];
+         let paramIndex = 1;
+         if (patch.tick !== undefined) {
+             updates.push(`tick = $${paramIndex++}`);
+             values.push(patch.tick);
+         }
+         if (patch.state !== undefined) {
+             updates.push(`state = $${paramIndex++}`);
+             values.push(patch.state);
+         }
+         if (patch.context !== undefined) {
+             updates.push(`context = $${paramIndex++}`);
+             // NOTE: Store the raw context value, not the Context wrapper.
+             //
+             // This may change in the future depending on the Context implementation.
+             values.push(JSON.stringify(patch.context.context));
+         }
+         if (patch.metadata !== undefined) {
+             updates.push(`metadata = $${paramIndex++}`);
+             values.push(patch.metadata ? JSON.stringify(patch.metadata) : null);
+         }
+         // always update `updated_at`
+         updates.push(`updated_at = $${paramIndex++}`);
+         values.push(Date.now());
+         values.push(tid); // WHERE id = $N
+         const result = await this.db.query(`UPDATE ${SCHEMA_NAME}.threads
+             SET ${updates.join(", ")}
+             WHERE id = $${paramIndex}
+             RETURNING *`, values);
+         return this.hydrate({ record: result.rows[0] });
+     }
+     /**
+      * Delete a thread and cascade to thread_events.
+      */
+     async delete(tid) {
+         await this.db.query(`DELETE FROM ${SCHEMA_NAME}.threads WHERE id = $1`, [
+             tid,
+         ]);
+     }
+     /**
+      * Get the event history for a thread.
+      */
+     async history(tid, opts) {
+         let query = `SELECT * FROM ${SCHEMA_NAME}.thread_events WHERE tid = $1`;
+         const values = [tid];
+         let paramIndex = 2;
+         // - filter:seq -
+         if (opts?.after !== undefined) {
+             query += ` AND seq > $${paramIndex++}`;
+             values.push(opts.after);
+         }
+         // - filter:kind -
+         if (opts?.kinds && opts.kinds.length > 0) {
+             query += ` AND kind = ANY($${paramIndex++})`;
+             values.push(opts.kinds);
+         }
+         // - order -
+         const order = opts?.order ?? "asc";
+         query += ` ORDER BY seq ${order.toUpperCase()}`;
+         // - limit -
+         if (opts?.limit !== undefined) {
+             query += ` LIMIT $${paramIndex++}`;
+             values.push(opts.limit);
+         }
+         const result = await this.db.query(query, values);
+         return result.rows.map((record) => ThreadEventRecordCodec.decode({
+             ...record,
+             // Normalize BIGINT (string) to number for zod schema
+             timestamp: Number(record.timestamp),
+         }));
+     }
+     /**
+      * Append events to the thread history.
+      *
+      * Semantics:
+      * - Guaranteed per-thread ordering via monotonically increasing `seq`
+      * - Idempotent on `(tid, event.id)`: duplicate ids MUST NOT create duplicate rows
+      * - Events maintain insertion order
+      *
+      * NOTE: The Thread class manages monotonic seq and timestamp assignment and is the only entrypoint.
+      */
+     async append(events) {
+         if (events.length === 0)
+             return;
+         const records = events.map((e) => ThreadEventRecordCodec.encode(e));
+         const values = [];
+         const placeholders = [];
+         let index = 1;
+         for (const record of records) {
+             placeholders.push(`($${index++}, $${index++}, $${index++}, $${index++}, $${index++}, $${index++}::jsonb, $${index++}::jsonb)`);
+             values.push(record.id, record.tid, record.seq, record.kind, record.timestamp, record.data, record.metadata);
+         }
+         // insert with ON CONFLICT DO NOTHING for idempotency
+         await this.db.query(`INSERT INTO ${SCHEMA_NAME}.thread_events
+             (id, tid, seq, kind, timestamp, data, metadata)
+             VALUES ${placeholders.join(", ")}
+             ON CONFLICT (tid, id) DO NOTHING`, values);
+     }
+     /**
+      * Hydrate a Thread instance from a database record.
+      */
+     hydrate(thread) {
+         assert(this.registries, "registries should be bound to storage in Kernl constructor");
+         const { record, events = [] } = thread;
+         const agent = this.registries.agents.get(record.agent_id);
+         const model = this.registries.models.get(record.model);
+         if (!agent || !model) {
+             throw new Error(`Thread ${record.id} references non-existent agent/model (agent: ${record.agent_id}, model: ${record.model})`);
+         }
+         return new Thread({
+             agent,
+             history: events,
+             context: new Context(record.namespace, record.context),
+             model,
+             task: null, // TODO: load from TaskStore when it exists
+             tid: record.id,
+             namespace: record.namespace,
+             tick: record.tick,
+             state: record.state,
+             metadata: record.metadata,
+             createdAt: new Date(record.created_at),
+             updatedAt: new Date(record.updated_at),
+             storage: this, // pass storage reference so resumed thread can persist
+             persisted: true,
+         });
+     }
+ }
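
The queries above lean on a schema that ships with @kernl-sdk/storage rather than with this package: delete() relies on an ON DELETE CASCADE from thread_events to threads, append() relies on a unique constraint over (tid, id) for its ON CONFLICT clause, and the event timestamp column is a BIGINT that get() and history() normalize with Number(...). The sketch below is a hypothetical DDL reconstruction of those constraints for orientation only; column types are guesses and the authoritative migrations in @kernl-sdk/storage may differ.

import { Pool } from "pg";
import { SCHEMA_NAME } from "@kernl-sdk/storage";

// Hypothetical DDL sketch of the constraints the store's queries rely on.
// The real schema lives in @kernl-sdk/storage; this is not it.
export async function sketchSchema(db: Pool): Promise<void> {
    await db.query(`
        CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};

        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.threads (
            id             TEXT PRIMARY KEY,
            namespace      TEXT NOT NULL,
            agent_id       TEXT NOT NULL,
            model          TEXT NOT NULL,
            context        JSONB,
            tick           INTEGER NOT NULL,
            state          TEXT NOT NULL,
            parent_task_id TEXT,
            metadata       JSONB,
            created_at     BIGINT NOT NULL, -- epoch millis assumed (list() filters with Date.getTime())
            updated_at     BIGINT NOT NULL  -- update() writes Date.now()
        );

        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.thread_events (
            id        TEXT NOT NULL,
            tid       TEXT NOT NULL REFERENCES ${SCHEMA_NAME}.threads (id) ON DELETE CASCADE,
            seq       INTEGER NOT NULL,
            kind      TEXT NOT NULL,
            timestamp BIGINT NOT NULL, -- returned as a string by pg; callers normalize with Number(...)
            data      JSONB,
            metadata  JSONB,
            PRIMARY KEY (tid, id)      -- target of append()'s ON CONFLICT (tid, id) DO NOTHING
        );
    `);
}
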
package/package.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "name": "@kernl-sdk/pg",
+   "version": "0.1.9",
+   "description": "PostgreSQL storage adapter for kernl",
+   "keywords": [
+     "kernl",
+     "storage",
+     "postgresql",
+     "postgres"
+   ],
+   "author": "dremnik",
+   "license": "MIT",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/kernl-sdk/kernl.git",
+     "directory": "packages/storage/pg"
+   },
+   "homepage": "https://github.com/kernl-sdk/kernl#readme",
+   "bugs": {
+     "url": "https://github.com/kernl-sdk/kernl/issues"
+   },
+   "type": "module",
+   "publishConfig": {
+     "access": "public"
+   },
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.js"
+     }
+   },
+   "devDependencies": {
+     "@types/node": "^24.10.0",
+     "@types/pg": "^8.15.6",
+     "tsc-alias": "^1.8.10",
+     "typescript": "5.9.2",
+     "vitest": "^4.0.8",
+     "@kernl-sdk/protocol": "^0.2.4"
+   },
+   "dependencies": {
+     "pg": "^8.16.3",
+     "kernl": "^0.6.0",
+     "@kernl-sdk/shared": "^0.1.5",
+     "@kernl-sdk/storage": "0.1.9"
+   },
+   "scripts": {
+     "build": "tsc && tsc-alias",
+     "dev": "tsc --watch",
+     "check-types": "tsc --noEmit",
+     "test": "vitest",
+     "test:watch": "vitest --watch",
+     "test:run": "vitest run"
+   }
+ }
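
Since package.json declares "type": "module" and an exports map with only "types" and "import" conditions, the package is ESM-only and deep imports into dist/ are not exported. Because the constructor accepts Pool | PoolClient, a store can also be scoped to a single checked-out client so writes join an explicit transaction. A sketch follows, again assuming a root re-export of PGThreadStore and caller-supplied registries; the function name and NewThread draft are hypothetical.

import { Pool } from "pg";
import type { AgentRegistry, ModelRegistry, NewThread } from "kernl";
import { PGThreadStore } from "@kernl-sdk/pg"; // assumed root re-export; not shown in this diff

// Insert a thread inside an explicit transaction by handing the store a
// checked-out PoolClient instead of the Pool itself.
export async function insertThreadTx(
    pool: Pool,
    draft: NewThread, // hypothetical NewThread value supplied by the caller
    registries: { agents: AgentRegistry; models: ModelRegistry },
) {
    const client = await pool.connect();
    try {
        await client.query("BEGIN");
        const store = new PGThreadStore(client);
        store.bind(registries); // insert() hydrates the returned Thread, which needs bound registries
        const thread = await store.insert(draft);
        await client.query("COMMIT");
        return thread;
    }
    catch (err) {
        await client.query("ROLLBACK");
        throw err;
    }
    finally {
        client.release();
    }
}
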