@kernl-sdk/pg 0.1.9

@@ -0,0 +1,208 @@
+ import { describe, it, expect, beforeAll, afterAll, beforeEach } from "vitest";
+ import { Pool } from "pg";
+ import { Agent, Kernl, tool, FunctionToolkit } from "kernl";
+ import { STOPPED, message, IN_PROGRESS } from "@kernl-sdk/protocol";
+ import { postgres } from "../postgres";
+ // Use helper from test fixtures (handles streaming properly)
+ function createMockModel(generateFn) {
+   return {
+     spec: "1.0",
+     provider: "test",
+     modelId: "test-model",
+     generate: generateFn,
+     stream: async function* (req) {
+       const response = await generateFn(req);
+       for (const item of response.content) {
+         yield item;
+       }
+       yield {
+         kind: "finish",
+         finishReason: response.finishReason,
+         usage: response.usage,
+       };
+     },
+   };
+ }
+ const TEST_DB_URL = process.env.KERNL_PG_TEST_URL;
+ describe.sequential("PG Thread Lifecycle", () => {
+   if (!TEST_DB_URL) {
+     it.skip("requires KERNL_PG_TEST_URL environment variable", () => {});
+     return;
+   }
+   let pool;
+   let storage;
+   let kernl;
+   beforeAll(async () => {
+     pool = new Pool({ connectionString: TEST_DB_URL });
+     storage = postgres({ pool });
+     await pool.query('DROP SCHEMA IF EXISTS "kernl" CASCADE');
+     await storage.init();
+     kernl = new Kernl({ storage: { db: storage } });
+   });
+   afterAll(async () => {
+     await storage.close();
+   });
+   beforeEach(async () => {
+     // Clean threads between tests
+     await pool.query('TRUNCATE "kernl"."threads", "kernl"."thread_events" CASCADE');
+   });
+   describe("Simple thread (no tools)", () => {
+     it("should persist thread record and events correctly", async () => {
+       const model = createMockModel(async () => ({
+         content: [message({ role: "assistant", text: "Hello, world!" })],
+         finishReason: "stop",
+         usage: { inputTokens: 5, outputTokens: 3, totalTokens: 8 },
+         warnings: [],
+       }));
+       const agent = new Agent({
+         id: "simple-agent",
+         name: "Simple",
+         instructions: "Test",
+         model,
+       });
+       kernl.register(agent);
+       const result = await agent.run("Hello");
+       expect(result.response).toBe("Hello, world!");
+       // Verify thread record
+       const threadResult = await pool.query('SELECT * FROM "kernl"."threads" WHERE agent_id = $1', ["simple-agent"]);
+       expect(threadResult.rows).toHaveLength(1);
+       const thread = threadResult.rows[0];
+       expect(thread.agent_id).toBe("simple-agent");
+       expect(thread.state).toBe(STOPPED);
+       expect(thread.tick).toBe(1);
+       expect(thread.model).toMatch(/test/);
+       // Verify events
+       const eventsResult = await pool.query('SELECT * FROM "kernl"."thread_events" WHERE tid = $1 ORDER BY seq ASC', [thread.id]);
+       expect(eventsResult.rows.length).toBeGreaterThanOrEqual(2);
+       // User message
+       expect(eventsResult.rows[0].kind).toBe("message");
+       expect(eventsResult.rows[0].seq).toBe(0);
+       // Assistant message
+       expect(eventsResult.rows[1].kind).toBe("message");
+       expect(eventsResult.rows[1].seq).toBe(1);
+       // Verify monotonic seq
+       const seqs = eventsResult.rows.map((r) => r.seq);
+       expect(seqs).toEqual([...seqs].sort((a, b) => a - b));
+       expect(new Set(seqs).size).toBe(seqs.length);
+     });
+   });
+   describe("Multi-turn with tools", () => {
+     it("should persist all events across ticks with correct seq", async () => {
+       let callCount = 0;
+       const model = createMockModel(async () => {
+         callCount++;
+         if (callCount === 1) {
+           return {
+             content: [
+               message({ role: "assistant", text: "" }),
+               {
+                 kind: "tool-call",
+                 toolId: "add",
+                 callId: "call_1",
+                 state: IN_PROGRESS,
+                 arguments: '{"a":2,"b":3}',
+               },
+             ],
+             finishReason: "stop",
+             usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+             warnings: [],
+           };
+         }
+         return {
+           content: [message({ role: "assistant", text: "The sum is 5" })],
+           finishReason: "stop",
+           usage: { inputTokens: 15, outputTokens: 5, totalTokens: 20 },
+           warnings: [],
+         };
+       });
+       const addTool = tool({
+         id: "add",
+         description: "Add numbers",
+         parameters: undefined, // No validation for test
+         execute: async (ctx, params) => {
+           const { a, b } = params;
+           return a + b;
+         },
+       });
+       const agent = new Agent({
+         id: "tool-agent",
+         name: "Tool Agent",
+         instructions: "Test",
+         model,
+         toolkits: [new FunctionToolkit({ id: "math", tools: [addTool] })],
+       });
+       kernl.register(agent);
+       await agent.run("Add 2 and 3");
+       // verify thread
+       const threadResult = await pool.query('SELECT * FROM "kernl"."threads" WHERE agent_id = $1', ["tool-agent"]);
+       expect(threadResult.rows).toHaveLength(1);
+       expect(threadResult.rows[0].tick).toBe(2); // 2 ticks
+       expect(threadResult.rows[0].state).toBe(STOPPED);
+       // verify events
+       const eventsResult = await pool.query('SELECT * FROM "kernl"."thread_events" WHERE tid = $1 ORDER BY seq ASC', [threadResult.rows[0].id]);
+       const events = eventsResult.rows;
+       // Should have: user msg, assistant msg (tick 1), tool-call, tool-result, assistant msg (tick 2)
+       expect(events.length).toBeGreaterThanOrEqual(5);
+       // Verify event kinds and order
+       expect(events[0].kind).toBe("message"); // user
+       expect(events[1].kind).toBe("message"); // assistant (tick 1)
+       expect(events[2].kind).toBe("tool-call");
+       expect(events[3].kind).toBe("tool-result");
+       expect(events[4].kind).toBe("message"); // assistant (tick 2)
+       // Verify tool result exists and is in correct format
+       const toolResult = events[3];
+       expect(toolResult.data.state).toBe("completed");
+       expect(toolResult.data.callId).toBe("call_1");
+       expect(toolResult.data.toolId).toBe("add");
+       // Verify seq is monotonic
+       const seqs = events.map((e) => e.seq);
+       expect(seqs).toEqual([0, 1, 2, 3, 4]);
+     });
+   });
+   describe("Resume from storage", () => {
+     it("should load thread from storage and append new events", async () => {
+       const model = createMockModel(async () => ({
+         content: [message({ role: "assistant", text: "Response" })],
+         finishReason: "stop",
+         usage: { inputTokens: 5, outputTokens: 3, totalTokens: 8 },
+         warnings: [],
+       }));
+       const agent = new Agent({
+         id: "resume-agent",
+         name: "Resume",
+         instructions: "Test",
+         model,
+       });
+       kernl.register(agent);
+       const threadId = "resume-test-123";
+       // First run
+       await agent.run("First message", { threadId });
+       const firstEventsResult = await pool.query('SELECT COUNT(*) as count FROM "kernl"."thread_events" WHERE tid = $1', [threadId]);
+       const firstEventCount = parseInt(firstEventsResult.rows[0].count, 10);
+       expect(firstEventCount).toBeGreaterThanOrEqual(2);
+       // Second run: resume
+       await agent.run("Second message", { threadId });
+       const secondEventsResult = await pool.query('SELECT * FROM "kernl"."thread_events" WHERE tid = $1 ORDER BY seq ASC', [threadId]);
+       const secondEventCount = secondEventsResult.rows.length;
+       // Should have more events now
+       expect(secondEventCount).toBeGreaterThan(firstEventCount);
+       // Verify seq is still monotonic across both runs
+       const seqs = secondEventsResult.rows.map((r) => r.seq);
+       expect(seqs).toEqual([...seqs].sort((a, b) => a - b));
+       expect(new Set(seqs).size).toBe(seqs.length);
+       // Verify last events are from second run
+       const lastEvents = secondEventsResult.rows.slice(-2);
+       expect(lastEvents[0].kind).toBe("message"); // user: "Second message"
+       expect(lastEvents[1].kind).toBe("message"); // assistant response
+     });
+   });
+   describe("Error handling", () => {
+     it.skip("should rollback on persist failure", async () => {
+       // TODO: Implement once we have transaction-level control in storage
+       // This would require forcing a DB error mid-transaction
+       // Expected behavior:
+       // - If append() fails, no partial events should be written
+       // - Thread state should remain consistent
+     });
+   });
+ });
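Distilled from the fixtures above, the end-to-end setup these tests exercise looks like this (same public API as shown in the test file; only the connection string is a placeholder):

```ts
import { Pool } from "pg";
import { Kernl } from "kernl";
import { postgres } from "@kernl-sdk/pg";

// Pool → adapter → Kernl: init() creates the "kernl" schema and runs migrations.
const pool = new Pool({ connectionString: process.env.KERNL_PG_TEST_URL });
const storage = postgres({ pool });
await storage.init();
const kernl = new Kernl({ storage: { db: storage } });
```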
package/dist/index.d.ts ADDED
@@ -0,0 +1,7 @@
+ /**
+  * @kernl/pg - PostgreSQL storage adapter for Kernl
+  */
+ export { PGStorage, type PGStorageConfig } from "./storage";
+ export { postgres, type PostgresConfig } from "./postgres";
+ export { migrations, REQUIRED_SCHEMA_VERSION } from "./migrations";
+ //# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,EAAE,SAAS,EAAE,KAAK,eAAe,EAAE,MAAM,WAAW,CAAC;AAC5D,OAAO,EAAE,QAAQ,EAAE,KAAK,cAAc,EAAE,MAAM,YAAY,CAAC;AAC3D,OAAO,EAAE,UAAU,EAAE,uBAAuB,EAAE,MAAM,cAAc,CAAC"}
package/dist/index.js ADDED
@@ -0,0 +1,6 @@
+ /**
+  * @kernl/pg - PostgreSQL storage adapter for Kernl
+  */
+ export { PGStorage } from "./storage";
+ export { postgres } from "./postgres";
+ export { migrations, REQUIRED_SCHEMA_VERSION } from "./migrations";
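The entry point is a pure re-export, so the package's full public surface is four values plus two types (names taken from index.d.ts above, package specifier from the header):

```ts
import { PGStorage, postgres, migrations, REQUIRED_SCHEMA_VERSION } from "@kernl-sdk/pg";
import type { PGStorageConfig, PostgresConfig } from "@kernl-sdk/pg";
```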
package/dist/migrations.d.ts ADDED
@@ -0,0 +1,25 @@
+ /**
+  * Database migrations.
+  */
+ import type { PoolClient } from "pg";
+ import type { Table, Column } from "@kernl-sdk/storage";
+ /**
+  * Migration context with helpers.
+  */
+ export interface MigrationContext {
+   client: PoolClient;
+   createTable: (table: Table<string, Record<string, Column>>) => Promise<void>;
+ }
+ export interface Migration {
+   id: string;
+   up: (ctx: MigrationContext) => Promise<void>;
+ }
+ /**
+  * List of all migrations in order.
+  */
+ export declare const migrations: Migration[];
+ /**
+  * Minimum schema version required by this version of @kernl/pg.
+  */
+ export declare const REQUIRED_SCHEMA_VERSION = "0001_initial";
+ //# sourceMappingURL=migrations.d.ts.map
package/dist/migrations.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"migrations.d.ts","sourceRoot":"","sources":["../src/migrations.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,IAAI,CAAC;AACrC,OAAO,KAAK,EAAE,KAAK,EAAE,MAAM,EAAE,MAAM,oBAAoB,CAAC;AAGxD;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAC/B,MAAM,EAAE,UAAU,CAAC;IACnB,WAAW,EAAE,CAAC,KAAK,EAAE,KAAK,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;CAC9E;AAED,MAAM,WAAW,SAAS;IACxB,EAAE,EAAE,MAAM,CAAC;IACX,EAAE,EAAE,CAAC,GAAG,EAAE,gBAAgB,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;CAC9C;AAED;;GAEG;AACH,eAAO,MAAM,UAAU,EAAE,SAAS,EAQjC,CAAC;AAEF;;GAEG;AACH,eAAO,MAAM,uBAAuB,iBAAiB,CAAC"}
package/dist/migrations.js ADDED
@@ -0,0 +1,20 @@
+ /**
+  * Database migrations.
+  */
+ import { TABLE_THREADS, TABLE_THREAD_EVENTS } from "@kernl-sdk/storage";
+ /**
+  * List of all migrations in order.
+  */
+ export const migrations = [
+   {
+     id: "0001_initial",
+     async up(ctx) {
+       await ctx.createTable(TABLE_THREADS);
+       await ctx.createTable(TABLE_THREAD_EVENTS);
+     },
+   },
+ ];
+ /**
+  * Minimum schema version required by this version of @kernl/pg.
+  */
+ export const REQUIRED_SCHEMA_VERSION = "0001_initial";
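REQUIRED_SCHEMA_VERSION mirrors the latest migration id, and the ids sort lexicographically ("0001_..."). A hypothetical gate a consumer could build on top of it; this check is not part of the package, and `appliedIds` stands in for the ids read from the migrations table:

```ts
import { REQUIRED_SCHEMA_VERSION } from "@kernl-sdk/pg";

// Hypothetical check: the newest applied migration must be at or past the
// version this build of the adapter requires.
function schemaIsCompatible(appliedIds: string[]): boolean {
  const latest = [...appliedIds].sort().at(-1);
  return latest !== undefined && latest >= REQUIRED_SCHEMA_VERSION;
}
```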
package/dist/postgres.d.ts ADDED
@@ -0,0 +1,43 @@
+ import { Pool } from "pg";
+ import type { KernlStorage } from "kernl";
+ /**
+  * PostgreSQL connection configuration.
+  */
+ export type PostgresConfig = {
+   pool: Pool;
+ } | {
+   connstr: string;
+ } | {
+   host: string;
+   port: number;
+   database: string;
+   user: string;
+   password: string;
+ };
+ /**
+  * Create a PostgreSQL storage adapter for Kernl.
+  *
+  * @param config - Connection configuration (pool, connection string, or credentials)
+  * @returns KernlStorage instance backed by PostgreSQL
+  *
+  * @example
+  * ```ts
+  * // with connection string
+  * const storage = postgres({ connstr: "postgresql://localhost/mydb" });
+  *
+  * // with connection options
+  * const storage = postgres({
+  *   host: "localhost",
+  *   port: 5432,
+  *   database: "mydb",
+  *   user: "user",
+  *   password: "password"
+  * });
+  *
+  * // existing pool
+  * const pool = new Pool({ ... });
+  * const storage = postgres({ pool });
+  * ```
+  */
+ export declare function postgres(config: PostgresConfig): KernlStorage;
+ //# sourceMappingURL=postgres.d.ts.map
package/dist/postgres.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"postgres.d.ts","sourceRoot":"","sources":["../src/postgres.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,IAAI,EAAE,MAAM,IAAI,CAAC;AAC1B,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,OAAO,CAAC;AAI1C;;GAEG;AACH,MAAM,MAAM,cAAc,GACtB;IAAE,IAAI,EAAE,IAAI,CAAA;CAAE,GACd;IAAE,OAAO,EAAE,MAAM,CAAA;CAAE,GACnB;IACE,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;CAClB,CAAC;AAEN;;;;;;;;;;;;;;;;;;;;;;;;GAwBG;AACH,wBAAgB,QAAQ,CAAC,MAAM,EAAE,cAAc,GAAG,YAAY,CAkB7D"}
package/dist/postgres.js ADDED
@@ -0,0 +1,46 @@
+ import { Pool } from "pg";
+ import { PGStorage } from "./storage";
+ /**
+  * Create a PostgreSQL storage adapter for Kernl.
+  *
+  * @param config - Connection configuration (pool, connection string, or credentials)
+  * @returns KernlStorage instance backed by PostgreSQL
+  *
+  * @example
+  * ```ts
+  * // with connection string
+  * const storage = postgres({ connstr: "postgresql://localhost/mydb" });
+  *
+  * // with connection options
+  * const storage = postgres({
+  *   host: "localhost",
+  *   port: 5432,
+  *   database: "mydb",
+  *   user: "user",
+  *   password: "password"
+  * });
+  *
+  * // existing pool
+  * const pool = new Pool({ ... });
+  * const storage = postgres({ pool });
+  * ```
+  */
+ export function postgres(config) {
+   let pool;
+   if ("pool" in config) {
+     pool = config.pool;
+   }
+   else if ("connstr" in config) {
+     pool = new Pool({ connectionString: config.connstr });
+   }
+   else {
+     pool = new Pool({
+       host: config.host,
+       port: config.port,
+       database: config.database,
+       user: config.user,
+       password: config.password,
+     });
+   }
+   return new PGStorage({ pool });
+ }
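One behavioral note that follows from the code: PGStorage.close() calls pool.end() unconditionally, so a caller-supplied pool is shut down along with the storage and must not be reused afterwards. A sketch (DATABASE_URL is a placeholder):

```ts
const pool = new Pool({ connectionString: process.env.DATABASE_URL });
const storage = postgres({ pool });
await storage.init();
// ... run agents ...
await storage.close(); // ends `pool` as well; it cannot serve further queries
```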
package/dist/sql.d.ts ADDED
@@ -0,0 +1,8 @@
+ /**
+  * SQL utilities for safe query construction.
+  */
+ /**
+  * SQL identifier regex - alphanumeric + underscore, must start with letter/underscore.
+  */
+ export declare const SQL_IDENTIFIER_REGEX: RegExp;
+ //# sourceMappingURL=sql.d.ts.map
package/dist/sql.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"sql.d.ts","sourceRoot":"","sources":["../src/sql.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH;;GAEG;AACH,eAAO,MAAM,oBAAoB,QAA6B,CAAC"}
package/dist/sql.js ADDED
@@ -0,0 +1,7 @@
+ /**
+  * SQL utilities for safe query construction.
+  */
+ /**
+  * SQL identifier regex - alphanumeric + underscore, must start with letter/underscore.
+  */
+ export const SQL_IDENTIFIER_REGEX = /^[a-zA-Z_][a-zA-Z0-9_]*$/;
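This regex is what storage.js (below) asserts table names against before interpolating them into DDL. A quick sketch of what it accepts; the deep import path is illustrative, since the regex is not re-exported from the package root:

```ts
import { SQL_IDENTIFIER_REGEX } from "@kernl-sdk/pg/dist/sql"; // illustrative path

SQL_IDENTIFIER_REGEX.test("thread_events"); // true
SQL_IDENTIFIER_REGEX.test("1bad-name");     // false: leading digit, hyphen
```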
package/dist/storage.d.ts ADDED
@@ -0,0 +1,68 @@
+ import type { Pool } from "pg";
+ import type { AgentRegistry, ModelRegistry, KernlStorage, Transaction } from "kernl";
+ import { PGThreadStore } from "./thread/store";
+ /**
+  * PostgreSQL storage configuration.
+  */
+ export interface PGStorageConfig {
+   /**
+    * Pool instance for database connections.
+    */
+   pool: Pool;
+ }
+ /**
+  * PostgreSQL storage adapter.
+  */
+ export declare class PGStorage implements KernlStorage {
+   private pool;
+   threads: PGThreadStore;
+   constructor(config: PGStorageConfig);
+   /**
+    * Bind runtime registries to storage.
+    */
+   bind(registries: {
+     agents: AgentRegistry;
+     models: ModelRegistry;
+   }): void;
+   /**
+    * Execute a function within a transaction.
+    */
+   transaction<T>(fn: (tx: Transaction) => Promise<T>): Promise<T>;
+   /**
+    * Initialize the storage backend.
+    */
+   init(): Promise<void>;
+   /**
+    * Close the storage backend and cleanup resources.
+    */
+   close(): Promise<void>;
+   /**
+    * Run migrations to ensure all required tables exist.
+    */
+   migrate(): Promise<void>;
+   /**
+    * Create a table from its definition.
+    */
+   private createTable;
+   /**
+    * Create a table from its definition using a specific client.
+    */
+   private _createTable;
+   /**
+    * Alter a table definition (not implemented).
+    */
+   private alterTable;
+   /**
+    * Clear all rows from a table (not implemented).
+    */
+   private clearTable;
+   /**
+    * Drop a table (not implemented).
+    */
+   private dropTable;
+   /**
+    * Create an index from its definition.
+    */
+   private createIndex;
+ }
+ //# sourceMappingURL=storage.d.ts.map
package/dist/storage.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"storage.d.ts","sourceRoot":"","sources":["../src/storage.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,IAAI,EAAc,MAAM,IAAI,CAAC;AAK3C,OAAO,KAAK,EACV,aAAa,EACb,aAAa,EACb,YAAY,EACZ,WAAW,EACZ,MAAM,OAAO,CAAC;AAIf,OAAO,EAAE,aAAa,EAAE,MAAM,gBAAgB,CAAC;AAI/C;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B;;OAEG;IACH,IAAI,EAAE,IAAI,CAAC;CACZ;AAED;;GAEG;AACH,qBAAa,SAAU,YAAW,YAAY;IAC5C,OAAO,CAAC,IAAI,CAAO;IAEnB,OAAO,EAAE,aAAa,CAAC;gBAEX,MAAM,EAAE,eAAe;IAKnC;;OAEG;IACH,IAAI,CAAC,UAAU,EAAE;QAAE,MAAM,EAAE,aAAa,CAAC;QAAC,MAAM,EAAE,aAAa,CAAA;KAAE,GAAG,IAAI;IAIxE;;OAEG;IACG,WAAW,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,EAAE,EAAE,WAAW,KAAK,OAAO,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAIrE;;OAEG;IACG,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC;IAM3B;;OAEG;IACG,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAI5B;;OAEG;IACG,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAyC9B;;OAEG;YACW,WAAW;IAUzB;;OAEG;YACW,YAAY;IA6F1B;;OAEG;YACW,UAAU;IAMxB;;OAEG;YACW,UAAU;IAIxB;;OAEG;YACW,SAAS;IAIvB;;OAEG;YACW,WAAW;CAkB1B"}
package/dist/storage.js ADDED
@@ -0,0 +1,194 @@
+ import assert from "assert";
+ import { SCHEMA_NAME, TABLE_MIGRATIONS } from "@kernl-sdk/storage";
+ import { UnimplementedError } from "@kernl-sdk/shared/lib";
+ /* pg */
+ import { PGThreadStore } from "./thread/store";
+ import { SQL_IDENTIFIER_REGEX } from "./sql";
+ import { migrations } from "./migrations";
+ /**
+  * PostgreSQL storage adapter.
+  */
+ export class PGStorage {
+   pool;
+   threads;
+   constructor(config) {
+     this.pool = config.pool;
+     this.threads = new PGThreadStore(this.pool);
+   }
+   /**
+    * Bind runtime registries to storage.
+    */
+   bind(registries) {
+     this.threads.bind(registries);
+   }
+   /**
+    * Execute a function within a transaction.
+    */
+   async transaction(fn) {
+     throw new UnimplementedError();
+   }
+   /**
+    * Initialize the storage backend.
+    */
+   async init() {
+     await this.pool.query(`CREATE SCHEMA IF NOT EXISTS "${SCHEMA_NAME}"`);
+     await this.createTable(TABLE_MIGRATIONS);
+     await this.migrate();
+   }
+   /**
+    * Close the storage backend and cleanup resources.
+    */
+   async close() {
+     await this.pool.end();
+   }
+   /**
+    * Run migrations to ensure all required tables exist.
+    */
+   async migrate() {
+     const client = await this.pool.connect();
+     try {
+       await client.query("BEGIN");
+       // read applied migration IDs
+       const result = await client.query(`SELECT id FROM "${SCHEMA_NAME}".migrations ORDER BY applied_at ASC`);
+       const applied = new Set(result.rows.map((row) => row.id));
+       // filter pending migrations
+       const pending = migrations.filter((m) => !applied.has(m.id));
+       if (pending.length === 0) {
+         await client.query("COMMIT");
+         return;
+       }
+       // run pending migrations + insert into migrations table
+       for (const migration of pending) {
+         await migration.up({
+           client,
+           createTable: async (table) => {
+             await this._createTable(client, table);
+           },
+         });
+         await client.query(`INSERT INTO "${SCHEMA_NAME}".migrations (id, applied_at) VALUES ($1, $2)`, [migration.id, Date.now()]);
+       }
+       await client.query("COMMIT");
+     }
+     catch (error) {
+       await client.query("ROLLBACK");
+       throw error;
+     }
+     finally {
+       client.release();
+     }
+   }
+   /**
+    * Create a table from its definition.
+    */
+   async createTable(table) {
+     assert(SQL_IDENTIFIER_REGEX.test(table.name), "system table should have a valid name");
+     await this._createTable(this.pool, table);
+   }
+   /**
+    * Create a table from its definition using a specific client.
+    */
+   async _createTable(client, table) {
+     const columns = [];
+     const tableConstraints = [];
+     // build column definitions
+     for (const name in table.columns) {
+       const col = table.columns[name];
+       const constraints = [];
+       if (col._pk)
+         constraints.push("PRIMARY KEY");
+       if (col._unique)
+         constraints.push("UNIQUE");
+       if (!col._nullable && !col._pk)
+         constraints.push("NOT NULL");
+       if (col._default !== undefined) {
+         constraints.push(`DEFAULT ${col.encode(col._default)}`);
+       }
+       // foreign key reference
+       if (col._fk) {
+         let ref = `REFERENCES "${SCHEMA_NAME}"."${col._fk.table}" ("${col._fk.column}")`;
+         if (col._onDelete) {
+           ref += ` ON DELETE ${col._onDelete}`;
+         }
+         constraints.push(ref);
+       }
+       columns.push(`"${name}" ${col.type.toUpperCase()} ${constraints.join(" ")}`.trim());
+     }
+     // table-level constraints
+     const indexes = [];
+     if (table.constraints) {
+       for (const constraint of table.constraints) {
+         switch (constraint.kind) {
+           case "unique": {
+             const name = constraint.name ??
+               `${table.name}_${constraint.columns.join("_")}_unique`;
+             const cols = constraint.columns.map((c) => `"${c}"`).join(", ");
+             tableConstraints.push(`CONSTRAINT "${name}" UNIQUE (${cols})`);
+             break;
+           }
+           case "pkey": {
+             const name = constraint.name ?? `${table.name}_pkey`;
+             const cols = constraint.columns.map((c) => `"${c}"`).join(", ");
+             tableConstraints.push(`CONSTRAINT "${name}" PRIMARY KEY (${cols})`);
+             break;
+           }
+           case "fkey": {
+             throw new UnimplementedError("Composite foreign keys not yet supported. Use column-level .references() for single-column FKs.");
+           }
+           case "check": {
+             const name = constraint.name ?? `${table.name}_check`;
+             tableConstraints.push(`CONSTRAINT "${name}" CHECK (${constraint.expression})`);
+             break;
+           }
+           case "index": {
+             // collect indexes to create after table
+             indexes.push(constraint);
+             break;
+           }
+         }
+       }
+     }
+     const constraints = [...columns, ...tableConstraints];
+     const sql = `
+       CREATE TABLE IF NOT EXISTS "${SCHEMA_NAME}"."${table.name}" (
+         ${constraints.join(",\n ")}
+       )
+     `.trim();
+     await client.query(sql);
+     // create indexes
+     for (const index of indexes) {
+       await this.createIndex(client, table.name, index);
+     }
+   }
+   /**
+    * Alter a table definition (not implemented).
+    */
+   async alterTable(table) {
+     throw new UnimplementedError();
+   }
+   /**
+    * Clear all rows from a table (not implemented).
+    */
+   async clearTable(tableName) {
+     throw new UnimplementedError();
+   }
+   /**
+    * Drop a table (not implemented).
+    */
+   async dropTable(tableName) {
+     throw new UnimplementedError();
+   }
+   /**
+    * Create an index from its definition.
+    */
+   async createIndex(client, tableName, index) {
+     const uniqueKeyword = index.unique ? "UNIQUE" : "";
+     const columns = index.columns.map((c) => `"${c}"`).join(", ");
+     // Auto-generate index name: idx_{table}_{col1}_{col2}...
+     const indexName = `idx_${tableName}_${index.columns.join("_")}`;
+     const sql = `
+       CREATE ${uniqueKeyword} INDEX IF NOT EXISTS "${indexName}"
+       ON "${SCHEMA_NAME}"."${tableName}" (${columns})
+     `.trim();
+     await client.query(sql);
+   }
+ }
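Equivalent to the postgres() factory, the class can also be constructed directly; init() wires schema creation, the migrations table, and pending migrations in that order, as read from the code above (DATABASE_URL is a placeholder):

```ts
import { Pool } from "pg";
import { PGStorage } from "@kernl-sdk/pg";

const pool = new Pool({ connectionString: process.env.DATABASE_URL });
const storage = new PGStorage({ pool });
await storage.init();  // CREATE SCHEMA, createTable(TABLE_MIGRATIONS), migrate()
await storage.close(); // pool.end()
```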