zen-code 1.0.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,393 @@
1
+ import { T as l, B as N, W as k, m as y } from "./app-CV_FJyjI.mjs";
2
+ import w from "pg";
3
+ const S = (h) => [
4
+ "checkpoints",
5
+ "checkpoint_blobs",
6
+ "checkpoint_migrations",
7
+ "checkpoint_writes"
8
+ ].reduce((t, i) => (t[i] = `${h}.${i}`, t), {}), b = (h) => {
9
+ const e = S(h);
10
+ return {
11
+ SELECT_SQL: `select
12
+ thread_id,
13
+ checkpoint,
14
+ checkpoint_ns,
15
+ checkpoint_id,
16
+ parent_checkpoint_id,
17
+ metadata,
18
+ (
19
+ select array_agg(array[bl.channel::bytea, bl.type::bytea, bl.blob])
20
+ from jsonb_each_text(checkpoint -> 'channel_versions')
21
+ inner join ${e.checkpoint_blobs} bl
22
+ on bl.thread_id = cp.thread_id
23
+ and bl.checkpoint_ns = cp.checkpoint_ns
24
+ and bl.channel = jsonb_each_text.key
25
+ and bl.version = jsonb_each_text.value
26
+ ) as channel_values,
27
+ (
28
+ select
29
+ array_agg(array[cw.task_id::text::bytea, cw.channel::bytea, cw.type::bytea, cw.blob] order by cw.task_id, cw.idx)
30
+ from ${e.checkpoint_writes} cw
31
+ where cw.thread_id = cp.thread_id
32
+ and cw.checkpoint_ns = cp.checkpoint_ns
33
+ and cw.checkpoint_id = cp.checkpoint_id
34
+ ) as pending_writes
35
+ from ${e.checkpoints} cp `,
36
+ SELECT_PENDING_SENDS_SQL: `select
37
+ checkpoint_id,
38
+ array_agg(array[cw.type::bytea, cw.blob] order by cw.task_id, cw.idx) as pending_sends
39
+ from ${e.checkpoint_writes} cw
40
+ where cw.thread_id = $1
41
+ and cw.checkpoint_id = any($2)
42
+ and cw.channel = '${l}'
43
+ group by cw.checkpoint_id
44
+ `,
45
+ UPSERT_CHECKPOINT_BLOBS_SQL: `INSERT INTO ${e.checkpoint_blobs} (thread_id, checkpoint_ns, channel, version, type, blob)
46
+ VALUES ($1, $2, $3, $4, $5, $6)
47
+ ON CONFLICT (thread_id, checkpoint_ns, channel, version) DO NOTHING
48
+ `,
49
+ UPSERT_CHECKPOINTS_SQL: `INSERT INTO ${e.checkpoints} (thread_id, checkpoint_ns, checkpoint_id, parent_checkpoint_id, checkpoint, metadata)
50
+ VALUES ($1, $2, $3, $4, $5, $6)
51
+ ON CONFLICT (thread_id, checkpoint_ns, checkpoint_id)
52
+ DO UPDATE SET
53
+ checkpoint = EXCLUDED.checkpoint,
54
+ metadata = EXCLUDED.metadata;
55
+ `,
56
+ UPSERT_CHECKPOINT_WRITES_SQL: `INSERT INTO ${e.checkpoint_writes} (thread_id, checkpoint_ns, checkpoint_id, task_id, idx, channel, type, blob)
57
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
58
+ ON CONFLICT (thread_id, checkpoint_ns, checkpoint_id, task_id, idx) DO UPDATE SET
59
+ channel = EXCLUDED.channel,
60
+ type = EXCLUDED.type,
61
+ blob = EXCLUDED.blob;
62
+ `,
63
+ INSERT_CHECKPOINT_WRITES_SQL: `INSERT INTO ${e.checkpoint_writes} (thread_id, checkpoint_ns, checkpoint_id, task_id, idx, channel, type, blob)
64
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
65
+ ON CONFLICT (thread_id, checkpoint_ns, checkpoint_id, task_id, idx) DO NOTHING
66
+ `,
67
+ DELETE_CHECKPOINTS_SQL: `DELETE FROM ${e.checkpoints} WHERE thread_id = $1`,
68
+ DELETE_CHECKPOINT_BLOBS_SQL: `DELETE FROM ${e.checkpoint_blobs} WHERE thread_id = $1`,
69
+ DELETE_CHECKPOINT_WRITES_SQL: `DELETE FROM ${e.checkpoint_writes} WHERE thread_id = $1`
70
+ };
71
+ }, O = (h) => {
72
+ const e = S(h);
73
+ return [
74
+ `CREATE TABLE IF NOT EXISTS ${e.checkpoint_migrations} (
75
+ v INTEGER PRIMARY KEY
76
+ );`,
77
+ `CREATE TABLE IF NOT EXISTS ${e.checkpoints} (
78
+ thread_id TEXT NOT NULL,
79
+ checkpoint_ns TEXT NOT NULL DEFAULT '',
80
+ checkpoint_id TEXT NOT NULL,
81
+ parent_checkpoint_id TEXT,
82
+ type TEXT,
83
+ checkpoint JSONB NOT NULL,
84
+ metadata JSONB NOT NULL DEFAULT '{}',
85
+ PRIMARY KEY (thread_id, checkpoint_ns, checkpoint_id)
86
+ );`,
87
+ `CREATE TABLE IF NOT EXISTS ${e.checkpoint_blobs} (
88
+ thread_id TEXT NOT NULL,
89
+ checkpoint_ns TEXT NOT NULL DEFAULT '',
90
+ channel TEXT NOT NULL,
91
+ version TEXT NOT NULL,
92
+ type TEXT NOT NULL,
93
+ blob BYTEA,
94
+ PRIMARY KEY (thread_id, checkpoint_ns, channel, version)
95
+ );`,
96
+ `CREATE TABLE IF NOT EXISTS ${e.checkpoint_writes} (
97
+ thread_id TEXT NOT NULL,
98
+ checkpoint_ns TEXT NOT NULL DEFAULT '',
99
+ checkpoint_id TEXT NOT NULL,
100
+ task_id TEXT NOT NULL,
101
+ idx INTEGER NOT NULL,
102
+ channel TEXT NOT NULL,
103
+ type TEXT,
104
+ blob BYTEA NOT NULL,
105
+ PRIMARY KEY (thread_id, checkpoint_ns, checkpoint_id, task_id, idx)
106
+ );`,
107
+ `ALTER TABLE ${e.checkpoint_blobs} ALTER COLUMN blob DROP not null;`
108
+ ];
109
+ }, C = { schema: "public" }, g = (h) => ({
110
+ ...h,
111
+ schema: h?.schema ?? C.schema
112
+ }), { Pool: I } = w;
113
+ var $ = class L extends N {
114
+ pool;
115
+ options;
116
+ SQL_STATEMENTS;
117
+ isSetup;
118
+ constructor(e, t, i) {
119
+ super(t), this.pool = e, this.isSetup = !1, this.options = g(i), this.SQL_STATEMENTS = b(this.options.schema);
120
+ }
121
+ /**
122
+ * Creates a new instance of PostgresSaver from a connection string.
123
+ *
124
+ * @param {string} connString - The connection string to connect to the Postgres database.
125
+ * @param {PostgresSaverOptions} [options] - Optional configuration object.
126
+ * @returns {PostgresSaver} A new instance of PostgresSaver.
127
+ *
128
+ * @example
129
+ * const connString = "postgresql://user:password@localhost:5432/db";
130
+ * const checkpointer = PostgresSaver.fromConnString(connString, {
131
+ * schema: "custom_schema" // defaults to "public"
132
+ * });
133
+ * await checkpointer.setup();
134
+ */
135
+ static fromConnString(e, t) {
136
+ const i = new I({ connectionString: e });
137
+ return new L(i, void 0, t);
138
+ }
139
+ /**
140
+ * Set up the checkpoint database asynchronously.
141
+ *
142
+ * This method creates the necessary tables in the Postgres database if they don't
143
+ * already exist and runs database migrations. It MUST be called directly by the user
144
+ * the first time checkpointer is used.
145
+ */
146
+ async setup() {
147
+ const e = await this.pool.connect(), t = S(this.options.schema);
148
+ try {
149
+ await e.query(`CREATE SCHEMA IF NOT EXISTS ${this.options.schema}`);
150
+ let i = -1;
151
+ const c = O(this.options.schema);
152
+ try {
153
+ const n = await e.query(`SELECT v FROM ${t.checkpoint_migrations} ORDER BY v DESC LIMIT 1`);
154
+ n.rows.length > 0 && (i = n.rows[0].v);
155
+ } catch (n) {
156
+ if (typeof n == "object" && n !== null && "code" in n && typeof n.code == "string" && n.code === "42P01") i = -1;
157
+ else throw n;
158
+ }
159
+ for (let n = i + 1; n < c.length; n += 1)
160
+ await e.query(c[n]), await e.query(`INSERT INTO ${t.checkpoint_migrations} (v) VALUES ($1)`, [n]);
161
+ } finally {
162
+ e.release();
163
+ }
164
+ }
165
+ async _loadCheckpoint(e, t) {
166
+ return {
167
+ ...e,
168
+ channel_values: await this._loadBlobs(t)
169
+ };
170
+ }
171
+ async _loadBlobs(e) {
172
+ if (!e || e.length === 0) return {};
173
+ const t = new TextDecoder(), i = await Promise.all(e.filter(([, c]) => t.decode(c) !== "empty").map(async ([c, n, a]) => [t.decode(c), await this.serde.loadsTyped(t.decode(n), a)]));
174
+ return Object.fromEntries(i);
175
+ }
176
+ async _loadMetadata(e) {
177
+ const [t, i] = await this.serde.dumpsTyped(e);
178
+ return this.serde.loadsTyped(t, i);
179
+ }
180
+ async _loadWrites(e) {
181
+ const t = new TextDecoder();
182
+ return e ? await Promise.all(e.map(async ([i, c, n, a]) => [
183
+ t.decode(i),
184
+ t.decode(c),
185
+ await this.serde.loadsTyped(t.decode(n), a)
186
+ ])) : [];
187
+ }
188
+ async _dumpBlobs(e, t, i, c) {
189
+ return Object.keys(c).length === 0 ? [] : Promise.all(Object.entries(c).map(async ([n, a]) => {
190
+ const [r, o] = n in i ? await this.serde.dumpsTyped(i[n]) : ["empty", null];
191
+ return [
192
+ e,
193
+ t,
194
+ n,
195
+ a.toString(),
196
+ r,
197
+ o ? new Uint8Array(o) : void 0
198
+ ];
199
+ }));
200
+ }
201
+ _dumpCheckpoint(e) {
202
+ const t = { ...e };
203
+ return "channel_values" in t && delete t.channel_values, t;
204
+ }
205
+ async _dumpMetadata(e) {
206
+ const [, t] = await this.serde.dumpsTyped(e);
207
+ return JSON.parse(new TextDecoder().decode(t).replace(/\0/g, ""));
208
+ }
209
+ async _dumpWrites(e, t, i, c, n) {
210
+ return Promise.all(n.map(async ([a, r], o) => {
211
+ const [d, p] = await this.serde.dumpsTyped(r);
212
+ return [
213
+ e,
214
+ t,
215
+ i,
216
+ c,
217
+ k[a] ?? o,
218
+ a,
219
+ d,
220
+ new Uint8Array(p)
221
+ ];
222
+ }));
223
+ }
224
+ /**
225
+ * Return WHERE clause predicates for a given list() config, filter, cursor.
226
+ *
227
+ * This method returns a tuple of a string and a tuple of values. The string
228
+ * is the parameterized WHERE clause predicate (including the WHERE keyword):
229
+ * "WHERE column1 = $1 AND column2 IS $2". The list of values contains the
230
+ * values for each of the corresponding parameters.
231
+ */
232
+ _searchWhere(e, t, i) {
233
+ const c = [], n = [];
234
+ return e?.configurable?.thread_id && (c.push(`thread_id = $${n.length + 1}`), n.push(e.configurable.thread_id)), e?.configurable?.checkpoint_ns !== void 0 && e?.configurable?.checkpoint_ns !== null && (c.push(`checkpoint_ns = $${n.length + 1}`), n.push(e.configurable.checkpoint_ns)), e?.configurable?.checkpoint_id && (c.push(`checkpoint_id = $${n.length + 1}`), n.push(e.configurable.checkpoint_id)), t && Object.keys(t).length > 0 && (c.push(`metadata @> $${n.length + 1}`), n.push(JSON.stringify(t))), i?.configurable?.checkpoint_id !== void 0 && (c.push(`checkpoint_id < $${n.length + 1}`), n.push(i.configurable.checkpoint_id)), [c.length > 0 ? `WHERE ${c.join(" AND ")}` : "", n];
235
+ }
236
+ /**
237
+ * Get a checkpoint tuple from the database.
238
+ * This method retrieves a checkpoint tuple from the Postgres database
239
+ * based on the provided config. If the config's configurable field contains
240
+ * a "checkpoint_id" key, the checkpoint with the matching thread_id and
241
+ * namespace is retrieved. Otherwise, the latest checkpoint for the given
242
+ * thread_id is retrieved.
243
+ * @param config The config to use for retrieving the checkpoint.
244
+ * @returns The retrieved checkpoint tuple, or undefined.
245
+ */
246
+ async getTuple(e) {
247
+ const { thread_id: t, checkpoint_ns: i = "", checkpoint_id: c } = e.configurable ?? {};
248
+ let n, a;
249
+ c ? (a = "WHERE thread_id = $1 AND checkpoint_ns = $2 AND checkpoint_id = $3", n = [
250
+ t,
251
+ i,
252
+ c
253
+ ]) : (a = "WHERE thread_id = $1 AND checkpoint_ns = $2 ORDER BY checkpoint_id DESC LIMIT 1", n = [t, i]);
254
+ const r = await this.pool.query(this.SQL_STATEMENTS.SELECT_SQL + a, n), [o] = r.rows;
255
+ if (o === void 0) return;
256
+ if (o.checkpoint.v < 4 && o.parent_checkpoint_id != null) {
257
+ const T = await this.pool.query(this.SQL_STATEMENTS.SELECT_PENDING_SENDS_SQL, [t, [o.parent_checkpoint_id]]), [u] = T.rows;
258
+ u != null && await this._migratePendingSends(u.pending_sends, o);
259
+ }
260
+ const d = await this._loadCheckpoint(o.checkpoint, o.channel_values), p = { configurable: {
261
+ thread_id: t,
262
+ checkpoint_ns: i,
263
+ checkpoint_id: o.checkpoint_id
264
+ } }, s = await this._loadMetadata(o.metadata), E = o.parent_checkpoint_id ? { configurable: {
265
+ thread_id: t,
266
+ checkpoint_ns: i,
267
+ checkpoint_id: o.parent_checkpoint_id
268
+ } } : void 0, _ = await this._loadWrites(o.pending_writes);
269
+ return {
270
+ config: p,
271
+ checkpoint: d,
272
+ metadata: s,
273
+ parentConfig: E,
274
+ pendingWrites: _
275
+ };
276
+ }
277
+ /**
278
+ * List checkpoints from the database.
279
+ *
280
+ * This method retrieves a list of checkpoint tuples from the Postgres database based
281
+ * on the provided config. The checkpoints are ordered by checkpoint ID in descending order (newest first).
282
+ */
283
+ async *list(e, t) {
284
+ const { filter: i, before: c, limit: n } = t ?? {}, [a, r] = this._searchWhere(e, i, c);
285
+ let o = `${this.SQL_STATEMENTS.SELECT_SQL}${a} ORDER BY checkpoint_id DESC`;
286
+ n !== void 0 && (o += ` LIMIT ${Number.parseInt(n.toString(), 10)}`);
287
+ const d = await this.pool.query(o, r), p = d.rows.filter((s) => s.checkpoint.v < 4 && s.parent_checkpoint_id != null);
288
+ if (p.length > 0) {
289
+ const s = await this.pool.query(this.SQL_STATEMENTS.SELECT_PENDING_SENDS_SQL, [p[0].thread_id, p.map((_) => _.parent_checkpoint_id)]), E = p.reduce((_, T) => (T.parent_checkpoint_id && (_[T.parent_checkpoint_id] ??= [], _[T.parent_checkpoint_id].push(T)), _), {});
290
+ for (const _ of s.rows) for (const T of E[_.checkpoint_id]) await this._migratePendingSends(_.pending_sends, T);
291
+ }
292
+ for (const s of d.rows) yield {
293
+ config: { configurable: {
294
+ thread_id: s.thread_id,
295
+ checkpoint_ns: s.checkpoint_ns,
296
+ checkpoint_id: s.checkpoint_id
297
+ } },
298
+ checkpoint: await this._loadCheckpoint(s.checkpoint, s.channel_values),
299
+ metadata: await this._loadMetadata(s.metadata),
300
+ parentConfig: s.parent_checkpoint_id ? { configurable: {
301
+ thread_id: s.thread_id,
302
+ checkpoint_ns: s.checkpoint_ns,
303
+ checkpoint_id: s.parent_checkpoint_id
304
+ } } : void 0,
305
+ pendingWrites: await this._loadWrites(s.pending_writes)
306
+ };
307
+ }
308
+ /** @internal */
309
+ async _migratePendingSends(e, t) {
310
+ const i = new TextEncoder(), c = new TextDecoder(), n = t, [a, r] = await this.serde.dumpsTyped(await Promise.all(e.map(([o, d]) => this.serde.loadsTyped(c.decode(o), d))));
311
+ n.channel_values ??= [], n.channel_values.push([
312
+ i.encode(l),
313
+ i.encode(a),
314
+ r
315
+ ]), n.checkpoint.channel_versions[l] = Object.keys(t.checkpoint.channel_versions).length > 0 ? y(...Object.values(t.checkpoint.channel_versions)) : this.getNextVersion(void 0);
316
+ }
317
+ /**
318
+ * Save a checkpoint to the database.
319
+ *
320
+ * This method saves a checkpoint to the Postgres database. The checkpoint is associated
321
+ * with the provided config and its parent config (if any).
322
+ * @param config
323
+ * @param checkpoint
324
+ * @param metadata
325
+ * @returns
326
+ */
327
+ async put(e, t, i, c) {
328
+ if (e.configurable === void 0) throw new Error('Missing "configurable" field in "config" param');
329
+ const { thread_id: n, checkpoint_ns: a = "", checkpoint_id: r } = e.configurable, o = { configurable: {
330
+ thread_id: n,
331
+ checkpoint_ns: a,
332
+ checkpoint_id: t.id
333
+ } }, d = await this.pool.connect(), p = this._dumpCheckpoint(t);
334
+ try {
335
+ await d.query("BEGIN");
336
+ const s = await this._dumpBlobs(n, a, t.channel_values, c);
337
+ for (const E of s) await d.query(this.SQL_STATEMENTS.UPSERT_CHECKPOINT_BLOBS_SQL, E);
338
+ await d.query(this.SQL_STATEMENTS.UPSERT_CHECKPOINTS_SQL, [
339
+ n,
340
+ a,
341
+ t.id,
342
+ r,
343
+ p,
344
+ await this._dumpMetadata(i)
345
+ ]), await d.query("COMMIT");
346
+ } catch (s) {
347
+ throw await d.query("ROLLBACK"), s;
348
+ } finally {
349
+ d.release();
350
+ }
351
+ return o;
352
+ }
353
+ /**
354
+ * Store intermediate writes linked to a checkpoint.
355
+ *
356
+ * This method saves intermediate writes associated with a checkpoint to the Postgres database.
357
+ * @param config Configuration of the related checkpoint.
358
+ * @param writes List of writes to store.
359
+ * @param taskId Identifier for the task creating the writes.
360
+ */
361
+ async putWrites(e, t, i) {
362
+ const c = t.every((r) => r[0] in k) ? this.SQL_STATEMENTS.UPSERT_CHECKPOINT_WRITES_SQL : this.SQL_STATEMENTS.INSERT_CHECKPOINT_WRITES_SQL, n = await this._dumpWrites(e.configurable?.thread_id, e.configurable?.checkpoint_ns, e.configurable?.checkpoint_id, i, t), a = await this.pool.connect();
363
+ try {
364
+ await a.query("BEGIN");
365
+ for await (const r of n) await a.query(c, r);
366
+ await a.query("COMMIT");
367
+ } catch (r) {
368
+ throw await a.query("ROLLBACK"), r;
369
+ } finally {
370
+ a.release();
371
+ }
372
+ }
373
+ async end() {
374
+ return this.pool.end();
375
+ }
376
+ async deleteThread(e) {
377
+ const t = await this.pool.connect();
378
+ try {
379
+ await t.query("BEGIN"), await t.query(this.SQL_STATEMENTS.DELETE_CHECKPOINT_BLOBS_SQL, [e]), await t.query(this.SQL_STATEMENTS.DELETE_CHECKPOINTS_SQL, [e]), await t.query(this.SQL_STATEMENTS.DELETE_CHECKPOINT_WRITES_SQL, [e]), await t.query("COMMIT");
380
+ } catch (i) {
381
+ throw await t.query("ROLLBACK"), i;
382
+ } finally {
383
+ t.release();
384
+ }
385
+ }
386
+ };
387
+ const A = async () => {
388
+ const h = $.fromConnString(process.env.DATABASE_URL);
389
+ return process.env.DATABASE_INIT === "true" && (console.debug("LG | Initializing postgres checkpoint"), await h.setup()), h;
390
+ };
391
+ export {
392
+ A as createPGCheckpoint
393
+ };
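
The first chunk above bundles a Postgres checkpoint saver: table-creation migrations, checkpoint/blob/write upserts, and a createPGCheckpoint factory driven by the DATABASE_URL and DATABASE_INIT environment variables. A minimal usage sketch of that exported factory follows; the import path is hypothetical (the chunk's real file name is not shown in this diff), and only methods visible in the bundled code (getTuple, list, deleteThread, end) are exercised, so treat it as an illustration rather than documented package API.

// Usage sketch (assumptions): DATABASE_URL points at a reachable Postgres
// instance, and "./checkpoint-chunk.mjs" stands in for the real chunk file name.
import { createPGCheckpoint } from "./checkpoint-chunk.mjs";

process.env.DATABASE_INIT = "true"; // the factory then runs setup() and creates the tables

const checkpointer = await createPGCheckpoint();

// Latest checkpoint tuple for a thread (undefined if nothing has been saved yet).
const tuple = await checkpointer.getTuple({
  configurable: { thread_id: "thread-1" },
});
console.log(tuple?.checkpoint, tuple?.metadata);

// Checkpoints for the thread, newest first (list() is an async generator).
for await (const item of checkpointer.list(
  { configurable: { thread_id: "thread-1" } },
  { limit: 10 },
)) {
  console.log(item.config.configurable.checkpoint_id);
}

// Clean up: delete all rows for the thread and close the connection pool.
await checkpointer.deleteThread("thread-1");
await checkpointer.end();
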
@@ -0,0 +1,193 @@
1
+ import { D as w, b as y, c as b, a as f, C as o } from "./migrator-BatO36Tk.mjs";
2
+ import { F as g, aS as p, ah as v } from "./sql-CJsUpKEQ.mjs";
3
+ const _ = /"/g;
4
+ class T extends w {
5
+ visitOrAction(e) {
6
+ this.append("or "), this.append(e.action);
7
+ }
8
+ getCurrentParameterPlaceholder() {
9
+ return "?";
10
+ }
11
+ getLeftExplainOptionsWrapper() {
12
+ return "";
13
+ }
14
+ getRightExplainOptionsWrapper() {
15
+ return "";
16
+ }
17
+ getLeftIdentifierWrapper() {
18
+ return '"';
19
+ }
20
+ getRightIdentifierWrapper() {
21
+ return '"';
22
+ }
23
+ getAutoIncrement() {
24
+ return "autoincrement";
25
+ }
26
+ sanitizeIdentifier(e) {
27
+ return e.replace(_, '""');
28
+ }
29
+ visitDefaultInsertValue(e) {
30
+ this.append("null");
31
+ }
32
+ }
33
+ class k {
34
+ #e;
35
+ constructor(e) {
36
+ this.#e = e;
37
+ }
38
+ async getSchemas() {
39
+ return [];
40
+ }
41
+ async getTables(e = { withInternalKyselyTables: !1 }) {
42
+ return await this.#a(e);
43
+ }
44
+ async getMetadata(e) {
45
+ return {
46
+ tables: await this.getTables(e)
47
+ };
48
+ }
49
+ #t(e, r) {
50
+ let n = e.selectFrom("sqlite_master").where("type", "in", ["table", "view"]).where("name", "not like", "sqlite_%").select(["name", "sql", "type"]).orderBy("name");
51
+ return r.withInternalKyselyTables || (n = n.where("name", "!=", y).where("name", "!=", b)), n;
52
+ }
53
+ async #a(e) {
54
+ const r = await this.#t(this.#e, e).execute(), n = await this.#e.with("table_list", (i) => this.#t(i, e)).selectFrom([
55
+ "table_list as tl",
56
+ g`pragma_table_info(tl.name)`.as("p")
57
+ ]).select([
58
+ "tl.name as table",
59
+ "p.cid",
60
+ "p.name",
61
+ "p.type",
62
+ "p.notnull",
63
+ "p.dflt_value",
64
+ "p.pk"
65
+ ]).orderBy("tl.name").orderBy("p.cid").execute(), s = {};
66
+ for (const i of n)
67
+ s[i.table] ??= [], s[i.table].push(i);
68
+ return r.map(({ name: i, sql: m, type: h }) => {
69
+ let l = m?.split(/[\(\),]/)?.find((a) => a.toLowerCase().includes("autoincrement"))?.trimStart()?.split(/\s+/)?.[0]?.replace(/["`]/g, "");
70
+ const u = s[i] ?? [];
71
+ if (!l) {
72
+ const a = u.filter((d) => d.pk > 0);
73
+ a.length === 1 && a[0].type.toLowerCase() === "integer" && (l = a[0].name);
74
+ }
75
+ return {
76
+ name: i,
77
+ isView: h === "view",
78
+ columns: u.map((a) => ({
79
+ name: a.name,
80
+ dataType: a.type,
81
+ isNullable: !a.notnull,
82
+ isAutoIncrementing: a.name === l,
83
+ hasDefaultValue: a.dflt_value != null,
84
+ comment: void 0
85
+ }))
86
+ };
87
+ });
88
+ }
89
+ }
90
+ class C extends f {
91
+ get supportsTransactionalDdl() {
92
+ return !1;
93
+ }
94
+ get supportsReturning() {
95
+ return !0;
96
+ }
97
+ async acquireMigrationLock(e, r) {
98
+ }
99
+ async releaseMigrationLock(e, r) {
100
+ }
101
+ }
102
+ var A = class {
103
+ /**
104
+ * Base class that implements {@link Dialect}
105
+ * @param create function that create {@link Driver}
106
+ */
107
+ constructor(t) {
108
+ this.createDriver = t;
109
+ }
110
+ createDriver;
111
+ createQueryCompiler() {
112
+ return new T();
113
+ }
114
+ createAdapter() {
115
+ return new C();
116
+ }
117
+ createIntrospector(t) {
118
+ return new k(t);
119
+ }
120
+ }, S = class {
121
+ promise;
122
+ resolve;
123
+ async lock() {
124
+ for (; this.promise; )
125
+ await this.promise;
126
+ this.promise = new Promise((t) => {
127
+ this.resolve = t;
128
+ });
129
+ }
130
+ unlock() {
131
+ const t = this.resolve;
132
+ this.promise = void 0, this.resolve = void 0, t?.();
133
+ }
134
+ };
135
+ async function c(t, e, r, n, s) {
136
+ await r.executeQuery(
137
+ s(
138
+ p.createWithChildren([
139
+ p.createWithSql(`${t} `),
140
+ v.create(n)
141
+ // ensures savepointName gets sanitized
142
+ ]),
143
+ e()
144
+ )
145
+ );
146
+ }
147
+ var D = class {
148
+ mutex = new S();
149
+ conn;
150
+ savepoint;
151
+ releaseSavepoint;
152
+ rollbackToSavepoint;
153
+ init;
154
+ /**
155
+ * Base abstract class that implements {@link Driver}
156
+ *
157
+ * You **MUST** assign `this.conn` in `init` and implement `destroy` method
158
+ */
159
+ constructor(t) {
160
+ this.init = () => import("./index-DS5HVciX.mjs").then(({ createQueryId: e }) => {
161
+ e && (this.savepoint = c.bind(null, "savepoint", e), this.releaseSavepoint = c.bind(null, "release", e), this.rollbackToSavepoint = c.bind(null, "rollback to", e));
162
+ }).then(t);
163
+ }
164
+ async acquireConnection() {
165
+ return await this.mutex.lock(), this.conn;
166
+ }
167
+ async beginTransaction(t) {
168
+ await t.executeQuery(o.raw("begin"));
169
+ }
170
+ async commitTransaction(t) {
171
+ await t.executeQuery(o.raw("commit"));
172
+ }
173
+ async rollbackTransaction(t) {
174
+ await t.executeQuery(o.raw("rollback"));
175
+ }
176
+ async releaseConnection() {
177
+ this.mutex.unlock();
178
+ }
179
+ };
180
+ function q(t) {
181
+ return async (e, r, n) => {
182
+ const s = await t.all(r, n);
183
+ return e || s.length ? { rows: s } : { rows: [], ...await t.run("select 1") };
184
+ };
185
+ }
186
+ export {
187
+ A as B,
188
+ T as S,
189
+ k as a,
190
+ C as b,
191
+ D as c,
192
+ q as d
193
+ };
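
The second chunk bundles a SQLite-flavoured dialect scaffold in the Kysely style: a query compiler (exported as S), a schema introspector (a), an adapter (b), a dialect base class (B) constructed with a createDriver factory, an abstract driver base (c) whose JSDoc requires subclasses to assign this.conn in init and to implement destroy, and a row-mapping helper (d). The sketch below shows how those pieces might be wired to a better-sqlite3 handle; it rests on assumptions (the mangled exports keep the roles just described, "./sqlite-chunk.mjs" stands in for the real chunk file name, and SketchDriver is an invented class), so it is illustrative only.

// Wiring sketch (assumptions noted above); none of these names come from the package itself.
import Database from "better-sqlite3";
import { B as SqliteDialectBase, c as SqliteDriverBase } from "./sqlite-chunk.mjs";

class SketchDriver extends SqliteDriverBase {
  #db;
  constructor(filename) {
    // The base class takes the init callback as its constructor argument;
    // per the bundled JSDoc, this.conn must be assigned inside init.
    super(() => {
      this.#db = new Database(filename);
      this.conn = {
        executeQuery: async (compiledQuery) => {
          const stmt = this.#db.prepare(compiledQuery.sql);
          if (stmt.reader) {
            // Statement returns rows: read them all.
            return { rows: stmt.all(...compiledQuery.parameters) };
          }
          // Mutation: report affected-row count, no rows.
          const info = stmt.run(...compiledQuery.parameters);
          return { rows: [], numAffectedRows: BigInt(info.changes) };
        },
      };
    });
  }
  // destroy is the one method the base class leaves to subclasses.
  async destroy() {
    this.#db?.close();
  }
}

// The dialect base supplies the compiler, adapter, and introspector itself;
// only the driver factory is provided here.
const dialect = new SqliteDialectBase(() => new SketchDriver("app.db"));
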