@rotorsoft/act-pg 0.4.7 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -24,8 +24,7 @@ var DEFAULT_CONFIG = {
  user: "postgres",
  password: "postgres",
  schema: "public",
- table: "events",
- leaseMillis: 3e4
+ table: "events"
  };
  var PostgresStore = class {
  _pool;
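Note: `leaseMillis` is dropped from the store configuration in 0.5.0; the lease duration now travels with each `lease(leases, millis)` call (see the lease hunk further down). A minimal construction sketch under that assumption, with `my_schema` as a purely illustrative name:

```ts
import { PostgresStore } from "@rotorsoft/act-pg";

// 0.4.x: new PostgresStore({ schema: "my_schema", leaseMillis: 30_000 })
// 0.5.0: config covers only connection, schema, and table
const store = new PostgresStore({ schema: "my_schema", table: "events" });
await store.seed(); // creates the events and streams tables
```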
@@ -91,11 +90,13 @@ var PostgresStore = class {
  await client.query(
  `CREATE TABLE IF NOT EXISTS ${this._fqs} (
  stream varchar(100) COLLATE pg_catalog."default" PRIMARY KEY,
+ source varchar(100) COLLATE pg_catalog."default",
  at int NOT NULL DEFAULT -1,
  retry smallint NOT NULL DEFAULT 0,
  blocked boolean NOT NULL DEFAULT false,
+ error text,
  leased_at int,
- leased_by uuid,
+ leased_by text,
  leased_until timestamptz
  ) TABLESPACE pg_default;`
  );
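The streams table gains a `source` column and an `error` column, and `leased_by` widens from `uuid` to `text`. A hypothetical row shape inferred from this DDL (the package does not export such a type):

```ts
// Inferred shape of a "<table>_streams" row after this change.
type StreamRow = {
  stream: string;           // primary key
  source: string | null;    // new in 0.5.0: originating stream for the reaction
  at: number;               // last acked watermark, defaults to -1
  retry: number;            // bumped by lease() when millis > 0
  blocked: boolean;         // set by the new block() method
  error: string | null;     // new in 0.5.0: last error recorded by block()
  leased_at: number | null;
  leased_by: string | null; // was uuid in 0.4.x, now any text id
  leased_until: Date | null;
};
```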
@@ -143,13 +144,12 @@ var PostgresStore = class {
  *
  * @param callback Function called for each event found
  * @param query (Optional) Query filter (stream, names, before, after, etc.)
- * @param withSnaps (Optional) If true, includes only events after the last snapshot
  * @returns The number of events found
  *
  * @example
  * await store.query((event) => console.log(event), { stream: "A" });
  */
- async query(callback, query, withSnaps = false) {
+ async query(callback, query) {
  const {
  stream,
  names,
@@ -159,17 +159,13 @@ var PostgresStore = class {
  created_before,
  created_after,
  backward,
- correlation
+ correlation,
+ with_snaps = false
  } = query || {};
  let sql = `SELECT * FROM ${this._fqt}`;
  const conditions = [];
  const values = [];
- if (withSnaps) {
- conditions.push(
- `id>=COALESCE((SELECT id FROM ${this._fqt} WHERE stream='${stream}' AND name='${SNAP_EVENT}' ORDER BY id DESC LIMIT 1), 0)`
- );
- conditions.push(`stream='${stream}'`);
- } else if (query) {
+ if (query) {
  if (typeof after !== "undefined") {
  values.push(after);
  conditions.push(`id>$${values.length}`);
@@ -178,7 +174,7 @@ var PostgresStore = class {
  }
  if (stream) {
  values.push(stream);
- conditions.push(`stream=$${values.length}`);
+ conditions.push(`stream ~ $${values.length}`);
  }
  if (names && names.length) {
  values.push(names);
@@ -200,6 +196,9 @@ var PostgresStore = class {
  values.push(correlation);
  conditions.push(`meta->>'correlation'=$${values.length}`);
  }
+ if (!with_snaps) {
+ conditions.push(`name <> '${SNAP_EVENT}'`);
+ }
  }
  if (conditions.length) {
  sql += " WHERE " + conditions.join(" AND ");
@@ -238,6 +237,7 @@ var PostgresStore = class {
  version = last.rowCount ? last.rows[0].version : -1;
  if (typeof expectedVersion === "number" && version !== expectedVersion)
  throw new ConcurrencyError(
+ stream,
  version,
  msgs,
  expectedVersion
@@ -265,6 +265,7 @@ var PostgresStore = class {
  ).catch((error) => {
  logger.error(error);
  throw new ConcurrencyError(
+ stream,
  version,
  msgs,
  expectedVersion || -1
@@ -280,80 +281,82 @@ var PostgresStore = class {
  }
  }
  /**
- * Fetch a batch of events and streams for processing (drain cycle).
- *
- * @param limit The maximum number of events to fetch
- * @returns An object with arrays of streams and events
+ * Polls the store for unblocked streams needing processing, ordered by lease watermark ascending.
+ * @param limit - Maximum number of streams to poll.
+ * @returns The polled streams.
  */
- async fetch(limit) {
+ async poll(limit) {
  const { rows } = await this._pool.query(
  `
  SELECT stream, at
  FROM ${this._fqs}
- WHERE blocked=false
+ WHERE blocked=false AND (leased_by IS NULL OR leased_until <= NOW())
  ORDER BY at ASC
  LIMIT $1::integer
  `,
  [limit]
  );
- const after = rows.length ? rows.reduce((min, r) => Math.min(min, r.at), Number.MAX_SAFE_INTEGER) : -1;
- const events = [];
- await this.query((e) => e.name !== SNAP_EVENT && events.push(e), {
- after,
- limit
- });
- return { streams: rows.map(({ stream }) => stream), events };
+ return rows;
  }
  /**
  * Lease streams for reaction processing, marking them as in-progress.
  *
- * @param leases Array of lease objects (stream, at, etc.)
+ * @param leases - Lease requests for streams, including end-of-lease watermark, lease holder, and source stream.
+ * @param millis - Lease duration in milliseconds.
  * @returns Array of leased objects with updated lease info
  */
- async lease(leases) {
- const { by, at } = leases.at(0);
- const streams = leases.map(({ stream }) => stream);
+ async lease(leases, millis) {
  const client = await this._pool.connect();
  try {
  await client.query("BEGIN");
  await client.query(
  `
- INSERT INTO ${this._fqs} (stream)
- SELECT UNNEST($1::text[])
+ INSERT INTO ${this._fqs} (stream, source)
+ SELECT lease->>'stream', lease->>'source'
+ FROM jsonb_array_elements($1::jsonb) AS lease
  ON CONFLICT (stream) DO NOTHING
  `,
- [streams]
+ [JSON.stringify(leases)]
  );
  const { rows } = await client.query(
  `
- WITH free AS (
- SELECT * FROM ${this._fqs}
- WHERE stream = ANY($1::text[]) AND (leased_by IS NULL OR leased_until <= NOW())
- FOR UPDATE
- )
- UPDATE ${this._fqs} U
- SET
- leased_by = $2::uuid,
- leased_at = $3::integer,
- leased_until = NOW() + ($4::integer || ' milliseconds')::interval
- FROM free
- WHERE U.stream = free.stream
- RETURNING U.stream, U.leased_at, U.retry
- `,
- [streams, by, at, this.config.leaseMillis]
+ WITH input AS (
+ SELECT * FROM jsonb_to_recordset($1::jsonb)
+ AS x(stream text, at int, by text)
+ ), free AS (
+ SELECT s.stream FROM ${this._fqs} s
+ JOIN input i ON s.stream = i.stream
+ WHERE s.leased_by IS NULL OR s.leased_until <= NOW()
+ FOR UPDATE
+ )
+ UPDATE ${this._fqs} s
+ SET
+ leased_by = i.by,
+ leased_at = i.at,
+ leased_until = NOW() + ($2::integer || ' milliseconds')::interval,
+ retry = CASE WHEN $2::integer > 0 THEN s.retry + 1 ELSE s.retry END
+ FROM input i, free f
+ WHERE s.stream = f.stream AND s.stream = i.stream
+ RETURNING s.stream, s.source, s.leased_at, s.leased_by, s.leased_until, s.retry
+ `,
+ [JSON.stringify(leases), millis]
  );
  await client.query("COMMIT");
- return rows.map(({ stream, leased_at, retry }) => ({
- stream,
- by,
- at: leased_at,
- retry,
- block: false
- }));
+ return rows.map(
+ ({ stream, source, leased_at, leased_by, leased_until, retry }) => ({
+ stream,
+ source: source ?? void 0,
+ at: leased_at,
+ by: leased_by,
+ until: new Date(leased_until),
+ retry
+ })
+ );
  } catch (error) {
  await client.query("ROLLBACK").catch(() => {
  });
- throw error;
+ logger.error(error);
+ return [];
  } finally {
  client.release();
  }
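Taken together, the old `fetch` + `lease` pair becomes a poll → lease → process → ack cycle, with the lease duration supplied per call rather than read from `config.leaseMillis`. A rough sketch of one cycle under these assumptions: one worker id, events read via `query` with `after` set to each lease's watermark, and lease requests carrying only the fields the SQL above reads (the actual `Lease` type from `@rotorsoft/act` may require more):

```ts
import { randomUUID } from "node:crypto";
import { PostgresStore } from "@rotorsoft/act-pg";

const store = new PostgresStore();
const worker = randomUUID();

// 1. Unblocked streams whose lease is free or expired, oldest watermark first.
const polled = await store.poll(10);

// 2. Lease them for 30s; only streams still free are returned.
const leased = await store.lease(
  polled.map(({ stream, at }) => ({ stream, at, by: worker })),
  30_000
);

// 3. Process events past each watermark, then ack with the new watermark.
for (const lease of leased) {
  let at = lease.at;
  await store.query((e) => { /* react to the event */ at = e.id; }, {
    stream: `^${lease.stream}$`, // exact match, since stream is now a regex filter
    after: lease.at,
  });
  await store.ack([{ ...lease, at }]);
}
```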
@@ -361,33 +364,86 @@ var PostgresStore = class {
  /**
  * Acknowledge and release leases after processing, updating stream positions.
  *
- * @param leases Array of lease objects to acknowledge
- * @returns Promise that resolves when leases are acknowledged
+ * @param leases - Leases to acknowledge, including last processed watermark and lease holder.
+ * @returns Acked leases.
  */
  async ack(leases) {
  const client = await this._pool.connect();
  try {
  await client.query("BEGIN");
- for (const { stream, by, at, retry, block } of leases) {
- await client.query(
- `UPDATE ${this._fqs}
- SET
- at = $3::integer,
- retry = $4::integer,
- blocked = $5::boolean,
- leased_by = NULL,
- leased_at = NULL,
- leased_until = NULL
- WHERE
- stream = $1::text
- AND leased_by = $2::uuid`,
- [stream, by, at, retry, block]
- );
- }
+ const { rows } = await client.query(
+ `
+ WITH input AS (
+ SELECT * FROM jsonb_to_recordset($1::jsonb)
+ AS x(stream text, by text, at int)
+ )
+ UPDATE ${this._fqs} AS s
+ SET
+ at = i.at,
+ retry = -1,
+ leased_by = NULL,
+ leased_at = NULL,
+ leased_until = NULL
+ FROM input i
+ WHERE s.stream = i.stream AND s.leased_by = i.by
+ RETURNING s.stream, s.source, s.at, s.retry
+ `,
+ [JSON.stringify(leases)]
+ );
+ await client.query("COMMIT");
+ return rows.map((row) => ({
+ stream: row.stream,
+ source: row.source ?? void 0,
+ at: row.at,
+ by: "",
+ retry: row.retry
+ }));
+ } catch (error) {
+ await client.query("ROLLBACK").catch(() => {
+ });
+ logger.error(error);
+ return [];
+ } finally {
+ client.release();
+ }
+ }
+ /**
+ * Block a stream for processing after failing to process and reaching max retries with blocking enabled.
+ * @param leases - Leases to block, including lease holder and last error message.
+ * @returns Blocked leases.
+ */
+ async block(leases) {
+ const client = await this._pool.connect();
+ try {
+ await client.query("BEGIN");
+ const { rows } = await client.query(
+ `
+ WITH input AS (
+ SELECT * FROM jsonb_to_recordset($1::jsonb)
+ AS x(stream text, by text, error text)
+ )
+ UPDATE ${this._fqs} AS s
+ SET blocked = true, error = i.error
+ FROM input i
+ WHERE s.stream = i.stream AND s.leased_by = i.by AND s.blocked = false
+ RETURNING s.stream, s.source, s.at, i.by, s.retry, s.error
+ `,
+ [JSON.stringify(leases)]
+ );
  await client.query("COMMIT");
- } catch {
+ return rows.map((row) => ({
+ stream: row.stream,
+ source: row.source ?? void 0,
+ at: row.at,
+ by: row.by,
+ retry: row.retry,
+ error: row.error ?? ""
+ }));
+ } catch (error) {
  await client.query("ROLLBACK").catch(() => {
  });
+ logger.error(error);
+ return [];
  } finally {
  client.release();
  }
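`ack` and `block` now take batches through `jsonb_to_recordset`, return the affected leases instead of resolving to void, and no longer throw on failure (errors are logged and an empty array is returned). `ack` resets `retry` to -1 itself, and blocking is split into the new `block` method, which records the last error message in the new `error` column. Continuing the sketch above (`leased` from `lease`, `at` from processing), with a caller-side retry policy that is purely illustrative:

```ts
const MAX_RETRIES = 3; // hypothetical policy; the store itself enforces no maximum

for (const lease of leased) {
  let at = lease.at;
  try {
    // ...process events for lease.stream, advancing `at`...
    await store.ack([{ ...lease, at }]);
  } catch (err) {
    if (lease.retry >= MAX_RETRIES) {
      // Give up: mark the stream blocked and persist the error text.
      await store.block([{ ...lease, error: String(err) }]);
    }
    // Otherwise do nothing: the lease expires, poll() picks the stream up
    // again, and the next lease() bumps retry.
  }
}
```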
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/PostgresStore.ts","../src/utils.ts"],"sourcesContent":["import type {\n Committed,\n EventMeta,\n Lease,\n Message,\n Query,\n Schemas,\n Store,\n} from \"@rotorsoft/act\";\nimport { ConcurrencyError, SNAP_EVENT, logger } from \"@rotorsoft/act\";\nimport pg from \"pg\";\nimport { dateReviver } from \"./utils.js\";\n\nconst { Pool, types } = pg;\ntypes.setTypeParser(types.builtins.JSONB, (val) =>\n JSON.parse(val, dateReviver)\n);\n\ntype Config = Readonly<{\n host: string;\n port: number;\n database: string;\n user: string;\n password: string;\n schema: string;\n table: string;\n leaseMillis: number;\n}>;\n\nconst DEFAULT_CONFIG: Config = {\n host: \"localhost\",\n port: 5432,\n database: \"postgres\",\n user: \"postgres\",\n password: \"postgres\",\n schema: \"public\",\n table: \"events\",\n leaseMillis: 30_000,\n};\n\n/**\n * @category Adapters\n * @see Store\n *\n * PostgresStore is a production-ready event store adapter for Act, using PostgreSQL as the backend.\n *\n * - Supports event sourcing, leasing, snapshots, and concurrency control.\n * - Designed for high-throughput, scalable, and reliable event storage.\n * - Implements the Act Store interface.\n *\n * @example\n * import { PostgresStore } from \"@act/pg\";\n * const store = new PostgresStore({ schema: \"my_schema\", table: \"events\" });\n * await store.seed();\n *\n * @see https://github.com/rotorsoft/act-root\n */\nexport class PostgresStore implements Store {\n private _pool;\n readonly config: Config;\n private _fqt: string;\n private _fqs: string;\n\n /**\n * Create a new PostgresStore instance.\n * @param config Partial configuration (host, port, user, password, schema, table, etc.)\n */\n constructor(config: Partial<Config> = {}) {\n this.config = { ...DEFAULT_CONFIG, ...config };\n this._pool = new Pool(this.config);\n this._fqt = `\"${this.config.schema}\".\"${this.config.table}\"`;\n this._fqs = `\"${this.config.schema}\".\"${this.config.table}_streams\"`;\n }\n\n /**\n * Dispose of the store and close all database connections.\n * @returns Promise that resolves when all connections are closed\n */\n async dispose() {\n await this._pool.end();\n }\n\n /**\n * Seed the database with required tables, indexes, and schema for event storage.\n * @returns Promise that resolves when seeding is complete\n * @throws Error if seeding fails\n */\n async seed() {\n const client = await this._pool.connect();\n\n try {\n await client.query(\"BEGIN\");\n\n // Create schema\n await client.query(\n `CREATE SCHEMA IF NOT EXISTS \"${this.config.schema}\";`\n );\n\n // Events table\n await client.query(\n `CREATE TABLE IF NOT EXISTS ${this._fqt} (\n id serial PRIMARY KEY,\n name varchar(100) COLLATE pg_catalog.\"default\" NOT NULL,\n data jsonb,\n stream varchar(100) COLLATE pg_catalog.\"default\" NOT NULL,\n version int NOT NULL,\n created timestamptz NOT NULL DEFAULT now(),\n meta jsonb\n ) TABLESPACE pg_default;`\n );\n\n // Indexes on events\n await client.query(\n `CREATE UNIQUE INDEX IF NOT EXISTS \"${this.config.table}_stream_ix\" \n ON ${this._fqt} (stream COLLATE pg_catalog.\"default\", version);`\n );\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_name_ix\" \n ON ${this._fqt} (name COLLATE pg_catalog.\"default\");`\n );\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_created_id_ix\" \n ON ${this._fqt} (created, id);`\n );\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_correlation_ix\" \n ON ${this._fqt} ((meta 
->> 'correlation') COLLATE pg_catalog.\"default\");`\n );\n\n // Streams table\n await client.query(\n `CREATE TABLE IF NOT EXISTS ${this._fqs} (\n stream varchar(100) COLLATE pg_catalog.\"default\" PRIMARY KEY,\n at int NOT NULL DEFAULT -1,\n retry smallint NOT NULL DEFAULT 0,\n blocked boolean NOT NULL DEFAULT false,\n leased_at int,\n leased_by uuid,\n leased_until timestamptz\n ) TABLESPACE pg_default;`\n );\n\n // Index for fetching streams\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_streams_fetch_ix\" \n ON ${this._fqs} (blocked, at);`\n );\n\n await client.query(\"COMMIT\");\n logger.info(\n `Seeded schema \"${this.config.schema}\" with table \"${this.config.table}\"`\n );\n } catch (error) {\n await client.query(\"ROLLBACK\");\n logger.error(\"Failed to seed store:\", error);\n throw error;\n } finally {\n client.release();\n }\n }\n\n /**\n * Drop all tables and schema created by the store (for testing or cleanup).\n * @returns Promise that resolves when the schema is dropped\n */\n async drop() {\n await this._pool.query(\n `\n DO $$\n BEGIN\n IF EXISTS (SELECT 1 FROM information_schema.schemata\n WHERE schema_name = '${this.config.schema}'\n ) THEN\n EXECUTE 'DROP TABLE IF EXISTS ${this._fqt}';\n EXECUTE 'DROP TABLE IF EXISTS ${this._fqs}';\n IF '${this.config.schema}' <> 'public' THEN\n EXECUTE 'DROP SCHEMA \"${this.config.schema}\" CASCADE';\n END IF;\n END IF;\n END\n $$;\n `\n );\n }\n\n /**\n * Query events from the store, optionally filtered by stream, event name, time, etc.\n *\n * @param callback Function called for each event found\n * @param query (Optional) Query filter (stream, names, before, after, etc.)\n * @param withSnaps (Optional) If true, includes only events after the last snapshot\n * @returns The number of events found\n *\n * @example\n * await store.query((event) => console.log(event), { stream: \"A\" });\n */\n async query<E extends Schemas>(\n callback: (event: Committed<E, keyof E>) => void,\n query?: Query,\n withSnaps = false\n ) {\n const {\n stream,\n names,\n before,\n after,\n limit,\n created_before,\n created_after,\n backward,\n correlation,\n } = query || {};\n\n let sql = `SELECT * FROM ${this._fqt}`;\n const conditions: string[] = [];\n const values: any[] = [];\n\n if (withSnaps) {\n conditions.push(\n `id>=COALESCE((SELECT id FROM ${this._fqt} WHERE stream='${stream}' AND name='${SNAP_EVENT}' ORDER BY id DESC LIMIT 1), 0)`\n );\n conditions.push(`stream='${stream}'`);\n } else if (query) {\n if (typeof after !== \"undefined\") {\n values.push(after);\n conditions.push(`id>$${values.length}`);\n } else {\n conditions.push(\"id>-1\");\n }\n if (stream) {\n values.push(stream);\n conditions.push(`stream=$${values.length}`);\n }\n if (names && names.length) {\n values.push(names);\n conditions.push(`name = ANY($${values.length})`);\n }\n if (before) {\n values.push(before);\n conditions.push(`id<$${values.length}`);\n }\n if (created_after) {\n values.push(created_after.toISOString());\n conditions.push(`created>$${values.length}`);\n }\n if (created_before) {\n values.push(created_before.toISOString());\n conditions.push(`created<$${values.length}`);\n }\n if (correlation) {\n values.push(correlation);\n conditions.push(`meta->>'correlation'=$${values.length}`);\n }\n }\n if (conditions.length) {\n sql += \" WHERE \" + conditions.join(\" AND \");\n }\n sql += ` ORDER BY id ${backward ? 
\"DESC\" : \"ASC\"}`;\n if (limit) {\n values.push(limit);\n sql += ` LIMIT $${values.length}`;\n }\n\n const result = await this._pool.query<Committed<E, keyof E>>(sql, values);\n for (const row of result.rows) callback(row);\n\n return result.rowCount ?? 0;\n }\n\n /**\n * Commit new events to the store for a given stream, with concurrency control.\n *\n * @param stream The stream name\n * @param msgs Array of messages (event name and data)\n * @param meta Event metadata (correlation, causation, etc.)\n * @param expectedVersion (Optional) Expected stream version for concurrency control\n * @returns Array of committed events\n * @throws ConcurrencyError if the expected version does not match\n */\n async commit<E extends Schemas>(\n stream: string,\n msgs: Message<E, keyof E>[],\n meta: EventMeta,\n expectedVersion?: number\n ) {\n if (msgs.length === 0) return [];\n const client = await this._pool.connect();\n let version = -1;\n try {\n await client.query(\"BEGIN\");\n\n const last = await client.query<Committed<E, keyof E>>(\n `SELECT version\n FROM ${this._fqt}\n WHERE stream=$1 ORDER BY version DESC LIMIT 1`,\n [stream]\n );\n version = last.rowCount ? last.rows[0].version : -1;\n if (typeof expectedVersion === \"number\" && version !== expectedVersion)\n throw new ConcurrencyError(\n version,\n msgs as unknown as Message<Schemas, string>[],\n expectedVersion\n );\n\n const committed = await Promise.all(\n msgs.map(async ({ name, data }) => {\n version++;\n const sql = `\n INSERT INTO ${this._fqt}(name, data, stream, version, meta) \n VALUES($1, $2, $3, $4, $5) RETURNING *`;\n const vals = [name, data, stream, version, meta];\n const { rows } = await client.query<Committed<E, keyof E>>(sql, vals);\n return rows.at(0)!;\n })\n );\n\n await client\n .query(\n `\n NOTIFY \"${this.config.table}\", '${JSON.stringify({\n operation: \"INSERT\",\n id: committed[0].name,\n position: committed[0].id,\n })}';\n COMMIT;\n `\n )\n .catch((error) => {\n logger.error(error);\n throw new ConcurrencyError(\n version,\n msgs as unknown as Message<Schemas, string>[],\n expectedVersion || -1\n );\n });\n return committed;\n } catch (error) {\n await client.query(\"ROLLBACK\").catch(() => {});\n throw error;\n } finally {\n client.release();\n }\n }\n\n /**\n * Fetch a batch of events and streams for processing (drain cycle).\n *\n * @param limit The maximum number of events to fetch\n * @returns An object with arrays of streams and events\n */\n async fetch<E extends Schemas>(limit: number) {\n const { rows } = await this._pool.query<{ stream: string; at: number }>(\n `\n SELECT stream, at\n FROM ${this._fqs}\n WHERE blocked=false\n ORDER BY at ASC\n LIMIT $1::integer\n `,\n [limit]\n );\n\n const after = rows.length\n ? 
rows.reduce((min, r) => Math.min(min, r.at), Number.MAX_SAFE_INTEGER)\n : -1;\n\n const events: Committed<E, keyof E>[] = [];\n await this.query<E>((e) => e.name !== SNAP_EVENT && events.push(e), {\n after,\n limit,\n });\n return { streams: rows.map(({ stream }) => stream), events };\n }\n\n /**\n * Lease streams for reaction processing, marking them as in-progress.\n *\n * @param leases Array of lease objects (stream, at, etc.)\n * @returns Array of leased objects with updated lease info\n */\n async lease(leases: Lease[]) {\n const { by, at } = leases.at(0)!;\n const streams = leases.map(({ stream }) => stream);\n const client = await this._pool.connect();\n\n try {\n await client.query(\"BEGIN\");\n // insert new streams\n await client.query(\n `\n INSERT INTO ${this._fqs} (stream)\n SELECT UNNEST($1::text[])\n ON CONFLICT (stream) DO NOTHING\n `,\n [streams]\n );\n // set leases\n const { rows } = await client.query<{\n stream: string;\n leased_at: number;\n retry: number;\n }>(\n `\n WITH free AS (\n SELECT * FROM ${this._fqs} \n WHERE stream = ANY($1::text[]) AND (leased_by IS NULL OR leased_until <= NOW())\n FOR UPDATE\n )\n UPDATE ${this._fqs} U\n SET\n leased_by = $2::uuid,\n leased_at = $3::integer,\n leased_until = NOW() + ($4::integer || ' milliseconds')::interval\n FROM free\n WHERE U.stream = free.stream\n RETURNING U.stream, U.leased_at, U.retry\n `,\n [streams, by, at, this.config.leaseMillis]\n );\n await client.query(\"COMMIT\");\n\n return rows.map(({ stream, leased_at, retry }) => ({\n stream,\n by,\n at: leased_at,\n retry,\n block: false,\n }));\n } catch (error) {\n await client.query(\"ROLLBACK\").catch(() => {});\n throw error;\n } finally {\n client.release();\n }\n }\n\n /**\n * Acknowledge and release leases after processing, updating stream positions.\n *\n * @param leases Array of lease objects to acknowledge\n * @returns Promise that resolves when leases are acknowledged\n */\n async ack(leases: Lease[]) {\n const client = await this._pool.connect();\n\n try {\n await client.query(\"BEGIN\");\n for (const { stream, by, at, retry, block } of leases) {\n await client.query(\n `UPDATE ${this._fqs}\n SET\n at = $3::integer,\n retry = $4::integer,\n blocked = $5::boolean,\n leased_by = NULL,\n leased_at = NULL,\n leased_until = NULL\n WHERE\n stream = $1::text\n AND leased_by = $2::uuid`,\n [stream, by, at, retry, block]\n );\n }\n await client.query(\"COMMIT\");\n } catch {\n // leased_until fallback\n await client.query(\"ROLLBACK\").catch(() => {});\n } finally {\n client.release();\n }\n }\n}\n","/**\n * @module act-pg\n * Date reviver for JSON.parse to automatically convert ISO 8601 date strings to Date objects.\n *\n * Recognizes the following formats:\n * - YYYY-MM-DDTHH:MM:SS.sssZ\n * - YYYY-MM-DDTHH:MM:SS.sss+HH:MM\n * - YYYY-MM-DDTHH:MM:SS.sss-HH:MM\n *\n * @param key The key being parsed\n * @param value The value being parsed\n * @returns A Date object if the value matches ISO 8601, otherwise the original value\n *\n * @example\n * const obj = JSON.parse(jsonString, dateReviver);\n */\nconst ISO_8601 =\n /^(\\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(\\.\\d+)?(Z|[+-][0-2][0-9]:[0-5][0-9])?$/;\nexport const dateReviver = (key: string, value: string): string | Date => {\n if (typeof value === \"string\" && ISO_8601.test(value)) {\n return new Date(value);\n }\n return 
value;\n};\n"],"mappings":";AASA,SAAS,kBAAkB,YAAY,cAAc;AACrD,OAAO,QAAQ;;;ACMf,IAAM,WACJ;AACK,IAAM,cAAc,CAAC,KAAa,UAAiC;AACxE,MAAI,OAAO,UAAU,YAAY,SAAS,KAAK,KAAK,GAAG;AACrD,WAAO,IAAI,KAAK,KAAK;AAAA,EACvB;AACA,SAAO;AACT;;;ADVA,IAAM,EAAE,MAAM,MAAM,IAAI;AACxB,MAAM;AAAA,EAAc,MAAM,SAAS;AAAA,EAAO,CAAC,QACzC,KAAK,MAAM,KAAK,WAAW;AAC7B;AAaA,IAAM,iBAAyB;AAAA,EAC7B,MAAM;AAAA,EACN,MAAM;AAAA,EACN,UAAU;AAAA,EACV,MAAM;AAAA,EACN,UAAU;AAAA,EACV,QAAQ;AAAA,EACR,OAAO;AAAA,EACP,aAAa;AACf;AAmBO,IAAM,gBAAN,MAAqC;AAAA,EAClC;AAAA,EACC;AAAA,EACD;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMR,YAAY,SAA0B,CAAC,GAAG;AACxC,SAAK,SAAS,EAAE,GAAG,gBAAgB,GAAG,OAAO;AAC7C,SAAK,QAAQ,IAAI,KAAK,KAAK,MAAM;AACjC,SAAK,OAAO,IAAI,KAAK,OAAO,MAAM,MAAM,KAAK,OAAO,KAAK;AACzD,SAAK,OAAO,IAAI,KAAK,OAAO,MAAM,MAAM,KAAK,OAAO,KAAK;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,UAAU;AACd,UAAM,KAAK,MAAM,IAAI;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,OAAO;AACX,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AAExC,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAG1B,YAAM,OAAO;AAAA,QACX,gCAAgC,KAAK,OAAO,MAAM;AAAA,MACpD;AAGA,YAAM,OAAO;AAAA,QACX,8BAA8B,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASzC;AAGA,YAAM,OAAO;AAAA,QACX,sCAAsC,KAAK,OAAO,KAAK;AAAA,aAClD,KAAK,IAAI;AAAA,MAChB;AACA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AACA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AACA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AAGA,YAAM,OAAO;AAAA,QACX,8BAA8B,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASzC;AAGA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AAEA,YAAM,OAAO,MAAM,QAAQ;AAC3B,aAAO;AAAA,QACL,kBAAkB,KAAK,OAAO,MAAM,iBAAiB,KAAK,OAAO,KAAK;AAAA,MACxE;AAAA,IACF,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU;AAC7B,aAAO,MAAM,yBAAyB,KAAK;AAC3C,YAAM;AAAA,IACR,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,OAAO;AACX,UAAM,KAAK,MAAM;AAAA,MACf;AAAA;AAAA;AAAA;AAAA,iCAI2B,KAAK,OAAO,MAAM;AAAA;AAAA,0CAET,KAAK,IAAI;AAAA,0CACT,KAAK,IAAI;AAAA,gBACnC,KAAK,OAAO,MAAM;AAAA,oCACE,KAAK,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAMlD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,MACJ,UACA,OACA,YAAY,OACZ;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,IAAI,SAAS,CAAC;AAEd,QAAI,MAAM,iBAAiB,KAAK,IAAI;AACpC,UAAM,aAAuB,CAAC;AAC9B,UAAM,SAAgB,CAAC;AAEvB,QAAI,WAAW;AACb,iBAAW;AAAA,QACT,gCAAgC,KAAK,IAAI,kBAAkB,MAAM,eAAe,UAAU;AAAA,MAC5F;AACA,iBAAW,KAAK,WAAW,MAAM,GAAG;AAAA,IACtC,WAAW,OAAO;AAChB,UAAI,OAAO,UAAU,aAAa;AAChC,eAAO,KAAK,KAAK;AACjB,mBAAW,KAAK,OAAO,OAAO,MAAM,EAAE;AAAA,MACxC,OAAO;AACL,mBAAW,KAAK,OAAO;AAAA,MACzB;AACA,UAAI,QAAQ;AACV,eAAO,KAAK,MAAM;AAClB,mBAAW,KAAK,WAAW,OAAO,MAAM,EAAE;AAAA,MAC5C;AACA,UAAI,SAAS,MAAM,QAAQ;AACzB,eAAO,KAAK,KAAK;AACjB,mBAAW,KAAK,eAAe,OAAO,MAAM,GAAG;AAAA,MACjD;AACA,UAAI,QAAQ;AACV,eAAO,KAAK,MAAM;AAClB,mBAAW,KAAK,OAAO,OAAO,MAAM,EAAE;AAAA,MACxC;AACA,UAAI,eAAe;AACjB,eAAO,KAAK,cAAc,YAAY,CAAC;AACvC,mBAAW,KAAK,YAAY,OAAO,MAAM,EAAE;AAAA,MAC7C;AACA,UAAI,gBAAgB;AAClB,eAAO,KAAK,eAAe,YAAY,CAAC;AACxC,mBAAW,KAAK,YAAY,OAAO,MAAM,EAAE;AAAA,MAC7C;AACA,UAAI,aAAa;AACf,eAAO,KAAK,WAAW;AACvB,mBAAW,KAAK,yBAAyB,OAAO,MAAM,EAAE;AAAA,MAC1D;AAAA,IACF;AACA,QAAI,WAAW,QAAQ;AACrB,aAAO,YAAY,WAAW,KAAK,OAAO;AAAA,IAC5C;AACA,WAAO,gBAAgB,WAAW,SAAS,KAAK;AAChD,QAAI,OAAO;AACT,aAAO,KAAK,KAAK;AACjB,aAAO,WAAW,OAAO,MAAM;AAAA,IACjC;AAEA,UAAM,SAAS,MAAM,KAAK,MAAM,MAA6B,KAAK,MAAM;AACxE,eAAW,OAAO,OAAO,KAAM,UAAS,GAAG;AAE3C,WAAO,OAAO,YAAY;AAAA,EAC5B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA
;AAAA,EAYA,MAAM,OACJ,QACA,MACA,MACA,iBACA;AACA,QAAI,KAAK,WAAW,EAAG,QAAO,CAAC;AAC/B,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AACxC,QAAI,UAAU;AACd,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAE1B,YAAM,OAAO,MAAM,OAAO;AAAA,QACxB;AAAA,eACO,KAAK,IAAI;AAAA;AAAA,QAEhB,CAAC,MAAM;AAAA,MACT;AACA,gBAAU,KAAK,WAAW,KAAK,KAAK,CAAC,EAAE,UAAU;AACjD,UAAI,OAAO,oBAAoB,YAAY,YAAY;AACrD,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAEF,YAAM,YAAY,MAAM,QAAQ;AAAA,QAC9B,KAAK,IAAI,OAAO,EAAE,MAAM,KAAK,MAAM;AACjC;AACA,gBAAM,MAAM;AAAA,wBACE,KAAK,IAAI;AAAA;AAEvB,gBAAM,OAAO,CAAC,MAAM,MAAM,QAAQ,SAAS,IAAI;AAC/C,gBAAM,EAAE,KAAK,IAAI,MAAM,OAAO,MAA6B,KAAK,IAAI;AACpE,iBAAO,KAAK,GAAG,CAAC;AAAA,QAClB,CAAC;AAAA,MACH;AAEA,YAAM,OACH;AAAA,QACC;AAAA,sBACY,KAAK,OAAO,KAAK,OAAO,KAAK,UAAU;AAAA,UAC/C,WAAW;AAAA,UACX,IAAI,UAAU,CAAC,EAAE;AAAA,UACjB,UAAU,UAAU,CAAC,EAAE;AAAA,QACzB,CAAC,CAAC;AAAA;AAAA;AAAA,MAGN,EACC,MAAM,CAAC,UAAU;AAChB,eAAO,MAAM,KAAK;AAClB,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA,mBAAmB;AAAA,QACrB;AAAA,MACF,CAAC;AACH,aAAO;AAAA,IACT,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC7C,YAAM;AAAA,IACR,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,MAAyB,OAAe;AAC5C,UAAM,EAAE,KAAK,IAAI,MAAM,KAAK,MAAM;AAAA,MAChC;AAAA;AAAA,aAEO,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA,MAKhB,CAAC,KAAK;AAAA,IACR;AAEA,UAAM,QAAQ,KAAK,SACf,KAAK,OAAO,CAAC,KAAK,MAAM,KAAK,IAAI,KAAK,EAAE,EAAE,GAAG,OAAO,gBAAgB,IACpE;AAEJ,UAAM,SAAkC,CAAC;AACzC,UAAM,KAAK,MAAS,CAAC,MAAM,EAAE,SAAS,cAAc,OAAO,KAAK,CAAC,GAAG;AAAA,MAClE;AAAA,MACA;AAAA,IACF,CAAC;AACD,WAAO,EAAE,SAAS,KAAK,IAAI,CAAC,EAAE,OAAO,MAAM,MAAM,GAAG,OAAO;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,MAAM,QAAiB;AAC3B,UAAM,EAAE,IAAI,GAAG,IAAI,OAAO,GAAG,CAAC;AAC9B,UAAM,UAAU,OAAO,IAAI,CAAC,EAAE,OAAO,MAAM,MAAM;AACjD,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AAExC,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAE1B,YAAM,OAAO;AAAA,QACX;AAAA,sBACc,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA,QAIvB,CAAC,OAAO;AAAA,MACV;AAEA,YAAM,EAAE,KAAK,IAAI,MAAM,OAAO;AAAA,QAK5B;AAAA;AAAA,0BAEkB,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA,iBAIlB,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QASlB,CAAC,SAAS,IAAI,IAAI,KAAK,OAAO,WAAW;AAAA,MAC3C;AACA,YAAM,OAAO,MAAM,QAAQ;AAE3B,aAAO,KAAK,IAAI,CAAC,EAAE,QAAQ,WAAW,MAAM,OAAO;AAAA,QACjD;AAAA,QACA;AAAA,QACA,IAAI;AAAA,QACJ;AAAA,QACA,OAAO;AAAA,MACT,EAAE;AAAA,IACJ,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC7C,YAAM;AAAA,IACR,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,IAAI,QAAiB;AACzB,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AAExC,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAC1B,iBAAW,EAAE,QAAQ,IAAI,IAAI,OAAO,MAAM,KAAK,QAAQ;AACrD,cAAM,OAAO;AAAA,UACX,UAAU,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAWnB,CAAC,QAAQ,IAAI,IAAI,OAAO,KAAK;AAAA,QAC/B;AAAA,MACF;AACA,YAAM,OAAO,MAAM,QAAQ;AAAA,IAC7B,QAAQ;AAEN,YAAM,OAAO,MAAM,UAAU,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAAA,IAC/C,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AACF;","names":[]}
+ {"version":3,"sources":["../src/PostgresStore.ts","../src/utils.ts"],"sourcesContent":["import type {\n Committed,\n EventMeta,\n Lease,\n Message,\n Query,\n Schemas,\n Store,\n} from \"@rotorsoft/act\";\nimport { ConcurrencyError, SNAP_EVENT, logger } from \"@rotorsoft/act\";\nimport pg from \"pg\";\nimport { dateReviver } from \"./utils.js\";\n\nconst { Pool, types } = pg;\ntypes.setTypeParser(types.builtins.JSONB, (val) =>\n JSON.parse(val, dateReviver)\n);\n\ntype Config = Readonly<{\n host: string;\n port: number;\n database: string;\n user: string;\n password: string;\n schema: string;\n table: string;\n}>;\n\nconst DEFAULT_CONFIG: Config = {\n host: \"localhost\",\n port: 5432,\n database: \"postgres\",\n user: \"postgres\",\n password: \"postgres\",\n schema: \"public\",\n table: \"events\",\n};\n\n/**\n * @category Adapters\n * @see Store\n *\n * PostgresStore is a production-ready event store adapter for Act, using PostgreSQL as the backend.\n *\n * - Supports event sourcing, leasing, snapshots, and concurrency control.\n * - Designed for high-throughput, scalable, and reliable event storage.\n * - Implements the Act Store interface.\n *\n * @example\n * import { PostgresStore } from \"@act/pg\";\n * const store = new PostgresStore({ schema: \"my_schema\", table: \"events\" });\n * await store.seed();\n *\n * @see https://github.com/rotorsoft/act-root\n */\nexport class PostgresStore implements Store {\n private _pool;\n readonly config: Config;\n private _fqt: string;\n private _fqs: string;\n\n /**\n * Create a new PostgresStore instance.\n * @param config Partial configuration (host, port, user, password, schema, table, etc.)\n */\n constructor(config: Partial<Config> = {}) {\n this.config = { ...DEFAULT_CONFIG, ...config };\n this._pool = new Pool(this.config);\n this._fqt = `\"${this.config.schema}\".\"${this.config.table}\"`;\n this._fqs = `\"${this.config.schema}\".\"${this.config.table}_streams\"`;\n }\n\n /**\n * Dispose of the store and close all database connections.\n * @returns Promise that resolves when all connections are closed\n */\n async dispose() {\n await this._pool.end();\n }\n\n /**\n * Seed the database with required tables, indexes, and schema for event storage.\n * @returns Promise that resolves when seeding is complete\n * @throws Error if seeding fails\n */\n async seed() {\n const client = await this._pool.connect();\n\n try {\n await client.query(\"BEGIN\");\n\n // Create schema\n await client.query(\n `CREATE SCHEMA IF NOT EXISTS \"${this.config.schema}\";`\n );\n\n // Events table\n await client.query(\n `CREATE TABLE IF NOT EXISTS ${this._fqt} (\n id serial PRIMARY KEY,\n name varchar(100) COLLATE pg_catalog.\"default\" NOT NULL,\n data jsonb,\n stream varchar(100) COLLATE pg_catalog.\"default\" NOT NULL,\n version int NOT NULL,\n created timestamptz NOT NULL DEFAULT now(),\n meta jsonb\n ) TABLESPACE pg_default;`\n );\n\n // Indexes on events\n await client.query(\n `CREATE UNIQUE INDEX IF NOT EXISTS \"${this.config.table}_stream_ix\" \n ON ${this._fqt} (stream COLLATE pg_catalog.\"default\", version);`\n );\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_name_ix\" \n ON ${this._fqt} (name COLLATE pg_catalog.\"default\");`\n );\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_created_id_ix\" \n ON ${this._fqt} (created, id);`\n );\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_correlation_ix\" \n ON ${this._fqt} ((meta ->> 'correlation') COLLATE 
pg_catalog.\"default\");`\n );\n\n // Streams table\n await client.query(\n `CREATE TABLE IF NOT EXISTS ${this._fqs} (\n stream varchar(100) COLLATE pg_catalog.\"default\" PRIMARY KEY,\n source varchar(100) COLLATE pg_catalog.\"default\",\n at int NOT NULL DEFAULT -1,\n retry smallint NOT NULL DEFAULT 0,\n blocked boolean NOT NULL DEFAULT false,\n error text,\n leased_at int,\n leased_by text,\n leased_until timestamptz\n ) TABLESPACE pg_default;`\n );\n\n // Index for fetching streams\n await client.query(\n `CREATE INDEX IF NOT EXISTS \"${this.config.table}_streams_fetch_ix\" \n ON ${this._fqs} (blocked, at);`\n );\n\n await client.query(\"COMMIT\");\n logger.info(\n `Seeded schema \"${this.config.schema}\" with table \"${this.config.table}\"`\n );\n } catch (error) {\n await client.query(\"ROLLBACK\");\n logger.error(\"Failed to seed store:\", error);\n throw error;\n } finally {\n client.release();\n }\n }\n\n /**\n * Drop all tables and schema created by the store (for testing or cleanup).\n * @returns Promise that resolves when the schema is dropped\n */\n async drop() {\n await this._pool.query(\n `\n DO $$\n BEGIN\n IF EXISTS (SELECT 1 FROM information_schema.schemata\n WHERE schema_name = '${this.config.schema}'\n ) THEN\n EXECUTE 'DROP TABLE IF EXISTS ${this._fqt}';\n EXECUTE 'DROP TABLE IF EXISTS ${this._fqs}';\n IF '${this.config.schema}' <> 'public' THEN\n EXECUTE 'DROP SCHEMA \"${this.config.schema}\" CASCADE';\n END IF;\n END IF;\n END\n $$;\n `\n );\n }\n\n /**\n * Query events from the store, optionally filtered by stream, event name, time, etc.\n *\n * @param callback Function called for each event found\n * @param query (Optional) Query filter (stream, names, before, after, etc.)\n * @returns The number of events found\n *\n * @example\n * await store.query((event) => console.log(event), { stream: \"A\" });\n */\n async query<E extends Schemas>(\n callback: (event: Committed<E, keyof E>) => void,\n query?: Query\n ) {\n const {\n stream,\n names,\n before,\n after,\n limit,\n created_before,\n created_after,\n backward,\n correlation,\n with_snaps = false,\n } = query || {};\n\n let sql = `SELECT * FROM ${this._fqt}`;\n const conditions: string[] = [];\n const values: any[] = [];\n\n if (query) {\n if (typeof after !== \"undefined\") {\n values.push(after);\n conditions.push(`id>$${values.length}`);\n } else {\n conditions.push(\"id>-1\");\n }\n if (stream) {\n values.push(stream);\n conditions.push(`stream ~ $${values.length}`);\n }\n if (names && names.length) {\n values.push(names);\n conditions.push(`name = ANY($${values.length})`);\n }\n if (before) {\n values.push(before);\n conditions.push(`id<$${values.length}`);\n }\n if (created_after) {\n values.push(created_after.toISOString());\n conditions.push(`created>$${values.length}`);\n }\n if (created_before) {\n values.push(created_before.toISOString());\n conditions.push(`created<$${values.length}`);\n }\n if (correlation) {\n values.push(correlation);\n conditions.push(`meta->>'correlation'=$${values.length}`);\n }\n if (!with_snaps) {\n conditions.push(`name <> '${SNAP_EVENT}'`);\n }\n }\n if (conditions.length) {\n sql += \" WHERE \" + conditions.join(\" AND \");\n }\n sql += ` ORDER BY id ${backward ? \"DESC\" : \"ASC\"}`;\n if (limit) {\n values.push(limit);\n sql += ` LIMIT $${values.length}`;\n }\n\n const result = await this._pool.query<Committed<E, keyof E>>(sql, values);\n for (const row of result.rows) callback(row);\n\n return result.rowCount ?? 
0;\n }\n\n /**\n * Commit new events to the store for a given stream, with concurrency control.\n *\n * @param stream The stream name\n * @param msgs Array of messages (event name and data)\n * @param meta Event metadata (correlation, causation, etc.)\n * @param expectedVersion (Optional) Expected stream version for concurrency control\n * @returns Array of committed events\n * @throws ConcurrencyError if the expected version does not match\n */\n async commit<E extends Schemas>(\n stream: string,\n msgs: Message<E, keyof E>[],\n meta: EventMeta,\n expectedVersion?: number\n ) {\n if (msgs.length === 0) return [];\n const client = await this._pool.connect();\n let version = -1;\n try {\n await client.query(\"BEGIN\");\n\n const last = await client.query<Committed<E, keyof E>>(\n `SELECT version\n FROM ${this._fqt}\n WHERE stream=$1 ORDER BY version DESC LIMIT 1`,\n [stream]\n );\n version = last.rowCount ? last.rows[0].version : -1;\n if (typeof expectedVersion === \"number\" && version !== expectedVersion)\n throw new ConcurrencyError(\n stream,\n version,\n msgs as unknown as Message<Schemas, string>[],\n expectedVersion\n );\n\n const committed = await Promise.all(\n msgs.map(async ({ name, data }) => {\n version++;\n const sql = `\n INSERT INTO ${this._fqt}(name, data, stream, version, meta) \n VALUES($1, $2, $3, $4, $5) RETURNING *`;\n const vals = [name, data, stream, version, meta];\n const { rows } = await client.query<Committed<E, keyof E>>(sql, vals);\n return rows.at(0)!;\n })\n );\n\n await client\n .query(\n `\n NOTIFY \"${this.config.table}\", '${JSON.stringify({\n operation: \"INSERT\",\n id: committed[0].name,\n position: committed[0].id,\n })}';\n COMMIT;\n `\n )\n .catch((error) => {\n logger.error(error);\n throw new ConcurrencyError(\n stream,\n version,\n msgs as unknown as Message<Schemas, string>[],\n expectedVersion || -1\n );\n });\n return committed;\n } catch (error) {\n await client.query(\"ROLLBACK\").catch(() => {});\n throw error;\n } finally {\n client.release();\n }\n }\n\n /**\n * Polls the store for unblocked streams needing processing, ordered by lease watermark ascending.\n * @param limit - Maximum number of streams to poll.\n * @returns The polled streams.\n */\n async poll(limit: number) {\n const { rows } = await this._pool.query<{\n stream: string;\n at: number;\n source: string;\n }>(\n `\n SELECT stream, at\n FROM ${this._fqs}\n WHERE blocked=false AND (leased_by IS NULL OR leased_until <= NOW())\n ORDER BY at ASC\n LIMIT $1::integer\n `,\n [limit]\n );\n return rows;\n }\n\n /**\n * Lease streams for reaction processing, marking them as in-progress.\n *\n * @param leases - Lease requests for streams, including end-of-lease watermark, lease holder, and source stream.\n * @param millis - Lease duration in milliseconds.\n * @returns Array of leased objects with updated lease info\n */\n async lease(leases: Lease[], millis: number): Promise<Lease[]> {\n const client = await this._pool.connect();\n try {\n await client.query(\"BEGIN\");\n // insert new streams\n await client.query(\n `\n INSERT INTO ${this._fqs} (stream, source)\n SELECT lease->>'stream', lease->>'source'\n FROM jsonb_array_elements($1::jsonb) AS lease\n ON CONFLICT (stream) DO NOTHING\n `,\n [JSON.stringify(leases)]\n );\n // set leases\n const { rows } = await client.query<{\n stream: string;\n source: string | null;\n leased_at: number;\n leased_by: string;\n leased_until: number;\n retry: number;\n }>(\n `\n WITH input AS (\n SELECT * FROM jsonb_to_recordset($1::jsonb)\n AS 
x(stream text, at int, by text)\n ), free AS (\n SELECT s.stream FROM ${this._fqs} s\n JOIN input i ON s.stream = i.stream\n WHERE s.leased_by IS NULL OR s.leased_until <= NOW()\n FOR UPDATE\n )\n UPDATE ${this._fqs} s\n SET\n leased_by = i.by,\n leased_at = i.at,\n leased_until = NOW() + ($2::integer || ' milliseconds')::interval,\n retry = CASE WHEN $2::integer > 0 THEN s.retry + 1 ELSE s.retry END\n FROM input i, free f\n WHERE s.stream = f.stream AND s.stream = i.stream\n RETURNING s.stream, s.source, s.leased_at, s.leased_by, s.leased_until, s.retry\n `,\n [JSON.stringify(leases), millis]\n );\n await client.query(\"COMMIT\");\n\n return rows.map(\n ({ stream, source, leased_at, leased_by, leased_until, retry }) => ({\n stream,\n source: source ?? undefined,\n at: leased_at,\n by: leased_by,\n until: new Date(leased_until),\n retry,\n })\n );\n } catch (error) {\n await client.query(\"ROLLBACK\").catch(() => {});\n logger.error(error);\n return [];\n } finally {\n client.release();\n }\n }\n\n /**\n * Acknowledge and release leases after processing, updating stream positions.\n *\n * @param leases - Leases to acknowledge, including last processed watermark and lease holder.\n * @returns Acked leases.\n */\n async ack(leases: Lease[]): Promise<Lease[]> {\n const client = await this._pool.connect();\n try {\n await client.query(\"BEGIN\");\n const { rows } = await client.query<{\n stream: string;\n source: string | null;\n at: number;\n retry: number;\n }>(\n `\n WITH input AS (\n SELECT * FROM jsonb_to_recordset($1::jsonb)\n AS x(stream text, by text, at int)\n )\n UPDATE ${this._fqs} AS s\n SET\n at = i.at,\n retry = -1,\n leased_by = NULL,\n leased_at = NULL,\n leased_until = NULL\n FROM input i\n WHERE s.stream = i.stream AND s.leased_by = i.by\n RETURNING s.stream, s.source, s.at, s.retry\n `,\n [JSON.stringify(leases)]\n );\n await client.query(\"COMMIT\");\n\n return rows.map((row) => ({\n stream: row.stream,\n source: row.source ?? undefined,\n at: row.at,\n by: \"\",\n retry: row.retry,\n }));\n } catch (error) {\n await client.query(\"ROLLBACK\").catch(() => {});\n logger.error(error);\n return [];\n } finally {\n client.release();\n }\n }\n\n /**\n * Block a stream for processing after failing to process and reaching max retries with blocking enabled.\n * @param leases - Leases to block, including lease holder and last error message.\n * @returns Blocked leases.\n */\n async block(\n leases: Array<Lease & { error: string }>\n ): Promise<(Lease & { error: string })[]> {\n const client = await this._pool.connect();\n try {\n await client.query(\"BEGIN\");\n const { rows } = await client.query<{\n stream: string;\n source: string | null;\n at: number;\n by: string;\n retry: number;\n error: string;\n }>(\n `\n WITH input AS (\n SELECT * FROM jsonb_to_recordset($1::jsonb)\n AS x(stream text, by text, error text)\n )\n UPDATE ${this._fqs} AS s\n SET blocked = true, error = i.error\n FROM input i\n WHERE s.stream = i.stream AND s.leased_by = i.by AND s.blocked = false\n RETURNING s.stream, s.source, s.at, i.by, s.retry, s.error\n `,\n [JSON.stringify(leases)]\n );\n await client.query(\"COMMIT\");\n\n return rows.map((row) => ({\n stream: row.stream,\n source: row.source ?? undefined,\n at: row.at,\n by: row.by,\n retry: row.retry,\n error: row.error ?? 
\"\",\n }));\n } catch (error) {\n await client.query(\"ROLLBACK\").catch(() => {});\n logger.error(error);\n return [];\n } finally {\n client.release();\n }\n }\n}\n","/**\n * @module act-pg\n * Date reviver for JSON.parse to automatically convert ISO 8601 date strings to Date objects.\n *\n * Recognizes the following formats:\n * - YYYY-MM-DDTHH:MM:SS.sssZ\n * - YYYY-MM-DDTHH:MM:SS.sss+HH:MM\n * - YYYY-MM-DDTHH:MM:SS.sss-HH:MM\n *\n * @param key The key being parsed\n * @param value The value being parsed\n * @returns A Date object if the value matches ISO 8601, otherwise the original value\n *\n * @example\n * const obj = JSON.parse(jsonString, dateReviver);\n */\nconst ISO_8601 =\n /^(\\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(\\.\\d+)?(Z|[+-][0-2][0-9]:[0-5][0-9])?$/;\nexport const dateReviver = (key: string, value: string): string | Date => {\n if (typeof value === \"string\" && ISO_8601.test(value)) {\n return new Date(value);\n }\n return value;\n};\n"],"mappings":";AASA,SAAS,kBAAkB,YAAY,cAAc;AACrD,OAAO,QAAQ;;;ACMf,IAAM,WACJ;AACK,IAAM,cAAc,CAAC,KAAa,UAAiC;AACxE,MAAI,OAAO,UAAU,YAAY,SAAS,KAAK,KAAK,GAAG;AACrD,WAAO,IAAI,KAAK,KAAK;AAAA,EACvB;AACA,SAAO;AACT;;;ADVA,IAAM,EAAE,MAAM,MAAM,IAAI;AACxB,MAAM;AAAA,EAAc,MAAM,SAAS;AAAA,EAAO,CAAC,QACzC,KAAK,MAAM,KAAK,WAAW;AAC7B;AAYA,IAAM,iBAAyB;AAAA,EAC7B,MAAM;AAAA,EACN,MAAM;AAAA,EACN,UAAU;AAAA,EACV,MAAM;AAAA,EACN,UAAU;AAAA,EACV,QAAQ;AAAA,EACR,OAAO;AACT;AAmBO,IAAM,gBAAN,MAAqC;AAAA,EAClC;AAAA,EACC;AAAA,EACD;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMR,YAAY,SAA0B,CAAC,GAAG;AACxC,SAAK,SAAS,EAAE,GAAG,gBAAgB,GAAG,OAAO;AAC7C,SAAK,QAAQ,IAAI,KAAK,KAAK,MAAM;AACjC,SAAK,OAAO,IAAI,KAAK,OAAO,MAAM,MAAM,KAAK,OAAO,KAAK;AACzD,SAAK,OAAO,IAAI,KAAK,OAAO,MAAM,MAAM,KAAK,OAAO,KAAK;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,UAAU;AACd,UAAM,KAAK,MAAM,IAAI;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,OAAO;AACX,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AAExC,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAG1B,YAAM,OAAO;AAAA,QACX,gCAAgC,KAAK,OAAO,MAAM;AAAA,MACpD;AAGA,YAAM,OAAO;AAAA,QACX,8BAA8B,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASzC;AAGA,YAAM,OAAO;AAAA,QACX,sCAAsC,KAAK,OAAO,KAAK;AAAA,aAClD,KAAK,IAAI;AAAA,MAChB;AACA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AACA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AACA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AAGA,YAAM,OAAO;AAAA,QACX,8BAA8B,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAWzC;AAGA,YAAM,OAAO;AAAA,QACX,+BAA+B,KAAK,OAAO,KAAK;AAAA,aAC3C,KAAK,IAAI;AAAA,MAChB;AAEA,YAAM,OAAO,MAAM,QAAQ;AAC3B,aAAO;AAAA,QACL,kBAAkB,KAAK,OAAO,MAAM,iBAAiB,KAAK,OAAO,KAAK;AAAA,MACxE;AAAA,IACF,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU;AAC7B,aAAO,MAAM,yBAAyB,KAAK;AAC3C,YAAM;AAAA,IACR,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,OAAO;AACX,UAAM,KAAK,MAAM;AAAA,MACf;AAAA;AAAA;AAAA;AAAA,iCAI2B,KAAK,OAAO,MAAM;AAAA;AAAA,0CAET,KAAK,IAAI;AAAA,0CACT,KAAK,IAAI;AAAA,gBACnC,KAAK,OAAO,MAAM;AAAA,oCACE,KAAK,OAAO,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAMlD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,MACJ,UACA,OACA;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,IACf,IAAI,SAAS,CAAC;AAEd,QAAI,MAAM,iBAAiB,KAAK,IAAI;AACpC,UAAM,aAAuB,CAAC;AAC9B,UAAM,SAAgB,CAAC;AAEvB,QAAI,OAAO;AACT,UAAI,OAAO,UAAU,aAAa;AAChC,eAAO,KAAK,KAAK;AACjB,mBAAW,KAAK,OAAO,OAAO,MAAM,EAAE;AAAA,MACxC,OAAO;AACL,mBAAW,KAAK,OAAO;
AAAA,MACzB;AACA,UAAI,QAAQ;AACV,eAAO,KAAK,MAAM;AAClB,mBAAW,KAAK,aAAa,OAAO,MAAM,EAAE;AAAA,MAC9C;AACA,UAAI,SAAS,MAAM,QAAQ;AACzB,eAAO,KAAK,KAAK;AACjB,mBAAW,KAAK,eAAe,OAAO,MAAM,GAAG;AAAA,MACjD;AACA,UAAI,QAAQ;AACV,eAAO,KAAK,MAAM;AAClB,mBAAW,KAAK,OAAO,OAAO,MAAM,EAAE;AAAA,MACxC;AACA,UAAI,eAAe;AACjB,eAAO,KAAK,cAAc,YAAY,CAAC;AACvC,mBAAW,KAAK,YAAY,OAAO,MAAM,EAAE;AAAA,MAC7C;AACA,UAAI,gBAAgB;AAClB,eAAO,KAAK,eAAe,YAAY,CAAC;AACxC,mBAAW,KAAK,YAAY,OAAO,MAAM,EAAE;AAAA,MAC7C;AACA,UAAI,aAAa;AACf,eAAO,KAAK,WAAW;AACvB,mBAAW,KAAK,yBAAyB,OAAO,MAAM,EAAE;AAAA,MAC1D;AACA,UAAI,CAAC,YAAY;AACf,mBAAW,KAAK,YAAY,UAAU,GAAG;AAAA,MAC3C;AAAA,IACF;AACA,QAAI,WAAW,QAAQ;AACrB,aAAO,YAAY,WAAW,KAAK,OAAO;AAAA,IAC5C;AACA,WAAO,gBAAgB,WAAW,SAAS,KAAK;AAChD,QAAI,OAAO;AACT,aAAO,KAAK,KAAK;AACjB,aAAO,WAAW,OAAO,MAAM;AAAA,IACjC;AAEA,UAAM,SAAS,MAAM,KAAK,MAAM,MAA6B,KAAK,MAAM;AACxE,eAAW,OAAO,OAAO,KAAM,UAAS,GAAG;AAE3C,WAAO,OAAO,YAAY;AAAA,EAC5B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,OACJ,QACA,MACA,MACA,iBACA;AACA,QAAI,KAAK,WAAW,EAAG,QAAO,CAAC;AAC/B,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AACxC,QAAI,UAAU;AACd,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAE1B,YAAM,OAAO,MAAM,OAAO;AAAA,QACxB;AAAA,eACO,KAAK,IAAI;AAAA;AAAA,QAEhB,CAAC,MAAM;AAAA,MACT;AACA,gBAAU,KAAK,WAAW,KAAK,KAAK,CAAC,EAAE,UAAU;AACjD,UAAI,OAAO,oBAAoB,YAAY,YAAY;AACrD,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAEF,YAAM,YAAY,MAAM,QAAQ;AAAA,QAC9B,KAAK,IAAI,OAAO,EAAE,MAAM,KAAK,MAAM;AACjC;AACA,gBAAM,MAAM;AAAA,wBACE,KAAK,IAAI;AAAA;AAEvB,gBAAM,OAAO,CAAC,MAAM,MAAM,QAAQ,SAAS,IAAI;AAC/C,gBAAM,EAAE,KAAK,IAAI,MAAM,OAAO,MAA6B,KAAK,IAAI;AACpE,iBAAO,KAAK,GAAG,CAAC;AAAA,QAClB,CAAC;AAAA,MACH;AAEA,YAAM,OACH;AAAA,QACC;AAAA,sBACY,KAAK,OAAO,KAAK,OAAO,KAAK,UAAU;AAAA,UAC/C,WAAW;AAAA,UACX,IAAI,UAAU,CAAC,EAAE;AAAA,UACjB,UAAU,UAAU,CAAC,EAAE;AAAA,QACzB,CAAC,CAAC;AAAA;AAAA;AAAA,MAGN,EACC,MAAM,CAAC,UAAU;AAChB,eAAO,MAAM,KAAK;AAClB,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA;AAAA,UACA,mBAAmB;AAAA,QACrB;AAAA,MACF,CAAC;AACH,aAAO;AAAA,IACT,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC7C,YAAM;AAAA,IACR,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,KAAK,OAAe;AACxB,UAAM,EAAE,KAAK,IAAI,MAAM,KAAK,MAAM;AAAA,MAKhC;AAAA;AAAA,aAEO,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA,MAKhB,CAAC,KAAK;AAAA,IACR;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,MAAM,QAAiB,QAAkC;AAC7D,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AACxC,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAE1B,YAAM,OAAO;AAAA,QACX;AAAA,sBACc,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA,QAKvB,CAAC,KAAK,UAAU,MAAM,CAAC;AAAA,MACzB;AAEA,YAAM,EAAE,KAAK,IAAI,MAAM,OAAO;AAAA,QAQ5B;AAAA;AAAA;AAAA;AAAA;AAAA,+BAKuB,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA,eAKzB,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAUhB,CAAC,KAAK,UAAU,MAAM,GAAG,MAAM;AAAA,MACjC;AACA,YAAM,OAAO,MAAM,QAAQ;AAE3B,aAAO,KAAK;AAAA,QACV,CAAC,EAAE,QAAQ,QAAQ,WAAW,WAAW,cAAc,MAAM,OAAO;AAAA,UAClE;AAAA,UACA,QAAQ,UAAU;AAAA,UAClB,IAAI;AAAA,UACJ,IAAI;AAAA,UACJ,OAAO,IAAI,KAAK,YAAY;AAAA,UAC5B;AAAA,QACF;AAAA,MACF;AAAA,IACF,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC7C,aAAO,MAAM,KAAK;AAClB,aAAO,CAAC;AAAA,IACV,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,IAAI,QAAmC;AAC3C,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AACxC,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAC1B,YAAM,EAAE,KAAK,IAAI,MAAM,OAAO;AAAA,QAM5B;AAAA;AAAA;AAAA;AAAA;AAAA,eAKO,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAWhB,CAAC,KAAK,UAAU,MAAM,CAAC;AAAA,MACzB;AACA,YAAM,OAAO,MAAM,QAAQ;AAE3B,aAAO,KAAK,IAAI,CAAC,SAAS;AAAA,QACxB,QAAQ,IAAI;AAAA,QACZ,QAAQ,IAAI,UAAU
;AAAA,QACtB,IAAI,IAAI;AAAA,QACR,IAAI;AAAA,QACJ,OAAO,IAAI;AAAA,MACb,EAAE;AAAA,IACJ,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC7C,aAAO,MAAM,KAAK;AAClB,aAAO,CAAC;AAAA,IACV,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,MACJ,QACwC;AACxC,UAAM,SAAS,MAAM,KAAK,MAAM,QAAQ;AACxC,QAAI;AACF,YAAM,OAAO,MAAM,OAAO;AAC1B,YAAM,EAAE,KAAK,IAAI,MAAM,OAAO;AAAA,QAQ5B;AAAA;AAAA;AAAA;AAAA;AAAA,eAKO,KAAK,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAMhB,CAAC,KAAK,UAAU,MAAM,CAAC;AAAA,MACzB;AACA,YAAM,OAAO,MAAM,QAAQ;AAE3B,aAAO,KAAK,IAAI,CAAC,SAAS;AAAA,QACxB,QAAQ,IAAI;AAAA,QACZ,QAAQ,IAAI,UAAU;AAAA,QACtB,IAAI,IAAI;AAAA,QACR,IAAI,IAAI;AAAA,QACR,OAAO,IAAI;AAAA,QACX,OAAO,IAAI,SAAS;AAAA,MACtB,EAAE;AAAA,IACJ,SAAS,OAAO;AACd,YAAM,OAAO,MAAM,UAAU,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC7C,aAAO,MAAM,KAAK;AAClB,aAAO,CAAC;AAAA,IACV,UAAE;AACA,aAAO,QAAQ;AAAA,IACjB;AAAA,EACF;AACF;","names":[]}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@rotorsoft/act-pg",
  "type": "module",
- "version": "0.4.7",
+ "version": "0.5.0",
  "description": "act pg adapters",
  "author": "rotorsoft",
  "license": "MIT",
@@ -25,22 +25,22 @@
  },
  "sideEffects": false,
  "engines": {
- "node": ">=22.17.0"
+ "node": ">=22.17.1"
  },
  "publishConfig": {
  "access": "public"
  },
  "dependencies": {
  "pg": "^8.16.3",
- "zod": "^4.0.5",
- "@rotorsoft/act": "0.5.7"
+ "zod": "^4.0.10",
+ "@rotorsoft/act": "0.6.0"
  },
  "devDependencies": {
  "@types/pg": "^8.15.4"
  },
  "scripts": {
  "clean": "rm -rf dist",
- "types": "tsc --build --emitDeclarationOnly",
+ "types": "tsc --build tsconfig.build.json --emitDeclarationOnly",
  "build": "pnpm clean && tsup && pnpm types"
  }
  }