@pgflow/edge-worker 0.0.5-prealpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/.envrc +2 -0
  2. package/LICENSE.md +660 -0
  3. package/README.md +46 -0
  4. package/deno.json +32 -0
  5. package/deno.lock +369 -0
  6. package/dist/LICENSE.md +660 -0
  7. package/dist/README.md +46 -0
  8. package/dist/index.js +972 -0
  9. package/dist/index.js.map +7 -0
  10. package/mod.ts +7 -0
  11. package/package.json +14 -0
  12. package/project.json +164 -0
  13. package/scripts/concatenate-migrations.sh +22 -0
  14. package/scripts/wait-for-localhost +17 -0
  15. package/sql/990_active_workers.sql +11 -0
  16. package/sql/991_inactive_workers.sql +12 -0
  17. package/sql/992_spawn_worker.sql +68 -0
  18. package/sql/benchmarks/max_concurrency.sql +32 -0
  19. package/sql/queries/debug_connections.sql +0 -0
  20. package/sql/queries/debug_processing_gaps.sql +115 -0
  21. package/src/EdgeWorker.ts +172 -0
  22. package/src/core/BatchProcessor.ts +38 -0
  23. package/src/core/ExecutionController.ts +51 -0
  24. package/src/core/Heartbeat.ts +23 -0
  25. package/src/core/Logger.ts +69 -0
  26. package/src/core/Queries.ts +44 -0
  27. package/src/core/Worker.ts +102 -0
  28. package/src/core/WorkerLifecycle.ts +93 -0
  29. package/src/core/WorkerState.ts +85 -0
  30. package/src/core/types.ts +47 -0
  31. package/src/flow/FlowWorkerLifecycle.ts +81 -0
  32. package/src/flow/StepTaskExecutor.ts +87 -0
  33. package/src/flow/StepTaskPoller.ts +51 -0
  34. package/src/flow/createFlowWorker.ts +105 -0
  35. package/src/flow/types.ts +1 -0
  36. package/src/index.ts +15 -0
  37. package/src/queue/MessageExecutor.ts +105 -0
  38. package/src/queue/Queue.ts +92 -0
  39. package/src/queue/ReadWithPollPoller.ts +35 -0
  40. package/src/queue/createQueueWorker.ts +145 -0
  41. package/src/queue/types.ts +14 -0
  42. package/src/spawnNewEdgeFunction.ts +33 -0
  43. package/supabase/call +23 -0
  44. package/supabase/cli +3 -0
  45. package/supabase/config.toml +42 -0
  46. package/supabase/functions/cpu_intensive/index.ts +20 -0
  47. package/supabase/functions/creating_queue/index.ts +5 -0
  48. package/supabase/functions/failing_always/index.ts +13 -0
  49. package/supabase/functions/increment_sequence/index.ts +14 -0
  50. package/supabase/functions/max_concurrency/index.ts +17 -0
  51. package/supabase/functions/serial_sleep/index.ts +16 -0
  52. package/supabase/functions/utils.ts +13 -0
  53. package/supabase/seed.sql +2 -0
  54. package/tests/db/compose.yaml +20 -0
  55. package/tests/db.ts +71 -0
  56. package/tests/e2e/README.md +54 -0
  57. package/tests/e2e/_helpers.ts +135 -0
  58. package/tests/e2e/performance.test.ts +60 -0
  59. package/tests/e2e/restarts.test.ts +56 -0
  60. package/tests/helpers.ts +22 -0
  61. package/tests/integration/_helpers.ts +43 -0
  62. package/tests/integration/creating_queue.test.ts +32 -0
  63. package/tests/integration/flow/minimalFlow.test.ts +121 -0
  64. package/tests/integration/maxConcurrent.test.ts +76 -0
  65. package/tests/integration/retries.test.ts +78 -0
  66. package/tests/integration/starting_worker.test.ts +35 -0
  67. package/tests/sql.ts +46 -0
  68. package/tests/unit/WorkerState.test.ts +74 -0
  69. package/tsconfig.lib.json +23 -0
package/src/core/types.ts ADDED
@@ -0,0 +1,47 @@
+ export type { Json } from '../../../core/src/types.ts';
+
+ export interface IPoller<IMessage> {
+   poll(): Promise<IMessage[]>;
+ }
+
+ export interface IExecutor {
+   get msgId(): number;
+   execute(): Promise<unknown>;
+ }
+
+ export interface IMessage {
+   msg_id: number;
+ }
+
+ export interface ILifecycle {
+   acknowledgeStart(workerBootstrap: WorkerBootstrap): Promise<void>;
+   acknowledgeStop(): void;
+   sendHeartbeat(): Promise<void>;
+
+   get edgeFunctionName(): string | undefined;
+   get queueName(): string;
+   get isRunning(): boolean;
+   get isStopping(): boolean;
+   get isStopped(): boolean;
+
+   transitionToStopping(): void;
+ }
+
+ export interface IBatchProcessor {
+   processBatch(): Promise<void>;
+   awaitCompletion(): Promise<void>;
+ }
+
+ export type WorkerRow = {
+   last_heartbeat_at: string;
+   queue_name: string;
+   started_at: string;
+   stopped_at: string | null;
+   worker_id: string;
+   function_name: string;
+ };
+
+ export interface WorkerBootstrap {
+   edgeFunctionName: string;
+   workerId: string;
+ }
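For orientation, these interfaces are the seams the worker core is built around: an IPoller fetches IMessage-shaped records, an IExecutor processes one record identified by msgId, and an ILifecycle tracks the worker row and heartbeat. Below is a minimal sketch of classes satisfying IPoller and IExecutor, assuming the type re-exports from src/index.ts shown further below; the class names are invented for illustration and are not part of the package.

import type { IExecutor, IMessage, IPoller } from '@pgflow/edge-worker';

// Hypothetical poller that hands out whatever is currently buffered in memory.
class InMemoryPoller implements IPoller<IMessage> {
  constructor(private readonly pending: IMessage[]) {}

  poll(): Promise<IMessage[]> {
    return Promise.resolve(this.pending.splice(0));
  }
}

// Hypothetical executor that only logs the message id it was given.
class LoggingExecutor implements IExecutor {
  constructor(private readonly message: IMessage) {}

  get msgId(): number {
    return this.message.msg_id;
  }

  execute(): Promise<unknown> {
    console.log(`executing message ${this.msgId}`);
    return Promise.resolve(null);
  }
}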
package/src/flow/FlowWorkerLifecycle.ts ADDED
@@ -0,0 +1,81 @@
+ import { Heartbeat } from '../core/Heartbeat.ts';
+ import { getLogger } from '../core/Logger.ts';
+ import type { Queries } from '../core/Queries.ts';
+ import type { ILifecycle, WorkerBootstrap, WorkerRow } from '../core/types.ts';
+ import { States, WorkerState } from '../core/WorkerState.ts';
+ import type { AnyFlow } from '@pgflow/dsl';
+
+ /**
+  * A specialized WorkerLifecycle for Flow-based workers that is aware of the Flow's step types
+  */
+ export class FlowWorkerLifecycle<TFlow extends AnyFlow> implements ILifecycle {
+   private workerState: WorkerState = new WorkerState();
+   private heartbeat?: Heartbeat;
+   private logger = getLogger('FlowWorkerLifecycle');
+   private queries: Queries;
+   private workerRow?: WorkerRow;
+   private flow: TFlow;
+
+   constructor(queries: Queries, flow: TFlow) {
+     this.queries = queries;
+     this.flow = flow;
+   }
+
+   async acknowledgeStart(workerBootstrap: WorkerBootstrap): Promise<void> {
+     this.workerState.transitionTo(States.Starting);
+
+     this.workerRow = await this.queries.onWorkerStarted({
+       queueName: this.queueName,
+       ...workerBootstrap,
+     });
+
+     this.heartbeat = new Heartbeat(5000, this.queries, this.workerRow);
+
+     this.workerState.transitionTo(States.Running);
+   }
+
+   acknowledgeStop() {
+     this.workerState.transitionTo(States.Stopping);
+
+     if (!this.workerRow) {
+       throw new Error('Cannot stop worker: workerRow not set');
+     }
+
+     try {
+       this.logger.debug('Acknowledging worker stop...');
+       this.workerState.transitionTo(States.Stopped);
+       this.logger.debug('Worker stop acknowledged');
+     } catch (error) {
+       this.logger.debug(`Error acknowledging worker stop: ${error}`);
+       throw error;
+     }
+   }
+
+   get edgeFunctionName() {
+     return this.workerRow?.function_name;
+   }
+
+   get queueName() {
+     return this.flow.slug;
+   }
+
+   async sendHeartbeat() {
+     await this.heartbeat?.send();
+   }
+
+   get isRunning() {
+     return this.workerState.isRunning;
+   }
+
+   get isStopping() {
+     return this.workerState.isStopping;
+   }
+
+   get isStopped() {
+     return this.workerState.isStopped;
+   }
+
+   transitionToStopping() {
+     this.workerState.transitionTo(States.Stopping);
+   }
+ }
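The call order against this lifecycle is driven by core/Worker.ts, which is not part of this hunk; the sketch below only illustrates the contract as declared by ILifecycle, and the helper name is invented.

import type { ILifecycle, WorkerBootstrap } from '@pgflow/edge-worker';

// Hypothetical driver: register the worker, refresh its heartbeat once,
// then acknowledge the stop (which moves the state machine to Stopped).
async function runLifecycleOnce(
  lifecycle: ILifecycle,
  bootstrap: WorkerBootstrap
): Promise<void> {
  await lifecycle.acknowledgeStart(bootstrap); // Starting -> Running, registers the worker row
  await lifecycle.sendHeartbeat();             // asks the Heartbeat helper to refresh the row
  lifecycle.acknowledgeStop();                 // Stopping -> Stopped
}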
package/src/flow/StepTaskExecutor.ts ADDED
@@ -0,0 +1,87 @@
+ import type { AnyFlow } from '@pgflow/dsl';
+ import type { StepTaskRecord, IPgflowClient } from './types.ts';
+ import type { IExecutor } from '../core/types.ts';
+ import { getLogger } from '../core/Logger.ts';
+
+ class AbortError extends Error {
+   constructor() {
+     super('Operation aborted');
+     this.name = 'AbortError';
+   }
+ }
+
+ /**
+  * An executor that processes step tasks using an IPgflowClient
+  * with strong typing for the flow's step handlers
+  */
+ export class StepTaskExecutor<TFlow extends AnyFlow> implements IExecutor {
+   private logger = getLogger('StepTaskExecutor');
+
+   constructor(
+     private readonly flow: TFlow,
+     private readonly task: StepTaskRecord<TFlow>,
+     private readonly adapter: IPgflowClient<TFlow>,
+     private readonly signal: AbortSignal
+   ) {}
+
+   get msgId() {
+     return this.task.msg_id;
+   }
+
+   async execute(): Promise<void> {
+     try {
+       if (this.signal.aborted) {
+         throw new AbortError();
+       }
+
+       // Check if already aborted before starting
+       this.signal.throwIfAborted();
+
+       const stepSlug = this.task.step_slug;
+       this.logger.debug(
+         `Executing step task ${this.task.msg_id} for step ${stepSlug}`
+       );
+
+       // Get the step handler from the flow with proper typing
+       const stepDef = this.flow.getStepDefinition(stepSlug);
+
+       if (!stepDef) {
+         throw new Error(`No step definition found for slug=${stepSlug}`);
+       }
+
+       // !!! HANDLER EXECUTION !!!
+       const result = await stepDef.handler(this.task.input);
+       // !!! HANDLER EXECUTION !!!
+
+       this.logger.debug(
+         `step task ${this.task.msg_id} completed successfully, marking as complete`
+       );
+       await this.adapter.completeTask(this.task, result);
+
+       this.logger.debug(`step task ${this.task.msg_id} marked as complete`);
+     } catch (error) {
+       await this.handleExecutionError(error);
+     }
+   }
+
+   /**
+    * Handles the error that occurred during execution.
+    *
+    * If the error is an AbortError, it means that the worker was aborted and stopping,
+    * the task will be picked up by another worker later.
+    *
+    * Otherwise, it marks the task as failed.
+    */
+   private async handleExecutionError(error: unknown) {
+     if (error instanceof Error && error.name === 'AbortError') {
+       this.logger.debug(`Aborted execution for step task ${this.task.msg_id}`);
+       // Do not mark as failed - the worker was aborted and stopping,
+       // the task will be picked up by another worker later
+     } else {
+       this.logger.error(
+         `step task ${this.task.msg_id} failed with error: ${error}`
+       );
+       await this.adapter.failTask(this.task, error);
+     }
+   }
+ }
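A hedged sketch of driving a single executor by hand, written as if it lived next to the flow/ sources; in the package this wiring is done by the executorFactory inside createFlowWorker, shown further below, and the helper name here is invented.

import type { AnyFlow } from '@pgflow/dsl';
import { StepTaskExecutor } from './StepTaskExecutor.ts';
import type { IPgflowClient, StepTaskRecord } from './types.ts';

// Illustrative helper: run one polled task to completion or failure.
async function executeOneStepTask<TFlow extends AnyFlow>(
  flow: TFlow,
  task: StepTaskRecord<TFlow>,
  adapter: IPgflowClient<TFlow>,
  signal: AbortSignal
): Promise<void> {
  const executor = new StepTaskExecutor<TFlow>(flow, task, adapter, signal);
  // Runs the step handler, then completeTask(...) or failTask(...) on the adapter;
  // if the signal is already aborted the task is left for another worker.
  await executor.execute();
}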
package/src/flow/StepTaskPoller.ts ADDED
@@ -0,0 +1,51 @@
+ import type { StepTaskRecord, IPgflowClient } from './types.ts';
+ import type { IPoller } from '../core/types.ts';
+ import { getLogger } from '../core/Logger.ts';
+ import type { AnyFlow } from '@pgflow/dsl';
+
+ export interface StepTaskPollerConfig {
+   batchSize: number;
+   queueName: string;
+ }
+
+ /**
+  * A poller that retrieves flow tasks using an IPgflowClient
+  */
+ export class StepTaskPoller<TFlow extends AnyFlow>
+   implements IPoller<StepTaskRecord<TFlow>>
+ {
+   private logger = getLogger('StepTaskPoller');
+
+   constructor(
+     private readonly adapter: IPgflowClient<TFlow>,
+     private readonly signal: AbortSignal,
+     private readonly config: StepTaskPollerConfig
+   ) {}
+
+   async poll(): Promise<StepTaskRecord<TFlow>[]> {
+     if (this.isAborted()) {
+       this.logger.debug('Polling aborted, returning empty array');
+       return [];
+     }
+
+     this.logger.debug(
+       `Polling for flow tasks with batch size ${this.config.batchSize}`
+     );
+
+     try {
+       const tasks = await this.adapter.pollForTasks(
+         this.config.queueName,
+         this.config.batchSize
+       );
+       this.logger.debug(`Retrieved ${tasks.length} flow tasks`);
+       return tasks;
+     } catch (err: unknown) {
+       this.logger.error(`Error polling for flow tasks: ${err}`);
+       return [];
+     }
+   }
+
+   private isAborted(): boolean {
+     return this.signal.aborted;
+   }
+ }
package/src/flow/createFlowWorker.ts ADDED
@@ -0,0 +1,105 @@
+ import type { AnyFlow } from '@pgflow/dsl';
+ import type { EdgeWorkerConfig } from '../EdgeWorker.ts';
+ import { ExecutionController } from '../core/ExecutionController.ts';
+ import { StepTaskPoller, type StepTaskPollerConfig } from './StepTaskPoller.ts';
+ import { StepTaskExecutor } from './StepTaskExecutor.ts';
+ import { PgflowSqlClient } from '@pgflow/core';
+ import { Queries } from '../core/Queries.ts';
+ import type { StepTaskRecord } from './types.ts';
+ import type { IExecutor } from '../core/types.ts';
+ import { Worker } from '../core/Worker.ts';
+ import postgres from 'postgres';
+ import { FlowWorkerLifecycle } from './FlowWorkerLifecycle.ts';
+ import { BatchProcessor } from '../core/BatchProcessor.ts';
+ import { getLogger } from '../core/Logger.ts';
+
+ /**
+  * Configuration for the flow worker
+  */
+ export type FlowWorkerConfig = EdgeWorkerConfig & {
+   maxConcurrent?: number;
+   connectionString?: string;
+   sql?: postgres.Sql;
+   maxPgConnections?: number;
+   batchSize?: number;
+ };
+
+ /**
+  * Creates a new Worker instance for processing flow tasks.
+  *
+  * @param flow - The Flow DSL definition
+  * @param config - Configuration options for the worker
+  * @returns A configured Worker instance ready to be started
+  */
+ export function createFlowWorker<TFlow extends AnyFlow>(
+   flow: TFlow,
+   config: FlowWorkerConfig
+ ): Worker {
+   const logger = getLogger('createFlowWorker');
+
+   // Create abort controller for graceful shutdown
+   const abortController = new AbortController();
+   const abortSignal = abortController.signal;
+
+   if (!config.sql && !config.connectionString) {
+     throw new Error(
+       "Either 'sql' or 'connectionString' must be provided in FlowWorkerConfig."
+     );
+   }
+
+   const sql =
+     config.sql ||
+     postgres(config.connectionString!, {
+       max: config.maxPgConnections,
+       prepare: false,
+     });
+
+   // Create the pgflow adapter
+   const pgflowAdapter = new PgflowSqlClient<TFlow>(sql);
+
+   // Use flow slug as queue name, or fallback to 'tasks'
+   const queueName = flow.slug || 'tasks';
+   logger.debug(`Using queue name: ${queueName}`);
+
+   // Create specialized FlowWorkerLifecycle with the proxied queue and flow
+   const queries = new Queries(sql);
+   const lifecycle = new FlowWorkerLifecycle<TFlow>(queries, flow);
+
+   // Create StepTaskPoller
+   const pollerConfig: StepTaskPollerConfig = {
+     batchSize: config.batchSize || 10,
+     queueName: flow.slug,
+   };
+   const poller = new StepTaskPoller<TFlow>(
+     pgflowAdapter,
+     abortSignal,
+     pollerConfig
+   );
+
+   // Create executor factory with proper typing
+   const executorFactory = (
+     record: StepTaskRecord<TFlow>,
+     signal: AbortSignal
+   ): IExecutor => {
+     return new StepTaskExecutor<TFlow>(flow, record, pgflowAdapter, signal);
+   };
+
+   // Create ExecutionController
+   const executionController = new ExecutionController<StepTaskRecord<TFlow>>(
+     executorFactory,
+     abortSignal,
+     {
+       maxConcurrent: config.maxConcurrent || 10,
+     }
+   );
+
+   // Create BatchProcessor
+   const batchProcessor = new BatchProcessor<StepTaskRecord<TFlow>>(
+     executionController,
+     poller,
+     abortSignal
+   );
+
+   // Return Worker
+   return new Worker(batchProcessor, lifecycle, sql);
+ }
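As a usage sketch, with hypothetical values throughout: myFlow stands for any flow built with @pgflow/dsl, and the environment variable name is a placeholder for whatever connection string the deployment provides.

import { createFlowWorker } from '@pgflow/edge-worker';
import { myFlow } from './flows/myFlow.ts';

const worker = createFlowWorker(myFlow, {
  connectionString: Deno.env.get('DB_CONNECTION_STRING')!, // placeholder env var
  maxConcurrent: 10,   // cap on concurrently executing step tasks
  batchSize: 10,       // step tasks requested per poll
  maxPgConnections: 4, // pool size passed to postgres()
});

// Per the docblock above, the returned Worker is configured and ready to be
// started; the start/stop API lives in core/Worker.ts, which is not in this hunk.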
package/src/flow/types.ts ADDED
@@ -0,0 +1 @@
+ export * from '../../../core/src/types.ts';
package/src/index.ts ADDED
@@ -0,0 +1,15 @@
+ // Export existing queue-based worker
+ export { createQueueWorker } from './queue/createQueueWorker.ts';
+ export { EdgeWorker } from './EdgeWorker.ts';
+
+ // Export new flow-based worker
+ export { createFlowWorker } from './flow/createFlowWorker.ts';
+ export { FlowWorkerLifecycle } from './flow/FlowWorkerLifecycle.ts';
+
+ // Export types
+ export type { StepTaskRecord } from './flow/types.ts';
+ export type { FlowWorkerConfig } from './flow/createFlowWorker.ts';
+ export type { StepTaskPollerConfig } from './flow/StepTaskPoller.ts';
+
+ // Re-export types from the base system
+ export type { Json, IExecutor, IPoller, IMessage, ILifecycle, IBatchProcessor } from './core/types.ts';
package/src/queue/MessageExecutor.ts ADDED
@@ -0,0 +1,105 @@
+ import type { Json } from '../core/types.ts';
+ import type { PgmqMessageRecord } from './types.ts';
+ import type { Queue } from './Queue.ts';
+ import { getLogger } from '../core/Logger.ts';
+
+ class AbortError extends Error {
+   constructor() {
+     super('Operation aborted');
+     this.name = 'AbortError';
+   }
+ }
+
+ /**
+  * A class that executes a message handler.
+  *
+  * It handles the execution of the message handler and retries or archives the message
+  * based on the retry limit and delay.
+  *
+  * It also handles the abort signal and logs the error.
+  */
+ export class MessageExecutor<TPayload extends Json> {
+   private logger = getLogger('MessageExecutor');
+
+   constructor(
+     private readonly queue: Queue<TPayload>,
+     private readonly record: PgmqMessageRecord<TPayload>,
+     private readonly messageHandler: (
+       message: TPayload
+     ) => Promise<void> | void,
+     private readonly signal: AbortSignal,
+     private readonly retryLimit: number,
+     private readonly retryDelay: number
+   ) {}
+
+   get msgId() {
+     return this.record.msg_id;
+   }
+
+   async execute(): Promise<void> {
+     try {
+       if (this.signal.aborted) {
+         throw new AbortError();
+       }
+
+       // Check if already aborted before starting
+       this.signal.throwIfAborted();
+
+       this.logger.debug(`Executing task ${this.msgId}...`);
+       await this.messageHandler(this.record.message!);
+
+       this.logger.debug(
+         `Task ${this.msgId} completed successfully, archiving...`
+       );
+       await this.queue.archive(this.msgId);
+       this.logger.debug(`Archived task ${this.msgId} successfully`);
+     } catch (error) {
+       await this.handleExecutionError(error);
+     }
+   }
+
+   /**
+    * Handles the error that occurred during execution.
+    *
+    * If the error is an AbortError, it means that the worker was aborted and stopping,
+    * the message will reappear after the visibility timeout and be picked up by another worker.
+    *
+    * Otherwise, it proceeds with retry or archiving forever.
+    */
+   private async handleExecutionError(error: unknown) {
+     if (error instanceof Error && error.name === 'AbortError') {
+       this.logger.debug(`Aborted execution for ${this.msgId}`);
+       // Do not throw - the worker was aborted and stopping,
+       // the message will reappear after the visibility timeout
+       // and be picked up by another worker
+     } else {
+       this.logger.debug(`Task ${this.msgId} failed with error: ${error}`);
+       await this.retryOrArchive();
+     }
+   }
+
+   /**
+    * Retries the message if it is available.
+    * Otherwise, archives the message forever and stops processing it.
+    */
+   private async retryOrArchive() {
+     if (this.retryAvailable) {
+       // adjust visibility timeout for message to appear after retryDelay
+       this.logger.debug(`Retrying ${this.msgId} in ${this.retryDelay} seconds`);
+       await this.queue.setVt(this.msgId, this.retryDelay);
+     } else {
+       // archive message forever and stop processing it
+       this.logger.debug(`Archiving ${this.msgId} forever`);
+       await this.queue.archive(this.msgId);
+     }
+   }
+
+   /**
+    * Returns true if the message can be retried.
+    */
+   private get retryAvailable() {
+     const readCountLimit = this.retryLimit + 1; // initial read also counts
+
+     return this.record.read_ct < readCountLimit;
+   }
+ }
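The retry accounting in retryAvailable is easiest to see with concrete numbers (hypothetical trace): with retryLimit = 2, readCountLimit is 3 because the initial read also counts.

// read_ct = 1  first attempt fails  -> setVt(msgId, retryDelay), message reappears later
// read_ct = 2  first retry fails    -> setVt(msgId, retryDelay)
// read_ct = 3  second retry fails   -> 3 < 3 is false, archive(msgId); no further attempts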
package/src/queue/Queue.ts ADDED
@@ -0,0 +1,92 @@
+ import type postgres from 'postgres';
+ import type { PgmqMessageRecord } from './types.ts';
+ import type { Json } from '../core/types.ts';
+
+ export class Queue<TPayload extends Json> {
+   constructor(private readonly sql: postgres.Sql, readonly queueName: string) {}
+
+   /**
+    * Creates a queue if it doesn't exist.
+    * If the queue already exists, this method does nothing.
+    */
+   async safeCreate() {
+     return await this.sql`
+       select * from pgmq.create(${this.queueName})
+       where not exists (
+         select 1 from pgmq.list_queues() where queue_name = ${this.queueName}
+       );
+     `;
+   }
+
+   /**
+    * Drops a queue if it exists.
+    * If the queue doesn't exist, this method does nothing.
+    */
+   async safeDrop() {
+     return await this.sql`
+       select * from pgmq.drop_queue(${this.queueName})
+       where exists (
+         select 1 from pgmq.list_queues() where queue_name = ${this.queueName}
+       );
+     `;
+   }
+
+   async archive(msgId: number): Promise<void> {
+     await this.sql`
+       SELECT pgmq.archive(queue_name => ${this.queueName}, msg_id => ${msgId}::bigint);
+     `;
+   }
+
+   async archiveBatch(msgIds: number[]): Promise<void> {
+     await this.sql`
+       SELECT pgmq.archive(queue_name => ${this.queueName}, msg_ids => ${msgIds}::bigint[]);
+     `;
+   }
+
+   async send(message: TPayload): Promise<void> {
+     const msgJson = JSON.stringify(message);
+     await this.sql`
+       SELECT pgmq.send(queue_name => ${this.queueName}, msg => ${msgJson}::jsonb)
+     `;
+   }
+
+   async readWithPoll(
+     batchSize = 20,
+     visibilityTimeout = 2,
+     maxPollSeconds = 5,
+     pollIntervalMs = 200
+   ) {
+     return await this.sql<PgmqMessageRecord<TPayload>[]>`
+       SELECT *
+       FROM edge_worker.read_with_poll(
+         queue_name => ${this.queueName},
+         vt => ${visibilityTimeout},
+         qty => ${batchSize},
+         max_poll_seconds => ${maxPollSeconds},
+         poll_interval_ms => ${pollIntervalMs}
+       );
+     `;
+   }
+
+   /**
+    * Sets the visibility timeout of a message to the current time plus the given offset.
+    *
+    * This is an inlined version of the pgmq.set_vt in order to fix the bug.
+    * The original uses now() instead of clock_timestamp() which is problematic in transactions.
+    * See more details here: https://github.com/tembo-io/pgmq/issues/367
+    *
+    * The only change made is now() replaced with clock_timestamp().
+    */
+   async setVt(
+     msgId: number,
+     vtOffsetSeconds: number
+   ): Promise<PgmqMessageRecord<TPayload>> {
+     const records = await this.sql<PgmqMessageRecord<TPayload>[]>`
+       UPDATE ${this.sql('pgmq.q_' + this.queueName)}
+       SET vt = (clock_timestamp() + make_interval(secs => ${vtOffsetSeconds}))
+       WHERE msg_id = ${msgId}::bigint
+       RETURNING *;
+     `;
+     return records[0];
+   }
+ }
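A hedged usage sketch of this wrapper, written as if it lived next to the queue/ sources; the connection string, queue name, and payload type are placeholders.

import postgres from 'postgres';
import { Queue } from './Queue.ts';

type Payload = { url: string };

const sql = postgres(Deno.env.get('DB_CONNECTION_STRING')!, { prepare: false });
const queue = new Queue<Payload>(sql, 'tasks');

await queue.safeCreate(); // no-op if the queue already exists
await queue.send({ url: 'https://example.com' });

// Long-poll for up to 5 s, hiding read messages for 2 s (the defaults above).
const messages = await queue.readWithPoll(20, 2, 5, 200);
for (const message of messages) {
  // ... handle message.message, then remove it from the queue:
  await queue.archive(message.msg_id);
}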
package/src/queue/ReadWithPollPoller.ts ADDED
@@ -0,0 +1,35 @@
+ import type { Queue } from './Queue.ts';
+ import type { PgmqMessageRecord } from './types.ts';
+ import type { Json } from '../core/types.ts';
+
+ export interface PollerConfig {
+   batchSize: number;
+   maxPollSeconds: number;
+   pollIntervalMs: number;
+   visibilityTimeout: number;
+ }
+
+ export class ReadWithPollPoller<TPayload extends Json> {
+   constructor(
+     protected readonly queue: Queue<TPayload>,
+     protected readonly signal: AbortSignal,
+     protected readonly config: PollerConfig
+   ) {}
+
+   async poll(): Promise<PgmqMessageRecord<TPayload>[]> {
+     if (this.isAborted()) {
+       return [];
+     }
+
+     return await this.queue.readWithPoll(
+       this.config.batchSize,
+       this.config.visibilityTimeout,
+       this.config.maxPollSeconds,
+       this.config.pollIntervalMs
+     );
+   }
+
+   private isAborted(): boolean {
+     return this.signal.aborted;
+   }
+ }
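The poller pairs directly with the Queue above; here is a minimal wiring sketch under the same placeholder assumptions as the Queue example.

import postgres from 'postgres';
import { Queue } from './Queue.ts';
import { ReadWithPollPoller } from './ReadWithPollPoller.ts';

type Payload = { url: string };

const sql = postgres(Deno.env.get('DB_CONNECTION_STRING')!, { prepare: false });
const abortController = new AbortController();

const poller = new ReadWithPollPoller<Payload>(
  new Queue<Payload>(sql, 'tasks'),
  abortController.signal,
  { batchSize: 10, maxPollSeconds: 5, pollIntervalMs: 200, visibilityTimeout: 2 }
);

const batch = await poller.poll(); // returns [] immediately once the signal is aborted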