@pgflow/edge-worker 0.0.5-prealpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.envrc +2 -0
- package/LICENSE.md +660 -0
- package/README.md +46 -0
- package/deno.json +32 -0
- package/deno.lock +369 -0
- package/dist/LICENSE.md +660 -0
- package/dist/README.md +46 -0
- package/dist/index.js +972 -0
- package/dist/index.js.map +7 -0
- package/mod.ts +7 -0
- package/package.json +14 -0
- package/project.json +164 -0
- package/scripts/concatenate-migrations.sh +22 -0
- package/scripts/wait-for-localhost +17 -0
- package/sql/990_active_workers.sql +11 -0
- package/sql/991_inactive_workers.sql +12 -0
- package/sql/992_spawn_worker.sql +68 -0
- package/sql/benchmarks/max_concurrency.sql +32 -0
- package/sql/queries/debug_connections.sql +0 -0
- package/sql/queries/debug_processing_gaps.sql +115 -0
- package/src/EdgeWorker.ts +172 -0
- package/src/core/BatchProcessor.ts +38 -0
- package/src/core/ExecutionController.ts +51 -0
- package/src/core/Heartbeat.ts +23 -0
- package/src/core/Logger.ts +69 -0
- package/src/core/Queries.ts +44 -0
- package/src/core/Worker.ts +102 -0
- package/src/core/WorkerLifecycle.ts +93 -0
- package/src/core/WorkerState.ts +85 -0
- package/src/core/types.ts +47 -0
- package/src/flow/FlowWorkerLifecycle.ts +81 -0
- package/src/flow/StepTaskExecutor.ts +87 -0
- package/src/flow/StepTaskPoller.ts +51 -0
- package/src/flow/createFlowWorker.ts +105 -0
- package/src/flow/types.ts +1 -0
- package/src/index.ts +15 -0
- package/src/queue/MessageExecutor.ts +105 -0
- package/src/queue/Queue.ts +92 -0
- package/src/queue/ReadWithPollPoller.ts +35 -0
- package/src/queue/createQueueWorker.ts +145 -0
- package/src/queue/types.ts +14 -0
- package/src/spawnNewEdgeFunction.ts +33 -0
- package/supabase/call +23 -0
- package/supabase/cli +3 -0
- package/supabase/config.toml +42 -0
- package/supabase/functions/cpu_intensive/index.ts +20 -0
- package/supabase/functions/creating_queue/index.ts +5 -0
- package/supabase/functions/failing_always/index.ts +13 -0
- package/supabase/functions/increment_sequence/index.ts +14 -0
- package/supabase/functions/max_concurrency/index.ts +17 -0
- package/supabase/functions/serial_sleep/index.ts +16 -0
- package/supabase/functions/utils.ts +13 -0
- package/supabase/seed.sql +2 -0
- package/tests/db/compose.yaml +20 -0
- package/tests/db.ts +71 -0
- package/tests/e2e/README.md +54 -0
- package/tests/e2e/_helpers.ts +135 -0
- package/tests/e2e/performance.test.ts +60 -0
- package/tests/e2e/restarts.test.ts +56 -0
- package/tests/helpers.ts +22 -0
- package/tests/integration/_helpers.ts +43 -0
- package/tests/integration/creating_queue.test.ts +32 -0
- package/tests/integration/flow/minimalFlow.test.ts +121 -0
- package/tests/integration/maxConcurrent.test.ts +76 -0
- package/tests/integration/retries.test.ts +78 -0
- package/tests/integration/starting_worker.test.ts +35 -0
- package/tests/sql.ts +46 -0
- package/tests/unit/WorkerState.test.ts +74 -0
- package/tsconfig.lib.json +23 -0
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
import { ExecutionController } from '../core/ExecutionController.ts';
|
|
2
|
+
import { MessageExecutor } from './MessageExecutor.ts';
|
|
3
|
+
import { Queries } from '../core/Queries.ts';
|
|
4
|
+
import { Queue } from './Queue.ts';
|
|
5
|
+
import { ReadWithPollPoller } from './ReadWithPollPoller.ts';
|
|
6
|
+
import type { Json } from '../core/types.ts';
|
|
7
|
+
import type { PgmqMessageRecord } from './types.ts';
|
|
8
|
+
import { Worker } from '../core/Worker.ts';
|
|
9
|
+
import postgres from 'postgres';
|
|
10
|
+
import { WorkerLifecycle } from '../core/WorkerLifecycle.ts';
|
|
11
|
+
import { BatchProcessor } from '../core/BatchProcessor.ts';
|
|
12
|
+
|
|
13
|
+
/**
 * Configuration for the queue worker
 */
export type QueueWorkerConfig = {
  /**
   * PostgreSQL connection string.
   * If not provided, it will be read from the EDGE_WORKER_DB_URL environment variable.
   * NOTE(review): createQueueWorker itself does not read the env var —
   * confirm the fallback is applied by the caller (EdgeWorker).
   */
  connectionString?: string;

  /**
   * Name of the queue to poll for messages
   * @default 'tasks'
   */
  queueName?: string;

  /**
   * How many tasks are processed at the same time
   * @default 10
   */
  maxConcurrent?: number;

  /**
   * How many connections to the database are opened
   * NOTE(review): createQueueWorker passes this through to postgres()
   * without applying a default of 4 — confirm where 4 is enforced.
   * @default 4
   */
  maxPgConnections?: number;

  /**
   * In-worker polling interval in seconds
   * @default 5
   */
  maxPollSeconds?: number;

  /**
   * In-database polling interval in milliseconds
   * @default 200
   */
  pollIntervalMs?: number;

  /**
   * How long to wait before retrying a failed job in seconds
   * @default 3
   */
  retryDelay?: number;

  /**
   * How many times to retry a failed job
   * @default 5
   */
  retryLimit?: number;

  /**
   * How long a job is invisible after reading in seconds.
   * If not successful, will reappear after this time.
   * @default 3
   */
  visibilityTimeout?: number;

  /**
   * Batch size for polling messages
   * @default 10
   */
  batchSize?: number;

  /**
   * Optional SQL client instance; when provided, connectionString and
   * maxPgConnections are ignored.
   */
  sql?: postgres.Sql;
};
|
|
83
|
+
|
|
84
|
+
/**
|
|
85
|
+
* Creates a new Worker instance for processing queue messages.
|
|
86
|
+
*
|
|
87
|
+
* @param handler - The message handler function that processes each message from the queue
|
|
88
|
+
* @param config - Configuration options for the worker
|
|
89
|
+
* @returns A configured Worker instance ready to be started
|
|
90
|
+
*/
|
|
91
|
+
export function createQueueWorker<TPayload extends Json>(
|
|
92
|
+
handler: (message: TPayload) => Promise<void> | void,
|
|
93
|
+
config: QueueWorkerConfig
|
|
94
|
+
): Worker {
|
|
95
|
+
type QueueMessage = PgmqMessageRecord<TPayload>;
|
|
96
|
+
|
|
97
|
+
const abortController = new AbortController();
|
|
98
|
+
const abortSignal = abortController.signal;
|
|
99
|
+
|
|
100
|
+
// Use provided SQL connection if available, otherwise create one from connection string
|
|
101
|
+
const sql =
|
|
102
|
+
config.sql ||
|
|
103
|
+
postgres(config.connectionString || '', {
|
|
104
|
+
max: config.maxPgConnections,
|
|
105
|
+
prepare: false,
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
const queue = new Queue<TPayload>(sql, config.queueName || 'tasks');
|
|
109
|
+
const queries = new Queries(sql);
|
|
110
|
+
|
|
111
|
+
const lifecycle = new WorkerLifecycle<TPayload>(queries, queue);
|
|
112
|
+
|
|
113
|
+
const executorFactory = (record: QueueMessage, signal: AbortSignal) => {
|
|
114
|
+
return new MessageExecutor(
|
|
115
|
+
queue,
|
|
116
|
+
record,
|
|
117
|
+
handler,
|
|
118
|
+
signal,
|
|
119
|
+
config.retryLimit || 5,
|
|
120
|
+
config.retryDelay || 3
|
|
121
|
+
);
|
|
122
|
+
};
|
|
123
|
+
|
|
124
|
+
const poller = new ReadWithPollPoller(queue, abortSignal, {
|
|
125
|
+
batchSize: config.batchSize || config.maxConcurrent || 10,
|
|
126
|
+
maxPollSeconds: config.maxPollSeconds || 5,
|
|
127
|
+
pollIntervalMs: config.pollIntervalMs || 200,
|
|
128
|
+
visibilityTimeout: config.visibilityTimeout || 3,
|
|
129
|
+
});
|
|
130
|
+
|
|
131
|
+
const executionController = new ExecutionController<QueueMessage>(
|
|
132
|
+
executorFactory,
|
|
133
|
+
abortSignal,
|
|
134
|
+
{
|
|
135
|
+
maxConcurrent: config.maxConcurrent || 10,
|
|
136
|
+
}
|
|
137
|
+
);
|
|
138
|
+
const batchProcessor = new BatchProcessor<QueueMessage>(
|
|
139
|
+
executionController,
|
|
140
|
+
poller,
|
|
141
|
+
abortSignal
|
|
142
|
+
);
|
|
143
|
+
|
|
144
|
+
return new Worker(batchProcessor, lifecycle, sql);
|
|
145
|
+
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import type { Json, IMessage } from "../core/types.ts";
|
|
2
|
+
|
|
3
|
+
/**
 * A single PGMQ message record as read from a queue.
 *
 * Fields are nullable because composite types in postgres do not allow
 * NOT NULL attributes, but all those values except `message` come from
 * queue table columns, which are explicitly marked as NOT NULL.
 */
export interface PgmqMessageRecord<TPayload extends Json | null = Json> extends IMessage {
  // Number of times this message has been read (used for retry accounting)
  read_ct: number;
  // Timestamp the message was enqueued (serialized as a string)
  enqueued_at: string;
  // Visibility timeout: when the message becomes readable again
  vt: string;
  // The user payload carried by the message
  message: TPayload;
}
|
|
14
|
+
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import { getLogger } from './core/Logger.ts';
|
|
2
|
+
|
|
3
|
+
// @ts-ignore - TODO: fix the types
|
|
4
|
+
const SUPABASE_URL = Deno.env.get('SUPABASE_URL') as string;
|
|
5
|
+
const SUPABASE_ANON_KEY = Deno.env.get('SUPABASE_ANON_KEY') as string;
|
|
6
|
+
|
|
7
|
+
const logger = getLogger('spawnNewEdgeFunction');
|
|
8
|
+
|
|
9
|
+
export default async function spawnNewEdgeFunction(
|
|
10
|
+
functionName: string = 'edge-worker'
|
|
11
|
+
): Promise<void> {
|
|
12
|
+
if (!functionName) {
|
|
13
|
+
throw new Error('functionName cannot be null or empty');
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
logger.debug('Spawning a new Edge Function...');
|
|
17
|
+
|
|
18
|
+
const response = await fetch(`${SUPABASE_URL}/functions/v1/${functionName}`, {
|
|
19
|
+
method: 'POST',
|
|
20
|
+
headers: {
|
|
21
|
+
Authorization: `Bearer ${SUPABASE_ANON_KEY}`,
|
|
22
|
+
'Content-Type': 'application/json',
|
|
23
|
+
}
|
|
24
|
+
});
|
|
25
|
+
|
|
26
|
+
logger.debug('Edge Function spawned successfully!');
|
|
27
|
+
|
|
28
|
+
if (!response.ok) {
|
|
29
|
+
throw new Error(
|
|
30
|
+
`Edge function returned non-OK status: ${response.status} ${response.statusText}`
|
|
31
|
+
);
|
|
32
|
+
}
|
|
33
|
+
}
|
package/supabase/call
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
#!/bin/bash

# Invoke a local Supabase edge function:
#   ./call <function_name> '<json_payload>'

# get env vars from supabase status and eval them for usage
supabase_env=$(supabase status --output env 2>/dev/null)

if [ $? -eq 0 ]; then
  eval "$supabase_env"
fi

# Allow a local .env file to provide/override variables (e.g. ANON_KEY)
if [ -f ".env" ]; then
  source .env
fi

function_name="$1"
data="$2"

API_URL=http://localhost:50321
# Double-quote the whole header value so ANON_KEY is not subject to
# word splitting (the original '...'${ANON_KEY}'' form was unquoted).
curl \
  --request POST \
  "${API_URL}/functions/v1/${function_name}" \
  --header "Authorization: Bearer ${ANON_KEY}" \
  --header 'Content-Type: application/json' \
  --data-raw "$data"
|
package/supabase/cli
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
project_id = "edge-worker"
|
|
2
|
+
|
|
3
|
+
[api]
|
|
4
|
+
enabled = true
|
|
5
|
+
port = 50321
|
|
6
|
+
|
|
7
|
+
[db]
|
|
8
|
+
port = 50322
|
|
9
|
+
shadow_port = 50320
|
|
10
|
+
major_version = 15
|
|
11
|
+
|
|
12
|
+
[db.pooler]
|
|
13
|
+
enabled = true
|
|
14
|
+
port = 50329
|
|
15
|
+
pool_mode = "transaction"
|
|
16
|
+
default_pool_size = 200
|
|
17
|
+
max_client_conn = 250
|
|
18
|
+
|
|
19
|
+
[db.seed]
|
|
20
|
+
enabled = true
|
|
21
|
+
sql_paths = ['./seed.sql']
|
|
22
|
+
|
|
23
|
+
[edge_runtime]
|
|
24
|
+
enabled = true
|
|
25
|
+
policy = "per_worker"
|
|
26
|
+
inspector_port = 8083
|
|
27
|
+
|
|
28
|
+
# disable unused features
|
|
29
|
+
[realtime]
|
|
30
|
+
enabled = false
|
|
31
|
+
[studio]
|
|
32
|
+
enabled = false
|
|
33
|
+
[inbucket]
|
|
34
|
+
enabled = false
|
|
35
|
+
[analytics]
|
|
36
|
+
enabled = false
|
|
37
|
+
[storage]
|
|
38
|
+
enabled = false
|
|
39
|
+
[auth]
|
|
40
|
+
enabled = false
|
|
41
|
+
|
|
42
|
+
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import { EdgeWorker } from '../_src/EdgeWorker.ts';
|
|
2
|
+
import { crypto } from 'jsr:@std/crypto';
|
|
3
|
+
import { sql } from '../utils.ts';
|
|
4
|
+
|
|
5
|
+
async function cpuIntensiveTask() {
|
|
6
|
+
let data = new TextEncoder().encode('burn');
|
|
7
|
+
const timeId = `cpu_intensive_${Math.random()}`;
|
|
8
|
+
console.time(timeId);
|
|
9
|
+
for (let i = 0; i < 10000; i++) {
|
|
10
|
+
data = new Uint8Array(await crypto.subtle.digest('SHA-256', data));
|
|
11
|
+
}
|
|
12
|
+
console.timeEnd(timeId);
|
|
13
|
+
|
|
14
|
+
console.log(
|
|
15
|
+
'[cpu_intensive] last_val = ',
|
|
16
|
+
await sql`SELECT nextval('test_seq')`
|
|
17
|
+
);
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
EdgeWorker.start(cpuIntensiveTask, { queueName: 'cpu_intensive' });
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { EdgeWorker } from '../_src/EdgeWorker.ts';
|
|
2
|
+
|
|
3
|
+
function failingAlways() {
|
|
4
|
+
console.log('(╯°□°)╯︵ ┻━┻');
|
|
5
|
+
throw new Error('(╯°□°)╯︵ ┻━┻');
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
// Retry config is deliberately tiny so tests observe the failure cycle
// quickly: 2 retries, 2s apart, polled every second.
EdgeWorker.start(failingAlways, {
  queueName: 'failing_always',
  retryLimit: 2,
  retryDelay: 2,
  maxPollSeconds: 1,
});
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import { EdgeWorker } from '../_src/EdgeWorker.ts';
|
|
2
|
+
import { sql } from '../utils.ts';
|
|
3
|
+
|
|
4
|
+
// await sql`CREATE SEQUENCE IF NOT EXISTS test_seq`;
|
|
5
|
+
// await sql`SELECT pgmq.create('increment_sequence')`;
|
|
6
|
+
|
|
7
|
+
async function incrementCounter() {
|
|
8
|
+
console.log(
|
|
9
|
+
'[increment_sequence] next_seq =',
|
|
10
|
+
await sql`SELECT nextval('test_seq')`
|
|
11
|
+
);
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
EdgeWorker.start(incrementCounter, { queueName: 'increment_sequence' });
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { EdgeWorker } from '../_src/EdgeWorker.ts';
|
|
2
|
+
import { sleep, sql } from '../utils.ts';
|
|
3
|
+
|
|
4
|
+
async function incrementSeq() {
|
|
5
|
+
await sleep(50);
|
|
6
|
+
|
|
7
|
+
console.log(
|
|
8
|
+
'[max_concurrency] last_val =',
|
|
9
|
+
await sql`SELECT nextval('test_seq')`
|
|
10
|
+
);
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
EdgeWorker.start(incrementSeq, {
|
|
14
|
+
queueName: 'max_concurrency',
|
|
15
|
+
maxConcurrent: 10,
|
|
16
|
+
maxPgConnections: 4,
|
|
17
|
+
});
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import { EdgeWorker } from '../_src/EdgeWorker.ts';
|
|
2
|
+
import { sql, sleep } from '../utils.ts';
|
|
3
|
+
|
|
4
|
+
const sleep1s = async () => {
|
|
5
|
+
console.time('Task time');
|
|
6
|
+
const lastVal = await sql`SELECT nextval('test_seq')`;
|
|
7
|
+
console.log('[serial_sleep] lastVal =', lastVal);
|
|
8
|
+
await sleep(1000);
|
|
9
|
+
console.timeEnd('Task time');
|
|
10
|
+
};
|
|
11
|
+
|
|
12
|
+
EdgeWorker.start(sleep1s, {
|
|
13
|
+
queueName: 'serial_sleep',
|
|
14
|
+
maxConcurrent: 1,
|
|
15
|
+
visibilityTimeout: 5, // higher than the delay()
|
|
16
|
+
});
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import postgres from 'postgres';
|
|
2
|
+
import { delay } from '@std/async';
|
|
3
|
+
|
|
4
|
+
// Connection string for the test database; must be set in the worker env.
const EDGE_WORKER_DB_URL = Deno.env.get('EDGE_WORKER_DB_URL')!;
console.log('EDGE_WORKER_DB_URL', EDGE_WORKER_DB_URL);

// Shared client for test handlers. prepare: false — presumably because the
// local stack routes through a transaction-mode pooler, where prepared
// statements are unsafe; confirm against supabase/config.toml.
export const sql = postgres(EDGE_WORKER_DB_URL, { prepare: false });

// Friendlier alias for @std/async's delay().
export const sleep = delay;
|
|
10
|
+
|
|
11
|
+
export function randomInt(min: number, max: number) {
|
|
12
|
+
return Math.floor(Math.random() * (max - min + 1)) + min;
|
|
13
|
+
}
|
|
@@ -0,0 +1,2 @@
|
|
|
1
|
+
select vault.create_secret('http://host.docker.internal:50321', 'app_url');
|
|
2
|
+
select vault.create_secret('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0', 'supabase_anon_key');
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
services:
|
|
2
|
+
db:
|
|
3
|
+
# image: postgres:15.8-alpine
|
|
4
|
+
# image: supabase/postgres:15.8.1.039
|
|
5
|
+
# image: supabase/postgres:15.1.0.37
|
|
6
|
+
|
|
7
|
+
image: supabase/postgres:15.8.1.020
|
|
8
|
+
# image: supabase/postgres:15.8.1.018
|
|
9
|
+
ports:
|
|
10
|
+
- '5432:5432'
|
|
11
|
+
volumes:
|
|
12
|
+
- ./migrations/edge_worker.sql:/docker-entrypoint-initdb.d/migrations/950_edge_worker.sql
|
|
13
|
+
environment:
|
|
14
|
+
POSTGRES_DB: postgres
|
|
15
|
+
# commented out because of the problems with supabase_admin permissions
|
|
16
|
+
# see: https://github.com/supabase/postgres/issues/1219#issuecomment-2362955730
|
|
17
|
+
# POSTGRES_USER: postgres
|
|
18
|
+
POSTGRES_PASSWORD: postgres
|
|
19
|
+
POSTGRES_HOST: /var/run/postgresql
|
|
20
|
+
POSTGRES_PORT: 5432
|
package/tests/db.ts
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import postgres from 'postgres';
|
|
2
|
+
|
|
3
|
+
function createSql(dbUrl: string) {
|
|
4
|
+
return postgres(dbUrl, {
|
|
5
|
+
prepare: false,
|
|
6
|
+
onnotice(_: unknown) {
|
|
7
|
+
// no-op to silence notices
|
|
8
|
+
},
|
|
9
|
+
});
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
/**
 * Wraps a test callback in a database transaction that is always rolled
 * back, so each test observes a clean database state.
 *
 * Returns an async thunk (usable as a test body). The callback receives a
 * transaction-scoped sql client whose end() is a no-op, so code under test
 * that closes its connection cannot kill the shared transaction connection.
 */
export function withTransaction(
  callback: (sql: postgres.Sql) => Promise<unknown>
) {
  const dbUrl = `postgresql://supabase_admin:postgres@localhost:5432/postgres`;
  const localSql = createSql(dbUrl);

  return async () => {
    try {
      console.log('calling callback');

      // Captured instead of thrown so the begin() block can still finish
      // its ROLLBACK before we re-raise below.
      let callbackError: unknown = null;

      await localSql.begin(async (sql: postgres.Sql) => {
        // Add no-op end() method to transaction-local sql
        const wrappedSql = Object.assign(sql, {
          end: async () => {
            /* no-op */
          },
        });

        try {
          await callback(wrappedSql);
        } catch (error) {
          callbackError = error;
        } finally {
          console.log('Rolling back transaction');
          // Using ROLLBACK AND CHAIN to avoid "no transaction in progress" warning
          await sql`ROLLBACK AND CHAIN`;
        }
      });

      console.log('callback called');

      // Re-raise the callback's failure so the test still fails.
      if (callbackError) {
        throw callbackError;
      }
    } catch (err) {
      console.error('Error in withTransaction:', err);
      throw err;
    } finally {
      console.log('Closing connection');
      await localSql.end();
    }
  };
}
|
|
57
|
+
|
|
58
|
+
export function withPgNoTransaction(
|
|
59
|
+
callback: (sql: postgres.Sql) => Promise<unknown>
|
|
60
|
+
) {
|
|
61
|
+
const dbUrl = 'postgresql://supabase_admin:postgres@localhost:5432/postgres';
|
|
62
|
+
const sql = createSql(dbUrl);
|
|
63
|
+
|
|
64
|
+
return async () => {
|
|
65
|
+
try {
|
|
66
|
+
await callback(sql);
|
|
67
|
+
} finally {
|
|
68
|
+
await sql.end();
|
|
69
|
+
}
|
|
70
|
+
};
|
|
71
|
+
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
# E2E Testing Strategy
|
|
2
|
+
|
|
3
|
+
We use a real Supabase instance running locally (in `supabase/`) to perform end-to-end testing of the entire EdgeWorker stack. The tests interact with purpose-built test workers that simulate different behaviors:
|
|
4
|
+
|
|
5
|
+
- Different queues with specific behaviors
|
|
6
|
+
- Workers that always fail
|
|
7
|
+
- Workers with varying retry patterns
|
|
8
|
+
- etc.
|
|
9
|
+
|
|
10
|
+
Many of the workers increment `test_seq` as a means of signaling the completion of a job.
|
|
11
|
+
This also makes it possible to determine how many times handlers were called if any retries were attempted.
|
|
12
|
+
|
|
13
|
+
The test flow is straightforward:
|
|
14
|
+
|
|
15
|
+
1. Put messages on specific queues
|
|
16
|
+
2. Worker calls handlers, handlers increment `test_seq`
|
|
17
|
+
3. `await` until `test_seq` was incremented to expected value (number of messages)
|
|
18
|
+
4. Assert that workers behaved as expected (retries, failures, etc.)
|
|
19
|
+
|
|
20
|
+
This approach lets us verify the entire stack from message enqueueing through worker processing, retries, and completion.
|
|
21
|
+
|
|
22
|
+
## Core Test Scenarios Needed
|
|
23
|
+
|
|
24
|
+
#### Glossary
|
|
25
|
+
|
|
26
|
+
- `worker` - instance of given worker edge function that is subject to CPU and memory limits and can be killed
|
|
27
|
+
- `worker function` - edge function within supabase app that uses EdgeWorker instead of serving requests
|
|
28
|
+
- `queue` - pgmq queue that workers can pull from
|
|
29
|
+
- `message` - PGMQ `message_record` that contains metadata (`msg_id`, `read_ct`, `vt`) and payload (`message JSONB`)
|
|
30
|
+
|
|
31
|
+
### [ ] Happy Path
|
|
32
|
+
|
|
33
|
+
- [x] Worker picks messages from queue
|
|
34
|
+
- [ ] Worker calls handler function with each message
|
|
35
|
+
- [x] Worker can process big amounts of messages (restarts itself when CPU clock limit hits)
|
|
36
|
+
- [x] Different worker functions can pull from different queues
|
|
37
|
+
- [ ] Different worker functions can pull from the same queue
|
|
38
|
+
|
|
39
|
+
### [x] Worker Lifecycle
|
|
40
|
+
|
|
41
|
+
- [x] Worker registers on start
|
|
42
|
+
- [x] Worker sends heartbeats every 5s
|
|
43
|
+
- [x] Worker updates function_name with heartbeat
|
|
44
|
+
|
|
45
|
+
### [ ] Retries & Failures
|
|
46
|
+
|
|
47
|
+
- [ ] Worker retries failed jobs n-times and succeeds
|
|
48
|
+
- [ ] Worker uses exponential backoff for each subsequent retry
|
|
49
|
+
- [x] Worker uses proper number of retries for each job
|
|
50
|
+
- [x] Worker archives jobs that will not be retried
|
|
51
|
+
|
|
52
|
+
### [x] Concurrency
|
|
53
|
+
|
|
54
|
+
- [x] Worker respects maxConcurrent and processes messages in serial when set to 1
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
import { sql } from '../sql.ts';
|
|
2
|
+
import { delay } from '@std/async';
|
|
3
|
+
import ProgressBar from 'jsr:@deno-library/progress';
|
|
4
|
+
import { dim } from 'https://deno.land/std@0.224.0/fmt/colors.ts';
|
|
5
|
+
|
|
6
|
+
/** Options accepted by waitFor(). */
interface WaitForOptions {
  // how often to re-check the predicate, in milliseconds
  pollIntervalMs?: number;
  // give up (and throw) after this many milliseconds
  timeoutMs?: number;
  // human-readable label used in the timeout error message
  description?: string;
}
|
|
11
|
+
|
|
12
|
+
export function log(message: string, ...args: unknown[]) {
|
|
13
|
+
console.log(dim(` -> ${message}`), ...args);
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
export async function waitFor<T>(
|
|
17
|
+
predicate: () => Promise<T | false>,
|
|
18
|
+
options: WaitForOptions = {}
|
|
19
|
+
): Promise<T> {
|
|
20
|
+
const {
|
|
21
|
+
pollIntervalMs = 250,
|
|
22
|
+
timeoutMs = 30000,
|
|
23
|
+
description = 'condition',
|
|
24
|
+
} = options;
|
|
25
|
+
|
|
26
|
+
const startTime = Date.now();
|
|
27
|
+
|
|
28
|
+
while (true) {
|
|
29
|
+
const result = await predicate();
|
|
30
|
+
|
|
31
|
+
if (result) return result;
|
|
32
|
+
|
|
33
|
+
if (Date.now() - startTime > timeoutMs) {
|
|
34
|
+
throw new Error(
|
|
35
|
+
`Timeout after ${timeoutMs}ms waiting for ${description}`
|
|
36
|
+
);
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
await delay(pollIntervalMs);
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
/**
 * Enqueues `count` empty JSON messages onto the given pgmq queue.
 * Used by tests to generate load for the workers.
 */
export async function sendBatch(count: number, queueName: string) {
  return await sql`
    SELECT pgmq.send_batch(
      ${queueName},
      ARRAY(
        SELECT '{}'::jsonb
        FROM generate_series(1, ${count}::integer)
      )
    )`;
}
|
|
53
|
+
|
|
54
|
+
/**
 * Reads the current value of a sequence, returning 0 when the sequence
 * has never been incremented.
 */
export async function seqLastValue(
  seqName: string = 'test_seq'
): Promise<number> {
  // Postgres sequences are initialized with a value of 1,
  // but incrementing them for the first time does not increment the last_value,
  // only sets is_called to true
  const seqResult = await sql`
    SELECT
      CASE
        WHEN is_called THEN last_value::integer
        ELSE 0
      END as last_value
    FROM ${sql(seqName)}`;
  return seqResult[0].last_value;
}
|
|
69
|
+
|
|
70
|
+
/** Options for waitForSeqToIncrementBy(); also forwarded to waitFor(). */
interface WaitForSeqValueOptions {
  // how often to poll the sequence, in milliseconds
  pollIntervalMs?: number;
  // sequence to observe (defaults to 'test_seq')
  seqName?: string;
  // give up after this many milliseconds
  timeoutMs?: number;
}
|
|
75
|
+
|
|
76
|
+
export async function waitForSeqToIncrementBy(
|
|
77
|
+
value: number,
|
|
78
|
+
options: WaitForSeqValueOptions = {}
|
|
79
|
+
): Promise<number> {
|
|
80
|
+
const { seqName = 'test_seq' } = options;
|
|
81
|
+
|
|
82
|
+
const perSecond = 0;
|
|
83
|
+
|
|
84
|
+
const progress = new ProgressBar({
|
|
85
|
+
title: `${seqName} (${perSecond}/s)`,
|
|
86
|
+
total: value,
|
|
87
|
+
width: 20,
|
|
88
|
+
display: dim(
|
|
89
|
+
` -> incrementing "${seqName}": :completed/:total (:eta left) [:bar] :percent`
|
|
90
|
+
),
|
|
91
|
+
prettyTime: true,
|
|
92
|
+
});
|
|
93
|
+
|
|
94
|
+
const startVal = await seqLastValue(seqName);
|
|
95
|
+
let lastVal = startVal;
|
|
96
|
+
|
|
97
|
+
return await waitFor(
|
|
98
|
+
async () => {
|
|
99
|
+
lastVal = await seqLastValue(seqName);
|
|
100
|
+
progress.render(lastVal);
|
|
101
|
+
const incrementedBy = lastVal - startVal;
|
|
102
|
+
|
|
103
|
+
return incrementedBy >= value ? lastVal : false;
|
|
104
|
+
},
|
|
105
|
+
{
|
|
106
|
+
...options,
|
|
107
|
+
description: `sequence ${seqName} to reach value ${value}`,
|
|
108
|
+
}
|
|
109
|
+
);
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
export async function waitForActiveWorker() {
|
|
113
|
+
return await waitFor(
|
|
114
|
+
async () => {
|
|
115
|
+
const [{ has_active: hasActiveWorker }] =
|
|
116
|
+
await sql`SELECT count(*) > 0 AS has_active FROM edge_worker.active_workers`;
|
|
117
|
+
log('waiting for active worker ', hasActiveWorker);
|
|
118
|
+
return hasActiveWorker;
|
|
119
|
+
},
|
|
120
|
+
{
|
|
121
|
+
pollIntervalMs: 300,
|
|
122
|
+
description: 'active worker',
|
|
123
|
+
}
|
|
124
|
+
);
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/** Returns all worker rows registered for the given edge function name. */
export async function fetchWorkers(functionName: string) {
  return await sql`SELECT * FROM edge_worker.workers WHERE function_name = ${functionName}`;
}
|
|
130
|
+
|
|
131
|
+
/** Spawns a worker edge function and blocks until it reports as active. */
export async function startWorker(workerName: string) {
  await sql`SELECT edge_worker.spawn(${workerName}::text)`;
  await waitForActiveWorker();
  log('worker spawned!');
}
|