@pgflow/edge-worker 0.0.5 → 0.0.7-prealpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +10 -4
- package/.envrc +0 -2
- package/CHANGELOG.md +0 -10
- package/deno.lock +0 -336
- package/deno.test.json +0 -32
- package/dist/LICENSE.md +0 -660
- package/dist/README.md +0 -46
- package/dist/index.js +0 -972
- package/dist/index.js.map +0 -7
- package/mod.ts +0 -7
- package/pkgs/edge-worker/dist/index.js +0 -953
- package/pkgs/edge-worker/dist/index.js.map +0 -7
- package/pkgs/edge-worker/dist/pkgs/edge-worker/LICENSE.md +0 -660
- package/pkgs/edge-worker/dist/pkgs/edge-worker/README.md +0 -46
- package/project.json +0 -164
- package/scripts/concatenate-migrations.sh +0 -22
- package/scripts/wait-for-localhost +0 -17
- package/sql/990_active_workers.sql +0 -11
- package/sql/991_inactive_workers.sql +0 -12
- package/sql/992_spawn_worker.sql +0 -68
- package/sql/benchmarks/max_concurrency.sql +0 -32
- package/sql/queries/debug_connections.sql +0 -0
- package/sql/queries/debug_processing_gaps.sql +0 -115
- package/src/EdgeWorker.ts +0 -172
- package/src/core/BatchProcessor.ts +0 -38
- package/src/core/ExecutionController.ts +0 -51
- package/src/core/Heartbeat.ts +0 -23
- package/src/core/Logger.ts +0 -42
- package/src/core/Queries.ts +0 -44
- package/src/core/Worker.ts +0 -102
- package/src/core/WorkerLifecycle.ts +0 -93
- package/src/core/WorkerState.ts +0 -85
- package/src/core/types.ts +0 -47
- package/src/flow/FlowWorkerLifecycle.ts +0 -81
- package/src/flow/StepTaskExecutor.ts +0 -87
- package/src/flow/StepTaskPoller.ts +0 -51
- package/src/flow/createFlowWorker.ts +0 -105
- package/src/flow/types.ts +0 -1
- package/src/index.ts +0 -15
- package/src/queue/MessageExecutor.ts +0 -105
- package/src/queue/Queue.ts +0 -92
- package/src/queue/ReadWithPollPoller.ts +0 -35
- package/src/queue/createQueueWorker.ts +0 -145
- package/src/queue/types.ts +0 -14
- package/src/spawnNewEdgeFunction.ts +0 -33
- package/supabase/call +0 -23
- package/supabase/cli +0 -3
- package/supabase/config.toml +0 -42
- package/supabase/functions/cpu_intensive/index.ts +0 -20
- package/supabase/functions/creating_queue/index.ts +0 -5
- package/supabase/functions/failing_always/index.ts +0 -13
- package/supabase/functions/increment_sequence/index.ts +0 -14
- package/supabase/functions/max_concurrency/index.ts +0 -17
- package/supabase/functions/serial_sleep/index.ts +0 -16
- package/supabase/functions/utils.ts +0 -13
- package/supabase/seed.sql +0 -2
- package/tests/db/compose.yaml +0 -20
- package/tests/db.ts +0 -71
- package/tests/e2e/README.md +0 -54
- package/tests/e2e/_helpers.ts +0 -135
- package/tests/e2e/performance.test.ts +0 -60
- package/tests/e2e/restarts.test.ts +0 -56
- package/tests/helpers.ts +0 -22
- package/tests/integration/_helpers.ts +0 -43
- package/tests/integration/creating_queue.test.ts +0 -32
- package/tests/integration/flow/minimalFlow.test.ts +0 -121
- package/tests/integration/maxConcurrent.test.ts +0 -76
- package/tests/integration/retries.test.ts +0 -78
- package/tests/integration/starting_worker.test.ts +0 -35
- package/tests/sql.ts +0 -46
- package/tests/unit/WorkerState.test.ts +0 -74
- package/tsconfig.lib.json +0 -23

package/supabase/functions/failing_always/index.ts
DELETED
@@ -1,13 +0,0 @@
-import { EdgeWorker } from '../_src/EdgeWorker.ts';
-
-function failingAlways() {
-  console.log('(╯°□°)╯︵ ┻━┻');
-  throw new Error('(╯°□°)╯︵ ┻━┻');
-}
-
-EdgeWorker.start(failingAlways, {
-  queueName: 'failing_always',
-  retryLimit: 2,
-  retryDelay: 2,
-  maxPollSeconds: 1,
-});

package/supabase/functions/increment_sequence/index.ts
DELETED
@@ -1,14 +0,0 @@
-import { EdgeWorker } from '../_src/EdgeWorker.ts';
-import { sql } from '../utils.ts';
-
-// await sql`CREATE SEQUENCE IF NOT EXISTS test_seq`;
-// await sql`SELECT pgmq.create('increment_sequence')`;
-
-async function incrementCounter() {
-  console.log(
-    '[increment_sequence] next_seq =',
-    await sql`SELECT nextval('test_seq')`
-  );
-}
-
-EdgeWorker.start(incrementCounter, { queueName: 'increment_sequence' });

package/supabase/functions/max_concurrency/index.ts
DELETED
@@ -1,17 +0,0 @@
-import { EdgeWorker } from '../_src/EdgeWorker.ts';
-import { sleep, sql } from '../utils.ts';
-
-async function incrementSeq() {
-  await sleep(50);
-
-  console.log(
-    '[max_concurrency] last_val =',
-    await sql`SELECT nextval('test_seq')`
-  );
-}
-
-EdgeWorker.start(incrementSeq, {
-  queueName: 'max_concurrency',
-  maxConcurrent: 10,
-  maxPgConnections: 4,
-});

package/supabase/functions/serial_sleep/index.ts
DELETED
@@ -1,16 +0,0 @@
-import { EdgeWorker } from '../_src/EdgeWorker.ts';
-import { sql, sleep } from '../utils.ts';
-
-const sleep1s = async () => {
-  console.time('Task time');
-  const lastVal = await sql`SELECT nextval('test_seq')`;
-  console.log('[serial_sleep] lastVal =', lastVal);
-  await sleep(1000);
-  console.timeEnd('Task time');
-};
-
-EdgeWorker.start(sleep1s, {
-  queueName: 'serial_sleep',
-  maxConcurrent: 1,
-  visibilityTimeout: 5, // higher than the delay()
-});

package/supabase/functions/utils.ts
DELETED
@@ -1,13 +0,0 @@
-import postgres from 'postgres';
-import { delay } from '@std/async';
-
-const EDGE_WORKER_DB_URL = Deno.env.get('EDGE_WORKER_DB_URL')!;
-console.log('EDGE_WORKER_DB_URL', EDGE_WORKER_DB_URL);
-
-export const sql = postgres(EDGE_WORKER_DB_URL, { prepare: false });
-
-export const sleep = delay;
-
-export function randomInt(min: number, max: number) {
-  return Math.floor(Math.random() * (max - min + 1)) + min;
-}

package/supabase/seed.sql
DELETED
@@ -1,2 +0,0 @@
-select vault.create_secret('http://host.docker.internal:50321', 'app_url');
-select vault.create_secret('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0', 'supabase_anon_key');

package/tests/db/compose.yaml
DELETED
@@ -1,20 +0,0 @@
-services:
-  db:
-    # image: postgres:15.8-alpine
-    # image: supabase/postgres:15.8.1.039
-    # image: supabase/postgres:15.1.0.37
-
-    image: supabase/postgres:15.8.1.020
-    # image: supabase/postgres:15.8.1.018
-    ports:
-      - '5432:5432'
-    volumes:
-      - ./migrations/edge_worker.sql:/docker-entrypoint-initdb.d/migrations/950_edge_worker.sql
-    environment:
-      POSTGRES_DB: postgres
-      # commented out because of the problems with supabase_admin permissions
-      # see: https://github.com/supabase/postgres/issues/1219#issuecomment-2362955730
-      # POSTGRES_USER: postgres
-      POSTGRES_PASSWORD: postgres
-      POSTGRES_HOST: /var/run/postgresql
-      POSTGRES_PORT: 5432

package/tests/db.ts
DELETED
@@ -1,71 +0,0 @@
-import postgres from 'postgres';
-
-function createSql(dbUrl: string) {
-  return postgres(dbUrl, {
-    prepare: false,
-    onnotice(_: unknown) {
-      // no-op to silence notices
-    },
-  });
-}
-
-export function withTransaction(
-  callback: (sql: postgres.Sql) => Promise<unknown>
-) {
-  const dbUrl = `postgresql://supabase_admin:postgres@localhost:5432/postgres`;
-  const localSql = createSql(dbUrl);
-
-  return async () => {
-    try {
-      console.log('calling callback');
-
-      let callbackError: unknown = null;
-
-      await localSql.begin(async (sql: postgres.Sql) => {
-        // Add no-op end() method to transaction-local sql
-        const wrappedSql = Object.assign(sql, {
-          end: async () => {
-            /* no-op */
-          },
-        });
-
-        try {
-          await callback(wrappedSql);
-        } catch (error) {
-          callbackError = error;
-        } finally {
-          console.log('Rolling back transaction');
-          // Using ROLLBACK AND CHAIN to avoid "no transaction in progress" warning
-          await sql`ROLLBACK AND CHAIN`;
-        }
-      });
-
-      console.log('callback called');
-
-      if (callbackError) {
-        throw callbackError;
-      }
-    } catch (err) {
-      console.error('Error in withTransaction:', err);
-      throw err;
-    } finally {
-      console.log('Closing connection');
-      await localSql.end();
-    }
-  };
-}
-
-export function withPgNoTransaction(
-  callback: (sql: postgres.Sql) => Promise<unknown>
-) {
-  const dbUrl = 'postgresql://supabase_admin:postgres@localhost:5432/postgres';
-  const sql = createSql(dbUrl);
-
-  return async () => {
-    try {
-      await callback(sql);
-    } finally {
-      await sql.end();
-    }
-  };
-}

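For orientation: the deleted `withTransaction` wraps a test body in a transaction that is always rolled back, so every test sees a clean database, and it returns a zero-argument async function that can be handed straight to `Deno.test` (this is exactly how `creating_queue.test.ts` at the bottom of this diff consumed it). A minimal usage sketch, with a hypothetical `scratch_table` used only for illustration:

```ts
import { withTransaction } from './db.ts';

// withTransaction(callback) returns a zero-argument async function,
// so it can be passed directly to Deno.test; everything the callback
// writes is rolled back afterwards.
Deno.test(
  'inserts made inside the callback never persist',
  withTransaction(async (sql) => {
    // 'scratch_table' is a hypothetical table used only for illustration
    await sql`CREATE TABLE scratch_table (name text)`;
    await sql`INSERT INTO scratch_table (name) VALUES ('temporary row')`;

    const [{ count }] =
      await sql`SELECT count(*)::integer AS count FROM scratch_table`;
    console.log('rows visible inside the transaction:', count); // 1
  })
);
```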
package/tests/e2e/README.md
DELETED
@@ -1,54 +0,0 @@
-# E2E Testing Strategy
-
-We use a real Supabase instance running locally (in `supabase/`) to perform end-to-end testing of the entire EdgeWorker stack. The tests interact with purpose-built test workers that simulate different behaviors:
-
-- Different queues with specific behaviors
-- Workers that always fail
-- Workers with varying retry patterns
-- etc.
-
-Many of the workers increment `test_seq` as a means of identifying the completion of a job.
-This also makes it possible to identify how many times handlers were called when retries were attempted.
-
-The test flow is straightforward:
-
-1. Put messages on specific queues
-2. Worker calls handlers, handlers increment `test_seq`
-3. `await` until `test_seq` has been incremented to the expected value (number of messages)
-4. Assert that workers behaved as expected (retries, failures, etc.)
-
-This approach lets us verify the entire stack from message enqueueing through worker processing, retries, and completion.
-
-## Core Test Scenarios Needed
-
-#### Glossary
-
-- `worker` - instance of a given worker edge function that is subject to CPU and memory limits and can be killed
-- `worker function` - edge function within the Supabase app that uses EdgeWorker instead of serving requests
-- `queue` - pgmq queue that workers can pull from
-- `message` - PGMQ `message_record` that contains metadata (`msg_id`, `read_ct`, `vt`) and payload (`message JSONB`)
-
-### [ ] Happy Path
-
-- [x] Worker picks messages from queue
-- [ ] Worker calls handler function with each message
-- [x] Worker can process large volumes of messages (restarts itself when the CPU clock limit hits)
-- [x] Different worker functions can pull from different queues
-- [ ] Different worker functions can pull from the same queue
-
-### [x] Worker Lifecycle
-
-- [x] Worker registers on start
-- [x] Worker sends heartbeats every 5s
-- [x] Worker updates function_name with heartbeat
-
-### [ ] Retries & Failures
-
-- [ ] Worker retries failed jobs n-times and succeeds
-- [ ] Worker uses exponential backoff for each subsequent retry
-- [x] Worker uses proper number of retries for each job
-- [x] Worker archives jobs that will not be retried
-
-### [x] Concurrency
-
-- [x] Worker respects maxConcurrent and processes messages serially when set to 1

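The four-step flow described in this deleted README maps directly onto the helpers defined in `_helpers.ts` below. A minimal sketch of a test written against those helpers, assuming the queue and worker function already exist (the queue name and message count are illustrative):

```ts
import { sendBatch, startWorker, waitForSeqToIncrementBy } from './_helpers.ts';

// 1. Spawn the worker function and put messages on its queue
await startWorker('increment_sequence'); // illustrative worker/queue name
await sendBatch(100, 'increment_sequence');

// 2.-3. Handlers increment test_seq; wait until it has grown by the
// number of messages sent (or fail after the timeout)
await waitForSeqToIncrementBy(100, { timeoutMs: 30_000 });

// 4. Assertions about retries, failures, archived messages, etc. go here
```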
package/tests/e2e/_helpers.ts
DELETED
@@ -1,135 +0,0 @@
-import { sql } from '../sql.ts';
-import { delay } from '@std/async';
-import ProgressBar from 'jsr:@deno-library/progress';
-import { dim } from 'https://deno.land/std@0.224.0/fmt/colors.ts';
-
-interface WaitForOptions {
-  pollIntervalMs?: number;
-  timeoutMs?: number;
-  description?: string;
-}
-
-export function log(message: string, ...args: unknown[]) {
-  console.log(dim(` -> ${message}`), ...args);
-}
-
-export async function waitFor<T>(
-  predicate: () => Promise<T | false>,
-  options: WaitForOptions = {}
-): Promise<T> {
-  const {
-    pollIntervalMs = 250,
-    timeoutMs = 30000,
-    description = 'condition',
-  } = options;
-
-  const startTime = Date.now();
-
-  while (true) {
-    const result = await predicate();
-
-    if (result) return result;
-
-    if (Date.now() - startTime > timeoutMs) {
-      throw new Error(
-        `Timeout after ${timeoutMs}ms waiting for ${description}`
-      );
-    }
-
-    await delay(pollIntervalMs);
-  }
-}
-
-export async function sendBatch(count: number, queueName: string) {
-  return await sql`
-    SELECT pgmq.send_batch(
-      ${queueName},
-      ARRAY(
-        SELECT '{}'::jsonb
-        FROM generate_series(1, ${count}::integer)
-      )
-    )`;
-}
-
-export async function seqLastValue(
-  seqName: string = 'test_seq'
-): Promise<number> {
-  // Postgres sequences are initialized with a value of 1,
-  // but incrementing them for the first time does not increment the last_value,
-  // only sets is_called to true
-  const seqResult = await sql`
-    SELECT
-      CASE
-        WHEN is_called THEN last_value::integer
-        ELSE 0
-      END as last_value
-    FROM ${sql(seqName)}`;
-  return seqResult[0].last_value;
-}
-
-interface WaitForSeqValueOptions {
-  pollIntervalMs?: number;
-  seqName?: string;
-  timeoutMs?: number;
-}
-
-export async function waitForSeqToIncrementBy(
-  value: number,
-  options: WaitForSeqValueOptions = {}
-): Promise<number> {
-  const { seqName = 'test_seq' } = options;
-
-  const perSecond = 0;
-
-  const progress = new ProgressBar({
-    title: `${seqName} (${perSecond}/s)`,
-    total: value,
-    width: 20,
-    display: dim(
-      ` -> incrementing "${seqName}": :completed/:total (:eta left) [:bar] :percent`
-    ),
-    prettyTime: true,
-  });
-
-  const startVal = await seqLastValue(seqName);
-  let lastVal = startVal;
-
-  return await waitFor(
-    async () => {
-      lastVal = await seqLastValue(seqName);
-      progress.render(lastVal);
-      const incrementedBy = lastVal - startVal;
-
-      return incrementedBy >= value ? lastVal : false;
-    },
-    {
-      ...options,
-      description: `sequence ${seqName} to reach value ${value}`,
-    }
-  );
-}
-
-export async function waitForActiveWorker() {
-  return await waitFor(
-    async () => {
-      const [{ has_active: hasActiveWorker }] =
-        await sql`SELECT count(*) > 0 AS has_active FROM edge_worker.active_workers`;
-      log('waiting for active worker ', hasActiveWorker);
-      return hasActiveWorker;
-    },
-    {
-      pollIntervalMs: 300,
-      description: 'active worker',
-    }
-  );
-}
-
-export async function fetchWorkers(functionName: string) {
-  return await sql`SELECT * FROM edge_worker.workers WHERE function_name = ${functionName}`;
-}
-
-export async function startWorker(workerName: string) {
-  await sql`SELECT edge_worker.spawn(${workerName}::text)`;
-  await waitForActiveWorker();
-  log('worker spawned!');
-}

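The `CASE WHEN is_called` branch in `seqLastValue` above exists because a freshly created Postgres sequence already reports `last_value = 1`; only after the first `nextval` does `is_called` flip to true, while `last_value` stays at 1. A sketch of that behavior through the same postgres.js client (`demo_seq` is a hypothetical sequence name):

```ts
import { sql } from '../sql.ts';

await sql`CREATE SEQUENCE IF NOT EXISTS demo_seq`;

// Fresh sequence: last_value = 1 but is_called = false,
// which seqLastValue() maps to 0 ("never incremented").
console.log(await sql`SELECT last_value::integer, is_called FROM demo_seq`);

// First increment returns 1; last_value stays 1 and is_called becomes true,
// which seqLastValue() now reports as 1.
await sql`SELECT nextval('demo_seq')`;
console.log(await sql`SELECT last_value::integer, is_called FROM demo_seq`);
```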
package/tests/e2e/performance.test.ts
DELETED
@@ -1,60 +0,0 @@
-import { sql } from '../sql.ts';
-import {
-  waitFor,
-  sendBatch,
-  waitForSeqToIncrementBy,
-  startWorker,
-  log,
-} from './_helpers.ts';
-
-const MESSAGES_TO_SEND = 20000;
-const WORKER_NAME = 'max_concurrency';
-
-Deno.test(
-  'worker can handle tens of thousands of jobs queued at once',
-  async () => {
-    await sql`CREATE SEQUENCE IF NOT EXISTS test_seq`;
-    await sql`ALTER SEQUENCE test_seq RESTART WITH 1`;
-    await sql`SELECT pgmq.create(${WORKER_NAME})`;
-    await sql`SELECT pgmq.drop_queue(${WORKER_NAME})`;
-    await sql`SELECT pgmq.create(${WORKER_NAME})`;
-    await startWorker(WORKER_NAME);
-    await waitFor(
-      async () => {
-        const [{ worker_count }] = await sql`
-          SELECT COUNT(*)::integer AS worker_count
-          FROM edge_worker.active_workers
-          WHERE function_name = ${WORKER_NAME}
-        `;
-
-        log('worker_count', worker_count);
-        return worker_count === 1;
-      },
-      { description: 'Waiting for exactly one worker' }
-    );
-
-    try {
-      // worker sleeps for 1s for each message
-      // so we will expect roughly 1 message per second
-      const startTime = Date.now();
-
-      await sendBatch(MESSAGES_TO_SEND, WORKER_NAME);
-      await waitForSeqToIncrementBy(MESSAGES_TO_SEND, {
-        timeoutMs: MESSAGES_TO_SEND * 1000 + 1000,
-        pollIntervalMs: 1000,
-      });
-
-      const endTime = Date.now();
-      const totalMs = Math.round(endTime - startTime);
-      const totalS = totalMs / 1000;
-      const msgsPerSecond = MESSAGES_TO_SEND / totalS;
-
-      log('');
-      log('');
-      log(`Total time:`, totalMs);
-      log(`msgs/second:`, msgsPerSecond);
-    } finally {
-      await sql.end();
-    }
-  }
-);

package/tests/e2e/restarts.test.ts
DELETED
@@ -1,56 +0,0 @@
-import { sql } from '../sql.ts';
-import { assertGreater, assertGreaterOrEqual } from 'jsr:@std/assert';
-import {
-  fetchWorkers,
-  sendBatch,
-  seqLastValue,
-  startWorker,
-  waitForSeqToIncrementBy,
-} from './_helpers.ts';
-
-const WORKER_NAME = 'cpu_intensive';
-
-// TODO: document relation between CPU clock limit, amount of time to process
-// single message and amount of messages to send
-const MESSAGES_TO_SEND = 30;
-
-Deno.test('should spawn next worker when CPU clock limit hits', async () => {
-  await sql`CREATE SEQUENCE IF NOT EXISTS test_seq`;
-  await sql`ALTER SEQUENCE test_seq RESTART WITH 1`;
-  try {
-    await sql`SELECT pgmq.drop_queue(${WORKER_NAME})`;
-  } catch {
-    // ignore
-  }
-  await sql`SELECT pgmq.create(${WORKER_NAME})`;
-  await sql`
-    DELETE FROM edge_worker.workers
-    WHERE worker_id IN (
-      SELECT worker_id
-      FROM edge_worker.inactive_workers
-    )`;
-  await startWorker(WORKER_NAME);
-
-  try {
-    await sendBatch(MESSAGES_TO_SEND, WORKER_NAME);
-    await waitForSeqToIncrementBy(MESSAGES_TO_SEND, {
-      timeoutMs: 35000,
-      pollIntervalMs: 300,
-    });
-
-    assertGreaterOrEqual(
-      await seqLastValue(),
-      MESSAGES_TO_SEND,
-      'Sequence value should be greater than or equal to the number of messages sent'
-    );
-
-    const workers = await fetchWorkers(WORKER_NAME);
-    assertGreater(
-      workers.length,
-      1,
-      'expected worker to spawn another but there is only 1 worker'
-    );
-  } finally {
-    await sql.end();
-  }
-});

package/tests/helpers.ts
DELETED
@@ -1,22 +0,0 @@
-import type { PgmqMessageRecord } from "../src/queue/types.ts";
-import type { postgres } from "./sql.ts";
-
-export async function sendBatch(count: number, queueName: string, sql: postgres.Sql) {
-  return await sql`
-    SELECT pgmq.send_batch(
-      ${queueName},
-      ARRAY(
-        SELECT '{}'::jsonb
-        FROM generate_series(1, ${count}::integer)
-      )
-    )`;
-}
-
-/**
- * Fetches archived messages from the queue
- */
-export async function getArchivedMessages(sql: postgres.Sql, queueName: string) {
-  return await sql<PgmqMessageRecord[]>
-    `SELECT * FROM ${sql('pgmq.a_' + queueName)}`;
-}
-

package/tests/integration/_helpers.ts
DELETED
@@ -1,43 +0,0 @@
-import type { AnyFlow, ExtractFlowInput, Flow, Json } from '@pgflow/dsl';
-import {
-  createFlowWorker,
-  type FlowWorkerConfig,
-} from '../../src/flow/createFlowWorker.ts';
-import type { postgres } from '../sql.ts';
-import { PgflowSqlClient } from '../../../core/src/PgflowSqlClient.ts';
-
-export async function startFlow<TFlow extends AnyFlow>(
-  sql: postgres.Sql,
-  flow: TFlow,
-  input: ExtractFlowInput<TFlow>
-) {
-  const pgflow = new PgflowSqlClient<TFlow>(sql);
-
-  return await pgflow.startFlow(flow, input);
-}
-
-export function startWorker<
-  T extends Json,
-  S extends Record<string, Json> = Record<never, never>,
-  D extends Record<string, string[]> = Record<string, string[]>
->(sql: postgres.Sql, flow: Flow<T, S, D>, options: FlowWorkerConfig) {
-  const defaultOptions = {
-    sql,
-    maxConcurrent: 1,
-    batchSize: 10,
-  };
-
-  const mergedOptions = {
-    ...defaultOptions,
-    ...options,
-  };
-
-  const worker = createFlowWorker(flow, mergedOptions);
-
-  worker.startOnlyOnce({
-    edgeFunctionName: 'test_flow',
-    workerId: crypto.randomUUID(),
-  });
-
-  return worker;
-}

package/tests/integration/creating_queue.test.ts
DELETED
@@ -1,32 +0,0 @@
-import { assertEquals } from "@std/assert";
-import { createQueueWorker } from '../../src/queue/createQueueWorker.ts';
-import { withTransaction } from "../db.ts";
-import { delay } from "@std/async";
-
-Deno.test('creates queue when starting worker', withTransaction(async (sql) => {
-  const worker = createQueueWorker(console.log, {
-    sql,
-    maxPollSeconds: 1,
-    queueName: 'custom_queue'
-  });
-
-  worker.startOnlyOnce({
-    edgeFunctionName: 'test',
-    // random uuid
-    workerId: crypto.randomUUID(),
-  });
-
-  await delay(100);
-
-  try {
-    const result: {queue_name: string}[] = await sql`select queue_name from pgmq.list_queues();`;
-
-    assertEquals(
-      [...result],
-      [{ queue_name: 'custom_queue' }],
-      'queue "custom_queue" was created'
-    );
-  } finally {
-    await worker.stop();
-  }
-}));