@pgflow/edge-worker 0.0.5-prealpha.0
This diff shows the contents of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- package/.envrc +2 -0
- package/LICENSE.md +660 -0
- package/README.md +46 -0
- package/deno.json +32 -0
- package/deno.lock +369 -0
- package/dist/LICENSE.md +660 -0
- package/dist/README.md +46 -0
- package/dist/index.js +972 -0
- package/dist/index.js.map +7 -0
- package/mod.ts +7 -0
- package/package.json +14 -0
- package/project.json +164 -0
- package/scripts/concatenate-migrations.sh +22 -0
- package/scripts/wait-for-localhost +17 -0
- package/sql/990_active_workers.sql +11 -0
- package/sql/991_inactive_workers.sql +12 -0
- package/sql/992_spawn_worker.sql +68 -0
- package/sql/benchmarks/max_concurrency.sql +32 -0
- package/sql/queries/debug_connections.sql +0 -0
- package/sql/queries/debug_processing_gaps.sql +115 -0
- package/src/EdgeWorker.ts +172 -0
- package/src/core/BatchProcessor.ts +38 -0
- package/src/core/ExecutionController.ts +51 -0
- package/src/core/Heartbeat.ts +23 -0
- package/src/core/Logger.ts +69 -0
- package/src/core/Queries.ts +44 -0
- package/src/core/Worker.ts +102 -0
- package/src/core/WorkerLifecycle.ts +93 -0
- package/src/core/WorkerState.ts +85 -0
- package/src/core/types.ts +47 -0
- package/src/flow/FlowWorkerLifecycle.ts +81 -0
- package/src/flow/StepTaskExecutor.ts +87 -0
- package/src/flow/StepTaskPoller.ts +51 -0
- package/src/flow/createFlowWorker.ts +105 -0
- package/src/flow/types.ts +1 -0
- package/src/index.ts +15 -0
- package/src/queue/MessageExecutor.ts +105 -0
- package/src/queue/Queue.ts +92 -0
- package/src/queue/ReadWithPollPoller.ts +35 -0
- package/src/queue/createQueueWorker.ts +145 -0
- package/src/queue/types.ts +14 -0
- package/src/spawnNewEdgeFunction.ts +33 -0
- package/supabase/call +23 -0
- package/supabase/cli +3 -0
- package/supabase/config.toml +42 -0
- package/supabase/functions/cpu_intensive/index.ts +20 -0
- package/supabase/functions/creating_queue/index.ts +5 -0
- package/supabase/functions/failing_always/index.ts +13 -0
- package/supabase/functions/increment_sequence/index.ts +14 -0
- package/supabase/functions/max_concurrency/index.ts +17 -0
- package/supabase/functions/serial_sleep/index.ts +16 -0
- package/supabase/functions/utils.ts +13 -0
- package/supabase/seed.sql +2 -0
- package/tests/db/compose.yaml +20 -0
- package/tests/db.ts +71 -0
- package/tests/e2e/README.md +54 -0
- package/tests/e2e/_helpers.ts +135 -0
- package/tests/e2e/performance.test.ts +60 -0
- package/tests/e2e/restarts.test.ts +56 -0
- package/tests/helpers.ts +22 -0
- package/tests/integration/_helpers.ts +43 -0
- package/tests/integration/creating_queue.test.ts +32 -0
- package/tests/integration/flow/minimalFlow.test.ts +121 -0
- package/tests/integration/maxConcurrent.test.ts +76 -0
- package/tests/integration/retries.test.ts +78 -0
- package/tests/integration/starting_worker.test.ts +35 -0
- package/tests/sql.ts +46 -0
- package/tests/unit/WorkerState.test.ts +74 -0
- package/tsconfig.lib.json +23 -0
package/tests/e2e/performance.test.ts
ADDED

@@ -0,0 +1,60 @@
+import { sql } from '../sql.ts';
+import {
+  waitFor,
+  sendBatch,
+  waitForSeqToIncrementBy,
+  startWorker,
+  log,
+} from './_helpers.ts';
+
+const MESSAGES_TO_SEND = 20000;
+const WORKER_NAME = 'max_concurrency';
+
+Deno.test(
+  'worker can handle tens of thousands of jobs queued at once',
+  async () => {
+    await sql`CREATE SEQUENCE IF NOT EXISTS test_seq`;
+    await sql`ALTER SEQUENCE test_seq RESTART WITH 1`;
+    await sql`SELECT pgmq.create(${WORKER_NAME})`;
+    await sql`SELECT pgmq.drop_queue(${WORKER_NAME})`;
+    await sql`SELECT pgmq.create(${WORKER_NAME})`;
+    await startWorker(WORKER_NAME);
+    await waitFor(
+      async () => {
+        const [{ worker_count }] = await sql`
+          SELECT COUNT(*)::integer AS worker_count
+          FROM edge_worker.active_workers
+          WHERE function_name = ${WORKER_NAME}
+        `;
+
+        log('worker_count', worker_count);
+        return worker_count === 1;
+      },
+      { description: 'Waiting for exactly one worker' }
+    );
+
+    try {
+      // worker sleeps for 1s for each message,
+      // so we expect roughly 1 message per second
+      const startTime = Date.now();
+
+      await sendBatch(MESSAGES_TO_SEND, WORKER_NAME);
+      await waitForSeqToIncrementBy(MESSAGES_TO_SEND, {
+        timeoutMs: MESSAGES_TO_SEND * 1000 + 1000,
+        pollIntervalMs: 1000,
+      });
+
+      const endTime = Date.now();
+      const totalMs = Math.round(endTime - startTime);
+      const totalS = totalMs / 1000;
+      const msgsPerSecond = MESSAGES_TO_SEND / totalS;
+
+      log('');
+      log('');
+      log(`Total time:`, totalMs);
+      log(`msgs/second:`, msgsPerSecond);
+    } finally {
+      await sql.end();
+    }
+  }
+);
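The `waitForSeqToIncrementBy` and `seqLastValue` helpers come from `tests/e2e/_helpers.ts`, which is listed above (+135) but not reproduced in this diff. Below is a minimal sketch of one plausible implementation, inferred only from the call sites here: it assumes the helpers poll the `test_seq` sequence that the test creates, and the real helper may differ (for example, in how it treats the sequence's initial `is_called` state).

import { sql } from '../sql.ts';

// Reads the current value of the test_seq sequence created by the tests.
export async function seqLastValue(): Promise<number> {
  const [{ last_value }] = await sql`SELECT last_value::integer FROM test_seq`;
  return last_value;
}

// Polls test_seq until it has grown by `delta` from its starting value,
// or throws after timeoutMs. Signature inferred from the call sites above.
export async function waitForSeqToIncrementBy(
  delta: number,
  opts: { timeoutMs: number; pollIntervalMs: number }
): Promise<void> {
  const initial = await seqLastValue();
  const deadline = Date.now() + opts.timeoutMs;

  while (Date.now() < deadline) {
    if ((await seqLastValue()) - initial >= delta) return;
    await new Promise((resolve) => setTimeout(resolve, opts.pollIntervalMs));
  }
  throw new Error(`test_seq did not increment by ${delta} within ${opts.timeoutMs}ms`);
}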
package/tests/e2e/restarts.test.ts
ADDED

@@ -0,0 +1,56 @@
+import { sql } from '../sql.ts';
+import { assertGreater, assertGreaterOrEqual } from 'jsr:@std/assert';
+import {
+  fetchWorkers,
+  sendBatch,
+  seqLastValue,
+  startWorker,
+  waitForSeqToIncrementBy,
+} from './_helpers.ts';
+
+const WORKER_NAME = 'cpu_intensive';
+
+// TODO: document relation between CPU clock limit, amount of time to process
+// single message and amount of messages to send
+const MESSAGES_TO_SEND = 30;
+
+Deno.test('should spawn next worker when CPU clock limit hits', async () => {
+  await sql`CREATE SEQUENCE IF NOT EXISTS test_seq`;
+  await sql`ALTER SEQUENCE test_seq RESTART WITH 1`;
+  try {
+    await sql`SELECT pgmq.drop_queue(${WORKER_NAME})`;
+  } catch {
+    // ignore
+  }
+  await sql`SELECT pgmq.create(${WORKER_NAME})`;
+  await sql`
+    DELETE FROM edge_worker.workers
+    WHERE worker_id IN (
+      SELECT worker_id
+      FROM edge_worker.inactive_workers
+    )`;
+  await startWorker(WORKER_NAME);
+
+  try {
+    await sendBatch(MESSAGES_TO_SEND, WORKER_NAME);
+    await waitForSeqToIncrementBy(MESSAGES_TO_SEND, {
+      timeoutMs: 35000,
+      pollIntervalMs: 300,
+    });
+
+    assertGreaterOrEqual(
+      await seqLastValue(),
+      MESSAGES_TO_SEND,
+      'Sequence value should be greater than or equal to the number of messages sent'
+    );
+
+    const workers = await fetchWorkers(WORKER_NAME);
+    assertGreater(
+      workers.length,
+      1,
+      'expected worker to spawn another but there is only 1 worker'
+    );
+  } finally {
+    await sql.end();
+  }
+});
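`fetchWorkers` also lives in the unshown `tests/e2e/_helpers.ts`. Judging from the other queries in these tests (`edge_worker.active_workers` filtered by `function_name`, and `edge_worker.workers` holding all rows), it plausibly looks like the sketch below; the table and column names are assumptions, not confirmed by this diff.

import { sql } from '../sql.ts';

// Returns all worker rows registered for a given edge function,
// active and inactive alike, so the test can count respawns.
export async function fetchWorkers(functionName: string) {
  return await sql`
    SELECT *
    FROM edge_worker.workers
    WHERE function_name = ${functionName}
  `;
}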
package/tests/helpers.ts
ADDED

@@ -0,0 +1,22 @@
+import type { PgmqMessageRecord } from "../src/queue/types.ts";
+import type { postgres } from "./sql.ts";
+
+export async function sendBatch(count: number, queueName: string, sql: postgres.Sql) {
+  return await sql`
+    SELECT pgmq.send_batch(
+      ${queueName},
+      ARRAY(
+        SELECT '{}'::jsonb
+        FROM generate_series(1, ${count}::integer)
+      )
+    )`;
+}
+
+/**
+ * Fetches archived messages from the queue
+ */
+export async function getArchivedMessages(sql: postgres.Sql, queueName: string) {
+  return await sql<PgmqMessageRecord[]>
+    `SELECT * FROM ${sql('pgmq.a_' + queueName)}`;
+}
+
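A usage sketch for these helpers: `pgmq.send_batch` takes a queue name and a `jsonb[]` payload array, and pgmq archives processed messages into a `pgmq.a_<queue>` table, which is what `getArchivedMessages` reads. The queue name `my_queue` is illustrative only.

import { sql } from './sql.ts';
import { getArchivedMessages, sendBatch } from './helpers.ts';

// Create an illustrative queue, enqueue 3 empty JSON payloads,
// then inspect whatever has been archived after processing.
await sql`SELECT pgmq.create('my_queue')`;
await sendBatch(3, 'my_queue', sql);
const archived = await getArchivedMessages(sql, 'my_queue');
console.log(archived.map((m) => m.read_ct));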
package/tests/integration/_helpers.ts
ADDED

@@ -0,0 +1,43 @@
+import type { AnyFlow, ExtractFlowInput, Flow, Json } from '@pgflow/dsl';
+import {
+  createFlowWorker,
+  type FlowWorkerConfig,
+} from '../../src/flow/createFlowWorker.ts';
+import type { postgres } from '../sql.ts';
+import { PgflowSqlClient } from '../../../core/src/PgflowSqlClient.ts';
+
+export async function startFlow<TFlow extends AnyFlow>(
+  sql: postgres.Sql,
+  flow: TFlow,
+  input: ExtractFlowInput<TFlow>
+) {
+  const pgflow = new PgflowSqlClient<TFlow>(sql);
+
+  return await pgflow.startFlow(flow, input);
+}
+
+export function startWorker<
+  T extends Json,
+  S extends Record<string, Json> = Record<never, never>,
+  D extends Record<string, string[]> = Record<string, string[]>
+>(sql: postgres.Sql, flow: Flow<T, S, D>, options: FlowWorkerConfig) {
+  const defaultOptions = {
+    sql,
+    maxConcurrent: 1,
+    batchSize: 10,
+  };
+
+  const mergedOptions = {
+    ...defaultOptions,
+    ...options,
+  };
+
+  const worker = createFlowWorker(flow, mergedOptions);
+
+  worker.startOnlyOnce({
+    edgeFunctionName: 'test_flow',
+    workerId: crypto.randomUUID(),
+  });
+
+  return worker;
+}
package/tests/integration/creating_queue.test.ts
ADDED

@@ -0,0 +1,32 @@
+import { assertEquals } from "@std/assert";
+import { createQueueWorker } from '../../src/queue/createQueueWorker.ts';
+import { withTransaction } from "../db.ts";
+import { delay } from "@std/async";
+
+Deno.test('creates queue when starting worker', withTransaction(async (sql) => {
+  const worker = createQueueWorker(console.log, {
+    sql,
+    maxPollSeconds: 1,
+    queueName: 'custom_queue'
+  });
+
+  worker.startOnlyOnce({
+    edgeFunctionName: 'test',
+    // random uuid
+    workerId: crypto.randomUUID(),
+  });
+
+  await delay(100);
+
+  try {
+    const result: {queue_name: string}[] = await sql`select queue_name from pgmq.list_queues();`;
+
+    assertEquals(
+      [...result],
+      [{ queue_name: 'custom_queue' }],
+      'queue "custom_queue" was created'
+    );
+  } finally {
+    await worker.stop();
+  }
+}));
package/tests/integration/flow/minimalFlow.test.ts
ADDED

@@ -0,0 +1,121 @@
+import { assert, assertEquals } from '@std/assert';
+import { withPgNoTransaction } from '../../db.ts';
+import { Flow } from '../../../../dsl/src/dsl.ts';
+import { waitFor } from '../../e2e/_helpers.ts';
+import { delay } from '@std/async';
+import type { Json } from '@pgflow/core';
+import { startFlow, startWorker } from '../_helpers.ts';
+
+// Define a minimal flow with two steps:
+// 1. Convert a number to a string
+// 2. Wrap the string in an array
+const MinimalFlow = new Flow<number>({ slug: 'test_minimal_flow' })
+  .step({ slug: 'toStringStep' }, async (input) => {
+    await delay(1);
+    return input.run.toString();
+  })
+  .step(
+    { slug: 'wrapInArrayStep', dependsOn: ['toStringStep'] },
+    async (input) => {
+      await delay(1);
+      return [input.toStringStep];
+    }
+  );
+
+Deno.test(
+  'minimal flow executes successfully',
+  withPgNoTransaction(async (sql) => {
+    await sql`select pgflow_tests.reset_db();`;
+
+    const worker = startWorker(sql, MinimalFlow, {
+      maxConcurrent: 1,
+      batchSize: 10,
+    });
+
+    try {
+      await sql`select pgflow.create_flow('test_minimal_flow');`;
+      await sql`select pgflow.add_step('test_minimal_flow', 'toStringStep');`;
+      await sql`select pgflow.add_step('test_minimal_flow', 'wrapInArrayStep', deps_slugs => ARRAY['toStringStep']::text[]);`;
+
+      // Start a flow run with input value 42
+      const flowRun = await startFlow(sql, MinimalFlow, 42);
+
+      let i = 0;
+      // Wait for the run to complete with a timeout
+      const polledRun = await waitFor(
+        async () => {
+          // Check run status
+          const [run] = await sql`
+            SELECT * FROM pgflow.runs WHERE run_id = ${flowRun.run_id};
+          `;
+
+          i += 1;
+          console.log(`Run ${i}`, run);
+
+          if (run.status !== 'completed' && run.status !== 'failed') {
+            return false;
+          }
+
+          return run;
+        },
+        {
+          pollIntervalMs: 500,
+          timeoutMs: 5000,
+          description: `flow run ${flowRun.run_id} to be 'completed'`,
+        }
+      );
+
+      console.log('Polled run', polledRun);
+
+      assert(polledRun.status === 'completed', 'Run should be completed');
+
+      // Verify step_states are all completed
+      const stepStates = await sql<{ step_slug: string; status: string }[]>`
+        SELECT step_slug, status FROM pgflow.step_states
+        WHERE run_id = ${flowRun.run_id}
+        ORDER BY step_slug;
+      `;
+
+      console.log('Step states:', stepStates);
+      assertEquals(
+        stepStates.map((s) => s.status),
+        ['completed', 'completed'],
+        'All step states should be completed'
+      );
+
+      // Verify step_tasks are all completed
+      const stepTasks = await sql<
+        { step_slug: string; status: string; output: Json }[]
+      >`
+        SELECT step_slug, status, output FROM pgflow.step_tasks
+        WHERE run_id = ${flowRun.run_id}
+        ORDER BY step_slug;
+      `;
+
+      console.log('Step tasks:', stepTasks);
+      assertEquals(
+        stepTasks.map((s) => s.status),
+        ['completed', 'completed'],
+        'All step tasks should be completed'
+      );
+
+      // Verify run is completed
+      const [finalRun] = await sql<{ status: string; output: unknown }[]>`
+        SELECT status, output FROM pgflow.runs WHERE run_id = ${flowRun.run_id};
+      `;
+
+      console.log('Final run:', finalRun);
+      assertEquals(finalRun.status, 'completed', 'Run should be completed');
+
+      // Verify run output matches expected ["42"]
+      assertEquals(
+        finalRun.output,
+        { wrapInArrayStep: ['42'] },
+        'Run output should match expected value'
+      );
+    } finally {
+      // Stop the worker
+      await worker.stop();
+    }
+  })
+);
package/tests/integration/maxConcurrent.test.ts
ADDED

@@ -0,0 +1,76 @@
+import { assertEquals, assertGreaterOrEqual } from '@std/assert';
+import { createQueueWorker } from '../../src/queue/createQueueWorker.ts';
+import { withTransaction } from '../db.ts';
+import { waitFor } from '../e2e/_helpers.ts';
+import type { PgmqMessageRecord } from '../../src/queue/types.ts';
+import { delay } from '@std/async';
+import { sendBatch } from '../helpers.ts';
+
+const QUEUE_NAME = 'max_concurrent';
+const MESSAGES_TO_SEND = 3;
+
+async function sleepFor1s() {
+  await delay(1000);
+}
+
+Deno.test(
+  'maxConcurrent option is respected',
+  withTransaction(async (sql) => {
+    const worker = createQueueWorker(sleepFor1s, {
+      sql,
+      maxConcurrent: 1,
+      maxPollSeconds: 1,
+      visibilityTimeout: 5,
+      queueName: QUEUE_NAME,
+    });
+
+    try {
+      worker.startOnlyOnce({
+        edgeFunctionName: 'test',
+        // random uuid
+        workerId: crypto.randomUUID(),
+      });
+
+      await sendBatch(MESSAGES_TO_SEND, QUEUE_NAME, sql);
+
+      // worker sleeps for 1s for each message,
+      // so we expect roughly 1 message per second
+      const startTime = Date.now();
+
+      const messages = await waitFor(
+        async () => {
+          const archivedMessages = await sql<
+            PgmqMessageRecord[]
+          >`SELECT * FROM ${sql('pgmq.a_' + QUEUE_NAME)}`;
+
+          return (
+            archivedMessages.length >= MESSAGES_TO_SEND && archivedMessages
+          );
+        },
+        {
+          timeoutMs: 5000,
+        }
+      );
+
+      assertEquals(messages.length, 3, 'there should be 3 archived messages');
+      assertEquals(
+        messages.map((m) => m.read_ct),
+        [1, 1, 1],
+        'each message should be read exactly once'
+      );
+
+      const endTime = Date.now();
+      const totalMs = Math.round(endTime - startTime);
+
+      assertGreaterOrEqual(
+        totalMs,
+        MESSAGES_TO_SEND * 1000, // 3 messages, each takes 1s
+        `Should take at least ${MESSAGES_TO_SEND}s to process all messages, took ${totalMs}ms instead`
+      );
+    } catch (error) {
+      throw error;
+    } finally {
+      await worker.stop();
+    }
+  })
+);
package/tests/integration/retries.test.ts
ADDED

@@ -0,0 +1,78 @@
+import { assertEquals, assertGreaterOrEqual } from '@std/assert';
+import { createQueueWorker } from '../../src/queue/createQueueWorker.ts';
+import { withTransaction } from "../db.ts";
+import { log, waitFor } from "../e2e/_helpers.ts";
+import { getArchivedMessages, sendBatch } from "../helpers.ts";
+
+const workerConfig = {
+  maxPollSeconds: 1,
+  retryDelay: 2, // seconds between retries
+  retryLimit: 2, // number of retries
+  queueName: 'failing_always',
+} as const;
+
+/**
+ * Creates a handler that always fails and logs elapsed time
+ * @param startTime - reference time for elapsed calculations
+ */
+function createFailingHandler(startTime: number) {
+  return function failingHandler() {
+    const elapsedSec = ((Date.now() - startTime) / 1000).toFixed(2);
+    log(`[elapsed: ${elapsedSec}s] Failed as expected (╯°□°)╯︵ ┻━┻`);
+    throw new Error('Intentional failure');
+  };
+}
+
+/**
+ * Test verifies that:
+ * 1. Message processing takes at least retryLimit * retryDelay seconds
+ * 2. Message is read exactly retryLimit + 1 times (initial + retries)
+ */
+Deno.test('message retry mechanism works correctly', withTransaction(async (sql) => {
+  const startTime = Date.now();
+  const worker = createQueueWorker(createFailingHandler(startTime), {
+    sql,
+    ...workerConfig
+  });
+
+  try {
+    // Start worker and send test message
+    worker.startOnlyOnce({
+      edgeFunctionName: 'test',
+      workerId: crypto.randomUUID(),
+    });
+    await sendBatch(1, workerConfig.queueName, sql);
+
+    // Calculate expected processing time
+    const expectedMinimumMs = workerConfig.retryLimit * workerConfig.retryDelay * 1000;
+
+    // Wait for message to be archived
+    const [message] = await waitFor(
+      async () => {
+        const messages = await getArchivedMessages(sql, workerConfig.queueName);
+        return messages.length >= 1 && messages;
+      },
+      {
+        timeoutMs: expectedMinimumMs + 500,
+      }
+    );
+
+    // Verify timing
+    const totalMs = Date.now() - startTime;
+    assertGreaterOrEqual(
+      totalMs,
+      expectedMinimumMs,
+      `Processing time ${totalMs}ms was shorter than minimum ${expectedMinimumMs}ms`
+    );
+
+    // Verify retry count
+    const expectedReads = workerConfig.retryLimit + 1;
+    assertEquals(
+      message.read_ct,
+      expectedReads,
+      `Message should be read ${expectedReads} times (1 initial + ${workerConfig.retryLimit} retries)`
+    );
+  } finally {
+    await worker.stop();
+  }
+}));
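Worked out with the config above, this is why the wait uses `expectedMinimumMs + 500` as its timeout:

// retryLimit = 2, retryDelay = 2 (seconds), so:
const expectedMinimumMs = 2 * 2 * 1000; // 4000 ms of retry delays at minimum
const expectedReads = 2 + 1;            // 3 deliveries: 1 initial + 2 retries
// The waitFor timeout of expectedMinimumMs + 500 leaves 500 ms of headroom
// for the final delivery to fail and the message to be archived.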
package/tests/integration/starting_worker.test.ts
ADDED

@@ -0,0 +1,35 @@
+import { createQueueWorker } from '../../src/queue/createQueueWorker.ts';
+import { withTransaction } from "../db.ts";
+import { delay } from "@std/async";
+
+Deno.test('Starting worker', withTransaction(async (sql) => {
+  const worker = createQueueWorker(console.log, {
+    sql,
+    maxPollSeconds: 1
+  });
+
+  worker.startOnlyOnce({
+    edgeFunctionName: 'test',
+    // random uuid
+    workerId: crypto.randomUUID(),
+  });
+
+  await delay(100);
+
+  try {
+    const workers = await sql`select * from edge_worker.workers`;
+
+    console.log(workers);
+  } finally {
+    await worker.stop();
+  }
+}));
+
+Deno.test('check pgmq version', withTransaction(async (sql) => {
+  const result = await sql`
+    SELECT extversion
+    FROM pg_extension
+    WHERE extname = 'pgmq'
+  `;
+  console.log('pgmq version:', result);
+}));
package/tests/sql.ts
ADDED

@@ -0,0 +1,46 @@
+import postgres from 'postgres';
+
+const DB_URL = 'postgresql://postgres:postgres@127.0.0.1:50322/postgres';
+
+export function createSql() {
+  return postgres(DB_URL, {
+    prepare: false,
+    onnotice(_: unknown) {
+      // no-op to silence notices
+    },
+  });
+}
+
+export async function withRollback<T>(
+  callback: (sql: postgres.Sql) => Promise<T>
+): Promise<T> {
+  const sql = createSql();
+  try {
+    const result = (await sql.begin(
+      'read write',
+      async (sqlTx: postgres.Sql) => {
+        const callbackResult = await callback(sqlTx);
+        await sqlTx`ROLLBACK`; // discard the test's writes instead of committing
+        return callbackResult;
+      }
+    )) as T;
+    return result;
+  } finally {
+    await sql.end();
+  }
+}
+
+export async function withSql<T>(
+  callback: (sql: postgres.Sql) => Promise<T>
+): Promise<T> {
+  const sql = createSql();
+  try {
+    return await callback(sql);
+  } finally {
+    await sql.end();
+  }
+}
+
+const sql = createSql();
+
+export { type postgres, sql };
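A usage sketch for `withRollback`: because the callback's transaction is explicitly rolled back before `sql.begin` returns, anything the callback creates should leave no trace afterwards. The `scratch_queue` name is illustrative only.

import { withRollback } from './sql.ts';

// Run assertions inside a transaction that is always rolled back,
// so the database is untouched once the callback returns.
const queues = await withRollback(async (sql) => {
  await sql`SELECT pgmq.create('scratch_queue')`;
  return await sql`SELECT queue_name FROM pgmq.list_queues()`;
});
console.log(queues); // includes 'scratch_queue' inside the transaction only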
package/tests/unit/WorkerState.test.ts
ADDED

@@ -0,0 +1,74 @@
+import { assertEquals, assertThrows } from '@std/assert';
+import { WorkerState, States, TransitionError } from '../../src/core/WorkerState.ts';
+
+Deno.test('WorkerState - initial state should be Created', () => {
+  const state = new WorkerState();
+  assertEquals(state.current, States.Created);
+  assertEquals(state.isCreated, true);
+});
+
+Deno.test('WorkerState - valid state transitions', () => {
+  const state = new WorkerState();
+
+  // Created -> Starting
+  state.transitionTo(States.Starting);
+  assertEquals(state.current, States.Starting);
+  assertEquals(state.isStarting, true);
+
+  // Starting -> Running
+  state.transitionTo(States.Running);
+  assertEquals(state.current, States.Running);
+  assertEquals(state.isRunning, true);
+
+  // Running -> Stopping
+  state.transitionTo(States.Stopping);
+  assertEquals(state.current, States.Stopping);
+  assertEquals(state.isStopping, true);
+
+  // Stopping -> Stopped
+  state.transitionTo(States.Stopped);
+  assertEquals(state.current, States.Stopped);
+});
+
+Deno.test('WorkerState - invalid state transitions should throw', () => {
+  const state = new WorkerState();
+
+  // Cannot transition from Created to Running
+  assertThrows(
+    () => {
+      state.transitionTo(States.Running);
+    },
+    TransitionError,
+    'Cannot transition from created to running'
+  );
+
+  // Cannot transition from Created to Stopped
+  assertThrows(
+    () => {
+      state.transitionTo(States.Stopped);
+    },
+    TransitionError,
+    'Cannot transition from created to stopped'
+  );
+});
+
+Deno.test('WorkerState - transitioning to same state should be no-op', () => {
+  const state = new WorkerState();
+
+  // Transition to Starting first
+  state.transitionTo(States.Starting);
+  assertEquals(state.current, States.Starting);
+
+  // Transition to Starting again
+  state.transitionTo(States.Starting);
+  assertEquals(state.current, States.Starting);
+});
+
+Deno.test('WorkerState - state getters', () => {
+  const state = new WorkerState();
+
+  assertEquals(state.isCreated, true);
+  assertEquals(state.isStarting, false);
+  assertEquals(state.isRunning, false);
+  assertEquals(state.isStopping, false);
+});
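These tests pin down the state machine without showing it (the real implementation is `src/core/WorkerState.ts`, +85 in the file list above). Below is a minimal sketch consistent with the assertions: lowercase string values, a linear created -> starting -> running -> stopping -> stopped path, same-state transitions as no-ops, and everything else throwing TransitionError. The `starting -> stopping` edge is an assumption the tests do not exercise.

// Minimal sketch, not the package's actual implementation.
export enum States {
  Created = 'created',
  Starting = 'starting',
  Running = 'running',
  Stopping = 'stopping',
  Stopped = 'stopped',
}

export class TransitionError extends Error {}

// Allowed transitions; starting -> stopping is an assumed edge.
const allowed: Record<States, States[]> = {
  [States.Created]: [States.Starting],
  [States.Starting]: [States.Running, States.Stopping],
  [States.Running]: [States.Stopping],
  [States.Stopping]: [States.Stopped],
  [States.Stopped]: [],
};

export class WorkerState {
  current: States = States.Created;

  transitionTo(next: States): void {
    if (next === this.current) return; // same-state transition is a no-op
    if (!allowed[this.current].includes(next)) {
      throw new TransitionError(
        `Cannot transition from ${this.current} to ${next}`
      );
    }
    this.current = next;
  }

  get isCreated() { return this.current === States.Created; }
  get isStarting() { return this.current === States.Starting; }
  get isRunning() { return this.current === States.Running; }
  get isStopping() { return this.current === States.Stopping; }
}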
package/tsconfig.lib.json
ADDED

@@ -0,0 +1,23 @@
+{
+  "extends": "../../tsconfig.base.json",
+  "compilerOptions": {
+    "outDir": "dist",
+    "tsBuildInfoFile": "dist/tsconfig.lib.tsbuildinfo",
+    "emitDeclarationOnly": false,
+    "types": ["node"],
+    "target": "es2020",
+    "module": "commonjs",
+    "moduleResolution": "node"
+  },
+  "include": ["src/**/*.ts"],
+  "exclude": [
+    "src/**/*.test.ts",
+    "src/**/*.spec.ts",
+    "src/**/*.test.tsx",
+    "src/**/*.spec.tsx",
+    "src/**/*.test.js",
+    "src/**/*.spec.js",
+    "src/**/*.test.jsx",
+    "src/**/*.spec.jsx"
+  ]
+}