@pgflow/edge-worker 0.0.5 → 0.0.7-prealpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/package.json +10 -4
  2. package/.envrc +0 -2
  3. package/CHANGELOG.md +0 -10
  4. package/deno.lock +0 -336
  5. package/deno.test.json +0 -32
  6. package/dist/LICENSE.md +0 -660
  7. package/dist/README.md +0 -46
  8. package/dist/index.js +0 -972
  9. package/dist/index.js.map +0 -7
  10. package/mod.ts +0 -7
  11. package/pkgs/edge-worker/dist/index.js +0 -953
  12. package/pkgs/edge-worker/dist/index.js.map +0 -7
  13. package/pkgs/edge-worker/dist/pkgs/edge-worker/LICENSE.md +0 -660
  14. package/pkgs/edge-worker/dist/pkgs/edge-worker/README.md +0 -46
  15. package/project.json +0 -164
  16. package/scripts/concatenate-migrations.sh +0 -22
  17. package/scripts/wait-for-localhost +0 -17
  18. package/sql/990_active_workers.sql +0 -11
  19. package/sql/991_inactive_workers.sql +0 -12
  20. package/sql/992_spawn_worker.sql +0 -68
  21. package/sql/benchmarks/max_concurrency.sql +0 -32
  22. package/sql/queries/debug_connections.sql +0 -0
  23. package/sql/queries/debug_processing_gaps.sql +0 -115
  24. package/src/EdgeWorker.ts +0 -172
  25. package/src/core/BatchProcessor.ts +0 -38
  26. package/src/core/ExecutionController.ts +0 -51
  27. package/src/core/Heartbeat.ts +0 -23
  28. package/src/core/Logger.ts +0 -42
  29. package/src/core/Queries.ts +0 -44
  30. package/src/core/Worker.ts +0 -102
  31. package/src/core/WorkerLifecycle.ts +0 -93
  32. package/src/core/WorkerState.ts +0 -85
  33. package/src/core/types.ts +0 -47
  34. package/src/flow/FlowWorkerLifecycle.ts +0 -81
  35. package/src/flow/StepTaskExecutor.ts +0 -87
  36. package/src/flow/StepTaskPoller.ts +0 -51
  37. package/src/flow/createFlowWorker.ts +0 -105
  38. package/src/flow/types.ts +0 -1
  39. package/src/index.ts +0 -15
  40. package/src/queue/MessageExecutor.ts +0 -105
  41. package/src/queue/Queue.ts +0 -92
  42. package/src/queue/ReadWithPollPoller.ts +0 -35
  43. package/src/queue/createQueueWorker.ts +0 -145
  44. package/src/queue/types.ts +0 -14
  45. package/src/spawnNewEdgeFunction.ts +0 -33
  46. package/supabase/call +0 -23
  47. package/supabase/cli +0 -3
  48. package/supabase/config.toml +0 -42
  49. package/supabase/functions/cpu_intensive/index.ts +0 -20
  50. package/supabase/functions/creating_queue/index.ts +0 -5
  51. package/supabase/functions/failing_always/index.ts +0 -13
  52. package/supabase/functions/increment_sequence/index.ts +0 -14
  53. package/supabase/functions/max_concurrency/index.ts +0 -17
  54. package/supabase/functions/serial_sleep/index.ts +0 -16
  55. package/supabase/functions/utils.ts +0 -13
  56. package/supabase/seed.sql +0 -2
  57. package/tests/db/compose.yaml +0 -20
  58. package/tests/db.ts +0 -71
  59. package/tests/e2e/README.md +0 -54
  60. package/tests/e2e/_helpers.ts +0 -135
  61. package/tests/e2e/performance.test.ts +0 -60
  62. package/tests/e2e/restarts.test.ts +0 -56
  63. package/tests/helpers.ts +0 -22
  64. package/tests/integration/_helpers.ts +0 -43
  65. package/tests/integration/creating_queue.test.ts +0 -32
  66. package/tests/integration/flow/minimalFlow.test.ts +0 -121
  67. package/tests/integration/maxConcurrent.test.ts +0 -76
  68. package/tests/integration/retries.test.ts +0 -78
  69. package/tests/integration/starting_worker.test.ts +0 -35
  70. package/tests/sql.ts +0 -46
  71. package/tests/unit/WorkerState.test.ts +0 -74
  72. package/tsconfig.lib.json +0 -23
package/src/flow/StepTaskPoller.ts DELETED
@@ -1,51 +0,0 @@
- import type { StepTaskRecord, IPgflowClient } from './types.ts';
- import type { IPoller } from '../core/types.ts';
- import { getLogger } from '../core/Logger.ts';
- import type { AnyFlow } from '@pgflow/dsl';
-
- export interface StepTaskPollerConfig {
-   batchSize: number;
-   queueName: string;
- }
-
- /**
-  * A poller that retrieves flow tasks using an IPgflowClient
-  */
- export class StepTaskPoller<TFlow extends AnyFlow>
-   implements IPoller<StepTaskRecord<TFlow>>
- {
-   private logger = getLogger('StepTaskPoller');
-
-   constructor(
-     private readonly adapter: IPgflowClient<TFlow>,
-     private readonly signal: AbortSignal,
-     private readonly config: StepTaskPollerConfig
-   ) {}
-
-   async poll(): Promise<StepTaskRecord<TFlow>[]> {
-     if (this.isAborted()) {
-       this.logger.debug('Polling aborted, returning empty array');
-       return [];
-     }
-
-     this.logger.debug(
-       `Polling for flow tasks with batch size ${this.config.batchSize}`
-     );
-
-     try {
-       const tasks = await this.adapter.pollForTasks(
-         this.config.queueName,
-         this.config.batchSize
-       );
-       this.logger.debug(`Retrieved ${tasks.length} flow tasks`);
-       return tasks;
-     } catch (err: unknown) {
-       this.logger.error(`Error polling for flow tasks: ${err}`);
-       return [];
-     }
-   }
-
-   private isAborted(): boolean {
-     return this.signal.aborted;
-   }
- }
package/src/flow/createFlowWorker.ts DELETED
@@ -1,105 +0,0 @@
- import type { AnyFlow } from '@pgflow/dsl';
- import type { EdgeWorkerConfig } from '../EdgeWorker.ts';
- import { ExecutionController } from '../core/ExecutionController.ts';
- import { StepTaskPoller, type StepTaskPollerConfig } from './StepTaskPoller.ts';
- import { StepTaskExecutor } from './StepTaskExecutor.ts';
- import { PgflowSqlClient } from '@pgflow/core';
- import { Queries } from '../core/Queries.ts';
- import type { StepTaskRecord } from './types.ts';
- import type { IExecutor } from '../core/types.ts';
- import { Worker } from '../core/Worker.ts';
- import postgres from 'postgres';
- import { FlowWorkerLifecycle } from './FlowWorkerLifecycle.ts';
- import { BatchProcessor } from '../core/BatchProcessor.ts';
- import { getLogger } from '../core/Logger.ts';
-
- /**
-  * Configuration for the flow worker
-  */
- export type FlowWorkerConfig = EdgeWorkerConfig & {
-   maxConcurrent?: number;
-   connectionString?: string;
-   sql?: postgres.Sql;
-   maxPgConnections?: number;
-   batchSize?: number;
- };
-
- /**
-  * Creates a new Worker instance for processing flow tasks.
-  *
-  * @param flow - The Flow DSL definition
-  * @param config - Configuration options for the worker
-  * @returns A configured Worker instance ready to be started
-  */
- export function createFlowWorker<TFlow extends AnyFlow>(
-   flow: TFlow,
-   config: FlowWorkerConfig
- ): Worker {
-   const logger = getLogger('createFlowWorker');
-
-   // Create abort controller for graceful shutdown
-   const abortController = new AbortController();
-   const abortSignal = abortController.signal;
-
-   if (!config.sql && !config.connectionString) {
-     throw new Error(
-       "Either 'sql' or 'connectionString' must be provided in FlowWorkerConfig."
-     );
-   }
-
-   const sql =
-     config.sql ||
-     postgres(config.connectionString!, {
-       max: config.maxPgConnections,
-       prepare: false,
-     });
-
-   // Create the pgflow adapter
-   const pgflowAdapter = new PgflowSqlClient<TFlow>(sql);
-
-   // Use the flow slug as the queue name, or fall back to 'tasks'
-   const queueName = flow.slug || 'tasks';
-   logger.debug(`Using queue name: ${queueName}`);
-
-   // Create specialized FlowWorkerLifecycle with the queries and flow
-   const queries = new Queries(sql);
-   const lifecycle = new FlowWorkerLifecycle<TFlow>(queries, flow);
-
-   // Create StepTaskPoller (uses the resolved queueName so the fallback applies)
-   const pollerConfig: StepTaskPollerConfig = {
-     batchSize: config.batchSize || 10,
-     queueName,
-   };
-   const poller = new StepTaskPoller<TFlow>(
-     pgflowAdapter,
-     abortSignal,
-     pollerConfig
-   );
-
-   // Create executor factory with proper typing
-   const executorFactory = (
-     record: StepTaskRecord<TFlow>,
-     signal: AbortSignal
-   ): IExecutor => {
-     return new StepTaskExecutor<TFlow>(flow, record, pgflowAdapter, signal);
-   };
-
-   // Create ExecutionController
-   const executionController = new ExecutionController<StepTaskRecord<TFlow>>(
-     executorFactory,
-     abortSignal,
-     {
-       maxConcurrent: config.maxConcurrent || 10,
-     }
-   );
-
-   // Create BatchProcessor
-   const batchProcessor = new BatchProcessor<StepTaskRecord<TFlow>>(
-     executionController,
-     poller,
-     abortSignal
-   );
-
-   // Return Worker
-   return new Worker(batchProcessor, lifecycle, sql);
- }
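For orientation, a minimal sketch of the removed createFlowWorker in use. Only FlowWorkerConfig and the createFlowWorker signature come from this diff; the flow definition below is a hypothetical use of the @pgflow/dsl surface, and how the returned Worker is started is outside this diff.

import { createFlowWorker } from '@pgflow/edge-worker';
import { Flow } from '@pgflow/dsl';

// Hypothetical flow definition - the exact @pgflow/dsl API is an assumption here.
const greetFlow = new Flow<{ name: string }>({ slug: 'greet_flow' }).step(
  { slug: 'greet' },
  async (input) => `Hello, ${input.run.name}!`
);

// Per the check above, either `sql` or `connectionString` must be provided.
const worker = createFlowWorker(greetFlow, {
  connectionString: Deno.env.get('EDGE_WORKER_DB_URL')!,
  maxConcurrent: 10,
  batchSize: 10,
});
// Starting the returned Worker is not shown: the Worker class is outside this diff.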
package/src/flow/types.ts DELETED
@@ -1 +0,0 @@
- export * from '../../../core/src/types.ts';
package/src/index.ts DELETED
@@ -1,15 +0,0 @@
- // Export existing queue-based worker
- export { createQueueWorker } from './queue/createQueueWorker.ts';
- export { EdgeWorker } from './EdgeWorker.ts';
-
- // Export new flow-based worker
- export { createFlowWorker } from './flow/createFlowWorker.ts';
- export { FlowWorkerLifecycle } from './flow/FlowWorkerLifecycle.ts';
-
- // Export types
- export type { StepTaskRecord } from './flow/types.ts';
- export type { FlowWorkerConfig } from './flow/createFlowWorker.ts';
- export type { StepTaskPollerConfig } from './flow/StepTaskPoller.ts';
-
- // Re-export types from the base system
- export type { Json, IExecutor, IPoller, IMessage, ILifecycle, IBatchProcessor } from './core/types.ts';
package/src/queue/MessageExecutor.ts DELETED
@@ -1,105 +0,0 @@
- import type { Json } from '../core/types.ts';
- import type { PgmqMessageRecord } from './types.ts';
- import type { Queue } from './Queue.ts';
- import { getLogger } from '../core/Logger.ts';
-
- class AbortError extends Error {
-   constructor() {
-     super('Operation aborted');
-     this.name = 'AbortError';
-   }
- }
-
- /**
-  * A class that executes a message handler.
-  *
-  * It runs the handler for a single message and then retries or archives
-  * the message based on the retry limit and delay.
-  *
-  * It also handles the abort signal and logs errors.
-  */
- export class MessageExecutor<TPayload extends Json> {
-   private logger = getLogger('MessageExecutor');
-
-   constructor(
-     private readonly queue: Queue<TPayload>,
-     private readonly record: PgmqMessageRecord<TPayload>,
-     private readonly messageHandler: (
-       message: TPayload
-     ) => Promise<void> | void,
-     private readonly signal: AbortSignal,
-     private readonly retryLimit: number,
-     private readonly retryDelay: number
-   ) {}
-
-   get msgId() {
-     return this.record.msg_id;
-   }
-
-   async execute(): Promise<void> {
-     try {
-       if (this.signal.aborted) {
-         throw new AbortError();
-       }
-
-       // Check if already aborted before starting
-       this.signal.throwIfAborted();
-
-       this.logger.debug(`Executing task ${this.msgId}...`);
-       await this.messageHandler(this.record.message!);
-
-       this.logger.debug(
-         `Task ${this.msgId} completed successfully, archiving...`
-       );
-       await this.queue.archive(this.msgId);
-       this.logger.debug(`Archived task ${this.msgId} successfully`);
-     } catch (error) {
-       await this.handleExecutionError(error);
-     }
-   }
-
-   /**
-    * Handles an error that occurred during execution.
-    *
-    * If the error is an AbortError, the worker was aborted and is stopping;
-    * the message will reappear after the visibility timeout and be picked up by another worker.
-    *
-    * Otherwise, the message is retried or archived permanently.
-    */
-   private async handleExecutionError(error: unknown) {
-     if (error instanceof Error && error.name === 'AbortError') {
-       this.logger.debug(`Aborted execution for ${this.msgId}`);
-       // Do not throw - the worker was aborted and is stopping;
-       // the message will reappear after the visibility timeout
-       // and be picked up by another worker
-     } else {
-       this.logger.debug(`Task ${this.msgId} failed with error: ${error}`);
-       await this.retryOrArchive();
-     }
-   }
-
-   /**
-    * Retries the message if retries are still available.
-    * Otherwise, archives the message permanently and stops processing it.
-    */
-   private async retryOrArchive() {
-     if (this.retryAvailable) {
-       // adjust the visibility timeout so the message reappears after retryDelay
-       this.logger.debug(`Retrying ${this.msgId} in ${this.retryDelay} seconds`);
-       await this.queue.setVt(this.msgId, this.retryDelay);
-     } else {
-       // archive the message permanently and stop processing it
-       this.logger.debug(`Archiving ${this.msgId} forever`);
-       await this.queue.archive(this.msgId);
-     }
-   }
-
-   /**
-    * Returns true if the message can be retried.
-    */
-   private get retryAvailable() {
-     const readCountLimit = this.retryLimit + 1; // initial read also counts
-
-     return this.record.read_ct < readCountLimit;
-   }
- }
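The retryAvailable getter above is easy to misread: pgmq increments read_ct on every delivery, including the first one, so a retryLimit of N allows N + 1 total reads. A standalone restatement of the same check (a sketch, not part of the package):

// Mirrors MessageExecutor.retryAvailable: the initial read also counts.
function retryAvailable(readCt: number, retryLimit: number): boolean {
  const readCountLimit = retryLimit + 1;
  return readCt < readCountLimit;
}

// With the worker default of retryLimit = 5:
console.assert(retryAvailable(1, 5) === true);  // first failure -> retry
console.assert(retryAvailable(5, 5) === true);  // fifth read -> one retry left
console.assert(retryAvailable(6, 5) === false); // sixth read failed -> archive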
package/src/queue/Queue.ts DELETED
@@ -1,92 +0,0 @@
- import type postgres from 'postgres';
- import type { PgmqMessageRecord } from './types.ts';
- import type { Json } from '../core/types.ts';
-
- export class Queue<TPayload extends Json> {
-   constructor(private readonly sql: postgres.Sql, readonly queueName: string) {}
-
-   /**
-    * Creates the queue if it doesn't exist.
-    * If the queue already exists, this method does nothing.
-    */
-   async safeCreate() {
-     return await this.sql`
-       select * from pgmq.create(${this.queueName})
-       where not exists (
-         select 1 from pgmq.list_queues() where queue_name = ${this.queueName}
-       );
-     `;
-   }
-
-   /**
-    * Drops the queue if it exists.
-    * If the queue doesn't exist, this method does nothing.
-    */
-   async safeDrop() {
-     return await this.sql`
-       select * from pgmq.drop_queue(${this.queueName})
-       where exists (
-         select 1 from pgmq.list_queues() where queue_name = ${this.queueName}
-       );
-     `;
-   }
-
-   async archive(msgId: number): Promise<void> {
-     await this.sql`
-       SELECT pgmq.archive(queue_name => ${this.queueName}, msg_id => ${msgId}::bigint);
-     `;
-   }
-
-   async archiveBatch(msgIds: number[]): Promise<void> {
-     await this.sql`
-       SELECT pgmq.archive(queue_name => ${this.queueName}, msg_ids => ${msgIds}::bigint[]);
-     `;
-   }
-
-   async send(message: TPayload): Promise<void> {
-     const msgJson = JSON.stringify(message);
-     await this.sql`
-       SELECT pgmq.send(queue_name => ${this.queueName}, msg => ${msgJson}::jsonb)
-     `;
-   }
-
-   async readWithPoll(
-     batchSize = 20,
-     visibilityTimeout = 2,
-     maxPollSeconds = 5,
-     pollIntervalMs = 200
-   ) {
-     return await this.sql<PgmqMessageRecord<TPayload>[]>`
-       SELECT *
-       FROM edge_worker.read_with_poll(
-         queue_name => ${this.queueName},
-         vt => ${visibilityTimeout},
-         qty => ${batchSize},
-         max_poll_seconds => ${maxPollSeconds},
-         poll_interval_ms => ${pollIntervalMs}
-       );
-     `;
-   }
-
-   /**
-    * Sets the visibility timeout of a message to the current time plus the given offset.
-    *
-    * This is an inlined version of pgmq.set_vt that works around a bug:
-    * the original uses now() instead of clock_timestamp(), which is problematic in transactions.
-    * See more details here: https://github.com/tembo-io/pgmq/issues/367
-    *
-    * The only change made is now() replaced with clock_timestamp().
-    */
-   async setVt(
-     msgId: number,
-     vtOffsetSeconds: number
-   ): Promise<PgmqMessageRecord<TPayload>> {
-     const records = await this.sql<PgmqMessageRecord<TPayload>[]>`
-       UPDATE ${this.sql('pgmq.q_' + this.queueName)}
-       SET vt = (clock_timestamp() + make_interval(secs => ${vtOffsetSeconds}))
-       WHERE msg_id = ${msgId}::bigint
-       RETURNING *;
-     `;
-     return records[0];
-   }
- }
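A hedged sketch of the clock_timestamp() workaround in use. It assumes the same 'postgres' import style as the package and an EDGE_WORKER_DB_URL environment variable; the message id and payload type are invented for illustration.

import postgres from 'postgres';
import { Queue } from './src/queue/Queue.ts';

const sql = postgres(Deno.env.get('EDGE_WORKER_DB_URL')!, { prepare: false });
const queue = new Queue<{ task: string }>(sql, 'tasks');

// Delay message 42 by 30 seconds. Because the inlined UPDATE uses
// clock_timestamp() rather than now(), the offset is measured from the
// moment the UPDATE actually runs - inside a transaction, now() stays
// frozen at transaction start and could yield a vt already in the past.
const updated = await queue.setVt(42, 30);
console.log(updated.vt);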
package/src/queue/ReadWithPollPoller.ts DELETED
@@ -1,35 +0,0 @@
- import type { Queue } from './Queue.ts';
- import type { PgmqMessageRecord } from './types.ts';
- import type { Json } from '../core/types.ts';
-
- export interface PollerConfig {
-   batchSize: number;
-   maxPollSeconds: number;
-   pollIntervalMs: number;
-   visibilityTimeout: number;
- }
-
- export class ReadWithPollPoller<TPayload extends Json> {
-   constructor(
-     protected readonly queue: Queue<TPayload>,
-     protected readonly signal: AbortSignal,
-     protected readonly config: PollerConfig
-   ) {}
-
-   async poll(): Promise<PgmqMessageRecord<TPayload>[]> {
-     if (this.isAborted()) {
-       return [];
-     }
-
-     return await this.queue.readWithPoll(
-       this.config.batchSize,
-       this.config.visibilityTimeout,
-       this.config.maxPollSeconds,
-       this.config.pollIntervalMs
-     );
-   }
-
-   private isAborted(): boolean {
-     return this.signal.aborted;
-   }
- }
package/src/queue/createQueueWorker.ts DELETED
@@ -1,145 +0,0 @@
- import { ExecutionController } from '../core/ExecutionController.ts';
- import { MessageExecutor } from './MessageExecutor.ts';
- import { Queries } from '../core/Queries.ts';
- import { Queue } from './Queue.ts';
- import { ReadWithPollPoller } from './ReadWithPollPoller.ts';
- import type { Json } from '../core/types.ts';
- import type { PgmqMessageRecord } from './types.ts';
- import { Worker } from '../core/Worker.ts';
- import postgres from 'postgres';
- import { WorkerLifecycle } from '../core/WorkerLifecycle.ts';
- import { BatchProcessor } from '../core/BatchProcessor.ts';
-
- /**
-  * Configuration for the queue worker
-  */
- export type QueueWorkerConfig = {
-   /**
-    * PostgreSQL connection string.
-    * If not provided, it will be read from the EDGE_WORKER_DB_URL environment variable.
-    */
-   connectionString?: string;
-
-   /**
-    * Name of the queue to poll for messages
-    * @default 'tasks'
-    */
-   queueName?: string;
-
-   /**
-    * How many tasks are processed at the same time
-    * @default 10
-    */
-   maxConcurrent?: number;
-
-   /**
-    * How many connections to the database are opened
-    * @default 4
-    */
-   maxPgConnections?: number;
-
-   /**
-    * In-worker polling interval in seconds
-    * @default 5
-    */
-   maxPollSeconds?: number;
-
-   /**
-    * In-database polling interval in milliseconds
-    * @default 200
-    */
-   pollIntervalMs?: number;
-
-   /**
-    * How long to wait before retrying a failed job, in seconds
-    * @default 3
-    */
-   retryDelay?: number;
-
-   /**
-    * How many times to retry a failed job
-    * @default 5
-    */
-   retryLimit?: number;
-
-   /**
-    * How long a job is invisible after reading, in seconds.
-    * If not successful, it will reappear after this time.
-    * @default 3
-    */
-   visibilityTimeout?: number;
-
-   /**
-    * Batch size for polling messages
-    * @default 10
-    */
-   batchSize?: number;
-
-   /**
-    * Optional SQL client instance
-    */
-   sql?: postgres.Sql;
- };
-
- /**
-  * Creates a new Worker instance for processing queue messages.
-  *
-  * @param handler - The message handler function that processes each message from the queue
-  * @param config - Configuration options for the worker
-  * @returns A configured Worker instance ready to be started
-  */
- export function createQueueWorker<TPayload extends Json>(
-   handler: (message: TPayload) => Promise<void> | void,
-   config: QueueWorkerConfig
- ): Worker {
-   type QueueMessage = PgmqMessageRecord<TPayload>;
-
-   const abortController = new AbortController();
-   const abortSignal = abortController.signal;
-
-   // Use the provided SQL connection if available, otherwise create one from the connection string
-   const sql =
-     config.sql ||
-     postgres(config.connectionString || '', {
-       max: config.maxPgConnections,
-       prepare: false,
-     });
-
-   const queue = new Queue<TPayload>(sql, config.queueName || 'tasks');
-   const queries = new Queries(sql);
-
-   const lifecycle = new WorkerLifecycle<TPayload>(queries, queue);
-
-   const executorFactory = (record: QueueMessage, signal: AbortSignal) => {
-     return new MessageExecutor(
-       queue,
-       record,
-       handler,
-       signal,
-       config.retryLimit || 5,
-       config.retryDelay || 3
-     );
-   };
-
-   const poller = new ReadWithPollPoller(queue, abortSignal, {
-     batchSize: config.batchSize || config.maxConcurrent || 10,
-     maxPollSeconds: config.maxPollSeconds || 5,
-     pollIntervalMs: config.pollIntervalMs || 200,
-     visibilityTimeout: config.visibilityTimeout || 3,
-   });
-
-   const executionController = new ExecutionController<QueueMessage>(
-     executorFactory,
-     abortSignal,
-     {
-       maxConcurrent: config.maxConcurrent || 10,
-     }
-   );
-   const batchProcessor = new BatchProcessor<QueueMessage>(
-     executionController,
-     poller,
-     abortSignal
-   );
-
-   return new Worker(batchProcessor, lifecycle, sql);
- }
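For reference, a minimal sketch of the removed createQueueWorker in use, grounded in the QueueWorkerConfig defaults above. The payload type and handler are invented for illustration, and how the returned Worker is started is outside this diff.

import { createQueueWorker } from '@pgflow/edge-worker';

type Payload = { userId: number };

const worker = createQueueWorker<Payload>(
  async (message) => {
    // A thrown error here lands in MessageExecutor.retryOrArchive.
    console.log(`processing user ${message.userId}`);
  },
  {
    queueName: 'tasks',  // default 'tasks'
    maxConcurrent: 10,   // default 10
    retryLimit: 5,       // default 5
    retryDelay: 3,       // default 3 seconds
    connectionString: Deno.env.get('EDGE_WORKER_DB_URL'),
  }
);
// Starting the returned Worker is not shown: the Worker class is outside this diff.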
package/src/queue/types.ts DELETED
@@ -1,14 +0,0 @@
- import type { Json, IMessage } from "../core/types.ts";
-
- /**
-  * Fields are nullable because composite types in Postgres do not allow NOT NULL,
-  * but all of these values except `message` come from queue table columns,
-  * which are explicitly marked as NOT NULL.
-  */
- export interface PgmqMessageRecord<TPayload extends Json | null = Json> extends IMessage {
-   read_ct: number;
-   enqueued_at: string;
-   vt: string;
-   message: TPayload;
- }
-
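A hedged example of the record shape returned by readWithPoll. It assumes the IMessage base interface (not part of this diff) contributes only msg_id, which is the field MessageExecutor reads from it; the values are invented.

const record: PgmqMessageRecord<{ userId: number }> = {
  msg_id: 42,
  read_ct: 1,                              // incremented on every read, including the first
  enqueued_at: '2025-01-01T00:00:00.000Z', // timestamps arrive as strings
  vt: '2025-01-01T00:00:03.000Z',          // invisible to readers until this time
  message: { userId: 7 },
};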
package/src/spawnNewEdgeFunction.ts DELETED
@@ -1,33 +0,0 @@
- import { getLogger } from './core/Logger.ts';
-
- // @ts-ignore - TODO: fix the types
- const SUPABASE_URL = Deno.env.get('SUPABASE_URL') as string;
- const SUPABASE_ANON_KEY = Deno.env.get('SUPABASE_ANON_KEY') as string;
-
- const logger = getLogger('spawnNewEdgeFunction');
-
- export default async function spawnNewEdgeFunction(
-   functionName: string = 'edge-worker'
- ): Promise<void> {
-   if (!functionName) {
-     throw new Error('functionName cannot be null or empty');
-   }
-
-   logger.debug('Spawning a new Edge Function...');
-
-   const response = await fetch(`${SUPABASE_URL}/functions/v1/${functionName}`, {
-     method: 'POST',
-     headers: {
-       Authorization: `Bearer ${SUPABASE_ANON_KEY}`,
-       'Content-Type': 'application/json',
-     },
-   });
-
-   if (!response.ok) {
-     throw new Error(
-       `Edge function returned non-OK status: ${response.status} ${response.statusText}`
-     );
-   }
-
-   logger.debug('Edge Function spawned successfully!');
- }
package/supabase/call DELETED
@@ -1,23 +0,0 @@
- #!/bin/bash
-
- # get env vars from supabase status and eval them for usage
- supabase_env=$(supabase status --output env 2>/dev/null)
-
- if [ $? -eq 0 ]; then
-   eval "$supabase_env"
- fi
-
- if [ -f ".env" ]; then
-   source .env
- fi
-
- function_name="$1"
- data="$2"
-
- API_URL=http://localhost:50321
- curl \
-   --request POST \
-   "${API_URL}/functions/v1/${function_name}" \
-   --header "Authorization: Bearer ${ANON_KEY}" \
-   --header 'Content-Type: application/json' \
-   --data-raw "$data"
package/supabase/cli DELETED
@@ -1,3 +0,0 @@
- #!/bin/bash
-
- pnpm supabase --workdir pkgs/edge-worker "$@"
package/supabase/config.toml DELETED
@@ -1,42 +0,0 @@
- project_id = "edge-worker"
-
- [api]
- enabled = true
- port = 50321
-
- [db]
- port = 50322
- shadow_port = 50320
- major_version = 15
-
- [db.pooler]
- enabled = true
- port = 50329
- pool_mode = "transaction"
- default_pool_size = 200
- max_client_conn = 250
-
- [db.seed]
- enabled = true
- sql_paths = ['./seed.sql']
-
- [edge_runtime]
- enabled = true
- policy = "per_worker"
- inspector_port = 8083
-
- # disable unused features
- [realtime]
- enabled = false
- [studio]
- enabled = false
- [inbucket]
- enabled = false
- [analytics]
- enabled = false
- [storage]
- enabled = false
- [auth]
- enabled = false
-
-
package/supabase/functions/cpu_intensive/index.ts DELETED
@@ -1,20 +0,0 @@
- import { EdgeWorker } from '../_src/EdgeWorker.ts';
- import { crypto } from 'jsr:@std/crypto';
- import { sql } from '../utils.ts';
-
- async function cpuIntensiveTask() {
-   let data = new TextEncoder().encode('burn');
-   const timeId = `cpu_intensive_${Math.random()}`;
-   console.time(timeId);
-   for (let i = 0; i < 10000; i++) {
-     data = new Uint8Array(await crypto.subtle.digest('SHA-256', data));
-   }
-   console.timeEnd(timeId);
-
-   console.log(
-     '[cpu_intensive] last_val = ',
-     await sql`SELECT nextval('test_seq')`
-   );
- }
-
- EdgeWorker.start(cpuIntensiveTask, { queueName: 'cpu_intensive' });