@pgflow/edge-worker 0.0.5-prealpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/.envrc +2 -0
  2. package/LICENSE.md +660 -0
  3. package/README.md +46 -0
  4. package/deno.json +32 -0
  5. package/deno.lock +369 -0
  6. package/dist/LICENSE.md +660 -0
  7. package/dist/README.md +46 -0
  8. package/dist/index.js +972 -0
  9. package/dist/index.js.map +7 -0
  10. package/mod.ts +7 -0
  11. package/package.json +14 -0
  12. package/project.json +164 -0
  13. package/scripts/concatenate-migrations.sh +22 -0
  14. package/scripts/wait-for-localhost +17 -0
  15. package/sql/990_active_workers.sql +11 -0
  16. package/sql/991_inactive_workers.sql +12 -0
  17. package/sql/992_spawn_worker.sql +68 -0
  18. package/sql/benchmarks/max_concurrency.sql +32 -0
  19. package/sql/queries/debug_connections.sql +0 -0
  20. package/sql/queries/debug_processing_gaps.sql +115 -0
  21. package/src/EdgeWorker.ts +172 -0
  22. package/src/core/BatchProcessor.ts +38 -0
  23. package/src/core/ExecutionController.ts +51 -0
  24. package/src/core/Heartbeat.ts +23 -0
  25. package/src/core/Logger.ts +69 -0
  26. package/src/core/Queries.ts +44 -0
  27. package/src/core/Worker.ts +102 -0
  28. package/src/core/WorkerLifecycle.ts +93 -0
  29. package/src/core/WorkerState.ts +85 -0
  30. package/src/core/types.ts +47 -0
  31. package/src/flow/FlowWorkerLifecycle.ts +81 -0
  32. package/src/flow/StepTaskExecutor.ts +87 -0
  33. package/src/flow/StepTaskPoller.ts +51 -0
  34. package/src/flow/createFlowWorker.ts +105 -0
  35. package/src/flow/types.ts +1 -0
  36. package/src/index.ts +15 -0
  37. package/src/queue/MessageExecutor.ts +105 -0
  38. package/src/queue/Queue.ts +92 -0
  39. package/src/queue/ReadWithPollPoller.ts +35 -0
  40. package/src/queue/createQueueWorker.ts +145 -0
  41. package/src/queue/types.ts +14 -0
  42. package/src/spawnNewEdgeFunction.ts +33 -0
  43. package/supabase/call +23 -0
  44. package/supabase/cli +3 -0
  45. package/supabase/config.toml +42 -0
  46. package/supabase/functions/cpu_intensive/index.ts +20 -0
  47. package/supabase/functions/creating_queue/index.ts +5 -0
  48. package/supabase/functions/failing_always/index.ts +13 -0
  49. package/supabase/functions/increment_sequence/index.ts +14 -0
  50. package/supabase/functions/max_concurrency/index.ts +17 -0
  51. package/supabase/functions/serial_sleep/index.ts +16 -0
  52. package/supabase/functions/utils.ts +13 -0
  53. package/supabase/seed.sql +2 -0
  54. package/tests/db/compose.yaml +20 -0
  55. package/tests/db.ts +71 -0
  56. package/tests/e2e/README.md +54 -0
  57. package/tests/e2e/_helpers.ts +135 -0
  58. package/tests/e2e/performance.test.ts +60 -0
  59. package/tests/e2e/restarts.test.ts +56 -0
  60. package/tests/helpers.ts +22 -0
  61. package/tests/integration/_helpers.ts +43 -0
  62. package/tests/integration/creating_queue.test.ts +32 -0
  63. package/tests/integration/flow/minimalFlow.test.ts +121 -0
  64. package/tests/integration/maxConcurrent.test.ts +76 -0
  65. package/tests/integration/retries.test.ts +78 -0
  66. package/tests/integration/starting_worker.test.ts +35 -0
  67. package/tests/sql.ts +46 -0
  68. package/tests/unit/WorkerState.test.ts +74 -0
  69. package/tsconfig.lib.json +23 -0
@@ -0,0 +1,7 @@
1
+ {
2
+ "version": 3,
3
+ "sources": ["../src/core/ExecutionController.ts", "../src/core/Logger.ts", "../src/queue/MessageExecutor.ts", "../src/core/Queries.ts", "../src/queue/Queue.ts", "../src/queue/ReadWithPollPoller.ts", "../src/core/Worker.ts", "../src/queue/createQueueWorker.ts", "../src/core/Heartbeat.ts", "../src/core/WorkerState.ts", "../src/core/WorkerLifecycle.ts", "../src/core/BatchProcessor.ts", "../src/spawnNewEdgeFunction.ts", "../src/EdgeWorker.ts", "../src/flow/StepTaskPoller.ts", "../src/flow/StepTaskExecutor.ts", "../../core/src/PgflowSqlClient.ts", "../src/flow/createFlowWorker.ts", "../src/flow/FlowWorkerLifecycle.ts"],
4
+ "sourcesContent": ["import { newQueue, type Queue as PromiseQueue } from '@jsr/henrygd__queue';\nimport type { IExecutor, IMessage } from './types.ts';\nimport { getLogger } from './Logger.ts';\n\nexport interface ExecutionConfig {\n maxConcurrent: number;\n}\n\nexport class ExecutionController<TMessage extends IMessage> {\n private logger = getLogger('ExecutionController');\n private promiseQueue: PromiseQueue;\n private signal: AbortSignal;\n private createExecutor: (record: TMessage, signal: AbortSignal) => IExecutor;\n\n constructor(\n executorFactory: (record: TMessage, signal: AbortSignal) => IExecutor,\n abortSignal: AbortSignal,\n config: ExecutionConfig\n ) {\n this.signal = abortSignal;\n this.createExecutor = executorFactory;\n this.promiseQueue = newQueue(config.maxConcurrent);\n }\n\n async start(record: TMessage) {\n const executor = this.createExecutor(record, this.signal);\n\n this.logger.info(`Scheduling execution of task ${executor.msgId}`);\n\n return await this.promiseQueue.add(async () => {\n try {\n this.logger.debug(`Executing task ${executor.msgId}...`);\n await executor.execute();\n this.logger.debug(`Execution successful for ${executor.msgId}`);\n } catch (error) {\n this.logger.error(`Execution failed for ${executor.msgId}:`, error);\n throw error;\n }\n });\n }\n\n async awaitCompletion() {\n const active = this.promiseQueue.active();\n const all = this.promiseQueue.size();\n\n this.logger.debug(\n `Awaiting completion of all tasks... (active/all: ${active}}/${all})`\n );\n await this.promiseQueue.done();\n }\n}\n", "import * as log from '@jsr/std__log';\n\nfunction getLogLevelFromEnv(): log.LevelName {\n const validLevels = [\n // 'NOTSET',\n 'DEBUG',\n 'INFO',\n // 'WARNING',\n 'ERROR',\n // 'CRITICAL',\n ];\n const logLevel = Deno.env.get('EDGE_WORKER_LOG_LEVEL')?.toUpperCase();\n\n if (logLevel && !validLevels.includes(logLevel)) {\n console.warn(`Invalid log level \"${logLevel}\". 
Using \"INFO\" instead.`);\n return 'INFO';\n }\n\n return (logLevel as log.LevelName) || 'INFO';\n}\n\nconst defaultLoggerConfig: log.LoggerConfig = {\n level: 'DEBUG',\n handlers: ['console'],\n};\n\nexport function setupLogger(workerId: string) {\n log.setup({\n handlers: {\n console: new log.ConsoleHandler(getLogLevelFromEnv(), {\n formatter: (record: {\n loggerName: string;\n msg: string;\n args: unknown[];\n }) => {\n const prefix = `worker_id=${workerId}`;\n const module = record.loggerName;\n const msg = record.msg;\n\n // If there are additional args, pretty print them using console.log\n if (record.args.length > 0) {\n return `${prefix} [${module}] ${msg}`;\n }\n\n return `${prefix} [${module}] ${msg}`;\n },\n useColors: true,\n }),\n },\n\n loggers: {\n BatchProcessor: defaultLoggerConfig,\n EdgeWorker: defaultLoggerConfig,\n ExecutionController: defaultLoggerConfig,\n Heartbeat: defaultLoggerConfig,\n Logger: defaultLoggerConfig,\n MessageExecutor: defaultLoggerConfig,\n Worker: defaultLoggerConfig,\n WorkerLifecycle: defaultLoggerConfig,\n WorkerState: defaultLoggerConfig,\n spawnNewEdgeFunction: defaultLoggerConfig,\n },\n });\n}\n\n// Helper function to get logger for specific module\nexport function getLogger(module: string) {\n return log.getLogger(module);\n}\n", "import type { Json } from '../core/types.ts';\nimport type { PgmqMessageRecord } from './types.ts';\nimport type { Queue } from './Queue.ts';\nimport { getLogger } from '../core/Logger.ts';\n\nclass AbortError extends Error {\n constructor() {\n super('Operation aborted');\n this.name = 'AbortError';\n }\n}\n\n/**\n * A class that executes a message handler.\n *\n * It handles the execution of the message handler and retries or archives the message\n * based on the retry limit and delay.\n *\n * It also handles the abort signal and logs the error.\n */\nexport class MessageExecutor<TPayload extends Json> {\n private logger = getLogger('MessageExecutor');\n\n constructor(\n private 
readonly queue: Queue<TPayload>,\n private readonly record: PgmqMessageRecord<TPayload>,\n private readonly messageHandler: (\n message: TPayload\n ) => Promise<void> | void,\n private readonly signal: AbortSignal,\n private readonly retryLimit: number,\n private readonly retryDelay: number\n ) {}\n\n get msgId() {\n return this.record.msg_id;\n }\n\n async execute(): Promise<void> {\n try {\n if (this.signal.aborted) {\n throw new AbortError();\n }\n\n // Check if already aborted before starting\n this.signal.throwIfAborted();\n\n this.logger.debug(`Executing task ${this.msgId}...`);\n await this.messageHandler(this.record.message!);\n\n this.logger.debug(\n `Task ${this.msgId} completed successfully, archiving...`\n );\n await this.queue.archive(this.msgId);\n this.logger.debug(`Archived task ${this.msgId} successfully`);\n } catch (error) {\n await this.handleExecutionError(error);\n }\n }\n\n /**\n * Handles the error that occurred during execution.\n *\n * If the error is an AbortError, it means that the worker was aborted and stopping,\n * the message will reappear after the visibility timeout and be picked up by another worker.\n *\n * Otherwise, it proceeds with retry or archiving forever.\n */\n private async handleExecutionError(error: unknown) {\n if (error instanceof Error && error.name === 'AbortError') {\n this.logger.debug(`Aborted execution for ${this.msgId}`);\n // Do not throw - the worker was aborted and stopping,\n // the message will reappear after the visibility timeout\n // and be picked up by another worker\n } else {\n this.logger.debug(`Task ${this.msgId} failed with error: ${error}`);\n await this.retryOrArchive();\n }\n }\n\n /**\n * Retries the message if it is available.\n * Otherwise, archives the message forever and stops processing it.\n */\n private async retryOrArchive() {\n if (this.retryAvailable) {\n // adjust visibility timeout for message to appear after retryDelay\n this.logger.debug(`Retrying ${this.msgId} in 
${this.retryDelay} seconds`);\n await this.queue.setVt(this.msgId, this.retryDelay);\n } else {\n // archive message forever and stop processing it\n this.logger.debug(`Archiving ${this.msgId} forever`);\n await this.queue.archive(this.msgId);\n }\n }\n\n /**\n * Returns true if the message can be retried.\n */\n private get retryAvailable() {\n const readCountLimit = this.retryLimit + 1; // initial read also counts\n\n return this.record.read_ct < readCountLimit;\n }\n}\n", "import type postgres from 'postgres';\nimport type { WorkerRow } from './types.ts';\n\nexport class Queries {\n constructor(private readonly sql: postgres.Sql) {}\n\n async onWorkerStarted({\n queueName,\n workerId,\n edgeFunctionName,\n }: {\n queueName: string;\n workerId: string;\n edgeFunctionName: string;\n }): Promise<WorkerRow> {\n const [worker] = await this.sql<WorkerRow[]>`\n INSERT INTO edge_worker.workers (queue_name, worker_id, function_name)\n VALUES (${queueName}, ${workerId}, ${edgeFunctionName})\n RETURNING *;\n `;\n\n return worker;\n }\n\n async onWorkerStopped(workerRow: WorkerRow): Promise<WorkerRow> {\n const [worker] = await this.sql<WorkerRow[]>`\n UPDATE edge_worker.workers AS w\n SET stopped_at = clock_timestamp(), last_heartbeat_at = clock_timestamp()\n WHERE w.worker_id = ${workerRow.worker_id}\n RETURNING *;\n `;\n\n return worker;\n }\n\n async sendHeartbeat(workerRow: WorkerRow): Promise<void> {\n await this.sql<WorkerRow[]>`\n UPDATE edge_worker.workers AS w\n SET last_heartbeat_at = clock_timestamp()\n WHERE w.worker_id = ${workerRow.worker_id}\n RETURNING *;\n `;\n }\n}\n", "import type postgres from 'postgres';\nimport type { PgmqMessageRecord } from './types.ts';\nimport type { Json } from '../core/types.ts';\n\nexport class Queue<TPayload extends Json> {\n constructor(private readonly sql: postgres.Sql, readonly queueName: string) {}\n\n /**\n * Creates a queue if it doesn't exist.\n * If the queue already exists, this method does nothing.\n */\n async 
safeCreate() {\n return await this.sql`\n select * from pgmq.create(${this.queueName})\n where not exists (\n select 1 from pgmq.list_queues() where queue_name = ${this.queueName}\n );\n `;\n }\n\n /**\n * Drops a queue if it exists.\n * If the queue doesn't exist, this method does nothing.\n */\n async safeDrop() {\n return await this.sql`\n select * from pgmq.drop_queue(${this.queueName})\n where exists (\n select 1 from pgmq.list_queues() where queue_name = ${this.queueName}\n );\n `;\n }\n\n async archive(msgId: number): Promise<void> {\n await this.sql`\n SELECT pgmq.archive(queue_name => ${this.queueName}, msg_id => ${msgId}::bigint);\n `;\n }\n\n async archiveBatch(msgIds: number[]): Promise<void> {\n await this.sql`\n SELECT pgmq.archive(queue_name => ${this.queueName}, msg_ids => ${msgIds}::bigint[]);\n `;\n }\n\n async send(message: TPayload): Promise<void> {\n const msgJson = JSON.stringify(message);\n await this.sql`\n SELECT pgmq.send(queue_name => ${this.queueName}, msg => ${msgJson}::jsonb)\n `;\n }\n\n async readWithPoll(\n batchSize = 20,\n visibilityTimeout = 2,\n maxPollSeconds = 5,\n pollIntervalMs = 200\n ) {\n return await this.sql<PgmqMessageRecord<TPayload>[]>`\n SELECT *\n FROM edge_worker.read_with_poll(\n queue_name => ${this.queueName},\n vt => ${visibilityTimeout},\n qty => ${batchSize},\n max_poll_seconds => ${maxPollSeconds},\n poll_interval_ms => ${pollIntervalMs}\n );\n `;\n }\n\n /**\n * Sets the visibility timeout of a message to the current time plus the given offset.\n *\n * This is an inlined version of the pgmq.set_vt in order to fix the bug.\n * The original uses now() instead of clock_timestamp() which is problematic in transactions.\n * See more details here: https://github.com/tembo-io/pgmq/issues/367\n *\n * The only change made is now() replaced with clock_timestamp().\n */\n async setVt(\n msgId: number,\n vtOffsetSeconds: number\n ): Promise<PgmqMessageRecord<TPayload>> {\n const records = await 
this.sql<PgmqMessageRecord<TPayload>[]>`\n UPDATE ${this.sql('pgmq.q_' + this.queueName)}\n SET vt = (clock_timestamp() + make_interval(secs => ${vtOffsetSeconds}))\n WHERE msg_id = ${msgId}::bigint\n RETURNING *;\n `;\n return records[0];\n }\n}\n", "import type { Queue } from './Queue.ts';\nimport type { PgmqMessageRecord } from './types.ts';\nimport type { Json } from '../core/types.ts';\n\nexport interface PollerConfig {\n batchSize: number;\n maxPollSeconds: number;\n pollIntervalMs: number;\n visibilityTimeout: number;\n}\n\nexport class ReadWithPollPoller<TPayload extends Json> {\n constructor(\n protected readonly queue: Queue<TPayload>,\n protected readonly signal: AbortSignal,\n protected readonly config: PollerConfig\n ) {}\n\n async poll(): Promise<PgmqMessageRecord<TPayload>[]> {\n if (this.isAborted()) {\n return [];\n }\n\n return await this.queue.readWithPoll(\n this.config.batchSize,\n this.config.visibilityTimeout,\n this.config.maxPollSeconds,\n this.config.pollIntervalMs\n );\n }\n\n private isAborted(): boolean {\n return this.signal.aborted;\n }\n}\n", "import type postgres from 'postgres';\nimport type { IBatchProcessor, ILifecycle, WorkerBootstrap } from './types.ts';\nimport { getLogger, setupLogger } from './Logger.ts';\n\nexport class Worker {\n private lifecycle: ILifecycle;\n private logger = getLogger('Worker');\n private abortController = new AbortController();\n\n private batchProcessor: IBatchProcessor;\n private sql: postgres.Sql;\n\n constructor(\n batchProcessor: IBatchProcessor,\n lifecycle: ILifecycle,\n sql: postgres.Sql,\n ) {\n this.sql = sql;\n\n this.lifecycle = lifecycle;\n\n this.batchProcessor = batchProcessor;\n }\n\n async startOnlyOnce(workerBootstrap: WorkerBootstrap) {\n if (this.lifecycle.isRunning) {\n this.logger.debug('Worker already running, ignoring start request');\n return;\n }\n\n await this.start(workerBootstrap);\n }\n\n private async start(workerBootstrap: WorkerBootstrap) {\n 
setupLogger(workerBootstrap.workerId);\n\n try {\n await this.lifecycle.acknowledgeStart(workerBootstrap);\n\n while (this.isMainLoopActive) {\n try {\n await this.lifecycle.sendHeartbeat();\n } catch (error: unknown) {\n this.logger.error(`Error sending heartbeat: ${error}`);\n // Continue execution - a failed heartbeat shouldn't stop processing\n }\n\n try {\n await this.batchProcessor.processBatch();\n } catch (error: unknown) {\n this.logger.error(`Error processing batch: ${error}`);\n // Continue to next iteration - failed batch shouldn't stop the worker\n }\n }\n } catch (error) {\n this.logger.error(`Error in worker main loop: ${error}`);\n throw error;\n }\n }\n\n async stop() {\n // If the worker is already stopping or stopped, do nothing\n if (this.lifecycle.isStopping || this.lifecycle.isStopped) {\n return;\n }\n\n this.lifecycle.transitionToStopping();\n\n try {\n this.logger.info('-> Stopped accepting new messages');\n this.abortController.abort();\n\n this.logger.info('-> Waiting for pending tasks to complete...');\n await this.batchProcessor.awaitCompletion();\n this.logger.info('-> Pending tasks completed!');\n\n this.lifecycle.acknowledgeStop();\n\n this.logger.info('-> Closing SQL connection...');\n await this.sql.end();\n this.logger.info('-> SQL connection closed!');\n } catch (error) {\n this.logger.info(`Error during worker stop: ${error}`);\n throw error;\n }\n }\n\n get edgeFunctionName() {\n return this.lifecycle.edgeFunctionName;\n }\n\n /**\n * Returns true if worker state is Running and worker was not stopped\n */\n private get isMainLoopActive() {\n return this.lifecycle.isRunning && !this.isAborted;\n }\n\n private get isAborted() {\n return this.abortController.signal.aborted;\n }\n}\n", "import { ExecutionController } from '../core/ExecutionController.ts';\nimport { MessageExecutor } from './MessageExecutor.ts';\nimport { Queries } from '../core/Queries.ts';\nimport { Queue } from './Queue.ts';\nimport { ReadWithPollPoller } from 
'./ReadWithPollPoller.ts';\nimport type { Json } from '../core/types.ts';\nimport type { PgmqMessageRecord } from './types.ts';\nimport { Worker } from '../core/Worker.ts';\nimport postgres from 'postgres';\nimport { WorkerLifecycle } from '../core/WorkerLifecycle.ts';\nimport { BatchProcessor } from '../core/BatchProcessor.ts';\n\n/**\n * Configuration for the queue worker\n */\nexport type QueueWorkerConfig = {\n /**\n * PostgreSQL connection string.\n * If not provided, it will be read from the EDGE_WORKER_DB_URL environment variable.\n */\n connectionString?: string;\n\n /**\n * Name of the queue to poll for messages\n * @default 'tasks'\n */\n queueName?: string;\n\n /**\n * How many tasks are processed at the same time\n * @default 10\n */\n maxConcurrent?: number;\n\n /**\n * How many connections to the database are opened\n * @default 4\n */\n maxPgConnections?: number;\n\n /**\n * In-worker polling interval in seconds\n * @default 5\n */\n maxPollSeconds?: number;\n\n /**\n * In-database polling interval in milliseconds\n * @default 200\n */\n pollIntervalMs?: number;\n\n /**\n * How long to wait before retrying a failed job in seconds\n * @default 5\n */\n retryDelay?: number;\n\n /**\n * How many times to retry a failed job\n * @default 5\n */\n retryLimit?: number;\n\n /**\n * How long a job is invisible after reading in seconds.\n * If not successful, will reappear after this time.\n * @default 3\n */\n visibilityTimeout?: number;\n\n /**\n * Batch size for polling messages\n * @default 10\n */\n batchSize?: number;\n\n /**\n * Optional SQL client instance\n */\n sql?: postgres.Sql;\n};\n\n/**\n * Creates a new Worker instance for processing queue messages.\n *\n * @param handler - The message handler function that processes each message from the queue\n * @param config - Configuration options for the worker\n * @returns A configured Worker instance ready to be started\n */\nexport function createQueueWorker<TPayload extends Json>(\n handler: (message: 
TPayload) => Promise<void> | void,\n config: QueueWorkerConfig\n): Worker {\n type QueueMessage = PgmqMessageRecord<TPayload>;\n\n const abortController = new AbortController();\n const abortSignal = abortController.signal;\n\n // Use provided SQL connection if available, otherwise create one from connection string\n const sql =\n config.sql ||\n postgres(config.connectionString || '', {\n max: config.maxPgConnections,\n prepare: false,\n });\n\n const queue = new Queue<TPayload>(sql, config.queueName || 'tasks');\n const queries = new Queries(sql);\n\n const lifecycle = new WorkerLifecycle<TPayload>(queries, queue);\n\n const executorFactory = (record: QueueMessage, signal: AbortSignal) => {\n return new MessageExecutor(\n queue,\n record,\n handler,\n signal,\n config.retryLimit || 5,\n config.retryDelay || 3\n );\n };\n\n const poller = new ReadWithPollPoller(queue, abortSignal, {\n batchSize: config.batchSize || config.maxConcurrent || 10,\n maxPollSeconds: config.maxPollSeconds || 5,\n pollIntervalMs: config.pollIntervalMs || 200,\n visibilityTimeout: config.visibilityTimeout || 3,\n });\n\n const executionController = new ExecutionController<QueueMessage>(\n executorFactory,\n abortSignal,\n {\n maxConcurrent: config.maxConcurrent || 10,\n }\n );\n const batchProcessor = new BatchProcessor<QueueMessage>(\n executionController,\n poller,\n abortSignal\n );\n\n return new Worker(batchProcessor, lifecycle, sql);\n}\n", "import type { Queries } from './Queries.ts';\nimport type { WorkerRow } from './types.ts';\nimport { getLogger } from './Logger.ts';\n\nexport class Heartbeat {\n private logger = getLogger('Heartbeat');\n private lastHeartbeat = 0;\n\n constructor(\n private interval: number,\n private queries: Queries,\n private workerRow: WorkerRow\n ) {}\n\n async send(): Promise<void> {\n const now = Date.now();\n if (now - this.lastHeartbeat >= this.interval) {\n await this.queries.sendHeartbeat(this.workerRow);\n this.logger.debug('OK');\n 
this.lastHeartbeat = now;\n }\n }\n}\n", "import { getLogger } from './Logger.ts';\n\nexport enum States {\n /** The worker has been created but has not yet started. */\n Created = 'created',\n\n /** The worker is starting but has not yet started processing messages. */\n Starting = 'starting',\n\n /** The worker is processing messages. */\n Running = 'running',\n\n /** The worker stopped processing messages but is still releasing resources. */\n Stopping = 'stopping',\n\n /** The worker has stopped processing messages and released resources\n * and can be discarded. */\n Stopped = 'stopped',\n}\n\nexport const Transitions: Record<States, States[]> = {\n [States.Created]: [States.Starting],\n [States.Starting]: [States.Running],\n [States.Running]: [States.Stopping],\n [States.Stopping]: [States.Stopped],\n [States.Stopped]: [], // Terminal state - no valid transitions from here\n};\n\nexport class TransitionError extends Error {\n constructor(options: { from: States; to: States }) {\n super(`Cannot transition from ${options.from} to ${options.to}`);\n }\n}\n\n/**\n * Represents the state of a worker and exposes method for doing allowed transitions\n */\nexport class WorkerState {\n private logger = getLogger('WorkerState');\n private state: States = States.Created;\n\n get current() {\n return this.state;\n }\n\n get isCreated() {\n return this.state === States.Created;\n }\n\n get isStarting() {\n return this.state === States.Starting;\n }\n\n get isRunning() {\n return this.state === States.Running;\n }\n\n get isStopping() {\n return this.state === States.Stopping;\n }\n\n get isStopped() {\n return this.state === States.Stopped;\n }\n\n transitionTo(state: States) {\n this.logger.debug(\n `[WorkerState] Starting transition to '${state}' (current state: ${this.state})`\n );\n\n if (this.state === state) {\n return;\n }\n\n if (Transitions[this.state].includes(state)) {\n this.state = state;\n this.logger.debug(`[WorkerState] Transitioned to '${state}'`);\n } 
else {\n throw new TransitionError({\n from: this.state,\n to: state,\n });\n }\n }\n}\n", "import { Heartbeat } from './Heartbeat.ts';\nimport { getLogger } from './Logger.ts';\nimport type { Queries } from './Queries.ts';\nimport type { Queue } from '../queue/Queue.ts';\nimport type { ILifecycle, Json, WorkerBootstrap, WorkerRow } from './types.ts';\nimport { States, WorkerState } from './WorkerState.ts';\n\nexport interface LifecycleConfig {\n queueName: string;\n}\n\nexport class WorkerLifecycle<IMessage extends Json> implements ILifecycle {\n private workerState: WorkerState = new WorkerState();\n private heartbeat?: Heartbeat;\n private logger = getLogger('WorkerLifecycle');\n private queries: Queries;\n private queue: Queue<IMessage>;\n private workerRow?: WorkerRow;\n\n constructor(queries: Queries, queue: Queue<IMessage>) {\n this.queries = queries;\n this.queue = queue;\n }\n\n async acknowledgeStart(workerBootstrap: WorkerBootstrap): Promise<void> {\n this.workerState.transitionTo(States.Starting);\n\n this.logger.info(`Ensuring queue '${this.queue.queueName}' exists...`);\n await this.queue.safeCreate();\n\n this.workerRow = await this.queries.onWorkerStarted({\n queueName: this.queueName,\n ...workerBootstrap,\n });\n\n this.heartbeat = new Heartbeat(5000, this.queries, this.workerRow);\n\n this.workerState.transitionTo(States.Running);\n }\n\n acknowledgeStop() {\n this.workerState.transitionTo(States.Stopping);\n\n if (!this.workerRow) {\n throw new Error('Cannot stop worker: workerRow not set');\n }\n\n try {\n this.logger.debug('Acknowledging worker stop...');\n\n // TODO: commented out because we can live without this\n // but it is causing problems with DbHandler - workes does not have\n // enough time to fire this query before hard-terimnated\n // We can always check the heartbeat to see if it is still running\n //\n // await this.queries.onWorkerStopped(this.workerRow);\n\n this.workerState.transitionTo(States.Stopped);\n 
this.logger.debug('Worker stop acknowledged');\n } catch (error) {\n this.logger.debug(`Error acknowledging worker stop: ${error}`);\n throw error;\n }\n }\n\n get edgeFunctionName() {\n return this.workerRow?.function_name;\n }\n\n get queueName() {\n return this.queue.queueName;\n }\n\n async sendHeartbeat() {\n await this.heartbeat?.send();\n }\n\n get isRunning() {\n return this.workerState.isRunning;\n }\n\n get isStopping() {\n return this.workerState.isStopping;\n }\n\n get isStopped() {\n return this.workerState.isStopped;\n }\n\n transitionToStopping() {\n this.workerState.transitionTo(States.Stopping);\n }\n}\n", "import type { ExecutionController } from './ExecutionController.ts';\nimport type { IMessage, IPoller } from './types.ts';\nimport { getLogger } from './Logger.ts';\n\nexport class BatchProcessor<TMessage extends IMessage> {\n private logger = getLogger('BatchProcessor');\n\n constructor(\n private executionController: ExecutionController<TMessage>,\n private poller: IPoller<TMessage>,\n private signal: AbortSignal\n ) {\n this.executionController = executionController;\n this.signal = signal;\n this.poller = poller;\n }\n\n async processBatch() {\n this.logger.debug('Polling for new batch of messages...');\n const messageRecords = await this.poller.poll();\n\n if (this.signal.aborted) {\n this.logger.info('Discarding messageRecords because worker is stopping');\n return;\n }\n\n this.logger.debug(`Starting ${messageRecords.length} messages`);\n\n const startPromises = messageRecords.map(\n (message) => this.executionController.start(message)\n );\n await Promise.all(startPromises);\n }\n\n async awaitCompletion() {\n return await this.executionController.awaitCompletion();\n }\n}\n", "import { getLogger } from './core/Logger.ts';\n\n// @ts-ignore - TODO: fix the types\nconst SUPABASE_URL = Deno.env.get('SUPABASE_URL') as string;\nconst SUPABASE_ANON_KEY = Deno.env.get('SUPABASE_ANON_KEY') as string;\n\nconst logger = 
getLogger('spawnNewEdgeFunction');\n\nexport default async function spawnNewEdgeFunction(\n functionName: string = 'edge-worker'\n): Promise<void> {\n if (!functionName) {\n throw new Error('functionName cannot be null or empty');\n }\n\n logger.debug('Spawning a new Edge Function...');\n\n const response = await fetch(`${SUPABASE_URL}/functions/v1/${functionName}`, {\n method: 'POST',\n headers: {\n Authorization: `Bearer ${SUPABASE_ANON_KEY}`,\n 'Content-Type': 'application/json',\n }\n });\n\n logger.debug('Edge Function spawned successfully!');\n\n if (!response.ok) {\n throw new Error(\n `Edge function returned non-OK status: ${response.status} ${response.statusText}`\n );\n }\n}\n", "import type { Worker } from './core/Worker.ts';\nimport spawnNewEdgeFunction from './spawnNewEdgeFunction.ts';\nimport type { Json } from './core/types.ts';\nimport { getLogger, setupLogger } from './core/Logger.ts';\nimport {\n createQueueWorker,\n type QueueWorkerConfig,\n} from './queue/createQueueWorker.ts';\n\n/**\n * Configuration options for the EdgeWorker.\n */\nexport type EdgeWorkerConfig = QueueWorkerConfig;\n\n/**\n * EdgeWorker is the main entry point for creating and starting edge workers.\n *\n * It provides a simple interface for starting a worker that processes messages from a queue.\n *\n * @example\n * ```typescript\n * import { EdgeWorker } from '@pgflow/edge-worker';\n *\n * EdgeWorker.start(async (message) => {\n * // Process the message\n * console.log('Processing message:', message);\n * }, {\n * queueName: 'my-queue',\n * maxConcurrent: 5,\n * retryLimit: 3\n * });\n * ```\n */\nexport class EdgeWorker {\n private static logger = getLogger('EdgeWorker');\n private static wasCalled = false;\n\n /**\n * Start the EdgeWorker with the given message handler and configuration.\n *\n * @param handler - Function that processes each message from the queue\n * @param config - Configuration options for the worker\n *\n * @example\n * ```typescript\n * 
EdgeWorker.start(handler, {\n * // name of the queue to poll for messages\n * queueName: 'tasks',\n *\n * // how many tasks are processed at the same time\n * maxConcurrent: 10,\n *\n * // how many connections to the database are opened\n * maxPgConnections: 4,\n *\n * // in-worker polling interval\n * maxPollSeconds: 5,\n *\n * // in-database polling interval\n * pollIntervalMs: 200,\n *\n * // how long to wait before retrying a failed job\n * retryDelay: 5,\n *\n * // how many times to retry a failed job\n * retryLimit: 5,\n *\n * // how long a job is invisible after reading\n * // if not successful, will reappear after this time\n * visibilityTimeout: 3,\n * });\n * ```\n */\n static start<TPayload extends Json = Json>(\n handler: (message: TPayload) => Promise<void> | void,\n config: EdgeWorkerConfig = {}\n ) {\n this.ensureFirstCall();\n\n // Get connection string from config or environment\n const connectionString =\n config.connectionString || this.getConnectionString();\n\n // Create a complete configuration object with defaults\n const completeConfig: EdgeWorkerConfig = {\n // Pass through any config options first\n ...config,\n\n // Then override with defaults for missing values\n queueName: config.queueName || 'tasks',\n maxConcurrent: config.maxConcurrent ?? 10,\n maxPgConnections: config.maxPgConnections ?? 4,\n maxPollSeconds: config.maxPollSeconds ?? 5,\n pollIntervalMs: config.pollIntervalMs ?? 200,\n retryDelay: config.retryDelay ?? 5,\n retryLimit: config.retryLimit ?? 5,\n visibilityTimeout: config.visibilityTimeout ?? 
3,\n\n // Ensure connectionString is always set\n connectionString,\n };\n\n this.setupRequestHandler(handler, completeConfig);\n }\n\n private static ensureFirstCall() {\n if (this.wasCalled) {\n throw new Error('EdgeWorker.start() can only be called once');\n }\n this.wasCalled = true;\n }\n\n private static getConnectionString(): string {\n // @ts-ignore - TODO: fix the types\n const connectionString = Deno.env.get('EDGE_WORKER_DB_URL');\n if (!connectionString) {\n const message =\n 'EDGE_WORKER_DB_URL is not set!\\n' +\n 'See https://pgflow.pages.dev/edge-worker/prepare-environment/#prepare-connection-string';\n throw new Error(message);\n }\n return connectionString;\n }\n\n private static setupShutdownHandler(worker: Worker) {\n globalThis.onbeforeunload = async () => {\n if (worker.edgeFunctionName) {\n await spawnNewEdgeFunction(worker.edgeFunctionName);\n }\n\n worker.stop();\n };\n\n // use waitUntil to prevent the function from exiting\n // @ts-ignore: TODO: fix the types\n EdgeRuntime.waitUntil(new Promise(() => {}));\n }\n\n private static setupRequestHandler<TPayload extends Json>(\n handler: (message: TPayload) => Promise<void> | void,\n workerConfig: EdgeWorkerConfig\n ) {\n let worker: Worker | null = null;\n\n Deno.serve({}, (req) => {\n if (!worker) {\n const edgeFunctionName = this.extractFunctionName(req);\n const sbExecutionId = Deno.env.get('SB_EXECUTION_ID')!;\n setupLogger(sbExecutionId);\n\n this.logger.info(`HTTP Request: ${edgeFunctionName}`);\n // Create the worker with all configuration options\n\n worker = createQueueWorker(handler, workerConfig);\n worker.startOnlyOnce({\n edgeFunctionName,\n workerId: sbExecutionId,\n });\n\n this.setupShutdownHandler(worker);\n }\n\n return new Response('ok', {\n headers: { 'Content-Type': 'application/json' },\n });\n });\n }\n\n private static extractFunctionName(req: Request): string {\n return new URL(req.url).pathname.replace(/^\\/+|\\/+$/g, '');\n }\n}\n", "import type { StepTaskRecord, 
IPgflowClient } from './types.ts';\nimport type { IPoller } from '../core/types.ts';\nimport { getLogger } from '../core/Logger.ts';\nimport type { AnyFlow } from '@pgflow/dsl';\n\nexport interface StepTaskPollerConfig {\n batchSize: number;\n queueName: string;\n}\n\n/**\n * A poller that retrieves flow tasks using an IPgflowClient\n */\nexport class StepTaskPoller<TFlow extends AnyFlow>\n implements IPoller<StepTaskRecord<TFlow>>\n{\n private logger = getLogger('StepTaskPoller');\n\n constructor(\n private readonly adapter: IPgflowClient<TFlow>,\n private readonly signal: AbortSignal,\n private readonly config: StepTaskPollerConfig\n ) {}\n\n async poll(): Promise<StepTaskRecord<TFlow>[]> {\n if (this.isAborted()) {\n this.logger.debug('Polling aborted, returning empty array');\n return [];\n }\n\n this.logger.debug(\n `Polling for flow tasks with batch size ${this.config.batchSize}`\n );\n\n try {\n const tasks = await this.adapter.pollForTasks(\n this.config.queueName,\n this.config.batchSize\n );\n this.logger.debug(`Retrieved ${tasks.length} flow tasks`);\n return tasks;\n } catch (err: unknown) {\n this.logger.error(`Error polling for flow tasks: ${err}`);\n return [];\n }\n }\n\n private isAborted(): boolean {\n return this.signal.aborted;\n }\n}\n", "import type { AnyFlow } from '@pgflow/dsl';\nimport type { StepTaskRecord, IPgflowClient } from './types.ts';\nimport type { IExecutor } from '../core/types.ts';\nimport { getLogger } from '../core/Logger.ts';\n\nclass AbortError extends Error {\n constructor() {\n super('Operation aborted');\n this.name = 'AbortError';\n }\n}\n\n/**\n * An executor that processes step tasks using an IPgflowClient\n * with strong typing for the flow's step handlers\n */\nexport class StepTaskExecutor<TFlow extends AnyFlow> implements IExecutor {\n private logger = getLogger('StepTaskExecutor');\n\n constructor(\n private readonly flow: TFlow,\n private readonly task: StepTaskRecord<TFlow>,\n private readonly adapter: 
IPgflowClient<TFlow>,\n private readonly signal: AbortSignal\n ) {}\n\n get msgId() {\n return this.task.msg_id;\n }\n\n async execute(): Promise<void> {\n try {\n if (this.signal.aborted) {\n throw new AbortError();\n }\n\n // Check if already aborted before starting\n this.signal.throwIfAborted();\n\n const stepSlug = this.task.step_slug;\n this.logger.debug(\n `Executing step task ${this.task.msg_id} for step ${stepSlug}`\n );\n\n // Get the step handler from the flow with proper typing\n const stepDef = this.flow.getStepDefinition(stepSlug);\n\n if (!stepDef) {\n throw new Error(`No step definition found for slug=${stepSlug}`);\n }\n\n // !!! HANDLER EXECUTION !!!\n const result = await stepDef.handler(this.task.input);\n // !!! HANDLER EXECUTION !!!\n\n this.logger.debug(\n `step task ${this.task.msg_id} completed successfully, marking as complete`\n );\n await this.adapter.completeTask(this.task, result);\n\n this.logger.debug(`step task ${this.task.msg_id} marked as complete`);\n } catch (error) {\n await this.handleExecutionError(error);\n }\n }\n\n /**\n * Handles the error that occurred during execution.\n *\n * If the error is an AbortError, it means that the worker was aborted and stopping,\n * the task will be picked up by another worker later.\n *\n * Otherwise, it marks the task as failed.\n */\n private async handleExecutionError(error: unknown) {\n if (error instanceof Error && error.name === 'AbortError') {\n this.logger.debug(`Aborted execution for step task ${this.task.msg_id}`);\n // Do not mark as failed - the worker was aborted and stopping,\n // the task will be picked up by another worker later\n } else {\n this.logger.error(\n `step task ${this.task.msg_id} failed with error: ${error}`\n );\n await this.adapter.failTask(this.task, error);\n }\n }\n}\n", "import type postgres from 'postgres';\nimport type {\n StepTaskRecord,\n IPgflowClient,\n StepTaskKey,\n RunRow,\n} from './types.ts';\nimport type { Json } from './types.ts';\nimport type 
{ AnyFlow, ExtractFlowInput } from '@pgflow/dsl';\n\n/**\n * Implementation of IPgflowClient that uses direct SQL calls to pgflow functions\n */\nexport class PgflowSqlClient<TFlow extends AnyFlow>\n implements IPgflowClient<TFlow>\n{\n constructor(private readonly sql: postgres.Sql) {}\n\n async pollForTasks(\n queueName: string,\n batchSize = 20,\n visibilityTimeout = 2,\n maxPollSeconds = 5,\n pollIntervalMs = 200\n ): Promise<StepTaskRecord<TFlow>[]> {\n return await this.sql<StepTaskRecord<TFlow>[]>`\n SELECT *\n FROM pgflow.poll_for_tasks(\n queue_name => ${queueName},\n vt => ${visibilityTimeout},\n qty => ${batchSize},\n max_poll_seconds => ${maxPollSeconds},\n poll_interval_ms => ${pollIntervalMs}\n );\n `;\n }\n\n async completeTask(stepTask: StepTaskKey, output?: Json): Promise<void> {\n await this.sql`\n SELECT pgflow.complete_task(\n run_id => ${stepTask.run_id}::uuid,\n step_slug => ${stepTask.step_slug}::text,\n task_index => ${0}::int,\n output => ${this.sql.json(output || null)}::jsonb\n );\n `;\n }\n\n async failTask(stepTask: StepTaskKey, error: unknown): Promise<void> {\n const errorString =\n typeof error === 'string'\n ? error\n : error instanceof Error\n ? 
error.message\n : JSON.stringify(error);\n\n await this.sql`\n SELECT pgflow.fail_task(\n run_id => ${stepTask.run_id}::uuid,\n step_slug => ${stepTask.step_slug}::text,\n task_index => ${0}::int,\n error_message => ${errorString}::text\n );\n `;\n }\n\n async startFlow<TFlow extends AnyFlow>(\n flow: TFlow,\n input: ExtractFlowInput<TFlow>\n ): Promise<RunRow> {\n const results = await this.sql<RunRow[]>`\n SELECT * FROM pgflow.start_flow(${flow.slug}::text, ${this.sql.json(\n input\n )}::jsonb);\n `;\n\n if (results.length === 0) {\n throw new Error(`Failed to start flow ${flow.slug}`);\n }\n\n const [flowRun] = results;\n\n return flowRun;\n }\n}\n", "import type { AnyFlow } from '@pgflow/dsl';\nimport type { EdgeWorkerConfig } from '../EdgeWorker.ts';\nimport { ExecutionController } from '../core/ExecutionController.ts';\nimport { StepTaskPoller, type StepTaskPollerConfig } from './StepTaskPoller.ts';\nimport { StepTaskExecutor } from './StepTaskExecutor.ts';\nimport { PgflowSqlClient } from '@pgflow/core';\nimport { Queries } from '../core/Queries.ts';\nimport type { StepTaskRecord } from './types.ts';\nimport type { IExecutor } from '../core/types.ts';\nimport { Worker } from '../core/Worker.ts';\nimport postgres from 'postgres';\nimport { FlowWorkerLifecycle } from './FlowWorkerLifecycle.ts';\nimport { BatchProcessor } from '../core/BatchProcessor.ts';\nimport { getLogger } from '../core/Logger.ts';\n\n/**\n * Configuration for the flow worker\n */\nexport type FlowWorkerConfig = EdgeWorkerConfig & {\n maxConcurrent?: number;\n connectionString?: string;\n sql?: postgres.Sql;\n maxPgConnections?: number;\n batchSize?: number;\n};\n\n/**\n * Creates a new Worker instance for processing flow tasks.\n *\n * @param flow - The Flow DSL definition\n * @param config - Configuration options for the worker\n * @returns A configured Worker instance ready to be started\n */\nexport function createFlowWorker<TFlow extends AnyFlow>(\n flow: TFlow,\n config: 
FlowWorkerConfig\n): Worker {\n const logger = getLogger('createFlowWorker');\n\n // Create abort controller for graceful shutdown\n const abortController = new AbortController();\n const abortSignal = abortController.signal;\n\n if (!config.sql && !config.connectionString) {\n throw new Error(\n \"Either 'sql' or 'connectionString' must be provided in FlowWorkerConfig.\"\n );\n }\n\n const sql =\n config.sql ||\n postgres(config.connectionString!, {\n max: config.maxPgConnections,\n prepare: false,\n });\n\n // Create the pgflow adapter\n const pgflowAdapter = new PgflowSqlClient<TFlow>(sql);\n\n // Use flow slug as queue name, or fallback to 'tasks'\n const queueName = flow.slug || 'tasks';\n logger.debug(`Using queue name: ${queueName}`);\n\n // Create specialized FlowWorkerLifecycle with the proxied queue and flow\n const queries = new Queries(sql);\n const lifecycle = new FlowWorkerLifecycle<TFlow>(queries, flow);\n\n // Create StepTaskPoller\n const pollerConfig: StepTaskPollerConfig = {\n batchSize: config.batchSize || 10,\n queueName: flow.slug,\n };\n const poller = new StepTaskPoller<TFlow>(\n pgflowAdapter,\n abortSignal,\n pollerConfig\n );\n\n // Create executor factory with proper typing\n const executorFactory = (\n record: StepTaskRecord<TFlow>,\n signal: AbortSignal\n ): IExecutor => {\n return new StepTaskExecutor<TFlow>(flow, record, pgflowAdapter, signal);\n };\n\n // Create ExecutionController\n const executionController = new ExecutionController<StepTaskRecord<TFlow>>(\n executorFactory,\n abortSignal,\n {\n maxConcurrent: config.maxConcurrent || 10,\n }\n );\n\n // Create BatchProcessor\n const batchProcessor = new BatchProcessor<StepTaskRecord<TFlow>>(\n executionController,\n poller,\n abortSignal\n );\n\n // Return Worker\n return new Worker(batchProcessor, lifecycle, sql);\n}\n", "import { Heartbeat } from '../core/Heartbeat.ts';\nimport { getLogger } from '../core/Logger.ts';\nimport type { Queries } from '../core/Queries.ts';\nimport 
type { ILifecycle, WorkerBootstrap, WorkerRow } from '../core/types.ts';\nimport { States, WorkerState } from '../core/WorkerState.ts';\nimport type { AnyFlow } from '@pgflow/dsl';\n\n/**\n * A specialized WorkerLifecycle for Flow-based workers that is aware of the Flow's step types\n */\nexport class FlowWorkerLifecycle<TFlow extends AnyFlow> implements ILifecycle {\n private workerState: WorkerState = new WorkerState();\n private heartbeat?: Heartbeat;\n private logger = getLogger('FlowWorkerLifecycle');\n private queries: Queries;\n private workerRow?: WorkerRow;\n private flow: TFlow;\n\n constructor(queries: Queries, flow: TFlow) {\n this.queries = queries;\n this.flow = flow;\n }\n\n async acknowledgeStart(workerBootstrap: WorkerBootstrap): Promise<void> {\n this.workerState.transitionTo(States.Starting);\n\n this.workerRow = await this.queries.onWorkerStarted({\n queueName: this.queueName,\n ...workerBootstrap,\n });\n\n this.heartbeat = new Heartbeat(5000, this.queries, this.workerRow);\n\n this.workerState.transitionTo(States.Running);\n }\n\n acknowledgeStop() {\n this.workerState.transitionTo(States.Stopping);\n\n if (!this.workerRow) {\n throw new Error('Cannot stop worker: workerRow not set');\n }\n\n try {\n this.logger.debug('Acknowledging worker stop...');\n this.workerState.transitionTo(States.Stopped);\n this.logger.debug('Worker stop acknowledged');\n } catch (error) {\n this.logger.debug(`Error acknowledging worker stop: ${error}`);\n throw error;\n }\n }\n\n get edgeFunctionName() {\n return this.workerRow?.function_name;\n }\n\n get queueName() {\n return this.flow.slug;\n }\n\n async sendHeartbeat() {\n await this.heartbeat?.send();\n }\n\n get isRunning() {\n return this.workerState.isRunning;\n }\n\n get isStopping() {\n return this.workerState.isStopping;\n }\n\n get isStopped() {\n return this.workerState.isStopped;\n }\n\n transitionToStopping() {\n this.workerState.transitionTo(States.Stopping);\n }\n}\n"],
5
+ "mappings": ";AAAA,SAAS,gBAA4C;;;ACArD,YAAY,SAAS;AAErB,SAAS,qBAAoC;AAC3C,QAAM,cAAc;AAAA;AAAA,IAElB;AAAA,IACA;AAAA;AAAA,IAEA;AAAA;AAAA,EAEF;AACA,QAAM,WAAW,KAAK,IAAI,IAAI,uBAAuB,GAAG,YAAY;AAEpE,MAAI,YAAY,CAAC,YAAY,SAAS,QAAQ,GAAG;AAC/C,YAAQ,KAAK,sBAAsB,QAAQ,0BAA0B;AACrE,WAAO;AAAA,EACT;AAEA,SAAQ,YAA8B;AACxC;AAEA,IAAM,sBAAwC;AAAA,EAC5C,OAAO;AAAA,EACP,UAAU,CAAC,SAAS;AACtB;AAEO,SAAS,YAAY,UAAkB;AAC5C,EAAI,UAAM;AAAA,IACR,UAAU;AAAA,MACR,SAAS,IAAQ,mBAAe,mBAAmB,GAAG;AAAA,QACpD,WAAW,CAAC,WAIN;AACJ,gBAAM,SAAS,aAAa,QAAQ;AACpC,gBAAM,SAAS,OAAO;AACtB,gBAAM,MAAM,OAAO;AAGnB,cAAI,OAAO,KAAK,SAAS,GAAG;AAC1B,mBAAO,GAAG,MAAM,KAAK,MAAM,KAAK,GAAG;AAAA,UACrC;AAEA,iBAAO,GAAG,MAAM,KAAK,MAAM,KAAK,GAAG;AAAA,QACrC;AAAA,QACA,WAAW;AAAA,MACb,CAAC;AAAA,IACH;AAAA,IAEA,SAAS;AAAA,MACP,gBAAgB;AAAA,MAChB,YAAY;AAAA,MACZ,qBAAqB;AAAA,MACrB,WAAW;AAAA,MACX,QAAQ;AAAA,MACR,iBAAiB;AAAA,MACjB,QAAQ;AAAA,MACR,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,sBAAsB;AAAA,IACxB;AAAA,EACF,CAAC;AACH;AAGO,SAASA,WAAU,QAAgB;AACxC,SAAW,cAAU,MAAM;AAC7B;;;AD5DO,IAAM,sBAAN,MAAqD;AAAA,EAM1D,YACE,iBACA,aACA,QACA;AATF,SAAQ,SAASC,WAAU,qBAAqB;AAU9C,SAAK,SAAS;AACd,SAAK,iBAAiB;AACtB,SAAK,eAAe,SAAS,OAAO,aAAa;AAAA,EACnD;AAAA,EAEA,MAAM,MAAM,QAAkB;AAC5B,UAAM,WAAW,KAAK,eAAe,QAAQ,KAAK,MAAM;AAExD,SAAK,OAAO,KAAK,gCAAgC,SAAS,KAAK,EAAE;AAEjE,WAAO,MAAM,KAAK,aAAa,IAAI,YAAY;AAC7C,UAAI;AACF,aAAK,OAAO,MAAM,kBAAkB,SAAS,KAAK,KAAK;AACvD,cAAM,SAAS,QAAQ;AACvB,aAAK,OAAO,MAAM,4BAA4B,SAAS,KAAK,EAAE;AAAA,MAChE,SAAS,OAAO;AACd,aAAK,OAAO,MAAM,wBAAwB,SAAS,KAAK,KAAK,KAAK;AAClE,cAAM;AAAA,MACR;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,kBAAkB;AACtB,UAAM,SAAS,KAAK,aAAa,OAAO;AACxC,UAAM,MAAM,KAAK,aAAa,KAAK;AAEnC,SAAK,OAAO;AAAA,MACV,oDAAoD,MAAM,KAAK,GAAG;AAAA,IACpE;AACA,UAAM,KAAK,aAAa,KAAK;AAAA,EAC/B;AACF;;;AE7CA,IAAM,aAAN,cAAyB,MAAM;AAAA,EAC7B,cAAc;AACZ,UAAM,mBAAmB;AACzB,SAAK,OAAO;AAAA,EACd;AACF;AAUO,IAAM,kBAAN,MAA6C;AAAA,EAGlD,YACmB,OACA,QACA,gBAGA,QACA,YACA,YACjB;AARiB;AACA;AACA;AAGA;AACA;AACA;AAVnB,SAAQ,SAASC,WAAU,iBAAiB;AAAA,EAWzC;AAAA,EAEH,IAAI,QAAQ;AACV,WAAO,KAAK,OAAO;AAAA,EACrB;A
AAA,EAEA,MAAM,UAAyB;AAC7B,QAAI;AACF,UAAI,KAAK,OAAO,SAAS;AACvB,cAAM,IAAI,WAAW;AAAA,MACvB;AAGA,WAAK,OAAO,eAAe;AAE3B,WAAK,OAAO,MAAM,kBAAkB,KAAK,KAAK,KAAK;AACnD,YAAM,KAAK,eAAe,KAAK,OAAO,OAAQ;AAE9C,WAAK,OAAO;AAAA,QACV,QAAQ,KAAK,KAAK;AAAA,MACpB;AACA,YAAM,KAAK,MAAM,QAAQ,KAAK,KAAK;AACnC,WAAK,OAAO,MAAM,iBAAiB,KAAK,KAAK,eAAe;AAAA,IAC9D,SAAS,OAAO;AACd,YAAM,KAAK,qBAAqB,KAAK;AAAA,IACvC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAc,qBAAqB,OAAgB;AACjD,QAAI,iBAAiB,SAAS,MAAM,SAAS,cAAc;AACzD,WAAK,OAAO,MAAM,yBAAyB,KAAK,KAAK,EAAE;AAAA,IAIzD,OAAO;AACL,WAAK,OAAO,MAAM,QAAQ,KAAK,KAAK,uBAAuB,KAAK,EAAE;AAClE,YAAM,KAAK,eAAe;AAAA,IAC5B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,iBAAiB;AAC7B,QAAI,KAAK,gBAAgB;AAEvB,WAAK,OAAO,MAAM,YAAY,KAAK,KAAK,OAAO,KAAK,UAAU,UAAU;AACxE,YAAM,KAAK,MAAM,MAAM,KAAK,OAAO,KAAK,UAAU;AAAA,IACpD,OAAO;AAEL,WAAK,OAAO,MAAM,aAAa,KAAK,KAAK,UAAU;AACnD,YAAM,KAAK,MAAM,QAAQ,KAAK,KAAK;AAAA,IACrC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,IAAY,iBAAiB;AAC3B,UAAM,iBAAiB,KAAK,aAAa;AAEzC,WAAO,KAAK,OAAO,UAAU;AAAA,EAC/B;AACF;;;ACrGO,IAAM,UAAN,MAAc;AAAA,EACnB,YAA6B,KAAmB;AAAnB;AAAA,EAAoB;AAAA,EAEjD,MAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAIuB;AACrB,UAAM,CAAC,MAAM,IAAI,MAAM,KAAK;AAAA;AAAA,gBAEhB,SAAS,KAAK,QAAQ,KAAK,gBAAgB;AAAA;AAAA;AAIvD,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,gBAAgB,WAA0C;AAC9D,UAAM,CAAC,MAAM,IAAI,MAAM,KAAK;AAAA;AAAA;AAAA,4BAGJ,UAAU,SAAS;AAAA;AAAA;AAI3C,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,cAAc,WAAqC;AACvD,UAAM,KAAK;AAAA;AAAA;AAAA,4BAGa,UAAU,SAAS;AAAA;AAAA;AAAA,EAG7C;AACF;;;ACvCO,IAAM,QAAN,MAAmC;AAAA,EACxC,YAA6B,KAA4B,WAAmB;AAA/C;AAA4B;AAAA,EAAoB;AAAA;AAAA;AAAA;AAAA;AAAA,EAM7E,MAAM,aAAa;AACjB,WAAO,MAAM,KAAK;AAAA,oCACc,KAAK,SAAS;AAAA;AAAA,gEAEc,KAAK,SAAS;AAAA;AAAA;AAAA,EAG5E;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,WAAW;AACf,WAAO,MAAM,KAAK;AAAA,wCACkB,KAAK,SAAS;AAAA;AAAA,gEAEU,KAAK,SAAS;AAAA;AAAA;AAAA,EAG5E;AAAA,EAEA,MAAM,QAAQ,OAA8B;AAC1C,UAAM,KAAK;AAAA,0CAC2B,KAAK,SAAS,eAAe,KAAK;AAAA;AAAA,EAE1E;AAAA,EAEA,MAAM,aAAa,QAAiC;AAClD,UAAM,KAAK;AAAA,0CAC2B,KAAK,SAAS,gBAAgB,MAA
M;AAAA;AAAA,EAE5E;AAAA,EAEA,MAAM,KAAK,SAAkC;AAC3C,UAAM,UAAU,KAAK,UAAU,OAAO;AACtC,UAAM,KAAK;AAAA,uCACwB,KAAK,SAAS,YAAY,OAAO;AAAA;AAAA,EAEtE;AAAA,EAEA,MAAM,aACJ,YAAY,IACZ,oBAAoB,GACpB,iBAAiB,GACjB,iBAAiB,KACjB;AACA,WAAO,MAAM,KAAK;AAAA;AAAA;AAAA,wBAGE,KAAK,SAAS;AAAA,gBACtB,iBAAiB;AAAA,iBAChB,SAAS;AAAA,8BACI,cAAc;AAAA,8BACd,cAAc;AAAA;AAAA;AAAA,EAG1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,MACJ,OACA,iBACsC;AACtC,UAAM,UAAU,MAAM,KAAK;AAAA,eAChB,KAAK,IAAI,YAAY,KAAK,SAAS,CAAC;AAAA,4DACS,eAAe;AAAA,uBACpD,KAAK;AAAA;AAAA;AAGxB,WAAO,QAAQ,CAAC;AAAA,EAClB;AACF;;;AChFO,IAAM,qBAAN,MAAgD;AAAA,EACrD,YACqB,OACA,QACA,QACnB;AAHmB;AACA;AACA;AAAA,EAClB;AAAA,EAEH,MAAM,OAA+C;AACnD,QAAI,KAAK,UAAU,GAAG;AACpB,aAAO,CAAC;AAAA,IACV;AAEA,WAAO,MAAM,KAAK,MAAM;AAAA,MACtB,KAAK,OAAO;AAAA,MACZ,KAAK,OAAO;AAAA,MACZ,KAAK,OAAO;AAAA,MACZ,KAAK,OAAO;AAAA,IACd;AAAA,EACF;AAAA,EAEQ,YAAqB;AAC3B,WAAO,KAAK,OAAO;AAAA,EACrB;AACF;;;AC9BO,IAAM,SAAN,MAAa;AAAA,EAQlB,YACE,gBACA,WACA,KACA;AAVF,SAAQ,SAASC,WAAU,QAAQ;AACnC,SAAQ,kBAAkB,IAAI,gBAAgB;AAU5C,SAAK,MAAM;AAEX,SAAK,YAAY;AAEjB,SAAK,iBAAiB;AAAA,EACxB;AAAA,EAEA,MAAM,cAAc,iBAAkC;AACpD,QAAI,KAAK,UAAU,WAAW;AAC5B,WAAK,OAAO,MAAM,gDAAgD;AAClE;AAAA,IACF;AAEA,UAAM,KAAK,MAAM,eAAe;AAAA,EAClC;AAAA,EAEA,MAAc,MAAM,iBAAkC;AACpD,gBAAY,gBAAgB,QAAQ;AAEpC,QAAI;AACF,YAAM,KAAK,UAAU,iBAAiB,eAAe;AAErD,aAAO,KAAK,kBAAkB;AAC5B,YAAI;AACF,gBAAM,KAAK,UAAU,cAAc;AAAA,QACrC,SAAS,OAAgB;AACvB,eAAK,OAAO,MAAM,4BAA4B,KAAK,EAAE;AAAA,QAEvD;AAEA,YAAI;AACF,gBAAM,KAAK,eAAe,aAAa;AAAA,QACzC,SAAS,OAAgB;AACvB,eAAK,OAAO,MAAM,2BAA2B,KAAK,EAAE;AAAA,QAEtD;AAAA,MACF;AAAA,IACF,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,8BAA8B,KAAK,EAAE;AACvD,YAAM;AAAA,IACR;AAAA,EACF;AAAA,EAEA,MAAM,OAAO;AAEX,QAAI,KAAK,UAAU,cAAc,KAAK,UAAU,WAAW;AACzD;AAAA,IACF;AAEA,SAAK,UAAU,qBAAqB;AAEpC,QAAI;AACF,WAAK,OAAO,KAAK,mCAAmC;AACpD,WAAK,gBAAgB,MAAM;AAE3B,WAAK,OAAO,KAAK,6CAA6C;AAC9D,YAAM,KAAK,eAAe,gBAAgB;AAC1C,WAAK,OAAO,KAAK,6BAA6B;AAE9C,WAAK,UAAU,gBAAgB;AAE/B,WAAK,OAAO,KAAK,8BAA8B;AAC/C,YAAM,KAAK,IAAI,IAAI;AACnB,WAAK,OAAO,KAAK,2BAA2B;AAAA,IAC9
C,SAAS,OAAO;AACd,WAAK,OAAO,KAAK,6BAA6B,KAAK,EAAE;AACrD,YAAM;AAAA,IACR;AAAA,EACF;AAAA,EAEA,IAAI,mBAAmB;AACrB,WAAO,KAAK,UAAU;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA,EAKA,IAAY,mBAAmB;AAC7B,WAAO,KAAK,UAAU,aAAa,CAAC,KAAK;AAAA,EAC3C;AAAA,EAEA,IAAY,YAAY;AACtB,WAAO,KAAK,gBAAgB,OAAO;AAAA,EACrC;AACF;;;AC7FA,OAAO,cAAc;;;ACJd,IAAM,YAAN,MAAgB;AAAA,EAIrB,YACU,UACA,SACA,WACR;AAHQ;AACA;AACA;AANV,SAAQ,SAASC,WAAU,WAAW;AACtC,SAAQ,gBAAgB;AAAA,EAMrB;AAAA,EAEH,MAAM,OAAsB;AAC1B,UAAM,MAAM,KAAK,IAAI;AACrB,QAAI,MAAM,KAAK,iBAAiB,KAAK,UAAU;AAC7C,YAAM,KAAK,QAAQ,cAAc,KAAK,SAAS;AAC/C,WAAK,OAAO,MAAM,IAAI;AACtB,WAAK,gBAAgB;AAAA,IACvB;AAAA,EACF;AACF;;;ACFO,IAAM,cAAwC;AAAA,EACnD,CAAC,uBAAc,GAAG,CAAC,yBAAe;AAAA,EAClC,CAAC,yBAAe,GAAG,CAAC,uBAAc;AAAA,EAClC,CAAC,uBAAc,GAAG,CAAC,yBAAe;AAAA,EAClC,CAAC,yBAAe,GAAG,CAAC,uBAAc;AAAA,EAClC,CAAC,uBAAc,GAAG,CAAC;AAAA;AACrB;AAEO,IAAM,kBAAN,cAA8B,MAAM;AAAA,EACzC,YAAY,SAAuC;AACjD,UAAM,0BAA0B,QAAQ,IAAI,OAAO,QAAQ,EAAE,EAAE;AAAA,EACjE;AACF;AAKO,IAAM,cAAN,MAAkB;AAAA,EAAlB;AACL,SAAQ,SAASC,WAAU,aAAa;AACxC,SAAQ,QAAgB;AAAA;AAAA,EAExB,IAAI,UAAU;AACZ,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,UAAU;AAAA,EACxB;AAAA,EAEA,IAAI,aAAa;AACf,WAAO,KAAK,UAAU;AAAA,EACxB;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,UAAU;AAAA,EACxB;AAAA,EAEA,IAAI,aAAa;AACf,WAAO,KAAK,UAAU;AAAA,EACxB;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,UAAU;AAAA,EACxB;AAAA,EAEA,aAAa,OAAe;AAC1B,SAAK,OAAO;AAAA,MACV,yCAAyC,KAAK,qBAAqB,KAAK,KAAK;AAAA,IAC/E;AAEA,QAAI,KAAK,UAAU,OAAO;AACxB;AAAA,IACF;AAEA,QAAI,YAAY,KAAK,KAAK,EAAE,SAAS,KAAK,GAAG;AAC3C,WAAK,QAAQ;AACb,WAAK,OAAO,MAAM,kCAAkC,KAAK,GAAG;AAAA,IAC9D,OAAO;AACL,YAAM,IAAI,gBAAgB;AAAA,QACxB,MAAM,KAAK;AAAA,QACX,IAAI;AAAA,MACN,CAAC;AAAA,IACH;AAAA,EACF;AACF;;;ACzEO,IAAM,kBAAN,MAAmE;AAAA,EAQxE,YAAY,SAAkB,OAAwB;AAPtD,SAAQ,cAA2B,IAAI,YAAY;AAEnD,SAAQ,SAASC,WAAU,iBAAiB;AAM1C,SAAK,UAAU;AACf,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,MAAM,iBAAiB,iBAAiD;AACtE,SAAK,YAAY,sCAA4B;AAE7C,SAAK,OAAO,KAAK,mBAAmB,KAAK,MAAM,SAAS,aAAa;AACrE,UAAM,KAAK,MAAM,WAAW;AAE5B,SAAK,YAAY,MAAM,KAAK,QAAQ,gBAAgB;AAAA,MAClD,WAAW,KAAK;AAAA
,MAChB,GAAG;AAAA,IACL,CAAC;AAED,SAAK,YAAY,IAAI,UAAU,KAAM,KAAK,SAAS,KAAK,SAAS;AAEjE,SAAK,YAAY,oCAA2B;AAAA,EAC9C;AAAA,EAEA,kBAAkB;AAChB,SAAK,YAAY,sCAA4B;AAE7C,QAAI,CAAC,KAAK,WAAW;AACnB,YAAM,IAAI,MAAM,uCAAuC;AAAA,IACzD;AAEA,QAAI;AACF,WAAK,OAAO,MAAM,8BAA8B;AAShD,WAAK,YAAY,oCAA2B;AAC5C,WAAK,OAAO,MAAM,0BAA0B;AAAA,IAC9C,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,oCAAoC,KAAK,EAAE;AAC7D,YAAM;AAAA,IACR;AAAA,EACF;AAAA,EAEA,IAAI,mBAAmB;AACrB,WAAO,KAAK,WAAW;AAAA,EACzB;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,MAAM;AAAA,EACpB;AAAA,EAEA,MAAM,gBAAgB;AACpB,UAAM,KAAK,WAAW,KAAK;AAAA,EAC7B;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,YAAY;AAAA,EAC1B;AAAA,EAEA,IAAI,aAAa;AACf,WAAO,KAAK,YAAY;AAAA,EAC1B;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,YAAY;AAAA,EAC1B;AAAA,EAEA,uBAAuB;AACrB,SAAK,YAAY,sCAA4B;AAAA,EAC/C;AACF;;;ACxFO,IAAM,iBAAN,MAAgD;AAAA,EAGrD,YACU,qBACA,QACA,QACR;AAHQ;AACA;AACA;AALV,SAAQ,SAASC,WAAU,gBAAgB;AAOzC,SAAK,sBAAsB;AAC3B,SAAK,SAAS;AACd,SAAK,SAAS;AAAA,EAChB;AAAA,EAEA,MAAM,eAAe;AACnB,SAAK,OAAO,MAAM,sCAAsC;AACxD,UAAM,iBAAiB,MAAM,KAAK,OAAO,KAAK;AAE9C,QAAI,KAAK,OAAO,SAAS;AACvB,WAAK,OAAO,KAAK,sDAAsD;AACvE;AAAA,IACF;AAEA,SAAK,OAAO,MAAM,YAAY,eAAe,MAAM,WAAW;AAE9D,UAAM,gBAAgB,eAAe;AAAA,MACnC,CAAC,YAAY,KAAK,oBAAoB,MAAM,OAAO;AAAA,IACrD;AACA,UAAM,QAAQ,IAAI,aAAa;AAAA,EACjC;AAAA,EAEA,MAAM,kBAAkB;AACtB,WAAO,MAAM,KAAK,oBAAoB,gBAAgB;AAAA,EACxD;AACF;;;AJqDO,SAAS,kBACd,SACA,QACQ;AAGR,QAAM,kBAAkB,IAAI,gBAAgB;AAC5C,QAAM,cAAc,gBAAgB;AAGpC,QAAM,MACJ,OAAO,OACP,SAAS,OAAO,oBAAoB,IAAI;AAAA,IACtC,KAAK,OAAO;AAAA,IACZ,SAAS;AAAA,EACX,CAAC;AAEH,QAAM,QAAQ,IAAI,MAAgB,KAAK,OAAO,aAAa,OAAO;AAClE,QAAM,UAAU,IAAI,QAAQ,GAAG;AAE/B,QAAM,YAAY,IAAI,gBAA0B,SAAS,KAAK;AAE9D,QAAM,kBAAkB,CAAC,QAAsB,WAAwB;AACrE,WAAO,IAAI;AAAA,MACT;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,OAAO,cAAc;AAAA,MACrB,OAAO,cAAc;AAAA,IACvB;AAAA,EACF;AAEA,QAAM,SAAS,IAAI,mBAAmB,OAAO,aAAa;AAAA,IACxD,WAAW,OAAO,aAAa,OAAO,iBAAiB;AAAA,IACvD,gBAAgB,OAAO,kBAAkB;AAAA,IACzC,gBAAgB,OAAO,kBAAkB;AAAA,IACzC,mBAAmB,OAAO,qBAAqB;AAAA,EACjD,CAAC;AAED,QAAM,sBAAsB,IAAI;AAAA,IAC9B;AAAA,IACA;AAAA,IACA;AAAA,MACE,
eAAe,OAAO,iBAAiB;AAAA,IACzC;AAAA,EACF;AACA,QAAM,iBAAiB,IAAI;AAAA,IACzB;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,SAAO,IAAI,OAAO,gBAAgB,WAAW,GAAG;AAClD;;;AK7IA,IAAM,eAAe,KAAK,IAAI,IAAI,cAAc;AAChD,IAAM,oBAAoB,KAAK,IAAI,IAAI,mBAAmB;AAE1D,IAAM,SAASC,WAAU,sBAAsB;AAE/C,eAAO,qBACL,eAAuB,eACR;AACf,MAAI,CAAC,cAAc;AACjB,UAAM,IAAI,MAAM,sCAAsC;AAAA,EACxD;AAEA,SAAO,MAAM,iCAAiC;AAE9C,QAAM,WAAW,MAAM,MAAM,GAAG,YAAY,iBAAiB,YAAY,IAAI;AAAA,IAC3E,QAAQ;AAAA,IACR,SAAS;AAAA,MACP,eAAe,UAAU,iBAAiB;AAAA,MAC1C,gBAAgB;AAAA,IAClB;AAAA,EACF,CAAC;AAED,SAAO,MAAM,qCAAqC;AAElD,MAAI,CAAC,SAAS,IAAI;AAChB,UAAM,IAAI;AAAA,MACR,yCAAyC,SAAS,MAAM,IAAI,SAAS,UAAU;AAAA,IACjF;AAAA,EACF;AACF;;;ACCO,IAAM,aAAN,MAAiB;AAAA,EACtB;AAAA,SAAe,SAASC,WAAU,YAAY;AAAA;AAAA,EAC9C;AAAA,SAAe,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsC3B,OAAO,MACL,SACA,SAA2B,CAAC,GAC5B;AACA,SAAK,gBAAgB;AAGrB,UAAM,mBACJ,OAAO,oBAAoB,KAAK,oBAAoB;AAGtD,UAAM,iBAAmC;AAAA;AAAA,MAEvC,GAAG;AAAA;AAAA,MAGH,WAAW,OAAO,aAAa;AAAA,MAC/B,eAAe,OAAO,iBAAiB;AAAA,MACvC,kBAAkB,OAAO,oBAAoB;AAAA,MAC7C,gBAAgB,OAAO,kBAAkB;AAAA,MACzC,gBAAgB,OAAO,kBAAkB;AAAA,MACzC,YAAY,OAAO,cAAc;AAAA,MACjC,YAAY,OAAO,cAAc;AAAA,MACjC,mBAAmB,OAAO,qBAAqB;AAAA;AAAA,MAG/C;AAAA,IACF;AAEA,SAAK,oBAAoB,SAAS,cAAc;AAAA,EAClD;AAAA,EAEA,OAAe,kBAAkB;AAC/B,QAAI,KAAK,WAAW;AAClB,YAAM,IAAI,MAAM,4CAA4C;AAAA,IAC9D;AACA,SAAK,YAAY;AAAA,EACnB;AAAA,EAEA,OAAe,sBAA8B;AAE3C,UAAM,mBAAmB,KAAK,IAAI,IAAI,oBAAoB;AAC1D,QAAI,CAAC,kBAAkB;AACrB,YAAM,UACJ;AAEF,YAAM,IAAI,MAAM,OAAO;AAAA,IACzB;AACA,WAAO;AAAA,EACT;AAAA,EAEA,OAAe,qBAAqB,QAAgB;AAClD,eAAW,iBAAiB,YAAY;AACtC,UAAI,OAAO,kBAAkB;AAC3B,cAAM,qBAAqB,OAAO,gBAAgB;AAAA,MACpD;AAEA,aAAO,KAAK;AAAA,IACd;AAIA,gBAAY,UAAU,IAAI,QAAQ,MAAM;AAAA,IAAC,CAAC,CAAC;AAAA,EAC7C;AAAA,EAEA,OAAe,oBACb,SACA,cACA;AACA,QAAI,SAAwB;AAE5B,SAAK,MAAM,CAAC,GAAG,CAAC,QAAQ;AACtB,UAAI,CAAC,QAAQ;AACX,cAAM,mBAAmB,KAAK,oBAAoB,GAAG;AACrD,cAAM,gBAAgB,KAAK,IAAI,IAAI,iBAA
iB;AACpD,oBAAY,aAAa;AAEzB,aAAK,OAAO,KAAK,iBAAiB,gBAAgB,EAAE;AAGpD,iBAAS,kBAAkB,SAAS,YAAY;AAChD,eAAO,cAAc;AAAA,UACnB;AAAA,UACA,UAAU;AAAA,QACZ,CAAC;AAED,aAAK,qBAAqB,MAAM;AAAA,MAClC;AAEA,aAAO,IAAI,SAAS,MAAM;AAAA,QACxB,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAChD,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAAA,EAEA,OAAe,oBAAoB,KAAsB;AACvD,WAAO,IAAI,IAAI,IAAI,GAAG,EAAE,SAAS,QAAQ,cAAc,EAAE;AAAA,EAC3D;AACF;;;AC9JO,IAAM,iBAAN,MAEP;AAAA,EAGE,YACmB,SACA,QACA,QACjB;AAHiB;AACA;AACA;AALnB,SAAQ,SAASC,WAAU,gBAAgB;AAAA,EAMxC;AAAA,EAEH,MAAM,OAAyC;AAC7C,QAAI,KAAK,UAAU,GAAG;AACpB,WAAK,OAAO,MAAM,wCAAwC;AAC1D,aAAO,CAAC;AAAA,IACV;AAEA,SAAK,OAAO;AAAA,MACV,0CAA0C,KAAK,OAAO,SAAS;AAAA,IACjE;AAEA,QAAI;AACF,YAAM,QAAQ,MAAM,KAAK,QAAQ;AAAA,QAC/B,KAAK,OAAO;AAAA,QACZ,KAAK,OAAO;AAAA,MACd;AACA,WAAK,OAAO,MAAM,aAAa,MAAM,MAAM,aAAa;AACxD,aAAO;AAAA,IACT,SAAS,KAAc;AACrB,WAAK,OAAO,MAAM,iCAAiC,GAAG,EAAE;AACxD,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AAAA,EAEQ,YAAqB;AAC3B,WAAO,KAAK,OAAO;AAAA,EACrB;AACF;;;AC7CA,IAAMC,cAAN,cAAyB,MAAM;AAAA,EAC7B,cAAc;AACZ,UAAM,mBAAmB;AACzB,SAAK,OAAO;AAAA,EACd;AACF;AAMO,IAAM,mBAAN,MAAmE;AAAA,EAGxE,YACmB,MACA,MACA,SACA,QACjB;AAJiB;AACA;AACA;AACA;AANnB,SAAQ,SAASC,WAAU,kBAAkB;AAAA,EAO1C;AAAA,EAEH,IAAI,QAAQ;AACV,WAAO,KAAK,KAAK;AAAA,EACnB;AAAA,EAEA,MAAM,UAAyB;AAC7B,QAAI;AACF,UAAI,KAAK,OAAO,SAAS;AACvB,cAAM,IAAID,YAAW;AAAA,MACvB;AAGA,WAAK,OAAO,eAAe;AAE3B,YAAM,WAAW,KAAK,KAAK;AAC3B,WAAK,OAAO;AAAA,QACV,uBAAuB,KAAK,KAAK,MAAM,aAAa,QAAQ;AAAA,MAC9D;AAGA,YAAM,UAAU,KAAK,KAAK,kBAAkB,QAAQ;AAEpD,UAAI,CAAC,SAAS;AACZ,cAAM,IAAI,MAAM,qCAAqC,QAAQ,EAAE;AAAA,MACjE;AAGA,YAAM,SAAS,MAAM,QAAQ,QAAQ,KAAK,KAAK,KAAK;AAGpD,WAAK,OAAO;AAAA,QACV,aAAa,KAAK,KAAK,MAAM;AAAA,MAC/B;AACA,YAAM,KAAK,QAAQ,aAAa,KAAK,MAAM,MAAM;AAEjD,WAAK,OAAO,MAAM,aAAa,KAAK,KAAK,MAAM,qBAAqB;AAAA,IACtE,SAAS,OAAO;AACd,YAAM,KAAK,qBAAqB,KAAK;AAAA,IACvC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAc,qBAAqB,OAAgB;AACjD,QAAI,iBAAiB,SAAS,MAAM,SAAS,cAAc;AACzD,WAAK,OAAO,MAAM,mCAAmC,KAAK,KAAK,MAAM,EAAE;AAAA,IAGzE,OAAO;AACL,WAAK,OAAO;AAAA,QACV,aAAa,KAAK,KAAK,MAAM,uBAAuB,KA
AK;AAAA,MAC3D;AACA,YAAM,KAAK,QAAQ,SAAS,KAAK,MAAM,KAAK;AAAA,IAC9C;AAAA,EACF;AACF;;;ACzEO,IAAM,kBAAN,MAEP;AAAA,EACE,YAA6B,KAAmB;AAAnB;AAAA,EAAoB;AAAA,EAEjD,MAAM,aACJ,WACA,YAAY,IACZ,oBAAoB,GACpB,iBAAiB,GACjB,iBAAiB,KACiB;AAClC,WAAO,MAAM,KAAK;AAAA;AAAA;AAAA,wBAGE,SAAS;AAAA,gBACjB,iBAAiB;AAAA,iBAChB,SAAS;AAAA,8BACI,cAAc;AAAA,8BACd,cAAc;AAAA;AAAA;AAAA,EAG1C;AAAA,EAEA,MAAM,aAAa,UAAuB,QAA8B;AACtE,UAAM,KAAK;AAAA;AAAA,oBAEK,SAAS,MAAM;AAAA,uBACZ,SAAS,SAAS;AAAA,wBACjB,CAAC;AAAA,oBACL,KAAK,IAAI,KAAK,UAAU,IAAI,CAAC;AAAA;AAAA;AAAA,EAG/C;AAAA,EAEA,MAAM,SAAS,UAAuB,OAA+B;AACnE,UAAM,cACJ,OAAO,UAAU,WACb,QACA,iBAAiB,QACjB,MAAM,UACN,KAAK,UAAU,KAAK;AAE1B,UAAM,KAAK;AAAA;AAAA,oBAEK,SAAS,MAAM;AAAA,uBACZ,SAAS,SAAS;AAAA,wBACjB,CAAC;AAAA,2BACE,WAAW;AAAA;AAAA;AAAA,EAGpC;AAAA,EAEA,MAAM,UACJ,MACA,OACiB;AACjB,UAAM,UAAU,MAAM,KAAK;AAAA,wCACS,KAAK,IAAI,WAAW,KAAK,IAAI;AAAA,MAC/D;AAAA,IACF,CAAC;AAAA;AAGD,QAAI,QAAQ,WAAW,GAAG;AACxB,YAAM,IAAI,MAAM,wBAAwB,KAAK,IAAI,EAAE;AAAA,IACrD;AAEA,UAAM,CAAC,OAAO,IAAI;AAElB,WAAO;AAAA,EACT;AACF;;;AC1EA,OAAOE,eAAc;;;ACAd,IAAM,sBAAN,MAAuE;AAAA,EAQ5E,YAAY,SAAkB,MAAa;AAP3C,SAAQ,cAA2B,IAAI,YAAY;AAEnD,SAAQ,SAASC,WAAU,qBAAqB;AAM9C,SAAK,UAAU;AACf,SAAK,OAAO;AAAA,EACd;AAAA,EAEA,MAAM,iBAAiB,iBAAiD;AACtE,SAAK,YAAY,sCAA4B;AAE7C,SAAK,YAAY,MAAM,KAAK,QAAQ,gBAAgB;AAAA,MAClD,WAAW,KAAK;AAAA,MAChB,GAAG;AAAA,IACL,CAAC;AAED,SAAK,YAAY,IAAI,UAAU,KAAM,KAAK,SAAS,KAAK,SAAS;AAEjE,SAAK,YAAY,oCAA2B;AAAA,EAC9C;AAAA,EAEA,kBAAkB;AAChB,SAAK,YAAY,sCAA4B;AAE7C,QAAI,CAAC,KAAK,WAAW;AACnB,YAAM,IAAI,MAAM,uCAAuC;AAAA,IACzD;AAEA,QAAI;AACF,WAAK,OAAO,MAAM,8BAA8B;AAChD,WAAK,YAAY,oCAA2B;AAC5C,WAAK,OAAO,MAAM,0BAA0B;AAAA,IAC9C,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,oCAAoC,KAAK,EAAE;AAC7D,YAAM;AAAA,IACR;AAAA,EACF;AAAA,EAEA,IAAI,mBAAmB;AACrB,WAAO,KAAK,WAAW;AAAA,EACzB;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,KAAK;AAAA,EACnB;AAAA,EAEA,MAAM,gBAAgB;AACpB,UAAM,KAAK,WAAW,KAAK;AAAA,EAC7B;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,YAAY;AAAA,EAC1B;AAAA,EAEA,IAAI,aAAa;AACf,WAAO,KAAK,YAAY;AAAA,EAC1B;AAAA,EAEA,IAAI,YAAY;AACd,WAAO,KAAK,YAAY;AAAA,EAC1B;AAA
A,EAEA,uBAAuB;AACrB,SAAK,YAAY,sCAA4B;AAAA,EAC/C;AACF;;;AD/CO,SAAS,iBACd,MACA,QACQ;AACR,QAAMC,UAASC,WAAU,kBAAkB;AAG3C,QAAM,kBAAkB,IAAI,gBAAgB;AAC5C,QAAM,cAAc,gBAAgB;AAEpC,MAAI,CAAC,OAAO,OAAO,CAAC,OAAO,kBAAkB;AAC3C,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,QAAM,MACJ,OAAO,OACPC,UAAS,OAAO,kBAAmB;AAAA,IACjC,KAAK,OAAO;AAAA,IACZ,SAAS;AAAA,EACX,CAAC;AAGH,QAAM,gBAAgB,IAAI,gBAAuB,GAAG;AAGpD,QAAM,YAAY,KAAK,QAAQ;AAC/B,EAAAF,QAAO,MAAM,qBAAqB,SAAS,EAAE;AAG7C,QAAM,UAAU,IAAI,QAAQ,GAAG;AAC/B,QAAM,YAAY,IAAI,oBAA2B,SAAS,IAAI;AAG9D,QAAM,eAAqC;AAAA,IACzC,WAAW,OAAO,aAAa;AAAA,IAC/B,WAAW,KAAK;AAAA,EAClB;AACA,QAAM,SAAS,IAAI;AAAA,IACjB;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAGA,QAAM,kBAAkB,CACtB,QACA,WACc;AACd,WAAO,IAAI,iBAAwB,MAAM,QAAQ,eAAe,MAAM;AAAA,EACxE;AAGA,QAAM,sBAAsB,IAAI;AAAA,IAC9B;AAAA,IACA;AAAA,IACA;AAAA,MACE,eAAe,OAAO,iBAAiB;AAAA,IACzC;AAAA,EACF;AAGA,QAAM,iBAAiB,IAAI;AAAA,IACzB;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAGA,SAAO,IAAI,OAAO,gBAAgB,WAAW,GAAG;AAClD;",
6
+ "names": ["getLogger", "getLogger", "getLogger", "getLogger", "getLogger", "getLogger", "getLogger", "getLogger", "getLogger", "getLogger", "getLogger", "AbortError", "getLogger", "postgres", "getLogger", "logger", "getLogger", "postgres"]
7
+ }
package/mod.ts ADDED
@@ -0,0 +1,7 @@
1
+ export { EdgeWorker, type EdgeWorkerConfig } from './src/EdgeWorker.ts';
2
+
3
+ // Internal exports - use with caution
4
+ export { Worker, type WorkerConfig } from './src/Worker.ts';
5
+ export { Queries } from './src/Queries.ts';
6
+ export { Queue } from './src/Queue.ts';
7
+ export * as types from './src/types.ts';
package/package.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "name": "@pgflow/edge-worker",
3
+ "version": "0.0.5-prealpha.0",
4
+ "license": "AGPL-3.0",
5
+ "type": "module",
6
+ "dependencies": {
7
+ "@jsr/henrygd__queue": "^1.0.7",
8
+ "@jsr/std__log": "^0.224.13",
9
+ "postgres": "3.4.5"
10
+ },
11
+ "publishConfig": {
12
+ "access": "public"
13
+ }
14
+ }
package/project.json ADDED
@@ -0,0 +1,164 @@
1
+ {
2
+ "name": "edge-worker",
3
+ "$schema": "../../node_modules/nx/schemas/project-schema.json",
4
+ "sourceRoot": "pkgs/edge-worker",
5
+ "projectType": "library",
6
+ "targets": {
7
+ "build": {
8
+ "executor": "@nx/esbuild:esbuild",
9
+ "outputs": ["{options.outputPath}"],
10
+ "options": {
11
+ "outputPath": "pkgs/edge-worker/dist",
12
+ "main": "pkgs/edge-worker/src/index.ts",
13
+ "tsConfig": "pkgs/edge-worker/tsconfig.lib.json",
14
+ "platform": "node",
15
+ "format": ["esm"],
16
+ "assets": ["pkgs/edge-worker/*.md"],
17
+ "external": ["@jsr/*", "postgres"],
18
+ "bundle": true,
19
+ "sourcemap": true
20
+ }
21
+ },
22
+ "test:nx-deno": {
23
+ "executor": "@axhxrx/nx-deno:test",
24
+ "outputs": ["{workspaceRoot}/coverage/pkgs/edge-worker"],
25
+ "options": {
26
+ "coverageDirectory": "coverage/pkgs/edge-worker",
27
+ "denoConfig": "pkgs/edge-worker/deno.json",
28
+ "allowNone": false,
29
+ "check": "local"
30
+ }
31
+ },
32
+ "lint": {
33
+ "executor": "@axhxrx/nx-deno:lint",
34
+ "options": {
35
+ "denoConfig": "pkgs/edge-worker/deno.json",
36
+ "ignore": "pkgs/edge-worker/supabase/functions/_src/"
37
+ }
38
+ },
39
+ "supabase:start": {
40
+ "executor": "nx:run-commands",
41
+ "options": {
42
+ "cwd": "pkgs/edge-worker",
43
+ "commands": ["supabase start"],
44
+ "parallel": false
45
+ }
46
+ },
47
+ "supabase:stop": {
48
+ "executor": "nx:run-commands",
49
+ "options": {
50
+ "cwd": "pkgs/edge-worker",
51
+ "commands": ["supabase stop --no-backup"],
52
+ "parallel": false
53
+ }
54
+ },
55
+ "supabase:status": {
56
+ "executor": "nx:run-commands",
57
+ "options": {
58
+ "cwd": "pkgs/edge-worker",
59
+ "commands": ["supabase status"],
60
+ "parallel": false
61
+ }
62
+ },
63
+ "supabase:restart": {
64
+ "executor": "nx:run-commands",
65
+ "options": {
66
+ "cwd": "pkgs/edge-worker",
67
+ "commands": ["supabase stop --no-backup", "supabase start"],
68
+ "parallel": false
69
+ }
70
+ },
71
+ "supabase:reset": {
72
+ "dependsOn": ["supabase:prepare"],
73
+ "executor": "nx:run-commands",
74
+ "options": {
75
+ "cwd": "pkgs/edge-worker",
76
+ "commands": [
77
+ "rm -f supabase/migrations/*.sql",
78
+ "cp ../core/supabase/migrations/*.sql supabase/migrations/",
79
+ "cp sql/*_*.sql supabase/migrations/",
80
+ "supabase db reset"
81
+ ],
82
+ "parallel": false
83
+ }
84
+ },
85
+ "supabase:prepare-edge-fn": {
86
+ "executor": "nx:run-commands",
87
+ "options": {
88
+ "cwd": "pkgs/edge-worker",
89
+ "commands": [
90
+ "rm -f supabase/functions/_src/*.ts",
91
+ "rm -f supabase/functions/_src/deno.*",
92
+ "cp -r src/* ./supabase/functions/_src/",
93
+ "cp deno.* ./supabase/functions/_src/",
94
+ "find supabase/functions -maxdepth 1 -mindepth 1 -type d -not -name '_src' -exec cp deno.* {} \\;"
95
+ ],
96
+ "parallel": false
97
+ }
98
+ },
99
+ "supabase:functions-serve": {
100
+ "dependsOn": ["supabase:start", "supabase:prepare-edge-fn"],
101
+ "executor": "nx:run-commands",
102
+ "options": {
103
+ "cwd": "pkgs/edge-worker",
104
+ "commands": [
105
+ "supabase functions serve --env-file supabase/functions/.env"
106
+ ],
107
+ "parallel": false
108
+ }
109
+ },
110
+ "db:ensure": {
111
+ "executor": "nx:run-commands",
112
+ "options": {
113
+ "cwd": "pkgs/edge-worker",
114
+ "commands": ["deno task db:ensure"],
115
+ "parallel": false
116
+ }
117
+ },
118
+ "test:unit": {
119
+ "dependsOn": ["db:ensure"],
120
+ "executor": "nx:run-commands",
121
+ "options": {
122
+ "cwd": "pkgs/edge-worker",
123
+ "commands": [
124
+ "deno test --allow-all --env=supabase/functions/.env tests/unit/"
125
+ ],
126
+ "parallel": false
127
+ }
128
+ },
129
+ "test:integration": {
130
+ "dependsOn": ["db:ensure"],
131
+ "executor": "nx:run-commands",
132
+ "options": {
133
+ "cwd": "pkgs/edge-worker",
134
+ "commands": [
135
+ "deno test --allow-all --env=supabase/functions/.env tests/integration/"
136
+ ],
137
+ "parallel": false
138
+ }
139
+ },
140
+ "test:e2e": {
141
+ "dependsOn": ["supabase:prepare-edge-fn"],
142
+ "executor": "nx:run-commands",
143
+ "options": {
144
+ "cwd": "pkgs/edge-worker",
145
+ "commands": [
146
+ "deno test --allow-all --env=supabase/functions/.env tests/e2e/"
147
+ ],
148
+ "parallel": false
149
+ }
150
+ },
151
+ "test": {
152
+ "dependsOn": ["test:unit", "test:integration"]
153
+ },
154
+ "jsr:publish": {
155
+ "executor": "nx:run-commands",
156
+ "options": {
157
+ "cwd": "pkgs/edge-worker",
158
+ "commands": ["pnpm dlx jsr publish --allow-slow-types"],
159
+ "parallel": false
160
+ }
161
+ }
162
+ },
163
+ "tags": []
164
+ }
@@ -0,0 +1,22 @@
1
#!/bin/bash
# Concatenate all SQL migrations (plus the core seed file) into a single
# file consumed by the test database setup.
#
# Output: ./tests/db/migrations/edge_worker.sql (overwritten on each run)

# Fail loudly on any error instead of emitting a partial file.
set -euo pipefail

# Create or clear the target file
target_file="./tests/db/migrations/edge_worker.sql"
mkdir -p "$(dirname "$target_file")"
{
  echo "-- Combined migrations file"
  echo "-- Generated on $(date)"
  echo ""
} > "$target_file"

# Also add core migrations, in lexicographic (i.e. timestamp) order.
# NUL-delimited find/sort/read keeps filenames with spaces intact.
find ../core/supabase/migrations -name '*.sql' -print0 | sort -z |
  while IFS= read -r -d '' f; do
    echo "-- From file: $(basename "$f")" >> "$target_file"
    cat "$f" >> "$target_file"
    echo "" >> "$target_file"
    echo "" >> "$target_file"
  done

# And copy the pgflow_tests
echo "-- From file: seed.sql" >> "$target_file"
cat "../core/supabase/seed.sql" >> "$target_file"
echo "" >> "$target_file"
echo "" >> "$target_file"
@@ -0,0 +1,17 @@
1
#!/bin/bash
# Block until a TCP port on localhost starts accepting connections.
#
# Usage: wait-for-localhost <port>

port_number=$1

# A port argument is mandatory; bail out early when it is missing.
if [ -z "$port_number" ]; then
  echo "Port number is not provided"
  exit 1
fi

echo "Waiting for localhost:$port_number..."

# Poll with netcat every 100ms, printing a dot per attempt.
while ! nc -z localhost "$port_number" 2>/dev/null; do
  echo -n "."
  sleep 0.1
done

echo -e "\nPort $port_number is available!"
@@ -0,0 +1,11 @@
1
-- A worker counts as active when its most recent heartbeat
-- arrived within the last 6 seconds.
create or replace view edge_worker.active_workers as
select
  worker_id,
  queue_name,
  function_name,
  started_at,
  stopped_at,
  last_heartbeat_at
from edge_worker.workers
where now() - last_heartbeat_at < interval '6 seconds';
@@ -0,0 +1,12 @@
1
-- A worker counts as inactive when its most recent heartbeat
-- is older than 6 seconds.
--
-- NOTE(review): rows with a NULL last_heartbeat_at appear in neither
-- this view nor active_workers — confirm that is intended.
create or replace view edge_worker.inactive_workers as
select
  worker_id,
  queue_name,
  function_name,
  started_at,
  stopped_at,
  last_heartbeat_at
from edge_worker.workers
where now() - last_heartbeat_at > interval '6 seconds';
@@ -0,0 +1,68 @@
1
create extension if not exists pg_net;

-- Calls edge function asynchronously, requires Vault secrets to be set:
-- - supabase_anon_key
-- - app_url
--
-- Returns the pg_net request id of the queued HTTP POST.
-- Raises when function_name is empty or either Vault secret is missing.
create or replace function edge_worker.call_edgefn_async(
  function_name text,
  body text
)
returns bigint
language plpgsql
volatile
set search_path to edge_worker
as $$
declare
  v_anon_key text;
  v_app_url text;
  request_id bigint;
begin
  IF function_name IS NULL OR function_name = '' THEN
    raise exception 'function_name cannot be null or empty';
  END IF;

  -- Fail fast with a clear message when a Vault secret is not configured;
  -- otherwise the NULL url/header would surface as an opaque pg_net error.
  select decrypted_secret into v_anon_key
  from vault.decrypted_secrets
  where name = 'supabase_anon_key';

  IF v_anon_key IS NULL THEN
    raise exception 'Vault secret "supabase_anon_key" is not set';
  END IF;

  select decrypted_secret into v_app_url
  from vault.decrypted_secrets
  where name = 'app_url';

  IF v_app_url IS NULL THEN
    raise exception 'Vault secret "app_url" is not set';
  END IF;

  -- Queue the async HTTP call; pg_net processes it outside this transaction.
  select net.http_post(
    url => v_app_url || '/functions/v1/' || function_name,
    body => jsonb_build_object('body', body),
    headers => jsonb_build_object(
      'Authorization', 'Bearer ' || v_anon_key
    )
  ) into request_id;

  return request_id;
end;
$$;
43
+
44
-- Spawn a new worker asynchronously via edge function
--
-- It is intended to be used in a cron job that ensures continuous operation.
-- Returns 1 when a new worker was spawned, 0 when one is already active.
create or replace function edge_worker.spawn(
  function_name text
) returns integer as $$
declare
  -- alias the argument so it does not clash with the view column of the same name
  p_function_name text := function_name;
begin
  -- Guard clause: if any worker for this function heartbeated recently, do nothing.
  IF EXISTS (
    SELECT 1
    FROM edge_worker.active_workers AS aw
    WHERE aw.function_name = p_function_name
  ) THEN
    raise notice 'Worker Exists for queue: NOT spawning new worker for queue: %', p_function_name;
    return 0;
  END IF;

  raise notice 'Spawning new worker: %', p_function_name;
  PERFORM edge_worker.call_edgefn_async(p_function_name, '');
  return 1;
end;
$$ language plpgsql;
@@ -0,0 +1,32 @@
1
-- Benchmark fixture: bulk-enqueue messages onto the 'max_concurrency'
-- pgmq queue in fixed-size batches via pgmq.send_batch.
--
-- Setup / teardown helpers (run manually as needed):
-- select * from pgmq.create('max_concurrency');
-- select * from pgmq.drop_queue('max_concurrency');
WITH
  -- Tunables: total message count and messages per send_batch call.
  -- msg_count must be a multiple of batch_size (integer division below).
  params AS (
    SELECT
      2000000 as msg_count,
      1000 as batch_size
  ),
  -- One row per batch: 0 .. (msg_count / batch_size) - 1.
  batch_nums AS (
    SELECT generate_series(0, msg_count/batch_size - 1) as batch_num
    FROM params
  ),
  -- Inclusive 1-based id range covered by each batch.
  batch_ranges AS (
    SELECT
      batch_num,
      batch_num * batch_size + 1 as start_id,
      (batch_num + 1) * batch_size as end_id
    FROM batch_nums
    CROSS JOIN params
  ),
  -- Materialize each batch as an array of {"id": i} payloads
  -- (implicit lateral generate_series over each range row).
  batches AS (
    SELECT
      batch_num,
      array_agg(jsonb_build_object('id', i)) as msg_array
    FROM batch_ranges,
      generate_series(start_id, end_id) i
    GROUP BY batch_num
  )
-- Send batches in order so msg_ids roughly follow payload ids.
SELECT pgmq.send_batch('max_concurrency', msg_array)
FROM batches
ORDER BY batch_num;
File without changes
@@ -0,0 +1,115 @@
1
-- Ad-hoc diagnostic queries for investigating processing gaps in the
-- archived 'max_concurrency' benchmark queue (pgmq.a_max_concurrency).
-- Intended to be run statement-by-statement in an interactive session.
--
-- NOTE(review): several queries subtract make_interval(secs => 3) from vt,
-- which presumably reverses a 3-second visibility timeout to recover the
-- read time — confirm this matches the worker's configured timeout.

-- select count(*) from pgmq.a_max_concurrency;
-- select read_ct, count(*) from pgmq.q_max_concurrency group by read_ct;

-- Retry distribution: how many archived messages needed 0, 1, 2, ... retries.
select read_ct - 1 as retries_count, count(*)
from pgmq.a_max_concurrency group by read_ct order by read_ct;

-- Current queue metrics snapshot.
select * from pgmq.metrics('max_concurrency');

-- Sample rows, and total wall-clock span of the benchmark run.
select * from pgmq.a_max_concurrency limit 10;
select EXTRACT(EPOCH FROM (max(archived_at) - min(enqueued_at))) as total_seconds from pgmq.a_max_concurrency;

-- Processing time ranges per read_ct
SELECT
  read_ct - 1 as retry_count,
  COUNT(*) as messages,
  round(avg(EXTRACT(EPOCH FROM (vt - make_interval(secs => 3) - enqueued_at))), 2) as avg_s,
  round(min(EXTRACT(EPOCH FROM (vt - make_interval(secs => 3) - enqueued_at))), 2) as min_s,
  round(max(EXTRACT(EPOCH FROM (vt - make_interval(secs => 3) - enqueued_at))), 2) as max_s
FROM pgmq.a_max_concurrency
GROUP BY read_ct
ORDER BY read_ct;

-- Processing time percentiles (read time to archive time, in milliseconds)
WITH processing_times AS (
  SELECT archived_at - (vt - make_interval(secs=>3)) as processing_time
  FROM pgmq.a_max_concurrency
)
SELECT
  ROUND(EXTRACT(epoch FROM percentile_cont(0.50) WITHIN GROUP (ORDER BY processing_time)) * 1000) as p50_ms,
  ROUND(EXTRACT(epoch FROM percentile_cont(0.75) WITHIN GROUP (ORDER BY processing_time)) * 1000) as p75_ms,
  ROUND(EXTRACT(epoch FROM percentile_cont(0.90) WITHIN GROUP (ORDER BY processing_time)) * 1000) as p90_ms,
  ROUND(EXTRACT(epoch FROM percentile_cont(0.95) WITHIN GROUP (ORDER BY processing_time)) * 1000) as p95_ms,
  ROUND(EXTRACT(epoch FROM percentile_cont(0.99) WITHIN GROUP (ORDER BY processing_time)) * 1000) as p99_ms,
  ROUND(EXTRACT(epoch FROM MIN(processing_time)) * 1000) as min_ms,
  ROUND(EXTRACT(epoch FROM MAX(processing_time)) * 1000) as max_ms
FROM processing_times;

-- Total processing time for messages with read_ct 1 or 2
SELECT
  round(sum(EXTRACT(EPOCH FROM (archived_at - enqueued_at))), 2) as total_processing_seconds
FROM pgmq.a_max_concurrency
WHERE read_ct IN (1, 2);

-- Distribution of processing times in configurable intervals
-- (bucket width set via interval_conf; also shows a running total)
WITH
  interval_conf AS (
    SELECT 1 as interval_seconds
  ),
  processing_times AS (
    SELECT
      EXTRACT(EPOCH FROM (archived_at - enqueued_at)) as seconds
    FROM pgmq.a_max_concurrency
  )
SELECT
  ((floor(seconds / interval_seconds) * interval_seconds) || '-' ||
    (floor(seconds / interval_seconds) * interval_seconds + interval_seconds) || 's')::text as time_bucket,
  COUNT(*) as message_count,
  round((COUNT(*)::numeric / interval_seconds), 1) as messages_per_second,
  SUM(COUNT(*)) OVER (ORDER BY floor(seconds / interval_seconds)) as total_processed_so_far
FROM processing_times, interval_conf
GROUP BY floor(seconds / interval_seconds), interval_seconds
ORDER BY floor(seconds / interval_seconds);


-- First let's check the raw distribution
-- (the 165-381s window brackets an observed throughput gap)
WITH processing_times AS (
  SELECT
    EXTRACT(EPOCH FROM (archived_at - enqueued_at)) as seconds
  FROM pgmq.a_max_concurrency
)
SELECT
  floor(seconds) as seconds,
  COUNT(*) as message_count
FROM processing_times
WHERE seconds BETWEEN 165 AND 381
GROUP BY floor(seconds)
ORDER BY floor(seconds);


-- Examine messages around the gap
WITH processing_times AS (
  SELECT
    msg_id,
    enqueued_at,
    archived_at,
    EXTRACT(EPOCH FROM (archived_at - enqueued_at)) as processing_time,
    read_ct
  FROM pgmq.a_max_concurrency
)
SELECT
  msg_id,
  enqueued_at,
  archived_at,
  round(processing_time::numeric, 2) as processing_seconds,
  read_ct
FROM processing_times
WHERE
  processing_time BETWEEN 164 AND 380
ORDER BY processing_time;

-- Show processing time distribution by retry count
-- (20-second buckets over the 0-400s range via width_bucket)
WITH processing_times AS (
  SELECT
    EXTRACT(EPOCH FROM (archived_at - enqueued_at)) as processing_time,
    read_ct,
    width_bucket(EXTRACT(EPOCH FROM (archived_at - enqueued_at)), 0, 400, 20) as time_bucket
  FROM pgmq.a_max_concurrency
)
SELECT
  ((time_bucket - 1) * 20) || '-' || (time_bucket * 20) || 's' as time_range,
  read_ct,
  COUNT(*) as message_count
FROM processing_times
GROUP BY time_bucket, read_ct
ORDER BY time_bucket, read_ct;