@pgflow/core 0.0.0-test-snapshot-releases-8d5d9bc1-20250922101013 → 0.0.0-testsnap-9294d743-20251207205914

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/README.md +177 -73
  2. package/dist/ATLAS.md +32 -0
  3. package/dist/CHANGELOG.md +824 -0
  4. package/dist/PgflowSqlClient.d.ts +17 -0
  5. package/dist/PgflowSqlClient.d.ts.map +1 -0
  6. package/dist/PgflowSqlClient.js +70 -0
  7. package/dist/README.md +497 -0
  8. package/dist/database-types.d.ts +1041 -0
  9. package/dist/database-types.d.ts.map +1 -0
  10. package/dist/database-types.js +8 -0
  11. package/dist/index.d.ts +4 -0
  12. package/dist/index.d.ts.map +1 -0
  13. package/dist/index.js +2 -0
  14. package/dist/package.json +31 -0
  15. package/dist/supabase/migrations/20250429164909_pgflow_initial.sql +579 -0
  16. package/dist/supabase/migrations/20250517072017_pgflow_fix_poll_for_tasks_to_use_separate_statement_for_polling.sql +101 -0
  17. package/dist/supabase/migrations/20250609105135_pgflow_add_start_tasks_and_started_status.sql +371 -0
  18. package/dist/supabase/migrations/20250610180554_pgflow_add_set_vt_batch_and_use_it_in_start_tasks.sql +127 -0
  19. package/dist/supabase/migrations/20250614124241_pgflow_add_realtime.sql +501 -0
  20. package/dist/supabase/migrations/20250619195327_pgflow_fix_fail_task_missing_realtime_event.sql +185 -0
  21. package/dist/supabase/migrations/20250627090700_pgflow_fix_function_search_paths.sql +6 -0
  22. package/dist/supabase/migrations/20250707210212_pgflow_add_opt_start_delay.sql +103 -0
  23. package/dist/supabase/migrations/20250719205006_pgflow_worker_deprecation.sql +2 -0
  24. package/dist/supabase/migrations/20251006073122_pgflow_add_map_step_type.sql +1244 -0
  25. package/dist/supabase/migrations/20251103222045_pgflow_fix_broadcast_order_and_timestamp_handling.sql +622 -0
  26. package/dist/supabase/migrations/20251104080523_pgflow_upgrade_pgmq_1_5_1.sql +93 -0
  27. package/dist/supabase/migrations/20251130000000_pgflow_auto_compilation.sql +268 -0
  28. package/dist/tsconfig.lib.tsbuildinfo +1 -0
  29. package/dist/types.d.ts +93 -0
  30. package/dist/types.d.ts.map +1 -0
  31. package/dist/types.js +1 -0
  32. package/package.json +4 -5
package/dist/PgflowSqlClient.d.ts ADDED
@@ -0,0 +1,17 @@
+ import type postgres from 'postgres';
+ import type { StepTaskRecord, IPgflowClient, StepTaskKey, RunRow, MessageRecord } from './types.js';
+ import type { Json } from './types.js';
+ import type { AnyFlow, ExtractFlowInput } from '@pgflow/dsl';
+ /**
+ * Implementation of IPgflowClient that uses direct SQL calls to pgflow functions
+ */
+ export declare class PgflowSqlClient<TFlow extends AnyFlow> implements IPgflowClient<TFlow> {
+ private readonly sql;
+ constructor(sql: postgres.Sql);
+ readMessages(queueName: string, visibilityTimeout: number, batchSize: number, maxPollSeconds?: number, pollIntervalMs?: number): Promise<MessageRecord[]>;
+ startTasks(flowSlug: string, msgIds: number[], workerId: string): Promise<StepTaskRecord<TFlow>[]>;
+ completeTask(stepTask: StepTaskKey, output?: Json): Promise<void>;
+ failTask(stepTask: StepTaskKey, error: unknown): Promise<void>;
+ startFlow<TFlow extends AnyFlow>(flow_slug: string, input: ExtractFlowInput<TFlow>, run_id?: string): Promise<RunRow>;
+ }
+ //# sourceMappingURL=PgflowSqlClient.d.ts.map
package/dist/PgflowSqlClient.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"PgflowSqlClient.d.ts","sourceRoot":"","sources":["../src/PgflowSqlClient.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AACrC,OAAO,KAAK,EACV,cAAc,EACd,aAAa,EACb,WAAW,EACX,MAAM,EACN,aAAa,EACd,MAAM,YAAY,CAAC;AACpB,OAAO,KAAK,EAAE,IAAI,EAAE,MAAM,YAAY,CAAC;AACvC,OAAO,KAAK,EAAE,OAAO,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAC;AAE7D;;GAEG;AACH,qBAAa,eAAe,CAAC,KAAK,SAAS,OAAO,CAChD,YAAW,aAAa,CAAC,KAAK,CAAC;IAEnB,OAAO,CAAC,QAAQ,CAAC,GAAG;gBAAH,GAAG,EAAE,QAAQ,CAAC,GAAG;IAExC,YAAY,CAChB,SAAS,EAAE,MAAM,EACjB,iBAAiB,EAAE,MAAM,EACzB,SAAS,EAAE,MAAM,EACjB,cAAc,SAAI,EAClB,cAAc,SAAM,GACnB,OAAO,CAAC,aAAa,EAAE,CAAC;IAarB,UAAU,CACd,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EAAE,EAChB,QAAQ,EAAE,MAAM,GACf,OAAO,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,CAAC;IAW7B,YAAY,CAAC,QAAQ,EAAE,WAAW,EAAE,MAAM,CAAC,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IAWjE,QAAQ,CAAC,QAAQ,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAkB9D,SAAS,CAAC,KAAK,SAAS,OAAO,EACnC,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,gBAAgB,CAAC,KAAK,CAAC,EAC9B,MAAM,CAAC,EAAE,MAAM,GACd,OAAO,CAAC,MAAM,CAAC;CAiBnB"}
package/dist/PgflowSqlClient.js ADDED
@@ -0,0 +1,70 @@
+ /**
+ * Implementation of IPgflowClient that uses direct SQL calls to pgflow functions
+ */
+ export class PgflowSqlClient {
+ sql;
+ constructor(sql) {
+ this.sql = sql;
+ }
+ async readMessages(queueName, visibilityTimeout, batchSize, maxPollSeconds = 5, pollIntervalMs = 200) {
+ return await this.sql `
+ SELECT *
+ FROM pgmq.read_with_poll(
+ queue_name => ${queueName},
+ vt => ${visibilityTimeout},
+ qty => ${batchSize},
+ max_poll_seconds => ${maxPollSeconds},
+ poll_interval_ms => ${pollIntervalMs}
+ );
+ `;
+ }
+ async startTasks(flowSlug, msgIds, workerId) {
+ return await this.sql `
+ SELECT *
+ FROM pgflow.start_tasks(
+ flow_slug => ${flowSlug},
+ msg_ids => ${msgIds}::bigint[],
+ worker_id => ${workerId}::uuid
+ );
+ `;
+ }
+ async completeTask(stepTask, output) {
+ await this.sql `
+ SELECT pgflow.complete_task(
+ run_id => ${stepTask.run_id}::uuid,
+ step_slug => ${stepTask.step_slug}::text,
+ task_index => ${stepTask.task_index}::int,
+ output => ${this.sql.json(output || null)}::jsonb
+ );
+ `;
+ }
+ async failTask(stepTask, error) {
+ const errorString = typeof error === 'string'
+ ? error
+ : error instanceof Error
+ ? error.message
+ : JSON.stringify(error);
+ await this.sql `
+ SELECT pgflow.fail_task(
+ run_id => ${stepTask.run_id}::uuid,
+ step_slug => ${stepTask.step_slug}::text,
+ task_index => ${stepTask.task_index}::int,
+ error_message => ${errorString}::text
+ );
+ `;
+ }
+ async startFlow(flow_slug, input, run_id) {
+ const results = await this.sql `
+ SELECT * FROM pgflow.start_flow(
+ flow_slug => ${flow_slug}::text,
+ input => ${this.sql.json(input)}::jsonb
+ ${run_id ? this.sql `, run_id => ${run_id}::uuid` : this.sql ``}
+ );
+ `;
+ if (results.length === 0) {
+ throw new Error(`Failed to start flow ${flow_slug}`);
+ }
+ const [flowRun] = results;
+ return flowRun;
+ }
+ }
package/dist/README.md ADDED
@@ -0,0 +1,497 @@
1
+ # pgflow SQL Core
2
+
3
+ PostgreSQL-native workflow engine for defining, managing, and tracking DAG-based workflows directly in your database.
4
+
5
+ > [!NOTE]
6
+ > This project and all its components are licensed under the [Apache 2.0](./LICENSE) license.
7
+
8
+ > [!WARNING]
9
+ > This project uses [Atlas](https://atlasgo.io/docs) to manage the schemas and migrations.
10
+ > See [ATLAS.md](ATLAS.md) for more details.
11
+
12
+ ## Table of Contents
13
+
14
+ - [Overview](#overview)
+ - [Requirements](#requirements)
15
+ - [Key Features](#key-features)
16
+ - [Architecture](#architecture)
17
+ - [Schema Design](#schema-design)
18
+ - [Execution Model](#execution-model)
+ - [Step Types](#step-types)
19
+ - [Example Flow and its life](#example-flow-and-its-life)
20
+ - [Defining a Workflow](#defining-a-workflow)
21
+ - [Starting a Workflow Run](#starting-a-workflow-run)
22
+ - [Workflow Execution](#workflow-execution)
23
+ - [Task Polling](#task-polling)
24
+ - [Task Completion](#task-completion)
25
+ - [Error Handling](#error-handling)
26
+ - [Retries and Timeouts](#retries-and-timeouts)
27
+ - [Workflow Definition with TypeScript DSL](#workflow-definition-with-typescript-dsl)
33
+ - [Data Flow](#data-flow)
34
+ - [Input and Output Handling](#input-and-output-handling)
+ - [Map Step Handler Inputs](#map-step-handler-inputs)
35
+ - [Run Completion](#run-completion)
36
+
37
+ ## Overview
38
+
39
+ The pgflow SQL Core provides the data model, state machine, and transactional functions for workflow management. It treats workflows as Directed Acyclic Graphs (DAGs) of steps, each step being a simple state machine.
40
+
41
+ This package focuses on:
42
+
43
+ - Defining and storing workflow shapes
44
+ - Managing workflow state transitions
45
+ - Exposing transactional functions for workflow operations
46
+ - Providing two-phase APIs for reliable task polling and status updates
47
+
48
+ The actual execution of workflow tasks is handled by the [Edge Worker](../edge-worker/README.md), which calls back to the SQL Core to acknowledge task completion or failure.
49
+
50
+ ## Requirements
51
+
52
+ > [!IMPORTANT]
53
+ > **pgmq Version Requirement** (since v0.8.0)
54
+ >
55
+ > pgflow v0.8.0 and later requires **pgmq 1.5.0 or higher**. This version of pgflow will NOT work with pgmq 1.4.x or earlier.
56
+ >
57
+ > - **Supabase Cloud**: Recent versions include pgmq 1.5.0+ by default
58
+ > - **Self-hosted**: You must upgrade pgmq to version 1.5.0+ before upgrading pgflow
59
+ > - **Version Check**: Run `SELECT extversion FROM pg_extension WHERE extname = 'pgmq';` to verify your pgmq version
60
+
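+ For self-hosted setups, the upgrade itself is a standard extension update. A minimal sketch (the exact target version depends on which pgmq release your installation provides):
+
+ ```sql
+ -- Hypothetical example: update the extension in place, then verify
+ ALTER EXTENSION pgmq UPDATE TO '1.5.1';
+ SELECT extversion FROM pg_extension WHERE extname = 'pgmq';
+ ```
+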
61
+ ## Key Features
62
+
63
+ - **Declarative Workflows**: Define flows and steps via SQL tables
64
+ - **Dependency Management**: Explicit step dependencies with atomic transitions
65
+ - **Configurable Behavior**: Per-flow and per-step options for timeouts, retries, and delays
66
+ - **Queue Integration**: Built on pgmq for reliable task processing
67
+ - **Transactional Guarantees**: All state transitions are ACID-compliant
68
+
69
+ ## Architecture
70
+
71
+ ### Schema Design
72
+
73
+ [Schema ERD Diagram (click to enlarge)](./assets/schema.svg)
74
+
75
+ <a href="./assets/schema.svg">
76
+ <img src="./assets/schema.svg" alt="Schema ERD Diagram" width="25%" height="25%">
77
+ </a>
78
+
79
+ ---
80
+
81
+ The schema consists of two main categories of tables:
82
+
83
+ #### Static definition tables
84
+
85
+ - `flows` (just an identity for the workflow with some global options)
86
+ - `steps` (DAG nodes belonging to particular `flows`, with option overrides)
87
+ - `deps` (DAG edges between `steps`)
88
+
89
+ #### Runtime state tables
90
+
91
+ - `runs` (execution instances of `flows`)
92
+ - `step_states` (states of individual `steps` within a `run`)
93
+ - `step_tasks` (units of work for individual `steps` within a `run`, so we can have fanouts)
94
+
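+ As an illustration, a flow's static shape can be read straight from these tables. A minimal sketch, assuming `pgflow.deps` stores one `(flow_slug, dep_slug, step_slug)` row per edge (check the migrations for the exact column names):
+
+ ```sql
+ -- Sketch: list each step of a flow together with the steps it depends on
+ SELECT s.step_slug,
+        array_agg(d.dep_slug) FILTER (WHERE d.dep_slug IS NOT NULL) AS depends_on
+   FROM pgflow.steps s
+   LEFT JOIN pgflow.deps d
+     ON d.flow_slug = s.flow_slug AND d.step_slug = s.step_slug
+  WHERE s.flow_slug = 'analyze_website'
+  GROUP BY s.step_slug;
+ ```
+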
95
+ ### Execution Model
96
+
97
+ The SQL Core handles the workflow lifecycle through these key operations:
98
+
99
+ 1. **Definition**: Workflows are defined using `create_flow` and `add_step`
100
+ 2. **Instantiation**: Workflow instances are started with `start_flow`, creating a new run
101
+ 3. **Task Retrieval**: The [Edge Worker](../edge-worker/README.md) uses two-phase polling - first `read_with_poll` to reserve queue messages, then `start_tasks` to convert them to executable tasks
102
+ 4. **State Transitions**: When the Edge Worker reports back using `complete_task` or `fail_task`, the SQL Core handles state transitions and schedules dependent steps
103
+
104
+ [Flow lifecycle diagram (click to enlarge)](./assets/flow-lifecycle.svg)
105
+
106
+ <a href="./assets/flow-lifecycle.svg"><img src="./assets/flow-lifecycle.svg" alt="Flow Lifecycle" width="25%" height="25%"></a>
107
+
108
+ ## Step Types
109
+
110
+ pgflow supports two fundamental step types that control how tasks are created and executed:
111
+
112
+ ### Single Steps (Default)
113
+
114
+ Single steps are the standard step type where each step creates exactly one task when started. These steps process their input as a whole and return a single output value.
115
+
116
+ ```sql
117
+ -- Regular single step definition
118
+ SELECT pgflow.add_step('my_flow', 'process_data');
119
+ ```
120
+
121
+ ### Map Steps
122
+
123
+ Map steps enable parallel processing of arrays by automatically creating multiple tasks - one for each array element. The system handles task distribution, parallel execution, and output aggregation transparently.
124
+
125
+ ```sql
126
+ -- Map step definition (step_type => 'map')
127
+ SELECT pgflow.add_step(
128
+ flow_slug => 'my_flow',
129
+ step_slug => 'process_items',
130
+ deps_slugs => ARRAY['fetch_items'],
131
+ step_type => 'map'
132
+ );
133
+ ```
134
+
135
+ #### Key Characteristics
136
+
137
+ - **Multiple Task Creation**: The SQL core creates N tasks for a map step (one per array element), unlike single steps which create one task
138
+ - **Element Distribution**: The SQL core distributes individual array elements to tasks based on `task_index`
139
+ - **Output Aggregation**: The SQL core aggregates task outputs back into an array for dependent steps
140
+ - **Constraint**: Map steps can have at most one dependency (which must return an array) or no dependencies at all (in which case the flow input itself must be an array)
141
+
142
+ #### Map Step Execution Flow
143
+
144
+ 1. **Array Input Validation**: The SQL core validates that the input is an array
145
+ 2. **Task Creation**: The SQL core creates N tasks with indices 0 to N-1
146
+ 3. **Element Distribution**: The SQL core assigns `array[task_index]` as input to each task
147
+ 4. **Parallel Execution**: Edge workers execute tasks independently in parallel
148
+ 5. **Output Collection**: The SQL core aggregates outputs preserving array order
149
+ 6. **Dependent Activation**: The SQL core passes the aggregated array to dependent steps
150
+
151
+ #### Root Map vs Dependent Map
152
+
153
+ **Root Map Steps** process the flow's input array directly:
154
+
155
+ ```sql
156
+ -- Root map: no dependencies, processes flow input
157
+ SELECT pgflow.add_step(
158
+ flow_slug => 'batch_processor',
159
+ step_slug => 'process_each',
160
+ step_type => 'map'
161
+ );
162
+
163
+ -- Starting the flow with array input
164
+ SELECT pgflow.start_flow(
165
+ flow_slug => 'batch_processor',
166
+ input => '[1, 2, 3, 4, 5]'::jsonb
167
+ );
168
+ ```
169
+
170
+ **Dependent Map Steps** process another step's array output:
171
+
172
+ ```sql
173
+ -- Dependent map: processes the array from 'fetch_items'
174
+ SELECT pgflow.add_step(
175
+ flow_slug => 'data_pipeline',
176
+ step_slug => 'transform_each',
177
+ deps_slugs => ARRAY['fetch_items'],
178
+ step_type => 'map'
179
+ );
180
+ ```
181
+
182
+ #### Edge Cases and Special Behaviors
183
+
184
+ 1. **Empty Array Cascade**: When a map step receives an empty array (`[]`):
185
+
186
+ - The SQL core completes it immediately without creating tasks
187
+ - The completed map step outputs an empty array
188
+ - Any dependent map steps also receive empty arrays and complete immediately
189
+ - This cascades through the entire chain of map steps in a single transaction
190
+ - Example: `[] → map1 → [] → map2 → [] → map3 → []` all complete together
191
+
192
+ 2. **NULL Values**: NULL array elements are preserved and distributed to their respective tasks
193
+
194
+ 3. **Non-Array Input**: The SQL core fails the step when input is not an array
195
+
196
+ 4. **Type Violations**: When a single step outputs non-array data to a map step, the SQL core fails the entire run (stores the invalid output for debugging, archives all queued messages, prevents orphaned tasks)
197
+
198
+ #### Implementation Details
199
+
200
+ Map steps utilize several database fields for state management:
201
+
202
+ - `initial_tasks`: Number of tasks to create (NULL until array size is known)
203
+ - `remaining_tasks`: Tracks incomplete tasks for the step
204
+ - `task_index`: Identifies which array element each task processes
205
+ - `step_type`: Column value 'map' triggers map behavior
206
+
207
+ The aggregation process ensures:
208
+
209
+ - **Order Preservation**: Task outputs maintain array element ordering
210
+ - **NULL Handling**: NULL outputs are included in the aggregated array
211
+ - **Atomicity**: Aggregation occurs within the same transaction as task completion
212
+
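+ A minimal sketch for observing this state at runtime, assuming `initial_tasks`/`remaining_tasks` live on `pgflow.step_states` and `task_index` on `pgflow.step_tasks` (see the migrations for the authoritative schema):
+
+ ```sql
+ -- Sketch: watch a map step fan out and drain
+ SELECT step_slug, status, initial_tasks, remaining_tasks
+   FROM pgflow.step_states
+  WHERE run_id = '<run_uuid>' AND step_slug = 'process_items';
+
+ SELECT task_index, status
+   FROM pgflow.step_tasks
+  WHERE run_id = '<run_uuid>' AND step_slug = 'process_items'
+  ORDER BY task_index;
+ ```
+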
213
+ ## Example flow and its life
214
+
215
+ Let's walk through creating and running a workflow that fetches a website,
216
+ does summarization and sentiment analysis in parallel steps
217
+ and saves the results to a database.
218
+
219
+ ![example flow graph](./assets/example-flow.svg)
220
+
221
+ ### Defining a Workflow
222
+
223
+ Workflows are defined using two SQL functions: `create_flow` and `add_step`.
224
+
225
+ In this example, we'll create a workflow with:
226
+
227
+ - `website` as the entry point ("root step")
228
+ - `sentiment` and `summary` as parallel steps that depend on `website`
229
+ - `saveToDb` as the final step, depending on both parallel steps
230
+
231
+ ```sql
232
+ -- Define workflow with parallel steps
233
+ SELECT pgflow.create_flow('analyze_website');
234
+ SELECT pgflow.add_step('analyze_website', 'website');
235
+ SELECT pgflow.add_step('analyze_website', 'sentiment', deps_slugs => ARRAY['website']);
236
+ SELECT pgflow.add_step('analyze_website', 'summary', deps_slugs => ARRAY['website']);
237
+ SELECT pgflow.add_step('analyze_website', 'saveToDb', deps_slugs => ARRAY['sentiment', 'summary']);
238
+ ```
239
+
240
+ > [!WARNING]
241
+ > You need to call `add_step` in topological order, which is enforced by foreign key constraints.
242
+
243
+ > [!NOTE]
244
+ > You can have multiple "root steps" in a workflow. You can even create a root-steps-only workflow
245
+ > to process a single input in parallel, because at the end, all of the outputs from steps
246
+ > that do not have dependents ("final steps") are aggregated and saved as the run's `output`.
247
+
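+ As a sketch of that pattern (hypothetical slugs), a root-steps-only flow that fans a single input out to independent steps looks like this:
+
+ ```sql
+ -- Two root steps, no dependencies: both receive the run input,
+ -- and both are "final steps", so their outputs form the run's output
+ SELECT pgflow.create_flow('parallel_checks');
+ SELECT pgflow.add_step('parallel_checks', 'check_spelling');
+ SELECT pgflow.add_step('parallel_checks', 'check_links');
+ ```
+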
248
+ ### Starting a Workflow Run
249
+
250
+ To start a workflow, call `start_flow` with a flow slug and input arguments:
251
+
252
+ ```sql
253
+ SELECT * FROM pgflow.start_flow(
254
+ flow_slug => 'analyze_website',
255
+ input => '{"url": "https://example.com"}'::jsonb
256
+ );
257
+
258
+ -- run_id | flow_slug | status | input | output | remaining_steps
259
+ -- ------------+-----------------+---------+--------------------------------+--------+-----------------
260
+ -- <run uuid> | analyze_website | started | {"url": "https://example.com"} | [NULL] | 4
261
+ ```
262
+
263
+ When a workflow starts:
264
+
265
+ - A new `run` record is created
266
+ - Initial states for all steps are created
267
+ - Root steps are marked as `started`
268
+ - Tasks are created for root steps
269
+ - Messages are enqueued on PGMQ for worker processing
270
+
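+ You can observe this right after `start_flow` by querying the runtime tables. A minimal sketch (status values as described above; see the migrations for the exact columns):
+
+ ```sql
+ -- Sketch: root steps should already be 'started'; dependent steps are still waiting
+ SELECT step_slug, status
+   FROM pgflow.step_states
+  WHERE run_id = '<run_uuid>'
+  ORDER BY step_slug;
+ ```
+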
271
+ > [!NOTE]
272
+ > The `input` argument must be a valid JSONB value: a string, number, boolean, array, object, or null.
273
+
274
+ ### Workflow Execution
275
+
276
+ #### Task Polling
277
+
278
+ The Edge Worker uses a two-phase approach to retrieve and start tasks:
279
+
280
+ **Phase 1 - Reserve Messages:**
281
+
282
+ ```sql
283
+ SELECT * FROM pgmq.read_with_poll(
284
+ queue_name => 'analyze_website',
285
+ vt => 60, -- visibility timeout in seconds
286
+ qty => 5 -- maximum number of messages to fetch
287
+ );
288
+ ```
289
+
290
+ **Phase 2 - Start Tasks:**
291
+
292
+ ```sql
293
+ SELECT * FROM pgflow.start_tasks(
294
+ flow_slug => 'analyze_website',
295
+ msg_ids => ARRAY[101, 102, 103], -- message IDs from phase 1
296
+ worker_id => '550e8400-e29b-41d4-a716-446655440000'::uuid
297
+ );
298
+ ```
299
+
300
+ **How it works:**
301
+
302
+ 1. **read_with_poll** reserves raw queue messages and hides them from other workers
303
+ 2. **start_tasks** finds matching step_tasks, increments attempts counter, and builds task inputs
304
+ 3. Task metadata and input are returned to the worker for execution
305
+
306
+ This two-phase approach ensures tasks always exist before processing begins, eliminating race conditions that could occur with single-phase polling.
307
+
308
+ #### Task Completion
309
+
310
+ After successful processing, the worker acknowledges completion:
311
+
312
+ ```sql
313
+ SELECT pgflow.complete_task(
314
+ run_id => '<run_uuid>',
315
+ step_slug => 'website',
316
+ task_index => 0, -- single steps always use task_index 0; map steps get one task per array element
317
+ output => '{"content": "HTML content", "status": 200}'::jsonb
318
+ );
319
+ ```
320
+
321
+ When a task completes:
322
+
323
+ 1. The task status is updated to 'completed' and the output is saved
324
+ 2. The message is archived in PGMQ
325
+ 3. The step state is updated to 'completed'
326
+ 4. Dependent steps with all dependencies completed are automatically started
327
+ 5. The run's remaining_steps counter is decremented
328
+ 6. If all steps are completed, the run is marked as completed with aggregated outputs
329
+
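+ A quick way to see step 4 in action: once the `website` task above completes, its dependents get queued tasks of their own. A sketch, assuming `pgflow.step_tasks` exposes a `status` column as used elsewhere in this README:
+
+ ```sql
+ -- Sketch: 'sentiment' and 'summary' depend only on 'website',
+ -- so they should now have queued tasks
+ SELECT step_slug, task_index, status
+   FROM pgflow.step_tasks
+  WHERE run_id = '<run_uuid>' AND step_slug IN ('sentiment', 'summary');
+ ```
+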
330
+ #### Error Handling
331
+
332
+ If a task fails, the worker acknowledges this using `fail_task`:
333
+
334
+ ```sql
335
+ SELECT pgflow.fail_task(
336
+ run_id => '<run_uuid>',
337
+ step_slug => 'website',
338
+ task_index => 0,
339
+ error_message => 'Connection timeout when fetching URL'::text
340
+ );
341
+ ```
342
+
343
+ The system handles failures by:
344
+
345
+ 1. Checking if retry attempts are available
346
+ 2. For available retries:
347
+ - Keeping the task in 'queued' status
348
+ - Applying exponential backoff to the message visibility timeout
349
+ - Preventing processing until the visibility timeout expires
350
+ 3. When retries are exhausted:
351
+ - Marking the task as 'failed'
352
+ - Storing the task output (even for failed tasks)
353
+ - Marking the step as 'failed'
354
+ - Marking the run as 'failed'
355
+ - Archiving the message in PGMQ
356
+ - **Archiving all queued messages for the failed run** (preventing orphaned messages)
357
+ 4. Additional failure handling:
358
+ - **No retries on already-failed runs** - tasks are immediately marked as failed
359
+ - **Graceful type constraint violations** - handled without exceptions when single steps feed map steps
360
+ - **Stores invalid output on type violations** - captures the output that caused the violation for debugging
361
+ - **Performance-optimized message archiving** using indexed queries
362
+
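+ When debugging exhausted retries, the stored error can be looked up directly. A sketch, assuming failed tasks keep their error message on `pgflow.step_tasks` (the exact column name may differ):
+
+ ```sql
+ -- Sketch: list permanently failed tasks for a run together with their errors
+ SELECT step_slug, task_index, error_message
+   FROM pgflow.step_tasks
+  WHERE run_id = '<run_uuid>' AND status = 'failed';
+ ```
+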
363
+ #### Retries and Timeouts
364
+
365
+ Retry behavior can be configured at both the flow and step level:
366
+
367
+ ```sql
368
+ -- Flow-level defaults
369
+ SELECT pgflow.create_flow(
370
+ flow_slug => 'analyze_website',
371
+ max_attempts => 3, -- Maximum number of attempts (including the first attempt)
372
+ base_delay => 5, -- Base delay in seconds for exponential backoff
373
+ timeout => 60 -- Task timeout in seconds
374
+ );
375
+
376
+ -- Step-level overrides
377
+ SELECT pgflow.add_step(
378
+ flow_slug => 'analyze_website',
379
+ step_slug => 'sentiment',
380
+ deps_slugs => ARRAY['website']::text[],
381
+ max_attempts => 5, -- Override max attempts for this step
382
+ base_delay => 2, -- Override base delay for exponential backoff
383
+ timeout => 30 -- Override timeout for this step
384
+ );
385
+ ```
386
+
387
+ The system applies exponential backoff for retries using the formula:
388
+
389
+ ```
390
+ delay = base_delay * (2 ^ attempts_count)
391
+ ```
392
+
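+ For example, with the flow-level `base_delay => 5` from the snippet above, successive retries back off at roughly 10, 20, and 40 seconds (5 * 2^1, 5 * 2^2, 5 * 2^3); exactly which `attempts_count` applies to the first retry depends on how attempts are tracked internally. A quick way to tabulate the curve:
+
+ ```sql
+ -- Sketch: backoff delays for the first few retries with base_delay = 5
+ SELECT attempt, 5 * power(2, attempt) AS delay_seconds
+   FROM generate_series(1, 3) AS attempt;
+ -- → 10, 20, 40
+ ```
+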
393
+ Timeouts are enforced by setting the message visibility timeout to the step's timeout value plus a small buffer. If a worker doesn't acknowledge completion or failure within this period, the task becomes visible again and can be retried.
394
+
395
+ ## Workflow Definition with TypeScript DSL
396
+
397
+ The SQL Core is the DAG orchestration engine that handles dependency resolution, step state management, and task spawning. However, workflows are defined using the TypeScript Flow DSL, which compiles user intent into the SQL primitives that populate the definition tables (`flows`, `steps`, `deps`).
398
+
399
+ See the [@pgflow/dsl package](../dsl/README.md) for complete documentation on:
400
+
401
+ - Expressing workflows with type-safe method chaining
402
+ - Step types (`.step()`, `.array()`, `.map()`)
403
+ - Compilation to SQL migrations
404
+ - Type inference and handler context
405
+
406
+ The SQL Core executes these compiled definitions, managing when steps are ready, how many tasks to create (1 for single steps, N for map steps), and how to aggregate results.
407
+
408
+ ## Data Flow
409
+
410
+ ### Input and Output Handling
411
+
412
+ Handlers in pgflow **must return** JSON-serializable values that are captured and saved when `complete_task` is called. These outputs become available as inputs to dependent steps, allowing data to flow through your workflow pipeline.
413
+
414
+ When a step is executed, it receives an input object where:
415
+
416
+ - Each key is a step_slug of a completed dependency
417
+ - Each value is that step's output
418
+ - A special "run" key contains the original workflow input
419
+
420
+ #### Example: `sentiment`
421
+
422
+ When the `sentiment` step runs, it receives:
423
+
424
+ ```json
425
+ {
426
+ "run": { "url": "https://example.com" },
427
+ "website": { "content": "HTML content", "status": 200 }
428
+ }
429
+ ```
430
+
431
+ #### Example: `saveToDb`
432
+
433
+ The `saveToDb` step depends on both `sentiment` and `summary`:
434
+
435
+ ```json
436
+ {
437
+ "run": { "url": "https://example.com" },
438
+ "sentiment": { "score": 0.85, "label": "positive" },
439
+ "summary": "This website discusses various topics related to technology and innovation."
440
+ }
441
+ ```
442
+
443
+ ### Map Step Handler Inputs
444
+
445
+ Map step tasks receive a fundamentally different input structure than single step tasks. Instead of receiving an object with `run` and dependency keys, **map tasks receive only their assigned array element**:
446
+
447
+ #### Example: Processing user IDs
448
+
449
+ ```json
450
+ // Flow input (for root map) or dependency output:
451
+ ["user123", "user456", "user789"]
452
+
453
+ // What each map task receives:
454
+ // Task 0: "user123"
455
+ // Task 1: "user456"
456
+ // Task 2: "user789"
457
+
458
+ // NOT this:
459
+ // { "run": {...}, "dependency": [...] }
460
+ ```
461
+
462
+ This means:
463
+
464
+ - Map handlers process individual elements in isolation
465
+ - Map handlers cannot access the original flow input (`run`)
466
+ - Map handlers cannot access other dependencies
467
+ - Map handlers focus solely on transforming their assigned element
468
+
469
+ #### Map Step Outputs Become Arrays
470
+
471
+ When a step depends on a map step, it receives the aggregated array output:
472
+
473
+ ```json
474
+ // If 'process_users' is a map step that processed ["user1", "user2"]
475
+ // and output [{"name": "Alice"}, {"name": "Bob"}]
476
+
477
+ // A step depending on 'process_users' receives:
478
+ {
479
+ "run": {
480
+ /* original flow input */
481
+ },
482
+ "process_users": [{ "name": "Alice" }, { "name": "Bob" }] // Full array
483
+ }
484
+ ```
485
+
486
+ ### Run Completion
487
+
488
+ When all steps in a run are completed, the run status is automatically updated to 'completed' and its output is set. The output is an aggregation of all the outputs from final steps (steps that have no dependents):
489
+
490
+ ```sql
491
+ -- Example of a completed run with output
492
+ SELECT run_id, status, output FROM pgflow.runs WHERE run_id = '<run_uuid>';
493
+
494
+ -- run_id | status | output
495
+ -- ------------+-----------+-----------------------------------------------------
496
+ -- <run uuid> | completed | {"saveToDb": {"status": "success"}}
497
+ ```