@pgflow/dsl 0.0.0-array-map-steps-cd94242a-20251008042921 → 0.0.0-condition-4354fcb6-20260108134756

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -31,30 +31,30 @@ type Input = {
31
31
 
32
32
  // Define a flow with steps and dependencies
33
33
  export const AnalyzeWebsite = new Flow<Input>({
34
- slug: 'analyze_website',
34
+ slug: 'analyzeWebsite',
35
35
  maxAttempts: 3,
36
36
  baseDelay: 5,
37
37
  timeout: 10,
38
38
  })
39
39
  .step(
40
40
  { slug: 'website' },
41
- async (input) => await scrapeWebsite(input.run.url)
41
+ async (flowInput) => await scrapeWebsite(flowInput.url)
42
42
  )
43
43
  .step(
44
44
  { slug: 'sentiment', dependsOn: ['website'] },
45
- async (input) => await analyzeSentiment(input.website.content)
45
+ async (deps) => await analyzeSentiment(deps.website.content)
46
46
  )
47
47
  .step(
48
48
  { slug: 'summary', dependsOn: ['website'] },
49
- async (input) => await summarizeWithAI(input.website.content)
49
+ async (deps) => await summarizeWithAI(deps.website.content)
50
50
  )
51
51
  .step(
52
52
  { slug: 'saveToDb', dependsOn: ['sentiment', 'summary'] },
53
- async (input) => {
53
+ async (deps, ctx) => {
54
54
  return await saveToDb({
55
- websiteUrl: input.run.url,
56
- sentiment: input.sentiment.score,
57
- summary: input.summary.aiSummary,
55
+ websiteUrl: ctx.flowInput.url,
56
+ sentiment: deps.sentiment.score,
57
+ summary: deps.summary.aiSummary,
58
58
  });
59
59
  }
60
60
  );
@@ -62,16 +62,22 @@ export const AnalyzeWebsite = new Flow<Input>({
62
62
 
63
63
  ### Understanding Data Flow
64
64
 
65
- In pgflow, each step receives an `input` object that contains:
65
+ In pgflow, step handlers use **asymmetric signatures** based on whether they have dependencies:
66
66
 
67
- 1. **`input.run`** - The original flow input (available to all steps)
68
- 2. **`input.{stepName}`** - Outputs from dependency steps
67
+ **Root steps (no dependencies):**
68
+ - First parameter: `flowInput` - the original flow input directly
69
+ - Second parameter: `ctx` - context object (env, supabase, flowInput, etc.)
70
+
71
+ **Dependent steps (with dependsOn):**
72
+ - First parameter: `deps` - object with outputs from dependency steps (`deps.{stepName}`)
73
+ - Second parameter: `ctx` - context object (includes `ctx.flowInput` if needed)
69
74
 
70
75
  This design ensures:
71
76
 
72
- - Original flow parameters are accessible throughout the entire flow
73
- - Data doesn't need to be manually forwarded through intermediate steps
74
- - Steps can combine original input with processed data from previous steps
77
+ - Root steps receive flow input directly for clean, simple handlers
78
+ - Dependent steps focus on their dependencies without wrapping
79
+ - Original flow input is always accessible via `ctx.flowInput` when needed
80
+ - Steps can combine dependency outputs with original input via context
75
81
 
76
82
  ### Step Methods
77
83
 
@@ -84,8 +90,9 @@ The standard method for adding steps to a flow. Each step processes input and re
84
90
  ```typescript
85
91
  .step(
86
92
  { slug: 'process', dependsOn: ['previous'] },
87
- async (input) => {
88
- // Access input.run and input.previous
93
+ async (deps, ctx) => {
94
+ // Access deps.previous for dependency output
95
+ // Access ctx.flowInput if original flow input is needed
89
96
  return { result: 'processed' };
90
97
  }
91
98
  )
@@ -98,14 +105,14 @@ A semantic wrapper around `.step()` that provides type enforcement for steps tha
98
105
  ```typescript
99
106
  // Fetch an array of items to be processed
100
107
  .array(
101
- { slug: 'fetch_items' },
108
+ { slug: 'fetchItems' },
102
109
  async () => [1, 2, 3, 4, 5]
103
110
  )
104
111
 
105
112
  // With dependencies - combining data from multiple sources
106
113
  .array(
107
- { slug: 'combine_results', dependsOn: ['source1', 'source2'] },
108
- async (input) => [...input.source1, ...input.source2]
114
+ { slug: 'combineResults', dependsOn: ['source1', 'source2'] },
115
+ async (deps) => [...deps.source1, ...deps.source2]
109
116
  )
110
117
  ```
111
118
 
@@ -131,7 +138,7 @@ Processes arrays element-by-element, similar to JavaScript's `Array.map()`. The
131
138
  ```typescript
132
139
  // ROOT MAP - No array: property means use flow input
133
140
  // Flow input MUST be an array (e.g., ["hello", "world"])
134
- new Flow<string[]>({ slug: 'process_strings' })
141
+ new Flow<string[]>({ slug: 'processStrings' })
135
142
  .map(
136
143
  { slug: 'uppercase' }, // No array: property!
137
144
  (item) => item.toUpperCase()
@@ -139,7 +146,7 @@ new Flow<string[]>({ slug: 'process_strings' })
139
146
  // Each string in the input array gets uppercased in parallel
140
147
 
141
148
  // DEPENDENT MAP - array: property specifies the source step
142
- new Flow<{}>({ slug: 'data_pipeline' })
149
+ new Flow<{}>({ slug: 'dataPipeline' })
143
150
  .array({ slug: 'numbers' }, () => [1, 2, 3])
144
151
  .map(
145
152
  { slug: 'double', array: 'numbers' }, // Processes 'numbers' output
@@ -166,7 +173,7 @@ The `.map()` method provides full TypeScript type inference for array elements:
166
173
  ```typescript
167
174
  type User = { id: number; name: string };
168
175
 
169
- new Flow<{}>({ slug: 'user_flow' })
176
+ new Flow<{}>({ slug: 'userFlow' })
170
177
  .array({ slug: 'users' }, (): User[] => [
171
178
  { id: 1, name: 'Alice' },
172
179
  { id: 2, name: 'Bob' }
@@ -181,7 +188,7 @@ new Flow<{}>({ slug: 'user_flow' })
181
188
 
182
189
  ```typescript
183
190
  // Batch processing - process multiple items in parallel
184
- new Flow<number[]>({ slug: 'batch_processor' })
191
+ new Flow<number[]>({ slug: 'batchProcessor' })
185
192
  .map({ slug: 'validate' }, (item) => {
186
193
  if (item < 0) throw new Error('Invalid item');
187
194
  return item;
@@ -192,17 +199,17 @@ new Flow<number[]>({ slug: 'batch_processor' })
192
199
  });
193
200
 
194
201
  // Data transformation pipeline
195
- new Flow<{}>({ slug: 'etl_pipeline' })
196
- .step({ slug: 'fetch_urls' }, () => ['url1', 'url2', 'url3'])
197
- .map({ slug: 'scrape', array: 'fetch_urls' }, async (url) => {
202
+ new Flow<{}>({ slug: 'etlPipeline' })
203
+ .step({ slug: 'fetchUrls' }, () => ['url1', 'url2', 'url3'])
204
+ .map({ slug: 'scrape', array: 'fetchUrls' }, async (url) => {
198
205
  return await fetchContent(url);
199
206
  })
200
207
  .map({ slug: 'extract', array: 'scrape' }, (html) => {
201
208
  return extractData(html);
202
209
  })
203
- .step({ slug: 'aggregate', dependsOn: ['extract'] }, (input) => {
204
- // input.extract is the aggregated array from all map tasks
205
- return consolidateResults(input.extract);
210
+ .step({ slug: 'aggregate', dependsOn: ['extract'] }, (deps) => {
211
+ // deps.extract is the aggregated array from all map tasks
212
+ return consolidateResults(deps.extract);
206
213
  });
207
214
  ```
208
215
 
@@ -218,9 +225,9 @@ Step handlers can optionally receive a second parameter - the **context object**
218
225
  ```typescript
219
226
  .step(
220
227
  { slug: 'saveToDb' },
221
- async (input, context) => {
228
+ async (flowInput, ctx) => {
222
229
  // Access platform resources through context
223
- const result = await context.sql`SELECT * FROM users WHERE id = ${input.userId}`;
230
+ const result = await ctx.sql`SELECT * FROM users WHERE id = ${flowInput.userId}`;
224
231
  return result[0];
225
232
  }
226
233
  )
@@ -230,40 +237,40 @@ Step handlers can optionally receive a second parameter - the **context object**
230
237
 
231
238
  All platforms provide these core resources:
232
239
 
233
- - **`context.env`** - Environment variables (`Record<string, string | undefined>`)
234
- - **`context.shutdownSignal`** - AbortSignal for graceful shutdown handling
235
- - **`context.rawMessage`** - Original pgmq message with metadata
240
+ - **`ctx.env`** - Environment variables (`Record<string, string | undefined>`)
241
+ - **`ctx.flowInput`** - Original flow input (typed as the flow's input type)
242
+ - **`ctx.shutdownSignal`** - AbortSignal for graceful shutdown handling
243
+ - **`ctx.rawMessage`** - Original pgmq message with metadata
236
244
  ```typescript
237
245
  interface PgmqMessageRecord<T> {
238
246
  msg_id: number;
239
247
  read_ct: number;
240
248
  enqueued_at: Date;
241
249
  vt: Date;
242
- message: T; // <-- this is your 'input'
250
+ message: T;
243
251
  }
244
252
  ```
245
- - **`context.stepTask`** - Current step task details (flow handlers only)
253
+ - **`ctx.stepTask`** - Current step task details (flow handlers only)
246
254
  ```typescript
247
255
  interface StepTaskRecord<TFlow> {
248
256
  flow_slug: string;
249
257
  run_id: string;
250
258
  step_slug: string;
251
- input: StepInput<TFlow, StepSlug>; // <-- this is handler 'input'
252
259
  msg_id: number;
253
260
  }
254
261
  ```
255
- - **`context.workerConfig`** - Resolved worker configuration with all defaults applied
262
+ - **`ctx.workerConfig`** - Resolved worker configuration with all defaults applied
256
263
  ```typescript
257
264
  // Provides access to worker settings like retry limits
258
- const isLastAttempt = context.rawMessage.read_ct >= context.workerConfig.retry.limit;
265
+ const isLastAttempt = ctx.rawMessage.read_ct >= ctx.workerConfig.retry.limit;
259
266
  ```
260
267
 
261
268
  #### Supabase Platform Resources
262
269
 
263
270
  When using the Supabase platform with EdgeWorker, additional resources are available:
264
271
 
265
- - **`context.sql`** - PostgreSQL client (postgres.js)
266
- - **`context.supabase`** - Supabase client with service role key for full database access
272
+ - **`ctx.sql`** - PostgreSQL client (postgres.js)
273
+ - **`ctx.supabase`** - Supabase client with service role key for full database access
267
274
 
268
275
  To use Supabase resources, import the `Flow` class from the Supabase preset:
269
276
 
@@ -271,18 +278,18 @@ To use Supabase resources, import the `Flow` class from the Supabase preset:
271
278
  import { Flow } from '@pgflow/dsl/supabase';
272
279
 
273
280
  const MyFlow = new Flow<{ userId: string }>({
274
- slug: 'my_flow',
275
- }).step({ slug: 'process' }, async (input, context) => {
276
- // TypeScript knows context includes Supabase resources
277
- const { data } = await context.supabase
281
+ slug: 'myFlow',
282
+ }).step({ slug: 'process' }, async (flowInput, ctx) => {
283
+ // TypeScript knows ctx includes Supabase resources
284
+ const { data } = await ctx.supabase
278
285
  .from('users')
279
286
  .select('*')
280
- .eq('id', input.userId);
287
+ .eq('id', flowInput.userId);
281
288
 
282
289
  // Use SQL directly
283
- const stats = await context.sql`
284
- SELECT COUNT(*) as total FROM events
285
- WHERE user_id = ${input.userId}
290
+ const stats = await ctx.sql`
291
+ SELECT COUNT(*) as total FROM events
292
+ WHERE user_id = ${flowInput.userId}
286
293
  `;
287
294
 
288
295
  return { user: data[0], eventCount: stats[0].total };
@@ -300,7 +307,7 @@ Configure flows and steps with runtime options:
300
307
 
301
308
  ```typescript
302
309
  new Flow<Input>({
303
- slug: 'my_flow', // Required: Unique flow identifier
310
+ slug: 'myFlow', // Required: Unique flow identifier
304
311
  maxAttempts: 3, // Optional: Maximum retry attempts (default: 1)
305
312
  baseDelay: 5, // Optional: Base delay in seconds for retries (default: 1)
306
313
  timeout: 10, // Optional: Task timeout in seconds (default: 30)
package/dist/CHANGELOG.md CHANGED
@@ -1,6 +1,60 @@
1
1
  # @pgflow/dsl
2
2
 
3
- ## 0.0.0-array-map-steps-cd94242a-20251008042921
3
+ ## 0.0.0-condition-4354fcb6-20260108134756
4
+
5
+ ### Patch Changes
6
+
7
+ - c25ab9f: Add whenFailed option for error handling after retries are exhausted (fail, skip, skip-cascade)
8
+
9
+ ## 0.13.1
10
+
11
+ ## 0.13.0
12
+
13
+ ## 0.12.0
14
+
15
+ ### Minor Changes
16
+
17
+ - 37402eb: BREAKING: Asymmetric handler signatures - remove `run` key from step inputs
18
+
19
+ - Root steps: `(flowInput, ctx) => ...` - flow input directly as first param
20
+ - Dependent steps: `(deps, ctx) => ...` - only dependency outputs as first param
21
+ - Access flow input in dependent steps via `await ctx.flowInput` (async/lazy-loaded)
22
+ - Lazy loading prevents data duplication for map steps processing large arrays
23
+ - Enables functional composition and simplifies types for future subflows
24
+
25
+ ### Patch Changes
26
+
27
+ - 5dc5cfc: Fix Supabase Edge Runtime compatibility by replacing npm:postgres with jsr:@oscar6echo/postgres fork. The npm package fails to parse database URLs in Deno edge environments, causing CONNECT_TIMEOUT errors.
28
+
29
+ ## 0.11.0
30
+
31
+ ## 0.10.0
32
+
33
+ ### Patch Changes
34
+
35
+ - 0b84bb0: Add automatic flow compilation at worker startup. Workers now call ensure_flow_compiled to verify flows are up-to-date. In development, mismatched flows are recompiled automatically. In production, mismatches cause errors. Use ensureCompiledOnStartup: false to opt-out.
36
+
37
+ ## 0.9.1
38
+
39
+ ### Patch Changes
40
+
41
+ - 992a86b: Unify connection configuration with improved local detection. The `connectionString` config option now works correctly, and you can pass a raw postgres.js `sql` instance via `config.sql` for full control over connection options (SSL, pooling, etc.).
42
+
43
+ Fixes [#469](https://github.com/pgflow-dev/pgflow/issues/469), [#424](https://github.com/pgflow-dev/pgflow/issues/424). Thanks to [@Nciso](https://github.com/Nciso), [@mikz](https://github.com/mikz), [@ddlaws0n](https://github.com/ddlaws0n), and **PixelEcho** for feedback and bug reports.
44
+
45
+ ## 0.9.0
46
+
47
+ ## 0.8.1
48
+
49
+ ## 0.8.0
50
+
51
+ ## 0.7.3
52
+
53
+ ## 0.7.2
54
+
55
+ ## 0.7.1
56
+
57
+ ## 0.7.0
4
58
 
5
59
  ### Minor Changes
6
60
 
package/dist/README.md CHANGED
@@ -31,30 +31,30 @@ type Input = {
31
31
 
32
32
  // Define a flow with steps and dependencies
33
33
  export const AnalyzeWebsite = new Flow<Input>({
34
- slug: 'analyze_website',
34
+ slug: 'analyzeWebsite',
35
35
  maxAttempts: 3,
36
36
  baseDelay: 5,
37
37
  timeout: 10,
38
38
  })
39
39
  .step(
40
40
  { slug: 'website' },
41
- async (input) => await scrapeWebsite(input.run.url)
41
+ async (flowInput) => await scrapeWebsite(flowInput.url)
42
42
  )
43
43
  .step(
44
44
  { slug: 'sentiment', dependsOn: ['website'] },
45
- async (input) => await analyzeSentiment(input.website.content)
45
+ async (deps) => await analyzeSentiment(deps.website.content)
46
46
  )
47
47
  .step(
48
48
  { slug: 'summary', dependsOn: ['website'] },
49
- async (input) => await summarizeWithAI(input.website.content)
49
+ async (deps) => await summarizeWithAI(deps.website.content)
50
50
  )
51
51
  .step(
52
52
  { slug: 'saveToDb', dependsOn: ['sentiment', 'summary'] },
53
- async (input) => {
53
+ async (deps, ctx) => {
54
54
  return await saveToDb({
55
- websiteUrl: input.run.url,
56
- sentiment: input.sentiment.score,
57
- summary: input.summary.aiSummary,
55
+ websiteUrl: ctx.flowInput.url,
56
+ sentiment: deps.sentiment.score,
57
+ summary: deps.summary.aiSummary,
58
58
  });
59
59
  }
60
60
  );
@@ -62,16 +62,22 @@ export const AnalyzeWebsite = new Flow<Input>({
62
62
 
63
63
  ### Understanding Data Flow
64
64
 
65
- In pgflow, each step receives an `input` object that contains:
65
+ In pgflow, step handlers use **asymmetric signatures** based on whether they have dependencies:
66
66
 
67
- 1. **`input.run`** - The original flow input (available to all steps)
68
- 2. **`input.{stepName}`** - Outputs from dependency steps
67
+ **Root steps (no dependencies):**
68
+ - First parameter: `flowInput` - the original flow input directly
69
+ - Second parameter: `ctx` - context object (env, supabase, flowInput, etc.)
70
+
71
+ **Dependent steps (with dependsOn):**
72
+ - First parameter: `deps` - object with outputs from dependency steps (`deps.{stepName}`)
73
+ - Second parameter: `ctx` - context object (includes `ctx.flowInput` if needed)
69
74
 
70
75
  This design ensures:
71
76
 
72
- - Original flow parameters are accessible throughout the entire flow
73
- - Data doesn't need to be manually forwarded through intermediate steps
74
- - Steps can combine original input with processed data from previous steps
77
+ - Root steps receive flow input directly for clean, simple handlers
78
+ - Dependent steps focus on their dependencies without wrapping
79
+ - Original flow input is always accessible via `ctx.flowInput` when needed
80
+ - Steps can combine dependency outputs with original input via context
75
81
 
76
82
  ### Step Methods
77
83
 
@@ -84,8 +90,9 @@ The standard method for adding steps to a flow. Each step processes input and re
84
90
  ```typescript
85
91
  .step(
86
92
  { slug: 'process', dependsOn: ['previous'] },
87
- async (input) => {
88
- // Access input.run and input.previous
93
+ async (deps, ctx) => {
94
+ // Access deps.previous for dependency output
95
+ // Access ctx.flowInput if original flow input is needed
89
96
  return { result: 'processed' };
90
97
  }
91
98
  )
@@ -98,14 +105,14 @@ A semantic wrapper around `.step()` that provides type enforcement for steps tha
98
105
  ```typescript
99
106
  // Fetch an array of items to be processed
100
107
  .array(
101
- { slug: 'fetch_items' },
108
+ { slug: 'fetchItems' },
102
109
  async () => [1, 2, 3, 4, 5]
103
110
  )
104
111
 
105
112
  // With dependencies - combining data from multiple sources
106
113
  .array(
107
- { slug: 'combine_results', dependsOn: ['source1', 'source2'] },
108
- async (input) => [...input.source1, ...input.source2]
114
+ { slug: 'combineResults', dependsOn: ['source1', 'source2'] },
115
+ async (deps) => [...deps.source1, ...deps.source2]
109
116
  )
110
117
  ```
111
118
 
@@ -131,7 +138,7 @@ Processes arrays element-by-element, similar to JavaScript's `Array.map()`. The
131
138
  ```typescript
132
139
  // ROOT MAP - No array: property means use flow input
133
140
  // Flow input MUST be an array (e.g., ["hello", "world"])
134
- new Flow<string[]>({ slug: 'process_strings' })
141
+ new Flow<string[]>({ slug: 'processStrings' })
135
142
  .map(
136
143
  { slug: 'uppercase' }, // No array: property!
137
144
  (item) => item.toUpperCase()
@@ -139,7 +146,7 @@ new Flow<string[]>({ slug: 'process_strings' })
139
146
  // Each string in the input array gets uppercased in parallel
140
147
 
141
148
  // DEPENDENT MAP - array: property specifies the source step
142
- new Flow<{}>({ slug: 'data_pipeline' })
149
+ new Flow<{}>({ slug: 'dataPipeline' })
143
150
  .array({ slug: 'numbers' }, () => [1, 2, 3])
144
151
  .map(
145
152
  { slug: 'double', array: 'numbers' }, // Processes 'numbers' output
@@ -166,7 +173,7 @@ The `.map()` method provides full TypeScript type inference for array elements:
166
173
  ```typescript
167
174
  type User = { id: number; name: string };
168
175
 
169
- new Flow<{}>({ slug: 'user_flow' })
176
+ new Flow<{}>({ slug: 'userFlow' })
170
177
  .array({ slug: 'users' }, (): User[] => [
171
178
  { id: 1, name: 'Alice' },
172
179
  { id: 2, name: 'Bob' }
@@ -181,7 +188,7 @@ new Flow<{}>({ slug: 'user_flow' })
181
188
 
182
189
  ```typescript
183
190
  // Batch processing - process multiple items in parallel
184
- new Flow<number[]>({ slug: 'batch_processor' })
191
+ new Flow<number[]>({ slug: 'batchProcessor' })
185
192
  .map({ slug: 'validate' }, (item) => {
186
193
  if (item < 0) throw new Error('Invalid item');
187
194
  return item;
@@ -192,17 +199,17 @@ new Flow<number[]>({ slug: 'batch_processor' })
192
199
  });
193
200
 
194
201
  // Data transformation pipeline
195
- new Flow<{}>({ slug: 'etl_pipeline' })
196
- .step({ slug: 'fetch_urls' }, () => ['url1', 'url2', 'url3'])
197
- .map({ slug: 'scrape', array: 'fetch_urls' }, async (url) => {
202
+ new Flow<{}>({ slug: 'etlPipeline' })
203
+ .step({ slug: 'fetchUrls' }, () => ['url1', 'url2', 'url3'])
204
+ .map({ slug: 'scrape', array: 'fetchUrls' }, async (url) => {
198
205
  return await fetchContent(url);
199
206
  })
200
207
  .map({ slug: 'extract', array: 'scrape' }, (html) => {
201
208
  return extractData(html);
202
209
  })
203
- .step({ slug: 'aggregate', dependsOn: ['extract'] }, (input) => {
204
- // input.extract is the aggregated array from all map tasks
205
- return consolidateResults(input.extract);
210
+ .step({ slug: 'aggregate', dependsOn: ['extract'] }, (deps) => {
211
+ // deps.extract is the aggregated array from all map tasks
212
+ return consolidateResults(deps.extract);
206
213
  });
207
214
  ```
208
215
 
@@ -218,9 +225,9 @@ Step handlers can optionally receive a second parameter - the **context object**
218
225
  ```typescript
219
226
  .step(
220
227
  { slug: 'saveToDb' },
221
- async (input, context) => {
228
+ async (flowInput, ctx) => {
222
229
  // Access platform resources through context
223
- const result = await context.sql`SELECT * FROM users WHERE id = ${input.userId}`;
230
+ const result = await ctx.sql`SELECT * FROM users WHERE id = ${flowInput.userId}`;
224
231
  return result[0];
225
232
  }
226
233
  )
@@ -230,40 +237,40 @@ Step handlers can optionally receive a second parameter - the **context object**
230
237
 
231
238
  All platforms provide these core resources:
232
239
 
233
- - **`context.env`** - Environment variables (`Record<string, string | undefined>`)
234
- - **`context.shutdownSignal`** - AbortSignal for graceful shutdown handling
235
- - **`context.rawMessage`** - Original pgmq message with metadata
240
+ - **`ctx.env`** - Environment variables (`Record<string, string | undefined>`)
241
+ - **`ctx.flowInput`** - Original flow input (typed as the flow's input type)
242
+ - **`ctx.shutdownSignal`** - AbortSignal for graceful shutdown handling
243
+ - **`ctx.rawMessage`** - Original pgmq message with metadata
236
244
  ```typescript
237
245
  interface PgmqMessageRecord<T> {
238
246
  msg_id: number;
239
247
  read_ct: number;
240
248
  enqueued_at: Date;
241
249
  vt: Date;
242
- message: T; // <-- this is your 'input'
250
+ message: T;
243
251
  }
244
252
  ```
245
- - **`context.stepTask`** - Current step task details (flow handlers only)
253
+ - **`ctx.stepTask`** - Current step task details (flow handlers only)
246
254
  ```typescript
247
255
  interface StepTaskRecord<TFlow> {
248
256
  flow_slug: string;
249
257
  run_id: string;
250
258
  step_slug: string;
251
- input: StepInput<TFlow, StepSlug>; // <-- this is handler 'input'
252
259
  msg_id: number;
253
260
  }
254
261
  ```
255
- - **`context.workerConfig`** - Resolved worker configuration with all defaults applied
262
+ - **`ctx.workerConfig`** - Resolved worker configuration with all defaults applied
256
263
  ```typescript
257
264
  // Provides access to worker settings like retry limits
258
- const isLastAttempt = context.rawMessage.read_ct >= context.workerConfig.retry.limit;
265
+ const isLastAttempt = ctx.rawMessage.read_ct >= ctx.workerConfig.retry.limit;
259
266
  ```
260
267
 
261
268
  #### Supabase Platform Resources
262
269
 
263
270
  When using the Supabase platform with EdgeWorker, additional resources are available:
264
271
 
265
- - **`context.sql`** - PostgreSQL client (postgres.js)
266
- - **`context.supabase`** - Supabase client with service role key for full database access
272
+ - **`ctx.sql`** - PostgreSQL client (postgres.js)
273
+ - **`ctx.supabase`** - Supabase client with service role key for full database access
267
274
 
268
275
  To use Supabase resources, import the `Flow` class from the Supabase preset:
269
276
 
@@ -271,18 +278,18 @@ To use Supabase resources, import the `Flow` class from the Supabase preset:
271
278
  import { Flow } from '@pgflow/dsl/supabase';
272
279
 
273
280
  const MyFlow = new Flow<{ userId: string }>({
274
- slug: 'my_flow',
275
- }).step({ slug: 'process' }, async (input, context) => {
276
- // TypeScript knows context includes Supabase resources
277
- const { data } = await context.supabase
281
+ slug: 'myFlow',
282
+ }).step({ slug: 'process' }, async (flowInput, ctx) => {
283
+ // TypeScript knows ctx includes Supabase resources
284
+ const { data } = await ctx.supabase
278
285
  .from('users')
279
286
  .select('*')
280
- .eq('id', input.userId);
287
+ .eq('id', flowInput.userId);
281
288
 
282
289
  // Use SQL directly
283
- const stats = await context.sql`
284
- SELECT COUNT(*) as total FROM events
285
- WHERE user_id = ${input.userId}
290
+ const stats = await ctx.sql`
291
+ SELECT COUNT(*) as total FROM events
292
+ WHERE user_id = ${flowInput.userId}
286
293
  `;
287
294
 
288
295
  return { user: data[0], eventCount: stats[0].total };
@@ -300,7 +307,7 @@ Configure flows and steps with runtime options:
300
307
 
301
308
  ```typescript
302
309
  new Flow<Input>({
303
- slug: 'my_flow', // Required: Unique flow identifier
310
+ slug: 'myFlow', // Required: Unique flow identifier
304
311
  maxAttempts: 3, // Optional: Maximum retry attempts (default: 1)
305
312
  baseDelay: 5, // Optional: Base delay in seconds for retries (default: 1)
306
313
  timeout: 10, // Optional: Task timeout in seconds (default: 30)
@@ -46,5 +46,21 @@ function formatRuntimeOptions(options) {
46
46
  if ('startDelay' in options && options.startDelay !== undefined) {
47
47
  parts.push(`start_delay => ${options.startDelay}`);
48
48
  }
49
+ if ('if' in options && options.if !== undefined) {
50
+ // Serialize JSON pattern for SQL embedding (note: no quote escaping is applied here)
51
+ const jsonStr = JSON.stringify(options.if);
52
+ parts.push(`condition_pattern => '${jsonStr}'`);
53
+ }
54
+ if ('ifNot' in options && options.ifNot !== undefined) {
55
+ // Serialize JSON pattern for SQL embedding (note: no quote escaping is applied here)
56
+ const jsonStr = JSON.stringify(options.ifNot);
57
+ parts.push(`condition_not_pattern => '${jsonStr}'`);
58
+ }
59
+ if ('whenUnmet' in options && options.whenUnmet !== undefined) {
60
+ parts.push(`when_unmet => '${options.whenUnmet}'`);
61
+ }
62
+ if ('retriesExhausted' in options && options.retriesExhausted !== undefined) {
63
+ parts.push(`when_failed => '${options.retriesExhausted}'`);
64
+ }
49
65
  return parts.length > 0 ? `, ${parts.join(', ')}` : '';
50
66
  }