@pgflow/dsl 0.0.5 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -1
- package/CHANGELOG.md +0 -7
- package/__tests__/runtime/flow.test.ts +0 -121
- package/__tests__/runtime/steps.test.ts +0 -183
- package/__tests__/runtime/utils.test.ts +0 -149
- package/__tests__/types/dsl-types.test-d.ts +0 -103
- package/__tests__/types/example-flow.test-d.ts +0 -76
- package/__tests__/types/extract-flow-input.test-d.ts +0 -71
- package/__tests__/types/extract-flow-steps.test-d.ts +0 -74
- package/__tests__/types/getStepDefinition.test-d.ts +0 -65
- package/__tests__/types/step-input.test-d.ts +0 -212
- package/__tests__/types/step-output.test-d.ts +0 -55
- package/brainstorming/condition/condition-alternatives.md +0 -219
- package/brainstorming/condition/condition-with-flexibility.md +0 -303
- package/brainstorming/condition/condition.md +0 -139
- package/brainstorming/condition/implementation-plan.md +0 -372
- package/brainstorming/dsl/cli-json-schema.md +0 -225
- package/brainstorming/dsl/cli.md +0 -179
- package/brainstorming/dsl/create-compilator.md +0 -25
- package/brainstorming/dsl/dsl-analysis-2.md +0 -166
- package/brainstorming/dsl/dsl-analysis.md +0 -512
- package/brainstorming/dsl/dsl-critique.md +0 -41
- package/brainstorming/fanouts/fanout-subflows-flattened-vs-subruns.md +0 -213
- package/brainstorming/fanouts/fanouts-task-index.md +0 -150
- package/brainstorming/fanouts/fanouts-with-conditions-and-subflows.md +0 -239
- package/brainstorming/subflows/branching.ts.md +0 -38
- package/brainstorming/subflows/subflows-callbacks.ts.md +0 -124
- package/brainstorming/subflows/subflows-classes.ts.md +0 -83
- package/brainstorming/subflows/subflows-flattening-versioned.md +0 -119
- package/brainstorming/subflows/subflows-flattening.md +0 -138
- package/brainstorming/subflows/subflows.md +0 -118
- package/brainstorming/subflows/subruns-table.md +0 -282
- package/brainstorming/subflows/subruns.md +0 -315
- package/brainstorming/versioning/breaking-and-non-breaking-flow-changes.md +0 -259
- package/docs/refactor-edge-worker.md +0 -146
- package/docs/versioning.md +0 -19
- package/eslint.config.cjs +0 -22
- package/out-tsc/vitest/__tests__/runtime/flow.test.d.ts +0 -2
- package/out-tsc/vitest/__tests__/runtime/flow.test.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/runtime/steps.test.d.ts +0 -2
- package/out-tsc/vitest/__tests__/runtime/steps.test.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/runtime/utils.test.d.ts +0 -2
- package/out-tsc/vitest/__tests__/runtime/utils.test.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/types/dsl-types.test-d.d.ts +0 -2
- package/out-tsc/vitest/__tests__/types/dsl-types.test-d.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/types/example-flow.test-d.d.ts +0 -2
- package/out-tsc/vitest/__tests__/types/example-flow.test-d.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/types/extract-flow-input.test-d.d.ts +0 -2
- package/out-tsc/vitest/__tests__/types/extract-flow-input.test-d.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/types/extract-flow-steps.test-d.d.ts +0 -2
- package/out-tsc/vitest/__tests__/types/extract-flow-steps.test-d.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/types/getStepDefinition.test-d.d.ts +0 -2
- package/out-tsc/vitest/__tests__/types/getStepDefinition.test-d.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/types/step-input.test-d.d.ts +0 -2
- package/out-tsc/vitest/__tests__/types/step-input.test-d.d.ts.map +0 -1
- package/out-tsc/vitest/__tests__/types/step-output.test-d.d.ts +0 -2
- package/out-tsc/vitest/__tests__/types/step-output.test-d.d.ts.map +0 -1
- package/out-tsc/vitest/tsconfig.spec.tsbuildinfo +0 -1
- package/out-tsc/vitest/vite.config.d.ts +0 -3
- package/out-tsc/vitest/vite.config.d.ts.map +0 -1
- package/project.json +0 -28
- package/prompts/edge-worker-refactor.md +0 -105
- package/src/dsl.ts +0 -318
- package/src/example-flow.ts +0 -67
- package/src/index.ts +0 -1
- package/src/utils.ts +0 -84
- package/tsconfig.json +0 -13
- package/tsconfig.lib.json +0 -26
- package/tsconfig.spec.json +0 -35
- package/typecheck.log +0 -120
- package/vite.config.ts +0 -57
|
@@ -1,213 +0,0 @@
|
|
|
1
|
-
# Exploring "Fanout Subflows" in a Workflow System
|
|
2
|
-
|
|
3
|
-
This document explores the notion of “fanout” in a workflow DSL—where an array of items at runtime spawns multiple parallel subflows (or steps), merging their outputs when each is complete. We will examine:
|
|
4
|
-
|
|
5
|
-
1. **Practical Utility**: Is fanout really needed or overkill?
|
|
6
|
-
2. **Flattened vs. Sub-Run Implementation**: Can we feasibly do it if we flatten all steps, or do we need genuine sub-runs?
|
|
7
|
-
3. **Generic Implementations**: Is there a “basic” mechanism that could unify both fanout tasks and fanout subflows?
|
|
8
|
-
4. **Implications of Using Sub-Runs**: If we pick sub-runs, how does that affect the entire subflow abstraction, and can the `.subflow()` idea still work? What about returning a `SubflowObject` in place of a callback?
|
|
9
|
-
|
|
10
|
-
Throughout, keep in mind that the array length for the fanout is **unknown at compile time** and only becomes clear once the flow is actually running.
|
|
11
|
-
|
|
12
|
-
---
|
|
13
|
-
|
|
14
|
-
## 1. Is a Fanout Feature Really Useful?
|
|
15
|
-
|
|
16
|
-
**Fanout** means taking an array (of N items) and producing N parallel tasks or subflows. Each parallel execution handles one array element, and when all parallel executions finish, their results merge back into a single array.
|
|
17
|
-
|
|
18
|
-
1. **Massively Parallel Processing**
|
|
19
|
-
- Whenever you have a list of items—say user IDs, URLs to scrape, or documents to process—fanout is an efficient way to parallelize.
|
|
20
|
-
- This pattern is quite common: ingest multiple items, run the same logic on each item, then aggregate the outputs.
|
|
21
|
-
|
|
22
|
-
2. **Order-Sensitive Merging**
|
|
23
|
-
- A typical requirement is that the final aggregated array lines up with the original input order.
|
|
24
|
-
- This is a non-trivial detail: if you run items in parallel, you must ensure results remain aligned with the input indices.
|
|
25
|
-
|
|
26
|
-
3. **Use Cases**
|
|
27
|
-
- Data pipelines (e.g., transform a batch of logs).
|
|
28
|
-
- Multi-tenant or multi-customer flows.
|
|
29
|
-
- Bulk computations (like AI inference or image processing) that must eventually return combined results.
|
|
30
|
-
|
|
31
|
-
4. **Conclusion**
|
|
32
|
-
- Fanout is extremely common in workflow systems, so having built-in support is more than a nicety; it’s borderline essential for serious batch parallelism.
|
|
33
|
-
|
|
34
|
-
**Hence, yes, it’s quite useful and definitely worth adding**—provided the implementation overhead isn’t overwhelming.
|
|
35
|
-
|
|
36
|
-
---
|
|
37
|
-
|
|
38
|
-
## 2. Feasibility of Implementing Fanout in a Flattened Approach
|
|
39
|
-
|
|
40
|
-
The **flattened** approach means your workflow engine eventually sees a single “flat” DAG of steps. If subflows are also flattened, we might imagine a scenario like:
|
|
41
|
-
|
|
42
|
-
- At compile time, the DSL transforms each subflow’s steps into part of the parent flow, prefixing their slugs to avoid collisions.
|
|
43
|
-
- For a fanout, you might need “N copies” of the subflow’s steps, one copy per array element.
|
|
44
|
-
|
|
45
|
-
### 2.1 The Key Complication
|
|
46
|
-
|
|
47
|
-
If the array length is unknown at compile time, flattening is not straightforward:
|
|
48
|
-
|
|
49
|
-
- You cannot create a fixed number of steps at design-time, because you don’t know how many items will appear.
|
|
50
|
-
- You’d have to dynamically insert steps into the DAG at runtime—meaning the “compiled” flow is incomplete until you see the array.
|
|
51
|
-
- This dynamic insertion is not trivial: it could require altering the database schema or adding new step rows “on the fly,” which changes your flow’s shape in ways the engine may not expect.
|
|
52
|
-
|
|
53
|
-
### 2.2 Potential Workarounds
|
|
54
|
-
|
|
55
|
-
1. **Maximum Bound**
|
|
56
|
-
- You could artificially set a maximum array length (say 100 items) and flatten at compile time, ignoring or skipping the extra steps if you have fewer items.
|
|
57
|
-
- This is messy—wasteful for smaller arrays and possibly insufficient for bigger arrays.
|
|
58
|
-
|
|
59
|
-
2. **Dynamic Step Creation**
|
|
60
|
-
- Your engine might “clone” or “spawn” steps at runtime.
|
|
61
|
-
- This means the engine must be built to handle newly inserted steps mid-run. That’s a lot of complexity.
|
|
62
|
-
|
|
63
|
-
3. **Single Step with Internal Logic**
|
|
64
|
-
- Instead of representing each item with a distinct DB record, you might rely on a single “fanout” step that manually processes each item.
|
|
65
|
-
- But then you lose the built-in concurrency or separate step status tracking for each item. You also lose the ability to fail or retry them individually in a clean way.
|
|
66
|
-
|
|
67
|
-
### 2.3 Conclusion on Flattening
|
|
68
|
-
|
|
69
|
-
Flattening a “true fanout subflow” (with unknown N) can be **very challenging**. If your engine is not already designed for dynamic DAG expansion at runtime, the implementation overhead can be huge.
|
|
70
|
-
|
|
71
|
-
---
|
|
72
|
-
|
|
73
|
-
## 3. A “Basic Mechanism” for Both Fanout Tasks and Subflows
|
|
74
|
-
|
|
75
|
-
An ideal solution might unify both:
|
|
76
|
-
|
|
77
|
-
- **Fanout tasks**: e.g., run the same function on each array element.
|
|
78
|
-
- **Fanout subflows**: e.g., run the same subflow on each array element.
|
|
79
|
-
|
|
80
|
-
### 3.1 High-Level Idea
|
|
81
|
-
|
|
82
|
-
**Single “Fanout Step or Subflow” with Pluggable Logic**
|
|
83
|
-
- You define some DSL construct, like `flow.fanout(sourceArraySelector, subflowDefinition)`.
|
|
84
|
-
- At runtime, the engine sees that this step is special: it spawns multiple parallel runs of the “subflowDefinition,” each with one item from the array.
|
|
85
|
-
- The engine merges the results into an array output that is aligned to the original array indices.
|
|
86
|
-
|
|
87
|
-
### 3.2 Implementation Sketch
|
|
88
|
-
|
|
89
|
-
1. **Fanout Step in DB**
|
|
90
|
-
- A step type: `step_type='fanout'`. This step references the subflow or the single-step logic that must be repeated.
|
|
91
|
-
- The engine knows how to “expand” or “execute” that fanout step by launching multiple “child tasks or sub-subflow runs” behind the scenes.
|
|
92
|
-
|
|
93
|
-
2. **Common Core for Both Single-Task and Subflow**
|
|
94
|
-
- The engine’s internal logic for fanout doesn’t care if it’s a one-step subflow or a multi-step subflow. The key is “Take an array, apply something repeatedly, gather results.”
|
|
95
|
-
|
|
96
|
-
3. **Challenges**
|
|
97
|
-
- The engine must track partial completions, partial failures, and how to handle indexing.
|
|
98
|
-
- If you want fully independent subflow concurrency, you still need a robust concurrency approach.
|
|
99
|
-
|
|
100
|
-
With a carefully designed “fanout step,” both single-step logic and multi-step subflows can be handled uniformly.
|
|
101
|
-
|
|
102
|
-
---
|
|
103
|
-
|
|
104
|
-
## 4. Going with Sub-Runs: Impact and .subflow() Abstraction
|
|
105
|
-
|
|
106
|
-
Because flattening an unknown-size fanout is so complex, many workflow engines prefer **true sub-runs**: each array item spawns a child run that references the same subflow “definition,” just with different input. Then the parent flow:
|
|
107
|
-
|
|
108
|
-
1. Waits until all child runs complete.
|
|
109
|
-
2. Assembles their outputs in order.
|
|
110
|
-
|
|
111
|
-
### 4.1 Pros of Sub-Runs
|
|
112
|
-
|
|
113
|
-
- **No Need for Dynamically Expanding the DAG**
|
|
114
|
-
Each subflow is just a new run record in your DB. The “parent run” references it, but you are not injecting new steps into the parent’s DAG.
|
|
115
|
-
- **Engine Reuse**
|
|
116
|
-
Each sub-run is scheduled and executed exactly like a normal run. No specialized logic to create “extra steps” on the parent side.
|
|
117
|
-
- **Scalability**
|
|
118
|
-
Potentially you can spin up thousands of sub-runs in parallel if your system allows.
|
|
119
|
-
|
|
120
|
-
### 4.2 Impact on Subflow/Branching Abstraction
|
|
121
|
-
|
|
122
|
-
If everything is a separate run:
|
|
123
|
-
|
|
124
|
-
- **.subflow()** Implementation:
|
|
125
|
-
- Instead of flattening subflow definitions right away, calling `.subflow()` at runtime triggers a new run of a subflow ID.
|
|
126
|
-
- That subflow ID is a separate Flow definition in your DB (i.e., a fully versioned flow).
|
|
127
|
-
|
|
128
|
-
- **Passing a `SubflowObject` Instead of a Callback**
|
|
129
|
-
- In a flattened approach, `.subflow((sub) => { ... })` typically merges or “inline compiles” the sub-steps.
|
|
130
|
-
- With sub-runs, you might instead pass or retrieve a `SubflowObject` that points to a stand-alone flow. The DSL might look like `flow.fanout( items, SubflowObject )`, meaning: spawn the subflow described by `SubflowObject` for each item.
|
|
131
|
-
- The parent’s DSL code becomes:
|
|
132
|
-
```ts
|
|
133
|
-
flow.fanout(
|
|
134
|
-
(input) => input.run.items,
|
|
135
|
-
SubflowObject("mySubflow_v2") // or something akin to that
|
|
136
|
-
);
|
|
137
|
-
```
|
|
138
|
-
- You can still preserve a DSL style that calls `.subflow()` and returns that `SubflowObject`, but ultimately it’s referencing a separate flow ID. The difference is it won’t flatten—rather, it spawns sub-runs with that flow ID and merges results when done.
|
|
139
|
-
|
|
140
|
-
### 4.3 Downsides
|
|
141
|
-
|
|
142
|
-
- **Separate Metaruns**
|
|
143
|
-
- You now must query multiple run entries to see the overall state.
|
|
144
|
-
- The UI must reflect that 20 child runs exist for the parent.
|
|
145
|
-
|
|
146
|
-
- **Infrastructure Complexity**
|
|
147
|
-
- If your workflow system was simpler with only single-run DAGs, you now add an entire dimension of parent–child run orchestration.
|
|
148
|
-
|
|
149
|
-
---
|
|
150
|
-
|
|
151
|
-
### Can We Still Use `.subflow()` for Implementation?
|
|
152
|
-
|
|
153
|
-
Yes—**.subflow()** can still exist in the DSL. But the meaning changes from “directly inline these steps into my parent flow” to “store a reference to a distinct flow or subflow object, and at runtime, spawn runs for it.”
|
|
154
|
-
|
|
155
|
-
1. **At Code Definition**
|
|
156
|
-
```ts
|
|
157
|
-
const MySubflow = defineFlow("mySubflow", (sub) => {
|
|
158
|
-
// sub steps
|
|
159
|
-
});
|
|
160
|
-
|
|
161
|
-
const ParentFlow = defineFlow("parentFlow", (flow) => {
|
|
162
|
-
flow.fanout(
|
|
163
|
-
(payload) => payload.items,
|
|
164
|
-
MySubflow
|
|
165
|
-
);
|
|
166
|
-
});
|
|
167
|
-
```
|
|
168
|
-
- The engine sees that `MySubflow` is a separate flow definition.
|
|
169
|
-
- The parent flow references it for a fanout step.
|
|
170
|
-
|
|
171
|
-
2. **At Runtime**
|
|
172
|
-
- The parent run is created.
|
|
173
|
-
- When we get to the fanout step, we read the array length from parent’s data.
|
|
174
|
-
- For each item, we spawn a child run referencing “mySubflow.”
|
|
175
|
-
- We collect outputs once all child runs complete.
|
|
176
|
-
|
|
177
|
-
**That approach elegantly supports unknown array lengths,** because each child's subflow process is just a new run that can be started dynamically.
|
|
178
|
-
|
|
179
|
-
---
|
|
180
|
-
|
|
181
|
-
## Summary of Key Points
|
|
182
|
-
|
|
183
|
-
1. **Yes, Fanout is Often Needed**
|
|
184
|
-
- It’s a common scenario: parallelize processing for an arbitrary-length array.
|
|
185
|
-
|
|
186
|
-
2. **Flattened Approach is Possible… but Hard**
|
|
187
|
-
- If array length is not known until runtime, you’d have to dynamically add steps. Implementing that is not trivial.
|
|
188
|
-
|
|
189
|
-
3. **A Basic “Fanout Step”**
|
|
190
|
-
- One path is to implement a special step type in your engine that orchestrates sub-tasks or subflows in parallel and merges their outputs.
|
|
191
|
-
- This “fanout step” can handle both single-step logic (just a function) or multi-step logic (a subflow).
|
|
192
|
-
|
|
193
|
-
4. **Sub-Runs Are Often Simpler at Scale**
|
|
194
|
-
- Instead of flattening, spawn child runs for each array element.
|
|
195
|
-
- The DSL can still have `.subflow()`, but under the hood, it references a standalone subflow definition, invoked repeatedly.
|
|
196
|
-
|
|
197
|
-
5. **Passing a `SubflowObject` vs. Callback**
|
|
198
|
-
- In a flattening DSL, `.subflow(callback)` merges your sub-steps into the main flow.
|
|
199
|
-
- In a sub-run DSL, `.subflow()` likely returns a “flow reference” or `SubflowObject`. Then fanout or the parent flow just calls the subflow by ID.
|
|
200
|
-
|
|
201
|
-
---
|
|
202
|
-
|
|
203
|
-
## Conclusion
|
|
204
|
-
|
|
205
|
-
Implementing array-based “fanout” with unknown sizing is a powerful feature but can be **extremely difficult** if you rely on a purely flattened DAG. Sub-runs (where each array element gets its own run) can drastically simplify concurrency and dynamic expansion—at the cost of extra run objects in your database.
|
|
206
|
-
|
|
207
|
-
If your prime concern is the complexity of “expanding” the DAG at runtime, **sub-runs** are the more straightforward path. You can still provide a user-friendly DSL like `.fanout([...items], MySubflow)` which behind the scenes launches multiple runs and merges outputs in order. Your `.subflow()` concept can remain intact as a convenient way to define or reference re-usable subflows, but it will revolve around a separate flow entity rather than inlining steps.
|
|
208
|
-
|
|
209
|
-
In short:
|
|
210
|
-
- **Yes, fanout is important.**
|
|
211
|
-
- **Flattening with unknown array sizes can be done but is painful.**
|
|
212
|
-
- **A “unified fanout approach”** is possible if you define a dedicated “fanout step” concept that can handle both single-step tasks and multi-step subflows.
|
|
213
|
-
- **Sub-runs** are often the more **scalable** and **developer-friendly** solution, preserving a neat `.subflow()` abstraction without needing to do dynamic DAG expansions at runtime.
|
|
@@ -1,150 +0,0 @@
|
|
|
1
|
-
# Revisiting the Fanout Challenge: Dynamic Number of Items and Subflows
|
|
2
|
-
|
|
3
|
-
> **Context**
|
|
4
|
-
> In a workflow engine, we often have a _fanout_ scenario: a step receives an array of unknown length at runtime and must process each element in parallel. The engine should keep track of each item’s partial failure/success. How do we represent these **many parallel tasks** in the database while also allowing partial failures, clean skipping logic, etc.?
|
|
5
|
-
|
|
6
|
-
Below we explore:
|
|
7
|
-
|
|
8
|
-
1. **One-Step-With-Multiple-Tasks** using a `task_index` strategy.
|
|
9
|
-
2. Why that’s problematic for partial failure or skipping.
|
|
10
|
-
3. **Best alternative** ideas that balance ease of implementation, schema simplicity, flexibility, and readability.
|
|
11
|
-
|
|
12
|
-
---
|
|
13
|
-
|
|
14
|
-
## 1. One-Step-with-Multiple-Tasks (the `task_index` approach)
|
|
15
|
-
|
|
16
|
-
### 1.1 Rationale
|
|
17
|
-
A direct way to handle fanout is to say:
|
|
18
|
-
- We store a single `step_state` row in `step_states`.
|
|
19
|
-
- We spawn multiple parallel “tasks” for that step, each with a unique `task_index` (0, 1, 2, …).
|
|
20
|
-
- Each `task` processes one item from the input array.
|
|
21
|
-
|
|
22
|
-
This means:
|
|
23
|
-
|
|
24
|
-
- The “fanout” is visible at the **task** level, not the **step** level.
|
|
25
|
-
- All tasks share the same step definition and the same step state row (`step_states`), but we differentiate them with `task_index`.
|
|
26
|
-
|
|
27
|
-
### 1.2 Pros
|
|
28
|
-
|
|
29
|
-
1. **Minimal Schema Changes**
|
|
30
|
-
You don’t need extra columns or special “fanout” logic in `step_states`. The only difference is that you spawn multiple tasks (`fanout_count = array_length`) for that single step.
|
|
31
|
-
2. **Simplicity**
|
|
32
|
-
At first glance, it seems simpler in code: same step, many tasks.
|
|
33
|
-
|
|
34
|
-
### 1.3 Cons & Caveats
|
|
35
|
-
|
|
36
|
-
1. **Partial Failures Are Hard**
|
|
37
|
-
If any single task fails, the entire step is set to `failed`. You can’t have a scenario where _item #2 fails_ but _item #1 and item #3 succeed_ with that step eventually “completing.”
|
|
38
|
-
- The step-level status is a single field: `'created'`, `'started'`, `'completed'`, `'failed'`, or `'skipped'`.
|
|
39
|
-
- If you truly need a partial failure, you can’t represent it with a single step-level status.
|
|
40
|
-
|
|
41
|
-
2. **Skipping Individual Items**
|
|
42
|
-
“Skipping” normally marks the step as `'skipped'`. If you wanted to skip certain items while processing others, the one-step approach doesn’t handle that gracefully. You either skip the entire step or not at all.
|
|
43
|
-
|
|
44
|
-
3. **Inconsistent Aggregation**
|
|
45
|
-
When all tasks finish, you’re forced to unify outputs at the task level. The engine sees only one `step_state.output` for the entire step. Handling an array of partial results or partial failures is clumsy.
|
|
46
|
-
|
|
47
|
-
4. **Limited Future Extensibility**
|
|
48
|
-
If you want to add sub-steps that each item must run, or if item #2 needs a different path, you can’t nest further logic under a single step row. You would be forced to do contrived hacks or spawn new pseudo-steps.
|
|
49
|
-
|
|
50
|
-
---
|
|
51
|
-
|
|
52
|
-
## 2. Why Partial Success or Skipping Matters
|
|
53
|
-
|
|
54
|
-
Real-world scenarios often involve:
|
|
55
|
-
|
|
56
|
-
- **One item is invalid** → skip or fail that item alone, but continue others.
|
|
57
|
-
- **Condition-based skipping** (e.g., `runIf: { isActive: true }`) that might apply differently to each array element.
|
|
58
|
-
- **Chained subflows** for each element (item-based sub-processing).
|
|
59
|
-
|
|
60
|
-
If you rely on a single step row with a single status, you lose the ability to differentiate item-level outcomes. In practice, an entire run might get stuck at “failed” even if only 1% of the items encountered a problem.
|
|
61
|
-
|
|
62
|
-
---
|
|
63
|
-
|
|
64
|
-
## 3. A Better Alternative: “Fanout Subflow” or “Multi-Step Fanout”
|
|
65
|
-
|
|
66
|
-
A more flexible (and only slightly more complex) approach is to create **multiple distinct step-level records** for each item **or** wrap them into subflows dynamically. That is:
|
|
67
|
-
|
|
68
|
-
1. **At runtime**, when the engine sees an array, it spawns a “fanout group” of steps (or subflows).
|
|
69
|
-
2. Each item receives its own “step_state” (or subflow instance).
|
|
70
|
-
3. Each one can independently succeed, fail, or skip.
|
|
71
|
-
4. An optional aggregator step can unify the outputs afterward.
|
|
72
|
-
|
|
73
|
-
### 3.1 Minimal Implementation Sketch
|
|
74
|
-
|
|
75
|
-
1. **Introduce a “fanout_config”** or a “step_type = 'fanout'” column in the `steps` table.
|
|
76
|
-
- This signals that the step can produce multiple child step entries at runtime.
|
|
77
|
-
2. **When the engine sees** that a `fanout` step is ready:
|
|
78
|
-
1. Retrieve the array (e.g. `payload.items`)
|
|
79
|
-
2. Dynamically insert N new `step_states` (or “subflow” entries), one per item.
|
|
80
|
-
3. Mark each child step as `'created'`, then `'started'` if dependencies are satisfied.
|
|
81
|
-
4. For partial failures, only that child’s row is `'failed'`. The rest can complete.
|
|
82
|
-
|
|
83
|
-
3. **Aggregate** the child outputs in a single aggregator step, or store them as an array in the parent. The aggregator is automatically triggered when all children reach a final state.
|
|
84
|
-
|
|
85
|
-
### 3.2 Pros
|
|
86
|
-
1. **Accurate Partial Failure**
|
|
87
|
-
Each item can fail, skip, or succeed. These statuses are reflected individually in `step_states`.
|
|
88
|
-
2. **Clean Skipping**
|
|
89
|
-
If some items are filtered out, they can be set to `'skipped'`. Others proceed normally.
|
|
90
|
-
3. **Extensible to Nested Logic**
|
|
91
|
-
Each “item step” can run further sub-steps or even be a subflow if needed, which is impossible with a single step row.
|
|
92
|
-
4. **Future Condition Support**
|
|
93
|
-
You can combine fanout with `runIf` or `runUnless` at the item level if desired (e.g., skip certain items automatically).
|
|
94
|
-
|
|
95
|
-
### 3.3 Cons
|
|
96
|
-
1. **Slightly More Schema or Engine Logic**
|
|
97
|
-
You must store “fanout” info in the `steps` table or store the child steps as a new row pointing to the parent.
|
|
98
|
-
2. **Potential Table Growth**
|
|
99
|
-
For large arrays, you get many new step-level rows. But that’s precisely what you need if you want item-level tracking.
|
|
100
|
-
|
|
101
|
-
---
|
|
102
|
-
|
|
103
|
-
## 4. The “Subflow for Each Item” Variation
|
|
104
|
-
|
|
105
|
-
An even more powerful approach is to treat each item as its own subflow instance:
|
|
106
|
-
|
|
107
|
-
1. **Subflow Root Step**: Mark the parent step as `step_type = 'subflow'` **plus** `fanout = true`.
|
|
108
|
-
2. **At runtime**, if the array has length `m`, spawn `m` subflows. Each subflow is flattened into your existing `steps` and `step_states` with an index or a new slug prefix.
|
|
109
|
-
3. Each subflow can contain multiple steps, can fail or skip individually.
|
|
110
|
-
4. A final aggregator collects all subflow outputs into an array or a structured object.
|
|
111
|
-
|
|
112
|
-
**Pro**: Perfect if each item requires multiple steps of its own processing.
|
|
113
|
-
**Con**: More table rows, more overhead. But you gain full power with minimal conceptual duplication.
|
|
114
|
-
|
|
115
|
-
---
|
|
116
|
-
|
|
117
|
-
## 5. Conclusion: Recommended Path for Balanced Implementation
|
|
118
|
-
|
|
119
|
-
Below is a proposed approach that balances:
|
|
120
|
-
|
|
121
|
-
- **Ease of Implementation**
|
|
122
|
-
- **Future Flexibility**
|
|
123
|
-
- **Minimal Schema Changes**
|
|
124
|
-
- **Clear Partial Failure Model**
|
|
125
|
-
|
|
126
|
-
1. **Introduce a “fanout” concept** (could be a `boolean` or an object, e.g. `{ path: 'someArrayPath' }`) in your step definition.
|
|
127
|
-
2. **Extend the database** to handle multiple “child step_states” or subflows for each item. Minimal changes might be:
|
|
128
|
-
- A `parent_step_id` field in `step_states` or a small “fanout_metadata” structure.
|
|
129
|
-
- Or store subflows in the same `steps` table with an additional `index` if needed.
|
|
130
|
-
3. **Each child** has its own row in `step_states`, allowing **independent** `'failed'`, `'skipped'`, `'completed'`.
|
|
131
|
-
4. **An aggregator** or the parent step can unify the final outcome (collate arrays, detect partial fails, etc.).
|
|
132
|
-
|
|
133
|
-
**Why this is best**:
|
|
134
|
-
- **Flexibility**: You can add more steps for each item (like sub-steps) later without forced kludges.
|
|
135
|
-
- **Readability**: The DB statuses match intuitive runtime reality (each item is its own “mini-run” or “mini-step”).
|
|
136
|
-
- **Simplicity of Conditions**: If you want to skip certain items entirely, you can do so on each item’s row. No complex shared step state needed.
|
|
137
|
-
- **Minimal Overhead**: You only add extra rows (or subflow expansions) when the array is discovered, which is exactly when you need them.
|
|
138
|
-
|
|
139
|
-
---
|
|
140
|
-
|
|
141
|
-
### Final Takeaway
|
|
142
|
-
|
|
143
|
-
Although using a single step with multiple tasks (identified by `task_index`) seems tempting at first, it quickly becomes unwieldy for partial failures and item-level skipping. A more robust solution is to model each item’s work as its own step or subflow record. This approach:
|
|
144
|
-
|
|
145
|
-
- Preserves partial success/failure.
|
|
146
|
-
- Integrates cleanly with skipping logic.
|
|
147
|
-
- Ensures future fanout + multi-step item processing is possible.
|
|
148
|
-
- Remains relatively simple in terms of schema changes (one or two extra columns plus some runtime logic to spawn item-level states).
|
|
149
|
-
|
|
150
|
-
This is the path that will keep a growing fanout and subflow system both **powerful** and **maintainable** as your workflow engine evolves.
|
|
@@ -1,239 +0,0 @@
|
|
|
1
|
-
# Analyzing the Impact of `runIf` / `runUnless` and Subflows with Potential Fanout
|
|
2
|
-
|
|
3
|
-
This document explores how the upcoming _conditional logic_ (`runIf` / `runUnless`) and _subflows/branching_—specifically with flattening but keeping a `parent_subflow_slug` column and `step_type = subflow`—can set the stage for a **fanout** mechanism. “Fanout” in this context means that one step or subflow would produce an array of items, and each item would spawn its own parallel step or subflow execution.
|
|
4
|
-
|
|
5
|
-
We will revisit the previously discussed abstractions (flattening subflows vs. separate runs vs. a “middle ground” approach) and analyze how adding a fanout concept interacts with conditions (`runIf` / `runUnless`) in each case. The goal is to strike a balance between **MVP simplicity** and future extensibility.
|
|
6
|
-
|
|
7
|
-
---
|
|
8
|
-
|
|
9
|
-
## 1. Introduction and Background
|
|
10
|
-
|
|
11
|
-
### 1.1 Conditional Execution with `runIf` & `runUnless`
|
|
12
|
-
In the near-future implementation, each step can optionally be guarded by conditions:
|
|
13
|
-
|
|
14
|
-
- **`runIf`**: The step runs if this condition is **true** given the step’s input.
|
|
15
|
-
- **`runUnless`**: The step runs if this condition is **false** given the step’s input.
|
|
16
|
-
|
|
17
|
-
Any step for which these conditions fail is marked **`skipped`**. Transitive dependencies down the line also get skipped.
|
|
18
|
-
|
|
19
|
-
### 1.2 Subflows / Branching with “Flattened Steps” & `parent_subflow_slug`
|
|
20
|
-
We plan to store subflows in the same `steps` table as a “flattened” structure but with:
|
|
21
|
-
|
|
22
|
-
- A `step_type` column (e.g. `subflow` vs. `normal`).
|
|
23
|
-
- A `parent_subflow_slug` so we can group subflow steps.
|
|
24
|
-
- The possibility of referencing subflow-wide configuration in a “subflow step.”
|
|
25
|
-
|
|
26
|
-
This approach acts as a middle ground between:
|
|
27
|
-
|
|
28
|
-
1. *Fully flattening* subflow steps (just name them differently).
|
|
29
|
-
2. *Fully separate runs* for each subflow (with separate run IDs).
|
|
30
|
-
|
|
31
|
-
With the “flattened-with-parent” approach, everything remains within the same run ID, but we can identify subflows as distinct groups.
|
|
32
|
-
|
|
33
|
-
### 1.3 The New Fanout Concept
|
|
34
|
-
A **fanout** instructs the engine to spawn multiple tasks or sub-subflows in parallel when the input is an array (or some multi-value structure). For a normal step, setting a flag like `fanout: true` (or a more structured property like `fanout: { path: "someArrayField" }`) could cause creation of N parallel tasks, one per array element. For a subflow, this approach would run the entire subflow once per array item, also in parallel. Either way, we want the step DSL to handle:
|
|
35
|
-
|
|
36
|
-
- `runIf` / `runUnless` as usual.
|
|
37
|
-
- `fanout` as an **additional** or **alternative** property.
|
|
38
|
-
|
|
39
|
-
The idea is to keep the design MVP-friendly: minimal friction, consistent with how we already define conditions or branching.
|
|
40
|
-
|
|
41
|
-
---
|
|
42
|
-
|
|
43
|
-
## 2. Revisiting Flattening vs. Keeping a `parent_subflow_slug`
|
|
44
|
-
### 2.1 Flatten-Only Approach
|
|
45
|
-
If we were to flatten subflows entirely without references to a parent subflow entity, implementing fanout might require:
|
|
46
|
-
|
|
47
|
-
- Marking each expanded step with a unique slug suffix per array item.
|
|
48
|
-
- Dealing with a large blow-up in the steps table for big arrays.
|
|
49
|
-
|
|
50
|
-
**Pros:**
|
|
51
|
-
- Very direct: one steps table, everything is expanded in a single dimension.
|
|
52
|
-
- Minimal changes to dependency resolution.
|
|
53
|
-
|
|
54
|
-
**Cons:**
|
|
55
|
-
- Harder to see subflow groupings in the DB or UI.
|
|
56
|
-
- Potentially large expansions for big fanouts.
|
|
57
|
-
|
|
58
|
-
### 2.2 Using `parent_subflow_slug` (and `step_type = subflow`)
|
|
59
|
-
If each subflow is recorded as a special step row (e.g. `step_type = subflow`), then child steps are associated with the subflow’s slug. This approach is especially helpful for:
|
|
60
|
-
|
|
61
|
-
- **Visibility**: We can quickly see which steps belong to subflow “X” or “Y.”
|
|
62
|
-
- **Configuration**: Subflow-wide overrides (like `maxAttempts`).
|
|
63
|
-
- **Dependency Handling**: The parent flow can treat the subflow as a single “virtual step,” while the engine internally expands it.
|
|
64
|
-
|
|
65
|
-
**Where Fanout Fits**
|
|
66
|
-
With subflows, if we set `fanout: true` on that subflow root, the engine would spawn one parallel subflow instance per array element. Each instance has the same internal steps. This remains a “flattened” approach under the hood, but the system can group them by subflow ID or `parent_subflow_slug` plus an index for each array item.
|
|
67
|
-
|
|
68
|
-
---
|
|
69
|
-
|
|
70
|
-
## 3. Potential Implementation of Fanout + Conditions
|
|
71
|
-
|
|
72
|
-
### 3.1 Adding a `fanout` Property in Parallel to `runIf` / `runUnless`
|
|
73
|
-
We could treat **fanout** as a top-level property on the step definition, e.g.:
|
|
74
|
-
|
|
75
|
-
```ts
|
|
76
|
-
flow.step(
|
|
77
|
-
{
|
|
78
|
-
slug: "ProcessItems",
|
|
79
|
-
dependsOn: ["FetchArray"],
|
|
80
|
-
runIf: {...},
|
|
81
|
-
runUnless: {...},
|
|
82
|
-
fanout: {
|
|
83
|
-
path: "items", // or a boolean if there's only one array
|
|
84
|
-
concurrency?: 5 // optional concurrency limit
|
|
85
|
-
},
|
|
86
|
-
},
|
|
87
|
-
handlerFn
|
|
88
|
-
);
|
|
89
|
-
```
|
|
90
|
-
|
|
91
|
-
**Behavior**:
|
|
92
|
-
1. **Condition Check** (`runIf`, `runUnless`) – If the conditions fail, the entire step is `skipped`, no tasks are spawned.
|
|
93
|
-
2. If not skipped and `fanout` is set – The engine looks up the array at `payload.items` and spawns one “task” (or subflow instance) per element.
|
|
94
|
-
3. Each parallel instance has the item data embedded in its payload.
|
|
95
|
-
|
|
96
|
-
### 3.2 Subflows with Fanout
|
|
97
|
-
When `fanout: true` is specified on a subflow root step:
|
|
98
|
-
|
|
99
|
-
1. The subflow root step is repeated for each array element.
|
|
100
|
-
2. All child steps of that subflow are repeated accordingly.
|
|
101
|
-
3. The final aggregated output is typically an array of subflow outputs.
|
|
102
|
-
|
|
103
|
-
**Challenges**:
|
|
104
|
-
- **Skipping**: If `runIf` fails on the subflow root, the entire subflow is skipped.
|
|
105
|
-
- **Mixed Condition**: Potentially each item in the array could have a different condition outcome. If the condition depends on that item, you might skip some items but not others.
|
|
106
|
-
- **Aggregating Results**: The engine needs a consistent way to merge all partial or skipped results back.
|
|
107
|
-
|
|
108
|
-
---
|
|
109
|
-
|
|
110
|
-
## 4. Challenges Combining Conditions + Fanout
|
|
111
|
-
|
|
112
|
-
1. **Per-Item Condition Checking**
|
|
113
|
-
For a fanout step, we might want to do `runIf` at the item level. For example, only spawn tasks for items that meet a certain criterion. This might require a “filter” concept or skipping logic per item.
|
|
114
|
-
|
|
115
|
-
2. **Complexity in UI**
|
|
116
|
-
If a subflow fans out to 1,000 items, we don’t want to see 1,000 sub-step expansions in a naive interface. We may need summarizations or chunking.
|
|
117
|
-
|
|
118
|
-
3. **Transitive Dependencies**
|
|
119
|
-
If a subflow or step fans out, its downstream steps must wait for all parallel tasks or subflows to complete (unless we have more advanced partial-latching conditions). A “runIf” might skip the entire set or skip partial subsets.
|
|
120
|
-
|
|
121
|
-
4. **Performance**
|
|
122
|
-
Large fanouts can generate many tasks. We need to ensure our scheduling and skipping logic in the database is efficient.
|
|
123
|
-
|
|
124
|
-
Despite these challenges, the core conceptual structure stays the same: we store subflows with a `parent_subflow_slug` or `step_type`, use conditions to skip steps, and add a new concept for spawning parallel tasks when the input is an array.
|
|
125
|
-
|
|
126
|
-
---
|
|
127
|
-
|
|
128
|
-
## 5. Emphasizing MVP Readiness Over Complexity
|
|
129
|
-
|
|
130
|
-
A recommended MVP approach to keep things simple:
|
|
131
|
-
|
|
132
|
-
1. **Allow a Boolean `fanout` or a basic `fanout: { path: string }`.**
|
|
133
|
-
- If `fanout: true`, we assume the entire input to this step is already an array.
|
|
134
|
-
- If `fanout: { path }`, we look up `payload[path]` at runtime.
|
|
135
|
-
|
|
136
|
-
2. **Apply `runIf` / `runUnless` at the step level**
|
|
137
|
-
- If conditions fail, skip the entire fanout step.
|
|
138
|
-
- In the first iteration, do not provide a built-in mechanism for skipping _some_ items but running others. That can be a future enhancement.
|
|
139
|
-
|
|
140
|
-
3. **In subflows**, if `fanout: true` is on the subflow’s root, fan out the entire subflow.
|
|
141
|
-
- One subflow instance per array element.
|
|
142
|
-
- All sub-steps reference the per-item context.
|
|
143
|
-
- This keeps each item neatly grouped in subflow “batches,” and the parent sees it as a single entity (the root step or subflow aggregator).
|
|
144
|
-
|
|
145
|
-
4. **Keep the schema additions minimal.**
|
|
146
|
-
- Possibly store a `fanout_config jsonb` next to `run_if_condition` / `run_unless_condition`.
|
|
147
|
-
- Use existing “skipped” statuses for steps or tasks.
|
|
148
|
-
|
|
149
|
-
This incremental approach yields some immediate wins:
|
|
150
|
-
|
|
151
|
-
- Parallel item processing out of the box.
|
|
152
|
-
- Condition-based skipping.
|
|
153
|
-
- Subflows remain a single “block,” which can also be fanned out if needed.
|
|
154
|
-
|
|
155
|
-
---
|
|
156
|
-
|
|
157
|
-
## 6. Illustrating Fanout + Conditions in Each Subflow Abstraction
|
|
158
|
-
|
|
159
|
-
### 6.1 Flattened Only
|
|
160
|
-
- Steps for each item plus `_index` appended to the step slug.
|
|
161
|
-
- `runIf` / `runUnless` applies to the entire set; either everything gets spawned or everything is skipped.
|
|
162
|
-
- Some difficulty grouping & referencing subflow blocks if the array is large.
|
|
163
|
-
|
|
164
|
-
### 6.2 Flattened With `parent_subflow_slug`
|
|
165
|
-
- Mark the root subflow step with `step_type = subflow`.
|
|
166
|
-
- If `fanout = true` on that subflow step, it spawns N subflow “instances.”
|
|
167
|
-
- For each instance (i.e., each array item), the steps all share a consistent subflow group ID plus an index.
|
|
168
|
-
- The engine aggregates them at the subflow root or final subflow aggregator step.
|
|
169
|
-
|
|
170
|
-
### 6.3 Full Separate Runs for Each Item
|
|
171
|
-
- You’d create a new run for each array item.
|
|
172
|
-
- Overkill for an MVP. This approach quickly becomes complex, though it’s highly decoupled.
|
|
173
|
-
|
|
174
|
-
---
|
|
175
|
-
|
|
176
|
-
## 7. Creative Ideas for Simplicity
|
|
177
|
-
|
|
178
|
-
1. **Single “Fanout Step” Type**
|
|
179
|
-
Instead of layering `fanout: true`, define `step_type: 'fanout'`. This might reduce confusion by clarifying that the step always has multiple tasks.
|
|
180
|
-
|
|
181
|
-
2. **`runIf` as an Inline Filter**
|
|
182
|
-
Instead of skipping the entire step, allow a filter expression to remove some items from the array automatically. (“If item.status != 'active', skip that item.”)
|
|
183
|
-
|
|
184
|
-
3. **Hybrid Condition + Fanout Logging**
|
|
185
|
-
For debugging, store “item-based skip logs” to show which items were processed or skipped.
|
|
186
|
-
|
|
187
|
-
4. **Parallelism Limits**
|
|
188
|
-
A simple concurrency or “max parallel” property could avoid large floods of tasks in enormous fanouts.
|
|
189
|
-
|
|
190
|
-
5. **Subflow Post-Process**
|
|
191
|
-
Let the subflow aggregator define a single “merge” function that processes all item outputs, especially for conditions that skip a subset of items.
|
|
192
|
-
|
|
193
|
-
---
|
|
194
|
-
|
|
195
|
-
## 8. **Unexpected Alternative Solutions**
|
|
196
|
-
Below are five novel ideas that combine conditions, subflows, and fanouts in ways not previously explored in depth:
|
|
197
|
-
|
|
198
|
-
1. **Conditional Fanout at the Item Level**
|
|
199
|
-
Instead of skipping the entire step if `runIf` is false, do a partial item-level condition. For example, feed the array into a small function that returns only the items that pass `runIf`. Then only those items spawn tasks. This is a “built-in filter step.”
|
|
200
|
-
|
|
201
|
-
2. **Multi-layered Subflows with Dynamic Fanout**
|
|
202
|
-
A subflow might itself produce an array for another deeper subflow fanout. This vertical layering allows a “fan-out, gather, fan-out again” approach. At each subflow boundary, you can apply `runIf` or `runUnless` to skip entire sub-layers.
|
|
203
|
-
|
|
204
|
-
3. **Fanout as a Separate “Split” Step**
|
|
205
|
-
Instead of toggling a property on a normal step, have a dedicated “split” step that transforms a single object into multiple “emit” events. Each “emit” becomes a parallel path. Then, an optional “join” step merges them back. This more explicitly models fanout/fanin concepts.
|
|
206
|
-
|
|
207
|
-
4. **Conditional Subflow Branch**
|
|
208
|
-
Combine “branching logic” with fanout by letting a subflow pick which internal steps to spawn based on item-level conditions. The subflow might skip half its steps or sub-branches per item. This is more of a “decision tree” built within the subflow.
|
|
209
|
-
|
|
210
|
-
5. **DSL-Level Macros**
|
|
211
|
-
Introduce a DSL macro that auto-generates subflows for each item, each with its own runIf/unless. The user writes a single step, and the macro expands it behind the scenes. In effect, you write a simple block, and it compiles down to a set of parallel runIf-laden steps or subflows in concurrency form.
|
|
212
|
-
|
|
213
|
-
---
|
|
214
|
-
|
|
215
|
-
## 9. Conclusion and Final Recommendation
|
|
216
|
-
|
|
217
|
-
Summarizing the key points:
|
|
218
|
-
|
|
219
|
-
1. **Conditions + Fanout Fit Well Together**
|
|
220
|
-
- Mark a step or subflow with `runIf` / `runUnless` to decide if it should execute at all.
|
|
221
|
-
- If not skipped, a `fanout` property spawns parallel executions for each array item.
|
|
222
|
-
|
|
223
|
-
2. **Flattened Approach with a “Subflow Step”**
|
|
224
|
-
- Recommend continuing with the plan to store subflows in the same run, but label them with `step_type = subflow` and `parent_subflow_slug`.
|
|
225
|
-
- This approach maintains a single run while still letting the DB and UI group subflow steps.
|
|
226
|
-
|
|
227
|
-
3. **MVP-Ready, Minimal Implementation**
|
|
228
|
-
- Keep “fanout” either a simple boolean or a small config like `fanout: { path: "myArray" }`.
|
|
229
|
-
- Handle skipping at the top-level step first (all-or-nothing). Future expansions can filter per item.
|
|
230
|
-
|
|
231
|
-
4. **Adopt Simple Aggregation**
|
|
232
|
-
- When a fanout step completes, produce an array of outputs. For subflows, gather final step outputs from each parallel instance.
|
|
233
|
-
- The aggregator can be a single “virtual step” or the subflow root that marks when all parallel tasks/subflows finish.
|
|
234
|
-
|
|
235
|
-
5. **Balance Complexity Against Clarity**
|
|
236
|
-
- Provide enough detail to handle real-world array processing in parallel.
|
|
237
|
-
- Avoid partial-skip complexities in an initial MVP. That can be introduced later as a “filtered fanout” or “conditional item-level skip.”
|
|
238
|
-
|
|
239
|
-
By following this path, you keep the schema changes small, the runtime logic consistent with existing `skipped` status, and lay the foundation for robust parallelization scenarios. The net result is a flexible, developer-friendly approach to conditions, subflows, and fanout—without incurring unnecessary complexity upfront.
|
|
@@ -1,38 +0,0 @@
|
|
|
1
|
-
new Flow<string>({ slug: 'analyzeWebsite' })
|
|
2
|
-
.step({ slug: 'website' }, async ({ run }) => await fetchData(run.url))
|
|
3
|
-
.branch(
|
|
4
|
-
{
|
|
5
|
-
slug: 'ifSuccess',
|
|
6
|
-
dependsOn: ['website'],
|
|
7
|
-
runIf: { website: { status: 200 } },
|
|
8
|
-
},
|
|
9
|
-
(flow) =>
|
|
10
|
-
flow
|
|
11
|
-
.step({ slug: 'sentiment' }, async ({ run, website }) =>
|
|
12
|
-
saveData(run.url, website.body)
|
|
13
|
-
)
|
|
14
|
-
.step({ slug: 'summary' }, async ({ run, website }) =>
|
|
15
|
-
saveData(run.url, website.body)
|
|
16
|
-
)
|
|
17
|
-
.step(
|
|
18
|
-
{ slug: 'saveToDb', dependsOn: ['summary', 'sentiment'] },
|
|
19
|
-
async (payload) =>
|
|
20
|
-
await slackHandler({
|
|
21
|
-
url: payload.run.url,
|
|
22
|
-
summary: payload.summary,
|
|
23
|
-
sentiment: payload.sentiment,
|
|
24
|
-
})
|
|
25
|
-
)
|
|
26
|
-
.step(
|
|
27
|
-
{ slug: 'sendSlackMessage', dependsOn: ['saveToDb'] },
|
|
28
|
-
async ({ saveToDb }) => await slackHandler({ message: '' })
|
|
29
|
-
)
|
|
30
|
-
)
|
|
31
|
-
.branch(
|
|
32
|
-
{
|
|
33
|
-
slug: 'ifFailure',
|
|
34
|
-
dependsOn: ['website'],
|
|
35
|
-
runUnless: { website: { status: 200 } },
|
|
36
|
-
},
|
|
37
|
-
(flow) => flow.step({ slug: 'notifySentry' })
|
|
38
|
-
);
|