@gravito/flux 3.0.0 → 3.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +298 -0
- package/bin/flux.js +25 -1
- package/dev/viewer/app.js +4 -4
- package/dist/bun.cjs +2 -2
- package/dist/bun.cjs.map +1 -1
- package/dist/bun.d.cts +65 -26
- package/dist/bun.d.ts +65 -26
- package/dist/bun.js +1 -1
- package/dist/chunk-4DXCQ6CL.js +3486 -0
- package/dist/chunk-4DXCQ6CL.js.map +1 -0
- package/dist/chunk-6AZNHVEO.cjs +316 -0
- package/dist/chunk-6AZNHVEO.cjs.map +1 -0
- package/dist/{chunk-ZAMVC732.js → chunk-NAIVO7RR.js} +64 -15
- package/dist/chunk-NAIVO7RR.js.map +1 -0
- package/dist/chunk-WAPZDXSX.cjs +3486 -0
- package/dist/chunk-WAPZDXSX.cjs.map +1 -0
- package/dist/chunk-WGDTB6OC.js +316 -0
- package/dist/chunk-WGDTB6OC.js.map +1 -0
- package/dist/{chunk-SJSPR4ZU.cjs → chunk-YXBEYVGY.cjs} +66 -17
- package/dist/chunk-YXBEYVGY.cjs.map +1 -0
- package/dist/cli/flux-visualize.cjs +108 -0
- package/dist/cli/flux-visualize.cjs.map +1 -0
- package/dist/cli/flux-visualize.d.cts +1 -0
- package/dist/cli/flux-visualize.d.ts +1 -0
- package/dist/cli/flux-visualize.js +108 -0
- package/dist/cli/flux-visualize.js.map +1 -0
- package/dist/index.cjs +100 -12
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +402 -12
- package/dist/index.d.ts +402 -12
- package/dist/index.js +98 -10
- package/dist/index.js.map +1 -1
- package/dist/index.node.cjs +11 -3
- package/dist/index.node.cjs.map +1 -1
- package/dist/index.node.d.cts +1114 -258
- package/dist/index.node.d.ts +1114 -258
- package/dist/index.node.js +10 -2
- package/dist/types-CRz5XdLd.d.cts +433 -0
- package/dist/types-CRz5XdLd.d.ts +433 -0
- package/package.json +17 -6
- package/dist/chunk-LULCFPIK.js +0 -1004
- package/dist/chunk-LULCFPIK.js.map +0 -1
- package/dist/chunk-SJSPR4ZU.cjs.map +0 -1
- package/dist/chunk-X3NC7HS4.cjs +0 -1004
- package/dist/chunk-X3NC7HS4.cjs.map +0 -1
- package/dist/chunk-ZAMVC732.js.map +0 -1
- package/dist/types-cnIU1O3n.d.cts +0 -250
- package/dist/types-cnIU1O3n.d.ts +0 -250
package/dist/index.node.d.ts
CHANGED
|
@@ -1,288 +1,399 @@
|
|
|
1
|
-
import {
|
|
2
|
-
export {
|
|
1
|
+
import { l as WorkflowContext, q as StepHandlerResult, W as WorkflowDefinition, m as WorkflowDescriptor, a as WorkflowState, o as WorkflowStatus, h as LockProvider, L as Lock, S as StepDefinition, j as StepExecution, k as StepResult, c as FluxLogger, p as WorkflowStorage, n as WorkflowFilter, g as FluxTraceSink, e as FluxTraceEvent, b as FluxConfig, d as FluxResult, C as CronScheduleOptions } from './types-CRz5XdLd.js';
|
|
2
|
+
export { f as FluxTraceEventType, M as MemoryLockProvider, i as StepDescriptor } from './types-CRz5XdLd.js';
|
|
3
|
+
import { GravitoOrbit, PlanetCore } from '@gravito/core';
|
|
3
4
|
|
|
4
5
|
/**
|
|
5
|
-
*
|
|
6
|
-
*
|
|
7
|
-
* Type-safe, chainable workflow definition.
|
|
8
|
-
*
|
|
9
|
-
* @module @gravito/flux/builder
|
|
10
|
-
*/
|
|
11
|
-
|
|
12
|
-
/**
|
|
13
|
-
* Step options
|
|
6
|
+
* Configuration options for a workflow step.
|
|
7
|
+
* Allows fine-tuning of execution behavior such as retries, timeouts, and conditional logic.
|
|
14
8
|
*/
|
|
15
|
-
interface StepOptions<TInput =
|
|
9
|
+
interface StepOptions<TInput = unknown, TData = Record<string, any>> {
|
|
10
|
+
/** Maximum number of retry attempts on failure. */
|
|
16
11
|
retries?: number;
|
|
12
|
+
/** Execution time limit in milliseconds. */
|
|
17
13
|
timeout?: number;
|
|
14
|
+
/** Predicate to determine if the step should execute based on current context. */
|
|
18
15
|
when?: (ctx: WorkflowContext<TInput, TData>) => boolean;
|
|
16
|
+
/** Logic to execute for rolling back changes if a subsequent step fails. */
|
|
19
17
|
compensate?: (ctx: WorkflowContext<TInput, TData>) => Promise<void> | void;
|
|
20
18
|
}
|
|
19
|
+
interface ParallelStepConfig<TInput = unknown, TData = Record<string, any>> {
|
|
20
|
+
name: string;
|
|
21
|
+
handler: (ctx: WorkflowContext<TInput, TData>) => StepHandlerResult;
|
|
22
|
+
options?: StepOptions<TInput, TData>;
|
|
23
|
+
/** Shorthand for options.compensate - rollback logic if subsequent steps fail */
|
|
24
|
+
compensate?: (ctx: WorkflowContext<TInput, TData>) => Promise<void> | void;
|
|
25
|
+
/** Shorthand for options.retries */
|
|
26
|
+
retries?: number;
|
|
27
|
+
/** Shorthand for options.timeout */
|
|
28
|
+
timeout?: number;
|
|
29
|
+
/** Shorthand for options.when */
|
|
30
|
+
when?: (ctx: WorkflowContext<TInput, TData>) => boolean;
|
|
31
|
+
}
|
|
21
32
|
/**
|
|
22
|
-
*
|
|
23
|
-
*
|
|
24
|
-
*
|
|
33
|
+
* A fluent API for defining workflows in a type-safe manner.
|
|
34
|
+
* The builder pattern ensures that workflows are constructed with all necessary components
|
|
35
|
+
* before being passed to the execution engine.
|
|
25
36
|
*
|
|
26
37
|
* @example
|
|
27
38
|
* ```typescript
|
|
28
|
-
* const
|
|
29
|
-
* .input<{
|
|
30
|
-
* .step('validate',
|
|
31
|
-
*
|
|
32
|
-
* })
|
|
33
|
-
* .step('process', async (ctx) => {
|
|
34
|
-
* await processOrder(ctx.data.order)
|
|
35
|
-
* }, {
|
|
36
|
-
* compensate: async (ctx) => {
|
|
37
|
-
* await cancelOrder(ctx.data.order.id)
|
|
38
|
-
* }
|
|
39
|
-
* })
|
|
40
|
-
* .commit('notify', async (ctx) => {
|
|
41
|
-
* await sendEmail(ctx.data.order.email)
|
|
42
|
-
* })
|
|
39
|
+
* const flow = new WorkflowBuilder('order-process')
|
|
40
|
+
* .input<{ id: string }>()
|
|
41
|
+
* .step('validate', (ctx) => { ... })
|
|
42
|
+
* .build();
|
|
43
43
|
* ```
|
|
44
44
|
*/
|
|
45
|
-
declare class WorkflowBuilder<TInput = unknown, TData = Record<string,
|
|
45
|
+
declare class WorkflowBuilder<TInput = unknown, TData = Record<string, any>> {
|
|
46
46
|
private _name;
|
|
47
|
+
private _version?;
|
|
47
48
|
private _steps;
|
|
48
49
|
private _validateInput?;
|
|
50
|
+
private _parallelGroupCounter;
|
|
51
|
+
/**
|
|
52
|
+
* Initializes a new workflow builder with a unique name.
|
|
53
|
+
* @param name - The identifier for this workflow definition.
|
|
54
|
+
*/
|
|
49
55
|
constructor(name: string);
|
|
50
56
|
/**
|
|
51
|
-
*
|
|
52
|
-
*
|
|
53
|
-
*
|
|
57
|
+
* Defines the expected input type for the workflow.
|
|
58
|
+
* This is a type-only operation that enables compile-time safety for subsequent steps.
|
|
59
|
+
* @returns A builder instance with the specified input type.
|
|
54
60
|
*/
|
|
55
61
|
input<T>(): WorkflowBuilder<T, TData>;
|
|
56
62
|
/**
|
|
57
|
-
*
|
|
58
|
-
*
|
|
59
|
-
* This method is used for TypeScript type inference.
|
|
63
|
+
* Defines the structure of the shared data object used across steps.
|
|
64
|
+
* @returns A builder instance with the specified data type.
|
|
60
65
|
*/
|
|
61
|
-
data<T
|
|
66
|
+
data<T extends Record<string, any>>(): WorkflowBuilder<TInput, T>;
|
|
62
67
|
/**
|
|
63
|
-
*
|
|
68
|
+
* Sets the semantic version of this workflow definition.
|
|
69
|
+
* @param v - A semantic version string (e.g., "1.0.0", "2.1.0").
|
|
70
|
+
* @returns The builder instance for chaining.
|
|
71
|
+
*/
|
|
72
|
+
version(v: string): this;
|
|
73
|
+
/**
|
|
74
|
+
* Attaches a runtime validator for the workflow input.
|
|
75
|
+
* @param validator - A type guard function to verify input integrity.
|
|
76
|
+
* @returns The builder instance for chaining.
|
|
64
77
|
*/
|
|
65
78
|
validate(validator: (input: unknown) => input is TInput): this;
|
|
66
79
|
/**
|
|
67
|
-
*
|
|
80
|
+
* Adds a standard processing step to the workflow.
|
|
81
|
+
* Standard steps are subject to compensation if the workflow fails later.
|
|
82
|
+
*
|
|
83
|
+
* @param name - Unique name for the step.
|
|
84
|
+
* @param handler - The business logic to execute.
|
|
85
|
+
* @param options - Optional execution configuration.
|
|
86
|
+
* @returns The builder instance for chaining.
|
|
87
|
+
*/
|
|
88
|
+
step(name: string, handler: (ctx: WorkflowContext<TInput, TData>) => StepHandlerResult, options?: StepOptions<TInput, TData>): this;
|
|
89
|
+
/**
|
|
90
|
+
* Adds multiple steps that execute in parallel.
|
|
91
|
+
* All steps in a parallel group will run concurrently and must all succeed before proceeding.
|
|
92
|
+
*
|
|
93
|
+
* @param steps - Array of step configurations to execute in parallel.
|
|
94
|
+
* @returns The builder instance for chaining.
|
|
95
|
+
*
|
|
96
|
+
* @example
|
|
97
|
+
* ```typescript
|
|
98
|
+
* workflow.stepParallel([
|
|
99
|
+
* { name: 'fetch-user', handler: async (ctx) => { ctx.data.user = await getUser() } },
|
|
100
|
+
* { name: 'fetch-orders', handler: async (ctx) => { ctx.data.orders = await getOrders() } },
|
|
101
|
+
* { name: 'fetch-profile', handler: async (ctx) => { ctx.data.profile = await getProfile() } }
|
|
102
|
+
* ])
|
|
103
|
+
* ```
|
|
68
104
|
*/
|
|
69
|
-
|
|
105
|
+
stepParallel(steps: ParallelStepConfig<TInput, TData>[]): this;
|
|
70
106
|
/**
|
|
71
|
-
*
|
|
107
|
+
* Adds a "commit" step that represents a permanent side-effect.
|
|
108
|
+
* Commit steps are intended for operations that should not be rolled back
|
|
109
|
+
* or re-executed during certain replay scenarios.
|
|
72
110
|
*
|
|
73
|
-
*
|
|
74
|
-
*
|
|
111
|
+
* @param name - Unique name for the step.
|
|
112
|
+
* @param handler - The side-effect logic to execute.
|
|
113
|
+
* @param options - Optional execution configuration (compensation is not allowed).
|
|
114
|
+
* @returns The builder instance for chaining.
|
|
75
115
|
*/
|
|
76
116
|
commit(name: string, handler: (ctx: WorkflowContext<TInput, TData>) => Promise<void> | void, options?: Omit<StepOptions<TInput, TData>, 'compensate'>): this;
|
|
77
117
|
/**
|
|
78
|
-
*
|
|
118
|
+
* Finalizes the workflow definition.
|
|
119
|
+
* @returns A complete workflow blueprint ready for execution.
|
|
120
|
+
* @throws Error if the workflow has no steps defined.
|
|
79
121
|
*/
|
|
80
122
|
build(): WorkflowDefinition<TInput, TData>;
|
|
81
123
|
/**
|
|
82
|
-
*
|
|
124
|
+
* Generates a structural description of the workflow for introspection.
|
|
125
|
+
* @returns A descriptor containing step metadata.
|
|
83
126
|
*/
|
|
84
127
|
describe(): WorkflowDescriptor;
|
|
85
|
-
/**
|
|
86
|
-
* Get workflow name
|
|
87
|
-
*/
|
|
128
|
+
/** The name of the workflow being built. */
|
|
88
129
|
get name(): string;
|
|
89
|
-
/**
|
|
90
|
-
* Get step count
|
|
91
|
-
*/
|
|
130
|
+
/** The number of steps currently defined in the workflow. */
|
|
92
131
|
get stepCount(): number;
|
|
93
132
|
}
|
|
94
133
|
/**
|
|
95
|
-
*
|
|
134
|
+
* Factory function to initiate a new workflow definition.
|
|
96
135
|
*
|
|
97
|
-
* @param name -
|
|
98
|
-
* @returns WorkflowBuilder instance
|
|
136
|
+
* @param name - The unique name for the workflow.
|
|
137
|
+
* @returns A new WorkflowBuilder instance.
|
|
99
138
|
*
|
|
100
139
|
* @example
|
|
101
140
|
* ```typescript
|
|
102
|
-
* const
|
|
103
|
-
* .
|
|
104
|
-
* .
|
|
105
|
-
* ctx.data.resized = await sharp(ctx.input.file).resize(200).toBuffer()
|
|
106
|
-
* })
|
|
107
|
-
* .commit('save', async (ctx) => {
|
|
108
|
-
* await storage.put(ctx.data.resized)
|
|
109
|
-
* })
|
|
141
|
+
* const flow = createWorkflow('my-flow')
|
|
142
|
+
* .step('hello', () => console.log('world'))
|
|
143
|
+
* .build();
|
|
110
144
|
* ```
|
|
111
145
|
*/
|
|
112
146
|
declare function createWorkflow(name: string): WorkflowBuilder;
|
|
113
147
|
|
|
114
148
|
/**
|
|
115
|
-
*
|
|
116
|
-
*
|
|
117
|
-
* Orchestrates workflow execution with storage and event handling.
|
|
118
|
-
*
|
|
119
|
-
* @module @gravito/flux
|
|
120
|
-
*/
|
|
121
|
-
|
|
122
|
-
/**
|
|
123
|
-
* Flux Engine
|
|
149
|
+
* Orchestrates the lifecycle and state transformations of a workflow context.
|
|
124
150
|
*
|
|
125
|
-
*
|
|
151
|
+
* The ContextManager is responsible for initializing new contexts, restoring them from
|
|
152
|
+
* persisted states, and performing immutable updates during execution.
|
|
126
153
|
*
|
|
127
154
|
* @example
|
|
128
155
|
* ```typescript
|
|
129
|
-
* const
|
|
130
|
-
*
|
|
131
|
-
* const workflow = createWorkflow('process-order')
|
|
132
|
-
* .input<{ orderId: string }>()
|
|
133
|
-
* .step('fetch', async (ctx) => { ... })
|
|
134
|
-
* .step('validate', async (ctx) => { ... })
|
|
135
|
-
* .commit('save', async (ctx) => { ... })
|
|
136
|
-
*
|
|
137
|
-
* const result = await engine.execute(workflow, { orderId: '123' })
|
|
156
|
+
* const manager = new ContextManager();
|
|
157
|
+
* const ctx = manager.create('order-flow', { id: '123' }, 5);
|
|
138
158
|
* ```
|
|
139
159
|
*/
|
|
140
|
-
declare class
|
|
141
|
-
private storage;
|
|
142
|
-
private executor;
|
|
143
|
-
private contextManager;
|
|
144
|
-
private config;
|
|
145
|
-
constructor(config?: FluxConfig);
|
|
160
|
+
declare class ContextManager {
|
|
146
161
|
/**
|
|
147
|
-
*
|
|
162
|
+
* Initializes a fresh workflow context with a pending status and empty history.
|
|
148
163
|
*
|
|
149
|
-
* @param
|
|
150
|
-
* @param input -
|
|
151
|
-
* @
|
|
152
|
-
|
|
153
|
-
execute<TInput, TData = any>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, input: TInput): Promise<FluxResult<TData>>;
|
|
154
|
-
/**
|
|
155
|
-
* Resume a paused or failed workflow
|
|
164
|
+
* @param name - The human-readable identifier for the workflow type.
|
|
165
|
+
* @param input - The initial data required to start the workflow.
|
|
166
|
+
* @param stepCount - Total number of steps defined in the workflow for history pre-allocation.
|
|
167
|
+
* @returns A new WorkflowContext instance.
|
|
156
168
|
*
|
|
157
|
-
* @
|
|
158
|
-
*
|
|
169
|
+
* @example
|
|
170
|
+
* ```typescript
|
|
171
|
+
* const ctx = manager.create('signup', { email: 'user@example.com' }, 3);
|
|
172
|
+
* ```
|
|
159
173
|
*/
|
|
160
|
-
|
|
161
|
-
fromStep?: number | string;
|
|
162
|
-
}): Promise<FluxResult<TData> | null>;
|
|
163
|
-
/**
|
|
164
|
-
* Send a signal to a suspended workflow
|
|
165
|
-
*/
|
|
166
|
-
signal<TInput, TData = any>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, workflowId: string, signalName: string, payload?: any): Promise<FluxResult<TData>>;
|
|
167
|
-
/**
|
|
168
|
-
* Retry a specific step (replays from that step onward)
|
|
169
|
-
*/
|
|
170
|
-
retryStep<TInput, TData = any>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, workflowId: string, stepName: string): Promise<FluxResult<TData> | null>;
|
|
171
|
-
/**
|
|
172
|
-
* Get workflow state by ID
|
|
173
|
-
*/
|
|
174
|
-
get<TInput = any, TData = any>(workflowId: string): Promise<WorkflowState<TInput, TData> | null>;
|
|
174
|
+
create<TInput, TData extends Record<string, any> = Record<string, any>>(name: string, input: TInput, stepCount: number): WorkflowContext<TInput, TData>;
|
|
175
175
|
/**
|
|
176
|
-
*
|
|
176
|
+
* Reconstructs a workflow context from a previously persisted state.
|
|
177
|
+
*
|
|
178
|
+
* Used for resuming suspended workflows or replaying failed ones from a specific point.
|
|
179
|
+
*
|
|
180
|
+
* @param state - The persisted state object.
|
|
181
|
+
* @returns A hydrated WorkflowContext ready for execution.
|
|
182
|
+
*
|
|
183
|
+
* @example
|
|
184
|
+
* ```typescript
|
|
185
|
+
* const state = await storage.load(id);
|
|
186
|
+
* const ctx = manager.restore(state);
|
|
187
|
+
* ```
|
|
177
188
|
*/
|
|
178
|
-
|
|
189
|
+
restore<TInput, TData extends Record<string, any> = Record<string, any>>(state: WorkflowState<TInput, TData>): WorkflowContext<TInput, TData>;
|
|
179
190
|
/**
|
|
180
|
-
*
|
|
191
|
+
* Converts a runtime context into a serializable state for persistence.
|
|
192
|
+
*
|
|
193
|
+
* Captures the current progress, data, and execution history.
|
|
194
|
+
*
|
|
195
|
+
* @param ctx - The active workflow context.
|
|
196
|
+
* @returns A serializable WorkflowState object.
|
|
197
|
+
*
|
|
198
|
+
* @example
|
|
199
|
+
* ```typescript
|
|
200
|
+
* const state = manager.toState(ctx);
|
|
201
|
+
* await storage.save(state);
|
|
202
|
+
* ```
|
|
181
203
|
*/
|
|
182
|
-
|
|
204
|
+
toState<TInput, TData extends Record<string, any>>(ctx: WorkflowContext<TInput, TData>): WorkflowState<TInput, TData>;
|
|
183
205
|
/**
|
|
184
|
-
*
|
|
206
|
+
* Updates the overall status of the workflow.
|
|
207
|
+
*
|
|
208
|
+
* @param ctx - The current context.
|
|
209
|
+
* @param status - The new status to apply.
|
|
210
|
+
* @returns A new context instance with the updated status.
|
|
211
|
+
*
|
|
212
|
+
* @example
|
|
213
|
+
* ```typescript
|
|
214
|
+
* const runningCtx = manager.updateStatus(ctx, 'running');
|
|
215
|
+
* ```
|
|
185
216
|
*/
|
|
186
|
-
|
|
217
|
+
updateStatus<TInput, TData extends Record<string, any>>(ctx: WorkflowContext<TInput, TData>, status: WorkflowStatus): WorkflowContext<TInput, TData>;
|
|
187
218
|
/**
|
|
188
|
-
*
|
|
219
|
+
* Increments the current step pointer.
|
|
220
|
+
*
|
|
221
|
+
* @param ctx - The current context.
|
|
222
|
+
* @returns A new context instance pointing to the next step.
|
|
223
|
+
*
|
|
224
|
+
* @example
|
|
225
|
+
* ```typescript
|
|
226
|
+
* const nextStepCtx = manager.advanceStep(ctx);
|
|
227
|
+
* console.log(nextStepCtx.currentStep); // ctx.currentStep + 1
|
|
228
|
+
* ```
|
|
189
229
|
*/
|
|
190
|
-
|
|
191
|
-
private resolveDefinition;
|
|
192
|
-
private resolveStartIndex;
|
|
193
|
-
private resetHistoryFrom;
|
|
230
|
+
advanceStep<TInput, TData extends Record<string, any>>(ctx: WorkflowContext<TInput, TData>): WorkflowContext<TInput, TData>;
|
|
194
231
|
/**
|
|
195
|
-
*
|
|
232
|
+
* Assigns a name to a specific step in the execution history.
|
|
233
|
+
*
|
|
234
|
+
* Useful for tracking which step is currently being executed or has been completed.
|
|
235
|
+
*
|
|
236
|
+
* @param ctx - The current context.
|
|
237
|
+
* @param index - The index of the step in the history array.
|
|
238
|
+
* @param name - The name to assign to the step.
|
|
239
|
+
* @returns A new context instance with the updated history.
|
|
240
|
+
*
|
|
241
|
+
* @example
|
|
242
|
+
* ```typescript
|
|
243
|
+
* const namedCtx = manager.setStepName(ctx, 0, 'validate-user');
|
|
244
|
+
* console.log(namedCtx.history[0].name); // 'validate-user'
|
|
245
|
+
* ```
|
|
196
246
|
*/
|
|
197
|
-
|
|
198
|
-
private runFrom;
|
|
199
|
-
private emitTrace;
|
|
247
|
+
setStepName<TInput, TData extends Record<string, any>>(ctx: WorkflowContext<TInput, TData>, index: number, name: string): WorkflowContext<TInput, TData>;
|
|
200
248
|
}
|
|
201
249
|
|
|
202
250
|
/**
|
|
203
|
-
* @fileoverview
|
|
251
|
+
* @fileoverview Redis-based Lock Provider for distributed workflow execution
|
|
204
252
|
*
|
|
205
|
-
*
|
|
253
|
+
* Provides a Redis-backed distributed locking mechanism for coordinating
|
|
254
|
+
* workflow execution across multiple nodes in a cluster.
|
|
206
255
|
*
|
|
207
|
-
* @module @gravito/flux/
|
|
256
|
+
* @module @gravito/flux/core
|
|
208
257
|
*/
|
|
209
258
|
|
|
210
259
|
/**
|
|
211
|
-
*
|
|
212
|
-
*
|
|
213
|
-
* In-memory storage adapter for development and testing.
|
|
214
|
-
* Data is not persisted across restarts.
|
|
260
|
+
* Minimal Redis client interface for compatibility with ioredis/redis libraries.
|
|
261
|
+
* Consumers should provide a client that implements these methods.
|
|
215
262
|
*/
|
|
216
|
-
|
|
217
|
-
private store;
|
|
218
|
-
save(state: WorkflowState): Promise<void>;
|
|
219
|
-
load(id: string): Promise<WorkflowState | null>;
|
|
220
|
-
list(filter?: WorkflowFilter): Promise<WorkflowState[]>;
|
|
221
|
-
delete(id: string): Promise<void>;
|
|
222
|
-
init(): Promise<void>;
|
|
223
|
-
close(): Promise<void>;
|
|
263
|
+
interface RedisClient {
|
|
224
264
|
/**
|
|
225
|
-
*
|
|
265
|
+
* SET command with optional NX and PX options.
|
|
266
|
+
* @param key - The key to set
|
|
267
|
+
* @param value - The value to set
|
|
268
|
+
* @param options - SET options (NX for only-if-not-exists, PX for expiry in ms)
|
|
269
|
+
* @returns 'OK' if set, null if key exists (with NX)
|
|
226
270
|
*/
|
|
227
|
-
|
|
271
|
+
set(key: string, value: string, options?: {
|
|
272
|
+
NX?: boolean;
|
|
273
|
+
PX?: number;
|
|
274
|
+
}): Promise<'OK' | null>;
|
|
275
|
+
/**
|
|
276
|
+
* GET command to retrieve a value.
|
|
277
|
+
* @param key - The key to get
|
|
278
|
+
* @returns The value or null if not found
|
|
279
|
+
*/
|
|
280
|
+
get(key: string): Promise<string | null>;
|
|
281
|
+
/**
|
|
282
|
+
* DEL command to delete a key.
|
|
283
|
+
* @param key - The key to delete
|
|
284
|
+
* @returns Number of keys deleted
|
|
285
|
+
*/
|
|
286
|
+
del(key: string): Promise<number>;
|
|
287
|
+
/**
|
|
288
|
+
* EVAL command to execute Lua scripts atomically.
|
|
289
|
+
* @param script - The Lua script
|
|
290
|
+
* @param keys - Array of keys
|
|
291
|
+
* @param args - Array of arguments
|
|
292
|
+
* @returns Script result
|
|
293
|
+
*/
|
|
294
|
+
eval(script: string, keys: string[], args: (string | number)[]): Promise<unknown>;
|
|
228
295
|
}
|
|
229
|
-
|
|
230
296
|
/**
|
|
231
|
-
*
|
|
232
|
-
*
|
|
233
|
-
* Writes trace events to a newline-delimited JSON file.
|
|
297
|
+
* Configuration options for RedisLockProvider.
|
|
234
298
|
*/
|
|
235
|
-
|
|
236
|
-
interface
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
299
|
+
interface RedisLockProviderOptions {
|
|
300
|
+
/** Redis client instance (must implement RedisClient interface) */
|
|
301
|
+
client: RedisClient;
|
|
302
|
+
/** Prefix for all lock keys (default: 'flux:lock:') */
|
|
303
|
+
keyPrefix?: string;
|
|
304
|
+
/** Default TTL for locks in milliseconds (default: 30000) */
|
|
305
|
+
defaultTtl?: number;
|
|
306
|
+
/** Delay between retry attempts in milliseconds (default: 100) */
|
|
307
|
+
retryDelay?: number;
|
|
308
|
+
/** Maximum number of retry attempts (default: 0, no retries) */
|
|
309
|
+
maxRetries?: number;
|
|
246
310
|
}
|
|
247
|
-
|
|
248
311
|
/**
|
|
249
|
-
*
|
|
312
|
+
* Redis-based implementation of LockProvider for distributed locking.
|
|
250
313
|
*
|
|
251
|
-
*
|
|
314
|
+
* Uses Redis SET NX PX for atomic lock acquisition and Lua scripts
|
|
315
|
+
* for safe release and refresh operations.
|
|
252
316
|
*
|
|
253
|
-
* @
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
*
|
|
317
|
+
* @example
|
|
318
|
+
* ```typescript
|
|
319
|
+
* import Redis from 'ioredis'
|
|
320
|
+
* import { RedisLockProvider } from '@gravito/flux'
|
|
321
|
+
*
|
|
322
|
+
* const redis = new Redis()
|
|
323
|
+
* const lockProvider = new RedisLockProvider({
|
|
324
|
+
* client: redis,
|
|
325
|
+
* keyPrefix: 'myapp:locks:',
|
|
326
|
+
* defaultTtl: 30000,
|
|
327
|
+
* })
|
|
258
328
|
*
|
|
259
|
-
*
|
|
329
|
+
* const lock = await lockProvider.acquire('workflow-123', 'node-1', 30000)
|
|
330
|
+
* if (lock) {
|
|
331
|
+
* try {
|
|
332
|
+
* // Do work with the lock
|
|
333
|
+
* } finally {
|
|
334
|
+
* await lock.release()
|
|
335
|
+
* }
|
|
336
|
+
* }
|
|
337
|
+
* ```
|
|
260
338
|
*/
|
|
261
|
-
declare class
|
|
339
|
+
declare class RedisLockProvider implements LockProvider {
|
|
340
|
+
private readonly client;
|
|
341
|
+
private readonly keyPrefix;
|
|
342
|
+
private readonly defaultTtl;
|
|
343
|
+
private readonly retryDelay;
|
|
344
|
+
private readonly maxRetries;
|
|
345
|
+
constructor(options: RedisLockProviderOptions);
|
|
346
|
+
/**
|
|
347
|
+
* Attempts to acquire a lock for a specific resource.
|
|
348
|
+
*
|
|
349
|
+
* Uses Redis SET with NX (only if not exists) and PX (expire in ms)
|
|
350
|
+
* for atomic lock acquisition. Supports retry with configurable delay.
|
|
351
|
+
*
|
|
352
|
+
* @param resourceId - The unique ID of the resource to lock
|
|
353
|
+
* @param owner - The identifier of the node/process requesting the lock
|
|
354
|
+
* @param ttl - Time-to-live for the lock in milliseconds
|
|
355
|
+
* @returns A Lock object if successful, otherwise null
|
|
356
|
+
*/
|
|
357
|
+
acquire(resourceId: string, owner: string, ttl: number): Promise<Lock | null>;
|
|
262
358
|
/**
|
|
263
|
-
*
|
|
359
|
+
* Refreshes an existing lock to extend its lifetime.
|
|
360
|
+
*
|
|
361
|
+
* Uses a Lua script to atomically check ownership and extend TTL.
|
|
362
|
+
*
|
|
363
|
+
* @param resourceId - The ID of the resource
|
|
364
|
+
* @param owner - The current owner of the lock
|
|
365
|
+
* @param ttl - The new time-to-live from the current moment
|
|
366
|
+
* @returns True if the lock was successfully refreshed
|
|
264
367
|
*/
|
|
265
|
-
|
|
368
|
+
refresh(resourceId: string, owner: string, ttl: number): Promise<boolean>;
|
|
266
369
|
/**
|
|
267
|
-
*
|
|
370
|
+
* Forcefully releases a lock, regardless of the owner.
|
|
371
|
+
*
|
|
372
|
+
* @param resourceId - The ID of the resource to unlock
|
|
268
373
|
*/
|
|
269
|
-
|
|
374
|
+
release(resourceId: string): Promise<void>;
|
|
270
375
|
/**
|
|
271
|
-
*
|
|
376
|
+
* Safely releases a lock only if owned by the specified owner.
|
|
377
|
+
*
|
|
378
|
+
* Uses a Lua script to atomically check ownership and delete.
|
|
379
|
+
*
|
|
380
|
+
* @param resourceId - The ID of the resource to unlock
|
|
381
|
+
* @param owner - The owner that should release the lock
|
|
382
|
+
* @returns True if the lock was released, false if not owned
|
|
272
383
|
*/
|
|
273
|
-
|
|
384
|
+
releaseIfOwned(resourceId: string, owner: string): Promise<boolean>;
|
|
274
385
|
/**
|
|
275
|
-
*
|
|
386
|
+
* Generates the Redis key for a resource.
|
|
276
387
|
*/
|
|
277
|
-
|
|
388
|
+
private getKey;
|
|
278
389
|
/**
|
|
279
|
-
*
|
|
390
|
+
* Creates a Lock object with a release method.
|
|
280
391
|
*/
|
|
281
|
-
|
|
392
|
+
private createLock;
|
|
282
393
|
/**
|
|
283
|
-
*
|
|
394
|
+
* Sleeps for the specified duration.
|
|
284
395
|
*/
|
|
285
|
-
|
|
396
|
+
private sleep;
|
|
286
397
|
}
|
|
287
398
|
|
|
288
399
|
/**
|
|
@@ -294,36 +405,93 @@ declare class ContextManager {
|
|
|
294
405
|
*/
|
|
295
406
|
|
|
296
407
|
/**
|
|
297
|
-
*
|
|
408
|
+
* Manages the lifecycle states of a workflow instance.
|
|
298
409
|
*
|
|
299
|
-
*
|
|
410
|
+
* The StateMachine enforces transition rules and notifies listeners of state changes.
|
|
411
|
+
* It is designed to be the single source of truth for a workflow's current progress.
|
|
412
|
+
*
|
|
413
|
+
* @example
|
|
414
|
+
* ```typescript
|
|
415
|
+
* const sm = new StateMachine();
|
|
416
|
+
* sm.addEventListener('transition', (e) => console.log(e.detail));
|
|
417
|
+
* sm.transition('running');
|
|
418
|
+
* ```
|
|
300
419
|
*/
|
|
301
420
|
declare class StateMachine extends EventTarget {
|
|
302
421
|
private _status;
|
|
303
422
|
/**
|
|
304
|
-
*
|
|
423
|
+
* The current operational status of the workflow.
|
|
305
424
|
*/
|
|
306
425
|
get status(): WorkflowStatus;
|
|
307
426
|
/**
|
|
308
|
-
*
|
|
427
|
+
* Evaluates if a transition to the specified status is valid from the current state.
|
|
428
|
+
*
|
|
429
|
+
* @param to - The target status to check.
|
|
430
|
+
* @returns True if the transition is permitted by the transition map.
|
|
431
|
+
*
|
|
432
|
+
* @example
|
|
433
|
+
* ```typescript
|
|
434
|
+
* if (sm.canTransition('completed')) {
|
|
435
|
+
* sm.transition('completed');
|
|
436
|
+
* }
|
|
437
|
+
* ```
|
|
309
438
|
*/
|
|
310
439
|
canTransition(to: WorkflowStatus): boolean;
|
|
311
440
|
/**
|
|
312
|
-
*
|
|
441
|
+
* Moves the workflow to a new status if the transition is valid.
|
|
442
|
+
*
|
|
443
|
+
* @param to - The target status.
|
|
444
|
+
* @throws {Error} If the transition is illegal according to the defined rules.
|
|
313
445
|
*
|
|
314
|
-
* @
|
|
446
|
+
* @example
|
|
447
|
+
* ```typescript
|
|
448
|
+
* try {
|
|
449
|
+
* sm.transition('completed');
|
|
450
|
+
* } catch (e) {
|
|
451
|
+
* // Handle invalid transition
|
|
452
|
+
* }
|
|
453
|
+
* ```
|
|
315
454
|
*/
|
|
316
455
|
transition(to: WorkflowStatus): void;
|
|
317
456
|
/**
|
|
318
|
-
*
|
|
457
|
+
* Overrides the current status without validation.
|
|
458
|
+
*
|
|
459
|
+
* This should only be used during workflow restoration from persisted storage
|
|
460
|
+
* or when replaying history where the state is already known to be valid.
|
|
461
|
+
*
|
|
462
|
+
* @param status - The status to force set.
|
|
463
|
+
*
|
|
464
|
+
* @example
|
|
465
|
+
* ```typescript
|
|
466
|
+
* // Restore state from database
|
|
467
|
+
* sm.forceStatus(storedState.status);
|
|
468
|
+
* ```
|
|
319
469
|
*/
|
|
320
470
|
forceStatus(status: WorkflowStatus): void;
|
|
321
471
|
/**
|
|
322
|
-
*
|
|
472
|
+
* Determines if the workflow has reached a state where no further execution is possible.
|
|
473
|
+
*
|
|
474
|
+
* @returns True if the status is 'completed', 'failed', or 'rolled_back'.
|
|
475
|
+
*
|
|
476
|
+
* @example
|
|
477
|
+
* ```typescript
|
|
478
|
+
* if (sm.isTerminal()) {
|
|
479
|
+
* console.log('Workflow finished');
|
|
480
|
+
* }
|
|
481
|
+
* ```
|
|
323
482
|
*/
|
|
324
483
|
isTerminal(): boolean;
|
|
325
484
|
/**
|
|
326
|
-
*
|
|
485
|
+
* Checks if the workflow is in a state that allows for execution or resumption.
|
|
486
|
+
*
|
|
487
|
+
* @returns True if the workflow can be started or resumed.
|
|
488
|
+
*
|
|
489
|
+
* @example
|
|
490
|
+
* ```typescript
|
|
491
|
+
* if (sm.canExecute()) {
|
|
492
|
+
* await engine.run();
|
|
493
|
+
* }
|
|
494
|
+
* ```
|
|
327
495
|
*/
|
|
328
496
|
canExecute(): boolean;
|
|
329
497
|
}
|
|
@@ -337,29 +505,81 @@ declare class StateMachine extends EventTarget {
|
|
|
337
505
|
*/
|
|
338
506
|
|
|
339
507
|
/**
|
|
340
|
-
*
|
|
508
|
+
* Handles the isolated execution of a single workflow step.
|
|
341
509
|
*
|
|
342
|
-
*
|
|
510
|
+
* The StepExecutor manages the operational aspects of step execution, including
|
|
511
|
+
* condition checking, retry logic with exponential backoff, and timeout enforcement.
|
|
512
|
+
*
|
|
513
|
+
* @example
|
|
514
|
+
* ```typescript
|
|
515
|
+
* const executor = new StepExecutor({ defaultRetries: 3 });
|
|
516
|
+
* const { result } = await executor.execute(stepDef, ctx, executionRecord);
|
|
517
|
+
* ```
|
|
343
518
|
*/
|
|
344
519
|
declare class StepExecutor {
|
|
345
520
|
private defaultRetries;
|
|
346
521
|
private defaultTimeout;
|
|
347
522
|
private onRetry?;
|
|
523
|
+
/**
|
|
524
|
+
* Creates a new StepExecutor with global defaults.
|
|
525
|
+
*
|
|
526
|
+
* @param options - Configuration for default behavior and lifecycle hooks.
|
|
527
|
+
*/
|
|
348
528
|
constructor(options?: {
|
|
349
529
|
defaultRetries?: number;
|
|
350
530
|
defaultTimeout?: number;
|
|
351
531
|
onRetry?: (step: StepDefinition<any, any>, ctx: WorkflowContext<any, any>, error: Error, attempt: number, maxRetries: number) => void | Promise<void>;
|
|
352
532
|
});
|
|
353
533
|
/**
|
|
354
|
-
*
|
|
534
|
+
* Executes a step definition against a workflow context.
|
|
535
|
+
*
|
|
536
|
+
* This method performs the following sequence:
|
|
537
|
+
* 1. Evaluates the `when` condition (if present).
|
|
538
|
+
* 2. Initiates the execution loop with retries.
|
|
539
|
+
* 3. Enforces timeouts for each attempt.
|
|
540
|
+
* 4. Handles suspension signals (`flux_wait`).
|
|
541
|
+
* 5. Updates the execution history record.
|
|
542
|
+
*
|
|
543
|
+
* @param step - The definition of the step to execute.
|
|
544
|
+
* @param ctx - The current workflow context.
|
|
545
|
+
* @param execution - The current execution record for this step.
|
|
546
|
+
* @returns The result of the execution and the updated execution record.
|
|
547
|
+
*
|
|
548
|
+
* @throws {Error} If the step handler throws an unrecoverable error or times out.
|
|
549
|
+
*
|
|
550
|
+
* @example
|
|
551
|
+
* ```typescript
|
|
552
|
+
* const { result, execution } = await executor.execute(
|
|
553
|
+
* stepDefinition,
|
|
554
|
+
* currentContext,
|
|
555
|
+
* currentExecution
|
|
556
|
+
* );
|
|
557
|
+
*
|
|
558
|
+
* if (!result.success) {
|
|
559
|
+
* console.error(result.error);
|
|
560
|
+
* }
|
|
561
|
+
* ```
|
|
355
562
|
*/
|
|
356
|
-
execute<TInput, TData
|
|
563
|
+
execute<TInput, TData extends Record<string, any>>(step: StepDefinition<TInput, TData>, ctx: WorkflowContext<TInput, TData>, execution: StepExecution): Promise<{
|
|
564
|
+
result: StepResult;
|
|
565
|
+
execution: StepExecution;
|
|
566
|
+
}>;
|
|
357
567
|
/**
|
|
358
|
-
*
|
|
568
|
+
* Wraps the step handler in a timeout race.
|
|
569
|
+
*
|
|
570
|
+
* @param handler - The user-defined step handler.
|
|
571
|
+
* @param ctx - The workflow context.
|
|
572
|
+
* @param timeout - Maximum time allowed for execution in milliseconds.
|
|
573
|
+
* @returns The handler result or a suspension signal.
|
|
574
|
+
* @throws {Error} If the timeout is reached before the handler completes.
|
|
575
|
+
* @private
|
|
359
576
|
*/
|
|
360
577
|
private executeWithTimeout;
|
|
361
578
|
/**
|
|
362
|
-
*
|
|
579
|
+
* Pauses execution for a specified duration.
|
|
580
|
+
*
|
|
581
|
+
* @param ms - Milliseconds to sleep.
|
|
582
|
+
* @private
|
|
363
583
|
*/
|
|
364
584
|
private sleep;
|
|
365
585
|
}
|
|
@@ -373,106 +593,123 @@ declare class StepExecutor {
|
|
|
373
593
|
*/
|
|
374
594
|
|
|
375
595
|
/**
|
|
376
|
-
* Console Logger
|
|
596
|
+
* Console Logger implementation for FluxEngine.
|
|
377
597
|
*
|
|
378
|
-
*
|
|
598
|
+
* Provides a standard way to output workflow execution logs to the system console
|
|
599
|
+
* with a configurable prefix for easy filtering.
|
|
379
600
|
*
|
|
380
601
|
* @example
|
|
381
602
|
* ```typescript
|
|
382
|
-
* const
|
|
383
|
-
*
|
|
384
|
-
* })
|
|
603
|
+
* const logger = new FluxConsoleLogger('[OrderFlow]');
|
|
604
|
+
* logger.info('Workflow started', { orderId: '123' });
|
|
385
605
|
* ```
|
|
386
606
|
*/
|
|
387
607
|
declare class FluxConsoleLogger implements FluxLogger {
|
|
388
608
|
private prefix;
|
|
609
|
+
/**
|
|
610
|
+
* Creates a new console logger instance.
|
|
611
|
+
*
|
|
612
|
+
* @param prefix - String prepended to every log message for identification.
|
|
613
|
+
*/
|
|
389
614
|
constructor(prefix?: string);
|
|
615
|
+
/**
|
|
616
|
+
* Logs fine-grained informational events that are most useful to debug an application.
|
|
617
|
+
*
|
|
618
|
+
* @param message - The primary log message.
|
|
619
|
+
* @param args - Additional metadata or objects to include in the log output.
|
|
620
|
+
*/
|
|
390
621
|
debug(message: string, ...args: unknown[]): void;
|
|
622
|
+
/**
|
|
623
|
+
* Logs informational messages that highlight the progress of the application at coarse-grained level.
|
|
624
|
+
*
|
|
625
|
+
* @param message - The primary log message.
|
|
626
|
+
* @param args - Additional metadata or objects to include in the log output.
|
|
627
|
+
*/
|
|
391
628
|
info(message: string, ...args: unknown[]): void;
|
|
629
|
+
/**
|
|
630
|
+
* Logs potentially harmful situations that should be noted but don't stop execution.
|
|
631
|
+
*
|
|
632
|
+
* @param message - The primary log message.
|
|
633
|
+
* @param args - Additional metadata or objects to include in the log output.
|
|
634
|
+
*/
|
|
392
635
|
warn(message: string, ...args: unknown[]): void;
|
|
636
|
+
/**
|
|
637
|
+
* Logs error events that might still allow the application to continue running.
|
|
638
|
+
*
|
|
639
|
+
* @param message - The primary log message.
|
|
640
|
+
* @param args - Additional metadata or objects to include in the log output.
|
|
641
|
+
*/
|
|
393
642
|
error(message: string, ...args: unknown[]): void;
|
|
394
643
|
}
|
|
395
644
|
/**
|
|
396
|
-
* Silent Logger
|
|
645
|
+
* Silent Logger implementation that discards all log entries.
|
|
397
646
|
*
|
|
398
|
-
*
|
|
647
|
+
* Useful for production environments where logging is handled externally,
|
|
648
|
+
* or for unit tests where console output should be suppressed.
|
|
649
|
+
*
|
|
650
|
+
* @example
|
|
651
|
+
* ```typescript
|
|
652
|
+
* const engine = new FluxEngine({
|
|
653
|
+
* logger: new FluxSilentLogger()
|
|
654
|
+
* });
|
|
655
|
+
* ```
|
|
399
656
|
*/
|
|
400
657
|
declare class FluxSilentLogger implements FluxLogger {
|
|
658
|
+
/** Discards debug logs. */
|
|
401
659
|
debug(): void;
|
|
660
|
+
/** Discards info logs. */
|
|
402
661
|
info(): void;
|
|
662
|
+
/** Discards warning logs. */
|
|
403
663
|
warn(): void;
|
|
664
|
+
/** Discards error logs. */
|
|
404
665
|
error(): void;
|
|
405
666
|
}
|
|
406
667
|
|
|
407
668
|
/**
|
|
408
|
-
*
|
|
669
|
+
* Configuration options for the OrbitFlux extension.
|
|
409
670
|
*
|
|
410
|
-
*
|
|
411
|
-
*
|
|
412
|
-
* @module @gravito/flux
|
|
413
|
-
*/
|
|
414
|
-
|
|
415
|
-
/**
|
|
416
|
-
* Minimal PlanetCore interface for type compatibility
|
|
417
|
-
* (Avoids importing @gravito/core sources which causes rootDir issues)
|
|
418
|
-
*/
|
|
419
|
-
interface PlanetCore {
|
|
420
|
-
logger: {
|
|
421
|
-
debug(message: string, ...args: unknown[]): void;
|
|
422
|
-
info(message: string, ...args: unknown[]): void;
|
|
423
|
-
warn(message: string, ...args: unknown[]): void;
|
|
424
|
-
error(message: string, ...args: unknown[]): void;
|
|
425
|
-
};
|
|
426
|
-
services: {
|
|
427
|
-
set(key: string, value: unknown): void;
|
|
428
|
-
get<T>(key: string): T | undefined;
|
|
429
|
-
};
|
|
430
|
-
hooks: {
|
|
431
|
-
doAction(name: string, payload?: unknown): void;
|
|
432
|
-
};
|
|
433
|
-
}
|
|
434
|
-
/**
|
|
435
|
-
* GravitoOrbit interface
|
|
436
|
-
*/
|
|
437
|
-
interface GravitoOrbit {
|
|
438
|
-
install(core: PlanetCore): void | Promise<void>;
|
|
439
|
-
}
|
|
440
|
-
/**
|
|
441
|
-
* OrbitFlux configuration options
|
|
671
|
+
* Defines how the workflow engine should be initialized and integrated into the Gravito core.
|
|
442
672
|
*/
|
|
443
673
|
interface OrbitFluxOptions {
|
|
444
674
|
/**
|
|
445
|
-
*
|
|
446
|
-
*
|
|
675
|
+
* Specifies the storage driver to use for persisting workflow states.
|
|
676
|
+
*
|
|
677
|
+
* - 'memory': Volatile in-memory storage (default).
|
|
678
|
+
* - 'sqlite': Persistent SQLite storage (Bun only).
|
|
679
|
+
* - WorkflowStorage: A custom implementation of the storage interface.
|
|
447
680
|
*/
|
|
448
681
|
storage?: 'memory' | 'sqlite' | WorkflowStorage;
|
|
449
682
|
/**
|
|
450
|
-
*
|
|
451
|
-
*
|
|
683
|
+
* The file system path for the SQLite database.
|
|
684
|
+
*
|
|
685
|
+
* Only applicable when `storage` is set to 'sqlite'.
|
|
452
686
|
*/
|
|
453
687
|
dbPath?: string;
|
|
454
688
|
/**
|
|
455
|
-
*
|
|
456
|
-
* @default 'flux'
|
|
689
|
+
* The key under which the FluxEngine will be registered in the core service container.
|
|
457
690
|
*/
|
|
458
691
|
exposeAs?: string;
|
|
459
692
|
/**
|
|
460
|
-
*
|
|
693
|
+
* A custom logger implementation for the workflow engine.
|
|
694
|
+
*
|
|
695
|
+
* If not provided, the engine will use a wrapper around the core logger.
|
|
461
696
|
*/
|
|
462
697
|
logger?: FluxLogger;
|
|
463
698
|
/**
|
|
464
|
-
*
|
|
465
|
-
* @default 3
|
|
699
|
+
* The default number of times a workflow step should be retried upon failure.
|
|
466
700
|
*/
|
|
467
701
|
defaultRetries?: number;
|
|
468
702
|
/**
|
|
469
|
-
*
|
|
470
|
-
* @default 30000
|
|
703
|
+
* The default timeout in milliseconds for workflow steps.
|
|
471
704
|
*/
|
|
472
705
|
defaultTimeout?: number;
|
|
473
706
|
}
|
|
474
707
|
/**
|
|
475
|
-
* OrbitFlux
|
|
708
|
+
* OrbitFlux integrates the Flux workflow engine into the Gravito Galaxy Architecture.
|
|
709
|
+
*
|
|
710
|
+
* It acts as an "Orbit" (infrastructure extension) that provides a managed `FluxEngine`
|
|
711
|
+
* instance to the core application and other satellites. It also bridges engine events
|
|
712
|
+
* to the core hook system.
|
|
476
713
|
*
|
|
477
714
|
* @example
|
|
478
715
|
* ```typescript
|
|
@@ -480,33 +717,652 @@ interface OrbitFluxOptions {
|
|
|
480
717
|
*
|
|
481
718
|
* const core = await PlanetCore.boot({
|
|
482
719
|
* orbits: [
|
|
483
|
-
* new OrbitFlux({
|
|
720
|
+
* new OrbitFlux({
|
|
721
|
+
* storage: 'sqlite',
|
|
722
|
+
* dbPath: './data/workflows.db'
|
|
723
|
+
* })
|
|
484
724
|
* ]
|
|
485
725
|
* })
|
|
486
726
|
*
|
|
487
|
-
* // Access
|
|
488
|
-
* const flux = core.
|
|
489
|
-
* await flux.execute(myWorkflow, input)
|
|
727
|
+
* // Access the engine from the container
|
|
728
|
+
* const flux = core.container.make<FluxEngine>('flux')
|
|
490
729
|
* ```
|
|
491
730
|
*/
|
|
492
731
|
declare class OrbitFlux implements GravitoOrbit {
|
|
493
732
|
private options;
|
|
494
733
|
private engine?;
|
|
734
|
+
/**
|
|
735
|
+
* Initializes a new OrbitFlux instance with the given options.
|
|
736
|
+
*
|
|
737
|
+
* @param options - Configuration for the workflow engine integration.
|
|
738
|
+
*/
|
|
495
739
|
constructor(options?: OrbitFluxOptions);
|
|
496
740
|
/**
|
|
497
|
-
*
|
|
741
|
+
* Static factory method to create and configure an OrbitFlux instance.
|
|
742
|
+
*
|
|
743
|
+
* @param options - Configuration for the workflow engine integration.
|
|
744
|
+
* @returns A configured OrbitFlux instance.
|
|
498
745
|
*/
|
|
499
746
|
static configure(options?: OrbitFluxOptions): OrbitFlux;
|
|
500
747
|
/**
|
|
501
|
-
*
|
|
748
|
+
* Installs the Flux workflow engine into the Gravito core.
|
|
502
749
|
*
|
|
503
|
-
*
|
|
750
|
+
* This method resolves the storage adapter, initializes it, configures the engine
|
|
751
|
+
* with core-integrated logging and hooks, and registers the engine in the IoC container.
|
|
752
|
+
*
|
|
753
|
+
* @param core - The PlanetCore instance being booted.
|
|
754
|
+
* @throws {Error} If storage initialization fails or engine registration conflicts occur.
|
|
504
755
|
*/
|
|
505
756
|
install(core: PlanetCore): Promise<void>;
|
|
506
757
|
/**
|
|
507
|
-
*
|
|
758
|
+
* Retrieves the managed FluxEngine instance.
|
|
759
|
+
*
|
|
760
|
+
* @returns The FluxEngine instance, or undefined if the orbit has not been installed.
|
|
508
761
|
*/
|
|
509
762
|
getEngine(): FluxEngine | undefined;
|
|
763
|
+
/**
|
|
764
|
+
* Performs cleanup operations when the core is shutting down.
|
|
765
|
+
*
|
|
766
|
+
* Closes the underlying workflow engine and its storage connections.
|
|
767
|
+
*
|
|
768
|
+
* @throws {Error} If the engine fails to close cleanly.
|
|
769
|
+
*/
|
|
770
|
+
cleanup(): Promise<void>;
|
|
771
|
+
}
|
|
772
|
+
|
|
773
|
+
/**
|
|
774
|
+
* @fileoverview In-memory storage adapter
|
|
775
|
+
*
|
|
776
|
+
* Simple storage for development and testing.
|
|
777
|
+
*
|
|
778
|
+
* @module @gravito/flux/storage
|
|
779
|
+
*/
|
|
780
|
+
|
|
781
|
+
/**
|
|
782
|
+
* MemoryStorage provides a volatile, in-memory storage backend for Flux workflows.
|
|
783
|
+
*
|
|
784
|
+
* It is primarily intended for development, testing, and ephemeral workloads where persistence
|
|
785
|
+
* across process restarts is not required.
|
|
786
|
+
*
|
|
787
|
+
* @example
|
|
788
|
+
* ```typescript
|
|
789
|
+
* const storage = new MemoryStorage();
|
|
790
|
+
* await storage.save(workflowState);
|
|
791
|
+
* const state = await storage.load('workflow-id');
|
|
792
|
+
* ```
|
|
793
|
+
*/
|
|
794
|
+
declare class MemoryStorage implements WorkflowStorage {
|
|
795
|
+
private store;
|
|
796
|
+
/**
|
|
797
|
+
* Stores a workflow state in the internal Map.
|
|
798
|
+
*
|
|
799
|
+
* Automatically updates the `updatedAt` timestamp to reflect the current time.
|
|
800
|
+
*
|
|
801
|
+
* @param state - The workflow state to persist.
|
|
802
|
+
* @throws {Error} If the state object is invalid or cannot be stored.
|
|
803
|
+
*/
|
|
804
|
+
save(state: WorkflowState): Promise<void>;
|
|
805
|
+
/**
|
|
806
|
+
* Retrieves a workflow state by its ID from the internal Map.
|
|
807
|
+
*
|
|
808
|
+
* @param id - The unique identifier of the workflow.
|
|
809
|
+
* @returns The workflow state if found, otherwise null.
|
|
810
|
+
*/
|
|
811
|
+
load(id: string): Promise<WorkflowState | null>;
|
|
812
|
+
/**
|
|
813
|
+
* Filters and returns workflow states stored in memory.
|
|
814
|
+
*
|
|
815
|
+
* Supports filtering by name and status, and provides basic pagination.
|
|
816
|
+
* Results are sorted by creation date in descending order.
|
|
817
|
+
*
|
|
818
|
+
* @param filter - Criteria for filtering and paginating results.
|
|
819
|
+
* @returns An array of matching workflow states.
|
|
820
|
+
*/
|
|
821
|
+
list(filter?: WorkflowFilter): Promise<WorkflowState[]>;
|
|
822
|
+
/**
|
|
823
|
+
* Removes a workflow state from the internal Map.
|
|
824
|
+
*
|
|
825
|
+
* @param id - The unique identifier of the workflow to delete.
|
|
826
|
+
*/
|
|
827
|
+
delete(id: string): Promise<void>;
|
|
828
|
+
/**
|
|
829
|
+
* Initializes the memory storage.
|
|
830
|
+
*
|
|
831
|
+
* This is a no-op for MemoryStorage but satisfies the WorkflowStorage interface.
|
|
832
|
+
*/
|
|
833
|
+
init(): Promise<void>;
|
|
834
|
+
/**
|
|
835
|
+
* Clears all stored workflow states and resets the storage.
|
|
836
|
+
*/
|
|
837
|
+
close(): Promise<void>;
|
|
838
|
+
/**
|
|
839
|
+
* Returns the total number of workflow states currently stored in memory.
|
|
840
|
+
*
|
|
841
|
+
* Useful for assertions in test environments.
|
|
842
|
+
*
|
|
843
|
+
* @returns The number of entries in the store.
|
|
844
|
+
*/
|
|
845
|
+
size(): number;
|
|
846
|
+
}
|
|
847
|
+
|
|
848
|
+
/**
|
|
849
|
+
* Configuration options for the PostgreSQL storage adapter.
|
|
850
|
+
*
|
|
851
|
+
* Defines connection parameters and database schema settings.
|
|
852
|
+
*/
|
|
853
|
+
interface PostgreSQLStorageOptions {
|
|
854
|
+
/** Connection string URL (e.g. "postgres://user:pass@host:5432/db"). */
|
|
855
|
+
connectionString?: string;
|
|
856
|
+
/** Database host address. */
|
|
857
|
+
host?: string;
|
|
858
|
+
/** Database port number. @default 5432 */
|
|
859
|
+
port?: number;
|
|
860
|
+
/** Database name. @default "postgres" */
|
|
861
|
+
database?: string;
|
|
862
|
+
/** Database user. */
|
|
863
|
+
user?: string;
|
|
864
|
+
/** Database password. */
|
|
865
|
+
password?: string;
|
|
866
|
+
/** Name of the table to store workflows. @default "flux_workflows" */
|
|
867
|
+
tableName?: string;
|
|
868
|
+
/** SSL configuration for secure connections. */
|
|
869
|
+
ssl?: boolean | {
|
|
870
|
+
rejectUnauthorized?: boolean;
|
|
871
|
+
};
|
|
872
|
+
}
|
|
873
|
+
/**
|
|
874
|
+
* PostgreSQL storage adapter for Flux workflows.
|
|
875
|
+
*
|
|
876
|
+
* Provides persistent, reliable storage using a PostgreSQL database.
|
|
877
|
+
* Supports JSONB for efficient storage of workflow state and history.
|
|
878
|
+
*
|
|
879
|
+
* @example
|
|
880
|
+
* ```typescript
|
|
881
|
+
* const storage = new PostgreSQLStorage({
|
|
882
|
+
* connectionString: process.env.DATABASE_URL,
|
|
883
|
+
* tableName: 'my_workflows'
|
|
884
|
+
* });
|
|
885
|
+
* await storage.init();
|
|
886
|
+
* ```
|
|
887
|
+
*/
|
|
888
|
+
declare class PostgreSQLStorage implements WorkflowStorage {
|
|
889
|
+
private pool;
|
|
890
|
+
private tableName;
|
|
891
|
+
private initialized;
|
|
892
|
+
private options;
|
|
893
|
+
/**
|
|
894
|
+
* Creates a new PostgreSQL storage instance.
|
|
895
|
+
*
|
|
896
|
+
* @param options - Connection and configuration options.
|
|
897
|
+
*/
|
|
898
|
+
constructor(options?: PostgreSQLStorageOptions);
|
|
899
|
+
/**
|
|
900
|
+
* Initializes the database connection pool and schema.
|
|
901
|
+
*
|
|
902
|
+
* Creates the workflow table and indexes if they do not exist.
|
|
903
|
+
* This method is idempotent.
|
|
904
|
+
*
|
|
905
|
+
* @throws {Error} If connection fails or schema creation fails.
|
|
906
|
+
*/
|
|
907
|
+
init(): Promise<void>;
|
|
908
|
+
/**
|
|
909
|
+
* Persists the current state of a workflow.
|
|
910
|
+
*
|
|
911
|
+
* Uses upsert (INSERT ... ON CONFLICT) to save or update the workflow state.
|
|
912
|
+
*
|
|
913
|
+
* @param state - The workflow state to save.
|
|
914
|
+
* @throws {Error} If the database operation fails.
|
|
915
|
+
*/
|
|
916
|
+
save(state: WorkflowState): Promise<void>;
|
|
917
|
+
/**
|
|
918
|
+
* Loads a workflow state by its ID.
|
|
919
|
+
*
|
|
920
|
+
* @param id - The unique identifier of the workflow.
|
|
921
|
+
* @returns The workflow state, or null if not found.
|
|
922
|
+
* @throws {Error} If the database query fails.
|
|
923
|
+
*/
|
|
924
|
+
load(id: string): Promise<WorkflowState | null>;
|
|
925
|
+
/**
|
|
926
|
+
* Lists workflows matching the given filter.
|
|
927
|
+
*
|
|
928
|
+
* @param filter - Criteria to filter the results.
|
|
929
|
+
* @returns A list of matching workflow states.
|
|
930
|
+
* @throws {Error} If the database query fails.
|
|
931
|
+
*/
|
|
932
|
+
list(filter?: WorkflowFilter): Promise<WorkflowState[]>;
|
|
933
|
+
/**
|
|
934
|
+
* Deletes a workflow state by its ID.
|
|
935
|
+
*
|
|
936
|
+
* @param id - The unique identifier of the workflow to delete.
|
|
937
|
+
* @throws {Error} If the database operation fails.
|
|
938
|
+
*/
|
|
939
|
+
delete(id: string): Promise<void>;
|
|
940
|
+
/**
|
|
941
|
+
* Closes the database connection pool.
|
|
942
|
+
*
|
|
943
|
+
* Should be called when shutting down the application.
|
|
944
|
+
*/
|
|
945
|
+
close(): Promise<void>;
|
|
946
|
+
/**
|
|
947
|
+
* Converts a database row to a WorkflowState object.
|
|
948
|
+
*
|
|
949
|
+
* @param row - The raw database row.
|
|
950
|
+
* @returns The parsed workflow state.
|
|
951
|
+
*/
|
|
952
|
+
private rowToState;
|
|
953
|
+
/**
|
|
954
|
+
* Returns the underlying pg.Pool instance.
|
|
955
|
+
*
|
|
956
|
+
* Useful for testing or advanced database operations.
|
|
957
|
+
*
|
|
958
|
+
* @returns The PostgreSQL connection pool.
|
|
959
|
+
*/
|
|
960
|
+
getPool(): any;
|
|
961
|
+
/**
|
|
962
|
+
* Optimizes the database table by reclaiming storage and updating statistics.
|
|
963
|
+
*
|
|
964
|
+
* Runs VACUUM ANALYZE on the workflow table.
|
|
965
|
+
*
|
|
966
|
+
* @throws {Error} If the maintenance operation fails.
|
|
967
|
+
*/
|
|
968
|
+
vacuum(): Promise<void>;
|
|
969
|
+
}
|
|
970
|
+
|
|
971
|
+
/**
|
|
972
|
+
* @fileoverview JSON file trace sink (NDJSON)
|
|
973
|
+
*
|
|
974
|
+
* Writes trace events to a newline-delimited JSON file.
|
|
975
|
+
*/
|
|
976
|
+
|
|
977
|
+
/**
|
|
978
|
+
* Options for configuring the `JsonFileTraceSink`.
|
|
979
|
+
*
|
|
980
|
+
* @public
|
|
981
|
+
*/
|
|
982
|
+
interface JsonFileTraceSinkOptions {
|
|
983
|
+
/** Absolute path where the trace file should be stored. */
|
|
984
|
+
path: string;
|
|
985
|
+
/** Whether to reset (clear) the file on initialization. @default true */
|
|
986
|
+
reset?: boolean;
|
|
987
|
+
}
|
|
988
|
+
/**
|
|
989
|
+
* A trace sink that writes events to a newline-delimited JSON (NDJSON) file.
|
|
990
|
+
*
|
|
991
|
+
* This sink is ideal for local development and debugging as it produces
|
|
992
|
+
* a human-readable and easily machine-parsable log of workflow events.
|
|
993
|
+
*
|
|
994
|
+
* @example
|
|
995
|
+
* ```typescript
|
|
996
|
+
* const sink = new JsonFileTraceSink({
|
|
997
|
+
* path: './traces/workflow.jsonl',
|
|
998
|
+
* reset: true
|
|
999
|
+
* });
|
|
1000
|
+
* ```
|
|
1001
|
+
*
|
|
1002
|
+
* @public
|
|
1003
|
+
*/
|
|
1004
|
+
declare class JsonFileTraceSink implements FluxTraceSink {
|
|
1005
|
+
private path;
|
|
1006
|
+
private ready;
|
|
1007
|
+
/**
|
|
1008
|
+
* Creates a new JSON file trace sink.
|
|
1009
|
+
*
|
|
1010
|
+
* @param options - Configuration options for the sink.
|
|
1011
|
+
*/
|
|
1012
|
+
constructor(options: JsonFileTraceSinkOptions);
|
|
1013
|
+
/**
|
|
1014
|
+
* Ensures the target directory exists and optionally resets the file.
|
|
1015
|
+
*
|
|
1016
|
+
* @param reset - Whether to truncate the file if it already exists.
|
|
1017
|
+
* @throws {Error} If directory creation or file writing fails.
|
|
1018
|
+
*/
|
|
1019
|
+
private init;
|
|
1020
|
+
/**
|
|
1021
|
+
* Appends a trace event to the file in NDJSON format.
|
|
1022
|
+
*
|
|
1023
|
+
* Waits for initialization to complete before writing.
|
|
1024
|
+
*
|
|
1025
|
+
* @param event - The trace event to record.
|
|
1026
|
+
* @throws {Error} If writing to the file fails.
|
|
1027
|
+
*
|
|
1028
|
+
* @example
|
|
1029
|
+
* ```typescript
|
|
1030
|
+
* await sink.emit({
|
|
1031
|
+
* type: 'step_start',
|
|
1032
|
+
* workflowId: 'wf-1',
|
|
1033
|
+
* timestamp: Date.now(),
|
|
1034
|
+
* data: { step: 'validate' }
|
|
1035
|
+
* });
|
|
1036
|
+
* ```
|
|
1037
|
+
*/
|
|
1038
|
+
emit(event: FluxTraceEvent): Promise<void>;
|
|
1039
|
+
}
|
|
1040
|
+
|
|
1041
|
+
/**
|
|
1042
|
+
* Callback for handling recovery needed events.
|
|
1043
|
+
*/
|
|
1044
|
+
type RecoveryCallback<TInput = unknown, TData = Record<string, any>> = (ctx: WorkflowContext<TInput, TData>, stepName: string, error: Error) => Promise<void> | void;
|
|
1045
|
+
/**
|
|
1046
|
+
* Recovery action types for manual intervention.
|
|
1047
|
+
*/
|
|
1048
|
+
type RecoveryAction = {
|
|
1049
|
+
type: 'retry';
|
|
1050
|
+
maxAttempts?: number;
|
|
1051
|
+
} | {
|
|
1052
|
+
type: 'skip';
|
|
1053
|
+
} | {
|
|
1054
|
+
type: 'manual';
|
|
1055
|
+
handler: () => Promise<void>;
|
|
1056
|
+
} | {
|
|
1057
|
+
type: 'abort';
|
|
1058
|
+
};
|
|
1059
|
+
/**
|
|
1060
|
+
* Manages recovery from failed compensation actions through human intervention.
|
|
1061
|
+
*
|
|
1062
|
+
* When automatic retry fails, this manager allows workflows to wait for
|
|
1063
|
+
* manual recovery actions before proceeding with rollback.
|
|
1064
|
+
*
|
|
1065
|
+
* @example
|
|
1066
|
+
* ```typescript
|
|
1067
|
+
* const manager = new RecoveryManager();
|
|
1068
|
+
*
|
|
1069
|
+
* manager.onRecoveryNeeded(async (ctx, stepName, error) => {
|
|
1070
|
+
* await notificationService.alert(
|
|
1071
|
+
* 'CRITICAL: Manual recovery needed',
|
|
1072
|
+
* { workflow: ctx.id, step: stepName, error: error.message }
|
|
1073
|
+
* );
|
|
1074
|
+
* });
|
|
1075
|
+
*
|
|
1076
|
+
* manager.registerAction('refund-payment', {
|
|
1077
|
+
* type: 'manual',
|
|
1078
|
+
* handler: async () => {
|
|
1079
|
+
* await finance.manualRefund(ctx.data.transactionId);
|
|
1080
|
+
* }
|
|
1081
|
+
* });
|
|
1082
|
+
* ```
|
|
1083
|
+
*/
|
|
1084
|
+
declare class RecoveryManager<TInput = unknown, TData = Record<string, any>> {
|
|
1085
|
+
private recoveryCallbacks;
|
|
1086
|
+
private actions;
|
|
1087
|
+
private pendingRecoveries;
|
|
1088
|
+
/**
|
|
1089
|
+
* Registers a callback to be invoked when recovery is needed.
|
|
1090
|
+
*
|
|
1091
|
+
* @param callback - Function to call when a recovery event occurs.
|
|
1092
|
+
*
|
|
1093
|
+
* @example
|
|
1094
|
+
* ```typescript
|
|
1095
|
+
* manager.onRecoveryNeeded(async (ctx, stepName, error) => {
|
|
1096
|
+
* await slack.send(`#alerts`, `Workflow ${ctx.id} needs recovery at ${stepName}`);
|
|
1097
|
+
* });
|
|
1098
|
+
* ```
|
|
1099
|
+
*/
|
|
1100
|
+
onRecoveryNeeded(callback: RecoveryCallback<TInput, TData>): void;
|
|
1101
|
+
/**
|
|
1102
|
+
* Emits a recovery needed event.
|
|
1103
|
+
*
|
|
1104
|
+
* @param ctx - The workflow context.
|
|
1105
|
+
* @param stepName - The step that failed compensation.
|
|
1106
|
+
* @param error - The error that occurred.
|
|
1107
|
+
*/
|
|
1108
|
+
notifyRecoveryNeeded(ctx: WorkflowContext<TInput, TData>, stepName: string, error: Error): Promise<void>;
|
|
1109
|
+
/**
|
|
1110
|
+
* Registers a recovery action for a specific step.
|
|
1111
|
+
*
|
|
1112
|
+
* @param stepName - The step name to associate the action with.
|
|
1113
|
+
* @param action - The recovery action to perform.
|
|
1114
|
+
*
|
|
1115
|
+
* @example
|
|
1116
|
+
* ```typescript
|
|
1117
|
+
* manager.registerAction('book-flight', {
|
|
1118
|
+
* type: 'retry',
|
|
1119
|
+
* maxAttempts: 5
|
|
1120
|
+
* });
|
|
1121
|
+
*
|
|
1122
|
+
* manager.registerAction('charge-card', {
|
|
1123
|
+
* type: 'manual',
|
|
1124
|
+
* handler: async () => {
|
|
1125
|
+
* await accountingSystem.manualRefund(transactionId);
|
|
1126
|
+
* }
|
|
1127
|
+
* });
|
|
1128
|
+
* ```
|
|
1129
|
+
*/
|
|
1130
|
+
registerAction(stepName: string, action: RecoveryAction): void;
|
|
1131
|
+
/**
|
|
1132
|
+
* Gets the registered recovery action for a step.
|
|
1133
|
+
*
|
|
1134
|
+
* @param stepName - The step name.
|
|
1135
|
+
* @returns The recovery action if registered, otherwise undefined.
|
|
1136
|
+
*/
|
|
1137
|
+
getAction(stepName: string): RecoveryAction | undefined;
|
|
1138
|
+
/**
|
|
1139
|
+
* Checks if a workflow has a pending recovery.
|
|
1140
|
+
*
|
|
1141
|
+
* @param workflowId - The workflow ID.
|
|
1142
|
+
* @returns True if recovery is pending.
|
|
1143
|
+
*/
|
|
1144
|
+
hasPendingRecovery(workflowId: string): boolean;
|
|
1145
|
+
/**
|
|
1146
|
+
* Gets the pending recovery details for a workflow.
|
|
1147
|
+
*
|
|
1148
|
+
* @param workflowId - The workflow ID.
|
|
1149
|
+
* @returns The pending recovery details if any.
|
|
1150
|
+
*/
|
|
1151
|
+
getPendingRecovery(workflowId: string): {
|
|
1152
|
+
stepName: string;
|
|
1153
|
+
error: Error;
|
|
1154
|
+
} | undefined;
|
|
1155
|
+
/**
|
|
1156
|
+
* Marks a recovery as resolved.
|
|
1157
|
+
*
|
|
1158
|
+
* @param workflowId - The workflow ID.
|
|
1159
|
+
*/
|
|
1160
|
+
resolveRecovery(workflowId: string): void;
|
|
1161
|
+
/**
|
|
1162
|
+
* Executes the registered recovery action for a step.
|
|
1163
|
+
*
|
|
1164
|
+
* @param stepName - The step name.
|
|
1165
|
+
* @returns The recovery action result.
|
|
1166
|
+
*
|
|
1167
|
+
* @example
|
|
1168
|
+
* ```typescript
|
|
1169
|
+
* const action = manager.getAction('failed-step');
|
|
1170
|
+
* if (action && action.type === 'manual') {
|
|
1171
|
+
* await manager.executeRecovery('failed-step');
|
|
1172
|
+
* }
|
|
1173
|
+
* ```
|
|
1174
|
+
*/
|
|
1175
|
+
executeRecovery(stepName: string): Promise<void>;
|
|
1176
|
+
/**
|
|
1177
|
+
* Clears all registered actions.
|
|
1178
|
+
*/
|
|
1179
|
+
clearActions(): void;
|
|
1180
|
+
/**
|
|
1181
|
+
* Clears all recovery callbacks.
|
|
1182
|
+
*/
|
|
1183
|
+
clearCallbacks(): void;
|
|
1184
|
+
/**
|
|
1185
|
+
* Gets all pending recoveries.
|
|
1186
|
+
*
|
|
1187
|
+
* @returns A map of workflow IDs to pending recovery details.
|
|
1188
|
+
*/
|
|
1189
|
+
getAllPendingRecoveries(): Map<string, {
|
|
1190
|
+
stepName: string;
|
|
1191
|
+
error: Error;
|
|
1192
|
+
}>;
|
|
1193
|
+
/**
|
|
1194
|
+
* Clears all pending recoveries.
|
|
1195
|
+
*/
|
|
1196
|
+
clearPendingRecoveries(): void;
|
|
1197
|
+
/**
|
|
1198
|
+
* Gets the count of registered callbacks.
|
|
1199
|
+
* @internal For testing purposes.
|
|
1200
|
+
*/
|
|
1201
|
+
getCallbackCount(): number;
|
|
1202
|
+
}
|
|
1203
|
+
|
|
1204
|
+
/**
|
|
1205
|
+
* The core execution engine for Flux workflows.
|
|
1206
|
+
*
|
|
1207
|
+
* FluxEngine manages the lifecycle of workflow execution, including persistence,
|
|
1208
|
+
* state transitions, retries, and compensation (rollback) logic. It acts as the
|
|
1209
|
+
* primary entry point for interacting with the workflow system.
|
|
1210
|
+
*
|
|
1211
|
+
* @example
|
|
1212
|
+
* ```typescript
|
|
1213
|
+
* const engine = new FluxEngine({
|
|
1214
|
+
* storage: new MemoryStorage(),
|
|
1215
|
+
* defaultRetries: 3
|
|
1216
|
+
* });
|
|
1217
|
+
* await engine.init();
|
|
1218
|
+
* const result = await engine.execute(myWorkflow, { userId: '123' });
|
|
1219
|
+
* ```
|
|
1220
|
+
*/
|
|
1221
|
+
declare class FluxEngine {
|
|
1222
|
+
private storage;
|
|
1223
|
+
private contextManager;
|
|
1224
|
+
private traceEmitter;
|
|
1225
|
+
private executor;
|
|
1226
|
+
private rollbackManager;
|
|
1227
|
+
private cronTrigger;
|
|
1228
|
+
private dataOptimizer?;
|
|
1229
|
+
constructor(config?: FluxConfig);
|
|
1230
|
+
private config;
|
|
1231
|
+
execute<TInput, TData extends Record<string, any> = Record<string, any>>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, input: TInput): Promise<FluxResult<TData>>;
|
|
1232
|
+
executeBatch<TInput, TData extends Record<string, any> = Record<string, any>>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, inputs: TInput[], options?: BatchExecutionOptions): Promise<BatchResult<TData>>;
|
|
1233
|
+
private executeWithLock;
|
|
1234
|
+
resume<TInput, TData extends Record<string, any> = Record<string, any>>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, workflowId: string, options?: {
|
|
1235
|
+
fromStep?: number | string;
|
|
1236
|
+
}): Promise<FluxResult<TData> | null>;
|
|
1237
|
+
signal<TInput, TData extends Record<string, any> = Record<string, any>>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, workflowId: string, signalName: string, payload?: any): Promise<FluxResult<TData>>;
|
|
1238
|
+
retryStep<TInput, TData extends Record<string, any> = Record<string, any>>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, workflowId: string, stepName: string): Promise<FluxResult<TData> | null>;
|
|
1239
|
+
/**
|
|
1240
|
+
* Retrieves the current state of a workflow instance from storage.
|
|
1241
|
+
*
|
|
1242
|
+
* @param workflowId - The unique identifier of the workflow.
|
|
1243
|
+
* @returns A promise resolving to the workflow state or null if not found.
|
|
1244
|
+
*/
|
|
1245
|
+
get<TInput = any, TData = any>(workflowId: string): Promise<WorkflowState<TInput, TData> | null>;
|
|
1246
|
+
saveState<TInput, TData extends Record<string, any>>(state: WorkflowState<TInput, TData>): Promise<void>;
|
|
1247
|
+
list(filter?: Parameters<WorkflowStorage['list']>[0]): Promise<WorkflowState<unknown, Record<string, any>>[]>;
|
|
1248
|
+
schedule<TInput, TData extends Record<string, any>>(cron: string, workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, input: TInput, id?: string): string;
|
|
1249
|
+
unschedule(id: string): void;
|
|
1250
|
+
/**
 * Lists all active workflow schedules.
 */
listSchedules(): CronScheduleOptions[];
/**
 * Gets the recovery manager for handling manual intervention.
 */
getRecoveryManager(): RecoveryManager<unknown, Record<string, any>>;
/**
 * Gets the lock provider used for cluster mode.
 *
 * @returns The configured lock provider, or undefined when the engine runs without one.
 */
getLockProvider(): LockProvider | undefined;
/**
 * Initializes the engine and its underlying storage, and starts the scheduler.
 */
init(): Promise<void>;
/**
 * Closes the engine and releases storage resources.
 */
close(): Promise<void>;
/** Internal persistence helper; implementation not visible in this declaration file. */
private persist;
}
|
|
1272
|
+
|
|
1273
|
+
/**
 * Options for batch execution of workflows.
 */
interface BatchExecutionOptions {
    /** Maximum concurrent workflow executions. @default 10 */
    concurrency?: number;
    /** Whether to continue on individual workflow failures. @default true */
    continueOnError?: boolean;
    /** Callback invoked after each item completes, with running completed/total counts. */
    onProgress?: (completed: number, total: number, result: BatchItemResult) => void;
    /** Abort signal for cancelling the remainder of the batch. */
    signal?: AbortSignal;
}
|
|
1286
|
+
/**
 * Result of a single item in a batch execution.
 */
interface BatchItemResult<TData = any> {
    /** Index of this item within the batch (matches input order). */
    index: number;
    /** Input provided to this workflow. */
    input: unknown;
    /** Execution result (null if failed before execution). */
    result: FluxResult<TData> | null;
    /** Error if this item failed. */
    error?: Error;
    /** Whether this item succeeded. */
    success: boolean;
}
|
|
1301
|
+
/**
 * Result of a batch execution containing all individual results.
 */
interface BatchResult<TData = any> {
    /** Total items in the batch. */
    total: number;
    /** Number of successful executions. */
    succeeded: number;
    /** Number of failed executions. */
    failed: number;
    /** Results for each item, in input order. */
    results: BatchItemResult<TData>[];
    /** Total execution time in milliseconds. */
    duration: number;
}
|
|
1316
|
+
/**
 * Executes workflows in batches with controlled concurrency.
 *
 * BatchExecutor provides efficient parallel execution of multiple workflow instances
 * while respecting concurrency limits, handling errors gracefully, and supporting
 * cancellation via AbortSignal.
 *
 * @example
 * ```typescript
 * const engine = new FluxEngine()
 * const executor = new BatchExecutor(engine)
 *
 * const results = await executor.execute(
 *   myWorkflow,
 *   [{ id: 1 }, { id: 2 }, { id: 3 }],
 *   {
 *     concurrency: 5,
 *     onProgress: (completed, total) => console.log(`${completed}/${total}`)
 *   }
 * )
 *
 * console.log(`Succeeded: ${results.succeeded}, Failed: ${results.failed}`)
 * ```
 */
declare class BatchExecutor {
    /** Engine used to run each workflow instance in the batch. */
    private engine;
    constructor(engine: FluxEngine);
    /**
     * Execute a workflow for multiple inputs with controlled concurrency.
     *
     * @param workflow - The workflow definition or builder to execute.
     * @param inputs - Array of inputs, one per workflow execution.
     * @param options - Execution options (concurrency, error handling, etc.).
     * @returns Batch result containing all individual execution results.
     */
    execute<TInput, TData extends Record<string, any> = Record<string, any>>(workflow: WorkflowBuilder<TInput, TData> | WorkflowDefinition<TInput, TData>, inputs: TInput[], options?: BatchExecutionOptions): Promise<BatchResult<TData>>;
    /**
     * Execute different workflows in a single batch.
     *
     * Unlike `execute()`, this method allows each item in the batch to use a different
     * workflow definition, enabling heterogeneous batch processing.
     *
     * @param items - Array of workflow/input pairs to execute.
     * @param options - Execution options (concurrency, error handling, etc.).
     * @returns Batch result containing all individual execution results.
     */
    executeMany<TData = any>(items: Array<{
        workflow: WorkflowDefinition<any, any>;
        input: any;
    }>, options?: BatchExecutionOptions): Promise<BatchResult<TData>>;
}
|
|
511
1367
|
|
|
512
|
-
export { type BatchExecutionOptions, BatchExecutor, type BatchItemResult, type BatchResult, ContextManager, FluxConfig, FluxConsoleLogger, FluxEngine, FluxLogger, FluxResult, FluxSilentLogger, FluxTraceEvent, FluxTraceSink, JsonFileTraceSink, Lock, LockProvider, MemoryStorage, OrbitFlux, type OrbitFluxOptions, PostgreSQLStorage, type PostgreSQLStorageOptions, type RedisClient, RedisLockProvider, type RedisLockProviderOptions, StateMachine, StepDefinition, StepExecution, StepExecutor, StepResult, WorkflowBuilder, WorkflowContext, WorkflowDefinition, WorkflowDescriptor, WorkflowFilter, WorkflowState, WorkflowStatus, WorkflowStorage, createWorkflow };
|