@bratsos/workflow-engine 0.0.11 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-7IITBLFY.js +900 -0
- package/dist/chunk-7IITBLFY.js.map +1 -0
- package/dist/chunk-D7RVRRM2.js +3 -0
- package/dist/chunk-D7RVRRM2.js.map +1 -0
- package/dist/chunk-MUWP5SF2.js +33 -0
- package/dist/chunk-MUWP5SF2.js.map +1 -0
- package/dist/chunk-P4KMGCT3.js +2292 -0
- package/dist/chunk-P4KMGCT3.js.map +1 -0
- package/dist/cli/sync-models.d.ts +1 -0
- package/dist/cli/sync-models.js +210 -0
- package/dist/cli/sync-models.js.map +1 -0
- package/dist/client-5vz5Vv4A.d.ts +938 -0
- package/dist/client.d.ts +4 -0
- package/dist/client.js +4 -0
- package/dist/client.js.map +1 -0
- package/dist/index-DmR3E8D7.d.ts +198 -0
- package/dist/index.d.ts +936 -0
- package/dist/index.js +2387 -0
- package/dist/index.js.map +1 -0
- package/dist/interface-Cv22wvLG.d.ts +344 -0
- package/dist/persistence/index.d.ts +2 -0
- package/dist/persistence/index.js +5 -0
- package/dist/persistence/index.js.map +1 -0
- package/dist/persistence/prisma/index.d.ts +37 -0
- package/dist/persistence/prisma/index.js +4 -0
- package/dist/persistence/prisma/index.js.map +1 -0
- package/dist/testing/index.d.ts +242 -0
- package/dist/testing/index.js +777 -0
- package/dist/testing/index.js.map +1 -0
- package/package.json +1 -1
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,936 @@
|
|
|
1
|
+
import { S as Stage, a as StageMetrics, W as WorkflowEventType, b as WorkflowSSEEvent, L as LogContext, A as AIHelper, M as ModelKey, c as LogLevel } from './client-5vz5Vv4A.js';
|
|
2
|
+
export { d as AIBatch, e as AIBatchHandle, f as AIBatchProvider, g as AIBatchRequest, h as AIBatchResult, i as AICallType, j as AIEmbedResult, k as AIObjectResult, l as AIStreamResult, m as AITextResult, n as AVAILABLE_MODELS, o as AsyncBatchStageDefinition, B as BatchLogFn, D as DEFAULT_MODEL_KEY, E as EmbedOptions, p as EnhancedStageContext, I as InferInput, q as ModelConfig, r as ModelRegistry, s as ModelStats, t as ModelStatsTracker, u as ModelSyncConfig, N as NoInputSchema, O as ObjectOptions, R as RecordCallParams, v as SimpleStageResult, w as StageResult, x as StreamOptions, y as SyncStageDefinition, T as TextOptions, z as calculateCost, C as createAIHelper, F as defineAsyncBatchStage, G as defineStage, H as getDefaultModel, J as getModel, K as getModelById, P as getRegisteredModel, Q as listModels, U as listRegisteredModels, V as modelSupportsBatch, X as printAvailableModels, Y as registerModels, Z as requireStageOutput } from './client-5vz5Vv4A.js';
|
|
3
|
+
import z$1, { z } from 'zod';
|
|
4
|
+
import { EventEmitter } from 'node:events';
|
|
5
|
+
import { W as WorkflowPersistence, A as AICallLogger, J as JobQueue } from './interface-Cv22wvLG.js';
|
|
6
|
+
export { b as AICallRecord, a as AIHelperStats, p as ArtifactType, C as CreateAICallInput, m as CreateLogInput, e as CreateRunInput, h as CreateStageInput, D as DequeueResult, E as EnqueueJobInput, c as JobRecord, d as JobStatus, L as LogLevel, S as SaveArtifactInput, q as Status, U as UpdateRunInput, k as UpdateStageInput, j as UpsertStageInput, n as WorkflowArtifactRecord, o as WorkflowLogRecord, f as WorkflowRunRecord, i as WorkflowStageRecord, l as WorkflowStageStatus, g as WorkflowStatus } from './interface-Cv22wvLG.js';
|
|
7
|
+
export { P as PrismaAICallLogger, a as PrismaJobQueue, c as PrismaWorkflowPersistence, e as createPrismaAICallLogger, f as createPrismaJobQueue, g as createPrismaWorkflowPersistence } from './index-DmR3E8D7.js';
|
|
8
|
+
import { ToolSet } from 'ai';
|
|
9
|
+
|
|
10
|
+
/**
|
|
11
|
+
* Workflow Builder - Fluent API for composing type-safe workflows
|
|
12
|
+
*
|
|
13
|
+
* Workflows are composed of stages that are executed sequentially or in parallel.
|
|
14
|
+
* The builder ensures type safety: output of one stage matches input of next stage.
|
|
15
|
+
*
|
|
16
|
+
* ## Type System Features
|
|
17
|
+
*
|
|
18
|
+
* ### Automatic Context Inference
|
|
19
|
+
* The workflow context type is automatically accumulated as you pipe stages.
|
|
20
|
+
* Use `InferWorkflowContext<typeof workflow>` to extract the context type.
|
|
21
|
+
*
|
|
22
|
+
* ```typescript
|
|
23
|
+
* const workflow = new WorkflowBuilder(...)
|
|
24
|
+
* .pipe(stage1)
|
|
25
|
+
* .pipe(stage2)
|
|
26
|
+
* .build();
|
|
27
|
+
*
|
|
28
|
+
* // Auto-generated type
|
|
29
|
+
* type MyContext = InferWorkflowContext<typeof workflow>;
|
|
30
|
+
* // = { "stage-1": Stage1Output, "stage-2": Stage2Output }
|
|
31
|
+
* ```
|
|
32
|
+
*
|
|
33
|
+
* ### Stage ID Constants
|
|
34
|
+
* Use `workflow.stageIds` for type-safe stage ID references.
|
|
35
|
+
*/
|
|
36
|
+
|
|
37
|
+
interface StageNode {
|
|
38
|
+
stage: Stage<any, any, any>;
|
|
39
|
+
executionGroup: number;
|
|
40
|
+
}
|
|
41
|
+
declare class Workflow<TInput extends z.ZodTypeAny, TOutput extends z.ZodTypeAny, TContext extends Record<string, unknown> = {}> {
|
|
42
|
+
readonly id: string;
|
|
43
|
+
readonly name: string;
|
|
44
|
+
readonly description: string;
|
|
45
|
+
readonly inputSchema: TInput;
|
|
46
|
+
readonly outputSchema: TOutput;
|
|
47
|
+
private readonly stages;
|
|
48
|
+
readonly contextType?: TContext | undefined;
|
|
49
|
+
constructor(id: string, name: string, description: string, inputSchema: TInput, outputSchema: TOutput, stages: StageNode[], contextType?: TContext | undefined);
|
|
50
|
+
/**
|
|
51
|
+
* Get execution plan as groups of stages
|
|
52
|
+
* Stages in the same group can be executed in parallel
|
|
53
|
+
*/
|
|
54
|
+
getExecutionPlan(): StageNode[][];
|
|
55
|
+
/**
|
|
56
|
+
* Get a specific stage by ID
|
|
57
|
+
*/
|
|
58
|
+
getStage(stageId: string): Stage<any, any, any> | undefined;
|
|
59
|
+
/**
|
|
60
|
+
* Get all stages in order
|
|
61
|
+
*/
|
|
62
|
+
getAllStages(): StageNode[];
|
|
63
|
+
/**
|
|
64
|
+
* Get a visual representation of the workflow execution order
|
|
65
|
+
*/
|
|
66
|
+
getExecutionOrder(): string;
|
|
67
|
+
/**
|
|
68
|
+
* Get all stage IDs in execution order
|
|
69
|
+
*
|
|
70
|
+
* @returns Array of stage IDs
|
|
71
|
+
*
|
|
72
|
+
* @example
|
|
73
|
+
* ```typescript
|
|
74
|
+
* const ids = workflow.getStageIds();
|
|
75
|
+
* // ["data-extraction", "guidelines", "generator"]
|
|
76
|
+
* ```
|
|
77
|
+
*/
|
|
78
|
+
getStageIds(): string[];
|
|
79
|
+
/**
|
|
80
|
+
* Check if a stage ID exists in this workflow
|
|
81
|
+
*
|
|
82
|
+
* @param stageId - The stage ID to check
|
|
83
|
+
* @returns true if the stage exists
|
|
84
|
+
*/
|
|
85
|
+
hasStage(stageId: string): boolean;
|
|
86
|
+
/**
|
|
87
|
+
* Validate workflow configuration before execution
|
|
88
|
+
* Checks that all stage configs match their schemas
|
|
89
|
+
*
|
|
90
|
+
* @param config - Configuration object with keys matching stage IDs
|
|
91
|
+
* @returns Validation result with any errors
|
|
92
|
+
*/
|
|
93
|
+
validateConfig(config: Record<string, unknown>): {
|
|
94
|
+
valid: boolean;
|
|
95
|
+
errors: Array<{
|
|
96
|
+
stageId: string;
|
|
97
|
+
error: string;
|
|
98
|
+
}>;
|
|
99
|
+
};
|
|
100
|
+
/**
|
|
101
|
+
* Estimate total cost for the workflow
|
|
102
|
+
*/
|
|
103
|
+
estimateCost(input: z.infer<TInput>, config: Record<string, unknown>): number;
|
|
104
|
+
/**
|
|
105
|
+
* Get configuration schemas for all stages in this workflow
|
|
106
|
+
* Returns a map of stageId → { schema, defaults, name, description }
|
|
107
|
+
*/
|
|
108
|
+
getStageConfigs(): Record<string, {
|
|
109
|
+
schema: z.ZodTypeAny;
|
|
110
|
+
defaults: Record<string, unknown>;
|
|
111
|
+
name: string;
|
|
112
|
+
description?: string;
|
|
113
|
+
}>;
|
|
114
|
+
/**
|
|
115
|
+
* Generate default configuration object for all stages
|
|
116
|
+
* Automatically discovers all stage configs - add/remove stages and this updates automatically
|
|
117
|
+
*/
|
|
118
|
+
getDefaultConfig(): Record<string, Record<string, unknown>>;
|
|
119
|
+
/**
|
|
120
|
+
* Get all stages in a specific execution group
|
|
121
|
+
*/
|
|
122
|
+
getStagesInExecutionGroup(groupIndex: number): Stage<any, any, any>[];
|
|
123
|
+
/**
|
|
124
|
+
* Get the sequential index of a stage (0-based)
|
|
125
|
+
*/
|
|
126
|
+
getStageIndex(stageId: string): number;
|
|
127
|
+
/**
|
|
128
|
+
* Get the execution group index for a stage
|
|
129
|
+
*/
|
|
130
|
+
getExecutionGroupIndex(stageId: string): number;
|
|
131
|
+
/**
|
|
132
|
+
* Get the ID of the stage immediately preceding the given stage
|
|
133
|
+
*/
|
|
134
|
+
getPreviousStageId(stageId: string): string | undefined;
|
|
135
|
+
}
|
|
136
|
+
declare class WorkflowBuilder<TInput extends z.ZodTypeAny, TCurrentOutput extends z.ZodTypeAny, TContext extends Record<string, unknown> = {}> {
|
|
137
|
+
private id;
|
|
138
|
+
private name;
|
|
139
|
+
private description;
|
|
140
|
+
private inputSchema;
|
|
141
|
+
private currentOutputSchema;
|
|
142
|
+
private stages;
|
|
143
|
+
private currentExecutionGroup;
|
|
144
|
+
constructor(id: string, name: string, description: string, inputSchema: TInput, currentOutputSchema: TCurrentOutput);
|
|
145
|
+
/**
|
|
146
|
+
* Add a stage to the workflow (sequential execution)
|
|
147
|
+
*
|
|
148
|
+
* Automatically accumulates the stage's output in the context under its stage ID.
|
|
149
|
+
* This provides type-safe access to all previous stage outputs.
|
|
150
|
+
*
|
|
151
|
+
* Note: This accepts any stage regardless of strict input type matching.
|
|
152
|
+
* This is necessary because stages using passthrough() can accept objects
|
|
153
|
+
* with additional fields beyond what's declared in their input schema.
|
|
154
|
+
* Runtime validation via Zod ensures type safety at execution time.
|
|
155
|
+
*
|
|
156
|
+
* Validates that all declared dependencies exist in the workflow.
|
|
157
|
+
*/
|
|
158
|
+
pipe<TStageInput extends z.ZodTypeAny, TStageOutput extends z.ZodTypeAny, TStageConfig extends z.ZodTypeAny, TStageContext extends Record<string, unknown>>(stage: Stage<TStageInput, TStageOutput, TStageConfig, TStageContext>): WorkflowBuilder<TInput, TStageOutput, TContext & {
|
|
159
|
+
[x: string]: z.infer<TStageOutput>;
|
|
160
|
+
}>;
|
|
161
|
+
/**
|
|
162
|
+
* Add a stage with strict input type checking
|
|
163
|
+
*
|
|
164
|
+
* Note: pipeStrict() and pipeLoose() have been removed as they were
|
|
165
|
+
* just aliases for pipe(). Use pipe() for all stage chaining.
|
|
166
|
+
*/
|
|
167
|
+
/**
|
|
168
|
+
* Add multiple stages that execute in parallel
|
|
169
|
+
*
|
|
170
|
+
* All stages receive the same input (current output)
|
|
171
|
+
* Their outputs are merged into an object by index AND accumulated in context by stage ID.
|
|
172
|
+
*
|
|
173
|
+
* Note: This accepts stages regardless of strict input type matching.
|
|
174
|
+
* This is necessary because stages using passthrough() can accept objects
|
|
175
|
+
* with additional fields. Runtime validation via Zod ensures type safety.
|
|
176
|
+
*
|
|
177
|
+
* Validates that all declared dependencies exist in the workflow.
|
|
178
|
+
*/
|
|
179
|
+
parallel<TStages extends {
|
|
180
|
+
id: string;
|
|
181
|
+
outputSchema: z.ZodTypeAny;
|
|
182
|
+
dependencies?: string[];
|
|
183
|
+
}[]>(stages: [...TStages]): WorkflowBuilder<TInput, z.ZodTypeAny, TContext & {
|
|
184
|
+
[K in TStages[number]["id"]]: TStages[number] extends {
|
|
185
|
+
outputSchema: infer O;
|
|
186
|
+
} ? O extends z.ZodTypeAny ? z.infer<O> : never : never;
|
|
187
|
+
}>;
|
|
188
|
+
/**
|
|
189
|
+
* Build the final workflow
|
|
190
|
+
*/
|
|
191
|
+
build(): Workflow<TInput, TCurrentOutput, TContext>;
|
|
192
|
+
/**
|
|
193
|
+
* Get current stage count
|
|
194
|
+
*/
|
|
195
|
+
getStageCount(): number;
|
|
196
|
+
/**
|
|
197
|
+
* Get execution group count
|
|
198
|
+
*/
|
|
199
|
+
getExecutionGroupCount(): number;
|
|
200
|
+
}
|
|
201
|
+
/**
|
|
202
|
+
* Extract stage IDs as a union type from a Workflow instance
|
|
203
|
+
*
|
|
204
|
+
* Useful for creating type-safe stage ID references.
|
|
205
|
+
*
|
|
206
|
+
* @example
|
|
207
|
+
* ```typescript
|
|
208
|
+
* type StageId = InferWorkflowStageIds<typeof myWorkflow>;
|
|
209
|
+
* // = "data-extraction" | "guidelines" | "generator"
|
|
210
|
+
*
|
|
211
|
+
* function getStageOutput(stageId: StageId) { ... }
|
|
212
|
+
* ```
|
|
213
|
+
*/
|
|
214
|
+
type InferWorkflowStageIds<W> = W extends Workflow<any, any, infer C> ? keyof C & string : never;
|
|
215
|
+
|
|
216
|
+
/**
|
|
217
|
+
* Workflow Executor - Executes workflows with support for resume and suspension
|
|
218
|
+
*
|
|
219
|
+
* Key features:
|
|
220
|
+
* - Sequential and parallel stage execution
|
|
221
|
+
* - Automatic state persistence to R2 and database
|
|
222
|
+
* - Resume from last completed stage
|
|
223
|
+
* - Suspend/resume for long-running batch jobs
|
|
224
|
+
* - Event emission for real-time updates
|
|
225
|
+
*
|
|
226
|
+
* Note: Stages should import createAIHelper directly from ~/lib/ai-helper
|
|
227
|
+
* and create their own AI helper instances with appropriate topics.
|
|
228
|
+
* The executor creates a helper for tracking aggregate stats per stage.
|
|
229
|
+
*/
|
|
230
|
+
|
|
231
|
+
interface WorkflowExecutorOptions {
|
|
232
|
+
persistence?: WorkflowPersistence;
|
|
233
|
+
/** Optional AI call logger. If not provided, AI tracking is disabled. */
|
|
234
|
+
aiLogger?: AICallLogger;
|
|
235
|
+
}
|
|
236
|
+
declare class WorkflowExecutor extends EventEmitter {
|
|
237
|
+
private workflow;
|
|
238
|
+
private workflowRunId;
|
|
239
|
+
private workflowType;
|
|
240
|
+
private cancelled;
|
|
241
|
+
private persistence;
|
|
242
|
+
private aiLogger;
|
|
243
|
+
constructor(workflow: Workflow<any, any>, workflowRunId: string, workflowType: string, storageProviderOrOptions?: WorkflowExecutorOptions);
|
|
244
|
+
/**
|
|
245
|
+
* Override emit to also forward events to the global event bus for SSE
|
|
246
|
+
*/
|
|
247
|
+
emit(eventName: string | symbol, ...args: any[]): boolean;
|
|
248
|
+
/**
|
|
249
|
+
* Check if the workflow has been interrupted (cancelled or suspended) externally
|
|
250
|
+
* This checks the database status to detect external requests
|
|
251
|
+
*/
|
|
252
|
+
private checkExternalInterruption;
|
|
253
|
+
/**
|
|
254
|
+
* Execute the workflow
|
|
255
|
+
*
|
|
256
|
+
* @param input - Workflow input data
|
|
257
|
+
* @param config - Configuration for each stage (keyed by stage ID)
|
|
258
|
+
* @param options - Execution options (resume, etc.)
|
|
259
|
+
* @returns Final output or 'suspended' if workflow is suspended
|
|
260
|
+
*/
|
|
261
|
+
execute<TInput, TOutput>(input: TInput, config: Record<string, unknown>, options?: {
|
|
262
|
+
resume?: boolean;
|
|
263
|
+
fromStage?: string;
|
|
264
|
+
}): Promise<TOutput | "suspended">;
|
|
265
|
+
/**
|
|
266
|
+
* Execute a single stage
|
|
267
|
+
*/
|
|
268
|
+
private executeStage;
|
|
269
|
+
/**
|
|
270
|
+
* Load resume state from database
|
|
271
|
+
*/
|
|
272
|
+
private loadResumeState;
|
|
273
|
+
/**
|
|
274
|
+
* Load workflow context from all completed stages
|
|
275
|
+
* This rebuilds the workflowContext object so resumed stages can access previous outputs
|
|
276
|
+
*/
|
|
277
|
+
private loadWorkflowContext;
|
|
278
|
+
/**
|
|
279
|
+
* Load state for rerunning from a specific stage.
|
|
280
|
+
* Requires that previous stages have already been executed and their outputs persisted.
|
|
281
|
+
*
|
|
282
|
+
* @param stageId - The stage ID to start execution from
|
|
283
|
+
* @returns The execution group, input data, and workflow context
|
|
284
|
+
*/
|
|
285
|
+
private loadFromStageState;
|
|
286
|
+
/**
|
|
287
|
+
* Create a minimal storage shim for context.storage (for API compatibility).
|
|
288
|
+
* Stage implementations should not rely on this - it may be removed in future.
|
|
289
|
+
*/
|
|
290
|
+
private createStorageShim;
|
|
291
|
+
/**
|
|
292
|
+
* Get aggregated statistics for the workflow run
|
|
293
|
+
*/
|
|
294
|
+
private getAggregatedStats;
|
|
295
|
+
/**
|
|
296
|
+
* Log a message with automatic database persistence
|
|
297
|
+
*/
|
|
298
|
+
private log;
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
/**
|
|
302
|
+
* Stage Executor - Executes a single stage
|
|
303
|
+
*
|
|
304
|
+
* Unlike WorkflowExecutor which runs entire workflows,
|
|
305
|
+
* this executes exactly ONE stage and returns.
|
|
306
|
+
*
|
|
307
|
+
* Designed for distributed workers.
|
|
308
|
+
*/
|
|
309
|
+
|
|
310
|
+
interface WorkflowRegistry {
|
|
311
|
+
getWorkflow(id: string): Workflow<any, any> | undefined;
|
|
312
|
+
}
|
|
313
|
+
interface StageExecutionRequest {
|
|
314
|
+
workflowRunId: string;
|
|
315
|
+
workflowId: string;
|
|
316
|
+
stageId: string;
|
|
317
|
+
config: Record<string, unknown>;
|
|
318
|
+
}
|
|
319
|
+
interface StageExecutionResult {
|
|
320
|
+
type: "completed" | "suspended" | "failed";
|
|
321
|
+
output?: unknown;
|
|
322
|
+
suspendedState?: unknown;
|
|
323
|
+
nextPollAt?: Date;
|
|
324
|
+
error?: string;
|
|
325
|
+
metrics?: StageMetrics;
|
|
326
|
+
}
|
|
327
|
+
declare class StageExecutor {
|
|
328
|
+
private registry;
|
|
329
|
+
private persistence;
|
|
330
|
+
private workerId;
|
|
331
|
+
constructor(registry: WorkflowRegistry, persistence: WorkflowPersistence, workerId?: string);
|
|
332
|
+
/**
|
|
333
|
+
* Execute a single stage
|
|
334
|
+
*/
|
|
335
|
+
execute(request: StageExecutionRequest): Promise<StageExecutionResult>;
|
|
336
|
+
private handleCompleted;
|
|
337
|
+
private handleSuspended;
|
|
338
|
+
private handleFailed;
|
|
339
|
+
private loadWorkflowContext;
|
|
340
|
+
/**
|
|
341
|
+
* Create a minimal storage shim for context.storage (for API compatibility).
|
|
342
|
+
* Stage implementations should not rely on this - it may be removed in future.
|
|
343
|
+
*/
|
|
344
|
+
private createStorageShim;
|
|
345
|
+
private resolveStageInput;
|
|
346
|
+
private getStageNumber;
|
|
347
|
+
private getExecutionGroup;
|
|
348
|
+
private log;
|
|
349
|
+
}
|
|
350
|
+
|
|
351
|
+
/**
|
|
352
|
+
* Storage abstraction for stage artifacts and outputs
|
|
353
|
+
*
|
|
354
|
+
* Modified to support Prisma/Memory storage only (R2 removed)
|
|
355
|
+
*/
|
|
356
|
+
interface StageStorage {
|
|
357
|
+
/**
|
|
358
|
+
* Save data to storage
|
|
359
|
+
*/
|
|
360
|
+
save<T>(key: string, data: T): Promise<void>;
|
|
361
|
+
/**
|
|
362
|
+
* Load data from storage
|
|
363
|
+
*/
|
|
364
|
+
load<T>(key: string): Promise<T>;
|
|
365
|
+
/**
|
|
366
|
+
* Check if key exists in storage
|
|
367
|
+
*/
|
|
368
|
+
exists(key: string): Promise<boolean>;
|
|
369
|
+
/**
|
|
370
|
+
* Delete data from storage
|
|
371
|
+
*/
|
|
372
|
+
delete(key: string): Promise<void>;
|
|
373
|
+
/**
|
|
374
|
+
* Generate a storage key for a stage
|
|
375
|
+
*/
|
|
376
|
+
getStageKey(stageId: string, suffix?: string): string;
|
|
377
|
+
/**
|
|
378
|
+
* Save stage output with standard key
|
|
379
|
+
*/
|
|
380
|
+
saveStageOutput<T>(stageId: string, output: T): Promise<string>;
|
|
381
|
+
/**
|
|
382
|
+
* Load stage output with standard key
|
|
383
|
+
*/
|
|
384
|
+
loadStageOutput<T>(stageId: string): Promise<T>;
|
|
385
|
+
/**
|
|
386
|
+
* Save arbitrary artifact for a stage
|
|
387
|
+
*/
|
|
388
|
+
saveArtifact<T>(stageId: string, artifactName: string, data: T): Promise<string>;
|
|
389
|
+
/**
|
|
390
|
+
* Load arbitrary artifact for a stage
|
|
391
|
+
*/
|
|
392
|
+
loadArtifact<T>(stageId: string, artifactName: string): Promise<T>;
|
|
393
|
+
/**
|
|
394
|
+
* List all artifacts for a workflow run (for export)
|
|
395
|
+
*/
|
|
396
|
+
listAllArtifacts(): Promise<Array<{
|
|
397
|
+
key: string;
|
|
398
|
+
stageId: string;
|
|
399
|
+
name: string;
|
|
400
|
+
}>>;
|
|
401
|
+
/**
|
|
402
|
+
* Provider metadata
|
|
403
|
+
*/
|
|
404
|
+
readonly providerType: "prisma" | "memory";
|
|
405
|
+
}
|
|
406
|
+
|
|
407
|
+
/**
|
|
408
|
+
* Factory for creating storage provider instances
|
|
409
|
+
*
|
|
410
|
+
* Prisma storage requires a PrismaClient to be passed via options.
|
|
411
|
+
*/
|
|
412
|
+
|
|
413
|
+
type PrismaClient = any;
|
|
414
|
+
type StorageProviderType = "prisma" | "memory";
|
|
415
|
+
interface StorageFactoryOptions {
|
|
416
|
+
provider: StorageProviderType;
|
|
417
|
+
workflowRunId: string;
|
|
418
|
+
workflowType: string;
|
|
419
|
+
/** Required when provider is "prisma" */
|
|
420
|
+
prisma?: PrismaClient;
|
|
421
|
+
}
|
|
422
|
+
/**
|
|
423
|
+
* Create a storage instance based on the provider type
|
|
424
|
+
*/
|
|
425
|
+
declare function createStorage(options: StorageFactoryOptions): StageStorage;
|
|
426
|
+
/**
|
|
427
|
+
* Get default provider from environment or config
|
|
428
|
+
* Falls back to 'prisma' for database-backed persistence
|
|
429
|
+
*/
|
|
430
|
+
declare function getDefaultStorageProvider(): StorageProviderType;
|
|
431
|
+
|
|
432
|
+
/**
|
|
433
|
+
* Workflow Event Bus - Global event emitter for workflow events
|
|
434
|
+
*
|
|
435
|
+
* This singleton allows SSE endpoints to subscribe to real-time workflow events
|
|
436
|
+
* emitted by WorkflowExecutor instances running in the same process.
|
|
437
|
+
*
|
|
438
|
+
* Supports cross-process events via PostgreSQL LISTEN/NOTIFY when enabled.
|
|
439
|
+
*
|
|
440
|
+
* Events are namespaced by workflowRunId:
|
|
441
|
+
* - workflow:{runId}:stage:started
|
|
442
|
+
* - workflow:{runId}:stage:completed
|
|
443
|
+
* - workflow:{runId}:log
|
|
444
|
+
* - etc.
|
|
445
|
+
*/
|
|
446
|
+
|
|
447
|
+
/**
|
|
448
|
+
* Interface matching PgNotify from @zertai/database
|
|
449
|
+
* Defined here to avoid importing the database package into workflow-engine
|
|
450
|
+
*/
|
|
451
|
+
interface PgNotifyLike {
|
|
452
|
+
listen(channel: string, handler: (channel: string, payload: string) => void): Promise<() => void>;
|
|
453
|
+
notify(channel: string, payload: string): Promise<void>;
|
|
454
|
+
isConnected(): boolean;
|
|
455
|
+
}
|
|
456
|
+
declare class WorkflowEventBus extends EventEmitter {
|
|
457
|
+
private static instance;
|
|
458
|
+
private pgNotify;
|
|
459
|
+
private static readonly PG_CHANNEL;
|
|
460
|
+
private pgListenerUnsubscribe;
|
|
461
|
+
private static readonly MAX_PAYLOAD_SIZE;
|
|
462
|
+
private constructor();
|
|
463
|
+
static getInstance(): WorkflowEventBus;
|
|
464
|
+
/**
|
|
465
|
+
* Enable cross-process event publishing via PostgreSQL NOTIFY
|
|
466
|
+
*
|
|
467
|
+
* Call this during process initialization to enable events to propagate
|
|
468
|
+
* across multiple workers and the React Router app.
|
|
469
|
+
*
|
|
470
|
+
* @param pgNotify - A connected PgNotify instance from @zertai/database
|
|
471
|
+
*/
|
|
472
|
+
enablePgNotify(pgNotify: PgNotifyLike): Promise<void>;
|
|
473
|
+
/**
|
|
474
|
+
* Disable cross-process events (for cleanup)
|
|
475
|
+
*/
|
|
476
|
+
disablePgNotify(): void;
|
|
477
|
+
/**
|
|
478
|
+
* Check if cross-process events are enabled
|
|
479
|
+
*/
|
|
480
|
+
isPgNotifyEnabled(): boolean;
|
|
481
|
+
/**
|
|
482
|
+
* Truncate event payload to fit within PostgreSQL NOTIFY size limits.
|
|
483
|
+
* Large data fields (like workflow output) are replaced with a truncation marker.
|
|
484
|
+
*/
|
|
485
|
+
private truncatePayloadForNotify;
|
|
486
|
+
/**
|
|
487
|
+
* Emit event locally only (used for re-emitting pg notifications)
|
|
488
|
+
*/
|
|
489
|
+
private emitLocally;
|
|
490
|
+
/**
|
|
491
|
+
* Emit a workflow event with proper namespacing
|
|
492
|
+
*
|
|
493
|
+
* When PgNotify is enabled, also publishes to PostgreSQL for cross-process
|
|
494
|
+
* consumption by other workers and the React Router app.
|
|
495
|
+
*/
|
|
496
|
+
emitWorkflowEvent(workflowRunId: string, eventType: WorkflowEventType, payload: Record<string, unknown>): void;
|
|
497
|
+
/**
|
|
498
|
+
* Subscribe to all events for a specific workflow run
|
|
499
|
+
*/
|
|
500
|
+
subscribeToWorkflow(workflowRunId: string, handler: (event: WorkflowSSEEvent) => void): () => void;
|
|
501
|
+
/**
|
|
502
|
+
* Subscribe to a specific event type globally (across all workflows)
|
|
503
|
+
*/
|
|
504
|
+
subscribeGlobal(eventType: WorkflowEventType, handler: (event: WorkflowSSEEvent) => void): () => void;
|
|
505
|
+
/**
|
|
506
|
+
* Subscribe to a specific event type for a workflow
|
|
507
|
+
*/
|
|
508
|
+
subscribeToEvent(workflowRunId: string, eventType: WorkflowEventType, handler: (event: WorkflowSSEEvent) => void): () => void;
|
|
509
|
+
}
|
|
510
|
+
declare const workflowEventBus: WorkflowEventBus;
|
|
511
|
+
|
|
512
|
+
/**
|
|
513
|
+
* Workflow Runtime - Unified API for workflow execution
|
|
514
|
+
*
|
|
515
|
+
* The main entry point for the workflow engine. Handles everything:
|
|
516
|
+
* - Creating new workflow runs (with validation)
|
|
517
|
+
* - Polling for pending workflows → enqueuing first stage
|
|
518
|
+
* - Processing jobs from the queue (executing stages)
|
|
519
|
+
* - Polling for suspended stages → resuming execution
|
|
520
|
+
* - Transitioning workflows between stages
|
|
521
|
+
*/
|
|
522
|
+
|
|
523
|
+
interface WorkflowRuntimeConfig {
|
|
524
|
+
/** Persistence implementation */
|
|
525
|
+
persistence: WorkflowPersistence;
|
|
526
|
+
/** Job queue implementation */
|
|
527
|
+
jobQueue: JobQueue;
|
|
528
|
+
/** Workflow registry */
|
|
529
|
+
registry: WorkflowRegistry;
|
|
530
|
+
/** AI call logger for createAIHelper */
|
|
531
|
+
aiCallLogger?: AICallLogger;
|
|
532
|
+
/** Interval between poll cycles in milliseconds (default: 10000) */
|
|
533
|
+
pollIntervalMs?: number;
|
|
534
|
+
/** Interval between job dequeue attempts in milliseconds (default: 1000) */
|
|
535
|
+
jobPollIntervalMs?: number;
|
|
536
|
+
/** Worker ID (default: auto-generated) */
|
|
537
|
+
workerId?: string;
|
|
538
|
+
/** Stale job threshold in milliseconds (default: 60000) */
|
|
539
|
+
staleJobThresholdMs?: number;
|
|
540
|
+
/** Function to determine workflow priority */
|
|
541
|
+
getWorkflowPriority?: (workflowId: string) => number;
|
|
542
|
+
}
|
|
543
|
+
interface CreateRunOptions {
|
|
544
|
+
workflowId: string;
|
|
545
|
+
input: Record<string, unknown>;
|
|
546
|
+
config?: Record<string, unknown>;
|
|
547
|
+
priority?: number;
|
|
548
|
+
/** Domain-specific metadata */
|
|
549
|
+
metadata?: Record<string, unknown>;
|
|
550
|
+
}
|
|
551
|
+
interface CreateRunResult {
|
|
552
|
+
workflowRunId: string;
|
|
553
|
+
}
|
|
554
|
+
declare class WorkflowRuntime {
|
|
555
|
+
private isPolling;
|
|
556
|
+
private isProcessingJobs;
|
|
557
|
+
private isRunning;
|
|
558
|
+
private pollIntervalMs;
|
|
559
|
+
private jobPollIntervalMs;
|
|
560
|
+
private staleJobThresholdMs;
|
|
561
|
+
private workerId;
|
|
562
|
+
private persistence;
|
|
563
|
+
private jobQueue;
|
|
564
|
+
private registry;
|
|
565
|
+
private aiCallLogger?;
|
|
566
|
+
private getWorkflowPriority?;
|
|
567
|
+
private pollTimer;
|
|
568
|
+
private stageExecutor;
|
|
569
|
+
private jobsProcessed;
|
|
570
|
+
constructor(config: WorkflowRuntimeConfig);
|
|
571
|
+
/**
|
|
572
|
+
* Create an AI helper bound to this runtime's logger
|
|
573
|
+
* @param topic - Topic for logging (e.g., "workflow.abc123.stage.extraction")
|
|
574
|
+
* @param logContext - Optional log context for persistence logging in batch operations
|
|
575
|
+
*/
|
|
576
|
+
createAIHelper(topic: string, logContext?: LogContext): AIHelper;
|
|
577
|
+
/**
|
|
578
|
+
* Create a LogContext for a workflow stage (for use with createAIHelper)
|
|
579
|
+
* This enables batch operations to log to the workflow persistence.
|
|
580
|
+
*/
|
|
581
|
+
createLogContext(workflowRunId: string, stageRecordId: string): LogContext;
|
|
582
|
+
/**
|
|
583
|
+
* Start the runtime as a full worker (processes jobs + polls)
|
|
584
|
+
*/
|
|
585
|
+
start(): Promise<void>;
|
|
586
|
+
/**
|
|
587
|
+
* Stop the runtime
|
|
588
|
+
*/
|
|
589
|
+
stop(): void;
|
|
590
|
+
/**
|
|
591
|
+
* Create a new workflow run with validation.
|
|
592
|
+
* The runtime will pick it up on the next poll cycle and start execution.
|
|
593
|
+
*/
|
|
594
|
+
createRun(options: CreateRunOptions): Promise<CreateRunResult>;
|
|
595
|
+
/**
|
|
596
|
+
* Process jobs from the queue
|
|
597
|
+
*/
|
|
598
|
+
private processJobs;
|
|
599
|
+
private getWorkflowId;
|
|
600
|
+
/**
|
|
601
|
+
* Poll for pending workflows and suspended stages
|
|
602
|
+
*/
|
|
603
|
+
private poll;
|
|
604
|
+
/**
|
|
605
|
+
* Poll for pending workflows and enqueue their first stage.
|
|
606
|
+
* Uses claimNextPendingRun() for zero-contention claiming with FOR UPDATE SKIP LOCKED.
|
|
607
|
+
*/
|
|
608
|
+
private pollPendingWorkflows;
|
|
609
|
+
/**
|
|
610
|
+
* Poll suspended stages and resume if ready (public for manual triggering)
|
|
611
|
+
*/
|
|
612
|
+
pollSuspendedStages(): Promise<void>;
|
|
613
|
+
/**
|
|
614
|
+
* Transition a workflow to its next state (public for external calls)
|
|
615
|
+
*/
|
|
616
|
+
transitionWorkflow(workflowRunId: string): Promise<void>;
|
|
617
|
+
private checkAndResume;
|
|
618
|
+
private resumeWorkflow;
|
|
619
|
+
/**
|
|
620
|
+
* Create a minimal storage shim for context.storage (for API compatibility).
|
|
621
|
+
*/
|
|
622
|
+
private createStorageShim;
|
|
623
|
+
private markStageFailed;
|
|
624
|
+
private enqueueExecutionGroup;
|
|
625
|
+
private completeWorkflow;
|
|
626
|
+
}
|
|
627
|
+
/**
|
|
628
|
+
* Factory function to create a WorkflowRuntime instance
|
|
629
|
+
*/
|
|
630
|
+
declare function createWorkflowRuntime(config: WorkflowRuntimeConfig): WorkflowRuntime;
|
|
631
|
+
|
|
632
|
+
/**
|
|
633
|
+
* Model Mapping for Batch Providers
|
|
634
|
+
*
|
|
635
|
+
* Dynamically maps models from the registry to provider-specific batch API identifiers.
|
|
636
|
+
* Uses the `supportsAsyncBatch` flag and OpenRouter ID prefix to determine compatibility.
|
|
637
|
+
*/
|
|
638
|
+
|
|
639
|
+
declare const BatchProviderName: z$1.ZodEnum<{
|
|
640
|
+
google: "google";
|
|
641
|
+
anthropic: "anthropic";
|
|
642
|
+
openai: "openai";
|
|
643
|
+
}>;
|
|
644
|
+
type BatchProviderName = z$1.infer<typeof BatchProviderName>;
|
|
645
|
+
/**
|
|
646
|
+
* Get the provider-specific model ID, with fallback to default
|
|
647
|
+
*
|
|
648
|
+
* @param modelKey - The ModelKey from model-helper.ts (optional)
|
|
649
|
+
* @param provider - The batch provider
|
|
650
|
+
* @returns Provider-specific model ID
|
|
651
|
+
* @throws Error if model is not supported by the provider
|
|
652
|
+
*/
|
|
653
|
+
declare function resolveModelForProvider(modelKey: ModelKey | undefined, provider: BatchProviderName): string;
|
|
654
|
+
/**
|
|
655
|
+
* Get the best provider for a given ModelKey
|
|
656
|
+
* Returns the provider that natively supports the model
|
|
657
|
+
*/
|
|
658
|
+
declare function getBestProviderForModel(modelKey: ModelKey): BatchProviderName | undefined;
|
|
659
|
+
|
|
660
|
+
/**
|
|
661
|
+
* Low-Level Batch Provider Types
|
|
662
|
+
*
|
|
663
|
+
* These types are used internally by batch providers (Google, Anthropic, OpenAI).
|
|
664
|
+
* They have more fields than the high-level user-facing types in ai/ai-helper.ts.
|
|
665
|
+
*
|
|
666
|
+
* For the user-facing batch API, see:
|
|
667
|
+
* - AIBatchRequest - simple request with id, prompt, schema
|
|
668
|
+
* - AIBatchResult<T> - result with id, result, tokens
|
|
669
|
+
* - AIBatchHandle - handle with id, status, provider
|
|
670
|
+
*
|
|
671
|
+
* These provider-level types include additional fields like:
|
|
672
|
+
* - BatchHandle: requestCount, createdAt, metadata
|
|
673
|
+
* - BaseBatchRequest: model, system, maxTokens, temperature, tools
|
|
674
|
+
* - RawBatchResult: raw text before parsing, index
|
|
675
|
+
*/
|
|
676
|
+
|
|
677
|
+
/** Tool choice options compatible with AI SDK */
|
|
678
|
+
type BatchToolChoice = "auto" | "required" | "none" | {
|
|
679
|
+
type: "tool";
|
|
680
|
+
toolName: string;
|
|
681
|
+
};
|
|
682
|
+
/**
|
|
683
|
+
* Handle returned when a batch is submitted
|
|
684
|
+
*/
|
|
685
|
+
interface BatchHandle {
|
|
686
|
+
/** Unique identifier for the batch (provider-specific format) */
|
|
687
|
+
id: string;
|
|
688
|
+
/** Provider name (google, anthropic, openai) */
|
|
689
|
+
provider: string;
|
|
690
|
+
/** Number of requests in the batch */
|
|
691
|
+
requestCount: number;
|
|
692
|
+
/** When the batch was created */
|
|
693
|
+
createdAt: Date;
|
|
694
|
+
/** Provider-specific metadata */
|
|
695
|
+
metadata?: Record<string, unknown>;
|
|
696
|
+
}
|
|
697
|
+
/**
|
|
698
|
+
* Status of a batch job
|
|
699
|
+
*/
|
|
700
|
+
interface BatchStatus {
|
|
701
|
+
/** Current state of the batch */
|
|
702
|
+
state: BatchState;
|
|
703
|
+
/** Number of requests processed so far */
|
|
704
|
+
processedCount: number;
|
|
705
|
+
/** Total number of requests in the batch */
|
|
706
|
+
totalCount: number;
|
|
707
|
+
/** Number of successful requests (if available) */
|
|
708
|
+
succeededCount?: number;
|
|
709
|
+
/** Number of failed requests (if available) */
|
|
710
|
+
failedCount?: number;
|
|
711
|
+
/** Error message if batch failed */
|
|
712
|
+
error?: string;
|
|
713
|
+
/** Estimated completion time (if available) */
|
|
714
|
+
estimatedCompletion?: Date;
|
|
715
|
+
/** Total input tokens used (available when batch is complete) */
|
|
716
|
+
totalInputTokens?: number;
|
|
717
|
+
/** Total output tokens used (available when batch is complete) */
|
|
718
|
+
totalOutputTokens?: number;
|
|
719
|
+
}
|
|
720
|
+
type BatchState = "pending" | "processing" | "completed" | "failed" | "cancelled";
|
|
721
|
+
/**
|
|
722
|
+
* Result of a single request in a batch
|
|
723
|
+
*/
|
|
724
|
+
interface BatchResult<T> {
|
|
725
|
+
/** Index in the original request array */
|
|
726
|
+
index: number;
|
|
727
|
+
/** Custom ID if provided */
|
|
728
|
+
customId?: string;
|
|
729
|
+
/** Parsed/validated data */
|
|
730
|
+
data: T;
|
|
731
|
+
/** Input tokens used */
|
|
732
|
+
inputTokens: number;
|
|
733
|
+
/** Output tokens used */
|
|
734
|
+
outputTokens: number;
|
|
735
|
+
/** Status of this individual result */
|
|
736
|
+
status: "succeeded" | "failed";
|
|
737
|
+
/** Error message if this specific request failed */
|
|
738
|
+
error?: string;
|
|
739
|
+
}
|
|
740
|
+
/**
|
|
741
|
+
* Raw result from a provider (before schema validation)
|
|
742
|
+
*/
|
|
743
|
+
interface RawBatchResult {
|
|
744
|
+
index: number;
|
|
745
|
+
customId?: string;
|
|
746
|
+
text: string;
|
|
747
|
+
inputTokens: number;
|
|
748
|
+
outputTokens: number;
|
|
749
|
+
error?: string;
|
|
750
|
+
}
|
|
751
|
+
/**
 * Base request configuration shared across providers.
 */
interface BaseBatchRequest {
    /** The prompt/content to send. */
    prompt: string;
    /** Optional custom ID for tracking results back to this request. */
    customId?: string;
    /** Model to use - accepts ModelKey from model-helper.ts; when omitted,
     * the provider default is used (see resolveModelForProvider). */
    model?: ModelKey;
    /** System prompt (if supported by the provider). */
    system?: string;
    /** Maximum tokens to generate. */
    maxTokens?: number;
    /** Temperature for generation. */
    temperature?: number;
    /** Optional Zod schema for structured output. */
    schema?: z.ZodTypeAny;
    /** Tool definitions (same as AI SDK generateText/streamText). */
    tools?: ToolSet;
    /** Tool choice mode. */
    toolChoice?: BatchToolChoice;
}
|
|
774
|
+
/**
 * Request with schema for structured output.
 */
interface BatchRequestWithSchema<TSchema extends z.ZodTypeAny> extends BaseBatchRequest {
    /** Required here: the Zod schema the output is validated against. */
    schema: TSchema;
}
/**
 * Request without schema (plain text output).
 */
interface BatchRequestText extends BaseBatchRequest {
    /** `never` forbids supplying a schema on the plain-text variant. */
    schema?: never;
}
/**
 * Union type for batch requests: structured (with schema) or plain text.
 */
type BatchRequest<TSchema extends z.ZodTypeAny = z.ZodTypeAny> = BatchRequestWithSchema<TSchema> | BatchRequestText;
|
|
790
|
+
/**
 * Interface that all batch providers must implement.
 *
 * A provider submits a group of requests, reports progress, returns raw
 * (pre-validation) results, and may optionally support cancellation.
 */
interface BatchProvider<TRequest extends BaseBatchRequest = BaseBatchRequest, TRawResult extends RawBatchResult = RawBatchResult> {
    /** Provider name (google, anthropic, openai). */
    readonly name: string;
    /** Whether this provider supports batching. */
    readonly supportsBatching: boolean;
    /**
     * Submit requests to batch processing.
     * @param requests Array of requests to process
     * @param options Provider-specific options
     * @returns Handle to track the batch
     */
    submit(requests: TRequest[], options?: BatchSubmitOptions): Promise<BatchHandle>;
    /**
     * Check the status of a batch.
     * @param handle Batch handle from submit()
     * @returns Current status
     */
    checkStatus(handle: BatchHandle): Promise<BatchStatus>;
    /**
     * Retrieve results from a completed batch.
     * @param handle Batch handle from submit()
     * @returns Array of raw results
     */
    getResults(handle: BatchHandle): Promise<TRawResult[]>;
    /**
     * Cancel a running batch (optional - not all providers support this).
     * @param handle Batch handle from submit()
     */
    cancel?(handle: BatchHandle): Promise<void>;
}
|
|
823
|
+
interface BatchSubmitOptions {
|
|
824
|
+
/** Display name for the batch (if supported) */
|
|
825
|
+
displayName?: string;
|
|
826
|
+
/** Completion window (e.g., "24h" for OpenAI) */
|
|
827
|
+
completionWindow?: string;
|
|
828
|
+
/** Custom metadata to attach */
|
|
829
|
+
metadata?: Record<string, string>;
|
|
830
|
+
}
|
|
831
|
+
/**
 * Serialized batch for persistence during workflow suspension.
 */
interface SerializedBatch {
    /** The handle needed to resume polling the batch. */
    handle: BatchHandle;
    /** Name of the provider that owns the batch — presumably used to
     * re-select the provider on resume; confirm against the runtime. */
    providerName: string;
    /** Serialized schema definitions (if any). */
    schemaDefinitions?: unknown[];
}
|
|
840
|
+
/**
 * Logger interface for batch operations.
 */
interface BatchLogger {
    /** Emit a log entry at the given level, with optional structured metadata. */
    log: (level: LogLevel, message: string, meta?: Record<string, unknown>) => void;
}
|
|
846
|
+
/**
|
|
847
|
+
* Aggregated metrics for a batch
|
|
848
|
+
*/
|
|
849
|
+
interface BatchMetrics {
|
|
850
|
+
provider: string;
|
|
851
|
+
model: string;
|
|
852
|
+
requestCount: number;
|
|
853
|
+
totalInputTokens: number;
|
|
854
|
+
totalOutputTokens: number;
|
|
855
|
+
estimatedCost: number;
|
|
856
|
+
actualDuration: number;
|
|
857
|
+
successRate: number;
|
|
858
|
+
}
|
|
859
|
+
/** Google-specific batch request; currently identical to BaseBatchRequest. */
interface GoogleBatchRequest extends BaseBatchRequest {
}
/** Anthropic-specific batch request; currently identical to BaseBatchRequest. */
interface AnthropicBatchRequest extends BaseBatchRequest {
}
/** OpenAI-specific batch request; currently identical to BaseBatchRequest. */
interface OpenAIBatchRequest extends BaseBatchRequest {
}
|
|
865
|
+
|
|
866
|
+
/**
 * Google Batch Provider
 *
 * Implements batch processing using Google's GenAI Batch API.
 * Uses inline requests for simpler API.
 */

interface GoogleBatchProviderConfig {
    /** Google API key; when omitted, presumably read from the environment — confirm in the implementation. */
    apiKey?: string;
}
declare class GoogleBatchProvider implements BatchProvider<GoogleBatchRequest, RawBatchResult> {
    readonly name = "google";
    readonly supportsBatching = true;
    /** Underlying Google GenAI client. */
    private ai;
    /** Optional logger for batch lifecycle events. */
    private logger?;
    constructor(config?: GoogleBatchProviderConfig, logger?: BatchLogger);
    /** Submit requests as a Google batch job. */
    submit(requests: GoogleBatchRequest[], options?: BatchSubmitOptions): Promise<BatchHandle>;
    /** Poll the provider for the batch's current status. */
    checkStatus(handle: BatchHandle): Promise<BatchStatus>;
    /** Fetch raw results; `customIds` optionally re-associates results with request IDs. */
    getResults(handle: BatchHandle, customIds?: string[]): Promise<RawBatchResult[]>;
    /** Cancel a running batch. */
    cancel(handle: BatchHandle): Promise<void>;
    /** Maps Google job states onto the shared BatchState union. */
    private mapState;
}
|
|
888
|
+
|
|
889
|
+
/**
 * Anthropic Batch Provider
 *
 * Implements batch processing using Anthropic's Message Batches API.
 * Supports up to 10,000 requests per batch with 24h processing window.
 */

interface AnthropicBatchProviderConfig {
    /** Anthropic API key; when omitted, presumably read from the environment — confirm in the implementation. */
    apiKey?: string;
}
declare class AnthropicBatchProvider implements BatchProvider<AnthropicBatchRequest, RawBatchResult> {
    readonly name = "anthropic";
    readonly supportsBatching = true;
    /** Underlying Anthropic SDK client. */
    private client;
    /** Optional logger for batch lifecycle events. */
    private logger?;
    constructor(config?: AnthropicBatchProviderConfig, logger?: BatchLogger);
    /** Submit requests as an Anthropic message batch. */
    submit(requests: AnthropicBatchRequest[], options?: BatchSubmitOptions): Promise<BatchHandle>;
    /** Poll the provider for the batch's current status. */
    checkStatus(handle: BatchHandle): Promise<BatchStatus>;
    /** Fetch raw results for a completed batch. */
    getResults(handle: BatchHandle): Promise<RawBatchResult[]>;
    /** Cancel a running batch. */
    cancel(handle: BatchHandle): Promise<void>;
    /** Maps Anthropic batch statuses onto the shared BatchState union. */
    private mapStatus;
}
|
|
911
|
+
|
|
912
|
+
/**
 * OpenAI Batch Provider
 *
 * Implements batch processing using OpenAI's Batch API.
 * Note: OpenAI requires JSONL file uploads for batches.
 * Supports up to 50,000 requests per batch with 24h processing window.
 */

interface OpenAIBatchProviderConfig {
    /** OpenAI API key; when omitted, presumably read from the environment — confirm in the implementation. */
    apiKey?: string;
}
declare class OpenAIBatchProvider implements BatchProvider<OpenAIBatchRequest, RawBatchResult> {
    readonly name = "openai";
    readonly supportsBatching = true;
    /** Underlying OpenAI SDK client. */
    private client;
    /** Optional logger for batch lifecycle events. */
    private logger?;
    constructor(config?: OpenAIBatchProviderConfig, logger?: BatchLogger);
    /** Submit requests as an OpenAI batch (uploaded as a JSONL file, per the header note). */
    submit(requests: OpenAIBatchRequest[], options?: BatchSubmitOptions): Promise<BatchHandle>;
    /** Poll the provider for the batch's current status. */
    checkStatus(handle: BatchHandle): Promise<BatchStatus>;
    /** Fetch raw results for a completed batch. */
    getResults(handle: BatchHandle): Promise<RawBatchResult[]>;
    /** Cancel a running batch. */
    cancel(handle: BatchHandle): Promise<void>;
    /** Maps OpenAI batch statuses onto the shared BatchState union. */
    private mapStatus;
}
|
|
935
|
+
|
|
936
|
+
/** Public surface of this entry point: batch providers/types plus re-exported workflow-engine core symbols. */
export { AICallLogger, AIHelper, AnthropicBatchProvider, type AnthropicBatchProviderConfig, type AnthropicBatchRequest, type BaseBatchRequest, type BatchHandle, type BatchLogger, type BatchMetrics, type BatchProvider, type BatchRequest, type BatchRequestText, type BatchRequestWithSchema, type BatchResult, type BatchState, type BatchStatus, type BatchSubmitOptions, type CreateRunOptions, type CreateRunResult, GoogleBatchProvider, type GoogleBatchProviderConfig, type GoogleBatchRequest, type InferWorkflowStageIds, JobQueue, LogContext, ModelKey, OpenAIBatchProvider, type OpenAIBatchProviderConfig, type OpenAIBatchRequest, type PgNotifyLike, type RawBatchResult, type SerializedBatch, Stage, type StageExecutionRequest, type StageExecutionResult, StageExecutor, Workflow, WorkflowBuilder, WorkflowEventType, WorkflowExecutor, WorkflowPersistence, type WorkflowRegistry, WorkflowRuntime, type WorkflowRuntimeConfig, WorkflowSSEEvent, createStorage, createWorkflowRuntime, getBestProviderForModel, getDefaultStorageProvider, resolveModelForProvider, workflowEventBus };
|