@bratsos/workflow-engine 0.0.11 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +270 -513
- package/dist/chunk-D7RVRRM2.js +3 -0
- package/dist/chunk-D7RVRRM2.js.map +1 -0
- package/dist/chunk-HL3OJG7W.js +1033 -0
- package/dist/chunk-HL3OJG7W.js.map +1 -0
- package/dist/chunk-MUWP5SF2.js +33 -0
- package/dist/chunk-MUWP5SF2.js.map +1 -0
- package/dist/chunk-NYKMT46J.js +1143 -0
- package/dist/chunk-NYKMT46J.js.map +1 -0
- package/dist/chunk-P4KMGCT3.js +2292 -0
- package/dist/chunk-P4KMGCT3.js.map +1 -0
- package/dist/chunk-SPXBCZLB.js +17 -0
- package/dist/chunk-SPXBCZLB.js.map +1 -0
- package/dist/cli/sync-models.d.ts +1 -0
- package/dist/cli/sync-models.js +210 -0
- package/dist/cli/sync-models.js.map +1 -0
- package/dist/client-D4PoxADF.d.ts +798 -0
- package/dist/client.d.ts +5 -0
- package/dist/client.js +4 -0
- package/dist/client.js.map +1 -0
- package/dist/index-DAzCfO1R.d.ts +217 -0
- package/dist/index.d.ts +569 -0
- package/dist/index.js +399 -0
- package/dist/index.js.map +1 -0
- package/dist/interface-MMqhfQQK.d.ts +411 -0
- package/dist/kernel/index.d.ts +26 -0
- package/dist/kernel/index.js +3 -0
- package/dist/kernel/index.js.map +1 -0
- package/dist/kernel/testing/index.d.ts +44 -0
- package/dist/kernel/testing/index.js +85 -0
- package/dist/kernel/testing/index.js.map +1 -0
- package/dist/persistence/index.d.ts +2 -0
- package/dist/persistence/index.js +6 -0
- package/dist/persistence/index.js.map +1 -0
- package/dist/persistence/prisma/index.d.ts +37 -0
- package/dist/persistence/prisma/index.js +5 -0
- package/dist/persistence/prisma/index.js.map +1 -0
- package/dist/plugins-BCnDUwIc.d.ts +415 -0
- package/dist/ports-tU3rzPXJ.d.ts +245 -0
- package/dist/stage-BPw7m9Wx.d.ts +144 -0
- package/dist/testing/index.d.ts +264 -0
- package/dist/testing/index.js +920 -0
- package/dist/testing/index.js.map +1 -0
- package/package.json +11 -1
- package/skills/workflow-engine/SKILL.md +234 -348
- package/skills/workflow-engine/references/03-runtime-setup.md +111 -426
- package/skills/workflow-engine/references/05-persistence-setup.md +32 -0
- package/skills/workflow-engine/references/07-testing-patterns.md +141 -474
- package/skills/workflow-engine/references/08-common-patterns.md +118 -431
|
@@ -0,0 +1,798 @@
|
|
|
1
|
+
import { ToolSet, generateText, StepResult, streamText } from 'ai';
|
|
2
|
+
import z$1, { z } from 'zod';
|
|
3
|
+
import { A as AICallLogger } from './interface-MMqhfQQK.js';
|
|
4
|
+
import { b as StageContext, c as SuspendedStateSchema, C as CheckCompletionContext, d as CompletionCheckResult, S as Stage } from './stage-BPw7m9Wx.js';
|
|
5
|
+
|
|
6
|
+
/**
 * Schema Helpers and Utilities
 *
 * Provides common schemas and utilities for building type-safe workflows.
 * Reduces boilerplate and enforces best practices.
 */

/**
 * Constant for stages that don't need sequential input.
 * Use when a stage receives data from workflowContext instead of the input parameter.
 * (An empty Zod object — unknown keys are stripped, not rejected.)
 *
 * @example
 * export const myStage: Stage<
 *   typeof NoInputSchema, // Explicit: this stage uses workflowContext
 *   typeof OutputSchema,
 *   typeof ConfigSchema
 * > = {
 *   inputSchema: NoInputSchema,
 *   // ...
 * };
 */
declare const NoInputSchema: z.ZodObject<{}, z.core.$strip>;
/**
 * Access previous stage output with guaranteed type safety.
 *
 * Requires that the stage output exists, throws clear error if missing.
 * Use this for required dependencies on previous stages.
 *
 * Note: the type parameter T is an unchecked assertion by the caller — the
 * runtime value is only checked for presence, not shape.
 *
 * @param workflowContext - The workflow context containing all previous stage outputs
 * @param stageId - ID of the stage to access
 * @param field - Optional: specific field to extract from stage output
 * @returns The stage output (or field within it)
 * @throws Error if stage or field is missing
 *
 * @example
 * // Get entire stage output
 * const extractedData = requireStageOutput<ExtractedData>(
 *   context.workflowContext,
 *   "data-extraction"
 * );
 *
 * // Get specific field
 * const guidelines = requireStageOutput<Guideline[]>(
 *   context.workflowContext,
 *   "guidelines",
 *   "guidelines"
 * );
 */
declare function requireStageOutput<T>(workflowContext: Record<string, unknown>, stageId: string, field?: string): T;
|
|
55
|
+
|
|
56
|
+
/**
 * Stage Factory - Simplified stage definition with auto-metrics
 *
 * Provides a `defineStage()` function that reduces boilerplate by:
 * - Inferring types from schemas
 * - Auto-calculating metrics (timing handled by executor)
 * - Adding fluent context helpers (require/optional)
 * - Supporting both sync and async-batch modes
 *
 * @example
 * ```typescript
 * export const myStage = defineStage({
 *   id: "my-stage",
 *   name: "My Stage",
 *   description: "Does something useful",
 *   dependencies: ["previous-stage"],
 *
 *   schemas: {
 *     input: InputSchema, // or "none" for NoInputSchema
 *     output: OutputSchema,
 *     config: ConfigSchema,
 *   },
 *
 *   async execute(ctx) {
 *     const prevData = ctx.require("previous-stage");
 *     // ... stage logic
 *     return { output: { ... } };
 *   },
 * });
 * ```
 */

/**
 * Helper type to safely infer input type, handling the "none" special case.
 * "none" maps to the (empty) NoInputSchema's inferred type.
 */
type InferInput<TInput extends z.ZodTypeAny | "none"> = TInput extends "none" ? z.infer<typeof NoInputSchema> : TInput extends z.ZodTypeAny ? z.infer<TInput> : never;
/**
 * Enhanced stage context with fluent helpers.
 * Extends the base StageContext with typed accessors over prior stage outputs.
 */
interface EnhancedStageContext<TInput, TConfig, TContext extends Record<string, unknown>> extends StageContext<TInput, TConfig, TContext> {
    /**
     * Require output from a previous stage (throws if not found)
     *
     * @example
     * const { extractedData } = ctx.require("data-extraction");
     */
    require: <K extends keyof TContext>(stageId: K) => TContext[K];
    /**
     * Optionally get output from a previous stage (returns undefined if not found)
     *
     * @example
     * const optionalData = ctx.optional("optional-stage");
     * if (optionalData) { ... }
     */
    optional: <K extends keyof TContext>(stageId: K) => TContext[K] | undefined;
}
/**
 * Simplified execute result - just output and optional custom metrics
 */
interface SimpleStageResult<TOutput> {
    /** The stage's output value (validated against the stage's output schema) */
    output: TOutput;
    /**
     * Custom metrics specific to this stage (e.g., itemsProcessed, sectionsFound)
     * Timing metrics (startTime, endTime, duration) are auto-calculated by executor
     * AI metrics should be added here by stages that create their own AIHelper
     */
    customMetrics?: Record<string, number>;
    /**
     * Optional artifacts to store
     */
    artifacts?: Record<string, unknown>;
}
/**
 * Simplified suspended result - metrics are auto-filled by the factory.
 * Returned by async-batch stages to pause execution until a batch job completes.
 */
interface SimpleSuspendedResult {
    /** Discriminant: marks this result as a suspension rather than an output */
    suspended: true;
    /** State persisted across the suspension and handed back on resume */
    state: {
        batchId: string;
        submittedAt: string;
        pollInterval: number;
        maxWaitTime: number;
        metadata?: Record<string, unknown>;
        apiKey?: string;
    };
    /** How the orchestrator should poll for batch completion */
    pollConfig: {
        pollInterval: number;
        maxWaitTime: number;
        nextPollAt: Date;
    };
    /**
     * Optional custom metrics (timing & AI metrics are auto-filled)
     */
    customMetrics?: Record<string, number>;
}
/**
 * Sync stage definition
 */
interface SyncStageDefinition<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>> {
    /** Unique stage identifier */
    id: string;
    /** Human-readable name */
    name: string;
    /** Optional description */
    description?: string;
    /** Stage IDs this stage depends on (validated at workflow build time) */
    dependencies?: string[];
    /** Zod schemas for validation */
    schemas: {
        /** Input schema, or "none" for stages that use workflowContext */
        input: TInput;
        /** Output schema */
        output: TOutput;
        /** Configuration schema */
        config: TConfig;
    };
    /**
     * Execute the stage logic
     * Return just { output } - metrics are auto-calculated
     */
    execute: (ctx: EnhancedStageContext<InferInput<TInput>, z.infer<TConfig>, TContext>) => Promise<SimpleStageResult<z.infer<TOutput>>>;
    /**
     * Optional: Estimate cost before execution
     */
    estimateCost?: (input: InferInput<TInput>, config: z.infer<TConfig>) => number;
}
/**
 * Async-batch stage definition (for long-running batch jobs)
 */
interface AsyncBatchStageDefinition<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>> extends Omit<SyncStageDefinition<TInput, TOutput, TConfig, TContext>, "execute"> {
    /** Mark as async-batch mode */
    mode: "async-batch";
    /**
     * Execute the stage - either return result or suspend for batch processing
     *
     * When resuming from suspension, ctx.resumeState contains the suspended state.
     * Check this to determine whether to submit a new batch or fetch results.
     *
     * Return SimpleSuspendedResult when suspending - metrics will be auto-filled.
     */
    execute: (ctx: EnhancedStageContext<InferInput<TInput>, z.infer<TConfig>, TContext>) => Promise<SimpleStageResult<z.infer<TOutput>> | SimpleSuspendedResult>;
    /**
     * Check if the batch job is complete
     * Called by the orchestrator when polling suspended stages
     *
     * Context includes workflowRunId, stageId, config, log, and storage
     * so you don't need to store these in metadata.
     */
    checkCompletion: (suspendedState: z.infer<typeof SuspendedStateSchema>, context: CheckCompletionContext<z.infer<TConfig>>) => Promise<CompletionCheckResult<z.infer<TOutput>>>;
}
/**
 * Define a sync stage with simplified API
 */
declare function defineStage<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>>(definition: SyncStageDefinition<TInput, TOutput, TConfig, TContext>): Stage<TInput extends "none" ? typeof NoInputSchema : TInput, TOutput, TConfig, TContext>;
/**
 * Define an async-batch stage with simplified API
 * (overload of defineStage — prefer defineAsyncBatchStage for better inference)
 */
declare function defineStage<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>>(definition: AsyncBatchStageDefinition<TInput, TOutput, TConfig, TContext>): Stage<TInput extends "none" ? typeof NoInputSchema : TInput, TOutput, TConfig, TContext>;
/**
 * Define an async-batch stage with proper type inference for checkCompletion
 *
 * This is a dedicated function (not an alias) to ensure TypeScript properly
 * infers callback parameter types without overload resolution ambiguity.
 */
declare function defineAsyncBatchStage<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>>(definition: AsyncBatchStageDefinition<TInput, TOutput, TConfig, TContext>): Stage<TInput extends "none" ? typeof NoInputSchema : TInput, TOutput, TConfig, TContext>;
|
|
221
|
+
|
|
222
|
+
/**
 * Model Helper - Centralized model selection and cost tracking for AI scripts
 */

interface ModelConfig {
    /** Provider-facing model identifier (passed to the AI SDK, e.g. openRouter(model.id)) */
    id: string;
    /** Human-readable model name */
    name: string;
    /** Cost per million input tokens */
    inputCostPerMillion: number;
    /** Cost per million output tokens */
    outputCostPerMillion: number;
    provider: "openrouter" | "google" | "other";
    description?: string;
    /** Whether the provider offers async batch processing for this model */
    supportsAsyncBatch?: boolean;
    /** Discount applied to batch pricing, as a percentage */
    batchDiscountPercent?: number;
    isEmbeddingModel?: boolean;
    supportsTools?: boolean;
    supportsStructuredOutputs?: boolean;
    contextLength?: number;
    maxCompletionTokens?: number | null;
}
/**
 * Filter options for listModels()
 */
interface ModelFilter {
    /** Only include embedding models */
    isEmbeddingModel?: boolean;
    /** Only include models that support function calling */
    supportsTools?: boolean;
    /** Only include models that support structured outputs */
    supportsStructuredOutputs?: boolean;
    /** Only include models that support async batch */
    supportsAsyncBatch?: boolean;
}
/**
 * Configuration for workflow-engine.models.ts sync config
 */
interface ModelSyncConfig {
    /** Only include models matching these patterns (applied before exclude) */
    include?: (string | RegExp)[];
    /** Output path relative to consumer's project root (default: src/generated/models.ts) */
    outputPath?: string;
    /** Patterns to exclude models (string for exact match, RegExp for pattern) */
    exclude?: (string | RegExp)[];
    /** Custom models to add (embeddings, rerankers, etc.) */
    customModels?: Record<string, ModelConfig>;
}
/**
 * Model Registry - augmented by consumer's generated file for autocomplete.
 * Import the generated file to populate this interface (declaration merging);
 * intentionally empty here.
 */
interface ModelRegistry {
}
/**
 * Register models at runtime (called by generated file)
 */
declare function registerModels(models: Record<string, ModelConfig>): void;
/**
 * Get a model from the runtime registry
 */
declare function getRegisteredModel(key: string): ModelConfig | undefined;
/**
 * List all registered models
 */
declare function listRegisteredModels(): Array<{
    key: string;
    config: ModelConfig;
}>;
/** Aggregated usage and cost statistics for a single model */
interface ModelStats {
    modelId: string;
    modelName: string;
    apiCalls: number;
    inputTokens: number;
    outputTokens: number;
    totalTokens: number;
    inputCost: number;
    outputCost: number;
    totalCost: number;
}
/**
 * Static enum for built-in models - provides .enum accessor for AVAILABLE_MODELS keys
 */
declare const ModelKeyEnum: z$1.ZodEnum<{
    "gemini-2.5-flash": "gemini-2.5-flash";
}>;
/**
 * Type representing all available model keys
 * Supports both built-in enum keys AND dynamically registered keys via ModelRegistry
 */
type ModelKey = z$1.infer<typeof ModelKeyEnum> | keyof ModelRegistry;
/**
 * Zod schema that validates model keys against both the static enum AND the runtime registry
 * Use ModelKey.parse() to validate and type model key strings
 * (Same name as the ModelKey type above — TypeScript keeps the value and type namespaces separate.)
 */
declare const ModelKey: z$1.ZodPipe<z$1.ZodString, z$1.ZodTransform<"gemini-2.5-flash", string>>;
/**
 * Available AI models with their configurations
 * Prices should be updated regularly from provider pricing pages
 */
declare const AVAILABLE_MODELS: Record<string, ModelConfig>;
/**
 * Default model selection
 * Change this to switch the default model across all scripts
 */
declare const DEFAULT_MODEL_KEY: ModelKey;
/**
 * Get a model configuration by key
 * Checks both built-in AVAILABLE_MODELS and runtime MODEL_REGISTRY
 */
declare function getModel(key: ModelKey): ModelConfig;
/**
 * Get the default model configuration
 */
declare function getDefaultModel(): ModelConfig;
/**
 * List all available models (built-in + registered)
 * @param filter Optional filter to narrow down models by capability
 */
declare function listModels(filter?: ModelFilter): Array<{
    key: string;
    config: ModelConfig;
}>;
/**
 * Check if a model supports async batch processing
 */
declare function modelSupportsBatch(modelKey: ModelKey): boolean;
/**
 * Interface for model with bound recording function
 * Useful for parallel execution where you want to pass model + recordCall together
 */
interface ModelWithRecorder {
    id: string;
    name: string;
    recordCall: (inputTokens: number, outputTokens: number) => void;
}
/**
 * Get model by key with bound recordCall function
 * Perfect for parallel execution - no need to write model name twice
 *
 * Usage:
 * const model = getModelById("gemini-2.5-flash", modelTracker);
 * const result = await generateText({
 *   model: openRouter(model.id),
 *   prompt: "...",
 * });
 * model.recordCall(result.usage.inputTokens, result.usage.outputTokens);
 */
declare function getModelById(modelKey: ModelKey, tracker?: ModelStatsTracker): ModelWithRecorder;
/**
 * Calculate costs based on token usage
 */
declare function calculateCost(modelKey: ModelKey, inputTokens: number, outputTokens: number): {
    inputCost: number;
    outputCost: number;
    totalCost: number;
};
/**
 * Model stats tracker class - tracks single model OR aggregates multiple models
 */
declare class ModelStatsTracker {
    private modelKey?;
    private modelConfig?;
    private stats;
    private perModelStats;
    private isAggregating;
    constructor(modelKey?: ModelKey);
    /**
     * Create an aggregating tracker that combines stats from multiple models
     * Perfect for parallel execution where different calls use different models
     */
    static createAggregating(): ModelStatsTracker;
    /**
     * Get the model ID for use with AI SDK
     * @deprecated Use getModelById(modelKey).id instead for parallel execution
     */
    getModelId(): string;
    /**
     * Get the model configuration
     * @deprecated Use getModelById(modelKey) instead for parallel execution
     */
    getModelConfig(): ModelConfig;
    /**
     * Switch model (useful for sequential model switching)
     * @deprecated For parallel execution, pass model key to recordCall() instead
     */
    switchModel(modelKey: ModelKey): void;
    /**
     * Get a model helper with bound recordCall for parallel execution
     * Perfect for running multiple AI calls in parallel with different models
     *
     * Usage:
     * const flashModel = tracker.getModelById("gemini-2.5-flash");
     * const liteModel = tracker.getModelById("gemini-2.5-flash-lite");
     *
     * const [result1, result2] = await Promise.all([
     *   generateText({
     *     model: openRouter(flashModel.id),
     *     prompt: prompt1,
     *   }).then(r => { flashModel.recordCall(r.usage.inputTokens, r.usage.outputTokens); return r; }),
     *   generateText({
     *     model: openRouter(liteModel.id),
     *     prompt: prompt2,
     *   }).then(r => { liteModel.recordCall(r.usage.inputTokens, r.usage.outputTokens); return r; }),
     * ]);
     */
    getModelById(modelKey: ModelKey): {
        id: string;
        name: string;
        recordCall: (inputTokens: number, outputTokens: number) => void;
    };
    /**
     * Record an API call with token usage
     *
     * For sequential execution:
     *   tracker.switchModel("gemini-2.5-flash")
     *   tracker.recordCall(inputTokens, outputTokens)
     *
     * For parallel execution:
     *   tracker.recordCall(inputTokens, outputTokens, "gemini-2.5-flash")
     *   tracker.recordCall(inputTokens, outputTokens, "gemini-2.5-pro")
     */
    recordCall(inputTokens?: number, outputTokens?: number, modelKeyOverride?: ModelKey): void;
    /**
     * Estimate cost for a prompt without making an API call
     * Useful for dry-run mode to preview costs
     *
     * Note: This method is async because it lazy-loads the tiktoken library
     * to avoid bundling 2MB of tokenizer data for browser clients.
     *
     * @param prompt - The prompt text to estimate
     * @param estimatedOutputTokens - Estimated number of output tokens (default: 500)
     * @returns Object with token counts and cost estimates
     */
    estimateCost(prompt: string, estimatedOutputTokens?: number): Promise<{
        inputTokens: number;
        outputTokens: number;
        totalTokens: number;
        inputCost: number;
        outputCost: number;
        totalCost: number;
    }>;
    /**
     * Get current statistics (single model or aggregated)
     * Returns null only if tracker is in aggregating mode - use getAggregatedStats() instead
     */
    getStats(): ModelStats | null;
    /**
     * Get aggregated statistics from all models
     */
    getAggregatedStats(): {
        perModel: ModelStats[];
        totals: {
            totalApiCalls: number;
            totalInputTokens: number;
            totalOutputTokens: number;
            totalTokens: number;
            totalInputCost: number;
            totalOutputCost: number;
            totalCost: number;
        };
    };
    /**
     * Print statistics to console
     */
    printStats(): void;
    /**
     * Print aggregated statistics from all models
     */
    printAggregatedStats(): void;
    /**
     * Reset statistics
     */
    reset(): void;
}
/**
 * Print available models to console
 */
declare function printAvailableModels(): void;
|
|
498
|
+
|
|
499
|
+
/**
|
|
500
|
+
* AI Helper - Unified AI interaction tracking with hierarchical topics
|
|
501
|
+
*
|
|
502
|
+
* This is the new unified AI tracking system that replaces workflow-specific tracking.
|
|
503
|
+
* It supports:
|
|
504
|
+
* - Hierarchical topics for flexible categorization (e.g., "workflow.abc.stage.extraction")
|
|
505
|
+
* - All AI call types: generateText, generateObject, embed, streamText, batch
|
|
506
|
+
* - Automatic cost calculation with batch discounts
|
|
507
|
+
* - Persistent DB logging to AICall table
|
|
508
|
+
*
|
|
509
|
+
* @example
|
|
510
|
+
* ```typescript
|
|
511
|
+
* const ai = createAIHelper("workflow.abc123").createChild("stage", "extraction");
|
|
512
|
+
* const result = await ai.generateText("gemini-2.5-flash", prompt);
|
|
513
|
+
* ```
|
|
514
|
+
*/
|
|
515
|
+
|
|
516
|
+
type AICallType = "text" | "object" | "embed" | "stream" | "batch";
|
|
517
|
+
interface AITextResult {
|
|
518
|
+
text: string;
|
|
519
|
+
inputTokens: number;
|
|
520
|
+
outputTokens: number;
|
|
521
|
+
cost: number;
|
|
522
|
+
/** Structured output when experimental_output is used */
|
|
523
|
+
output?: any;
|
|
524
|
+
}
|
|
525
|
+
interface AIObjectResult<T> {
|
|
526
|
+
object: T;
|
|
527
|
+
inputTokens: number;
|
|
528
|
+
outputTokens: number;
|
|
529
|
+
cost: number;
|
|
530
|
+
}
|
|
531
|
+
interface AIEmbedResult {
|
|
532
|
+
embedding: number[];
|
|
533
|
+
embeddings: number[][];
|
|
534
|
+
dimensions: number;
|
|
535
|
+
inputTokens: number;
|
|
536
|
+
cost: number;
|
|
537
|
+
}
|
|
538
|
+
type AISDKStreamResult = ReturnType<typeof streamText>;
|
|
539
|
+
interface AIStreamResult {
|
|
540
|
+
stream: AsyncIterable<string>;
|
|
541
|
+
getUsage(): Promise<{
|
|
542
|
+
inputTokens: number;
|
|
543
|
+
outputTokens: number;
|
|
544
|
+
cost: number;
|
|
545
|
+
}>;
|
|
546
|
+
/** The raw AI SDK result - use this for methods like toUIMessageStreamResponse */
|
|
547
|
+
rawResult: AISDKStreamResult;
|
|
548
|
+
}
|
|
549
|
+
/**
|
|
550
|
+
* Context for logging to workflow persistence (optional).
|
|
551
|
+
* When provided, batch operations can log to the database.
|
|
552
|
+
*/
|
|
553
|
+
interface LogContext {
|
|
554
|
+
workflowRunId: string;
|
|
555
|
+
stageRecordId: string;
|
|
556
|
+
/** Function to create a log entry in persistence */
|
|
557
|
+
createLog: (data: {
|
|
558
|
+
workflowRunId: string;
|
|
559
|
+
workflowStageId: string;
|
|
560
|
+
level: "DEBUG" | "INFO" | "WARN" | "ERROR";
|
|
561
|
+
message: string;
|
|
562
|
+
metadata?: Record<string, unknown>;
|
|
563
|
+
}) => Promise<void>;
|
|
564
|
+
}
|
|
565
|
+
/** Log function type for batch operations */
|
|
566
|
+
type BatchLogFn = (level: "DEBUG" | "INFO" | "WARN" | "ERROR", message: string, meta?: Record<string, unknown>) => void;
|
|
567
|
+
interface TextOptions<TTools extends ToolSet = ToolSet> {
|
|
568
|
+
temperature?: number;
|
|
569
|
+
maxTokens?: number;
|
|
570
|
+
/** Tool definitions for the model to use */
|
|
571
|
+
tools?: TTools;
|
|
572
|
+
/** Tool choice: 'auto' (default), 'required' (force tool use), 'none', or specific tool name */
|
|
573
|
+
toolChoice?: Parameters<typeof generateText>[0]["toolChoice"];
|
|
574
|
+
/** Condition to stop tool execution (e.g., stepCountIs(3)) */
|
|
575
|
+
stopWhen?: Parameters<typeof generateText>[0]["stopWhen"];
|
|
576
|
+
/** Callback fired when each step completes (for collecting tool results) */
|
|
577
|
+
onStepFinish?: (stepResult: StepResult<TTools>) => Promise<void> | void;
|
|
578
|
+
/** Experimental structured output - use with tools for combined tool calling + structured output */
|
|
579
|
+
experimental_output?: Parameters<typeof generateText>[0]["experimental_output"];
|
|
580
|
+
}
|
|
581
|
+
interface ObjectOptions<TTools extends ToolSet = ToolSet> {
|
|
582
|
+
temperature?: number;
|
|
583
|
+
maxTokens?: number;
|
|
584
|
+
/** Tool definitions for the model to use */
|
|
585
|
+
tools?: TTools;
|
|
586
|
+
/** Condition to stop tool execution (e.g., stepCountIs(3)) */
|
|
587
|
+
stopWhen?: Parameters<typeof generateText>[0]["stopWhen"];
|
|
588
|
+
/** Callback fired when each step completes (for collecting tool results) */
|
|
589
|
+
onStepFinish?: (stepResult: StepResult<TTools>) => Promise<void> | void;
|
|
590
|
+
}
|
|
591
|
+
interface EmbedOptions {
|
|
592
|
+
taskType?: "RETRIEVAL_QUERY" | "RETRIEVAL_DOCUMENT" | "SEMANTIC_SIMILARITY";
|
|
593
|
+
/** Override the default embedding dimensions (from embedding-config.ts) */
|
|
594
|
+
dimensions?: number;
|
|
595
|
+
}
|
|
596
|
+
interface StreamOptions {
|
|
597
|
+
temperature?: number;
|
|
598
|
+
maxTokens?: number;
|
|
599
|
+
onChunk?: (chunk: string) => void;
|
|
600
|
+
/** Tool definitions for the model to use */
|
|
601
|
+
tools?: Parameters<typeof streamText>[0]["tools"];
|
|
602
|
+
/** Condition to stop tool execution (e.g., stepCountIs(3)) */
|
|
603
|
+
stopWhen?: Parameters<typeof streamText>[0]["stopWhen"];
|
|
604
|
+
/** Callback fired when each step completes (for collecting tool results) */
|
|
605
|
+
onStepFinish?: Parameters<typeof streamText>[0]["onStepFinish"];
|
|
606
|
+
}
|
|
607
|
+
interface MediaPart {
|
|
608
|
+
type: "file";
|
|
609
|
+
data: Buffer | Uint8Array | string;
|
|
610
|
+
mediaType: string;
|
|
611
|
+
filename?: string;
|
|
612
|
+
}
|
|
613
|
+
interface TextPart {
|
|
614
|
+
type: "text";
|
|
615
|
+
text: string;
|
|
616
|
+
}
|
|
617
|
+
type ContentPart = TextPart | MediaPart;
|
|
618
|
+
type TextInput = string | ContentPart[];
|
|
619
|
+
type StreamTextInput = {
|
|
620
|
+
prompt: string;
|
|
621
|
+
messages?: never;
|
|
622
|
+
system?: string;
|
|
623
|
+
} | {
|
|
624
|
+
messages: Parameters<typeof streamText>[0]["messages"];
|
|
625
|
+
prompt?: never;
|
|
626
|
+
system?: string;
|
|
627
|
+
};
|
|
628
|
+
/** Provider identifier for batch operations */
|
|
629
|
+
type AIBatchProvider = "google" | "anthropic" | "openai";
|
|
630
|
+
/** A request to be processed in a batch */
|
|
631
|
+
interface AIBatchRequest {
|
|
632
|
+
/** Unique identifier for this request (used to match results) */
|
|
633
|
+
id: string;
|
|
634
|
+
/** The prompt to send to the model */
|
|
635
|
+
prompt: string;
|
|
636
|
+
/** Optional Zod schema for structured JSON output */
|
|
637
|
+
schema?: z.ZodTypeAny;
|
|
638
|
+
}
|
|
639
|
+
/** Result of a single request in a batch */
|
|
640
|
+
interface AIBatchResult<T = string> {
|
|
641
|
+
/** The request ID (matches the id from AIBatchRequest) */
|
|
642
|
+
id: string;
|
|
643
|
+
/** Original prompt (may be empty if not available from provider) */
|
|
644
|
+
prompt: string;
|
|
645
|
+
/** The parsed result (JSON object if schema was provided, otherwise string) */
|
|
646
|
+
result: T;
|
|
647
|
+
/** Input tokens used */
|
|
648
|
+
inputTokens: number;
|
|
649
|
+
/** Output tokens used */
|
|
650
|
+
outputTokens: number;
|
|
651
|
+
/** Status of this individual result */
|
|
652
|
+
status: "succeeded" | "failed";
|
|
653
|
+
/** Error message if status is "failed" */
|
|
654
|
+
error?: string;
|
|
655
|
+
}
|
|
656
|
+
/** Handle for tracking a submitted batch */
interface AIBatchHandle {
    /** Batch identifier from the provider */
    id: string;
    /** Current status of the batch as a whole (individual results carry their own succeeded/failed status) */
    status: "pending" | "processing" | "completed" | "failed";
    /** The provider used for this batch (for resume support) */
    provider?: AIBatchProvider;
}
/** Interface for batch operations on an AI model */
interface AIBatch<T = string> {
    /** Submit requests for batch processing */
    submit(requests: AIBatchRequest[]): Promise<AIBatchHandle>;
    /** Check the status of a batch */
    getStatus(batchId: string): Promise<AIBatchHandle>;
    /** Retrieve results from a completed batch (metadata semantics not shown here — presumably provider-specific; confirm with implementation) */
    getResults(batchId: string, metadata?: Record<string, unknown>): Promise<AIBatchResult<T>[]>;
    /** Check if results have been recorded for this batch */
    isRecorded(batchId: string): Promise<boolean>;
    /** Record batch results manually when batch provider integration is not implemented */
    recordResults(batchId: string, results: AIBatchResult<T>[]): Promise<void>;
}
/** Parameters for the object-form overload of AIHelper.recordCall. */
interface RecordCallParams {
    /** Model the call was made against. */
    modelKey: ModelKey;
    /** Kind of AI call being recorded. */
    callType: AICallType;
    /** Prompt that was sent. */
    prompt: string;
    /** Response that was received. */
    response: string;
    /** Input tokens consumed. */
    inputTokens: number;
    /** Output tokens produced. */
    outputTokens: number;
    /** Arbitrary extra data stored with the call record. */
    metadata?: Record<string, unknown>;
}
/** Aggregated usage statistics across all calls recorded by an AIHelper. */
interface AIHelperStats {
    /** Total number of recorded calls. */
    totalCalls: number;
    /** Sum of input tokens across all calls. */
    totalInputTokens: number;
    /** Sum of output tokens across all calls. */
    totalOutputTokens: number;
    /** Total cost across all calls (currency/unit not shown here — presumably USD; confirm against calculateCost). */
    totalCost: number;
    /** Per-model breakdown, keyed by model identifier. */
    perModel: Record<string, {
        calls: number;
        inputTokens: number;
        outputTokens: number;
        cost: number;
    }>;
}
/** Topic-scoped facade over AI model operations, with per-call usage tracking. */
interface AIHelper {
    /** Current topic path */
    readonly topic: string;
    /** Generate free-form text, optionally with a tool set the model may call. */
    generateText<TTools extends ToolSet = ToolSet>(modelKey: ModelKey, prompt: TextInput, options?: TextOptions<TTools>): Promise<AITextResult>;
    /** Generate structured output validated against the given Zod schema; the result is typed via `z.infer`. */
    generateObject<TSchema extends z.ZodTypeAny>(modelKey: ModelKey, prompt: TextInput, schema: TSchema, options?: ObjectOptions): Promise<AIObjectResult<z.infer<TSchema>>>;
    /** Embed a single string or a list of strings. */
    embed(modelKey: ModelKey, text: string | string[], options?: EmbedOptions): Promise<AIEmbedResult>;
    /** Stream text generation from a prompt or a message history (see StreamTextInput). */
    streamText(modelKey: ModelKey, input: StreamTextInput, options?: StreamOptions): AIStreamResult;
    /** Obtain a batch-operations handle for the given model and optional provider. */
    batch<T = string>(modelKey: ModelKey, provider?: AIBatchProvider): AIBatch<T>;
    /** Create a child helper whose topic appends `segment` (and optionally `id`) to this helper's topic path. */
    createChild(segment: string, id?: string): AIHelper;
    /** Record a call: object-form overload (see RecordCallParams). */
    recordCall(params: RecordCallParams): void;
    /** Record a call: positional-argument overload. */
    recordCall(modelKey: ModelKey, prompt: string, response: string, tokens: {
        input: number;
        output: number;
    }, options?: {
        callType?: AICallType;
        isBatch?: boolean;
        metadata?: Record<string, unknown>;
    }): void;
    /** Aggregate call/token/cost statistics recorded through this helper. */
    getStats(): Promise<AIHelperStats>;
}
/**
 * Create an AI helper instance with topic-based tracking.
 *
 * @param topic - Initial topic path (e.g., "workflow.abc123" or "reranker")
 * @param logger - Call logger that receives a record of every AI call made through the helper
 * @param logContext - Optional context attached to logged entries
 * @returns AIHelper instance
 *
 * @example
 * ```typescript
 * // Simple topic
 * const ai = createAIHelper("reranker", logger);
 *
 * // Hierarchical topic
 * const ai = createAIHelper("workflow.abc123", logger)
 *   .createChild("stage", "extraction");
 * // topic: "workflow.abc123.stage.extraction"
 *
 * // Use AI methods
 * const result = await ai.generateText("gemini-2.5-flash", prompt);
 * ```
 */
declare function createAIHelper(topic: string, logger: AICallLogger, logContext?: LogContext): AIHelper;
/**
 * Workflow Event Types for SSE Streaming
 *
 * This file contains ONLY types and interfaces for workflow events.
 * It is safe to use in both client and server environments.
 */
interface WorkflowSSEEvent {
    /** Event discriminant; see WorkflowEventType for the full set. */
    type: WorkflowEventType;
    /** Run the event belongs to. */
    workflowRunId: string;
    /** When the event was emitted. */
    timestamp: Date;
    /** Event-specific payload; the concrete shapes are the *Payload interfaces in this module. */
    data: Record<string, unknown>;
}
/** All event type strings emitted over the workflow SSE stream. */
type WorkflowEventType = "connected" | "workflow:started" | "workflow:completed" | "workflow:suspended" | "workflow:cancelled" | "workflow:failed" | "stage:started" | "stage:progress" | "stage:completed" | "stage:suspended" | "stage:failed" | "log";
/** Payload for "workflow:started" events. */
interface WorkflowStartedPayload {
    workflowRunId: string;
    workflowName: string;
}
/** Payload for "workflow:completed" events. */
interface WorkflowCompletedPayload {
    workflowRunId: string;
    /** Final workflow output. */
    output: unknown;
    /** Total run duration (unit not shown here — presumably milliseconds; confirm with emitter). */
    duration?: number;
    totalCost?: number;
    totalTokens?: number;
}
/** Payload for "workflow:suspended" events. */
interface WorkflowSuspendedPayload {
    workflowRunId: string;
    /** Stage at which the workflow suspended. */
    stageId: string;
}
/** Payload for "workflow:failed" events. */
interface WorkflowFailedPayload {
    workflowRunId: string;
    /** Failure description. */
    error: string;
}
/** Payload for "stage:started" events. */
interface StageStartedPayload {
    stageId: string;
    stageName: string;
    /** Position of the stage in the workflow (0- vs 1-based not shown here — confirm with emitter). */
    stageNumber: number;
}
/** Payload for "stage:completed" events. */
interface StageCompletedPayload {
    stageId: string;
    stageName: string;
    /** Stage duration (unit not shown here — presumably milliseconds; confirm with emitter). */
    duration: number;
    cost?: number;
    inputTokens?: number;
    outputTokens?: number;
    outputCount?: number;
}
/** Payload for "stage:failed" events. */
interface StageFailedPayload {
    stageId: string;
    stageName: string;
    /** Failure description. */
    error: string;
}
/** Payload for "log" events. */
interface LogPayload {
    /** Log level string (exact value set not shown here). */
    level: string;
    message: string;
    /** Structured metadata attached to the log entry. */
    meta?: Record<string, unknown>;
}
// Bundler-generated re-export map: public names are aliased to minified
// one/two-character identifiers consumed by the package's other entry points.
// Do not edit by hand — regenerated on build.
export { type WorkflowCompletedPayload as $, type AIBatch as A, type BatchLogFn as B, getModel as C, DEFAULT_MODEL_KEY as D, type EmbedOptions as E, getModelById as F, getRegisteredModel as G, listModels as H, type InferInput as I, listRegisteredModels as J, modelSupportsBatch as K, type LogContext as L, ModelKey as M, NoInputSchema as N, type ObjectOptions as O, printAvailableModels as P, registerModels as Q, type RecordCallParams as R, type SimpleStageResult as S, type TextOptions as T, requireStageOutput as U, type LogPayload as V, type WorkflowEventType as W, type ModelFilter as X, type StageCompletedPayload as Y, type StageFailedPayload as Z, type StageStartedPayload as _, type AIBatchHandle as a, type WorkflowFailedPayload as a0, type WorkflowStartedPayload as a1, type WorkflowSuspendedPayload as a2, type AIBatchProvider as b, type AIBatchRequest as c, type AIBatchResult as d, type AICallType as e, type AIEmbedResult as f, type AIHelper as g, type AIObjectResult as h, type AIStreamResult as i, type AITextResult as j, AVAILABLE_MODELS as k, type AsyncBatchStageDefinition as l, type EnhancedStageContext as m, type ModelConfig as n, type ModelRegistry as o, type ModelStats as p, ModelStatsTracker as q, type ModelSyncConfig as r, type StreamOptions as s, type SyncStageDefinition as t, type WorkflowSSEEvent as u, calculateCost as v, createAIHelper as w, defineAsyncBatchStage as x, defineStage as y, getDefaultModel as z };