@bratsos/workflow-engine 0.0.11 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-7IITBLFY.js +900 -0
- package/dist/chunk-7IITBLFY.js.map +1 -0
- package/dist/chunk-D7RVRRM2.js +3 -0
- package/dist/chunk-D7RVRRM2.js.map +1 -0
- package/dist/chunk-MUWP5SF2.js +33 -0
- package/dist/chunk-MUWP5SF2.js.map +1 -0
- package/dist/chunk-P4KMGCT3.js +2292 -0
- package/dist/chunk-P4KMGCT3.js.map +1 -0
- package/dist/cli/sync-models.d.ts +1 -0
- package/dist/cli/sync-models.js +210 -0
- package/dist/cli/sync-models.js.map +1 -0
- package/dist/client-5vz5Vv4A.d.ts +938 -0
- package/dist/client.d.ts +4 -0
- package/dist/client.js +4 -0
- package/dist/client.js.map +1 -0
- package/dist/index-DmR3E8D7.d.ts +198 -0
- package/dist/index.d.ts +936 -0
- package/dist/index.js +2387 -0
- package/dist/index.js.map +1 -0
- package/dist/interface-Cv22wvLG.d.ts +344 -0
- package/dist/persistence/index.d.ts +2 -0
- package/dist/persistence/index.js +5 -0
- package/dist/persistence/index.js.map +1 -0
- package/dist/persistence/prisma/index.d.ts +37 -0
- package/dist/persistence/prisma/index.js +4 -0
- package/dist/persistence/prisma/index.js.map +1 -0
- package/dist/testing/index.d.ts +242 -0
- package/dist/testing/index.js +777 -0
- package/dist/testing/index.js.map +1 -0
- package/package.json +1 -1
|
@@ -0,0 +1,938 @@
|
|
|
1
|
+
import { ToolSet, generateText, StepResult, streamText } from 'ai';
|
|
2
|
+
import z$1, { z } from 'zod';
|
|
3
|
+
import { A as AICallLogger } from './interface-Cv22wvLG.js';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Schema Helpers and Utilities
|
|
7
|
+
*
|
|
8
|
+
* Provides common schemas and utilities for building type-safe workflows.
|
|
9
|
+
* Reduces boilerplate and enforces best practices.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Constant for stages that don't need sequential input
|
|
14
|
+
* Use when a stage receives data from workflowContext instead of the input parameter
|
|
15
|
+
*
|
|
16
|
+
* @example
|
|
17
|
+
* export const myStage: Stage<
|
|
18
|
+
* typeof NoInputSchema, // Explicit: this stage uses workflowContext
|
|
19
|
+
* typeof OutputSchema,
|
|
20
|
+
* typeof ConfigSchema
|
|
21
|
+
* > = {
|
|
22
|
+
* inputSchema: NoInputSchema,
|
|
23
|
+
* // ...
|
|
24
|
+
* };
|
|
25
|
+
*/
|
|
26
|
+
declare const NoInputSchema: z.ZodObject<{}, z.core.$strip>;
|
|
27
|
+
/**
|
|
28
|
+
* Access previous stage output with guaranteed type safety
|
|
29
|
+
*
|
|
30
|
+
* Requires that the stage output exists, throws clear error if missing.
|
|
31
|
+
* Use this for required dependencies on previous stages.
|
|
32
|
+
*
|
|
33
|
+
* @param workflowContext - The workflow context containing all previous stage outputs
|
|
34
|
+
* @param stageId - ID of the stage to access
|
|
35
|
+
* @param field - Optional: specific field to extract from stage output
|
|
36
|
+
* @returns The stage output (or field within it)
|
|
37
|
+
* @throws Error if stage or field is missing
|
|
38
|
+
*
|
|
39
|
+
* @example
|
|
40
|
+
* // Get entire stage output
|
|
41
|
+
* const extractedData = requireStageOutput<ExtractedData>(
|
|
42
|
+
* context.workflowContext,
|
|
43
|
+
* "data-extraction"
|
|
44
|
+
* );
|
|
45
|
+
*
|
|
46
|
+
* // Get specific field
|
|
47
|
+
* const guidelines = requireStageOutput<Guideline[]>(
|
|
48
|
+
* context.workflowContext,
|
|
49
|
+
* "guidelines",
|
|
50
|
+
* "guidelines"
|
|
51
|
+
* );
|
|
52
|
+
*/
|
|
53
|
+
declare function requireStageOutput<T>(workflowContext: Record<string, unknown>, stageId: string, field?: string): T;
|
|
54
|
+
|
|
55
|
+
/**
|
|
56
|
+
* Core type definitions for Workflow System v2
|
|
57
|
+
*
|
|
58
|
+
* See WORKFLOW_SYSTEM_PROPOSAL.md for full architectural details
|
|
59
|
+
*/
|
|
60
|
+
|
|
61
|
+
interface ProgressUpdate {
|
|
62
|
+
stageId: string;
|
|
63
|
+
stageName: string;
|
|
64
|
+
progress: number;
|
|
65
|
+
message: string;
|
|
66
|
+
details?: Record<string, unknown>;
|
|
67
|
+
}
|
|
68
|
+
interface StageMetrics {
|
|
69
|
+
startTime: number;
|
|
70
|
+
endTime: number;
|
|
71
|
+
duration: number;
|
|
72
|
+
itemsProcessed?: number;
|
|
73
|
+
itemsProduced?: number;
|
|
74
|
+
aiCalls?: number;
|
|
75
|
+
totalTokens?: number;
|
|
76
|
+
totalCost?: number;
|
|
77
|
+
}
|
|
78
|
+
interface EmbeddingResult {
|
|
79
|
+
id: string;
|
|
80
|
+
content: string;
|
|
81
|
+
embedding: number[];
|
|
82
|
+
similarity?: number;
|
|
83
|
+
metadata?: Record<string, unknown>;
|
|
84
|
+
}
|
|
85
|
+
interface EmbeddingInfo {
|
|
86
|
+
model: string;
|
|
87
|
+
dimensions: number;
|
|
88
|
+
results: EmbeddingResult[];
|
|
89
|
+
totalProcessed?: number;
|
|
90
|
+
averageSimilarity?: number;
|
|
91
|
+
}
|
|
92
|
+
interface StageResult<TOutput> {
|
|
93
|
+
output: TOutput;
|
|
94
|
+
metrics: StageMetrics;
|
|
95
|
+
artifacts?: Record<string, unknown>;
|
|
96
|
+
embeddings?: EmbeddingInfo;
|
|
97
|
+
}
|
|
98
|
+
declare const SuspendedStateSchema: z.ZodObject<{
|
|
99
|
+
batchId: z.ZodString;
|
|
100
|
+
statusUrl: z.ZodOptional<z.ZodString>;
|
|
101
|
+
apiKey: z.ZodOptional<z.ZodString>;
|
|
102
|
+
submittedAt: z.ZodString;
|
|
103
|
+
pollInterval: z.ZodNumber;
|
|
104
|
+
maxWaitTime: z.ZodNumber;
|
|
105
|
+
metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
|
|
106
|
+
}, z.core.$strip>;
|
|
107
|
+
interface SuspendedResult {
|
|
108
|
+
suspended: true;
|
|
109
|
+
state: z.infer<typeof SuspendedStateSchema>;
|
|
110
|
+
pollConfig: {
|
|
111
|
+
pollInterval: number;
|
|
112
|
+
maxWaitTime: number;
|
|
113
|
+
nextPollAt: Date;
|
|
114
|
+
};
|
|
115
|
+
metrics: StageMetrics;
|
|
116
|
+
}
|
|
117
|
+
interface CompletionCheckResult<TOutput> {
|
|
118
|
+
ready: boolean;
|
|
119
|
+
output?: TOutput;
|
|
120
|
+
error?: string;
|
|
121
|
+
nextCheckIn?: number;
|
|
122
|
+
metrics?: StageMetrics;
|
|
123
|
+
embeddings?: EmbeddingInfo;
|
|
124
|
+
}
|
|
125
|
+
type LogLevel = "DEBUG" | "INFO" | "WARN" | "ERROR";
|
|
126
|
+
type StageMode = "sync" | "async-batch";
|
|
127
|
+
|
|
128
|
+
/**
|
|
129
|
+
* Stage interface and context definitions
|
|
130
|
+
*
|
|
131
|
+
* Stages are the building blocks of workflows. Each stage:
|
|
132
|
+
* - Has strongly-typed input, output, and config schemas (Zod)
|
|
133
|
+
* - Can be sync or async-batch
|
|
134
|
+
* - Has access to AI helper, storage, and logging via context
|
|
135
|
+
* - Can suspend workflow for long-running batch operations
|
|
136
|
+
*/
|
|
137
|
+
|
|
138
|
+
interface StageContext<TInput, TConfig, TWorkflowContext = Record<string, unknown>> {
|
|
139
|
+
workflowRunId: string;
|
|
140
|
+
stageId: string;
|
|
141
|
+
stageNumber: number;
|
|
142
|
+
stageName: string;
|
|
143
|
+
/** Database record ID for this stage execution (for logging to persistence) */
|
|
144
|
+
stageRecordId?: string;
|
|
145
|
+
input: TInput;
|
|
146
|
+
config: TConfig;
|
|
147
|
+
resumeState?: z.infer<typeof SuspendedStateSchema>;
|
|
148
|
+
onProgress: (update: ProgressUpdate) => void;
|
|
149
|
+
onLog: (level: LogLevel, message: string, meta?: Record<string, unknown>) => void;
|
|
150
|
+
log: (level: LogLevel, message: string, meta?: Record<string, unknown>) => void;
|
|
151
|
+
storage: StageStorage;
|
|
152
|
+
workflowContext: Partial<TWorkflowContext>;
|
|
153
|
+
}
|
|
154
|
+
interface StageStorage {
|
|
155
|
+
save<T>(key: string, data: T): Promise<void>;
|
|
156
|
+
load<T>(key: string): Promise<T>;
|
|
157
|
+
exists(key: string): Promise<boolean>;
|
|
158
|
+
delete(key: string): Promise<void>;
|
|
159
|
+
getStageKey(stageId: string, suffix?: string): string;
|
|
160
|
+
}
|
|
161
|
+
/**
|
|
162
|
+
* Context passed to checkCompletion for async-batch stages.
|
|
163
|
+
* Includes identification info so stages don't need to store it in metadata.
|
|
164
|
+
*/
|
|
165
|
+
interface CheckCompletionContext<TConfig> {
|
|
166
|
+
workflowRunId: string;
|
|
167
|
+
stageId: string;
|
|
168
|
+
/** Database record ID for this stage execution (for logging to persistence) */
|
|
169
|
+
stageRecordId?: string;
|
|
170
|
+
config: TConfig;
|
|
171
|
+
onLog: (level: LogLevel, message: string, meta?: Record<string, unknown>) => void;
|
|
172
|
+
log: (level: LogLevel, message: string, meta?: Record<string, unknown>) => void;
|
|
173
|
+
storage: StageStorage;
|
|
174
|
+
}
|
|
175
|
+
interface Stage<TInput extends z.ZodTypeAny, TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TWorkflowContext = Record<string, unknown>> {
|
|
176
|
+
id: string;
|
|
177
|
+
name: string;
|
|
178
|
+
description?: string;
|
|
179
|
+
/**
|
|
180
|
+
* Optional: List of stage IDs that this stage depends on.
|
|
181
|
+
* The workflow builder will validate that all dependencies are present
|
|
182
|
+
* in the workflow before this stage is executed.
|
|
183
|
+
*
|
|
184
|
+
* Example: dependencies: ["data-extraction", "guidelines"]
|
|
185
|
+
*/
|
|
186
|
+
dependencies?: string[];
|
|
187
|
+
inputSchema: TInput;
|
|
188
|
+
outputSchema: TOutput;
|
|
189
|
+
configSchema: TConfig;
|
|
190
|
+
execute: (context: StageContext<z.infer<TInput>, z.infer<TConfig>, TWorkflowContext>) => Promise<StageResult<z.infer<TOutput>> | SuspendedResult>;
|
|
191
|
+
checkCompletion?: (suspendedState: z.infer<typeof SuspendedStateSchema>, context: CheckCompletionContext<z.infer<TConfig>>) => Promise<CompletionCheckResult<z.infer<TOutput>>>;
|
|
192
|
+
mode?: StageMode;
|
|
193
|
+
estimateCost?: (input: z.infer<TInput>, config: z.infer<TConfig>) => number;
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
/**
|
|
197
|
+
* Stage Factory - Simplified stage definition with auto-metrics
|
|
198
|
+
*
|
|
199
|
+
* Provides a `defineStage()` function that reduces boilerplate by:
|
|
200
|
+
* - Inferring types from schemas
|
|
201
|
+
* - Auto-calculating metrics (timing handled by executor)
|
|
202
|
+
* - Adding fluent context helpers (require/optional)
|
|
203
|
+
* - Supporting both sync and async-batch modes
|
|
204
|
+
*
|
|
205
|
+
* @example
|
|
206
|
+
* ```typescript
|
|
207
|
+
* export const myStage = defineStage({
|
|
208
|
+
* id: "my-stage",
|
|
209
|
+
* name: "My Stage",
|
|
210
|
+
* description: "Does something useful",
|
|
211
|
+
* dependencies: ["previous-stage"],
|
|
212
|
+
*
|
|
213
|
+
* schemas: {
|
|
214
|
+
* input: InputSchema, // or "none" for NoInputSchema
|
|
215
|
+
* output: OutputSchema,
|
|
216
|
+
* config: ConfigSchema,
|
|
217
|
+
* },
|
|
218
|
+
*
|
|
219
|
+
* async execute(ctx) {
|
|
220
|
+
* const prevData = ctx.require("previous-stage");
|
|
221
|
+
* // ... stage logic
|
|
222
|
+
* return { output: { ... } };
|
|
223
|
+
* },
|
|
224
|
+
* });
|
|
225
|
+
* ```
|
|
226
|
+
*/
|
|
227
|
+
|
|
228
|
+
/**
|
|
229
|
+
* Helper type to safely infer input type, handling the "none" special case
|
|
230
|
+
*/
|
|
231
|
+
type InferInput<TInput extends z.ZodTypeAny | "none"> = TInput extends "none" ? z.infer<typeof NoInputSchema> : TInput extends z.ZodTypeAny ? z.infer<TInput> : never;
|
|
232
|
+
/**
|
|
233
|
+
* Enhanced stage context with fluent helpers
|
|
234
|
+
*/
|
|
235
|
+
interface EnhancedStageContext<TInput, TConfig, TContext extends Record<string, unknown>> extends StageContext<TInput, TConfig, TContext> {
|
|
236
|
+
/**
|
|
237
|
+
* Require output from a previous stage (throws if not found)
|
|
238
|
+
*
|
|
239
|
+
* @example
|
|
240
|
+
* const { extractedData } = ctx.require("data-extraction");
|
|
241
|
+
*/
|
|
242
|
+
require: <K extends keyof TContext>(stageId: K) => TContext[K];
|
|
243
|
+
/**
|
|
244
|
+
* Optionally get output from a previous stage (returns undefined if not found)
|
|
245
|
+
*
|
|
246
|
+
* @example
|
|
247
|
+
* const optionalData = ctx.optional("optional-stage");
|
|
248
|
+
* if (optionalData) { ... }
|
|
249
|
+
*/
|
|
250
|
+
optional: <K extends keyof TContext>(stageId: K) => TContext[K] | undefined;
|
|
251
|
+
}
|
|
252
|
+
/**
|
|
253
|
+
* Simplified execute result - just output and optional custom metrics
|
|
254
|
+
*/
|
|
255
|
+
interface SimpleStageResult<TOutput> {
|
|
256
|
+
output: TOutput;
|
|
257
|
+
/**
|
|
258
|
+
* Custom metrics specific to this stage (e.g., itemsProcessed, sectionsFound)
|
|
259
|
+
* Timing metrics (startTime, endTime, duration) are auto-calculated by executor
|
|
260
|
+
* AI metrics should be added here by stages that create their own AIHelper
|
|
261
|
+
*/
|
|
262
|
+
customMetrics?: Record<string, number>;
|
|
263
|
+
/**
|
|
264
|
+
* Optional artifacts to store
|
|
265
|
+
*/
|
|
266
|
+
artifacts?: Record<string, unknown>;
|
|
267
|
+
}
|
|
268
|
+
/**
|
|
269
|
+
* Simplified suspended result - metrics are auto-filled by the factory
|
|
270
|
+
*/
|
|
271
|
+
interface SimpleSuspendedResult {
|
|
272
|
+
suspended: true;
|
|
273
|
+
state: {
|
|
274
|
+
batchId: string;
|
|
275
|
+
submittedAt: string;
|
|
276
|
+
pollInterval: number;
|
|
277
|
+
maxWaitTime: number;
|
|
278
|
+
metadata?: Record<string, unknown>;
|
|
279
|
+
apiKey?: string;
|
|
280
|
+
};
|
|
281
|
+
pollConfig: {
|
|
282
|
+
pollInterval: number;
|
|
283
|
+
maxWaitTime: number;
|
|
284
|
+
nextPollAt: Date;
|
|
285
|
+
};
|
|
286
|
+
/**
|
|
287
|
+
* Optional custom metrics (timing & AI metrics are auto-filled)
|
|
288
|
+
*/
|
|
289
|
+
customMetrics?: Record<string, number>;
|
|
290
|
+
}
|
|
291
|
+
/**
|
|
292
|
+
* Sync stage definition
|
|
293
|
+
*/
|
|
294
|
+
interface SyncStageDefinition<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>> {
|
|
295
|
+
/** Unique stage identifier */
|
|
296
|
+
id: string;
|
|
297
|
+
/** Human-readable name */
|
|
298
|
+
name: string;
|
|
299
|
+
/** Optional description */
|
|
300
|
+
description?: string;
|
|
301
|
+
/** Stage IDs this stage depends on (validated at workflow build time) */
|
|
302
|
+
dependencies?: string[];
|
|
303
|
+
/** Zod schemas for validation */
|
|
304
|
+
schemas: {
|
|
305
|
+
/** Input schema, or "none" for stages that use workflowContext */
|
|
306
|
+
input: TInput;
|
|
307
|
+
/** Output schema */
|
|
308
|
+
output: TOutput;
|
|
309
|
+
/** Configuration schema */
|
|
310
|
+
config: TConfig;
|
|
311
|
+
};
|
|
312
|
+
/**
|
|
313
|
+
* Execute the stage logic
|
|
314
|
+
* Return just { output } - metrics are auto-calculated
|
|
315
|
+
*/
|
|
316
|
+
execute: (ctx: EnhancedStageContext<InferInput<TInput>, z.infer<TConfig>, TContext>) => Promise<SimpleStageResult<z.infer<TOutput>>>;
|
|
317
|
+
/**
|
|
318
|
+
* Optional: Estimate cost before execution
|
|
319
|
+
*/
|
|
320
|
+
estimateCost?: (input: InferInput<TInput>, config: z.infer<TConfig>) => number;
|
|
321
|
+
}
|
|
322
|
+
/**
|
|
323
|
+
* Async-batch stage definition (for long-running batch jobs)
|
|
324
|
+
*/
|
|
325
|
+
interface AsyncBatchStageDefinition<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>> extends Omit<SyncStageDefinition<TInput, TOutput, TConfig, TContext>, "execute"> {
|
|
326
|
+
/** Mark as async-batch mode */
|
|
327
|
+
mode: "async-batch";
|
|
328
|
+
/**
|
|
329
|
+
* Execute the stage - either return result or suspend for batch processing
|
|
330
|
+
*
|
|
331
|
+
* When resuming from suspension, ctx.resumeState contains the suspended state.
|
|
332
|
+
* Check this to determine whether to submit a new batch or fetch results.
|
|
333
|
+
*
|
|
334
|
+
* Return SimpleSuspendedResult when suspending - metrics will be auto-filled.
|
|
335
|
+
*/
|
|
336
|
+
execute: (ctx: EnhancedStageContext<InferInput<TInput>, z.infer<TConfig>, TContext>) => Promise<SimpleStageResult<z.infer<TOutput>> | SimpleSuspendedResult>;
|
|
337
|
+
/**
|
|
338
|
+
* Check if the batch job is complete
|
|
339
|
+
* Called by the orchestrator when polling suspended stages
|
|
340
|
+
*
|
|
341
|
+
* Context includes workflowRunId, stageId, config, log, and storage
|
|
342
|
+
* so you don't need to store these in metadata.
|
|
343
|
+
*/
|
|
344
|
+
checkCompletion: (suspendedState: z.infer<typeof SuspendedStateSchema>, context: CheckCompletionContext<z.infer<TConfig>>) => Promise<CompletionCheckResult<z.infer<TOutput>>>;
|
|
345
|
+
}
|
|
346
|
+
/**
|
|
347
|
+
* Define a sync stage with simplified API
|
|
348
|
+
*/
|
|
349
|
+
declare function defineStage<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>>(definition: SyncStageDefinition<TInput, TOutput, TConfig, TContext>): Stage<TInput extends "none" ? typeof NoInputSchema : TInput, TOutput, TConfig, TContext>;
|
|
350
|
+
/**
|
|
351
|
+
* Define an async-batch stage with simplified API
|
|
352
|
+
*/
|
|
353
|
+
declare function defineStage<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>>(definition: AsyncBatchStageDefinition<TInput, TOutput, TConfig, TContext>): Stage<TInput extends "none" ? typeof NoInputSchema : TInput, TOutput, TConfig, TContext>;
|
|
354
|
+
/**
|
|
355
|
+
* Define an async-batch stage with proper type inference for checkCompletion
|
|
356
|
+
*
|
|
357
|
+
* This is a dedicated function (not an alias) to ensure TypeScript properly
|
|
358
|
+
* infers callback parameter types without overload resolution ambiguity.
|
|
359
|
+
*/
|
|
360
|
+
declare function defineAsyncBatchStage<TInput extends z.ZodTypeAny | "none", TOutput extends z.ZodTypeAny, TConfig extends z.ZodTypeAny, TContext extends Record<string, unknown> = Record<string, unknown>>(definition: AsyncBatchStageDefinition<TInput, TOutput, TConfig, TContext>): Stage<TInput extends "none" ? typeof NoInputSchema : TInput, TOutput, TConfig, TContext>;
|
|
361
|
+
|
|
362
|
+
/**
|
|
363
|
+
* Model Helper - Centralized model selection and cost tracking for AI scripts
|
|
364
|
+
*/
|
|
365
|
+
|
|
366
|
+
interface ModelConfig {
|
|
367
|
+
id: string;
|
|
368
|
+
name: string;
|
|
369
|
+
inputCostPerMillion: number;
|
|
370
|
+
outputCostPerMillion: number;
|
|
371
|
+
provider: "openrouter" | "google" | "other";
|
|
372
|
+
description?: string;
|
|
373
|
+
supportsAsyncBatch?: boolean;
|
|
374
|
+
batchDiscountPercent?: number;
|
|
375
|
+
isEmbeddingModel?: boolean;
|
|
376
|
+
supportsTools?: boolean;
|
|
377
|
+
supportsStructuredOutputs?: boolean;
|
|
378
|
+
contextLength?: number;
|
|
379
|
+
maxCompletionTokens?: number | null;
|
|
380
|
+
}
|
|
381
|
+
/**
|
|
382
|
+
* Filter options for listModels()
|
|
383
|
+
*/
|
|
384
|
+
interface ModelFilter {
|
|
385
|
+
/** Only include embedding models */
|
|
386
|
+
isEmbeddingModel?: boolean;
|
|
387
|
+
/** Only include models that support function calling */
|
|
388
|
+
supportsTools?: boolean;
|
|
389
|
+
/** Only include models that support structured outputs */
|
|
390
|
+
supportsStructuredOutputs?: boolean;
|
|
391
|
+
/** Only include models that support async batch */
|
|
392
|
+
supportsAsyncBatch?: boolean;
|
|
393
|
+
}
|
|
394
|
+
/**
|
|
395
|
+
* Configuration for workflow-engine.models.ts sync config
|
|
396
|
+
*/
|
|
397
|
+
interface ModelSyncConfig {
|
|
398
|
+
/** Only include models matching these patterns (applied before exclude) */
|
|
399
|
+
include?: (string | RegExp)[];
|
|
400
|
+
/** Output path relative to consumer's project root (default: src/generated/models.ts) */
|
|
401
|
+
outputPath?: string;
|
|
402
|
+
/** Patterns to exclude models (string for exact match, RegExp for pattern) */
|
|
403
|
+
exclude?: (string | RegExp)[];
|
|
404
|
+
/** Custom models to add (embeddings, rerankers, etc.) */
|
|
405
|
+
customModels?: Record<string, ModelConfig>;
|
|
406
|
+
}
|
|
407
|
+
/**
|
|
408
|
+
* Model Registry - augmented by consumer's generated file for autocomplete
|
|
409
|
+
* Import the generated file to populate this interface
|
|
410
|
+
*/
|
|
411
|
+
interface ModelRegistry {
|
|
412
|
+
}
|
|
413
|
+
/**
|
|
414
|
+
* Register models at runtime (called by generated file)
|
|
415
|
+
*/
|
|
416
|
+
declare function registerModels(models: Record<string, ModelConfig>): void;
|
|
417
|
+
/**
|
|
418
|
+
* Get a model from the runtime registry
|
|
419
|
+
*/
|
|
420
|
+
declare function getRegisteredModel(key: string): ModelConfig | undefined;
|
|
421
|
+
/**
|
|
422
|
+
* List all registered models
|
|
423
|
+
*/
|
|
424
|
+
declare function listRegisteredModels(): Array<{
|
|
425
|
+
key: string;
|
|
426
|
+
config: ModelConfig;
|
|
427
|
+
}>;
|
|
428
|
+
interface ModelStats {
|
|
429
|
+
modelId: string;
|
|
430
|
+
modelName: string;
|
|
431
|
+
apiCalls: number;
|
|
432
|
+
inputTokens: number;
|
|
433
|
+
outputTokens: number;
|
|
434
|
+
totalTokens: number;
|
|
435
|
+
inputCost: number;
|
|
436
|
+
outputCost: number;
|
|
437
|
+
totalCost: number;
|
|
438
|
+
}
|
|
439
|
+
/**
|
|
440
|
+
* Static enum for built-in models - provides .enum accessor for AVAILABLE_MODELS keys
|
|
441
|
+
*/
|
|
442
|
+
declare const ModelKeyEnum: z$1.ZodEnum<{
|
|
443
|
+
"gemini-2.5-flash": "gemini-2.5-flash";
|
|
444
|
+
}>;
|
|
445
|
+
/**
|
|
446
|
+
* Type representing all available model keys
|
|
447
|
+
* Supports both built-in enum keys AND dynamically registered keys via ModelRegistry
|
|
448
|
+
*/
|
|
449
|
+
type ModelKey = z$1.infer<typeof ModelKeyEnum> | keyof ModelRegistry;
|
|
450
|
+
/**
|
|
451
|
+
* Zod schema that validates model keys against both the static enum AND the runtime registry
|
|
452
|
+
* Use ModelKey.parse() to validate and type model key strings
|
|
453
|
+
*/
|
|
454
|
+
declare const ModelKey: z$1.ZodPipe<z$1.ZodString, z$1.ZodTransform<"gemini-2.5-flash", string>>;
|
|
455
|
+
/**
|
|
456
|
+
* Available AI models with their configurations
|
|
457
|
+
* Prices should be updated regularly from provider pricing pages
|
|
458
|
+
*/
|
|
459
|
+
declare const AVAILABLE_MODELS: Record<string, ModelConfig>;
|
|
460
|
+
/**
|
|
461
|
+
* Default model selection
|
|
462
|
+
* Change this to switch the default model across all scripts
|
|
463
|
+
*/
|
|
464
|
+
declare const DEFAULT_MODEL_KEY: ModelKey;
|
|
465
|
+
/**
|
|
466
|
+
* Get a model configuration by key
|
|
467
|
+
* Checks both built-in AVAILABLE_MODELS and runtime MODEL_REGISTRY
|
|
468
|
+
*/
|
|
469
|
+
declare function getModel(key: ModelKey): ModelConfig;
|
|
470
|
+
/**
|
|
471
|
+
* Get the default model configuration
|
|
472
|
+
*/
|
|
473
|
+
declare function getDefaultModel(): ModelConfig;
|
|
474
|
+
/**
|
|
475
|
+
* List all available models (built-in + registered)
|
|
476
|
+
* @param filter Optional filter to narrow down models by capability
|
|
477
|
+
*/
|
|
478
|
+
declare function listModels(filter?: ModelFilter): Array<{
|
|
479
|
+
key: string;
|
|
480
|
+
config: ModelConfig;
|
|
481
|
+
}>;
|
|
482
|
+
/**
|
|
483
|
+
* Check if a model supports async batch processing
|
|
484
|
+
*/
|
|
485
|
+
declare function modelSupportsBatch(modelKey: ModelKey): boolean;
|
|
486
|
+
/**
|
|
487
|
+
* Interface for model with bound recording function
|
|
488
|
+
* Useful for parallel execution where you want to pass model + recordCall together
|
|
489
|
+
*/
|
|
490
|
+
interface ModelWithRecorder {
|
|
491
|
+
id: string;
|
|
492
|
+
name: string;
|
|
493
|
+
recordCall: (inputTokens: number, outputTokens: number) => void;
|
|
494
|
+
}
|
|
495
|
+
/**
|
|
496
|
+
* Get model by key with bound recordCall function
|
|
497
|
+
* Perfect for parallel execution - no need to write model name twice
|
|
498
|
+
*
|
|
499
|
+
* Usage:
|
|
500
|
+
* const model = getModelById("gemini-2.5-flash", modelTracker);
|
|
501
|
+
* const result = await generateText({
|
|
502
|
+
* model: openRouter(model.id),
|
|
503
|
+
* prompt: "...",
|
|
504
|
+
* });
|
|
505
|
+
* model.recordCall(result.usage.inputTokens, result.usage.outputTokens);
|
|
506
|
+
*/
|
|
507
|
+
declare function getModelById(modelKey: ModelKey, tracker?: ModelStatsTracker): ModelWithRecorder;
|
|
508
|
+
/**
|
|
509
|
+
* Calculate costs based on token usage
|
|
510
|
+
*/
|
|
511
|
+
declare function calculateCost(modelKey: ModelKey, inputTokens: number, outputTokens: number): {
|
|
512
|
+
inputCost: number;
|
|
513
|
+
outputCost: number;
|
|
514
|
+
totalCost: number;
|
|
515
|
+
};
|
|
516
|
+
/**
|
|
517
|
+
* Model stats tracker class - tracks single model OR aggregates multiple models
|
|
518
|
+
*/
|
|
519
|
+
declare class ModelStatsTracker {
|
|
520
|
+
private modelKey?;
|
|
521
|
+
private modelConfig?;
|
|
522
|
+
private stats;
|
|
523
|
+
private perModelStats;
|
|
524
|
+
private isAggregating;
|
|
525
|
+
constructor(modelKey?: ModelKey);
|
|
526
|
+
/**
|
|
527
|
+
* Create an aggregating tracker that combines stats from multiple models
|
|
528
|
+
* Perfect for parallel execution where different calls use different models
|
|
529
|
+
*/
|
|
530
|
+
static createAggregating(): ModelStatsTracker;
|
|
531
|
+
/**
|
|
532
|
+
* Get the model ID for use with AI SDK
|
|
533
|
+
* @deprecated Use getModelById(modelKey).id instead for parallel execution
|
|
534
|
+
*/
|
|
535
|
+
getModelId(): string;
|
|
536
|
+
/**
|
|
537
|
+
* Get the model configuration
|
|
538
|
+
* @deprecated Use getModelById(modelKey) instead for parallel execution
|
|
539
|
+
*/
|
|
540
|
+
getModelConfig(): ModelConfig;
|
|
541
|
+
/**
|
|
542
|
+
* Switch model (useful for sequential model switching)
|
|
543
|
+
* @deprecated For parallel execution, pass model key to recordCall() instead
|
|
544
|
+
*/
|
|
545
|
+
switchModel(modelKey: ModelKey): void;
|
|
546
|
+
/**
|
|
547
|
+
* Get a model helper with bound recordCall for parallel execution
|
|
548
|
+
* Perfect for running multiple AI calls in parallel with different models
|
|
549
|
+
*
|
|
550
|
+
* Usage:
|
|
551
|
+
* const flashModel = tracker.getModelById("gemini-2.5-flash");
|
|
552
|
+
* const liteModel = tracker.getModelById("gemini-2.5-flash-lite");
|
|
553
|
+
*
|
|
554
|
+
* const [result1, result2] = await Promise.all([
|
|
555
|
+
* generateText({
|
|
556
|
+
* model: openRouter(flashModel.id),
|
|
557
|
+
* prompt: prompt1,
|
|
558
|
+
* }).then(r => { flashModel.recordCall(r.usage.inputTokens, r.usage.outputTokens); return r; }),
|
|
559
|
+
* generateText({
|
|
560
|
+
* model: openRouter(liteModel.id),
|
|
561
|
+
* prompt: prompt2,
|
|
562
|
+
* }).then(r => { liteModel.recordCall(r.usage.inputTokens, r.usage.outputTokens); return r; }),
|
|
563
|
+
* ]);
|
|
564
|
+
*/
|
|
565
|
+
getModelById(modelKey: ModelKey): {
|
|
566
|
+
id: string;
|
|
567
|
+
name: string;
|
|
568
|
+
recordCall: (inputTokens: number, outputTokens: number) => void;
|
|
569
|
+
};
|
|
570
|
+
/**
|
|
571
|
+
* Record an API call with token usage
|
|
572
|
+
*
|
|
573
|
+
* For sequential execution:
|
|
574
|
+
* tracker.switchModel("gemini-2.5-flash")
|
|
575
|
+
* tracker.recordCall(inputTokens, outputTokens)
|
|
576
|
+
*
|
|
577
|
+
* For parallel execution:
|
|
578
|
+
* tracker.recordCall(inputTokens, outputTokens, "gemini-2.5-flash")
|
|
579
|
+
* tracker.recordCall(inputTokens, outputTokens, "gemini-2.5-pro")
|
|
580
|
+
*/
|
|
581
|
+
recordCall(inputTokens?: number, outputTokens?: number, modelKeyOverride?: ModelKey): void;
|
|
582
|
+
/**
|
|
583
|
+
* Estimate cost for a prompt without making an API call
|
|
584
|
+
* Useful for dry-run mode to preview costs
|
|
585
|
+
*
|
|
586
|
+
* Note: This method is async because it lazy-loads the tiktoken library
|
|
587
|
+
* to avoid bundling 2MB of tokenizer data for browser clients.
|
|
588
|
+
*
|
|
589
|
+
* @param prompt - The prompt text to estimate
|
|
590
|
+
* @param estimatedOutputTokens - Estimated number of output tokens (default: 500)
|
|
591
|
+
* @returns Object with token counts and cost estimates
|
|
592
|
+
*/
|
|
593
|
+
estimateCost(prompt: string, estimatedOutputTokens?: number): Promise<{
|
|
594
|
+
inputTokens: number;
|
|
595
|
+
outputTokens: number;
|
|
596
|
+
totalTokens: number;
|
|
597
|
+
inputCost: number;
|
|
598
|
+
outputCost: number;
|
|
599
|
+
totalCost: number;
|
|
600
|
+
}>;
|
|
601
|
+
/**
|
|
602
|
+
* Get current statistics (single model or aggregated)
|
|
603
|
+
* Returns null only if tracker is in aggregating mode - use getAggregatedStats() instead
|
|
604
|
+
*/
|
|
605
|
+
getStats(): ModelStats | null;
|
|
606
|
+
/**
|
|
607
|
+
* Get aggregated statistics from all models
|
|
608
|
+
*/
|
|
609
|
+
getAggregatedStats(): {
|
|
610
|
+
perModel: ModelStats[];
|
|
611
|
+
totals: {
|
|
612
|
+
totalApiCalls: number;
|
|
613
|
+
totalInputTokens: number;
|
|
614
|
+
totalOutputTokens: number;
|
|
615
|
+
totalTokens: number;
|
|
616
|
+
totalInputCost: number;
|
|
617
|
+
totalOutputCost: number;
|
|
618
|
+
totalCost: number;
|
|
619
|
+
};
|
|
620
|
+
};
|
|
621
|
+
/**
|
|
622
|
+
* Print statistics to console
|
|
623
|
+
*/
|
|
624
|
+
printStats(): void;
|
|
625
|
+
/**
|
|
626
|
+
* Print aggregated statistics from all models
|
|
627
|
+
*/
|
|
628
|
+
printAggregatedStats(): void;
|
|
629
|
+
/**
|
|
630
|
+
* Reset statistics
|
|
631
|
+
*/
|
|
632
|
+
reset(): void;
|
|
633
|
+
}
|
|
634
|
+
/**
|
|
635
|
+
* Print available models to console
|
|
636
|
+
*/
|
|
637
|
+
declare function printAvailableModels(): void;
|
|
638
|
+
|
|
639
|
+
/**
|
|
640
|
+
* AI Helper - Unified AI interaction tracking with hierarchical topics
|
|
641
|
+
*
|
|
642
|
+
* This is the new unified AI tracking system that replaces workflow-specific tracking.
|
|
643
|
+
* It supports:
|
|
644
|
+
* - Hierarchical topics for flexible categorization (e.g., "workflow.abc.stage.extraction")
|
|
645
|
+
* - All AI call types: generateText, generateObject, embed, streamText, batch
|
|
646
|
+
* - Automatic cost calculation with batch discounts
|
|
647
|
+
* - Persistent DB logging to AICall table
|
|
648
|
+
*
|
|
649
|
+
* @example
|
|
650
|
+
* ```typescript
|
|
651
|
+
* const ai = createAIHelper("workflow.abc123").createChild("stage", "extraction");
|
|
652
|
+
* const result = await ai.generateText("gemini-2.5-flash", prompt);
|
|
653
|
+
* ```
|
|
654
|
+
*/
|
|
655
|
+
|
|
656
|
+
type AICallType = "text" | "object" | "embed" | "stream" | "batch";
|
|
657
|
+
interface AITextResult {
|
|
658
|
+
text: string;
|
|
659
|
+
inputTokens: number;
|
|
660
|
+
outputTokens: number;
|
|
661
|
+
cost: number;
|
|
662
|
+
/** Structured output when experimental_output is used */
|
|
663
|
+
output?: any;
|
|
664
|
+
}
|
|
665
|
+
interface AIObjectResult<T> {
|
|
666
|
+
object: T;
|
|
667
|
+
inputTokens: number;
|
|
668
|
+
outputTokens: number;
|
|
669
|
+
cost: number;
|
|
670
|
+
}
|
|
671
|
+
interface AIEmbedResult {
|
|
672
|
+
embedding: number[];
|
|
673
|
+
embeddings: number[][];
|
|
674
|
+
dimensions: number;
|
|
675
|
+
inputTokens: number;
|
|
676
|
+
cost: number;
|
|
677
|
+
}
|
|
678
|
+
type AISDKStreamResult = ReturnType<typeof streamText>;
|
|
679
|
+
interface AIStreamResult {
|
|
680
|
+
stream: AsyncIterable<string>;
|
|
681
|
+
getUsage(): Promise<{
|
|
682
|
+
inputTokens: number;
|
|
683
|
+
outputTokens: number;
|
|
684
|
+
cost: number;
|
|
685
|
+
}>;
|
|
686
|
+
/** The raw AI SDK result - use this for methods like toUIMessageStreamResponse */
|
|
687
|
+
rawResult: AISDKStreamResult;
|
|
688
|
+
}
|
|
689
|
+
/**
 * Context for logging to workflow persistence (optional).
 * When provided, batch operations can log to the database.
 */
interface LogContext {
    /** Workflow run the log entries belong to. */
    workflowRunId: string;
    /** Stage record the log entries belong to. */
    stageRecordId: string;
    /** Function to create a log entry in persistence */
    createLog: (data: {
        workflowRunId: string;
        workflowStageId: string;
        level: "DEBUG" | "INFO" | "WARN" | "ERROR";
        message: string;
        metadata?: Record<string, unknown>;
    }) => Promise<void>;
}
/** Log function type for batch operations: level, human-readable message, optional structured metadata. */
type BatchLogFn = (level: "DEBUG" | "INFO" | "WARN" | "ERROR", message: string, meta?: Record<string, unknown>) => void;
/** Options for `AIHelper.generateText`. */
interface TextOptions<TTools extends ToolSet = ToolSet> {
    /** Sampling temperature forwarded to the model. */
    temperature?: number;
    /** Maximum number of output tokens for the completion. */
    maxTokens?: number;
    /** Tool definitions for the model to use */
    tools?: TTools;
    /** Tool choice: 'auto' (default), 'required' (force tool use), 'none', or specific tool name */
    toolChoice?: Parameters<typeof generateText>[0]["toolChoice"];
    /** Condition to stop tool execution (e.g., stepCountIs(3)) */
    stopWhen?: Parameters<typeof generateText>[0]["stopWhen"];
    /** Callback fired when each step completes (for collecting tool results) */
    onStepFinish?: (stepResult: StepResult<TTools>) => Promise<void> | void;
    /** Experimental structured output - use with tools for combined tool calling + structured output */
    experimental_output?: Parameters<typeof generateText>[0]["experimental_output"];
}
/** Options for `AIHelper.generateObject`. */
interface ObjectOptions<TTools extends ToolSet = ToolSet> {
    /** Sampling temperature forwarded to the model. */
    temperature?: number;
    /** Maximum number of output tokens for the completion. */
    maxTokens?: number;
    /** Tool definitions for the model to use */
    tools?: TTools;
    /** Condition to stop tool execution (e.g., stepCountIs(3)) */
    stopWhen?: Parameters<typeof generateText>[0]["stopWhen"];
    /** Callback fired when each step completes (for collecting tool results) */
    onStepFinish?: (stepResult: StepResult<TTools>) => Promise<void> | void;
}
/** Options for `AIHelper.embed`. */
interface EmbedOptions {
    /** Embedding task hint forwarded to the provider. */
    taskType?: "RETRIEVAL_QUERY" | "RETRIEVAL_DOCUMENT" | "SEMANTIC_SIMILARITY";
    /** Override the default embedding dimensions (from embedding-config.ts) */
    dimensions?: number;
}
/** Options for `AIHelper.streamText`. */
interface StreamOptions {
    /** Sampling temperature forwarded to the model. */
    temperature?: number;
    /** Maximum number of output tokens for the completion. */
    maxTokens?: number;
    /** Called with each text chunk as it arrives. */
    onChunk?: (chunk: string) => void;
    /** Tool definitions for the model to use */
    tools?: Parameters<typeof streamText>[0]["tools"];
    /** Condition to stop tool execution (e.g., stepCountIs(3)) */
    stopWhen?: Parameters<typeof streamText>[0]["stopWhen"];
    /** Callback fired when each step completes (for collecting tool results) */
    onStepFinish?: Parameters<typeof streamText>[0]["onStepFinish"];
}
/** Binary/file content part for multimodal prompt input. */
interface MediaPart {
    /** Discriminator for file parts. */
    type: "file";
    /** Raw file contents — the string form is presumably base64 or a URL; TODO confirm with callers. */
    data: Buffer | Uint8Array | string;
    /** MIME type of the file (e.g. "application/pdf"). */
    mediaType: string;
    /** Optional display filename. */
    filename?: string;
}
/** Plain-text content part for multimodal prompt input. */
interface TextPart {
    /** Discriminator for text parts. */
    type: "text";
    /** The text content. */
    text: string;
}
/** A single prompt content part: plain text or an attached file. */
type ContentPart = TextPart | MediaPart;
/** Prompt input: a plain string or a multimodal list of content parts. */
type TextInput = string | ContentPart[];
/** Input for `streamText`: exactly one of `prompt` or `messages`, plus an optional system prompt. */
type StreamTextInput = {
    prompt: string;
    messages?: never;
    system?: string;
} | {
    messages: Parameters<typeof streamText>[0]["messages"];
    prompt?: never;
    system?: string;
};
/** Provider identifier for batch operations */
type AIBatchProvider = "google" | "anthropic" | "openai";
/** A request to be processed in a batch */
interface AIBatchRequest {
    /** Unique identifier for this request (used to match results) */
    id: string;
    /** The prompt to send to the model */
    prompt: string;
    /** Optional Zod schema for structured JSON output */
    schema?: z.ZodTypeAny;
}
/** Result of a single request in a batch */
interface AIBatchResult<T = string> {
    /** The request ID (matches the id from AIBatchRequest) */
    id: string;
    /** Original prompt (may be empty if not available from provider) */
    prompt: string;
    /** The parsed result (JSON object if schema was provided, otherwise string) */
    result: T;
    /** Input tokens used */
    inputTokens: number;
    /** Output tokens used */
    outputTokens: number;
    /** Status of this individual result */
    status: "succeeded" | "failed";
    /** Error message if status is "failed" */
    error?: string;
}
/** Handle for tracking a submitted batch */
interface AIBatchHandle {
    /** Batch identifier from the provider */
    id: string;
    /** Current status of the batch */
    status: "pending" | "processing" | "completed" | "failed";
    /** The provider used for this batch (for resume support) */
    provider?: AIBatchProvider;
}
/** Interface for batch operations on an AI model */
interface AIBatch<T = string> {
    /** Submit requests for batch processing */
    submit(requests: AIBatchRequest[]): Promise<AIBatchHandle>;
    /** Check the status of a batch */
    getStatus(batchId: string): Promise<AIBatchHandle>;
    /** Retrieve results from a completed batch */
    getResults(batchId: string, metadata?: Record<string, unknown>): Promise<AIBatchResult<T>[]>;
    /** Check if results have been recorded for this batch */
    isRecorded(batchId: string): Promise<boolean>;
    /** Record batch results manually when batch provider integration is not implemented */
    recordResults(batchId: string, results: AIBatchResult<T>[]): Promise<void>;
}
/** Object-form parameters for `AIHelper.recordCall` (recording an externally executed call). */
interface RecordCallParams {
    /** Registered model key used for cost/pricing lookup. */
    modelKey: ModelKey;
    /** Which kind of call this was (text/object/embed/stream/batch). */
    callType: AICallType;
    /** The prompt that was sent. */
    prompt: string;
    /** The raw response text. */
    response: string;
    /** Tokens consumed by the prompt/input. */
    inputTokens: number;
    /** Tokens produced in the completion. */
    outputTokens: number;
    /** Extra metadata stored alongside the call record. */
    metadata?: Record<string, unknown>;
}
/** Aggregated usage statistics across all calls recorded by a helper. */
interface AIHelperStats {
    /** Total number of recorded calls. */
    totalCalls: number;
    /** Sum of input tokens across all calls. */
    totalInputTokens: number;
    /** Sum of output tokens across all calls. */
    totalOutputTokens: number;
    /** Sum of computed costs across all calls. */
    totalCost: number;
    /** Per-model breakdown keyed by model identifier. */
    perModel: Record<string, {
        calls: number;
        inputTokens: number;
        outputTokens: number;
        cost: number;
    }>;
}
/**
 * Topic-scoped AI facade: wraps the AI SDK's text/object/embed/stream/batch
 * entry points with automatic usage tracking and cost accounting.
 */
interface AIHelper {
    /** Current topic path */
    readonly topic: string;
    /** Generate text (optionally with tools); usage is recorded under this topic. */
    generateText<TTools extends ToolSet = ToolSet>(modelKey: ModelKey, prompt: TextInput, options?: TextOptions<TTools>): Promise<AITextResult>;
    /** Generate a schema-validated object; result type is inferred from the Zod schema. */
    generateObject<TSchema extends z.ZodTypeAny>(modelKey: ModelKey, prompt: TextInput, schema: TSchema, options?: ObjectOptions): Promise<AIObjectResult<z.infer<TSchema>>>;
    /** Embed one or more texts. */
    embed(modelKey: ModelKey, text: string | string[], options?: EmbedOptions): Promise<AIEmbedResult>;
    /** Stream text; usage becomes available via the result's getUsage(). */
    streamText(modelKey: ModelKey, input: StreamTextInput, options?: StreamOptions): AIStreamResult;
    /** Get a batch-processing interface for the given model (and optional provider). */
    batch<T = string>(modelKey: ModelKey, provider?: AIBatchProvider): AIBatch<T>;
    /** Derive a helper whose topic is "<topic>.<segment>" (plus ".<id>" when given). */
    createChild(segment: string, id?: string): AIHelper;
    /** Record an externally executed call (object form). */
    recordCall(params: RecordCallParams): void;
    /** Record an externally executed call (positional form). */
    recordCall(modelKey: ModelKey, prompt: string, response: string, tokens: {
        input: number;
        output: number;
    }, options?: {
        callType?: AICallType;
        isBatch?: boolean;
        metadata?: Record<string, unknown>;
    }): void;
    /** Aggregate statistics for every call recorded under this helper. */
    getStats(): Promise<AIHelperStats>;
}
/**
 * Create an AI helper instance with topic-based tracking.
 *
 * @param topic - Initial topic path (e.g., "workflow.abc123" or "reranker")
 * @param logger - AICallLogger used to persist each recorded AI call
 * @param logContext - Optional workflow persistence context enabling batch DB logging
 * @returns AIHelper instance
 *
 * @example
 * ```typescript
 * // Simple topic
 * const ai = createAIHelper("reranker", logger);
 *
 * // Hierarchical topic
 * const ai = createAIHelper("workflow.abc123", logger)
 *   .createChild("stage", "extraction");
 * // topic: "workflow.abc123.stage.extraction"
 *
 * // Use AI methods
 * const result = await ai.generateText("gemini-2.5-flash", prompt);
 * ```
 */
declare function createAIHelper(topic: string, logger: AICallLogger, logContext?: LogContext): AIHelper;
|
|
881
|
+
/**
 * Workflow Event Types for SSE Streaming
 *
 * This file contains ONLY types and interfaces for workflow events.
 * It is safe to use in both client and server environments.
 */
/** A single server-sent event describing workflow progress. */
interface WorkflowSSEEvent {
    /** Event discriminator (see WorkflowEventType). */
    type: WorkflowEventType;
    /** Workflow run this event belongs to. */
    workflowRunId: string;
    /** When the event was emitted. */
    timestamp: Date;
    /** Event-specific payload; shape depends on `type` (see the *Payload interfaces). */
    data: Record<string, unknown>;
}
/** All SSE event names emitted over a workflow stream. */
type WorkflowEventType = "connected" | "workflow:started" | "workflow:completed" | "workflow:suspended" | "workflow:cancelled" | "workflow:failed" | "stage:started" | "stage:progress" | "stage:completed" | "stage:suspended" | "stage:failed" | "log";
/** Payload for the "workflow:started" event. */
interface WorkflowStartedPayload {
    workflowRunId: string;
    workflowName: string;
}
/** Payload for the "workflow:completed" event. */
interface WorkflowCompletedPayload {
    workflowRunId: string;
    /** Final workflow output. */
    output: unknown;
    /** Total run duration — presumably milliseconds; TODO confirm against emitter. */
    duration?: number;
    /** Total AI cost accumulated over the run, when tracked. */
    totalCost?: number;
    /** Total AI tokens accumulated over the run, when tracked. */
    totalTokens?: number;
}
/** Payload for the "workflow:suspended" event. */
interface WorkflowSuspendedPayload {
    workflowRunId: string;
    /** Stage at which the workflow was suspended. */
    stageId: string;
}
/** Payload for the "workflow:failed" event. */
interface WorkflowFailedPayload {
    workflowRunId: string;
    /** Error message describing the failure. */
    error: string;
}
/** Payload for the "stage:started" event. */
interface StageStartedPayload {
    stageId: string;
    stageName: string;
    /** Position of the stage within the workflow. */
    stageNumber: number;
}
/** Payload for the "stage:completed" event. */
interface StageCompletedPayload {
    stageId: string;
    stageName: string;
    /** Stage duration — presumably milliseconds; TODO confirm against emitter. */
    duration: number;
    /** AI cost incurred by this stage, when tracked. */
    cost?: number;
    /** AI input tokens consumed by this stage, when tracked. */
    inputTokens?: number;
    /** AI output tokens produced by this stage, when tracked. */
    outputTokens?: number;
    /** Number of outputs produced by the stage, when applicable. */
    outputCount?: number;
}
/** Payload for the "stage:failed" event. */
interface StageFailedPayload {
    stageId: string;
    stageName: string;
    /** Error message describing the failure. */
    error: string;
}
/** Payload for the "log" event. */
interface LogPayload {
    /** Log level (e.g. "DEBUG", "INFO", "WARN", "ERROR"). */
    level: string;
    /** Human-readable log message. */
    message: string;
    /** Optional structured metadata attached to the log entry. */
    meta?: Record<string, unknown>;
}
|
|
938
|
+
export { type ModelFilter as $, type AIHelper as A, type BatchLogFn as B, createAIHelper as C, DEFAULT_MODEL_KEY as D, type EmbedOptions as E, defineAsyncBatchStage as F, defineStage as G, getDefaultModel as H, type InferInput as I, getModel as J, getModelById as K, type LogContext as L, ModelKey as M, NoInputSchema as N, type ObjectOptions as O, getRegisteredModel as P, listModels as Q, type RecordCallParams as R, type Stage as S, type TextOptions as T, listRegisteredModels as U, modelSupportsBatch as V, type WorkflowEventType as W, printAvailableModels as X, registerModels as Y, requireStageOutput as Z, type LogPayload as _, type StageMetrics as a, type StageCompletedPayload as a0, type StageFailedPayload as a1, type StageStartedPayload as a2, type WorkflowCompletedPayload as a3, type WorkflowFailedPayload as a4, type WorkflowStartedPayload as a5, type WorkflowSuspendedPayload as a6, type WorkflowSSEEvent as b, type LogLevel as c, type AIBatch as d, type AIBatchHandle as e, type AIBatchProvider as f, type AIBatchRequest as g, type AIBatchResult as h, type AICallType as i, type AIEmbedResult as j, type AIObjectResult as k, type AIStreamResult as l, type AITextResult as m, AVAILABLE_MODELS as n, type AsyncBatchStageDefinition as o, type EnhancedStageContext as p, type ModelConfig as q, type ModelRegistry as r, type ModelStats as s, ModelStatsTracker as t, type ModelSyncConfig as u, type SimpleStageResult as v, type StageResult as w, type StreamOptions as x, type SyncStageDefinition as y, calculateCost as z };
|