@ai-sdk/workflow 0.0.0-bf6e4b15-20260402200305

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,444 @@
+ import type {
+   LanguageModelV4CallOptions,
+   LanguageModelV4Prompt,
+   LanguageModelV4ToolCall,
+   LanguageModelV4ToolResultPart,
+ } from '@ai-sdk/provider';
+ import type {
+   Experimental_ModelCallStreamPart as ModelCallStreamPart,
+   ModelMessage,
+   StepResult,
+   StreamTextOnStepFinishCallback,
+   ToolCallRepairFunction,
+   ToolChoice,
+   ToolSet,
+ } from 'ai';
+ import {
+   doStreamStep,
+   type ModelStopCondition,
+   type ParsedToolCall,
+   type ProviderExecutedToolResult,
+ } from './do-stream-step.js';
+ import { serializeToolSet } from './serializable-schema.js';
+ import type {
+   GenerationSettings,
+   PrepareStepCallback,
+   StreamTextOnErrorCallback,
+   TelemetrySettings,
+   WorkflowAgentOnStepStartCallback,
+ } from './workflow-agent.js';
+ import type { CompatibleLanguageModel } from './types.js';
+
+ // Re-export for consumers
+ export type { ProviderExecutedToolResult } from './do-stream-step.js';
+
+ /**
+  * The value yielded by the stream text iterator when tool calls are requested.
+  * Contains both the tool calls and the current conversation messages.
+  */
+ export interface StreamTextIteratorYieldValue {
+   /** The tool calls requested by the model (parsed with typed inputs) */
+   toolCalls: ParsedToolCall[];
+   /** The conversation messages up to (and including) the tool call request */
+   messages: LanguageModelV4Prompt;
+   /** The step result from the current step */
+   step?: StepResult<ToolSet, any>;
+   /** The current experimental context */
+   context?: unknown;
+   /** Provider-executed tool results (keyed by tool call ID) */
+   providerExecutedToolResults?: Map<string, ProviderExecutedToolResult>;
+ }
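+
+ // Illustrative driver sketch (not part of this package): how a caller is
+ // expected to pump the generator. Each yield carries the pending tool calls;
+ // the caller executes them and sends the results back through `next()`.
+ // `runTool` is a hypothetical executor returning a LanguageModelV4ToolResultPart,
+ // and 'example-model' is a placeholder model id.
+ //
+ //   const it = streamTextIterator({ prompt, tools, model: 'example-model' });
+ //   let result = await it.next();
+ //   while (!result.done) {
+ //     const toolResults = await Promise.all(
+ //       result.value.toolCalls.map(call => runTool(call)),
+ //     );
+ //     result = await it.next(toolResults);
+ //   }
+ //   const finalPrompt = result.value; // LanguageModelV4Prompt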
+
+ // This runs in the workflow context
+ export async function* streamTextIterator({
+   prompt,
+   tools = {},
+   writable,
+   model,
+   stopConditions,
+   maxSteps,
+   onStepFinish,
+   onStepStart,
+   onError,
+   prepareStep,
+   generationSettings,
+   toolChoice,
+   experimental_context,
+   experimental_telemetry,
+   includeRawChunks = false,
+   repairToolCall,
+   responseFormat,
+ }: {
+   prompt: LanguageModelV4Prompt;
+   tools: ToolSet;
+   writable?: WritableStream<ModelCallStreamPart<ToolSet>>;
+   model: string | (() => Promise<CompatibleLanguageModel>);
+   stopConditions?: ModelStopCondition[] | ModelStopCondition;
+   maxSteps?: number;
+   onStepFinish?: StreamTextOnStepFinishCallback<any, any>;
+   onStepStart?: WorkflowAgentOnStepStartCallback;
+   onError?: StreamTextOnErrorCallback;
+   prepareStep?: PrepareStepCallback<any>;
+   generationSettings?: GenerationSettings;
+   toolChoice?: ToolChoice<ToolSet>;
+   experimental_context?: unknown;
+   experimental_telemetry?: TelemetrySettings;
+   includeRawChunks?: boolean;
+   repairToolCall?: ToolCallRepairFunction<ToolSet>;
+   responseFormat?: LanguageModelV4CallOptions['responseFormat'];
+ }): AsyncGenerator<
+   StreamTextIteratorYieldValue,
+   LanguageModelV4Prompt,
+   LanguageModelV4ToolResultPart[]
+ > {
+   let conversationPrompt = [...prompt]; // Create a mutable copy
+   let currentModel: string | (() => Promise<CompatibleLanguageModel>) = model;
+   let currentGenerationSettings = generationSettings ?? {};
+   let currentToolChoice = toolChoice;
+   let currentContext = experimental_context;
+   let currentActiveTools: string[] | undefined;
+
+   const steps: StepResult<any, any>[] = [];
+   let done = false;
+   let isFirstIteration = true;
+   let stepNumber = 0;
+   let lastStep: StepResult<any, any> | undefined;
+   let lastStepWasToolCalls = false;
+
+   // Default maxSteps to Infinity to preserve backwards compatibility
+   // (agent loops until completion unless explicitly limited)
+   const effectiveMaxSteps = maxSteps ?? Infinity;
+
+   while (!done) {
+     // Check if we've exceeded the maximum number of steps
+     if (stepNumber >= effectiveMaxSteps) {
+       break;
+     }
+
+     // Check for abort signal
+     if (currentGenerationSettings.abortSignal?.aborted) {
+       break;
+     }
+
+     // Call prepareStep callback before each step if provided
+     if (prepareStep) {
+       const prepareResult = await prepareStep({
+         model: currentModel,
+         stepNumber,
+         steps,
+         messages: conversationPrompt,
+         experimental_context: currentContext,
+       });
+
+       // Apply any overrides from prepareStep
+       if (prepareResult.model !== undefined) {
+         currentModel = prepareResult.model;
+       }
+       // Apply messages override BEFORE system so the system message
+       // isn't lost when messages replaces the prompt.
+       if (prepareResult.messages !== undefined) {
+         conversationPrompt = [...prepareResult.messages];
+       }
+       if (prepareResult.system !== undefined) {
+         // Update or prepend system message in the conversation prompt.
+         // Applied AFTER messages override so the system message isn't
+         // lost when messages replaces the prompt.
+         if (
+           conversationPrompt.length > 0 &&
+           conversationPrompt[0].role === 'system'
+         ) {
+           // Replace existing system message
+           conversationPrompt[0] = {
+             role: 'system',
+             content: prepareResult.system,
+           };
+         } else {
+           // Prepend new system message
+           conversationPrompt.unshift({
+             role: 'system',
+             content: prepareResult.system,
+           });
+         }
+       }
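+       // e.g. prepareStep returns { messages: [userMsg], system: 'Be terse.' }:
+       // messages first replaces the prompt with [userMsg], then the system
+       // override prepends the system message, so the final prompt is
+       // [system, userMsg] rather than dropping the system text.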
+       if (prepareResult.experimental_context !== undefined) {
+         currentContext = prepareResult.experimental_context;
+       }
+       if (prepareResult.activeTools !== undefined) {
+         currentActiveTools = prepareResult.activeTools;
+       }
+       // Apply generation settings overrides
+       if (prepareResult.maxOutputTokens !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           maxOutputTokens: prepareResult.maxOutputTokens,
+         };
+       }
+       if (prepareResult.temperature !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           temperature: prepareResult.temperature,
+         };
+       }
+       if (prepareResult.topP !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           topP: prepareResult.topP,
+         };
+       }
+       if (prepareResult.topK !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           topK: prepareResult.topK,
+         };
+       }
+       if (prepareResult.presencePenalty !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           presencePenalty: prepareResult.presencePenalty,
+         };
+       }
+       if (prepareResult.frequencyPenalty !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           frequencyPenalty: prepareResult.frequencyPenalty,
+         };
+       }
+       if (prepareResult.stopSequences !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           stopSequences: prepareResult.stopSequences,
+         };
+       }
+       if (prepareResult.seed !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           seed: prepareResult.seed,
+         };
+       }
+       if (prepareResult.maxRetries !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           maxRetries: prepareResult.maxRetries,
+         };
+       }
+       if (prepareResult.headers !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           headers: prepareResult.headers,
+         };
+       }
+       if (prepareResult.providerOptions !== undefined) {
+         currentGenerationSettings = {
+           ...currentGenerationSettings,
+           providerOptions: prepareResult.providerOptions,
+         };
+       }
+       if (prepareResult.toolChoice !== undefined) {
+         currentToolChoice = prepareResult.toolChoice;
+       }
+     }
+
+     if (onStepStart) {
+       await onStepStart({
+         stepNumber,
+         model: currentModel,
+         messages: conversationPrompt as unknown as ModelMessage[],
+       });
+     }
+
+     try {
+       // Filter tools if activeTools is specified
+       const effectiveTools =
+         currentActiveTools && currentActiveTools.length > 0
+           ? filterToolSet(tools, currentActiveTools)
+           : tools;
+
+       // Serialize tools before crossing the step boundary — zod schemas
+       // contain functions that can't be serialized by the workflow runtime.
+       // Tools are reconstructed with Ajv validation inside doStreamStep.
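+       // For example, a zod schema like z.object({ city: z.string() }) carries
+       // parse functions; serializeToolSet presumably reduces it to a plain
+       // JSON-Schema-style description that survives structured cloning, and
+       // Ajv re-validates inputs against that description on the other side.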
+       const serializedTools = serializeToolSet(effectiveTools);
+
+       const { toolCalls, finish, step, providerExecutedToolResults } =
+         await doStreamStep(
+           conversationPrompt,
+           currentModel,
+           writable,
+           serializedTools,
+           {
+             ...currentGenerationSettings,
+             toolChoice: currentToolChoice,
+             includeRawChunks,
+             experimental_telemetry,
+             repairToolCall,
+             responseFormat,
+           },
+         );
+
+       isFirstIteration = false;
+       stepNumber++;
+       steps.push(step);
+       lastStep = step;
+       lastStepWasToolCalls = false;
+
+       const finishReason = finish?.finishReason;
+
+       if (finishReason === 'tool-calls') {
+         lastStepWasToolCalls = true;
+
+         // Add assistant message with tool calls to the conversation
+         // Note: providerMetadata from the tool call is mapped to providerOptions
+         // in the prompt format, following the AI SDK convention. This is critical
+         // for providers like Gemini that require thoughtSignature to be preserved
+         // across multi-turn tool calls. Some fields are sanitized before mapping.
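+         // e.g. a tool call carrying { google: { thoughtSignature: '...' } }
+         // (illustrative shape) ends up as providerOptions on the prompt part,
+         // so the signature round-trips to the provider on the next turn.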
+         conversationPrompt.push({
+           role: 'assistant',
+           content: toolCalls.map(toolCall => {
+             const sanitizedMetadata = sanitizeProviderMetadataForToolCall(
+               toolCall.providerMetadata,
+             );
+             return {
+               type: 'tool-call',
+               toolCallId: toolCall.toolCallId,
+               toolName: toolCall.toolName,
+               input: toolCall.input,
+               ...(sanitizedMetadata != null
+                 ? { providerOptions: sanitizedMetadata }
+                 : {}),
+             };
+           }) as typeof toolCalls,
+         });
+
+         // Yield the tool calls along with the current conversation messages.
+         // This allows executeTool to pass the conversation context to tool
+         // execute functions. Also include provider-executed tool results so
+         // they can be used instead of local execution.
+         const toolResults = yield {
+           toolCalls,
+           messages: conversationPrompt,
+           step,
+           context: currentContext,
+           providerExecutedToolResults,
+         };
+
+         conversationPrompt.push({
+           role: 'tool',
+           content: toolResults,
+         });
+
+         if (stopConditions) {
+           const stopConditionList = Array.isArray(stopConditions)
+             ? stopConditions
+             : [stopConditions];
+           if (stopConditionList.some(test => test({ steps }))) {
+             done = true;
+           }
+         }
+       } else if (finishReason === 'stop') {
+         // Add assistant message with text content to the conversation
+         const textContent = step.content.filter(
+           item => item.type === 'text',
+         ) as Array<{ type: 'text'; text: string }>;
+
+         if (textContent.length > 0) {
+           conversationPrompt.push({
+             role: 'assistant',
+             content: textContent,
+           });
+         }
+
+         done = true;
+       } else if (finishReason === 'length') {
+         // Model hit max tokens - stop but don't throw
+         done = true;
+       } else if (finishReason === 'content-filter') {
+         // Content filter triggered - stop but don't throw
+         done = true;
+       } else if (finishReason === 'error') {
+         // Model error - stop but don't throw
+         done = true;
+       } else if (finishReason === 'other') {
+         // Other reason - stop but don't throw
+         done = true;
+       } else if (finishReason === 'unknown') {
+         // Unknown reason - stop but don't throw
+         done = true;
+       } else if (!finishReason) {
+         // No finish reason - this might happen on incomplete streams
+         done = true;
+       } else {
+         throw new Error(
+           `Unexpected finish reason: ${typeof finish?.finishReason === 'object' ? JSON.stringify(finish?.finishReason) : finish?.finishReason}`,
+         );
+       }
+
+       if (onStepFinish) {
+         await onStepFinish(step);
+       }
+     } catch (error) {
+       if (onError) {
+         await onError({ error });
+       }
+       throw error;
+     }
+   }
+
+   // Yield the final step if it wasn't already yielded
+   // (tool-calls steps are yielded inside the loop)
+   if (lastStep && !lastStepWasToolCalls) {
+     yield {
+       toolCalls: [],
+       messages: conversationPrompt,
+       step: lastStep,
+       context: currentContext,
+     };
+   }
+
+   return conversationPrompt;
+ }
+
+ /**
+  * Filter a tool set to only include the specified active tools.
+  */
+ function filterToolSet(tools: ToolSet, activeTools: string[]): ToolSet {
+   const filtered: ToolSet = {};
+   for (const toolName of activeTools) {
+     if (toolName in tools) {
+       filtered[toolName] = tools[toolName];
+     }
+   }
+   return filtered;
+ }
+
+ /**
+  * Strip OpenAI's itemId from providerMetadata (it references reasoning items
+  * we don't preserve). Preserves all other provider metadata (e.g., Gemini's
+  * thoughtSignature).
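+  *
+  * Illustrative input/output (hypothetical field values):
+  *   { openai: { itemId: 'rs_1', other: 1 }, google: { x: 2 } }
+  *     -> { openai: { other: 1 }, google: { x: 2 } }
+  *   { openai: { itemId: 'rs_1' } } -> undefined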
+  */
+ function sanitizeProviderMetadataForToolCall(
+   metadata: unknown,
+ ): Record<string, unknown> | undefined {
+   if (metadata == null) return undefined;
+
+   const meta = metadata as Record<string, unknown>;
+
+   // Check if OpenAI metadata exists and needs sanitization
+   if ('openai' in meta && meta.openai != null) {
+     const { openai, ...restProviders } = meta;
+     const openaiMeta = openai as Record<string, unknown>;
+
+     // Remove itemId from OpenAI metadata - it references reasoning items we don't preserve
+     const { itemId: _itemId, ...restOpenai } = openaiMeta;
+
+     // Reconstruct metadata without itemId
+     const hasOtherOpenaiFields = Object.keys(restOpenai).length > 0;
+     const hasOtherProviders = Object.keys(restProviders).length > 0;
+
+     if (hasOtherOpenaiFields && hasOtherProviders) {
+       return { ...restProviders, openai: restOpenai };
+     } else if (hasOtherOpenaiFields) {
+       return { openai: restOpenai };
+     } else if (hasOtherProviders) {
+       return restProviders;
+     }
+     return undefined;
+   }
+
+   return meta;
+ }
@@ -0,0 +1,199 @@
+ import type { TelemetrySettings } from './workflow-agent.js';
+
+ // Minimal OTel type shims so we don't depend on @opentelemetry/api at compile time.
+ type Attributes = Record<string, unknown>;
+
+ type Span = {
+   setAttributes(attributes: Attributes): void;
+   setStatus(status: { code: number; message?: string }): void;
+   recordException(exception: {
+     name: string;
+     message: string;
+     stack?: string;
+   }): void;
+   end(): void;
+ };
+
+ type Context = unknown;
+
+ type Tracer = {
+   startActiveSpan<T>(
+     name: string,
+     options: Attributes,
+     fn: (span: Span) => T,
+   ): T;
+ };
+
+ // Full OTel API surface we use
+ interface OtelApi {
+   trace: {
+     getTracer(name: string): Tracer;
+     setSpan(context: Context, span: Span): Context;
+   };
+   context: {
+     active(): Context;
+     with<T>(ctx: Context, fn: () => T): T;
+   };
+   SpanStatusCode: { ERROR: number };
+ }
+
+ // Lazy-loaded OTel API — self-initializes on first use (item 5)
+ let otelApi: OtelApi | null = null;
+ let otelLoadAttempted = false;
+
+ async function ensureOtelApi(): Promise<OtelApi | null> {
+   if (otelLoadAttempted) return otelApi;
+   otelLoadAttempted = true;
+   try {
+     // Dynamic import — @opentelemetry/api is an optional peer dependency.
+     // Use Function() to hide the import from bundlers that would fail at
+     // compile time when the package is absent.
+     otelApi = await (Function(
+       'return import("@opentelemetry/api")',
+     )() as Promise<OtelApi>);
+   } catch {
+     otelApi = null;
+   }
+   return otelApi;
+ }
+
+ /**
+  * Stateless tracer accessor matching AI SDK's `getTracer` pattern (item 5).
+  * Returns a no-op–equivalent `null` when telemetry is disabled, so callers
+  * don't need a separate init step.
+  */
+ function getTracer(telemetry?: TelemetrySettings): Tracer | null {
+   if (!telemetry?.isEnabled || !otelApi) return null;
+   if (telemetry.tracer) return telemetry.tracer as Tracer;
+   return otelApi.trace.getTracer('ai');
+ }
+
+ // ── Attribute helpers ──────────────────────────────────────────────────
+
+ /**
+  * Assemble `operation.name` / `resource.name` following the AI SDK convention
+  * (items 1 + 2): separator is a **space**, not a dot.
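+  *
+  * e.g. operationId 'ai.streamText' with functionId 'my-fn' (hypothetical
+  * values) yields:
+  *   { 'operation.name': 'ai.streamText my-fn', 'resource.name': 'my-fn',
+  *     'ai.operationId': 'ai.streamText', 'ai.telemetry.functionId': 'my-fn' }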
76
+ */
77
+ function assembleOperationName(
78
+ operationId: string,
79
+ telemetry?: TelemetrySettings,
80
+ ): Attributes {
81
+ return {
82
+ 'operation.name': `${operationId}${
83
+ telemetry?.functionId != null ? ` ${telemetry.functionId}` : ''
84
+ }`,
85
+ 'resource.name': telemetry?.functionId,
86
+ 'ai.operationId': operationId,
87
+ 'ai.telemetry.functionId': telemetry?.functionId,
88
+ };
89
+ }
90
+
91
+ /**
92
+ * Build the full attribute bag for a span, merging operation name,
93
+ * caller-supplied attributes, and user-defined telemetry metadata.
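+  *
+  * e.g. metadata { userId: '42' } (hypothetical) becomes the span attribute
+  * 'ai.telemetry.metadata.userId' = '42'.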
+  */
+ function buildAttributes(
+   operationId: string,
+   telemetry: TelemetrySettings | undefined,
+   extra?: Attributes,
+ ): Attributes {
+   if (!telemetry?.isEnabled) return {};
+
+   const attrs: Attributes = {
+     ...assembleOperationName(operationId, telemetry),
+     ...extra,
+   };
+
+   if (telemetry.metadata) {
+     for (const [key, value] of Object.entries(telemetry.metadata)) {
+       if (value != null) {
+         attrs[`ai.telemetry.metadata.${key}`] = value;
+       }
+     }
+   }
+
+   return attrs;
+ }
+
+ // ── Error recording (item 3) ───────────────────────────────────────────
+
+ /**
+  * Record an error on a span following the AI SDK pattern:
+  * `recordException` (with name / message / stack) + `setStatus`.
+  */
+ function recordErrorOnSpan(span: Span, error: unknown): void {
+   if (error instanceof Error) {
+     span.recordException({
+       name: error.name,
+       message: error.message,
+       stack: error.stack,
+     });
+     span.setStatus({
+       code: otelApi?.SpanStatusCode.ERROR ?? 2,
+       message: error.message,
+     });
+   } else {
+     span.setStatus({ code: otelApi?.SpanStatusCode.ERROR ?? 2 });
+   }
+ }
+
+ // ── Public API ─────────────────────────────────────────────────────────
+
+ /**
+  * Record a span around an async function.
+  *
+  * Self-initializing: the first call lazily loads `@opentelemetry/api`.
+  * If telemetry is disabled or OTel is unavailable, `fn` runs without
+  * instrumentation (no-op fast path).
+  *
+  * Matches the AI SDK's `recordSpan`:
+  * - Uses `context.with()` for proper context propagation (item 4)
+  * - Calls `recordException` + `setStatus` on errors (item 3)
+  * - Uses space separator in `operation.name` (item 1)
+  * - Sets `resource.name` (item 2)
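+  *
+  * Illustrative usage (`doWork` is a stand-in for real work):
+  *
+  *   const result = await recordSpan({
+  *     name: 'workflow.step',
+  *     telemetry: { isEnabled: true, functionId: 'my-agent' },
+  *     fn: span => doWork(),
+  *   });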
154
+ */
155
+ export async function recordSpan<T>(options: {
156
+ name: string;
157
+ telemetry?: TelemetrySettings;
158
+ attributes?: Attributes;
159
+ fn: (span?: Span) => PromiseLike<T> | T;
160
+ }): Promise<T> {
161
+ // Self-initialise on first call (item 5)
162
+ if (!otelLoadAttempted) {
163
+ await ensureOtelApi();
164
+ }
165
+
166
+ const tracer = getTracer(options.telemetry);
167
+ if (!tracer || !otelApi) {
168
+ return options.fn(undefined);
169
+ }
170
+
171
+ const attrs = buildAttributes(
172
+ options.name,
173
+ options.telemetry,
174
+ options.attributes,
175
+ );
176
+
177
+ return tracer.startActiveSpan(
178
+ options.name,
179
+ { attributes: attrs },
180
+ async span => {
181
+ // Capture current context so nested spans parent correctly (item 4).
182
+ // otelApi is guaranteed non-null here (checked before startActiveSpan).
183
+ const ctx = otelApi!.context.active();
184
+
185
+ try {
186
+ const result = await otelApi!.context.with(ctx, () => options.fn(span));
187
+ span.end();
188
+ return result;
189
+ } catch (error) {
190
+ try {
191
+ recordErrorOnSpan(span, error);
192
+ } finally {
193
+ span.end();
194
+ }
195
+ throw error;
196
+ }
197
+ },
198
+ );
199
+ }