@ai-sdk/workflow 0.0.0-bf6e4b15-20260402200305
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +52 -0
- package/LICENSE +13 -0
- package/README.md +62 -0
- package/dist/index.d.mts +739 -0
- package/dist/index.mjs +1261 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +77 -0
- package/src/do-stream-step.ts +329 -0
- package/src/index.ts +37 -0
- package/src/providers/mock-function-wrapper.ts +11 -0
- package/src/providers/mock.ts +123 -0
- package/src/serializable-schema.ts +81 -0
- package/src/stream-iterator.ts +46 -0
- package/src/stream-text-iterator.ts +444 -0
- package/src/telemetry.ts +199 -0
- package/src/test/agent-e2e-workflows.ts +507 -0
- package/src/test/calculate-workflow.ts +19 -0
- package/src/to-ui-message-chunk.ts +214 -0
- package/src/types.ts +11 -0
- package/src/workflow-agent.ts +1647 -0
- package/src/workflow-chat-transport.ts +359 -0
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
LanguageModelV4CallOptions,
|
|
3
|
+
LanguageModelV4Prompt,
|
|
4
|
+
} from '@ai-sdk/provider';
|
|
5
|
+
import {
|
|
6
|
+
type Experimental_ModelCallStreamPart as ModelCallStreamPart,
|
|
7
|
+
experimental_streamModelCall as streamModelCall,
|
|
8
|
+
type FinishReason,
|
|
9
|
+
type LanguageModel,
|
|
10
|
+
type LanguageModelUsage,
|
|
11
|
+
type ModelMessage,
|
|
12
|
+
type StepResult,
|
|
13
|
+
type StopCondition,
|
|
14
|
+
type ToolCallRepairFunction,
|
|
15
|
+
type ToolChoice,
|
|
16
|
+
type ToolSet,
|
|
17
|
+
} from 'ai';
|
|
18
|
+
import { gateway } from 'ai';
|
|
19
|
+
import type { ProviderOptions, TelemetrySettings } from './workflow-agent.js';
|
|
20
|
+
import {
|
|
21
|
+
resolveSerializableTools,
|
|
22
|
+
type SerializableToolDef,
|
|
23
|
+
} from './serializable-schema.js';
|
|
24
|
+
import type { CompatibleLanguageModel } from './types.js';
|
|
25
|
+
|
|
26
|
+
// Re-export the stream-part union under a stable, non-experimental alias.
export type { Experimental_ModelCallStreamPart as ModelCallStreamPart } from 'ai';

// Stop condition specialized to an untyped ToolSet (output type deliberately `any`).
export type ModelStopCondition = StopCondition<NoInfer<ToolSet>, any>;

/**
 * Provider-executed tool result captured from the stream.
 */
export interface ProviderExecutedToolResult {
  toolCallId: string;
  toolName: string;
  // Tool output on success; when `isError` is true this holds the error value.
  result: unknown;
  isError?: boolean;
}

/**
 * Options for the doStreamStep function.
 *
 * NOTE(review): `maxRetries`, `experimental_telemetry` and `responseFormat`
 * are declared here but are NOT forwarded to `streamModelCall` inside
 * `doStreamStep` in this file — confirm whether they are consumed elsewhere
 * or simply not wired up yet.
 */
export interface DoStreamStepOptions {
  // Sampling / generation settings (passed through to streamModelCall).
  maxOutputTokens?: number;
  temperature?: number;
  topP?: number;
  topK?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  stopSequences?: string[];
  seed?: number;
  // NOTE(review): not forwarded by doStreamStep (see note above).
  maxRetries?: number;
  // Transport / call-level settings.
  abortSignal?: AbortSignal;
  headers?: Record<string, string | undefined>;
  providerOptions?: ProviderOptions;
  toolChoice?: ToolChoice<ToolSet>;
  includeRawChunks?: boolean;
  // NOTE(review): not forwarded by doStreamStep (see note above).
  experimental_telemetry?: TelemetrySettings;
  repairToolCall?: ToolCallRepairFunction<ToolSet>;
  // NOTE(review): not forwarded by doStreamStep (see note above).
  responseFormat?: LanguageModelV4CallOptions['responseFormat'];
}

/**
 * Parsed tool call from the stream (parsed by streamModelCall's transform).
 */
export interface ParsedToolCall {
  type: 'tool-call';
  toolCallId: string;
  toolName: string;
  input: unknown;
  // True when the provider (not the app) executes the tool.
  providerExecuted?: boolean;
  providerMetadata?: Record<string, unknown>;
  // dynamic / invalid / error are attached at runtime by the stream transform.
  dynamic?: boolean;
  invalid?: boolean;
  error?: unknown;
}

/**
 * Finish metadata from the stream.
 */
export interface StreamFinish {
  finishReason: FinishReason;
  // Provider's raw (unmapped) finish reason string, when available.
  rawFinishReason: string | undefined;
  usage: LanguageModelUsage;
  providerMetadata?: Record<string, unknown>;
}
|
|
87
|
+
|
|
88
|
+
/**
 * Executes one model streaming call inside a workflow step.
 *
 * Streams the model via `streamModelCall`, forwards every stream part to the
 * optional `writable` sink in real time, and aggregates the parts into a
 * `StepResult` plus extracted tool calls and finish metadata.
 *
 * @param conversationPrompt - Pre-converted provider-level prompt.
 * @param modelInit - Gateway model id string, or an async factory returning
 *   the model; resolved inside the step (serialization boundary, see below).
 * @param writable - Optional sink receiving each stream part as it arrives.
 *   Its writer lock is released — not closed — when the loop ends;
 *   presumably the caller owns the stream's lifetime (TODO confirm).
 * @param serializedTools - Plain JSON-schema tool definitions, reconstructed
 *   here with Ajv validation (see serializable-schema.ts).
 * @param options - Call options. NOTE(review): `maxRetries`,
 *   `experimental_telemetry` and `responseFormat` from DoStreamStepOptions
 *   are not forwarded to `streamModelCall` below — confirm intent.
 * @returns `{ toolCalls, finish, step, providerExecutedToolResults }`.
 */
export async function doStreamStep(
  conversationPrompt: LanguageModelV4Prompt,
  modelInit: string | (() => Promise<CompatibleLanguageModel>),
  writable?: WritableStream<ModelCallStreamPart<ToolSet>>,
  serializedTools?: Record<string, SerializableToolDef>,
  options?: DoStreamStepOptions,
) {
  'use step';

  // Resolve model inside step (must happen here for serialization boundary)
  let model: CompatibleLanguageModel;
  if (typeof modelInit === 'string') {
    model = gateway.languageModel(modelInit) as CompatibleLanguageModel;
  } else if (typeof modelInit === 'function') {
    model = await modelInit();
  } else {
    throw new Error(
      'Invalid "model initialization" argument. Must be a string or a function that returns a LanguageModel instance.',
    );
  }

  // Reconstruct tools from serializable definitions with Ajv validation.
  // Tools are serialized before crossing the step boundary because zod schemas
  // contain functions that can't be serialized by the workflow runtime.
  const tools = serializedTools
    ? resolveSerializableTools(serializedTools)
    : undefined;

  // streamModelCall handles: prompt standardization, tool preparation,
  // model.doStream(), retry logic, and stream part transformation
  // (tool call parsing, finish reason mapping, file wrapping).
  const { stream: modelStream } = await streamModelCall({
    model: model as LanguageModel,
    // streamModelCall expects Prompt (ModelMessage[]) but we pass the
    // pre-converted LanguageModelV4Prompt. standardizePrompt inside
    // streamModelCall handles both formats.
    messages: conversationPrompt as unknown as ModelMessage[],
    tools,
    toolChoice: options?.toolChoice,
    includeRawChunks: options?.includeRawChunks,
    providerOptions: options?.providerOptions,
    abortSignal: options?.abortSignal,
    headers: options?.headers,
    maxOutputTokens: options?.maxOutputTokens,
    temperature: options?.temperature,
    topP: options?.topP,
    topK: options?.topK,
    presencePenalty: options?.presencePenalty,
    frequencyPenalty: options?.frequencyPenalty,
    stopSequences: options?.stopSequences,
    seed: options?.seed,
    repairToolCall: options?.repairToolCall,
  });

  // Consume the stream: capture data and write to writable in real-time
  const toolCalls: ParsedToolCall[] = [];
  const providerExecutedToolResults = new Map<
    string,
    ProviderExecutedToolResult
  >();
  let finish: StreamFinish | undefined;

  // Aggregation for StepResult
  let text = '';
  const reasoningParts: Array<{ text: string }> = [];
  let responseMetadata:
    | { id?: string; timestamp?: Date; modelId?: string }
    | undefined;
  let warnings: unknown[] | undefined;

  // Acquire writer once before the loop to avoid per-chunk lock overhead
  const writer = writable?.getWriter();

  try {
    for await (const part of modelStream) {
      switch (part.type) {
        case 'text-delta':
          text += part.text;
          break;
        case 'reasoning-delta':
          // Each delta kept separately; joined later for reasoningText.
          reasoningParts.push({ text: part.text });
          break;
        case 'tool-call': {
          // parseToolCall adds dynamic/invalid/error at runtime
          const toolCallPart = part as typeof part & Partial<ParsedToolCall>;
          toolCalls.push({
            type: 'tool-call',
            toolCallId: toolCallPart.toolCallId,
            toolName: toolCallPart.toolName,
            input: toolCallPart.input,
            providerExecuted: toolCallPart.providerExecuted,
            providerMetadata: toolCallPart.providerMetadata as
              | Record<string, unknown>
              | undefined,
            dynamic: toolCallPart.dynamic,
            invalid: toolCallPart.invalid,
            error: toolCallPart.error,
          });
          break;
        }
        case 'tool-result':
          // Only provider-executed results are captured here; app-executed
          // tool results are handled outside the step.
          if (part.providerExecuted) {
            providerExecutedToolResults.set(part.toolCallId, {
              toolCallId: part.toolCallId,
              toolName: part.toolName,
              result: part.output,
              isError: false,
            });
          }
          break;
        case 'tool-error': {
          const errorPart = part as typeof part & {
            providerExecuted?: boolean;
          };
          if (errorPart.providerExecuted) {
            // A later tool-error overwrites an earlier tool-result for the
            // same toolCallId (same Map key).
            providerExecutedToolResults.set(errorPart.toolCallId, {
              toolCallId: errorPart.toolCallId,
              toolName: errorPart.toolName,
              result: errorPart.error,
              isError: true,
            });
          }
          break;
        }
        case 'model-call-end':
          finish = {
            finishReason: part.finishReason,
            rawFinishReason: part.rawFinishReason,
            usage: part.usage,
            providerMetadata: part.providerMetadata as
              | Record<string, unknown>
              | undefined,
          };
          break;
        case 'model-call-start':
          warnings = part.warnings;
          break;
        case 'model-call-response-metadata':
          responseMetadata = part;
          break;
      }

      // Write to writable in real-time
      if (writer) {
        await writer.write(part);
      }
    }
  } finally {
    // Release (don't close) so the caller can keep using the writable.
    writer?.releaseLock();
  }

  // Build StepResult
  // '|| undefined' turns an empty joined string into undefined.
  const reasoningText = reasoningParts.map(r => r.text).join('') || undefined;

  const step: StepResult<ToolSet, any> = {
    callId: 'workflow-agent',
    stepNumber: 0,
    model: {
      // Assumes gateway-style 'provider:model' ids — TODO confirm for
      // models resolved via a custom factory.
      provider: responseMetadata?.modelId?.split(':')[0] ?? 'unknown',
      modelId: responseMetadata?.modelId ?? 'unknown',
    },
    functionId: undefined,
    metadata: undefined,
    experimental_context: undefined,
    content: [
      ...(text ? [{ type: 'text' as const, text }] : []),
      ...toolCalls
        .filter(tc => !tc.invalid)
        .map(tc => ({
          type: 'tool-call' as const,
          toolCallId: tc.toolCallId,
          toolName: tc.toolName,
          input: tc.input,
          ...(tc.dynamic ? { dynamic: true as const } : {}),
        })),
    ],
    text,
    reasoning: reasoningParts.map(r => ({
      type: 'reasoning' as const,
      text: r.text,
    })),
    reasoningText,
    files: [],
    sources: [],
    toolCalls: toolCalls
      .filter(tc => !tc.invalid)
      .map(tc => ({
        type: 'tool-call' as const,
        toolCallId: tc.toolCallId,
        toolName: tc.toolName,
        input: tc.input,
        ...(tc.dynamic ? { dynamic: true as const } : {}),
      })),
    // NOTE(review): static tool calls are never separated out here — all
    // non-dynamic calls appear only in `toolCalls`. Confirm consumers don't
    // rely on `staticToolCalls`.
    staticToolCalls: [],
    dynamicToolCalls: toolCalls
      .filter(tc => !tc.invalid && tc.dynamic)
      .map(tc => ({
        type: 'tool-call' as const,
        toolCallId: tc.toolCallId,
        toolName: tc.toolName,
        input: tc.input,
        dynamic: true as const,
      })),
    toolResults: [],
    staticToolResults: [],
    dynamicToolResults: [],
    finishReason: finish?.finishReason ?? 'other',
    rawFinishReason: finish?.rawFinishReason,
    // Zeroed fallback usage for streams that never emitted model-call-end.
    usage:
      finish?.usage ??
      ({
        inputTokens: 0,
        inputTokenDetails: {
          noCacheTokens: undefined,
          cacheReadTokens: undefined,
          cacheWriteTokens: undefined,
        },
        outputTokens: 0,
        outputTokenDetails: {
          textTokens: undefined,
          reasoningTokens: undefined,
        },
        totalTokens: 0,
      } as LanguageModelUsage),
    warnings,
    // Placeholder: the raw request body is not surfaced by streamModelCall here.
    request: { body: '' },
    response: {
      id: responseMetadata?.id ?? 'unknown',
      timestamp: responseMetadata?.timestamp ?? new Date(),
      modelId: responseMetadata?.modelId ?? 'unknown',
      messages: [],
    },
    providerMetadata: finish?.providerMetadata ?? {},
  } as StepResult<ToolSet, any>;

  return {
    toolCalls,
    finish,
    step,
    providerExecutedToolResults,
  };
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
// Public entry point: re-export the agent implementation and its supporting
// types from workflow-agent.ts.
export {
  WorkflowAgent,
  Output,
  type CompatibleLanguageModel,
  type DownloadFunction,
  type WorkflowAgentOptions,
  type WorkflowAgentStreamOptions,
  type WorkflowAgentStreamResult,
  type GenerationSettings,
  type InferWorkflowAgentTools,
  type InferWorkflowAgentUIMessage,
  type OutputSpecification,
  type PrepareCallCallback,
  type PrepareCallOptions,
  type PrepareCallResult,
  type PrepareStepCallback,
  type PrepareStepInfo,
  type PrepareStepResult,
  type ProviderOptions,
  type StreamTextOnAbortCallback,
  type StreamTextOnErrorCallback,
  type StreamTextOnFinishCallback,
  type StreamTextTransform,
  type TelemetrySettings,
  type ToolCallRepairFunction,
  type WorkflowAgentOnStartCallback,
  type WorkflowAgentOnStepStartCallback,
  type WorkflowAgentOnToolCallStartCallback,
  type WorkflowAgentOnToolCallFinishCallback,
} from './workflow-agent.js';

// UI message streaming helpers.
export {
  createModelCallToUIChunkTransform,
  toUIMessageChunk,
} from './to-ui-message-chunk.js';

// Stream-part type (aliased from the AI SDK in do-stream-step.ts).
export type { ModelCallStreamPart } from './do-stream-step.js';
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import { MockLanguageModelV4 } from 'ai/test';
|
|
2
|
+
|
|
3
|
+
// Workaround for SWC plugin bug (https://github.com/vercel/workflow/issues/1365):
|
|
4
|
+
// `new ClassName(...)` in a step closure doesn't get closure vars hoisted
|
|
5
|
+
// correctly. Wrapping the constructor call in a plain function (imported
|
|
6
|
+
// from a separate file) fixes it.
|
|
7
|
+
export function mockProvider(
|
|
8
|
+
...args: ConstructorParameters<typeof MockLanguageModelV4>
|
|
9
|
+
) {
|
|
10
|
+
return new MockLanguageModelV4(...args);
|
|
11
|
+
}
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
import { mockProvider } from './mock-function-wrapper.js';
|
|
2
|
+
|
|
3
|
+
/**
 * Serializable description of one scripted mock response: either plain text,
 * or a single tool call (`input` is a string, passed through as-is).
 */
export type MockResponseDescriptor =
  | { type: 'text'; text: string }
  | { type: 'tool-call'; toolName: string; input: string };
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Mock model that returns a fixed text response.
|
|
9
|
+
* Same 'use step' pattern as real providers (anthropic, openai, etc.).
|
|
10
|
+
* Only captures `text` (string) — fully serializable across step boundary.
|
|
11
|
+
*/
|
|
12
|
+
export function mockTextModel(text: string) {
|
|
13
|
+
return async () => {
|
|
14
|
+
'use step';
|
|
15
|
+
// Bind closure var at step body level so SWC plugin detects it
|
|
16
|
+
const _text = text;
|
|
17
|
+
return mockProvider({
|
|
18
|
+
doStream: async () => ({
|
|
19
|
+
stream: new ReadableStream({
|
|
20
|
+
start(c) {
|
|
21
|
+
for (const v of [
|
|
22
|
+
{ type: 'stream-start', warnings: [] },
|
|
23
|
+
{
|
|
24
|
+
type: 'response-metadata',
|
|
25
|
+
id: 'r',
|
|
26
|
+
modelId: 'mock',
|
|
27
|
+
timestamp: new Date(),
|
|
28
|
+
},
|
|
29
|
+
{ type: 'text-start', id: '1' },
|
|
30
|
+
{ type: 'text-delta', id: '1', delta: _text },
|
|
31
|
+
{ type: 'text-end', id: '1' },
|
|
32
|
+
{
|
|
33
|
+
type: 'finish',
|
|
34
|
+
finishReason: { unified: 'stop', raw: 'stop' },
|
|
35
|
+
usage: {
|
|
36
|
+
inputTokens: { total: 5, noCache: 5 },
|
|
37
|
+
outputTokens: { total: 10, text: 10 },
|
|
38
|
+
},
|
|
39
|
+
},
|
|
40
|
+
] as any[])
|
|
41
|
+
c.enqueue(v);
|
|
42
|
+
c.close();
|
|
43
|
+
},
|
|
44
|
+
}),
|
|
45
|
+
}),
|
|
46
|
+
});
|
|
47
|
+
};
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
/**
|
|
51
|
+
* Mock model that plays through a sequence of responses.
|
|
52
|
+
* Determines which response to return by counting assistant messages in the prompt.
|
|
53
|
+
* Only captures `responses` (array of plain objects) — fully serializable.
|
|
54
|
+
*/
|
|
55
|
+
export function mockSequenceModel(responses: MockResponseDescriptor[]) {
|
|
56
|
+
return async () => {
|
|
57
|
+
'use step';
|
|
58
|
+
// Bind closure var at step body level so SWC plugin detects it
|
|
59
|
+
const _responses = responses;
|
|
60
|
+
return mockProvider({
|
|
61
|
+
doStream: async (options: any) => {
|
|
62
|
+
const idx = Math.min(
|
|
63
|
+
options.prompt.filter((m: any) => m.role === 'assistant').length,
|
|
64
|
+
_responses.length - 1,
|
|
65
|
+
);
|
|
66
|
+
const r = _responses[idx];
|
|
67
|
+
const parts =
|
|
68
|
+
r.type === 'text'
|
|
69
|
+
? [
|
|
70
|
+
{ type: 'stream-start', warnings: [] },
|
|
71
|
+
{
|
|
72
|
+
type: 'response-metadata',
|
|
73
|
+
id: 'r',
|
|
74
|
+
modelId: 'mock',
|
|
75
|
+
timestamp: new Date(),
|
|
76
|
+
},
|
|
77
|
+
{ type: 'text-start', id: '1' },
|
|
78
|
+
{ type: 'text-delta', id: '1', delta: r.text },
|
|
79
|
+
{ type: 'text-end', id: '1' },
|
|
80
|
+
{
|
|
81
|
+
type: 'finish',
|
|
82
|
+
finishReason: { unified: 'stop', raw: 'stop' },
|
|
83
|
+
usage: {
|
|
84
|
+
inputTokens: { total: 5, noCache: 5 },
|
|
85
|
+
outputTokens: { total: 10, text: 10 },
|
|
86
|
+
},
|
|
87
|
+
},
|
|
88
|
+
]
|
|
89
|
+
: [
|
|
90
|
+
{ type: 'stream-start', warnings: [] },
|
|
91
|
+
{
|
|
92
|
+
type: 'response-metadata',
|
|
93
|
+
id: 'r',
|
|
94
|
+
modelId: 'mock',
|
|
95
|
+
timestamp: new Date(),
|
|
96
|
+
},
|
|
97
|
+
{
|
|
98
|
+
type: 'tool-call',
|
|
99
|
+
toolCallId: `call-${idx + 1}`,
|
|
100
|
+
toolName: r.toolName,
|
|
101
|
+
input: r.input,
|
|
102
|
+
},
|
|
103
|
+
{
|
|
104
|
+
type: 'finish',
|
|
105
|
+
finishReason: { unified: 'tool-calls', raw: undefined },
|
|
106
|
+
usage: {
|
|
107
|
+
inputTokens: { total: 5, noCache: 5 },
|
|
108
|
+
outputTokens: { total: 10, text: 10 },
|
|
109
|
+
},
|
|
110
|
+
},
|
|
111
|
+
];
|
|
112
|
+
return {
|
|
113
|
+
stream: new ReadableStream({
|
|
114
|
+
start(c) {
|
|
115
|
+
for (const p of parts as any[]) c.enqueue(p);
|
|
116
|
+
c.close();
|
|
117
|
+
},
|
|
118
|
+
}),
|
|
119
|
+
};
|
|
120
|
+
},
|
|
121
|
+
});
|
|
122
|
+
};
|
|
123
|
+
}
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Helpers for passing tool schemas across workflow step boundaries.
|
|
3
|
+
*
|
|
4
|
+
* Tool schemas (zod, valibot, arktype, etc.) contain functions that can't be
|
|
5
|
+
* serialized by the workflow runtime. These helpers extract JSON Schema from
|
|
6
|
+
* schemas, then reconstruct tools with Ajv validation inside step functions.
|
|
7
|
+
*
|
|
8
|
+
* Uses `asSchema()` from `@ai-sdk/provider-utils` for JSON Schema extraction,
|
|
9
|
+
* which supports Standard Schema compatible libraries. When libraries adopt
|
|
10
|
+
* `~standard.jsonSchema` (Standard Schema v2), extraction can be simplified
|
|
11
|
+
* to use that interface directly.
|
|
12
|
+
*/
|
|
13
|
+
import type { JSONSchema7 } from '@ai-sdk/provider';
|
|
14
|
+
import { asSchema, jsonSchema } from '@ai-sdk/provider-utils';
|
|
15
|
+
import { tool, type ToolSet } from 'ai';
|
|
16
|
+
import Ajv from 'ajv';
|
|
17
|
+
|
|
18
|
+
/**
|
|
19
|
+
* Serializable tool definition — plain objects only, safe for workflow steps.
|
|
20
|
+
*/
|
|
21
|
+
export type SerializableToolDef = {
|
|
22
|
+
description?: string;
|
|
23
|
+
inputSchema: JSONSchema7;
|
|
24
|
+
};
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* Converts a ToolSet (with zod/standard schemas and execute functions) to a
|
|
28
|
+
* serializable record of tool definitions. Only description and inputSchema
|
|
29
|
+
* (as JSON Schema) are preserved — execute functions are stripped since they
|
|
30
|
+
* run outside the step.
|
|
31
|
+
*/
|
|
32
|
+
export function serializeToolSet(
|
|
33
|
+
tools: ToolSet,
|
|
34
|
+
): Record<string, SerializableToolDef> {
|
|
35
|
+
return Object.fromEntries(
|
|
36
|
+
Object.entries(tools).map(([name, t]) => [
|
|
37
|
+
name,
|
|
38
|
+
{
|
|
39
|
+
description: t.description,
|
|
40
|
+
inputSchema: asSchema(t.inputSchema).jsonSchema as JSONSchema7,
|
|
41
|
+
},
|
|
42
|
+
]),
|
|
43
|
+
);
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
/**
|
|
47
|
+
* Reconstructs tool objects from serializable tool definitions inside a step.
|
|
48
|
+
*
|
|
49
|
+
* Wraps each tool's JSON Schema with `jsonSchema()` and validates tool call
|
|
50
|
+
* arguments against the schema using Ajv. This provides runtime type safety
|
|
51
|
+
* equivalent to using zod schemas directly with the AI SDK.
|
|
52
|
+
*/
|
|
53
|
+
export function resolveSerializableTools(
|
|
54
|
+
tools: Record<string, SerializableToolDef>,
|
|
55
|
+
): ToolSet {
|
|
56
|
+
const ajv = new Ajv();
|
|
57
|
+
|
|
58
|
+
return Object.fromEntries(
|
|
59
|
+
Object.entries(tools).map(([name, t]) => {
|
|
60
|
+
const validateFn = ajv.compile(t.inputSchema);
|
|
61
|
+
|
|
62
|
+
return [
|
|
63
|
+
name,
|
|
64
|
+
tool({
|
|
65
|
+
description: t.description,
|
|
66
|
+
inputSchema: jsonSchema(t.inputSchema, {
|
|
67
|
+
validate: value => {
|
|
68
|
+
if (validateFn(value)) {
|
|
69
|
+
return { success: true, value: value as any };
|
|
70
|
+
}
|
|
71
|
+
return {
|
|
72
|
+
success: false,
|
|
73
|
+
error: new Error(ajv.errorsText(validateFn.errors)),
|
|
74
|
+
};
|
|
75
|
+
},
|
|
76
|
+
}),
|
|
77
|
+
}),
|
|
78
|
+
];
|
|
79
|
+
}),
|
|
80
|
+
);
|
|
81
|
+
}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Converts an AsyncGenerator to a ReadableStream.
|
|
3
|
+
*/
|
|
4
|
+
export function iteratorToStream<T>(
|
|
5
|
+
iterator: AsyncGenerator<T>,
|
|
6
|
+
options?: { signal?: AbortSignal },
|
|
7
|
+
): ReadableStream<T> {
|
|
8
|
+
return new ReadableStream<T>({
|
|
9
|
+
async pull(controller) {
|
|
10
|
+
if (options?.signal?.aborted) {
|
|
11
|
+
controller.close();
|
|
12
|
+
return;
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
const { value, done } = await iterator.next();
|
|
16
|
+
if (done) {
|
|
17
|
+
controller.close();
|
|
18
|
+
} else {
|
|
19
|
+
controller.enqueue(value);
|
|
20
|
+
}
|
|
21
|
+
},
|
|
22
|
+
cancel() {
|
|
23
|
+
iterator.return(undefined as any);
|
|
24
|
+
},
|
|
25
|
+
});
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Converts a ReadableStream to an AsyncIterable.
|
|
30
|
+
*
|
|
31
|
+
* @yields Items from the stream.
|
|
32
|
+
*/
|
|
33
|
+
export async function* streamToIterator<T>(
|
|
34
|
+
stream: ReadableStream<T>,
|
|
35
|
+
): AsyncGenerator<T> {
|
|
36
|
+
const reader = stream.getReader();
|
|
37
|
+
try {
|
|
38
|
+
while (true) {
|
|
39
|
+
const { value, done } = await reader.read();
|
|
40
|
+
if (done) break;
|
|
41
|
+
yield value;
|
|
42
|
+
}
|
|
43
|
+
} finally {
|
|
44
|
+
reader.releaseLock();
|
|
45
|
+
}
|
|
46
|
+
}
|