@ai-sdk/rsc 2.0.45 → 2.0.47
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/package.json +3 -2
- package/src/ai-state.test.ts +146 -0
- package/src/ai-state.tsx +210 -0
- package/src/index.ts +20 -0
- package/src/provider.tsx +149 -0
- package/src/rsc-client.ts +8 -0
- package/src/rsc-server.ts +5 -0
- package/src/rsc-shared.mts +11 -0
- package/src/shared-client/context.tsx +226 -0
- package/src/shared-client/index.ts +11 -0
- package/src/stream-ui/__snapshots__/render.ui.test.tsx.snap +91 -0
- package/src/stream-ui/__snapshots__/stream-ui.ui.test.tsx.snap +213 -0
- package/src/stream-ui/index.tsx +1 -0
- package/src/stream-ui/stream-ui.tsx +419 -0
- package/src/stream-ui/stream-ui.ui.test.tsx +321 -0
- package/src/streamable-ui/create-streamable-ui.tsx +148 -0
- package/src/streamable-ui/create-streamable-ui.ui.test.tsx +354 -0
- package/src/streamable-ui/create-suspended-chunk.tsx +84 -0
- package/src/streamable-value/create-streamable-value.test.tsx +179 -0
- package/src/streamable-value/create-streamable-value.ts +296 -0
- package/src/streamable-value/is-streamable-value.ts +10 -0
- package/src/streamable-value/read-streamable-value.tsx +113 -0
- package/src/streamable-value/read-streamable-value.ui.test.tsx +165 -0
- package/src/streamable-value/streamable-value.ts +37 -0
- package/src/streamable-value/use-streamable-value.tsx +91 -0
- package/src/types/index.ts +1 -0
- package/src/types.test-d.ts +17 -0
- package/src/types.ts +71 -0
- package/src/util/constants.ts +5 -0
- package/src/util/create-resolvable-promise.ts +28 -0
- package/src/util/is-async-generator.ts +7 -0
- package/src/util/is-function.ts +8 -0
- package/src/util/is-generator.ts +5 -0
|
@@ -0,0 +1,419 @@
|
|
|
1
|
+
import {
|
|
2
|
+
LanguageModelV3,
|
|
3
|
+
LanguageModelV3StreamResult,
|
|
4
|
+
LanguageModelV3Usage,
|
|
5
|
+
SharedV3Warning,
|
|
6
|
+
} from '@ai-sdk/provider';
|
|
7
|
+
import {
|
|
8
|
+
InferSchema,
|
|
9
|
+
ProviderOptions,
|
|
10
|
+
safeParseJSON,
|
|
11
|
+
} from '@ai-sdk/provider-utils';
|
|
12
|
+
import {
|
|
13
|
+
CallSettings,
|
|
14
|
+
CallWarning,
|
|
15
|
+
FinishReason,
|
|
16
|
+
InvalidToolInputError,
|
|
17
|
+
LanguageModelUsage,
|
|
18
|
+
NoSuchToolError,
|
|
19
|
+
Prompt,
|
|
20
|
+
Schema,
|
|
21
|
+
ToolChoice,
|
|
22
|
+
} from 'ai';
|
|
23
|
+
import {
|
|
24
|
+
asLanguageModelUsage,
|
|
25
|
+
convertToLanguageModelPrompt,
|
|
26
|
+
prepareCallSettings,
|
|
27
|
+
prepareRetries,
|
|
28
|
+
prepareToolsAndToolChoice,
|
|
29
|
+
standardizePrompt,
|
|
30
|
+
} from 'ai/internal';
|
|
31
|
+
import { ReactNode } from 'react';
|
|
32
|
+
import * as z3 from 'zod/v3';
|
|
33
|
+
import * as z4 from 'zod/v4';
|
|
34
|
+
import { createStreamableUI } from '../streamable-ui/create-streamable-ui';
|
|
35
|
+
import { createResolvablePromise } from '../util/create-resolvable-promise';
|
|
36
|
+
import { isAsyncGenerator } from '../util/is-async-generator';
|
|
37
|
+
import { isGenerator } from '../util/is-generator';
|
|
38
|
+
|
|
39
|
+
// A renderable UI node, or a promise resolving to one.
type Streamable = ReactNode | Promise<ReactNode>;

// A render callback. It may return a node (or promise of one) directly, or a
// sync/async generator that yields intermediate nodes and returns the final
// node (yield type, return type, and next() argument type are all declared).
type Renderer<T extends Array<any>> = (
  ...args: T
) =>
  | Streamable
  | Generator<Streamable, Streamable, void>
  | AsyncGenerator<Streamable, Streamable, void>;
|
|
47
|
+
|
|
48
|
+
// Definition of a UI-rendering tool: an input schema (zod v3, zod v4, or the
// AI SDK's own Schema) plus an optional `generate` renderer that receives the
// parsed tool input and call metadata (toolName / toolCallId).
type RenderTool<
  INPUT_SCHEMA extends z4.core.$ZodType | z3.Schema | Schema = any,
> = {
  description?: string;
  inputSchema: INPUT_SCHEMA;
  generate?: Renderer<
    [
      InferSchema<INPUT_SCHEMA>,
      {
        toolName: string;
        toolCallId: string;
      },
    ]
  >;
};
|
|
63
|
+
|
|
64
|
+
// Renderer invoked for streamed text output of the model.
type RenderText = Renderer<
  [
    {
      /**
       * The full text content from the model so far.
       */
      content: string;

      /**
       * The new appended text content from the model since the last `text` call.
       */
      delta: string;

      /**
       * Whether the model is done generating text.
       * If `true`, the `content` will be the final output and this call will be the last.
       */
      done: boolean;
    },
  ]
>;
|
|
85
|
+
|
|
86
|
+
// Result of `streamUI`: the streamable UI node plus everything the underlying
// `doStream` call returned (stream, response metadata, ...).
type RenderResult = {
  value: ReactNode;
} & LanguageModelV3StreamResult;
|
|
89
|
+
|
|
90
|
+
const defaultTextRenderer: RenderText = ({ content }: { content: string }) =>
|
|
91
|
+
content;
|
|
92
|
+
|
|
93
|
+
/**
 * `streamUI` is a helper function to create a streamable UI from LLMs.
 *
 * It forwards the prompt to `model.doStream`, consumes one branch of the
 * resulting stream in the background to drive a streamable UI (text deltas
 * and tool `generate` renderers), and returns the other branch together with
 * the streamable UI `value`.
 */
export async function streamUI<
  TOOLS extends { [name: string]: z4.core.$ZodType | z3.Schema | Schema } = {},
>({
  model,
  tools,
  toolChoice,
  system,
  prompt,
  messages,
  maxRetries,
  abortSignal,
  headers,
  initial,
  text,
  providerOptions,
  onFinish,
  ...settings
}: CallSettings &
  Prompt & {
    /**
     * The language model to use.
     */
    model: LanguageModelV3;

    /**
     * The tools that the model can call. The model needs to support calling tools.
     */
    tools?: {
      [name in keyof TOOLS]: RenderTool<TOOLS[name]>;
    };

    /**
     * The tool choice strategy. Default: 'auto'.
     */
    toolChoice?: ToolChoice<TOOLS>;

    /**
     * Renderer for streamed text output; defaults to passing the raw text through.
     */
    text?: RenderText;
    /**
     * Initial UI node shown before the first stream update arrives.
     */
    initial?: ReactNode;

    /**
     * Additional provider-specific options. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerOptions?: ProviderOptions;

    /**
     * Callback that is called when the LLM response and the final object validation are finished.
     */
    onFinish?: (event: {
      /**
       * The reason why the generation finished.
       */
      finishReason: FinishReason;
      /**
       * The token usage of the generated response.
       */
      usage: LanguageModelUsage;
      /**
       * The final ui node that was generated.
       */
      value: ReactNode;
      /**
       * Warnings from the model provider (e.g. unsupported settings)
       */
      warnings?: CallWarning[];
      /**
       * Optional response data.
       */
      response?: {
        /**
         * Response headers.
         */
        headers?: Record<string, string>;
      };
    }) => Promise<void> | void;
  }): Promise<RenderResult> {
  // TODO: Remove these errors after the experimental phase.
  if (typeof model === 'string') {
    throw new Error(
      '`model` cannot be a string in `streamUI`. Use the actual model instance instead.',
    );
  }
  if ('functions' in settings) {
    throw new Error(
      '`functions` is not supported in `streamUI`, use `tools` instead.',
    );
  }
  if ('provider' in settings) {
    throw new Error(
      '`provider` is no longer needed in `streamUI`. Use `model` instead.',
    );
  }
  if (tools) {
    // Guard against the pre-`streamUI` API shape, where tools had `render`.
    for (const [name, tool] of Object.entries(tools)) {
      if ('render' in tool) {
        throw new Error(
          'Tool definition in `streamUI` should not have `render` property. Use `generate` instead. Found in tool: ' +
            name,
        );
      }
    }
  }

  const ui = createStreamableUI(initial);

  // The default text renderer just returns the content as string.
  const textRender = text || defaultTextRenderer;

  // Promise chain that serializes all render calls; awaited before onFinish.
  let finished: Promise<void> | undefined;

  // Filled in by the background consumer when a 'finish' chunk arrives.
  let finishEvent: {
    finishReason: FinishReason;
    usage: LanguageModelV3Usage;
    warnings?: CallWarning[];
    response?: {
      headers?: Record<string, string>;
    };
  } | null = null;

  // Runs a renderer and pushes its (possibly generator-yielded) nodes into the
  // streamable UI. Callers deliberately do NOT await this; ordering is
  // enforced through the `finished` promise chain built below.
  async function render({
    args,
    renderer,
    streamableUI,
    isLastCall = false,
  }: {
    renderer: undefined | Renderer<any>;
    args: [payload: any] | [payload: any, options: any];
    streamableUI: ReturnType<typeof createStreamableUI>;
    isLastCall?: boolean;
  }) {
    if (!renderer) return;

    // create a promise that will be resolved when the render call is finished.
    // it is appended to the `finished` promise chain to ensure the render call
    // is finished before the next render call starts.
    const renderFinished = createResolvablePromise<void>();
    finished = finished
      ? finished.then(() => renderFinished.promise)
      : renderFinished.promise;

    const rendererResult = renderer(...args);

    if (isAsyncGenerator(rendererResult) || isGenerator(rendererResult)) {
      // Drain the generator: intermediate yields update the UI; the generator's
      // return value closes the UI only when this is the last render call.
      while (true) {
        const { done, value } = await rendererResult.next();
        const node = await value;

        if (isLastCall && done) {
          streamableUI.done(node);
        } else {
          streamableUI.update(node);
        }

        if (done) break;
      }
    } else {
      const node = await rendererResult;

      if (isLastCall) {
        streamableUI.done(node);
      } else {
        streamableUI.update(node);
      }
    }

    // resolve the promise to signal that the render call is finished
    renderFinished.resolve(undefined);
  }

  const { retry } = prepareRetries({ maxRetries, abortSignal });

  const validatedPrompt = await standardizePrompt({
    system,
    prompt,
    messages,
  } as Prompt);
  const result = await retry(async () =>
    model.doStream({
      ...prepareCallSettings(settings),
      ...prepareToolsAndToolChoice({
        tools: tools as any,
        toolChoice,
        activeTools: undefined,
      }),
      prompt: await convertToLanguageModelPrompt({
        prompt: validatedPrompt,
        supportedUrls: await model.supportedUrls,
        download: undefined,
      }),
      providerOptions,
      abortSignal,
      headers,
      includeRawChunks: false,
    }),
  );

  // Fork the stream: one branch is returned to the caller, the other is
  // consumed here asynchronously to drive the streamable UI.
  const [stream, forkedStream] = result.stream.tee();
  (async () => {
    try {
      let content = '';
      let hasToolCall = false;
      let warnings: SharedV3Warning[] | undefined;

      const reader = forkedStream.getReader();
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        switch (value.type) {
          case 'stream-start': {
            warnings = value.warnings;
            break;
          }

          case 'text-delta': {
            content += value.delta;
            // Intentionally not awaited — serialized via `finished`.
            render({
              renderer: textRender,
              args: [{ content, done: false, delta: value.delta }],
              streamableUI: ui,
            });
            break;
          }

          case 'tool-input-start':
          case 'tool-input-delta': {
            hasToolCall = true;
            break;
          }

          case 'tool-call': {
            const toolName = value.toolName as keyof TOOLS & string;

            if (!tools) {
              throw new NoSuchToolError({ toolName });
            }

            const tool = tools[toolName];
            if (!tool) {
              throw new NoSuchToolError({
                toolName,
                availableTools: Object.keys(tools),
              });
            }

            hasToolCall = true;
            // Validate the raw JSON tool input against the tool's schema.
            const parseResult = await safeParseJSON({
              text: value.input,
              schema: tool.inputSchema,
            });

            if (parseResult.success === false) {
              throw new InvalidToolInputError({
                toolName,
                toolInput: value.input,
                cause: parseResult.error,
              });
            }

            // A tool call is treated as the last render call: it closes the UI.
            render({
              renderer: tool.generate,
              args: [
                parseResult.value,
                {
                  toolName,
                  toolCallId: value.toolCallId,
                },
              ],
              streamableUI: ui,
              isLastCall: true,
            });

            break;
          }

          case 'error': {
            // Re-thrown here so the catch below routes it to ui.error().
            throw value.error;
          }

          case 'finish': {
            finishEvent = {
              // NOTE(review): optional-chained, so this is undefined when the
              // provider omits finishReason — confirm against FinishReason typing.
              finishReason: value.finishReason?.unified,
              usage: value.usage,
              warnings,
              response: result.response,
            };
            break;
          }
        }
      }

      if (!hasToolCall) {
        // Final text render closes the UI.
        // NOTE(review): `delta` is omitted here, so a custom text renderer
        // reading `delta` on the done call sees undefined.
        render({
          renderer: textRender,
          args: [{ content, done: true }],
          streamableUI: ui,
          isLastCall: true,
        });
      }

      // Wait for all chained render calls before reporting completion.
      await finished;

      if (finishEvent && onFinish) {
        await onFinish({
          ...finishEvent,
          usage: asLanguageModelUsage(finishEvent.usage),
          value: ui.value,
        });
      }
    } catch (error) {
      // During the stream rendering, we don't want to throw the error to the
      // parent scope but only let React's error boundary catch it.
      ui.error(error);
    }
  })();

  return {
    ...result,
    stream,
    value: ui.value,
  };
}
|