ai 6.0.93 → 6.0.95
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/dist/internal/index.js +1 -1
- package/dist/internal/index.mjs +1 -1
- package/docs/02-foundations/03-prompts.mdx +1 -1
- package/docs/02-getting-started/01-navigating-the-library.mdx +1 -1
- package/docs/02-getting-started/09-coding-agents.mdx +1 -1
- package/docs/03-agents/03-workflows.mdx +71 -55
- package/docs/03-ai-sdk-core/01-overview.mdx +2 -4
- package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +8 -172
- package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +4 -4
- package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +14 -12
- package/docs/03-ai-sdk-core/55-testing.mdx +8 -8
- package/docs/03-ai-sdk-core/60-telemetry.mdx +13 -53
- package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +1 -1
- package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +2 -3
- package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +9 -9
- package/docs/06-advanced/04-caching.mdx +1 -1
- package/docs/07-reference/01-ai-sdk-core/index.mdx +3 -8
- package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +2 -2
- package/docs/09-troubleshooting/09-client-stream-error.mdx +1 -1
- package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +4 -4
- package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +18 -14
- package/package.json +2 -2
- package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +0 -780
- package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +0 -1152
package/docs/03-ai-sdk-core/55-testing.mdx
CHANGED

@@ -99,14 +99,14 @@ const result = streamText({
 });
 ```

-###
+### generateText with Output

 ```ts
-import {
+import { generateText, Output } from 'ai';
 import { MockLanguageModelV3 } from 'ai/test';
 import { z } from 'zod';

-const result = await
+const result = await generateText({
   model: new MockLanguageModelV3({
     doGenerate: async () => ({
       content: [{ type: 'text', text: `{"content":"Hello, world!"}` }],

@@ -127,19 +127,19 @@ const result = await generateObject({
       warnings: [],
     }),
   }),
-  schema: z.object({ content: z.string() }),
+  output: Output.object({ schema: z.object({ content: z.string() }) }),
  prompt: 'Hello, test!',
 });
 ```

-###
+### streamText with Output

 ```ts
-import {
+import { streamText, Output, simulateReadableStream } from 'ai';
 import { MockLanguageModelV3 } from 'ai/test';
 import { z } from 'zod';

-const result =
+const result = streamText({
   model: new MockLanguageModelV3({
     doStream: async () => ({
       stream: simulateReadableStream({

@@ -174,7 +174,7 @@ const result = streamObject({
       }),
     }),
   }),
-  schema: z.object({ content: z.string() }),
+  output: Output.object({ schema: z.object({ content: z.string() }) }),
   prompt: 'Hello, test!',
 });
 ```
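Note: the updated testing examples read structured results through the new output path instead of dedicated object calls. Below is a minimal consumption sketch, based only on fields that appear elsewhere in this diff (`result.output` for `generateText`, `partialOutputStream` for `streamText`); the OpenAI model id is a placeholder.

```ts
import { generateText, streamText, Output } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const schema = z.object({ content: z.string() });

// generateText: the parsed object is available on `result.output`
// (field name taken from the docs changes in this diff).
const result = await generateText({
  model: openai('gpt-4o'), // placeholder model id
  output: Output.object({ schema }),
  prompt: 'Hello, test!',
});
console.log(result.output.content);

// streamText: partial objects arrive on `partialOutputStream`
// (field name taken from the migration examples in this diff).
const stream = streamText({
  model: openai('gpt-4o'), // placeholder model id
  output: Output.object({ schema }),
  prompt: 'Hello, test!',
});
for await (const partialObject of stream.partialOutputStream) {
  console.log(partialObject);
}
```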
package/docs/03-ai-sdk-core/60-telemetry.mdx
CHANGED

@@ -151,60 +151,21 @@ const result = await generateText({

 It also records a `ai.stream.firstChunk` event when the first chunk of the stream is received.

-###
+### Deprecated object APIs

-
-
-
-
-
-- `operation.name`: `ai.generateObject` and the functionId that was set through `telemetry.functionId`
-- `ai.operationId`: `"ai.generateObject"`
-- `ai.prompt`: the prompt that was used when calling `generateObject`
-- `ai.schema`: Stringified JSON schema version of the schema that was passed into the `generateObject` function
-- `ai.schema.name`: the name of the schema that was passed into the `generateObject` function
-- `ai.schema.description`: the description of the schema that was passed into the `generateObject` function
-- `ai.response.object`: the object that was generated (stringified JSON)
-- `ai.settings.output`: the output type that was used, e.g. `object` or `no-schema`
-
-- `ai.generateObject.doGenerate` (span): a provider doGenerate call.
-It contains the [call LLM span information](#call-llm-span-information) and the following attributes:
-
-- `operation.name`: `ai.generateObject.doGenerate` and the functionId that was set through `telemetry.functionId`
-- `ai.operationId`: `"ai.generateObject.doGenerate"`
-- `ai.prompt.messages`: the messages that were passed into the provider
-- `ai.response.object`: the object that was generated (stringified JSON)
-- `ai.response.finishReason`: the reason why the generation finished
-
-### streamObject function
-
-`streamObject` records 2 types of spans and 1 type of event:
-
-- `ai.streamObject` (span): the full length of the streamObject call. It contains 1 or more `ai.streamObject.doStream` spans.
-It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes:
+<Note type="warning">
+`generateObject` and `streamObject` are deprecated. Use `generateText` and
+`streamText` with the `output` property instead.
+</Note>

-
-- `ai.operationId`: `"ai.streamObject"`
-- `ai.prompt`: the prompt that was used when calling `streamObject`
-- `ai.schema`: Stringified JSON schema version of the schema that was passed into the `streamObject` function
-- `ai.schema.name`: the name of the schema that was passed into the `streamObject` function
-- `ai.schema.description`: the description of the schema that was passed into the `streamObject` function
-- `ai.response.object`: the object that was generated (stringified JSON)
-- `ai.settings.output`: the output type that was used, e.g. `object` or `no-schema`
-
-- `ai.streamObject.doStream` (span): a provider doStream call.
-This span contains an `ai.stream.firstChunk` event.
-It contains the [call LLM span information](#call-llm-span-information) and the following attributes:
+If you still run deprecated object APIs, you will see legacy span names:

-
-
-- `ai.prompt.messages`: the messages that were passed into the provider
-- `ai.response.object`: the object that was generated (stringified JSON)
-- `ai.response.msToFirstChunk`: the time it took to receive the first chunk
-- `ai.response.finishReason`: the reason why the generation finished
+- `generateObject`: `ai.generateObject`, `ai.generateObject.doGenerate`
+- `streamObject`: `ai.streamObject`, `ai.streamObject.doStream`, `ai.stream.firstChunk`

-
-
+Legacy object spans include the same core metadata as other LLM spans, plus
+object-specific attributes such as `ai.schema.*`, `ai.response.object`, and
+`ai.settings.output`.

 ### embed function

@@ -250,8 +211,7 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea

 ### Basic LLM span information

-Many spans that use LLMs (`ai.generateText`, `ai.generateText.doGenerate`, `ai.streamText`, `ai.streamText.doStream
-`ai.generateObject`, `ai.generateObject.doGenerate`, `ai.streamObject`, `ai.streamObject.doStream`) contain the following attributes:
+Many spans that use LLMs (`ai.generateText`, `ai.generateText.doGenerate`, `ai.streamText`, `ai.streamText.doStream`) contain the following attributes:

 - `resource.name`: the functionId that was set through `telemetry.functionId`
 - `ai.model.id`: the id of the model

@@ -266,7 +226,7 @@ Many spans that use LLMs (`ai.generateText`, `ai.generateText.doGenerate`, `ai.s

 ### Call LLM span information

-Spans that correspond to individual LLM calls (`ai.generateText.doGenerate`, `ai.streamText.doStream
+Spans that correspond to individual LLM calls (`ai.generateText.doGenerate`, `ai.streamText.doStream`) contain
 [basic LLM span information](#basic-llm-span-information) and the following attributes:

 - `ai.response.model`: the model that was used to generate the response. This can be different from the model that was requested if the provider supports aliases.
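Note: the `resource.name` attribute above is populated from `telemetry.functionId`. A hedged sketch of enabling per-call telemetry follows; the `experimental_telemetry` option shape is taken from earlier AI SDK releases and is not part of this diff.

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed option shape (isEnabled / functionId / metadata) from prior
// AI SDK releases; `functionId` is what feeds `resource.name` and the
// `operation.name` suffix of the spans listed above.
const result = await generateText({
  model: openai('gpt-4o'), // placeholder model id
  prompt: 'Write a haiku about observability.',
  experimental_telemetry: {
    isEnabled: true,
    functionId: 'haiku-generator',
    metadata: { userId: 'user-123' },
  },
});
```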
package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx
CHANGED

@@ -18,7 +18,7 @@ import { Weather } from '@/components/home/weather';

 The RSC API allows you to stream React components from the server to the client with the [`streamUI`](/docs/reference/ai-sdk-rsc/stream-ui) function. This is useful when you want to go beyond raw text and stream components to the client in real-time.

-Similar to [ AI SDK Core ](/docs/ai-sdk-core/overview) APIs (like [ `streamText` ](/docs/reference/ai-sdk-core/stream-text)
+Similar to [ AI SDK Core ](/docs/ai-sdk-core/overview) APIs (like [ `streamText` ](/docs/reference/ai-sdk-core/stream-text)), `streamUI` provides a single function to call a model and allow it to respond with React Server Components.
 It supports the same model interfaces as AI SDK Core APIs.

 ### Concepts
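Note: for readers of this hunk, a rough sketch of the `streamUI` call shape described above; the `text` render option and `result.value` follow the existing RSC reference and are assumptions relative to this diff.

```tsx
'use server';

import { streamUI } from '@ai-sdk/rsc';
import { openai } from '@ai-sdk/openai';

// Server action sketch: the model's text output is rendered as a React
// node via the `text` callback; component-returning tools can be added
// alongside it. Names are assumptions, not part of this diff.
export async function streamGreeting() {
  const result = await streamUI({
    model: openai('gpt-4o'), // placeholder model id
    prompt: 'Say hello to the user.',
    text: ({ content }) => <p>{content}</p>,
  });

  return result.value; // streamed React node returned to the client
}
```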
package/docs/05-ai-sdk-rsc/05-streaming-values.mdx
CHANGED

@@ -20,9 +20,8 @@ The RSC API provides several utility functions to allow you to stream values fro

 <Note>
 These utilities can also be paired with [AI SDK Core](/docs/ai-sdk-core)
-functions like [`streamText`](/docs/reference/ai-sdk-core/stream-text)
-
-LLM generations from the server to the client.
+functions like [`streamText`](/docs/reference/ai-sdk-core/stream-text) to
+easily stream LLM generations from the server to the client.
 </Note>

 There are two functions provided by the RSC API that allow you to create streamable values:
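Note: of the streamable-value helpers referenced above, `createStreamableValue` is the one used later in this diff's migration examples. A minimal sketch of updating and closing a streamable value from a server action; the `.done()` and `.value` calls follow the existing RSC reference and are assumptions relative to this diff.

```ts
'use server';

import { createStreamableValue } from '@ai-sdk/rsc';

export async function streamStatus() {
  // Start with an initial value, push updates, then close the stream.
  const streamable = createStreamableValue('queued');

  (async () => {
    streamable.update('running');
    streamable.done('finished');
  })();

  // The client reads this with readStreamableValue (per the RSC docs).
  return streamable.value;
}
```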
package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx
CHANGED

@@ -598,12 +598,12 @@ export function Chat({

 ## Streaming Object Generation

-The `createStreamableValue` function streams any serializable data from the server to the client. As a result, this function allows you to stream object generations from the server to the client when paired with `
+The `createStreamableValue` function streams any serializable data from the server to the client. As a result, this function allows you to stream object generations from the server to the client when paired with `streamText` and `Output`.

 #### Before: Use streamable value to stream object generations

 ```ts filename="@/app/actions.ts"
-import {
+import { Output, streamText } from 'ai';
 import { openai } from '@ai-sdk/openai';
 import { createStreamableValue } from '@ai-sdk/rsc';
 import { notificationsSchema } from '@/utils/schemas';

@@ -614,14 +614,14 @@ export async function generateSampleNotifications() {
   const stream = createStreamableValue();

   (async () => {
-    const {
+    const { partialOutputStream } = streamText({
       model: __MODEL__,
       system: 'generate sample ios messages for testing',
       prompt: 'messages from a family group chat during diwali, max 4',
-      schema: notificationsSchema,
+      output: Output.object({ schema: notificationsSchema }),
     });

-    for await (const partialObject of
+    for await (const partialObject of partialOutputStream) {
       stream.update(partialObject);
     }
   })();

@@ -667,21 +667,21 @@ export default function Page() {
 }
 ```

-To migrate to AI SDK UI, you should use the `useObject` hook and implement `
+To migrate to AI SDK UI, you should use the `useObject` hook and implement `streamText` with `Output` within your route handler.

 #### After: Replace with route handler and stream text response

 ```ts filename="@/app/api/object/route.ts"
-import {
+import { Output, streamText } from 'ai';
 import { openai } from '@ai-sdk/openai';
 import { notificationSchema } from '@/utils/schemas';

 export async function POST(req: Request) {
   const context = await req.json();

-  const result =
+  const result = streamText({
     model: __MODEL__,
-    schema: notificationSchema,
+    output: Output.object({ schema: notificationSchema }),
     prompt:
       `Generate 3 notifications for a messages app in this context:` + context,
   });
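Note: the route handler above is only half of the migration; the prose also points at the `useObject` hook on the client. A rough client-side sketch follows; the hook has shipped as `experimental_useObject` from `@ai-sdk/react` in earlier releases, so the export name and options here are assumptions, not part of this diff.

```tsx
'use client';

import { experimental_useObject as useObject } from '@ai-sdk/react';
import { notificationSchema } from '@/utils/schemas';

export default function Page() {
  // `api` points at the route handler shown in the hunk above;
  // `object` holds the partial result as it streams in.
  const { object, submit, isLoading } = useObject({
    api: '/api/object',
    schema: notificationSchema,
  });

  return (
    <div>
      <button
        onClick={() => submit('family group chat during diwali')}
        disabled={isLoading}
      >
        Generate notifications
      </button>
      <pre>{JSON.stringify(object, null, 2)}</pre>
    </div>
  );
}
```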
package/docs/06-advanced/04-caching.mdx
CHANGED

@@ -110,7 +110,7 @@ export const cacheMiddleware: LanguageModelV3Middleware = {
 responses but you can use any KV storage provider you would like.
 </Note>

-`LanguageModelV3Middleware` has two methods: `wrapGenerate` and `wrapStream`. `wrapGenerate` is called when using [`generateText`](/docs/reference/ai-sdk-core/generate-text)
+`LanguageModelV3Middleware` has two methods: `wrapGenerate` and `wrapStream`. `wrapGenerate` is called when using [`generateText`](/docs/reference/ai-sdk-core/generate-text), while `wrapStream` is called when using [`streamText`](/docs/reference/ai-sdk-core/stream-text).

 For `wrapGenerate`, you can cache the response directly. Instead, for `wrapStream`, you cache an array of the stream parts, which can then be used with [`simulateReadableStream`](/docs/ai-sdk-core/testing#simulate-data-stream-protocol-responses) function to create a simulated `ReadableStream` that returns the cached response. In this way, the cached response is returned chunk-by-chunk as if it were being generated by the model. You can control the initial delay and delay between chunks by adjusting the `initialDelayInMs` and `chunkDelayInMs` parameters of `simulateReadableStream`.

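Note: a condensed sketch of the middleware the paragraph above describes. The callback shapes (`{ doGenerate, params }`, `{ doStream, params }`) and the type's export location follow earlier middleware versions and are assumptions relative to this diff; `simulateReadableStream` and its delay options are taken from the text above.

```ts
import { simulateReadableStream } from 'ai';
import type { LanguageModelV3Middleware } from 'ai'; // assumed export location

const cache = new Map<string, any>();

export const cacheMiddleware: LanguageModelV3Middleware = {
  // Cache the full generate result and return it directly on a hit.
  wrapGenerate: async ({ doGenerate, params }) => {
    const key = 'generate:' + JSON.stringify(params);
    if (cache.has(key)) return cache.get(key);
    const result = await doGenerate();
    cache.set(key, result);
    return result;
  },

  // Cache the array of stream parts and replay them chunk-by-chunk.
  wrapStream: async ({ doStream, params }) => {
    const key = 'stream:' + JSON.stringify(params);
    if (cache.has(key)) {
      return {
        stream: simulateReadableStream({
          chunks: cache.get(key),
          initialDelayInMs: 0,
          chunkDelayInMs: 10,
        }),
      };
    }

    const { stream, ...rest } = await doStream();
    const parts: any[] = [];
    const tappedStream = stream.pipeThrough(
      new TransformStream({
        transform(chunk, controller) {
          parts.push(chunk); // record each part for the cache
          controller.enqueue(chunk); // pass it through unchanged
        },
        flush() {
          cache.set(key, parts);
        },
      }),
    );

    return { stream: tappedStream, ...rest };
  },
};
```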
package/docs/07-reference/01-ai-sdk-core/index.mdx
CHANGED

@@ -25,14 +25,9 @@ AI SDK Core contains the following main functions:
     href: '/docs/reference/ai-sdk-core/stream-text',
   },
   {
-    title: '
-    description: '
-    href: '/docs/reference/ai-sdk-core/
-  },
-  {
-    title: 'streamObject()',
-    description: 'Stream structured data from a language model.',
-    href: '/docs/reference/ai-sdk-core/stream-object',
+    title: 'Output',
+    description: 'Structured output types for generateText and streamText.',
+    href: '/docs/reference/ai-sdk-core/output',
   },
   {
     title: 'embed()',
package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx
CHANGED

@@ -26,10 +26,10 @@ It can arise due to the following reasons:
 You can check if an error is an instance of `AI_NoObjectGeneratedError` using:

 ```typescript
-import {
+import { generateText, NoObjectGeneratedError, Output } from 'ai';

 try {
-  await
+  await generateText({ model, output: Output.object({ schema }), prompt });
 } catch (error) {
   if (NoObjectGeneratedError.isInstance(error)) {
     console.log('NoObjectGeneratedError');
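Note: beyond the instance check shown above, the error object exposes diagnostic fields. The property names (`text`, `finishReason`, `usage`, `cause`) follow the published error reference and are assumptions relative to this diff; `model`, `schema`, and `prompt` are placeholders, as in the snippet above.

```typescript
import { generateText, NoObjectGeneratedError, Output } from 'ai';

try {
  await generateText({ model, output: Output.object({ schema }), prompt });
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    console.log('Raw model text:', error.text);
    console.log('Finish reason:', error.finishReason);
    console.log('Usage:', error.usage);
    console.log('Cause:', error.cause);
  }
}
```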
package/docs/09-troubleshooting/09-client-stream-error.mdx
CHANGED

@@ -7,7 +7,7 @@ description: Troubleshooting errors related to using AI SDK Core functions with

 ## Issue

-I am using [`streamText`](/docs/reference/ai-sdk-core/stream-text)
+I am using [`streamText`](/docs/reference/ai-sdk-core/stream-text) with Server Actions, and I am getting a `"only plain objects and a few built ins can be passed from client components"` error.

 ## Background

package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx
CHANGED

@@ -1,13 +1,13 @@
 ---
-title: Tool calling with
-description: Troubleshooting tool calling when combined with
+title: Tool calling with structured outputs
+description: Troubleshooting tool calling when combined with structured output generation
 ---

-# Tool calling with
+# Tool calling with structured outputs

 ## Issue

-You may want to combine tool calling with structured output generation.
+You may want to combine tool calling with structured output generation.

 ## Background

package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx
CHANGED

@@ -7,20 +7,22 @@ description: Troubleshooting NoObjectGeneratedError with finish-reason content-f

 ## Issue

-When using
+When using structured output generation with OpenAI, you may encounter a `NoObjectGeneratedError` with the finish reason `content-filter`. This error occurs when your Zod schema contains incompatible types that OpenAI's structured output feature cannot process.

 ```typescript
 // Problematic code - incompatible schema types
-import {
+import { generateText, Output } from 'ai';
 import { openai } from '@ai-sdk/openai';
 import { z } from 'zod';

-const result = await
+const result = await generateText({
   model: openai('gpt-4o-2024-08-06'),
-
-
-
-
+  output: Output.object({
+    schema: z.object({
+      name: z.string().nullish(), // ❌ .nullish() is not supported
+      email: z.string().optional(), // ❌ .optional() is not supported
+      age: z.number().nullable(), // ✅ .nullable() is supported
+    }),
   }),
   prompt: 'Generate a user profile',
 });

@@ -38,22 +40,24 @@ OpenAI's structured output generation uses JSON Schema under the hood and has sp
 Replace `.nullish()` and `.optional()` with `.nullable()` in your Zod schemas when using structured output generation with OpenAI models.

 ```typescript
-import {
+import { generateText, Output } from 'ai';
 import { openai } from '@ai-sdk/openai';
 import { z } from 'zod';

 // Correct approach - use .nullable()
-const result = await
+const result = await generateText({
   model: openai('gpt-4o-2024-08-06'),
-
-
-
-
+  output: Output.object({
+    schema: z.object({
+      name: z.string().nullable(), // ✅ Use .nullable() instead of .nullish()
+      email: z.string().nullable(), // ✅ Use .nullable() instead of .optional()
+      age: z.number().nullable(),
+    }),
   }),
   prompt: 'Generate a user profile',
 });

-console.log(result.
+console.log(result.output);
 // { name: "John Doe", email: "john@example.com", age: 30 }
 // or { name: null, email: null, age: 25 }
 ```
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "ai",
-  "version": "6.0.93",
+  "version": "6.0.95",
   "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript",
   "license": "Apache-2.0",
   "sideEffects": false,

@@ -45,7 +45,7 @@
   },
   "dependencies": {
     "@opentelemetry/api": "1.9.0",
-    "@ai-sdk/gateway": "3.0.
+    "@ai-sdk/gateway": "3.0.53",
     "@ai-sdk/provider": "3.0.8",
     "@ai-sdk/provider-utils": "4.0.15"
   },