ai 5.0.0-canary.0 → 5.0.0-canary.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +22 -0
- package/dist/index.d.mts +52 -93
- package/dist/index.d.ts +52 -93
- package/dist/index.js +94 -30
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +94 -30
- package/dist/index.mjs.map +1 -1
- package/package.json +4 -13
- package/rsc/dist/index.d.ts +7 -7
- package/rsc/dist/rsc-server.d.mts +7 -7
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/test/dist/index.d.mts +18 -18
- package/test/dist/index.d.ts +18 -18
- package/test/dist/index.js +4 -4
- package/test/dist/index.js.map +1 -1
- package/test/dist/index.mjs +3 -3
- package/test/dist/index.mjs.map +1 -1
- package/react/dist/index.d.mts +0 -18
- package/react/dist/index.d.ts +0 -18
- package/react/dist/index.js +0 -39
- package/react/dist/index.js.map +0 -1
- package/react/dist/index.mjs +0 -17
- package/react/dist/index.mjs.map +0 -1
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "ai",
-  "version": "5.0.0-canary.0",
+  "version": "5.0.0-canary.2",
   "description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript",
   "license": "Apache-2.0",
   "sideEffects": false,
@@ -10,7 +10,6 @@
   "files": [
     "dist/**/*",
     "mcp-stdio/**/*",
-    "react/dist/**/*",
     "rsc/dist/**/*",
     "test/dist/**/*",
     "CHANGELOG.md"
@@ -33,12 +32,6 @@
       "react-server": "./rsc/dist/rsc-server.mjs",
       "import": "./rsc/dist/rsc-client.mjs"
     },
-    "./react": {
-      "types": "./react/dist/index.d.ts",
-      "react-server": "./react/dist/index.server.mjs",
-      "import": "./react/dist/index.mjs",
-      "require": "./react/dist/index.js"
-    },
     "./mcp-stdio": {
       "types": "./mcp-stdio/dist/index.d.ts",
       "import": "./mcp-stdio/dist/index.mjs",
@@ -47,9 +40,8 @@
   },
   "dependencies": {
     "@ai-sdk/provider": "2.0.0-canary.0",
-    "@ai-sdk/provider-utils": "3.0.0-canary.
-    "@ai-sdk/
-    "@ai-sdk/ui-utils": "2.0.0-canary.0",
+    "@ai-sdk/provider-utils": "3.0.0-canary.1",
+    "@ai-sdk/ui-utils": "2.0.0-canary.1",
     "@opentelemetry/api": "1.9.0",
     "jsondiffpatch": "0.6.0"
   },
@@ -94,14 +86,13 @@
   "keywords": [
     "ai",
     "vercel",
-    "react",
     "next",
     "nextjs"
   ],
   "scripts": {
     "build": "tsup",
     "build:watch": "tsup --watch",
-    "clean": "rm -rf dist && rm -rf
+    "clean": "rm -rf dist && rm -rf rsc/dist",
     "lint": "eslint \"./**/*.ts*\"",
     "type-check": "tsc --noEmit",
     "prettier-check": "prettier --check \"./**/*.ts*\"",
```
package/rsc/dist/index.d.ts
CHANGED
```diff
@@ -1,4 +1,4 @@
-import {
+import { LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2ProviderMetadata, LanguageModelV2 } from '@ai-sdk/provider';
 import { ReactNode } from 'react';
 import { z } from 'zod';
 import { Message } from '@ai-sdk/ui-utils';
@@ -185,12 +185,12 @@ Can be one of the following:
 - `error`: model stopped because of an error
 - `other`: model stopped for other reasons
 */
-type FinishReason =
+type FinishReason = LanguageModelV2FinishReason;
 /**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
-type CallWarning =
+type CallWarning = LanguageModelV2CallWarning;
 /**
 Tool choice for the generation. It supports the following settings:
 
@@ -210,14 +210,14 @@ Additional provider-specific metadata that is returned from the provider.
 This is needed to enable provider-specific functionality that can be
 fully encapsulated in the provider.
 */
-type ProviderMetadata =
+type ProviderMetadata = LanguageModelV2ProviderMetadata;
 /**
 Additional provider-specific options.
 
 They are passed through to the provider from the AI SDK and enable
 provider-specific functionality that can be fully encapsulated in the provider.
 */
-type ProviderOptions =
+type ProviderOptions = LanguageModelV2ProviderMetadata;
 
 /**
 Represents the number of tokens used in a prompt and completion.
@@ -580,7 +580,7 @@ type RenderText = Renderer<[
 ]>;
 type RenderResult = {
     value: ReactNode;
-} & Awaited<ReturnType<
+} & Awaited<ReturnType<LanguageModelV2['doStream']>>;
 /**
  * `streamUI` is a helper function to create a streamable UI from LLMs.
  */
@@ -590,7 +590,7 @@ declare function streamUI<TOOLS extends {
     /**
      * The language model to use.
      */
-    model:
+    model: LanguageModelV2;
     /**
      * The tools that the model can call. The model needs to support calling tools.
      */
```
package/rsc/dist/rsc-server.d.mts
CHANGED
```diff
@@ -1,4 +1,4 @@
-import {
+import { LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2ProviderMetadata, LanguageModelV2 } from '@ai-sdk/provider';
 import { ReactNode } from 'react';
 import { z } from 'zod';
 import { Message } from '@ai-sdk/ui-utils';
@@ -183,12 +183,12 @@ Can be one of the following:
 - `error`: model stopped because of an error
 - `other`: model stopped for other reasons
 */
-type FinishReason =
+type FinishReason = LanguageModelV2FinishReason;
 /**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
-type CallWarning =
+type CallWarning = LanguageModelV2CallWarning;
 /**
 Tool choice for the generation. It supports the following settings:
 
@@ -208,14 +208,14 @@ Additional provider-specific metadata that is returned from the provider.
 This is needed to enable provider-specific functionality that can be
 fully encapsulated in the provider.
 */
-type ProviderMetadata =
+type ProviderMetadata = LanguageModelV2ProviderMetadata;
 /**
 Additional provider-specific options.
 
 They are passed through to the provider from the AI SDK and enable
 provider-specific functionality that can be fully encapsulated in the provider.
 */
-type ProviderOptions =
+type ProviderOptions = LanguageModelV2ProviderMetadata;
 
 /**
 Represents the number of tokens used in a prompt and completion.
@@ -578,7 +578,7 @@ type RenderText = Renderer<[
 ]>;
 type RenderResult = {
     value: ReactNode;
-} & Awaited<ReturnType<
+} & Awaited<ReturnType<LanguageModelV2['doStream']>>;
 /**
  * `streamUI` is a helper function to create a streamable UI from LLMs.
  */
@@ -588,7 +588,7 @@ declare function streamUI<TOOLS extends {
     /**
      * The language model to use.
      */
-    model:
+    model: LanguageModelV2;
     /**
      * The tools that the model can call. The model needs to support calling tools.
      */
```
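In both RSC declaration files, the aliases (`FinishReason`, `CallWarning`, `ProviderMetadata`, `ProviderOptions`) and the `streamUI` `model` option now resolve to the `LanguageModelV2` types from `@ai-sdk/provider` instead of the V1 equivalents. A minimal usage sketch in a React Server Components context follows; the `@ai-sdk/openai` provider and the model id are illustrative assumptions, but any model object implementing the V2 specification should satisfy the new `model` type:

```ts
// Sketch of calling `streamUI` with a model typed as LanguageModelV2.
// Assumptions: a matching canary of `@ai-sdk/openai` whose `openai()` factory
// returns a V2-compatible model; the model id is illustrative.
import { streamUI } from 'ai/rsc';
import { openai } from '@ai-sdk/openai';

export async function summarizeWeather() {
  const result = await streamUI({
    // `model` now expects a LanguageModelV2 implementation.
    model: openai('gpt-4o'),
    prompt: 'Summarize the weather in Berlin in one sentence.',
    // `text` maps streamed text to a ReactNode; a plain string is valid.
    text: ({ content }) => content,
  });
  // RenderResult is { value: ReactNode } & Awaited<ReturnType<LanguageModelV2['doStream']>>.
  return result.value;
}
```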