ai 4.0.0-canary.9 → 4.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +116 -0
- package/README.md +10 -14
- package/dist/index.d.mts +16 -15
- package/dist/index.d.ts +16 -15
- package/dist/index.js +1098 -1047
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1101 -1050
- package/dist/index.mjs.map +1 -1
- package/package.json +7 -7
- package/test/dist/index.d.mts +3 -3
- package/test/dist/index.d.ts +3 -3
- package/test/dist/index.js +3 -3
- package/test/dist/index.js.map +1 -1
- package/test/dist/index.mjs +3 -3
- package/test/dist/index.mjs.map +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,121 @@
 # ai
 
+## 4.0.1
+
+### Patch Changes
+
+- b117255: feat (ai/core): add messages to tool call options
+
+## 4.0.0
+
+### Major Changes
+
+- 4e38b38: chore (ai): remove LanguageModelResponseMetadataWithHeaders type
+- 8bf5756: chore: remove legacy function/tool calling
+- f0cb69d: chore (ai/core): remove experimental function exports
+- da8c609: chore (ai): remove Tokens RSC helper
+- cbab571: chore (ai): remove ExperimentalXXXMessage types
+- b469a7e: chore: remove isXXXError methods
+- 54cb888: chore (ai): remove experimental_StreamData export
+- 4d61295: chore (ai): remove streamToResponse and streamingTextResponse
+- 9a3d741: chore (ai): remove ExperimentalTool export
+- 064257d: chore (ai/core): rename simulateReadableStream values parameter to chunks
+- 60e69ed: chore (ai/core): remove ai-stream related methods from streamText
+- a4f8ce9: chore (ai): AssistantResponse cleanups
+- d3ae4f6: chore (ui/react): remove useObject setInput helper
+- 7264b0a: chore (ai): remove responseMessages property from streamText/generateText result
+- b801982: chore (ai/core): remove init option from streamText result methods
+- f68d7b1: chore (ai/core): streamObject returns result immediately (no Promise)
+- 6090cea: chore (ai): remove rawResponse from generate/stream result objects
+- 073f282: chore (ai): remove AIStream and related exports
+- 1c58337: chore (ai): remove 2.x prompt helpers
+- a40a93d: chore (ai/ui): remove vue, svelte, solid re-export and dependency
+- a7ad35a: chore: remove legacy providers & rsc render
+- c0ddc24: chore (ai): remove toJSON method from AI SDK errors
+- 007cb81: chore (ai): change `streamText` warnings result to Promise
+- effbce3: chore (ai): remove responseMessage from streamText onFinish callback
+- 545d133: chore (ai): remove deprecated roundtrip settings from streamText / generateText
+- 7e89ccb: chore: remove nanoid export
+- f967199: chore (ai/core): streamText returns result immediately (no Promise)
+- 62d08fd: chore (ai): remove TokenUsage, CompletionTokenUsage, and EmbeddingTokenUsage types
+- e5d2ce8: chore (ai): remove deprecated provider registry exports
+- 70ce742: chore (ai): remove experimental_continuationSteps option
+- 2f09717: chore (ai): remove deprecated telemetry data
+- 0827bf9: chore (ai): remove LangChain adapter `toAIStream` method
+
+### Patch Changes
+
+- dce4158: chore (dependencies): update eventsource-parser to 3.0.0
+- f0ec721: chore (ai): remove openai peer dependency
+- f9bb30c: chore (ai): remove unnecessary dev dependencies
+- b053413: chore (ui): refactorings & README update
+- Updated dependencies [e117b54]
+- Updated dependencies [8bf5756]
+- Updated dependencies [b469a7e]
+- Updated dependencies [79c6dd9]
+- Updated dependencies [9f81e66]
+- Updated dependencies [70f28f6]
+- Updated dependencies [dce4158]
+- Updated dependencies [d3ae4f6]
+- Updated dependencies [68d30e9]
+- Updated dependencies [7814c4b]
+- Updated dependencies [ca3e586]
+- Updated dependencies [c0ddc24]
+- Updated dependencies [fe4f109]
+- Updated dependencies [84edae5]
+- Updated dependencies [b1da952]
+- Updated dependencies [04d3747]
+- Updated dependencies [dce4158]
+- Updated dependencies [7e89ccb]
+- Updated dependencies [8426f55]
+- Updated dependencies [db46ce5]
+- Updated dependencies [b053413]
+  - @ai-sdk/react@1.0.0
+  - @ai-sdk/ui-utils@1.0.0
+  - @ai-sdk/provider-utils@2.0.0
+  - @ai-sdk/provider@1.0.0
+
+## 4.0.0-canary.13
+
+### Major Changes
+
+- 064257d: chore (ai/core): rename simulateReadableStream values parameter to chunks
+
+### Patch Changes
+
+- Updated dependencies [79c6dd9]
+- Updated dependencies [04d3747]
+  - @ai-sdk/react@1.0.0-canary.9
+  - @ai-sdk/ui-utils@1.0.0-canary.9
+
+## 4.0.0-canary.12
+
+### Patch Changes
+
+- b053413: chore (ui): refactorings & README update
+- Updated dependencies [b053413]
+  - @ai-sdk/ui-utils@1.0.0-canary.8
+  - @ai-sdk/react@1.0.0-canary.8
+
+## 4.0.0-canary.11
+
+### Major Changes
+
+- f68d7b1: chore (ai/core): streamObject returns result immediately (no Promise)
+- f967199: chore (ai/core): streamText returns result immediately (no Promise)
+
+## 4.0.0-canary.10
+
+### Major Changes
+
+- effbce3: chore (ai): remove responseMessage from streamText onFinish callback
+
+### Patch Changes
+
+- Updated dependencies [fe4f109]
+  - @ai-sdk/ui-utils@1.0.0-canary.7
+  - @ai-sdk/react@1.0.0-canary.7
+
 ## 4.0.0-canary.9
 
 ### Patch Changes
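The 4.0.1 patch (b117255) is the one functional change on top of 4.0.0: a tool's `execute` callback now also receives the `messages` that were sent to the language model, next to the existing `abortSignal` (see the `CoreTool` typings further down in this diff). A minimal sketch of how a tool might use this; the tool itself and its description are hypothetical:

```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Hypothetical tool that inspects the conversation it was called from.
const conversationStats = tool({
  description: 'Report how many user messages have been sent so far',
  parameters: z.object({ topic: z.string() }),
  // New in 4.0.1: `messages` are the messages that were sent to the model
  // for this call; they exclude the system prompt and the assistant
  // response that contained the tool call.
  execute: async ({ topic }, { messages }) => {
    const userTurns = messages.filter(m => m.role === 'user').length;
    return `Topic "${topic}" came up after ${userTurns} user message(s).`;
  },
});

const { text } = await generateText({
  model: openai('gpt-4o'),
  tools: { conversationStats },
  prompt: 'How long has this conversation been going?',
});

console.log(text);
```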
package/README.md
CHANGED
@@ -32,17 +32,13 @@ npm install @ai-sdk/openai
 import { generateText } from 'ai';
 import { openai } from '@ai-sdk/openai'; // Ensure OPENAI_API_KEY environment variable is set
 
-
-
-
-
-
-});
-
-console.log(text);
-}
+const { text } = await generateText({
+  model: openai('gpt-4o'),
+  system: 'You are a friendly assistant!',
+  prompt: 'Why is the sky blue?',
+});
 
-
+console.log(text);
 ```
 
 ### AI SDK UI
@@ -85,14 +81,14 @@ export default function Page() {
 ###### @/app/api/chat/route.ts (Next.js App Router)
 
 ```ts
-import {
+import { streamText } from 'ai';
 import { openai } from '@ai-sdk/openai';
 
 export async function POST(req: Request) {
-  const { messages }
+  const { messages } = await req.json();
 
-  const result =
-    model: openai('gpt-
+  const result = streamText({
+    model: openai('gpt-4o'),
     system: 'You are a helpful assistant.',
     messages,
   });
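The unchanged tail of the route handler is not part of this hunk; a sketch of the complete updated handler, where the final response line is an assumption rather than content shown in this diff:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = streamText({
    model: openai('gpt-4o'),
    system: 'You are a helpful assistant.',
    messages,
  });

  // Assumed closing line (not shown in this hunk): return the stream as a Response.
  return result.toDataStreamResponse();
}
```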
package/dist/index.d.mts
CHANGED
@@ -1,5 +1,5 @@
 import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
-export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema,
+export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
 export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
@@ -891,7 +891,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
 /**
 Warnings from the model provider (e.g. unsupported settings)
  */
-readonly warnings: CallWarning[] | undefined
+readonly warnings: Promise<CallWarning[] | undefined>;
 /**
 The token usage of the generated response. Resolved when the response is finished.
  */
@@ -1066,7 +1066,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
 /**
 Generate an array with structured, typed elements for a given prompt and element schema using a language model.
 
@@ -1133,7 +1133,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
 /**
 Generate JSON with any schema for a given prompt using a language model.
 
@@ -1174,7 +1174,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<JSONValue, JSONValue, never>;
 
 type Parameters = z.ZodTypeAny | Schema<any>;
 type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
@@ -1203,6 +1203,14 @@ type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
 @options.abortSignal is a signal that can be used to abort the tool call.
  */
 execute?: (args: inferParameters<PARAMETERS>, options: {
+/**
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+ */
+messages: CoreMessage[];
+/**
+ * An optional abort signal that indicates that the overall operation should be aborted.
+ */
 abortSignal?: AbortSignal;
 }) => PromiseLike<RESULT>;
 } & ({
@@ -1233,10 +1241,12 @@ Helper function for inferring the execute args of a tool.
  */
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
 execute: (args: inferParameters<PARAMETERS>, options: {
+messages: CoreMessage[];
 abortSignal?: AbortSignal;
 }) => PromiseLike<RESULT>;
 }): CoreTool<PARAMETERS, RESULT> & {
 execute: (args: inferParameters<PARAMETERS>, options: {
+messages: CoreMessage[];
 abortSignal?: AbortSignal;
 }) => PromiseLike<RESULT>;
 };
@@ -1876,15 +1886,6 @@ The usage is the combined usage of all steps.
 Details for all steps.
  */
 readonly steps: StepResult<TOOLS>[];
-/**
-The response messages that were generated during the call. It consists of an assistant message,
-potentially containing tool calls.
-
-When there are tool results, there is an additional tool message with the tool results that are available.
-If there are tools that do not have execute functions, they are not included in the tool results and
-need to be added separately.
- */
-readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
 }) => Promise<void> | void;
 /**
 Callback that is called when each step (LLM call) is finished, including intermediate steps.
@@ -1898,7 +1899,7 @@ need to be added separately.
 generateId?: () => string;
 currentDate?: () => Date;
 };
-}):
+}): StreamTextResult<TOOLS>;
 
 /**
 * Experimental middleware for LanguageModelV1.
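Two related 4.0 changes are visible in these typings: `streamObject` (and `streamText`) now return their results directly instead of wrapping them in a Promise, and `warnings` on `StreamObjectResult` now resolves asynchronously. A sketch of the resulting call pattern, with a made-up schema and prompt:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// No `await` on the call itself: streamObject now returns its result immediately.
const result = streamObject({
  model: openai('gpt-4o'),
  schema: z.object({ city: z.string(), country: z.string() }),
  prompt: 'Name a city and the country it is in.',
});

// `warnings` is now a Promise<CallWarning[] | undefined>.
const warnings = await result.warnings;
if (warnings?.length) {
  console.warn('provider warnings:', warnings);
}

for await (const partial of result.partialObjectStream) {
  console.log(partial);
}
```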
package/dist/index.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
-export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema,
+export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
 export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
@@ -891,7 +891,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
 /**
 Warnings from the model provider (e.g. unsupported settings)
  */
-readonly warnings: CallWarning[] | undefined
+readonly warnings: Promise<CallWarning[] | undefined>;
 /**
 The token usage of the generated response. Resolved when the response is finished.
  */
@@ -1066,7 +1066,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
 /**
 Generate an array with structured, typed elements for a given prompt and element schema using a language model.
 
@@ -1133,7 +1133,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
 /**
 Generate JSON with any schema for a given prompt using a language model.
 
@@ -1174,7 +1174,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<JSONValue, JSONValue, never>;
 
 type Parameters = z.ZodTypeAny | Schema<any>;
 type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
@@ -1203,6 +1203,14 @@ type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
 @options.abortSignal is a signal that can be used to abort the tool call.
  */
 execute?: (args: inferParameters<PARAMETERS>, options: {
+/**
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+ */
+messages: CoreMessage[];
+/**
+ * An optional abort signal that indicates that the overall operation should be aborted.
+ */
 abortSignal?: AbortSignal;
 }) => PromiseLike<RESULT>;
 } & ({
@@ -1233,10 +1241,12 @@ Helper function for inferring the execute args of a tool.
  */
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
 execute: (args: inferParameters<PARAMETERS>, options: {
+messages: CoreMessage[];
 abortSignal?: AbortSignal;
 }) => PromiseLike<RESULT>;
 }): CoreTool<PARAMETERS, RESULT> & {
 execute: (args: inferParameters<PARAMETERS>, options: {
+messages: CoreMessage[];
 abortSignal?: AbortSignal;
 }) => PromiseLike<RESULT>;
 };
@@ -1876,15 +1886,6 @@ The usage is the combined usage of all steps.
 Details for all steps.
  */
 readonly steps: StepResult<TOOLS>[];
-/**
-The response messages that were generated during the call. It consists of an assistant message,
-potentially containing tool calls.
-
-When there are tool results, there is an additional tool message with the tool results that are available.
-If there are tools that do not have execute functions, they are not included in the tool results and
-need to be added separately.
- */
-readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
 }) => Promise<void> | void;
 /**
 Callback that is called when each step (LLM call) is finished, including intermediate steps.
@@ -1898,7 +1899,7 @@ need to be added separately.
 generateId?: () => string;
 currentDate?: () => Date;
 };
-}):
+}): StreamTextResult<TOOLS>;
 
 /**
 * Experimental middleware for LanguageModelV1.