ai 4.0.0-canary.9 → 4.0.0

This diff shows the changes between publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,115 @@
  # ai

+ ## 4.0.0
+
+ ### Major Changes
+
+ - 4e38b38: chore (ai): remove LanguageModelResponseMetadataWithHeaders type
+ - 8bf5756: chore: remove legacy function/tool calling
+ - f0cb69d: chore (ai/core): remove experimental function exports
+ - da8c609: chore (ai): remove Tokens RSC helper
+ - cbab571: chore (ai): remove ExperimentalXXXMessage types
+ - b469a7e: chore: remove isXXXError methods
+ - 54cb888: chore (ai): remove experimental_StreamData export
+ - 4d61295: chore (ai): remove streamToResponse and streamingTextResponse
+ - 9a3d741: chore (ai): remove ExperimentalTool export
+ - 064257d: chore (ai/core): rename simulateReadableStream values parameter to chunks
+ - 60e69ed: chore (ai/core): remove ai-stream related methods from streamText
+ - a4f8ce9: chore (ai): AssistantResponse cleanups
+ - d3ae4f6: chore (ui/react): remove useObject setInput helper
+ - 7264b0a: chore (ai): remove responseMessages property from streamText/generateText result
+ - b801982: chore (ai/core): remove init option from streamText result methods
+ - f68d7b1: chore (ai/core): streamObject returns result immediately (no Promise)
+ - 6090cea: chore (ai): remove rawResponse from generate/stream result objects
+ - 073f282: chore (ai): remove AIStream and related exports
+ - 1c58337: chore (ai): remove 2.x prompt helpers
+ - a40a93d: chore (ai/ui): remove vue, svelte, solid re-export and dependency
+ - a7ad35a: chore: remove legacy providers & rsc render
+ - c0ddc24: chore (ai): remove toJSON method from AI SDK errors
+ - 007cb81: chore (ai): change `streamText` warnings result to Promise
+ - effbce3: chore (ai): remove responseMessage from streamText onFinish callback
+ - 545d133: chore (ai): remove deprecated roundtrip settings from streamText / generateText
+ - 7e89ccb: chore: remove nanoid export
+ - f967199: chore (ai/core): streamText returns result immediately (no Promise)
+ - 62d08fd: chore (ai): remove TokenUsage, CompletionTokenUsage, and EmbeddingTokenUsage types
+ - e5d2ce8: chore (ai): remove deprecated provider registry exports
+ - 70ce742: chore (ai): remove experimental_continuationSteps option
+ - 2f09717: chore (ai): remove deprecated telemetry data
+ - 0827bf9: chore (ai): remove LangChain adapter `toAIStream` method
+
+ ### Patch Changes
+
+ - dce4158: chore (dependencies): update eventsource-parser to 3.0.0
+ - f0ec721: chore (ai): remove openai peer dependency
+ - f9bb30c: chore (ai): remove unnecessary dev dependencies
+ - b053413: chore (ui): refactorings & README update
+ - Updated dependencies [e117b54]
+ - Updated dependencies [8bf5756]
+ - Updated dependencies [b469a7e]
+ - Updated dependencies [79c6dd9]
+ - Updated dependencies [9f81e66]
+ - Updated dependencies [70f28f6]
+ - Updated dependencies [dce4158]
+ - Updated dependencies [d3ae4f6]
+ - Updated dependencies [68d30e9]
+ - Updated dependencies [7814c4b]
+ - Updated dependencies [ca3e586]
+ - Updated dependencies [c0ddc24]
+ - Updated dependencies [fe4f109]
+ - Updated dependencies [84edae5]
+ - Updated dependencies [b1da952]
+ - Updated dependencies [04d3747]
+ - Updated dependencies [dce4158]
+ - Updated dependencies [7e89ccb]
+ - Updated dependencies [8426f55]
+ - Updated dependencies [db46ce5]
+ - Updated dependencies [b053413]
+ - @ai-sdk/react@1.0.0
+ - @ai-sdk/ui-utils@1.0.0
+ - @ai-sdk/provider-utils@2.0.0
+ - @ai-sdk/provider@1.0.0
+
+ ## 4.0.0-canary.13
+
+ ### Major Changes
+
+ - 064257d: chore (ai/core): rename simulateReadableStream values parameter to chunks
+
+ ### Patch Changes
+
+ - Updated dependencies [79c6dd9]
+ - Updated dependencies [04d3747]
+ - @ai-sdk/react@1.0.0-canary.9
+ - @ai-sdk/ui-utils@1.0.0-canary.9
+
+ ## 4.0.0-canary.12
+
+ ### Patch Changes
+
+ - b053413: chore (ui): refactorings & README update
+ - Updated dependencies [b053413]
+ - @ai-sdk/ui-utils@1.0.0-canary.8
+ - @ai-sdk/react@1.0.0-canary.8
+
+ ## 4.0.0-canary.11
+
+ ### Major Changes
+
+ - f68d7b1: chore (ai/core): streamObject returns result immediately (no Promise)
+ - f967199: chore (ai/core): streamText returns result immediately (no Promise)
+
+ ## 4.0.0-canary.10
+
+ ### Major Changes
+
+ - effbce3: chore (ai): remove responseMessage from streamText onFinish callback
+
+ ### Patch Changes
+
+ - Updated dependencies [fe4f109]
+ - @ai-sdk/ui-utils@1.0.0-canary.7
+ - @ai-sdk/react@1.0.0-canary.7
+
  ## 4.0.0-canary.9

  ### Patch Changes
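Worth calling out from the entries above: `streamText` and `streamObject` now return their result object synchronously rather than a Promise, while per-call metadata such as `warnings` becomes a Promise on the result. A minimal before/after sketch (the model id and prompt are illustrative, not taken from this package):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

// ai@3.x: `const result = await streamText({ ... })`
// ai@4.0: the call returns the result object immediately; only the
// streamed/async parts (textStream, usage, warnings, ...) resolve later.
const result = streamText({
  model: openai('gpt-4o'), // illustrative model id
  prompt: 'Why is the sky blue?',
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```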
package/README.md CHANGED
@@ -32,17 +32,13 @@ npm install @ai-sdk/openai
  import { generateText } from 'ai';
  import { openai } from '@ai-sdk/openai'; // Ensure OPENAI_API_KEY environment variable is set

- async function main() {
-   const { text } = await generateText({
-     model: openai('gpt-4-turbo'),
-     system: 'You are a friendly assistant!',
-     prompt: 'Why is the sky blue?',
-   });
-
-   console.log(text);
- }
+ const { text } = await generateText({
+   model: openai('gpt-4o'),
+   system: 'You are a friendly assistant!',
+   prompt: 'Why is the sky blue?',
+ });

- main();
+ console.log(text);
  ```

  ### AI SDK UI
@@ -85,14 +81,14 @@ export default function Page() {
  ###### @/app/api/chat/route.ts (Next.js App Router)

  ```ts
- import { CoreMessage, streamText } from 'ai';
+ import { streamText } from 'ai';
  import { openai } from '@ai-sdk/openai';

  export async function POST(req: Request) {
-   const { messages }: { messages: CoreMessage[] } = await req.json();
+   const { messages } = await req.json();

-   const result = await streamText({
-     model: openai('gpt-4'),
+   const result = streamText({
+     model: openai('gpt-4o'),
      system: 'You are a helpful assistant.',
      messages,
    });
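The route handler in this hunk continues below the lines shown here; the closing part of the README example is not included in the diff. A sketch of how the complete 4.0-style handler would typically look, assuming the response is returned via `toDataStreamResponse()`:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function POST(req: Request) {
  const { messages } = await req.json();

  // 4.0: streamText is no longer awaited; it returns the result directly.
  const result = streamText({
    model: openai('gpt-4o'),
    system: 'You are a helpful assistant.',
    messages,
  });

  // Assumed closing line (not shown in this hunk): stream the result back
  // to the useChat client as a data stream response.
  return result.toDataStreamResponse();
}
```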
package/dist/index.d.mts CHANGED
@@ -1,5 +1,5 @@
  import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
+ export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
  export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
@@ -891,7 +891,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
  /**
  Warnings from the model provider (e.g. unsupported settings)
  */
- readonly warnings: CallWarning[] | undefined;
+ readonly warnings: Promise<CallWarning[] | undefined>;
  /**
  The token usage of the generated response. Resolved when the response is finished.
  */
@@ -1066,7 +1066,7 @@ Callback that is called when the LLM response and the final object validation ar
  currentDate?: () => Date;
  now?: () => number;
  };
- }): Promise<StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>>;
+ }): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
  /**
  Generate an array with structured, typed elements for a given prompt and element schema using a language model.

@@ -1133,7 +1133,7 @@ Callback that is called when the LLM response and the final object validation ar
  currentDate?: () => Date;
  now?: () => number;
  };
- }): Promise<StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>>;
+ }): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
  /**
  Generate JSON with any schema for a given prompt using a language model.

@@ -1174,7 +1174,7 @@ Callback that is called when the LLM response and the final object validation ar
  currentDate?: () => Date;
  now?: () => number;
  };
- }): Promise<StreamObjectResult<JSONValue, JSONValue, never>>;
+ }): StreamObjectResult<JSONValue, JSONValue, never>;

  type Parameters = z.ZodTypeAny | Schema<any>;
  type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
@@ -1876,15 +1876,6 @@ The usage is the combined usage of all steps.
  Details for all steps.
  */
  readonly steps: StepResult<TOOLS>[];
- /**
- The response messages that were generated during the call. It consists of an assistant message,
- potentially containing tool calls.
-
- When there are tool results, there is an additional tool message with the tool results that are available.
- If there are tools that do not have execute functions, they are not included in the tool results and
- need to be added separately.
- */
- readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
  }) => Promise<void> | void;
  /**
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
@@ -1898,7 +1889,7 @@ need to be added separately.
  generateId?: () => string;
  currentDate?: () => Date;
  };
- }): Promise<StreamTextResult<TOOLS>>;
+ }): StreamTextResult<TOOLS>;

  /**
  * Experimental middleware for LanguageModelV1.
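Two practical consequences of the `StreamObjectResult` changes above: `streamObject` itself is no longer awaited, and `warnings` must now be awaited on the result. A minimal sketch of the 4.0 call pattern (schema and prompt are illustrative):

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// 4.0: no `await` on the call itself.
const result = streamObject({
  model: openai('gpt-4o'), // illustrative model id
  schema: z.object({ city: z.string(), country: z.string() }),
  prompt: 'Name a city and the country it is in.',
});

// `warnings` is now Promise<CallWarning[] | undefined>, so await it.
const warnings = await result.warnings;
if (warnings?.length) console.warn(warnings);

for await (const partialObject of result.partialObjectStream) {
  console.log(partialObject);
}
```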
package/dist/index.d.ts CHANGED
@@ -1,5 +1,5 @@
  import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
+ export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
  export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
@@ -891,7 +891,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
  /**
  Warnings from the model provider (e.g. unsupported settings)
  */
- readonly warnings: CallWarning[] | undefined;
+ readonly warnings: Promise<CallWarning[] | undefined>;
  /**
  The token usage of the generated response. Resolved when the response is finished.
  */
@@ -1066,7 +1066,7 @@ Callback that is called when the LLM response and the final object validation ar
  currentDate?: () => Date;
  now?: () => number;
  };
- }): Promise<StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>>;
+ }): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
  /**
  Generate an array with structured, typed elements for a given prompt and element schema using a language model.

@@ -1133,7 +1133,7 @@ Callback that is called when the LLM response and the final object validation ar
  currentDate?: () => Date;
  now?: () => number;
  };
- }): Promise<StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>>;
+ }): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
  /**
  Generate JSON with any schema for a given prompt using a language model.

@@ -1174,7 +1174,7 @@ Callback that is called when the LLM response and the final object validation ar
  currentDate?: () => Date;
  now?: () => number;
  };
- }): Promise<StreamObjectResult<JSONValue, JSONValue, never>>;
+ }): StreamObjectResult<JSONValue, JSONValue, never>;

  type Parameters = z.ZodTypeAny | Schema<any>;
  type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
@@ -1876,15 +1876,6 @@ The usage is the combined usage of all steps.
  Details for all steps.
  */
  readonly steps: StepResult<TOOLS>[];
- /**
- The response messages that were generated during the call. It consists of an assistant message,
- potentially containing tool calls.
-
- When there are tool results, there is an additional tool message with the tool results that are available.
- If there are tools that do not have execute functions, they are not included in the tool results and
- need to be added separately.
- */
- readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
  }) => Promise<void> | void;
  /**
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
@@ -1898,7 +1889,7 @@ need to be added separately.
  generateId?: () => string;
  currentDate?: () => Date;
  };
- }): Promise<StreamTextResult<TOOLS>>;
+ }): StreamTextResult<TOOLS>;

  /**
  * Experimental middleware for LanguageModelV1.
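The removed `responseMessages` field corresponds to changelog entries `7264b0a` and `effbce3`. This diff does not show its replacement; in 4.0 the generated assistant/tool messages are reachable through the `response` object instead, so a migration would look roughly like the sketch below (the `response.messages` access is an assumption based on the 4.0 API and is not shown in this diff):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'), // illustrative model id
  prompt: 'Answer briefly.',
  onFinish({ response }) {
    // ai@3.x exposed `responseMessages` here; in 4.0 the generated
    // assistant/tool messages are assumed to live on `response.messages`.
    console.log(response.messages);
  },
});

// Drain the stream so the call completes and onFinish fires.
for await (const _chunk of result.textStream) {
  // no-op: we only care about onFinish here
}
```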