ai 4.1.44 → 4.1.45
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +53 -49
- package/dist/index.d.ts +53 -49
- package/dist/index.js +3 -0
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +3 -0
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
package/dist/index.d.mts
CHANGED
@@ -2,13 +2,61 @@ import { IDGenerator } from '@ai-sdk/provider-utils';
 export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
 import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
-import {
+import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONParseError, TypeValidationError, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { z } from 'zod';
 import { ServerResponse as ServerResponse$1 } from 'http';
 
+/**
+Language model that is used by the AI SDK Core functions.
+ */
+type LanguageModel = LanguageModelV1;
+/**
+Reason why a language model finished generating a response.
+
+Can be one of the following:
+- `stop`: model generated stop sequence
+- `length`: model generated maximum number of tokens
+- `content-filter`: content filter violation stopped the model
+- `tool-calls`: model triggered tool calls
+- `error`: model stopped because of an error
+- `other`: model stopped for other reasons
+ */
+type FinishReason = LanguageModelV1FinishReason;
+/**
+Log probabilities for each token and its top log probabilities.
+
+@deprecated Will become a provider extension in the future.
+ */
+type LogProbs = LanguageModelV1LogProbs;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+ */
+type CallWarning = LanguageModelV1CallWarning;
+/**
+A source that has been used as input to generate the response.
+ */
+type Source = LanguageModelV1Source;
+/**
+Tool choice for the generation. It supports the following settings:
+
+- `auto` (default): the model can choose whether and which tools to call.
+- `required`: the model must call a tool. It can choose which tool to call.
+- `none`: the model must not call tools
+- `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
+ */
+type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+    type: 'tool';
+    toolName: keyof TOOLS;
+};
+/**
+ * @deprecated Use `ToolChoice` instead.
+ */
+type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;
+
 interface DataStreamWriter {
     /**
      * Appends a data part to the stream.
@@ -22,6 +70,10 @@ interface DataStreamWriter {
      * Appends a message annotation to the stream.
      */
     writeMessageAnnotation(value: JSONValue): void;
+    /**
+     * Appends a source part to the stream.
+     */
+    writeSource(source: Source): void;
     /**
      * Merges the contents of another stream to this stream.
      */
@@ -119,54 +171,6 @@ type ImageModelResponseMetadata = {
     headers?: Record<string, string>;
 };
 
-/**
-Language model that is used by the AI SDK Core functions.
- */
-type LanguageModel = LanguageModelV1;
-/**
-Reason why a language model finished generating a response.
-
-Can be one of the following:
-- `stop`: model generated stop sequence
-- `length`: model generated maximum number of tokens
-- `content-filter`: content filter violation stopped the model
-- `tool-calls`: model triggered tool calls
-- `error`: model stopped because of an error
-- `other`: model stopped for other reasons
- */
-type FinishReason = LanguageModelV1FinishReason;
-/**
-Log probabilities for each token and its top log probabilities.
-
-@deprecated Will become a provider extension in the future.
- */
-type LogProbs = LanguageModelV1LogProbs;
-/**
-Warning from the model provider for this call. The call will proceed, but e.g.
-some settings might not be supported, which can lead to suboptimal results.
- */
-type CallWarning = LanguageModelV1CallWarning;
-/**
-A source that has been used as input to generate the response.
- */
-type Source = LanguageModelV1Source;
-/**
-Tool choice for the generation. It supports the following settings:
-
-- `auto` (default): the model can choose whether and which tools to call.
-- `required`: the model must call a tool. It can choose which tool to call.
-- `none`: the model must not call tools
-- `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
- */
-type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
-    type: 'tool';
-    toolName: keyof TOOLS;
-};
-/**
- * @deprecated Use `ToolChoice` instead.
- */
-type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;
-
 type LanguageModelRequestMetadata = {
     /**
     Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
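Note on the declaration change above: the type aliases added near the top of the file (LanguageModel, FinishReason, LogProbs, CallWarning, Source, ToolChoice, CoreToolChoice) are the same declarations removed further down, so they were moved earlier in the file, putting Source in scope for the new DataStreamWriter.writeSource method. A minimal sketch of how the ToolChoice union constrains tool selection follows; the alias is copied locally from the declaration above for illustration (not imported from the package), and the tool-set type is a made-up example.

// Local copy of the ToolChoice union shown in the diff above (illustrative only).
type ToolChoice<TOOLS extends Record<string, unknown>> =
  | 'auto'      // default: the model decides whether and which tools to call
  | 'none'      // the model must not call tools
  | 'required'  // the model must call some tool
  | { type: 'tool'; toolName: keyof TOOLS };  // the model must call the named tool

// Hypothetical tool set; only its keys matter for ToolChoice.
type MyTools = { weather: unknown; search: unknown };

const letModelDecide: ToolChoice<MyTools> = 'auto';
const forceWeather: ToolChoice<MyTools> = { type: 'tool', toolName: 'weather' };
// const invalid: ToolChoice<MyTools> = { type: 'tool', toolName: 'calendar' }; // type error: not a key of MyTools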
package/dist/index.d.ts
CHANGED
@@ -2,13 +2,61 @@ import { IDGenerator } from '@ai-sdk/provider-utils';
 export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
 import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
-import {
+import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONParseError, TypeValidationError, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { z } from 'zod';
 import { ServerResponse as ServerResponse$1 } from 'http';
 
+/**
+Language model that is used by the AI SDK Core functions.
+ */
+type LanguageModel = LanguageModelV1;
+/**
+Reason why a language model finished generating a response.
+
+Can be one of the following:
+- `stop`: model generated stop sequence
+- `length`: model generated maximum number of tokens
+- `content-filter`: content filter violation stopped the model
+- `tool-calls`: model triggered tool calls
+- `error`: model stopped because of an error
+- `other`: model stopped for other reasons
+ */
+type FinishReason = LanguageModelV1FinishReason;
+/**
+Log probabilities for each token and its top log probabilities.
+
+@deprecated Will become a provider extension in the future.
+ */
+type LogProbs = LanguageModelV1LogProbs;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+ */
+type CallWarning = LanguageModelV1CallWarning;
+/**
+A source that has been used as input to generate the response.
+ */
+type Source = LanguageModelV1Source;
+/**
+Tool choice for the generation. It supports the following settings:
+
+- `auto` (default): the model can choose whether and which tools to call.
+- `required`: the model must call a tool. It can choose which tool to call.
+- `none`: the model must not call tools
+- `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
+ */
+type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+    type: 'tool';
+    toolName: keyof TOOLS;
+};
+/**
+ * @deprecated Use `ToolChoice` instead.
+ */
+type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;
+
 interface DataStreamWriter {
     /**
      * Appends a data part to the stream.
@@ -22,6 +70,10 @@ interface DataStreamWriter {
      * Appends a message annotation to the stream.
      */
     writeMessageAnnotation(value: JSONValue): void;
+    /**
+     * Appends a source part to the stream.
+     */
+    writeSource(source: Source): void;
     /**
      * Merges the contents of another stream to this stream.
      */
@@ -119,54 +171,6 @@ type ImageModelResponseMetadata = {
     headers?: Record<string, string>;
 };
 
-/**
-Language model that is used by the AI SDK Core functions.
- */
-type LanguageModel = LanguageModelV1;
-/**
-Reason why a language model finished generating a response.
-
-Can be one of the following:
-- `stop`: model generated stop sequence
-- `length`: model generated maximum number of tokens
-- `content-filter`: content filter violation stopped the model
-- `tool-calls`: model triggered tool calls
-- `error`: model stopped because of an error
-- `other`: model stopped for other reasons
- */
-type FinishReason = LanguageModelV1FinishReason;
-/**
-Log probabilities for each token and its top log probabilities.
-
-@deprecated Will become a provider extension in the future.
- */
-type LogProbs = LanguageModelV1LogProbs;
-/**
-Warning from the model provider for this call. The call will proceed, but e.g.
-some settings might not be supported, which can lead to suboptimal results.
- */
-type CallWarning = LanguageModelV1CallWarning;
-/**
-A source that has been used as input to generate the response.
- */
-type Source = LanguageModelV1Source;
-/**
-Tool choice for the generation. It supports the following settings:
-
-- `auto` (default): the model can choose whether and which tools to call.
-- `required`: the model must call a tool. It can choose which tool to call.
-- `none`: the model must not call tools
-- `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
- */
-type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
-    type: 'tool';
-    toolName: keyof TOOLS;
-};
-/**
- * @deprecated Use `ToolChoice` instead.
- */
-type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;
-
 type LanguageModelRequestMetadata = {
     /**
     Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
package/dist/index.js
CHANGED
@@ -126,6 +126,9 @@ function createDataStream({
     writeMessageAnnotation(annotation) {
       safeEnqueue((0, import_ui_utils.formatDataStreamPart)("message_annotations", [annotation]));
     },
+    writeSource(source) {
+      safeEnqueue((0, import_ui_utils.formatDataStreamPart)("source", source));
+    },
     merge(streamArg) {
      ongoingStreamPromises.push(
        (async () => {
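This is the runtime counterpart of the declaration change: the writer returned by createDataStream gains a writeSource method that emits a "source" data stream part, mirroring the existing writeMessageAnnotation helper. A minimal usage sketch follows; the execute callback name follows the documented createDataStream options, and the Source field names (sourceType, id, url, title) are assumed from the LanguageModelV1Source shape rather than shown in this diff.

import { createDataStream } from 'ai';

const stream = createDataStream({
  execute(writer) {
    // Existing API: append a message annotation part to the stream.
    writer.writeMessageAnnotation({ step: 'retrieval-complete' });

    // New in 4.1.45: append a source part to the stream.
    writer.writeSource({
      sourceType: 'url',            // assumed Source (LanguageModelV1Source) shape
      id: 'source-1',
      url: 'https://example.com/doc',
      title: 'Example document',
    });
  },
});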