ai 6.0.0-beta.99 → 6.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +823 -0
- package/README.md +17 -13
- package/dist/index.d.mts +924 -624
- package/dist/index.d.ts +924 -624
- package/dist/index.js +1394 -976
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1209 -785
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +103 -6
- package/dist/internal/index.d.ts +103 -6
- package/dist/internal/index.js +124 -107
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +97 -79
- package/dist/internal/index.mjs.map +1 -1
- package/dist/test/index.d.mts +19 -19
- package/dist/test/index.d.ts +19 -19
- package/dist/test/index.js +2 -2
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +2 -2
- package/dist/test/index.mjs.map +1 -1
- package/package.json +5 -5
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
import { ModelMessage, Tool } from '@ai-sdk/provider-utils';
|
|
1
|
+
import { SystemModelMessage, ModelMessage, Tool } from '@ai-sdk/provider-utils';
|
|
2
2
|
export { convertAsyncIteratorToReadableStream } from '@ai-sdk/provider-utils';
|
|
3
|
-
import { LanguageModelV3Prompt, LanguageModelV3FunctionTool,
|
|
3
|
+
import { LanguageModelV3Prompt, LanguageModelV3Usage, JSONObject, LanguageModelV3FunctionTool, LanguageModelV3ProviderTool, LanguageModelV3ToolChoice } from '@ai-sdk/provider';
|
|
4
4
|
|
|
5
5
|
/**
|
|
6
6
|
* Experimental. Can change in patch versions without warning.
|
|
@@ -35,7 +35,7 @@ type Prompt = {
|
|
|
35
35
|
/**
|
|
36
36
|
System message to include in the prompt. Can be used with `prompt` or `messages`.
|
|
37
37
|
*/
|
|
38
|
-
system?: string
|
|
38
|
+
system?: string | SystemModelMessage | Array<SystemModelMessage>;
|
|
39
39
|
} & ({
|
|
40
40
|
/**
|
|
41
41
|
A prompt. It can be either a text prompt or a list of messages.
|
|
@@ -68,7 +68,7 @@ type StandardizedPrompt = {
|
|
|
68
68
|
/**
|
|
69
69
|
* System message.
|
|
70
70
|
*/
|
|
71
|
-
system?: string
|
|
71
|
+
system?: string | SystemModelMessage | Array<SystemModelMessage>;
|
|
72
72
|
/**
|
|
73
73
|
* Messages.
|
|
74
74
|
*/
|
|
@@ -153,6 +153,39 @@ type CallSettings = {
|
|
|
153
153
|
headers?: Record<string, string | undefined>;
|
|
154
154
|
};
|
|
155
155
|
|
|
156
|
+
declare global {
|
|
157
|
+
/**
|
|
158
|
+
* Global interface that can be augmented by third-party packages to register custom model IDs.
|
|
159
|
+
*
|
|
160
|
+
* You can register model IDs in two ways:
|
|
161
|
+
*
|
|
162
|
+
* 1. Register based on Model IDs from a provider package:
|
|
163
|
+
* @example
|
|
164
|
+
* ```typescript
|
|
165
|
+
* import { openai } from '@ai-sdk/openai';
|
|
166
|
+
* type OpenAIResponsesModelId = Parameters<typeof openai>[0];
|
|
167
|
+
*
|
|
168
|
+
* declare global {
|
|
169
|
+
* interface RegisteredProviderModels {
|
|
170
|
+
* openai: OpenAIResponsesModelId;
|
|
171
|
+
* }
|
|
172
|
+
* }
|
|
173
|
+
* ```
|
|
174
|
+
*
|
|
175
|
+
* 2. Register individual model IDs directly as keys:
|
|
176
|
+
* @example
|
|
177
|
+
* ```typescript
|
|
178
|
+
* declare global {
|
|
179
|
+
* interface RegisteredProviderModels {
|
|
180
|
+
* 'my-provider:my-model': any;
|
|
181
|
+
* 'my-provider:another-model': any;
|
|
182
|
+
* }
|
|
183
|
+
* }
|
|
184
|
+
* ```
|
|
185
|
+
*/
|
|
186
|
+
interface RegisteredProviderModels {
|
|
187
|
+
}
|
|
188
|
+
}
|
|
156
189
|
/**
|
|
157
190
|
Tool choice for the generation. It supports the following settings:
|
|
158
191
|
|
|
@@ -166,6 +199,70 @@ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'requ
|
|
|
166
199
|
toolName: Extract<keyof TOOLS, string>;
|
|
167
200
|
};
|
|
168
201
|
|
|
202
|
+
/**
|
|
203
|
+
* Represents the number of tokens used in a prompt and completion.
|
|
204
|
+
*/
|
|
205
|
+
type LanguageModelUsage = {
|
|
206
|
+
/**
|
|
207
|
+
* The total number of input (prompt) tokens used.
|
|
208
|
+
*/
|
|
209
|
+
inputTokens: number | undefined;
|
|
210
|
+
/**
|
|
211
|
+
* Detailed information about the input tokens.
|
|
212
|
+
*/
|
|
213
|
+
inputTokenDetails: {
|
|
214
|
+
/**
|
|
215
|
+
* The number of non-cached input (prompt) tokens used.
|
|
216
|
+
*/
|
|
217
|
+
noCacheTokens: number | undefined;
|
|
218
|
+
/**
|
|
219
|
+
* The number of cached input (prompt) tokens read.
|
|
220
|
+
*/
|
|
221
|
+
cacheReadTokens: number | undefined;
|
|
222
|
+
/**
|
|
223
|
+
* The number of cached input (prompt) tokens written.
|
|
224
|
+
*/
|
|
225
|
+
cacheWriteTokens: number | undefined;
|
|
226
|
+
};
|
|
227
|
+
/**
|
|
228
|
+
* The number of total output (completion) tokens used.
|
|
229
|
+
*/
|
|
230
|
+
outputTokens: number | undefined;
|
|
231
|
+
/**
|
|
232
|
+
* Detailed information about the output tokens.
|
|
233
|
+
*/
|
|
234
|
+
outputTokenDetails: {
|
|
235
|
+
/**
|
|
236
|
+
* The number of text tokens used.
|
|
237
|
+
*/
|
|
238
|
+
textTokens: number | undefined;
|
|
239
|
+
/**
|
|
240
|
+
* The number of reasoning tokens used.
|
|
241
|
+
*/
|
|
242
|
+
reasoningTokens: number | undefined;
|
|
243
|
+
};
|
|
244
|
+
/**
|
|
245
|
+
* The total number of tokens used.
|
|
246
|
+
*/
|
|
247
|
+
totalTokens: number | undefined;
|
|
248
|
+
/**
|
|
249
|
+
* @deprecated Use outputTokenDetails.reasoningTokens instead.
|
|
250
|
+
*/
|
|
251
|
+
reasoningTokens?: number | undefined;
|
|
252
|
+
/**
|
|
253
|
+
* @deprecated Use inputTokenDetails.cacheReadTokens instead.
|
|
254
|
+
*/
|
|
255
|
+
cachedInputTokens?: number | undefined;
|
|
256
|
+
/**
|
|
257
|
+
* Raw usage information from the provider.
|
|
258
|
+
*
|
|
259
|
+
* This is the usage information in the shape that the provider returns.
|
|
260
|
+
* It can include additional information that is not part of the standard usage information.
|
|
261
|
+
*/
|
|
262
|
+
raw?: JSONObject;
|
|
263
|
+
};
|
|
264
|
+
declare function asLanguageModelUsage(usage: LanguageModelV3Usage): LanguageModelUsage;
|
|
265
|
+
|
|
169
266
|
type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta' | 'needsApproval'>>;
|
|
170
267
|
|
|
171
268
|
declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolChoice, activeTools, }: {
|
|
@@ -173,7 +270,7 @@ declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolC
|
|
|
173
270
|
toolChoice: ToolChoice<TOOLS> | undefined;
|
|
174
271
|
activeTools: Array<keyof TOOLS> | undefined;
|
|
175
272
|
}): Promise<{
|
|
176
|
-
tools: Array<LanguageModelV3FunctionTool |
|
|
273
|
+
tools: Array<LanguageModelV3FunctionTool | LanguageModelV3ProviderTool> | undefined;
|
|
177
274
|
toolChoice: LanguageModelV3ToolChoice | undefined;
|
|
178
275
|
}>;
|
|
179
276
|
|
|
@@ -195,4 +292,4 @@ declare function prepareRetries({ maxRetries, abortSignal, }: {
|
|
|
195
292
|
retry: RetryFunction;
|
|
196
293
|
};
|
|
197
294
|
|
|
198
|
-
export { convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
|
|
295
|
+
export { asLanguageModelUsage, convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
|
package/dist/internal/index.d.ts
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
import { ModelMessage, Tool } from '@ai-sdk/provider-utils';
|
|
1
|
+
import { SystemModelMessage, ModelMessage, Tool } from '@ai-sdk/provider-utils';
|
|
2
2
|
export { convertAsyncIteratorToReadableStream } from '@ai-sdk/provider-utils';
|
|
3
|
-
import { LanguageModelV3Prompt, LanguageModelV3FunctionTool,
|
|
3
|
+
import { LanguageModelV3Prompt, LanguageModelV3Usage, JSONObject, LanguageModelV3FunctionTool, LanguageModelV3ProviderTool, LanguageModelV3ToolChoice } from '@ai-sdk/provider';
|
|
4
4
|
|
|
5
5
|
/**
|
|
6
6
|
* Experimental. Can change in patch versions without warning.
|
|
@@ -35,7 +35,7 @@ type Prompt = {
|
|
|
35
35
|
/**
|
|
36
36
|
System message to include in the prompt. Can be used with `prompt` or `messages`.
|
|
37
37
|
*/
|
|
38
|
-
system?: string
|
|
38
|
+
system?: string | SystemModelMessage | Array<SystemModelMessage>;
|
|
39
39
|
} & ({
|
|
40
40
|
/**
|
|
41
41
|
A prompt. It can be either a text prompt or a list of messages.
|
|
@@ -68,7 +68,7 @@ type StandardizedPrompt = {
|
|
|
68
68
|
/**
|
|
69
69
|
* System message.
|
|
70
70
|
*/
|
|
71
|
-
system?: string
|
|
71
|
+
system?: string | SystemModelMessage | Array<SystemModelMessage>;
|
|
72
72
|
/**
|
|
73
73
|
* Messages.
|
|
74
74
|
*/
|
|
@@ -153,6 +153,39 @@ type CallSettings = {
|
|
|
153
153
|
headers?: Record<string, string | undefined>;
|
|
154
154
|
};
|
|
155
155
|
|
|
156
|
+
declare global {
|
|
157
|
+
/**
|
|
158
|
+
* Global interface that can be augmented by third-party packages to register custom model IDs.
|
|
159
|
+
*
|
|
160
|
+
* You can register model IDs in two ways:
|
|
161
|
+
*
|
|
162
|
+
* 1. Register based on Model IDs from a provider package:
|
|
163
|
+
* @example
|
|
164
|
+
* ```typescript
|
|
165
|
+
* import { openai } from '@ai-sdk/openai';
|
|
166
|
+
* type OpenAIResponsesModelId = Parameters<typeof openai>[0];
|
|
167
|
+
*
|
|
168
|
+
* declare global {
|
|
169
|
+
* interface RegisteredProviderModels {
|
|
170
|
+
* openai: OpenAIResponsesModelId;
|
|
171
|
+
* }
|
|
172
|
+
* }
|
|
173
|
+
* ```
|
|
174
|
+
*
|
|
175
|
+
* 2. Register individual model IDs directly as keys:
|
|
176
|
+
* @example
|
|
177
|
+
* ```typescript
|
|
178
|
+
* declare global {
|
|
179
|
+
* interface RegisteredProviderModels {
|
|
180
|
+
* 'my-provider:my-model': any;
|
|
181
|
+
* 'my-provider:another-model': any;
|
|
182
|
+
* }
|
|
183
|
+
* }
|
|
184
|
+
* ```
|
|
185
|
+
*/
|
|
186
|
+
interface RegisteredProviderModels {
|
|
187
|
+
}
|
|
188
|
+
}
|
|
156
189
|
/**
|
|
157
190
|
Tool choice for the generation. It supports the following settings:
|
|
158
191
|
|
|
@@ -166,6 +199,70 @@ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'requ
|
|
|
166
199
|
toolName: Extract<keyof TOOLS, string>;
|
|
167
200
|
};
|
|
168
201
|
|
|
202
|
+
/**
|
|
203
|
+
* Represents the number of tokens used in a prompt and completion.
|
|
204
|
+
*/
|
|
205
|
+
type LanguageModelUsage = {
|
|
206
|
+
/**
|
|
207
|
+
* The total number of input (prompt) tokens used.
|
|
208
|
+
*/
|
|
209
|
+
inputTokens: number | undefined;
|
|
210
|
+
/**
|
|
211
|
+
* Detailed information about the input tokens.
|
|
212
|
+
*/
|
|
213
|
+
inputTokenDetails: {
|
|
214
|
+
/**
|
|
215
|
+
* The number of non-cached input (prompt) tokens used.
|
|
216
|
+
*/
|
|
217
|
+
noCacheTokens: number | undefined;
|
|
218
|
+
/**
|
|
219
|
+
* The number of cached input (prompt) tokens read.
|
|
220
|
+
*/
|
|
221
|
+
cacheReadTokens: number | undefined;
|
|
222
|
+
/**
|
|
223
|
+
* The number of cached input (prompt) tokens written.
|
|
224
|
+
*/
|
|
225
|
+
cacheWriteTokens: number | undefined;
|
|
226
|
+
};
|
|
227
|
+
/**
|
|
228
|
+
* The number of total output (completion) tokens used.
|
|
229
|
+
*/
|
|
230
|
+
outputTokens: number | undefined;
|
|
231
|
+
/**
|
|
232
|
+
* Detailed information about the output tokens.
|
|
233
|
+
*/
|
|
234
|
+
outputTokenDetails: {
|
|
235
|
+
/**
|
|
236
|
+
* The number of text tokens used.
|
|
237
|
+
*/
|
|
238
|
+
textTokens: number | undefined;
|
|
239
|
+
/**
|
|
240
|
+
* The number of reasoning tokens used.
|
|
241
|
+
*/
|
|
242
|
+
reasoningTokens: number | undefined;
|
|
243
|
+
};
|
|
244
|
+
/**
|
|
245
|
+
* The total number of tokens used.
|
|
246
|
+
*/
|
|
247
|
+
totalTokens: number | undefined;
|
|
248
|
+
/**
|
|
249
|
+
* @deprecated Use outputTokenDetails.reasoningTokens instead.
|
|
250
|
+
*/
|
|
251
|
+
reasoningTokens?: number | undefined;
|
|
252
|
+
/**
|
|
253
|
+
* @deprecated Use inputTokenDetails.cacheReadTokens instead.
|
|
254
|
+
*/
|
|
255
|
+
cachedInputTokens?: number | undefined;
|
|
256
|
+
/**
|
|
257
|
+
* Raw usage information from the provider.
|
|
258
|
+
*
|
|
259
|
+
* This is the usage information in the shape that the provider returns.
|
|
260
|
+
* It can include additional information that is not part of the standard usage information.
|
|
261
|
+
*/
|
|
262
|
+
raw?: JSONObject;
|
|
263
|
+
};
|
|
264
|
+
declare function asLanguageModelUsage(usage: LanguageModelV3Usage): LanguageModelUsage;
|
|
265
|
+
|
|
169
266
|
type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta' | 'needsApproval'>>;
|
|
170
267
|
|
|
171
268
|
declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolChoice, activeTools, }: {
|
|
@@ -173,7 +270,7 @@ declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolC
|
|
|
173
270
|
toolChoice: ToolChoice<TOOLS> | undefined;
|
|
174
271
|
activeTools: Array<keyof TOOLS> | undefined;
|
|
175
272
|
}): Promise<{
|
|
176
|
-
tools: Array<LanguageModelV3FunctionTool |
|
|
273
|
+
tools: Array<LanguageModelV3FunctionTool | LanguageModelV3ProviderTool> | undefined;
|
|
177
274
|
toolChoice: LanguageModelV3ToolChoice | undefined;
|
|
178
275
|
}>;
|
|
179
276
|
|
|
@@ -195,4 +292,4 @@ declare function prepareRetries({ maxRetries, abortSignal, }: {
|
|
|
195
292
|
retry: RetryFunction;
|
|
196
293
|
};
|
|
197
294
|
|
|
198
|
-
export { convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
|
|
295
|
+
export { asLanguageModelUsage, convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
|