@ai-sdk/openai 3.0.13 → 3.0.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -1
- package/dist/internal/index.d.ts +1 -1
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +5 -4
- package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
- package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
- package/src/chat/convert-openai-chat-usage.ts +57 -0
- package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
- package/src/chat/convert-to-openai-chat-messages.ts +225 -0
- package/src/chat/get-response-metadata.ts +15 -0
- package/src/chat/map-openai-finish-reason.ts +19 -0
- package/src/chat/openai-chat-api.ts +198 -0
- package/src/chat/openai-chat-language-model.test.ts +3496 -0
- package/src/chat/openai-chat-language-model.ts +700 -0
- package/src/chat/openai-chat-options.ts +186 -0
- package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
- package/src/chat/openai-chat-prepare-tools.ts +84 -0
- package/src/chat/openai-chat-prompt.ts +70 -0
- package/src/completion/convert-openai-completion-usage.ts +46 -0
- package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
- package/src/completion/get-response-metadata.ts +15 -0
- package/src/completion/map-openai-finish-reason.ts +19 -0
- package/src/completion/openai-completion-api.ts +81 -0
- package/src/completion/openai-completion-language-model.test.ts +752 -0
- package/src/completion/openai-completion-language-model.ts +336 -0
- package/src/completion/openai-completion-options.ts +58 -0
- package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
- package/src/embedding/openai-embedding-api.ts +13 -0
- package/src/embedding/openai-embedding-model.test.ts +146 -0
- package/src/embedding/openai-embedding-model.ts +95 -0
- package/src/embedding/openai-embedding-options.ts +30 -0
- package/src/image/openai-image-api.ts +35 -0
- package/src/image/openai-image-model.test.ts +722 -0
- package/src/image/openai-image-model.ts +305 -0
- package/src/image/openai-image-options.ts +28 -0
- package/src/index.ts +9 -0
- package/src/internal/index.ts +19 -0
- package/src/openai-config.ts +18 -0
- package/src/openai-error.test.ts +34 -0
- package/src/openai-error.ts +22 -0
- package/src/openai-language-model-capabilities.test.ts +93 -0
- package/src/openai-language-model-capabilities.ts +54 -0
- package/src/openai-provider.test.ts +98 -0
- package/src/openai-provider.ts +270 -0
- package/src/openai-tools.ts +114 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
- package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
- package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
- package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
- package/src/responses/__fixtures__/openai-error.1.json +8 -0
- package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
- package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
- package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
- package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
- package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
- package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
- package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
- package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
- package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
- package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
- package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
- package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
- package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
- package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
- package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
- package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
- package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
- package/src/responses/convert-openai-responses-usage.ts +53 -0
- package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
- package/src/responses/convert-to-openai-responses-input.ts +578 -0
- package/src/responses/map-openai-responses-finish-reason.ts +22 -0
- package/src/responses/openai-responses-api.test.ts +89 -0
- package/src/responses/openai-responses-api.ts +1086 -0
- package/src/responses/openai-responses-language-model.test.ts +6927 -0
- package/src/responses/openai-responses-language-model.ts +1932 -0
- package/src/responses/openai-responses-options.ts +312 -0
- package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
- package/src/responses/openai-responses-prepare-tools.ts +264 -0
- package/src/responses/openai-responses-provider-metadata.ts +39 -0
- package/src/speech/openai-speech-api.ts +38 -0
- package/src/speech/openai-speech-model.test.ts +202 -0
- package/src/speech/openai-speech-model.ts +137 -0
- package/src/speech/openai-speech-options.ts +22 -0
- package/src/tool/apply-patch.ts +141 -0
- package/src/tool/code-interpreter.ts +104 -0
- package/src/tool/file-search.ts +145 -0
- package/src/tool/image-generation.ts +126 -0
- package/src/tool/local-shell.test-d.ts +20 -0
- package/src/tool/local-shell.ts +72 -0
- package/src/tool/mcp.ts +125 -0
- package/src/tool/shell.ts +85 -0
- package/src/tool/web-search-preview.ts +139 -0
- package/src/tool/web-search.test-d.ts +13 -0
- package/src/tool/web-search.ts +179 -0
- package/src/transcription/openai-transcription-api.ts +37 -0
- package/src/transcription/openai-transcription-model.test.ts +507 -0
- package/src/transcription/openai-transcription-model.ts +232 -0
- package/src/transcription/openai-transcription-options.ts +50 -0
- package/src/transcription/transcription-test.mp3 +0 -0
- package/src/version.ts +6 -0
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
|
|
2
|
+
import { z } from 'zod/v4';
|
|
3
|
+
|
|
4
|
+
// https://platform.openai.com/docs/models
/**
 * Known OpenAI chat model IDs.
 *
 * The trailing `(string & {})` member keeps the union open so that any
 * model ID string (e.g. fine-tuned or newly released models) is accepted,
 * while editors still offer autocompletion for the listed IDs.
 */
export type OpenAIChatModelId =
  | 'o1'
  | 'o1-2024-12-17'
  | 'o3-mini'
  | 'o3-mini-2025-01-31'
  | 'o3'
  | 'o3-2025-04-16'
  | 'o4-mini'
  | 'o4-mini-2025-04-16'
  | 'gpt-4.1'
  | 'gpt-4.1-2025-04-14'
  | 'gpt-4.1-mini'
  | 'gpt-4.1-mini-2025-04-14'
  | 'gpt-4.1-nano'
  | 'gpt-4.1-nano-2025-04-14'
  | 'gpt-4o'
  | 'gpt-4o-2024-05-13'
  | 'gpt-4o-2024-08-06'
  | 'gpt-4o-2024-11-20'
  | 'gpt-4o-mini'
  | 'gpt-4o-mini-2024-07-18'
  | 'gpt-4-turbo'
  | 'gpt-4-turbo-2024-04-09'
  | 'gpt-4'
  | 'gpt-4-0613'
  | 'gpt-4.5-preview'
  | 'gpt-4.5-preview-2025-02-27'
  | 'gpt-3.5-turbo-0125'
  | 'gpt-3.5-turbo'
  | 'gpt-3.5-turbo-1106'
  | 'chatgpt-4o-latest'
  | 'gpt-5'
  | 'gpt-5-2025-08-07'
  | 'gpt-5-mini'
  | 'gpt-5-mini-2025-08-07'
  | 'gpt-5-nano'
  | 'gpt-5-nano-2025-08-07'
  | 'gpt-5-chat-latest'
  | 'gpt-5.1'
  | 'gpt-5.1-chat-latest'
  | 'gpt-5.2'
  | 'gpt-5.2-chat-latest'
  | 'gpt-5.2-pro'
  | (string & {});
|
|
49
|
+
|
|
50
|
+
/**
 * Provider-specific options for the OpenAI Chat Completions language model.
 *
 * Defined via `lazySchema` so the zod schema is built on demand rather than
 * at module load. All fields are optional; each maps to a request parameter
 * of the Chat Completions API.
 */
export const openaiChatLanguageModelOptions = lazySchema(() =>
  zodSchema(
    z.object({
      /**
       * Modify the likelihood of specified tokens appearing in the completion.
       *
       * Accepts a JSON object that maps tokens (specified by their token ID in
       * the GPT tokenizer) to an associated bias value from -100 to 100.
       */
      // keys arrive as strings but are coerced to numeric token IDs:
      logitBias: z.record(z.coerce.number<string>(), z.number()).optional(),

      /**
       * Return the log probabilities of the tokens.
       *
       * Setting to true will return the log probabilities of the tokens that
       * were generated.
       *
       * Setting to a number will return the log probabilities of the top n
       * tokens that were generated.
       */
      logprobs: z.union([z.boolean(), z.number()]).optional(),

      /**
       * Whether to enable parallel function calling during tool use. Default to true.
       */
      parallelToolCalls: z.boolean().optional(),

      /**
       * A unique identifier representing your end-user, which can help OpenAI to
       * monitor and detect abuse.
       */
      user: z.string().optional(),

      /**
       * Reasoning effort for reasoning models. Defaults to `medium`.
       */
      reasoningEffort: z
        .enum(['none', 'minimal', 'low', 'medium', 'high', 'xhigh'])
        .optional(),

      /**
       * Maximum number of completion tokens to generate. Useful for reasoning models.
       */
      maxCompletionTokens: z.number().optional(),

      /**
       * Whether to enable persistence in responses API.
       */
      store: z.boolean().optional(),

      /**
       * Metadata to associate with the request.
       */
      // OpenAI limits metadata keys to 64 chars and values to 512 chars:
      metadata: z.record(z.string().max(64), z.string().max(512)).optional(),

      /**
       * Parameters for prediction mode.
       */
      prediction: z.record(z.string(), z.any()).optional(),

      /**
       * Service tier for the request.
       * - 'auto': Default service tier. The request will be processed with the service tier configured in the
       *   Project settings. Unless otherwise configured, the Project will use 'default'.
       * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
       * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
       * - 'default': The request will be processed with the standard pricing and performance for the selected model.
       *
       * @default 'auto'
       */
      serviceTier: z.enum(['auto', 'flex', 'priority', 'default']).optional(),

      /**
       * Whether to use strict JSON schema validation.
       *
       * @default true
       */
      strictJsonSchema: z.boolean().optional(),

      /**
       * Controls the verbosity of the model's responses.
       * Lower values will result in more concise responses, while higher values will result in more verbose responses.
       */
      textVerbosity: z.enum(['low', 'medium', 'high']).optional(),

      /**
       * A cache key for prompt caching. Allows manual control over prompt caching behavior.
       * Useful for improving cache hit rates and working around automatic caching issues.
       */
      promptCacheKey: z.string().optional(),

      /**
       * The retention policy for the prompt cache.
       * - 'in_memory': Default. Standard prompt caching behavior.
       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
       *   Currently only available for 5.1 series models.
       *
       * @default 'in_memory'
       */
      promptCacheRetention: z.enum(['in_memory', '24h']).optional(),

      /**
       * A stable identifier used to help detect users of your application
       * that may be violating OpenAI's usage policies. The IDs should be a
       * string that uniquely identifies each user. We recommend hashing their
       * username or email address, in order to avoid sending us any identifying
       * information.
       */
      safetyIdentifier: z.string().optional(),

      /**
       * Override the system message mode for this model.
       * - 'system': Use the 'system' role for system messages (default for most models)
       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
       * - 'remove': Remove system messages entirely
       *
       * If not specified, the mode is automatically determined based on the model.
       */
      systemMessageMode: z.enum(['system', 'developer', 'remove']).optional(),

      /**
       * Force treating this model as a reasoning model.
       *
       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
       * where the model ID is not recognized by the SDK's allowlist.
       *
       * When enabled, the SDK applies reasoning-model parameter compatibility rules
       * and defaults `systemMessageMode` to `developer` unless overridden.
       */
      forceReasoning: z.boolean().optional(),
    }),
  ),
);

/**
 * The TypeScript type inferred from {@link openaiChatLanguageModelOptions}.
 */
export type OpenAIChatLanguageModelOptions = InferSchema<
  typeof openaiChatLanguageModelOptions
>;
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
import { describe, expect, it } from 'vitest';
|
|
2
|
+
import { prepareChatTools } from './openai-chat-prepare-tools';
|
|
3
|
+
|
|
4
|
+
// Unit tests for `prepareChatTools`: empty/absent tool lists, function-tool
// mapping, warnings for unsupported tool types, tool-choice translation, and
// pass-through of the optional `strict` JSON-schema flag.
describe('prepareChatTools', () => {
  // NOTE(review): the test name says "null" but passes `undefined`; the
  // implementation's `tools == null` check treats both alike.
  it('should return undefined tools and toolChoice when tools are null', () => {
    const result = prepareChatTools({
      tools: undefined,
    });

    expect(result).toEqual({
      tools: undefined,
      toolChoice: undefined,
      toolWarnings: [],
    });
  });

  it('should return undefined tools and toolChoice when tools are empty', () => {
    const result = prepareChatTools({
      tools: [],
    });

    expect(result).toEqual({
      tools: undefined,
      toolChoice: undefined,
      toolWarnings: [],
    });
  });

  it('should correctly prepare function tools', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'A test function',
          inputSchema: { type: 'object', properties: {} },
        },
      ],
    });

    expect(result).toMatchInlineSnapshot(`
      {
        "toolChoice": undefined,
        "toolWarnings": [],
        "tools": [
          {
            "function": {
              "description": "A test function",
              "name": "testFunction",
              "parameters": {
                "properties": {},
                "type": "object",
              },
            },
            "type": "function",
          },
        ],
      }
    `);
  });

  // non-function tools are dropped and surfaced as warnings
  it('should add warnings for unsupported tools', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'provider',
          id: 'openai.unsupported_tool',
          name: 'unsupported_tool',
          args: {},
        },
      ],
    });

    expect(result.tools).toEqual([]);
    expect(result.toolChoice).toBeUndefined();
    expect(result.toolWarnings).toMatchInlineSnapshot(`
      [
        {
          "feature": "tool type: provider",
          "type": "unsupported",
        },
      ]
    `);
  });

  it('should handle tool choice "auto"', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'Test',
          inputSchema: {},
        },
      ],
      toolChoice: { type: 'auto' },
    });
    expect(result.toolChoice).toEqual('auto');
  });

  it('should handle tool choice "required"', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'Test',
          inputSchema: {},
        },
      ],
      toolChoice: { type: 'required' },
    });
    expect(result.toolChoice).toEqual('required');
  });

  it('should handle tool choice "none"', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'Test',
          inputSchema: {},
        },
      ],
      toolChoice: { type: 'none' },
    });
    expect(result.toolChoice).toEqual('none');
  });

  // the 'tool' choice type maps to OpenAI's { type: 'function', function: { name } }
  it('should handle tool choice "tool"', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'Test',
          inputSchema: {},
        },
      ],
      toolChoice: { type: 'tool', toolName: 'testFunction' },
    });
    expect(result.toolChoice).toEqual({
      type: 'function',
      function: { name: 'testFunction' },
    });
  });

  it('should pass through strict mode when strict is true', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'A test function',
          inputSchema: { type: 'object', properties: {} },
          strict: true,
        },
      ],
    });

    expect(result).toMatchInlineSnapshot(`
      {
        "toolChoice": undefined,
        "toolWarnings": [],
        "tools": [
          {
            "function": {
              "description": "A test function",
              "name": "testFunction",
              "parameters": {
                "properties": {},
                "type": "object",
              },
              "strict": true,
            },
            "type": "function",
          },
        ],
      }
    `);
  });

  it('should pass through strict mode when strict is false', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'A test function',
          inputSchema: { type: 'object', properties: {} },
          strict: false,
        },
      ],
    });

    expect(result).toMatchInlineSnapshot(`
      {
        "toolChoice": undefined,
        "toolWarnings": [],
        "tools": [
          {
            "function": {
              "description": "A test function",
              "name": "testFunction",
              "parameters": {
                "properties": {},
                "type": "object",
              },
              "strict": false,
            },
            "type": "function",
          },
        ],
      }
    `);
  });

  // when strict is undefined, the key must be absent from the payload entirely
  it('should not include strict mode when strict is undefined', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'testFunction',
          description: 'A test function',
          inputSchema: { type: 'object', properties: {} },
        },
      ],
    });

    expect(result).toMatchInlineSnapshot(`
      {
        "toolChoice": undefined,
        "toolWarnings": [],
        "tools": [
          {
            "function": {
              "description": "A test function",
              "name": "testFunction",
              "parameters": {
                "properties": {},
                "type": "object",
              },
            },
            "type": "function",
          },
        ],
      }
    `);
  });

  it('should pass through strict mode for multiple tools with different strict settings', () => {
    const result = prepareChatTools({
      tools: [
        {
          type: 'function',
          name: 'strictTool',
          description: 'A strict tool',
          inputSchema: { type: 'object', properties: {} },
          strict: true,
        },
        {
          type: 'function',
          name: 'nonStrictTool',
          description: 'A non-strict tool',
          inputSchema: { type: 'object', properties: {} },
          strict: false,
        },
        {
          type: 'function',
          name: 'defaultTool',
          description: 'A tool without strict setting',
          inputSchema: { type: 'object', properties: {} },
        },
      ],
    });

    expect(result).toMatchInlineSnapshot(`
      {
        "toolChoice": undefined,
        "toolWarnings": [],
        "tools": [
          {
            "function": {
              "description": "A strict tool",
              "name": "strictTool",
              "parameters": {
                "properties": {},
                "type": "object",
              },
              "strict": true,
            },
            "type": "function",
          },
          {
            "function": {
              "description": "A non-strict tool",
              "name": "nonStrictTool",
              "parameters": {
                "properties": {},
                "type": "object",
              },
              "strict": false,
            },
            "type": "function",
          },
          {
            "function": {
              "description": "A tool without strict setting",
              "name": "defaultTool",
              "parameters": {
                "properties": {},
                "type": "object",
              },
            },
            "type": "function",
          },
        ],
      }
    `);
  });
});
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
import {
|
|
2
|
+
LanguageModelV3CallOptions,
|
|
3
|
+
SharedV3Warning,
|
|
4
|
+
UnsupportedFunctionalityError,
|
|
5
|
+
} from '@ai-sdk/provider';
|
|
6
|
+
import {
|
|
7
|
+
OpenAIChatToolChoice,
|
|
8
|
+
OpenAIChatFunctionTool,
|
|
9
|
+
} from './openai-chat-api';
|
|
10
|
+
|
|
11
|
+
export function prepareChatTools({
|
|
12
|
+
tools,
|
|
13
|
+
toolChoice,
|
|
14
|
+
}: {
|
|
15
|
+
tools: LanguageModelV3CallOptions['tools'];
|
|
16
|
+
toolChoice?: LanguageModelV3CallOptions['toolChoice'];
|
|
17
|
+
}): {
|
|
18
|
+
tools?: OpenAIChatFunctionTool[];
|
|
19
|
+
toolChoice?: OpenAIChatToolChoice;
|
|
20
|
+
toolWarnings: Array<SharedV3Warning>;
|
|
21
|
+
} {
|
|
22
|
+
// when the tools array is empty, change it to undefined to prevent errors:
|
|
23
|
+
tools = tools?.length ? tools : undefined;
|
|
24
|
+
|
|
25
|
+
const toolWarnings: SharedV3Warning[] = [];
|
|
26
|
+
|
|
27
|
+
if (tools == null) {
|
|
28
|
+
return { tools: undefined, toolChoice: undefined, toolWarnings };
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
const openaiTools: OpenAIChatFunctionTool[] = [];
|
|
32
|
+
|
|
33
|
+
for (const tool of tools) {
|
|
34
|
+
switch (tool.type) {
|
|
35
|
+
case 'function':
|
|
36
|
+
openaiTools.push({
|
|
37
|
+
type: 'function',
|
|
38
|
+
function: {
|
|
39
|
+
name: tool.name,
|
|
40
|
+
description: tool.description,
|
|
41
|
+
parameters: tool.inputSchema,
|
|
42
|
+
...(tool.strict != null ? { strict: tool.strict } : {}),
|
|
43
|
+
},
|
|
44
|
+
});
|
|
45
|
+
break;
|
|
46
|
+
default:
|
|
47
|
+
toolWarnings.push({
|
|
48
|
+
type: 'unsupported',
|
|
49
|
+
feature: `tool type: ${tool.type}`,
|
|
50
|
+
});
|
|
51
|
+
break;
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
if (toolChoice == null) {
|
|
56
|
+
return { tools: openaiTools, toolChoice: undefined, toolWarnings };
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
const type = toolChoice.type;
|
|
60
|
+
|
|
61
|
+
switch (type) {
|
|
62
|
+
case 'auto':
|
|
63
|
+
case 'none':
|
|
64
|
+
case 'required':
|
|
65
|
+
return { tools: openaiTools, toolChoice: type, toolWarnings };
|
|
66
|
+
case 'tool':
|
|
67
|
+
return {
|
|
68
|
+
tools: openaiTools,
|
|
69
|
+
toolChoice: {
|
|
70
|
+
type: 'function',
|
|
71
|
+
function: {
|
|
72
|
+
name: toolChoice.toolName,
|
|
73
|
+
},
|
|
74
|
+
},
|
|
75
|
+
toolWarnings,
|
|
76
|
+
};
|
|
77
|
+
default: {
|
|
78
|
+
const _exhaustiveCheck: never = type;
|
|
79
|
+
throw new UnsupportedFunctionalityError({
|
|
80
|
+
functionality: `tool choice type: ${_exhaustiveCheck}`,
|
|
81
|
+
});
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
}
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
/** A prompt in the OpenAI Chat Completions message format. */
export type OpenAIChatPrompt = Array<ChatCompletionMessage>;

/** Union of all message roles supported by the Chat Completions mapping. */
export type ChatCompletionMessage =
  | ChatCompletionSystemMessage
  | ChatCompletionDeveloperMessage
  | ChatCompletionUserMessage
  | ChatCompletionAssistantMessage
  | ChatCompletionToolMessage;

/** System message; text content only. */
export interface ChatCompletionSystemMessage {
  role: 'system';
  content: string;
}

/** Developer message; alternative system-style role, text content only. */
export interface ChatCompletionDeveloperMessage {
  role: 'developer';
  content: string;
}

/** User message; content is plain text or a list of multimodal parts. */
export interface ChatCompletionUserMessage {
  role: 'user';
  content: string | Array<ChatCompletionContentPart>;
}

/** A single content part of a multimodal user message. */
export type ChatCompletionContentPart =
  | ChatCompletionContentPartText
  | ChatCompletionContentPartImage
  | ChatCompletionContentPartInputAudio
  | ChatCompletionContentPartFile;

/** Plain-text content part. */
export interface ChatCompletionContentPartText {
  type: 'text';
  text: string;
}

/** Image content part, referenced by URL. */
export interface ChatCompletionContentPartImage {
  type: 'image_url';
  image_url: { url: string };
}

/** Input-audio content part; `data` carries the audio payload in the given format. */
export interface ChatCompletionContentPartInputAudio {
  type: 'input_audio';
  input_audio: { data: string; format: 'wav' | 'mp3' };
}

/** File content part: either inline file data with a filename, or a reference by file ID. */
export interface ChatCompletionContentPartFile {
  type: 'file';
  file: { filename: string; file_data: string } | { file_id: string };
}

/** Assistant message; may carry text content and/or tool calls. */
export interface ChatCompletionAssistantMessage {
  role: 'assistant';
  content?: string | null;
  tool_calls?: Array<ChatCompletionMessageToolCall>;
}

/** A function tool call issued by the assistant; `arguments` is the raw argument string. */
export interface ChatCompletionMessageToolCall {
  type: 'function';
  id: string;
  function: {
    arguments: string;
    name: string;
  };
}

/** Tool result message, correlated to its originating call via `tool_call_id`. */
export interface ChatCompletionToolMessage {
  role: 'tool';
  content: string;
  tool_call_id: string;
}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import { LanguageModelV3Usage } from '@ai-sdk/provider';
|
|
2
|
+
|
|
3
|
+
export type OpenAICompletionUsage = {
|
|
4
|
+
prompt_tokens?: number | null;
|
|
5
|
+
completion_tokens?: number | null;
|
|
6
|
+
total_tokens?: number | null;
|
|
7
|
+
};
|
|
8
|
+
|
|
9
|
+
export function convertOpenAICompletionUsage(
|
|
10
|
+
usage: OpenAICompletionUsage | undefined | null,
|
|
11
|
+
): LanguageModelV3Usage {
|
|
12
|
+
if (usage == null) {
|
|
13
|
+
return {
|
|
14
|
+
inputTokens: {
|
|
15
|
+
total: undefined,
|
|
16
|
+
noCache: undefined,
|
|
17
|
+
cacheRead: undefined,
|
|
18
|
+
cacheWrite: undefined,
|
|
19
|
+
},
|
|
20
|
+
outputTokens: {
|
|
21
|
+
total: undefined,
|
|
22
|
+
text: undefined,
|
|
23
|
+
reasoning: undefined,
|
|
24
|
+
},
|
|
25
|
+
raw: undefined,
|
|
26
|
+
};
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
const promptTokens = usage.prompt_tokens ?? 0;
|
|
30
|
+
const completionTokens = usage.completion_tokens ?? 0;
|
|
31
|
+
|
|
32
|
+
return {
|
|
33
|
+
inputTokens: {
|
|
34
|
+
total: usage.prompt_tokens ?? undefined,
|
|
35
|
+
noCache: promptTokens,
|
|
36
|
+
cacheRead: undefined,
|
|
37
|
+
cacheWrite: undefined,
|
|
38
|
+
},
|
|
39
|
+
outputTokens: {
|
|
40
|
+
total: usage.completion_tokens ?? undefined,
|
|
41
|
+
text: completionTokens,
|
|
42
|
+
reasoning: undefined,
|
|
43
|
+
},
|
|
44
|
+
raw: usage,
|
|
45
|
+
};
|
|
46
|
+
}
|