@ai-sdk/openai 3.0.14 → 3.0.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/package.json +6 -5
- package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
- package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
- package/src/chat/convert-openai-chat-usage.ts +57 -0
- package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
- package/src/chat/convert-to-openai-chat-messages.ts +225 -0
- package/src/chat/get-response-metadata.ts +15 -0
- package/src/chat/map-openai-finish-reason.ts +19 -0
- package/src/chat/openai-chat-api.ts +198 -0
- package/src/chat/openai-chat-language-model.test.ts +3496 -0
- package/src/chat/openai-chat-language-model.ts +700 -0
- package/src/chat/openai-chat-options.ts +186 -0
- package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
- package/src/chat/openai-chat-prepare-tools.ts +84 -0
- package/src/chat/openai-chat-prompt.ts +70 -0
- package/src/completion/convert-openai-completion-usage.ts +46 -0
- package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
- package/src/completion/get-response-metadata.ts +15 -0
- package/src/completion/map-openai-finish-reason.ts +19 -0
- package/src/completion/openai-completion-api.ts +81 -0
- package/src/completion/openai-completion-language-model.test.ts +752 -0
- package/src/completion/openai-completion-language-model.ts +336 -0
- package/src/completion/openai-completion-options.ts +58 -0
- package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
- package/src/embedding/openai-embedding-api.ts +13 -0
- package/src/embedding/openai-embedding-model.test.ts +146 -0
- package/src/embedding/openai-embedding-model.ts +95 -0
- package/src/embedding/openai-embedding-options.ts +30 -0
- package/src/image/openai-image-api.ts +35 -0
- package/src/image/openai-image-model.test.ts +722 -0
- package/src/image/openai-image-model.ts +305 -0
- package/src/image/openai-image-options.ts +28 -0
- package/src/index.ts +9 -0
- package/src/internal/index.ts +19 -0
- package/src/openai-config.ts +18 -0
- package/src/openai-error.test.ts +34 -0
- package/src/openai-error.ts +22 -0
- package/src/openai-language-model-capabilities.test.ts +93 -0
- package/src/openai-language-model-capabilities.ts +54 -0
- package/src/openai-provider.test.ts +98 -0
- package/src/openai-provider.ts +270 -0
- package/src/openai-tools.ts +114 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
- package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
- package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
- package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
- package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
- package/src/responses/__fixtures__/openai-error.1.json +8 -0
- package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
- package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
- package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
- package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
- package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
- package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
- package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
- package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
- package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
- package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
- package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
- package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
- package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
- package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
- package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
- package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
- package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
- package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
- package/src/responses/convert-openai-responses-usage.ts +53 -0
- package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
- package/src/responses/convert-to-openai-responses-input.ts +578 -0
- package/src/responses/map-openai-responses-finish-reason.ts +22 -0
- package/src/responses/openai-responses-api.test.ts +89 -0
- package/src/responses/openai-responses-api.ts +1086 -0
- package/src/responses/openai-responses-language-model.test.ts +6927 -0
- package/src/responses/openai-responses-language-model.ts +1932 -0
- package/src/responses/openai-responses-options.ts +312 -0
- package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
- package/src/responses/openai-responses-prepare-tools.ts +264 -0
- package/src/responses/openai-responses-provider-metadata.ts +39 -0
- package/src/speech/openai-speech-api.ts +38 -0
- package/src/speech/openai-speech-model.test.ts +202 -0
- package/src/speech/openai-speech-model.ts +137 -0
- package/src/speech/openai-speech-options.ts +22 -0
- package/src/tool/apply-patch.ts +141 -0
- package/src/tool/code-interpreter.ts +104 -0
- package/src/tool/file-search.ts +145 -0
- package/src/tool/image-generation.ts +126 -0
- package/src/tool/local-shell.test-d.ts +20 -0
- package/src/tool/local-shell.ts +72 -0
- package/src/tool/mcp.ts +125 -0
- package/src/tool/shell.ts +85 -0
- package/src/tool/web-search-preview.ts +139 -0
- package/src/tool/web-search.test-d.ts +13 -0
- package/src/tool/web-search.ts +179 -0
- package/src/transcription/openai-transcription-api.ts +37 -0
- package/src/transcription/openai-transcription-model.test.ts +507 -0
- package/src/transcription/openai-transcription-model.ts +232 -0
- package/src/transcription/openai-transcription-options.ts +50 -0
- package/src/transcription/transcription-test.mp3 +0 -0
- package/src/version.ts +6 -0
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
import {
|
|
2
|
+
InvalidPromptError,
|
|
3
|
+
LanguageModelV3Prompt,
|
|
4
|
+
UnsupportedFunctionalityError,
|
|
5
|
+
} from '@ai-sdk/provider';
|
|
6
|
+
|
|
7
|
+
export function convertToOpenAICompletionPrompt({
|
|
8
|
+
prompt,
|
|
9
|
+
user = 'user',
|
|
10
|
+
assistant = 'assistant',
|
|
11
|
+
}: {
|
|
12
|
+
prompt: LanguageModelV3Prompt;
|
|
13
|
+
user?: string;
|
|
14
|
+
assistant?: string;
|
|
15
|
+
}): {
|
|
16
|
+
prompt: string;
|
|
17
|
+
stopSequences?: string[];
|
|
18
|
+
} {
|
|
19
|
+
// transform to a chat message format:
|
|
20
|
+
let text = '';
|
|
21
|
+
|
|
22
|
+
// if first message is a system message, add it to the text:
|
|
23
|
+
if (prompt[0].role === 'system') {
|
|
24
|
+
text += `${prompt[0].content}\n\n`;
|
|
25
|
+
prompt = prompt.slice(1);
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
for (const { role, content } of prompt) {
|
|
29
|
+
switch (role) {
|
|
30
|
+
case 'system': {
|
|
31
|
+
throw new InvalidPromptError({
|
|
32
|
+
message: 'Unexpected system message in prompt: ${content}',
|
|
33
|
+
prompt,
|
|
34
|
+
});
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
case 'user': {
|
|
38
|
+
const userMessage = content
|
|
39
|
+
.map(part => {
|
|
40
|
+
switch (part.type) {
|
|
41
|
+
case 'text': {
|
|
42
|
+
return part.text;
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
})
|
|
46
|
+
.filter(Boolean)
|
|
47
|
+
.join('');
|
|
48
|
+
|
|
49
|
+
text += `${user}:\n${userMessage}\n\n`;
|
|
50
|
+
break;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
case 'assistant': {
|
|
54
|
+
const assistantMessage = content
|
|
55
|
+
.map(part => {
|
|
56
|
+
switch (part.type) {
|
|
57
|
+
case 'text': {
|
|
58
|
+
return part.text;
|
|
59
|
+
}
|
|
60
|
+
case 'tool-call': {
|
|
61
|
+
throw new UnsupportedFunctionalityError({
|
|
62
|
+
functionality: 'tool-call messages',
|
|
63
|
+
});
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
})
|
|
67
|
+
.join('');
|
|
68
|
+
|
|
69
|
+
text += `${assistant}:\n${assistantMessage}\n\n`;
|
|
70
|
+
break;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
case 'tool': {
|
|
74
|
+
throw new UnsupportedFunctionalityError({
|
|
75
|
+
functionality: 'tool messages',
|
|
76
|
+
});
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
default: {
|
|
80
|
+
const _exhaustiveCheck: never = role;
|
|
81
|
+
throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// Assistant message prefix:
|
|
87
|
+
text += `${assistant}:\n`;
|
|
88
|
+
|
|
89
|
+
return {
|
|
90
|
+
prompt: text,
|
|
91
|
+
stopSequences: [`\n${user}:`],
|
|
92
|
+
};
|
|
93
|
+
}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
export function getResponseMetadata({
|
|
2
|
+
id,
|
|
3
|
+
model,
|
|
4
|
+
created,
|
|
5
|
+
}: {
|
|
6
|
+
id?: string | undefined | null;
|
|
7
|
+
created?: number | undefined | null;
|
|
8
|
+
model?: string | undefined | null;
|
|
9
|
+
}) {
|
|
10
|
+
return {
|
|
11
|
+
id: id ?? undefined,
|
|
12
|
+
modelId: model ?? undefined,
|
|
13
|
+
timestamp: created != null ? new Date(created * 1000) : undefined,
|
|
14
|
+
};
|
|
15
|
+
}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
|
|
2
|
+
|
|
3
|
+
export function mapOpenAIFinishReason(
|
|
4
|
+
finishReason: string | null | undefined,
|
|
5
|
+
): LanguageModelV3FinishReason['unified'] {
|
|
6
|
+
switch (finishReason) {
|
|
7
|
+
case 'stop':
|
|
8
|
+
return 'stop';
|
|
9
|
+
case 'length':
|
|
10
|
+
return 'length';
|
|
11
|
+
case 'content_filter':
|
|
12
|
+
return 'content-filter';
|
|
13
|
+
case 'function_call':
|
|
14
|
+
case 'tool_calls':
|
|
15
|
+
return 'tool-calls';
|
|
16
|
+
default:
|
|
17
|
+
return 'other';
|
|
18
|
+
}
|
|
19
|
+
}
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
import { z } from 'zod/v4';
import { openaiErrorDataSchema } from '../openai-error';
import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';

// Limited version of the schema, focused on what is needed for the
// implementation. This approach limits breakages when the API changes and
// increases efficiency.
// Validates a non-streaming completion response: choices with text,
// finish_reason, optional logprobs, plus optional token usage.
export const openaiCompletionResponseSchema = lazySchema(() =>
  zodSchema(
    z.object({
      id: z.string().nullish(),
      created: z.number().nullish(),
      model: z.string().nullish(),
      choices: z.array(
        z.object({
          text: z.string(),
          finish_reason: z.string(),
          logprobs: z
            .object({
              tokens: z.array(z.string()),
              token_logprobs: z.array(z.number()),
              top_logprobs: z.array(z.record(z.string(), z.number())).nullish(),
            })
            .nullish(),
        }),
      ),
      usage: z
        .object({
          prompt_tokens: z.number(),
          completion_tokens: z.number(),
          total_tokens: z.number(),
        })
        .nullish(),
    }),
  ),
);

// Limited version of the schema, focused on what is needed for the
// implementation. This approach limits breakages when the API changes and
// increases efficiency.
// Validates a single streaming chunk. Unlike the full response schema,
// finish_reason is nullish (only present on the final chunk) and each choice
// carries an index. The union also accepts the OpenAI error payload shape,
// since errors can arrive in-stream.
export const openaiCompletionChunkSchema = lazySchema(() =>
  zodSchema(
    z.union([
      z.object({
        id: z.string().nullish(),
        created: z.number().nullish(),
        model: z.string().nullish(),
        choices: z.array(
          z.object({
            text: z.string(),
            finish_reason: z.string().nullish(),
            index: z.number(),
            logprobs: z
              .object({
                tokens: z.array(z.string()),
                token_logprobs: z.array(z.number()),
                top_logprobs: z
                  .array(z.record(z.string(), z.number()))
                  .nullish(),
              })
              .nullish(),
          }),
        ),
        usage: z
          .object({
            prompt_tokens: z.number(),
            completion_tokens: z.number(),
            total_tokens: z.number(),
          })
          .nullish(),
      }),
      openaiErrorDataSchema,
    ]),
  ),
);

export type OpenAICompletionChunk = InferSchema<
  typeof openaiCompletionChunkSchema
>;

export type OpenAICompletionResponse = InferSchema<
  typeof openaiCompletionResponseSchema
>;
|