@ai-sdk/openai 3.0.14 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/package.json +6 -5
  5. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  6. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  7. package/src/chat/convert-openai-chat-usage.ts +57 -0
  8. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  9. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  10. package/src/chat/get-response-metadata.ts +15 -0
  11. package/src/chat/map-openai-finish-reason.ts +19 -0
  12. package/src/chat/openai-chat-api.ts +198 -0
  13. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  14. package/src/chat/openai-chat-language-model.ts +700 -0
  15. package/src/chat/openai-chat-options.ts +186 -0
  16. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  17. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  18. package/src/chat/openai-chat-prompt.ts +70 -0
  19. package/src/completion/convert-openai-completion-usage.ts +46 -0
  20. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  21. package/src/completion/get-response-metadata.ts +15 -0
  22. package/src/completion/map-openai-finish-reason.ts +19 -0
  23. package/src/completion/openai-completion-api.ts +81 -0
  24. package/src/completion/openai-completion-language-model.test.ts +752 -0
  25. package/src/completion/openai-completion-language-model.ts +336 -0
  26. package/src/completion/openai-completion-options.ts +58 -0
  27. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  28. package/src/embedding/openai-embedding-api.ts +13 -0
  29. package/src/embedding/openai-embedding-model.test.ts +146 -0
  30. package/src/embedding/openai-embedding-model.ts +95 -0
  31. package/src/embedding/openai-embedding-options.ts +30 -0
  32. package/src/image/openai-image-api.ts +35 -0
  33. package/src/image/openai-image-model.test.ts +722 -0
  34. package/src/image/openai-image-model.ts +305 -0
  35. package/src/image/openai-image-options.ts +28 -0
  36. package/src/index.ts +9 -0
  37. package/src/internal/index.ts +19 -0
  38. package/src/openai-config.ts +18 -0
  39. package/src/openai-error.test.ts +34 -0
  40. package/src/openai-error.ts +22 -0
  41. package/src/openai-language-model-capabilities.test.ts +93 -0
  42. package/src/openai-language-model-capabilities.ts +54 -0
  43. package/src/openai-provider.test.ts +98 -0
  44. package/src/openai-provider.ts +270 -0
  45. package/src/openai-tools.ts +114 -0
  46. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  47. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  48. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  49. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  50. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  51. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  52. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  53. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  54. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  55. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  56. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  57. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  58. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  59. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  60. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  61. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  62. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  63. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  64. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  65. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  66. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  67. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  68. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  71. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  72. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  73. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  74. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  75. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  76. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  77. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  78. package/src/responses/convert-openai-responses-usage.ts +53 -0
  79. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  80. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  81. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  82. package/src/responses/openai-responses-api.test.ts +89 -0
  83. package/src/responses/openai-responses-api.ts +1086 -0
  84. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  85. package/src/responses/openai-responses-language-model.ts +1932 -0
  86. package/src/responses/openai-responses-options.ts +312 -0
  87. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  88. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  89. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  90. package/src/speech/openai-speech-api.ts +38 -0
  91. package/src/speech/openai-speech-model.test.ts +202 -0
  92. package/src/speech/openai-speech-model.ts +137 -0
  93. package/src/speech/openai-speech-options.ts +22 -0
  94. package/src/tool/apply-patch.ts +141 -0
  95. package/src/tool/code-interpreter.ts +104 -0
  96. package/src/tool/file-search.ts +145 -0
  97. package/src/tool/image-generation.ts +126 -0
  98. package/src/tool/local-shell.test-d.ts +20 -0
  99. package/src/tool/local-shell.ts +72 -0
  100. package/src/tool/mcp.ts +125 -0
  101. package/src/tool/shell.ts +85 -0
  102. package/src/tool/web-search-preview.ts +139 -0
  103. package/src/tool/web-search.test-d.ts +13 -0
  104. package/src/tool/web-search.ts +179 -0
  105. package/src/transcription/openai-transcription-api.ts +37 -0
  106. package/src/transcription/openai-transcription-model.test.ts +507 -0
  107. package/src/transcription/openai-transcription-model.ts +232 -0
  108. package/src/transcription/openai-transcription-options.ts +50 -0
  109. package/src/transcription/transcription-test.mp3 +0 -0
  110. package/src/version.ts +6 -0
@@ -0,0 +1,93 @@
1
+ import {
2
+ InvalidPromptError,
3
+ LanguageModelV3Prompt,
4
+ UnsupportedFunctionalityError,
5
+ } from '@ai-sdk/provider';
6
+
7
+ export function convertToOpenAICompletionPrompt({
8
+ prompt,
9
+ user = 'user',
10
+ assistant = 'assistant',
11
+ }: {
12
+ prompt: LanguageModelV3Prompt;
13
+ user?: string;
14
+ assistant?: string;
15
+ }): {
16
+ prompt: string;
17
+ stopSequences?: string[];
18
+ } {
19
+ // transform to a chat message format:
20
+ let text = '';
21
+
22
+ // if first message is a system message, add it to the text:
23
+ if (prompt[0].role === 'system') {
24
+ text += `${prompt[0].content}\n\n`;
25
+ prompt = prompt.slice(1);
26
+ }
27
+
28
+ for (const { role, content } of prompt) {
29
+ switch (role) {
30
+ case 'system': {
31
+ throw new InvalidPromptError({
32
+ message: 'Unexpected system message in prompt: ${content}',
33
+ prompt,
34
+ });
35
+ }
36
+
37
+ case 'user': {
38
+ const userMessage = content
39
+ .map(part => {
40
+ switch (part.type) {
41
+ case 'text': {
42
+ return part.text;
43
+ }
44
+ }
45
+ })
46
+ .filter(Boolean)
47
+ .join('');
48
+
49
+ text += `${user}:\n${userMessage}\n\n`;
50
+ break;
51
+ }
52
+
53
+ case 'assistant': {
54
+ const assistantMessage = content
55
+ .map(part => {
56
+ switch (part.type) {
57
+ case 'text': {
58
+ return part.text;
59
+ }
60
+ case 'tool-call': {
61
+ throw new UnsupportedFunctionalityError({
62
+ functionality: 'tool-call messages',
63
+ });
64
+ }
65
+ }
66
+ })
67
+ .join('');
68
+
69
+ text += `${assistant}:\n${assistantMessage}\n\n`;
70
+ break;
71
+ }
72
+
73
+ case 'tool': {
74
+ throw new UnsupportedFunctionalityError({
75
+ functionality: 'tool messages',
76
+ });
77
+ }
78
+
79
+ default: {
80
+ const _exhaustiveCheck: never = role;
81
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
82
+ }
83
+ }
84
+ }
85
+
86
+ // Assistant message prefix:
87
+ text += `${assistant}:\n`;
88
+
89
+ return {
90
+ prompt: text,
91
+ stopSequences: [`\n${user}:`],
92
+ };
93
+ }
@@ -0,0 +1,15 @@
1
+ export function getResponseMetadata({
2
+ id,
3
+ model,
4
+ created,
5
+ }: {
6
+ id?: string | undefined | null;
7
+ created?: number | undefined | null;
8
+ model?: string | undefined | null;
9
+ }) {
10
+ return {
11
+ id: id ?? undefined,
12
+ modelId: model ?? undefined,
13
+ timestamp: created != null ? new Date(created * 1000) : undefined,
14
+ };
15
+ }
@@ -0,0 +1,19 @@
1
+ import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
2
+
3
+ export function mapOpenAIFinishReason(
4
+ finishReason: string | null | undefined,
5
+ ): LanguageModelV3FinishReason['unified'] {
6
+ switch (finishReason) {
7
+ case 'stop':
8
+ return 'stop';
9
+ case 'length':
10
+ return 'length';
11
+ case 'content_filter':
12
+ return 'content-filter';
13
+ case 'function_call':
14
+ case 'tool_calls':
15
+ return 'tool-calls';
16
+ default:
17
+ return 'other';
18
+ }
19
+ }
@@ -0,0 +1,81 @@
1
import { z } from 'zod/v4';
import { openaiErrorDataSchema } from '../openai-error';
import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';

// limited version of the schema, focused on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency

// Validates a non-streaming /completions response. Top-level metadata
// fields and usage are nullish so partial payloads still parse.
export const openaiCompletionResponseSchema = lazySchema(() =>
  zodSchema(
    z.object({
      id: z.string().nullish(),
      created: z.number().nullish(),
      model: z.string().nullish(),
      choices: z.array(
        z.object({
          text: z.string(),
          // required here (final response), but nullish in the chunk schema below
          finish_reason: z.string(),
          logprobs: z
            .object({
              tokens: z.array(z.string()),
              token_logprobs: z.array(z.number()),
              top_logprobs: z.array(z.record(z.string(), z.number())).nullish(),
            })
            .nullish(),
        }),
      ),
      usage: z
        .object({
          prompt_tokens: z.number(),
          completion_tokens: z.number(),
          total_tokens: z.number(),
        })
        .nullish(),
    }),
  ),
);

// limited version of the schema, focused on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency

// Validates a single streaming chunk. The union also accepts the OpenAI
// error payload shape, since errors can be delivered mid-stream.
export const openaiCompletionChunkSchema = lazySchema(() =>
  zodSchema(
    z.union([
      z.object({
        id: z.string().nullish(),
        created: z.number().nullish(),
        model: z.string().nullish(),
        choices: z.array(
          z.object({
            text: z.string(),
            // nullish: only the final chunk of a choice carries a finish reason
            finish_reason: z.string().nullish(),
            index: z.number(),
            logprobs: z
              .object({
                tokens: z.array(z.string()),
                token_logprobs: z.array(z.number()),
                top_logprobs: z
                  .array(z.record(z.string(), z.number()))
                  .nullish(),
              })
              .nullish(),
          }),
        ),
        usage: z
          .object({
            prompt_tokens: z.number(),
            completion_tokens: z.number(),
            total_tokens: z.number(),
          })
          .nullish(),
      }),
      openaiErrorDataSchema,
    ]),
  ),
);

// Inferred TypeScript type of a parsed streaming chunk (data or error payload).
export type OpenAICompletionChunk = InferSchema<
  typeof openaiCompletionChunkSchema
>;

// Inferred TypeScript type of a parsed non-streaming completion response.
export type OpenAICompletionResponse = InferSchema<
  typeof openaiCompletionResponseSchema
>;
+ >;