@ai-sdk/openai 4.0.0-beta.2 → 4.0.0-beta.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. package/CHANGELOG.md +225 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.mts +124 -35
  4. package/dist/index.d.ts +124 -35
  5. package/dist/index.js +1319 -895
  6. package/dist/index.js.map +1 -1
  7. package/dist/index.mjs +1275 -844
  8. package/dist/index.mjs.map +1 -1
  9. package/dist/internal/index.d.mts +102 -36
  10. package/dist/internal/index.d.ts +102 -36
  11. package/dist/internal/index.js +1348 -934
  12. package/dist/internal/index.js.map +1 -1
  13. package/dist/internal/index.mjs +1332 -911
  14. package/dist/internal/index.mjs.map +1 -1
  15. package/docs/03-openai.mdx +274 -9
  16. package/package.json +3 -5
  17. package/src/chat/convert-openai-chat-usage.ts +2 -2
  18. package/src/chat/convert-to-openai-chat-messages.ts +5 -5
  19. package/src/chat/map-openai-finish-reason.ts +2 -2
  20. package/src/chat/openai-chat-language-model.ts +32 -24
  21. package/src/chat/openai-chat-options.ts +5 -0
  22. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  23. package/src/completion/convert-openai-completion-usage.ts +2 -2
  24. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  25. package/src/completion/map-openai-finish-reason.ts +2 -2
  26. package/src/completion/openai-completion-language-model.ts +20 -20
  27. package/src/embedding/openai-embedding-model.ts +5 -5
  28. package/src/image/openai-image-model.ts +9 -9
  29. package/src/index.ts +1 -0
  30. package/src/openai-language-model-capabilities.ts +3 -2
  31. package/src/openai-provider.ts +21 -21
  32. package/src/openai-tools.ts +12 -1
  33. package/src/responses/convert-openai-responses-usage.ts +2 -2
  34. package/src/responses/convert-to-openai-responses-input.ts +159 -12
  35. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  36. package/src/responses/openai-responses-api.ts +136 -2
  37. package/src/responses/openai-responses-language-model.ts +233 -37
  38. package/src/responses/openai-responses-options.ts +24 -2
  39. package/src/responses/openai-responses-prepare-tools.ts +34 -9
  40. package/src/responses/openai-responses-provider-metadata.ts +10 -0
  41. package/src/speech/openai-speech-model.ts +7 -7
  42. package/src/tool/custom.ts +0 -6
  43. package/src/tool/tool-search.ts +98 -0
  44. package/src/transcription/openai-transcription-model.ts +8 -8
package/CHANGELOG.md CHANGED
@@ -1,5 +1,208 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 4.0.0-beta.20
4
+
5
+ ### Patch Changes
6
+
7
+ - 38fc777: Add AI Gateway hint to provider READMEs
8
+
9
+ ## 4.0.0-beta.19
10
+
11
+ ### Patch Changes
12
+
13
+ - Updated dependencies [2e17091]
14
+ - @ai-sdk/provider-utils@5.0.0-beta.9
15
+
16
+ ## 4.0.0-beta.18
17
+
18
+ ### Patch Changes
19
+
20
+ - Updated dependencies [986c6fd]
21
+ - Updated dependencies [493295c]
22
+ - @ai-sdk/provider-utils@5.0.0-beta.8
23
+
24
+ ## 4.0.0-beta.17
25
+
26
+ ### Patch Changes
27
+
28
+ - 817a1a6: fix(openai): support file-url parts in tool output content
29
+
30
+ ## 4.0.0-beta.16
31
+
32
+ ### Patch Changes
33
+
34
+ - 1f509d4: fix(ai): force template check on 'kind' param
35
+ - Updated dependencies [1f509d4]
36
+ - @ai-sdk/provider-utils@5.0.0-beta.7
37
+ - @ai-sdk/provider@4.0.0-beta.5
38
+
39
+ ## 4.0.0-beta.15
40
+
41
+ ### Patch Changes
42
+
43
+ - 365da1a: Add `gpt-5.4-mini`, `gpt-5.4-mini-2026-03-17`, `gpt-5.4-nano`, and `gpt-5.4-nano-2026-03-17` models.
44
+
45
+ ## 4.0.0-beta.14
46
+
47
+ ### Patch Changes
48
+
49
+ - e6376c2: fix(openai): preserve raw finish reason for failed responses stream events
50
+
51
+ Handle `response.failed` chunks in Responses API streaming so `finishReason.raw` is preserved from `incomplete_details.reason` (e.g. `max_output_tokens`), and map failed-without-reason cases to unified `error` instead of `other`.
52
+
53
+ ## 4.0.0-beta.13
54
+
55
+ ### Patch Changes
56
+
57
+ - 3887c70: feat(provider): add new top-level reasoning parameter to spec and support it in `generateText` and `streamText`
58
+ - Updated dependencies [3887c70]
59
+ - @ai-sdk/provider-utils@5.0.0-beta.6
60
+ - @ai-sdk/provider@4.0.0-beta.4
61
+
62
+ ## 4.0.0-beta.12
63
+
64
+ ### Patch Changes
65
+
66
+ - d9a1e9a: feat(openai): add server side compaction for openai
67
+
68
+ ## 4.0.0-beta.11
69
+
70
+ ### Patch Changes
71
+
72
+ - Updated dependencies [776b617]
73
+ - @ai-sdk/provider-utils@5.0.0-beta.5
74
+ - @ai-sdk/provider@4.0.0-beta.3
75
+
76
+ ## 4.0.0-beta.10
77
+
78
+ ### Major Changes
79
+
80
+ - 61753c3: ### `@ai-sdk/openai`: remove redundant `name` argument from `openai.tools.customTool()`
81
+
82
+ `openai.tools.customTool()` no longer accepts a `name` field. the tool name is now derived from the sdk tool key (the object key in the `tools` object).
83
+
84
+ migration: remove the `name` property from `customTool()` calls. the object key is now used as the tool name sent to the openai api.
85
+
86
+ before:
87
+
88
+ ```ts
89
+ tools: {
90
+ write_sql: openai.tools.customTool({
91
+ name: 'write_sql',
92
+ description: '...',
93
+ }),
94
+ }
95
+ ```
96
+
97
+ after:
98
+
99
+ ```ts
100
+ tools: {
101
+ write_sql: openai.tools.customTool({
102
+ description: '...',
103
+ }),
104
+ }
105
+ ```
106
+
107
+ ### `@ai-sdk/provider-utils`: `createToolNameMapping()` no longer accepts the `resolveProviderToolName` parameter
108
+
109
+ before: tool name can be set dynamically
110
+
111
+ ```ts
112
+ const toolNameMapping = createToolNameMapping({
113
+ tools,
114
+ providerToolNames: {
115
+ "openai.code_interpreter": "code_interpreter",
116
+ "openai.file_search": "file_search",
117
+ "openai.image_generation": "image_generation",
118
+ "openai.local_shell": "local_shell",
119
+ "openai.shell": "shell",
120
+ "openai.web_search": "web_search",
121
+ "openai.web_search_preview": "web_search_preview",
122
+ "openai.mcp": "mcp",
123
+ "openai.apply_patch": "apply_patch",
124
+ },
125
+ resolveProviderToolName: (tool) =>
126
+ tool.id === "openai.custom"
127
+ ? (tool.args as { name?: string }).name
128
+ : undefined,
129
+ });
130
+ ```
131
+
132
+ after: tool name is static based on `tools` keys
133
+
134
+ ```
135
+ const toolNameMapping = createToolNameMapping({
136
+ tools,
137
+ providerToolNames: {
138
+ 'openai.code_interpreter': 'code_interpreter',
139
+ 'openai.file_search': 'file_search',
140
+ 'openai.image_generation': 'image_generation',
141
+ 'openai.local_shell': 'local_shell',
142
+ 'openai.shell': 'shell',
143
+ 'openai.web_search': 'web_search',
144
+ 'openai.web_search_preview': 'web_search_preview',
145
+ 'openai.mcp': 'mcp',
146
+ 'openai.apply_patch': 'apply_patch',
147
+ }
148
+ });
149
+ ```
150
+
151
+ ### Patch Changes
152
+
153
+ - Updated dependencies [61753c3]
154
+ - @ai-sdk/provider-utils@5.0.0-beta.4
155
+
156
+ ## 4.0.0-beta.9
157
+
158
+ ### Patch Changes
159
+
160
+ - 156cdf0: feat(openai): add new tool search tool
161
+
162
+ ## 4.0.0-beta.8
163
+
164
+ ### Patch Changes
165
+
166
+ - Updated dependencies [f7d4f01]
167
+ - @ai-sdk/provider-utils@5.0.0-beta.3
168
+ - @ai-sdk/provider@4.0.0-beta.2
169
+
170
+ ## 4.0.0-beta.7
171
+
172
+ ### Patch Changes
173
+
174
+ - Updated dependencies [5c2a5a2]
175
+ - @ai-sdk/provider@4.0.0-beta.1
176
+ - @ai-sdk/provider-utils@5.0.0-beta.2
177
+
178
+ ## 4.0.0-beta.6
179
+
180
+ ### Patch Changes
181
+
182
+ - 83f9d04: feat(openai): upgrade v3 specs to v4
183
+
184
+ ## 4.0.0-beta.5
185
+
186
+ ### Patch Changes
187
+
188
+ - ac18f89: feat(provider/openai): add `gpt-5.3-chat-latest`
189
+
190
+ ## 4.0.0-beta.4
191
+
192
+ ### Patch Changes
193
+
194
+ - a71d345: fix(provider/openai): drop reasoning parts without encrypted content when store: false
195
+
196
+ ## 4.0.0-beta.3
197
+
198
+ ### Patch Changes
199
+
200
+ - 45b3d76: fix(security): prevent streaming tool calls from finalizing on parsable partial JSON
201
+
202
+ Streaming tool call arguments were finalized using `isParsableJson()` as a heuristic for completion. If partial accumulated JSON happened to be valid JSON before all chunks arrived, the tool call would be executed with incomplete arguments. Tool call finalization now only occurs in `flush()` after the stream is fully consumed.
203
+
204
+ - f7295cb: revert incorrect fix https://github.com/vercel/ai/pull/13172
205
+
3
206
  ## 4.0.0-beta.2
4
207
 
5
208
  ### Patch Changes
@@ -360,13 +563,13 @@
360
563
  Before
361
564
 
362
565
  ```ts
363
- model.textEmbeddingModel('my-model-id');
566
+ model.textEmbeddingModel("my-model-id");
364
567
  ```
365
568
 
366
569
  After
367
570
 
368
571
  ```ts
369
- model.embeddingModel('my-model-id');
572
+ model.embeddingModel("my-model-id");
370
573
  ```
371
574
 
372
575
  - 60f4775: fix: remove code for unsupported o1-mini and o1-preview models
@@ -376,15 +579,15 @@
376
579
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
377
580
 
378
581
  ```ts
379
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
380
- import { generateText } from 'ai';
582
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
583
+ import { generateText } from "ai";
381
584
 
382
585
  await generateText({
383
- model: openai.chat('gpt-4o'),
384
- prompt: 'Invent a new holiday and describe its traditions.',
586
+ model: openai.chat("gpt-4o"),
587
+ prompt: "Invent a new holiday and describe its traditions.",
385
588
  providerOptions: {
386
589
  openai: {
387
- user: 'user-123',
590
+ user: "user-123",
388
591
  } satisfies OpenAIChatLanguageModelOptions,
389
592
  },
390
593
  });
@@ -785,13 +988,13 @@
785
988
  Before
786
989
 
787
990
  ```ts
788
- model.textEmbeddingModel('my-model-id');
991
+ model.textEmbeddingModel("my-model-id");
789
992
  ```
790
993
 
791
994
  After
792
995
 
793
996
  ```ts
794
- model.embeddingModel('my-model-id');
997
+ model.embeddingModel("my-model-id");
795
998
  ```
796
999
 
797
1000
  - Updated dependencies [8d9e8ad]
@@ -1261,15 +1464,15 @@
1261
1464
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
1262
1465
 
1263
1466
  ```ts
1264
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
1265
- import { generateText } from 'ai';
1467
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
1468
+ import { generateText } from "ai";
1266
1469
 
1267
1470
  await generateText({
1268
- model: openai.chat('gpt-4o'),
1269
- prompt: 'Invent a new holiday and describe its traditions.',
1471
+ model: openai.chat("gpt-4o"),
1472
+ prompt: "Invent a new holiday and describe its traditions.",
1270
1473
  providerOptions: {
1271
1474
  openai: {
1272
- user: 'user-123',
1475
+ user: "user-123",
1273
1476
  } satisfies OpenAIChatLanguageModelOptions,
1274
1477
  },
1275
1478
  });
@@ -1565,7 +1768,7 @@
1565
1768
 
1566
1769
  ```js
1567
1770
  await generateImage({
1568
- model: luma.image('photon-flash-1', {
1771
+ model: luma.image("photon-flash-1", {
1569
1772
  maxImagesPerCall: 5,
1570
1773
  pollIntervalMillis: 500,
1571
1774
  }),
@@ -1578,7 +1781,7 @@
1578
1781
 
1579
1782
  ```js
1580
1783
  await generateImage({
1581
- model: luma.image('photon-flash-1'),
1784
+ model: luma.image("photon-flash-1"),
1582
1785
  prompt,
1583
1786
  n: 10,
1584
1787
  maxImagesPerCall: 5,
@@ -1640,10 +1843,10 @@
1640
1843
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
1641
1844
 
1642
1845
  ```js
1643
- const prompt = 'Santa Claus driving a Cadillac';
1846
+ const prompt = "Santa Claus driving a Cadillac";
1644
1847
 
1645
1848
  const { providerMetadata } = await experimental_generateImage({
1646
- model: openai.image('dall-e-3'),
1849
+ model: openai.image("dall-e-3"),
1647
1850
  prompt,
1648
1851
  });
1649
1852
 
@@ -1942,7 +2145,7 @@
1942
2145
 
1943
2146
  ```js
1944
2147
  await generateImage({
1945
- model: luma.image('photon-flash-1', {
2148
+ model: luma.image("photon-flash-1", {
1946
2149
  maxImagesPerCall: 5,
1947
2150
  pollIntervalMillis: 500,
1948
2151
  }),
@@ -1955,7 +2158,7 @@
1955
2158
 
1956
2159
  ```js
1957
2160
  await generateImage({
1958
- model: luma.image('photon-flash-1'),
2161
+ model: luma.image("photon-flash-1"),
1959
2162
  prompt,
1960
2163
  n: 10,
1961
2164
  maxImagesPerCall: 5,
@@ -2000,10 +2203,10 @@
2000
2203
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
2001
2204
 
2002
2205
  ```js
2003
- const prompt = 'Santa Claus driving a Cadillac';
2206
+ const prompt = "Santa Claus driving a Cadillac";
2004
2207
 
2005
2208
  const { providerMetadata } = await experimental_generateImage({
2006
- model: openai.image('dall-e-3'),
2209
+ model: openai.image("dall-e-3"),
2007
2210
  prompt,
2008
2211
  });
2009
2212
 
package/README.md CHANGED
@@ -3,6 +3,8 @@
3
3
  The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs)
4
4
  contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.
5
5
 
6
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access OpenAI (and hundreds of models from other providers) — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
7
+
6
8
  ## Setup
7
9
 
8
10
  The OpenAI provider is available in the `@ai-sdk/openai` module. You can install it with