@ai-sdk/openai 2.0.38 → 2.1.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,57 +1,16 @@
1
1
  # @ai-sdk/openai
2
2
 
3
- ## 2.0.38
3
+ ## 2.1.0-beta.0
4
4
 
5
- ### Patch Changes
6
-
7
- - 0bda600: enables code_interpreter and file_search capabilities in the Azure provider through the Responses API
8
-
9
- ## 2.0.37
10
-
11
- ### Patch Changes
12
-
13
- - 6075c91: feat(provider/openai): only send item references for reasoning when store: true
14
-
15
- ## 2.0.36
16
-
17
- ### Patch Changes
18
-
19
 - bc5ed71: chore: update zod peer dependency version
20
- - Updated dependencies [bc5ed71]
21
- - @ai-sdk/provider-utils@3.0.10
22
-
23
- ## 2.0.35
24
-
25
- ### Patch Changes
26
-
27
- - 1cfc209: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
28
-
29
- ```ts
30
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
31
- import { generateText } from 'ai';
32
-
33
- await generateText({
34
- model: openai.chat('gpt-4o'),
35
- prompt: 'Invent a new holiday and describe its traditions.',
36
- providerOptions: {
37
- openai: {
38
- user: 'user-123',
39
- } satisfies OpenAIChatLanguageModelOptions,
40
- },
41
- });
42
- ```
43
-
44
- ## 2.0.34
45
-
46
- ### Patch Changes
47
-
48
- - 322901b: feat: add provider version to user-agent header
5
+ ### Minor Changes
49
6
 
50
- ## 2.0.33
7
+ - 78928cb: release: start 5.1 beta
51
8
 
52
9
  ### Patch Changes
53
10
 
54
- - a617948: fix the "incomplete_details" key from nullable to nullish for openai compatibility
11
+ - Updated dependencies [78928cb]
12
+ - @ai-sdk/provider@2.1.0-beta.0
13
+ - @ai-sdk/provider-utils@3.1.0-beta.0
55
14
 
56
15
  ## 2.0.32
57
16
 
package/dist/index.d.mts CHANGED
@@ -4,37 +4,6 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
4
4
  import { z } from 'zod/v4';
5
5
 
6
6
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
7
- declare const openaiChatLanguageModelOptions: z.ZodObject<{
8
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
9
- logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
10
- parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
11
- user: z.ZodOptional<z.ZodString>;
12
- reasoningEffort: z.ZodOptional<z.ZodEnum<{
13
- minimal: "minimal";
14
- low: "low";
15
- medium: "medium";
16
- high: "high";
17
- }>>;
18
- maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
19
- store: z.ZodOptional<z.ZodBoolean>;
20
- metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
21
- prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
22
- structuredOutputs: z.ZodOptional<z.ZodBoolean>;
23
- serviceTier: z.ZodOptional<z.ZodEnum<{
24
- auto: "auto";
25
- flex: "flex";
26
- priority: "priority";
27
- }>>;
28
- strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
29
- textVerbosity: z.ZodOptional<z.ZodEnum<{
30
- low: "low";
31
- medium: "medium";
32
- high: "high";
33
- }>>;
34
- promptCacheKey: z.ZodOptional<z.ZodString>;
35
- safetyIdentifier: z.ZodOptional<z.ZodString>;
36
- }, z.core.$strip>;
37
- type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
38
7
 
39
8
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
40
9
 
@@ -367,6 +336,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
367
336
  }, z.core.$strip>;
368
337
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
369
338
 
370
- declare const VERSION: string;
371
-
372
- export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
339
+ export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
package/dist/index.d.ts CHANGED
@@ -4,37 +4,6 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
4
4
  import { z } from 'zod/v4';
5
5
 
6
6
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
7
- declare const openaiChatLanguageModelOptions: z.ZodObject<{
8
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
9
- logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
10
- parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
11
- user: z.ZodOptional<z.ZodString>;
12
- reasoningEffort: z.ZodOptional<z.ZodEnum<{
13
- minimal: "minimal";
14
- low: "low";
15
- medium: "medium";
16
- high: "high";
17
- }>>;
18
- maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
19
- store: z.ZodOptional<z.ZodBoolean>;
20
- metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
21
- prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
22
- structuredOutputs: z.ZodOptional<z.ZodBoolean>;
23
- serviceTier: z.ZodOptional<z.ZodEnum<{
24
- auto: "auto";
25
- flex: "flex";
26
- priority: "priority";
27
- }>>;
28
- strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
29
- textVerbosity: z.ZodOptional<z.ZodEnum<{
30
- low: "low";
31
- medium: "medium";
32
- high: "high";
33
- }>>;
34
- promptCacheKey: z.ZodOptional<z.ZodString>;
35
- safetyIdentifier: z.ZodOptional<z.ZodString>;
36
- }, z.core.$strip>;
37
- type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
38
7
 
39
8
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
40
9
 
@@ -367,6 +336,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
367
336
  }, z.core.$strip>;
368
337
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
369
338
 
370
- declare const VERSION: string;
371
-
372
- export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
339
+ export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
package/dist/index.js CHANGED
@@ -20,7 +20,6 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
20
20
  // src/index.ts
21
21
  var src_exports = {};
22
22
  __export(src_exports, {
23
- VERSION: () => VERSION,
24
23
  createOpenAI: () => createOpenAI,
25
24
  openai: () => openai
26
25
  });
@@ -263,7 +262,7 @@ function mapOpenAIFinishReason(finishReason) {
263
262
 
264
263
  // src/chat/openai-chat-options.ts
265
264
  var import_v42 = require("zod/v4");
266
- var openaiChatLanguageModelOptions = import_v42.z.object({
265
+ var openaiProviderOptions = import_v42.z.object({
267
266
  /**
268
267
  * Modify the likelihood of specified tokens appearing in the completion.
269
268
  *
@@ -445,7 +444,7 @@ var OpenAIChatLanguageModel = class {
445
444
  const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
446
445
  provider: "openai",
447
446
  providerOptions,
448
- schema: openaiChatLanguageModelOptions
447
+ schema: openaiProviderOptions
449
448
  })) != null ? _a : {};
450
449
  const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
451
450
  if (topK != null) {
@@ -2087,40 +2086,26 @@ async function convertToOpenAIResponsesInput({
2087
2086
  });
2088
2087
  const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2089
2088
  if (reasoningId != null) {
2090
- const reasoningMessage = reasoningMessages[reasoningId];
2091
- if (store) {
2092
- if (reasoningMessage === void 0) {
2093
- input.push({ type: "item_reference", id: reasoningId });
2094
- reasoningMessages[reasoningId] = {
2095
- type: "reasoning",
2096
- id: reasoningId,
2097
- summary: []
2098
- };
2099
- }
2089
+ const existingReasoningMessage = reasoningMessages[reasoningId];
2090
+ const summaryParts = [];
2091
+ if (part.text.length > 0) {
2092
+ summaryParts.push({ type: "summary_text", text: part.text });
2093
+ } else if (existingReasoningMessage !== void 0) {
2094
+ warnings.push({
2095
+ type: "other",
2096
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2097
+ });
2098
+ }
2099
+ if (existingReasoningMessage === void 0) {
2100
+ reasoningMessages[reasoningId] = {
2101
+ type: "reasoning",
2102
+ id: reasoningId,
2103
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2104
+ summary: summaryParts
2105
+ };
2106
+ input.push(reasoningMessages[reasoningId]);
2100
2107
  } else {
2101
- const summaryParts = [];
2102
- if (part.text.length > 0) {
2103
- summaryParts.push({
2104
- type: "summary_text",
2105
- text: part.text
2106
- });
2107
- } else if (reasoningMessage !== void 0) {
2108
- warnings.push({
2109
- type: "other",
2110
- message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2111
- });
2112
- }
2113
- if (reasoningMessage === void 0) {
2114
- reasoningMessages[reasoningId] = {
2115
- type: "reasoning",
2116
- id: reasoningId,
2117
- encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2118
- summary: summaryParts
2119
- };
2120
- input.push(reasoningMessages[reasoningId]);
2121
- } else {
2122
- reasoningMessage.summary.push(...summaryParts);
2123
- }
2108
+ existingReasoningMessage.summary.push(...summaryParts);
2124
2109
  }
2125
2110
  } else {
2126
2111
  warnings.push({
@@ -2672,7 +2657,7 @@ var OpenAIResponsesLanguageModel = class {
2672
2657
  ])
2673
2658
  ),
2674
2659
  service_tier: import_v415.z.string().nullish(),
2675
- incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullish(),
2660
+ incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullable(),
2676
2661
  usage: usageSchema2
2677
2662
  })
2678
2663
  ),
@@ -3882,27 +3867,21 @@ var openaiTranscriptionResponseSchema = import_v418.z.object({
3882
3867
  ).nullish()
3883
3868
  });
3884
3869
 
3885
- // src/version.ts
3886
- var VERSION = true ? "2.0.38" : "0.0.0-test";
3887
-
3888
3870
  // src/openai-provider.ts
3889
3871
  function createOpenAI(options = {}) {
3890
3872
  var _a, _b;
3891
3873
  const baseURL = (_a = (0, import_provider_utils16.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
3892
3874
  const providerName = (_b = options.name) != null ? _b : "openai";
3893
- const getHeaders = () => (0, import_provider_utils16.withUserAgentSuffix)(
3894
- {
3895
- Authorization: `Bearer ${(0, import_provider_utils16.loadApiKey)({
3896
- apiKey: options.apiKey,
3897
- environmentVariableName: "OPENAI_API_KEY",
3898
- description: "OpenAI"
3899
- })}`,
3900
- "OpenAI-Organization": options.organization,
3901
- "OpenAI-Project": options.project,
3902
- ...options.headers
3903
- },
3904
- `ai-sdk/openai/${VERSION}`
3905
- );
3875
+ const getHeaders = () => ({
3876
+ Authorization: `Bearer ${(0, import_provider_utils16.loadApiKey)({
3877
+ apiKey: options.apiKey,
3878
+ environmentVariableName: "OPENAI_API_KEY",
3879
+ description: "OpenAI"
3880
+ })}`,
3881
+ "OpenAI-Organization": options.organization,
3882
+ "OpenAI-Project": options.project,
3883
+ ...options.headers
3884
+ });
3906
3885
  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
3907
3886
  provider: `${providerName}.chat`,
3908
3887
  url: ({ path }) => `${baseURL}${path}`,
@@ -3978,7 +3957,6 @@ function createOpenAI(options = {}) {
3978
3957
  var openai = createOpenAI();
3979
3958
  // Annotate the CommonJS export names for ESM import in node:
3980
3959
  0 && (module.exports = {
3981
- VERSION,
3982
3960
  createOpenAI,
3983
3961
  openai
3984
3962
  });