@ai-sdk/openai 2.0.37 → 2.1.0-beta.0

This diff shows the changes between publicly released versions of the package as they appear in their public registries and is provided for informational purposes only.
@@ -3,7 +3,7 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
-declare const openaiChatLanguageModelOptions: z.ZodObject<{
+declare const openaiProviderOptions: z.ZodObject<{
     logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
     logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
@@ -33,7 +33,7 @@ declare const openaiChatLanguageModelOptions: z.ZodObject<{
     promptCacheKey: z.ZodOptional<z.ZodString>;
     safetyIdentifier: z.ZodOptional<z.ZodString>;
 }, z.core.$strip>;
-type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
 
 type OpenAIChatConfig = {
     provider: string;
@@ -247,4 +247,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions };
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
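
In the type declarations above, the chat provider-options schema openaiChatLanguageModelOptions and its inferred type OpenAIChatLanguageModelOptions are renamed to openaiProviderOptions and OpenAIProviderOptions; the schema fields themselves are unchanged. A hedged migration sketch for code that imported the old names; the '@ai-sdk/openai/internal' subpath and the assumption that all remaining (not shown) schema fields are optional come from the internal_exports bundle below, not from this diff directly:

// Before (2.0.37) — these exports are removed in 2.1.0-beta.0:
// import {
//   openaiChatLanguageModelOptions,
//   type OpenAIChatLanguageModelOptions,
// } from '@ai-sdk/openai/internal';

// After (2.1.0-beta.0):
import {
  openaiProviderOptions,
  type OpenAIProviderOptions,
} from '@ai-sdk/openai/internal';

// The schema shape is unchanged, so existing option objects still validate.
const options: OpenAIProviderOptions = {
  parallelToolCalls: false,
  promptCacheKey: 'docs-example',
};

openaiProviderOptions.parse(options);
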
@@ -30,9 +30,9 @@ __export(internal_exports, {
   hasDefaultResponseFormat: () => hasDefaultResponseFormat,
   modelMaxImagesPerCall: () => modelMaxImagesPerCall,
   openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
-  openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions,
   openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
-  openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
+  openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
+  openaiProviderOptions: () => openaiProviderOptions
 });
 module.exports = __toCommonJS(internal_exports);
 
@@ -270,7 +270,7 @@ function mapOpenAIFinishReason(finishReason) {
 
 // src/chat/openai-chat-options.ts
 var import_v42 = require("zod/v4");
-var openaiChatLanguageModelOptions = import_v42.z.object({
+var openaiProviderOptions = import_v42.z.object({
   /**
    * Modify the likelihood of specified tokens appearing in the completion.
    *
@@ -452,7 +452,7 @@ var OpenAIChatLanguageModel = class {
     const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
       provider: "openai",
       providerOptions,
-      schema: openaiChatLanguageModelOptions
+      schema: openaiProviderOptions
     })) != null ? _a : {};
     const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
     if (topK != null) {
@@ -2156,40 +2156,26 @@ async function convertToOpenAIResponsesInput({
       });
       const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
       if (reasoningId != null) {
-        const reasoningMessage = reasoningMessages[reasoningId];
-        if (store) {
-          if (reasoningMessage === void 0) {
-            input.push({ type: "item_reference", id: reasoningId });
-            reasoningMessages[reasoningId] = {
-              type: "reasoning",
-              id: reasoningId,
-              summary: []
-            };
-          }
+        const existingReasoningMessage = reasoningMessages[reasoningId];
+        const summaryParts = [];
+        if (part.text.length > 0) {
+          summaryParts.push({ type: "summary_text", text: part.text });
+        } else if (existingReasoningMessage !== void 0) {
+          warnings.push({
+            type: "other",
+            message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+          });
+        }
+        if (existingReasoningMessage === void 0) {
+          reasoningMessages[reasoningId] = {
+            type: "reasoning",
+            id: reasoningId,
+            encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+            summary: summaryParts
+          };
+          input.push(reasoningMessages[reasoningId]);
         } else {
-          const summaryParts = [];
-          if (part.text.length > 0) {
-            summaryParts.push({
-              type: "summary_text",
-              text: part.text
-            });
-          } else if (reasoningMessage !== void 0) {
-            warnings.push({
-              type: "other",
-              message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
-            });
-          }
-          if (reasoningMessage === void 0) {
-            reasoningMessages[reasoningId] = {
-              type: "reasoning",
-              id: reasoningId,
-              encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
-              summary: summaryParts
-            };
-            input.push(reasoningMessages[reasoningId]);
-          } else {
-            reasoningMessage.summary.push(...summaryParts);
-          }
+          existingReasoningMessage.summary.push(...summaryParts);
         }
       } else {
         warnings.push({
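
The hunk above simplifies how assistant reasoning parts are converted to Responses API input: instead of emitting an item_reference when store is enabled, the first part for a given itemId now always pushes a full reasoning item (carrying reasoningEncryptedContent), and later parts with the same id only extend its summary. A standalone sketch of that merge behavior, not code from the package; the types and the addReasoningPart helper are hypothetical:

type ReasoningItem = {
  type: 'reasoning';
  id: string;
  encrypted_content?: string;
  summary: Array<{ type: 'summary_text'; text: string }>;
};

const reasoningMessages: Record<string, ReasoningItem> = {};
const input: ReasoningItem[] = [];

function addReasoningPart(id: string, text: string, encryptedContent?: string) {
  const existing = reasoningMessages[id];
  const summaryParts =
    text.length > 0 ? [{ type: 'summary_text' as const, text }] : [];
  if (existing === undefined) {
    // First part for this id: push one full reasoning item into the input
    // (no item_reference, regardless of the store setting).
    const item: ReasoningItem = {
      type: 'reasoning',
      id,
      encrypted_content: encryptedContent,
      summary: summaryParts,
    };
    reasoningMessages[id] = item;
    input.push(item);
  } else {
    // Later parts with the same id append to the existing summary; empty
    // parts only trigger a warning in the actual implementation above.
    existing.summary.push(...summaryParts);
  }
}

addReasoningPart('rs_example', 'first summary chunk', '<encrypted blob>');
addReasoningPart('rs_example', 'second summary chunk');
// input now holds a single reasoning item whose summary has two entries.
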
@@ -2939,7 +2925,7 @@ var OpenAIResponsesLanguageModel = class {
         ])
       ),
       service_tier: import_v418.z.string().nullish(),
-      incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).nullish(),
+      incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).nullable(),
       usage: usageSchema2
     })
   ),
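
The incomplete_details change tightens the response schema: .nullish() also accepted a missing (undefined) field, whereas .nullable() requires the field to be present, though its value may still be null. A minimal zod illustration of the difference, not code from the package:

import { z } from 'zod/v4';

const withNullish = z.object({
  incomplete_details: z.object({ reason: z.string() }).nullish(),
});
const withNullable = z.object({
  incomplete_details: z.object({ reason: z.string() }).nullable(),
});

withNullish.parse({});                            // ok: field may be omitted
withNullable.parse({ incomplete_details: null }); // ok: null is still accepted
withNullable.safeParse({}).success;               // false: field must now be present
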
@@ -3829,8 +3815,8 @@ var openaiResponsesProviderOptionsSchema = import_v418.z.object({
   hasDefaultResponseFormat,
   modelMaxImagesPerCall,
   openAITranscriptionProviderOptions,
-  openaiChatLanguageModelOptions,
   openaiCompletionProviderOptions,
-  openaiEmbeddingProviderOptions
+  openaiEmbeddingProviderOptions,
+  openaiProviderOptions
 });
 //# sourceMappingURL=index.js.map
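
For end users, the hunk at -452 shows that the chat model validates these options via parseProviderOptions with provider "openai", so they are passed under the openai key of providerOptions. A hedged usage sketch assuming the AI SDK's generateText API; the model id and option values are illustrative only, and the option names are taken from the schema in this diff:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Illustrative only: these options are validated against openaiProviderOptions
// (logitBias, logprobs, parallelToolCalls, promptCacheKey, safetyIdentifier, ...).
const { text } = await generateText({
  model: openai.chat('gpt-5-mini'),
  prompt: 'Summarize the changes in this release.',
  providerOptions: {
    openai: {
      parallelToolCalls: false,
      promptCacheKey: 'release-notes',
      safetyIdentifier: 'user-1234',
    },
  },
});
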