@ai-sdk/openai 2.0.76 → 2.0.78

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -39,6 +39,22 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
+// src/openai-language-model-capabilities.ts
+function getOpenAILanguageModelCapabilities(modelId) {
+  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+  const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
+  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+  const systemMessageMode = isReasoningModel ? "developer" : "system";
+  return {
+    supportsFlexProcessing,
+    supportsPriorityProcessing,
+    isReasoningModel,
+    systemMessageMode,
+    supportsNonReasoningParameters
+  };
+}
+
 // src/chat/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
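For reference, tracing the prefix checks in the new helper by hand (these results are derived from the function body above, not part of the diff):

getOpenAILanguageModelCapabilities("gpt-4o");
// { supportsFlexProcessing: false, supportsPriorityProcessing: true,
//   isReasoningModel: false, systemMessageMode: "system",
//   supportsNonReasoningParameters: false }

getOpenAILanguageModelCapabilities("o3-mini");
// { supportsFlexProcessing: true, supportsPriorityProcessing: true,
//   isReasoningModel: true, systemMessageMode: "developer",
//   supportsNonReasoningParameters: false }

getOpenAILanguageModelCapabilities("gpt-5.1");
// { supportsFlexProcessing: true, supportsPriorityProcessing: true,
//   isReasoningModel: true, systemMessageMode: "developer",
//   supportsNonReasoningParameters: true }

getOpenAILanguageModelCapabilities("gpt-5-chat-latest");
// { supportsFlexProcessing: false, supportsPriorityProcessing: false,
//   isReasoningModel: false, systemMessageMode: "system",
//   supportsNonReasoningParameters: false }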
@@ -598,6 +614,7 @@ var OpenAIChatLanguageModel = class {
     schema: openaiChatLanguageModelOptions
   })) != null ? _a : {};
   const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
+  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
   if (topK != null) {
     warnings.push({
       type: "unsupported-setting",
@@ -614,7 +631,7 @@ var OpenAIChatLanguageModel = class {
   const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
     {
       prompt,
-      systemMessageMode: getSystemMessageMode(this.modelId)
+      systemMessageMode: modelCapabilities.systemMessageMode
     }
   );
   warnings.push(...messageWarnings);
@@ -660,22 +677,31 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages
     };
-    if (isReasoningModel(this.modelId)) {
-      if (baseArgs.temperature != null) {
-        baseArgs.temperature = void 0;
-        warnings.push({
-          type: "unsupported-setting",
-          setting: "temperature",
-          details: "temperature is not supported for reasoning models"
-        });
-      }
-      if (baseArgs.top_p != null) {
-        baseArgs.top_p = void 0;
-        warnings.push({
-          type: "unsupported-setting",
-          setting: "topP",
-          details: "topP is not supported for reasoning models"
-        });
+    if (modelCapabilities.isReasoningModel) {
+      if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
+        if (baseArgs.temperature != null) {
+          baseArgs.temperature = void 0;
+          warnings.push({
+            type: "unsupported-setting",
+            setting: "temperature",
+            details: "temperature is not supported for reasoning models"
+          });
+        }
+        if (baseArgs.top_p != null) {
+          baseArgs.top_p = void 0;
+          warnings.push({
+            type: "unsupported-setting",
+            setting: "topP",
+            details: "topP is not supported for reasoning models"
+          });
+        }
+        if (baseArgs.logprobs != null) {
+          baseArgs.logprobs = void 0;
+          warnings.push({
+            type: "other",
+            message: "logprobs is not supported for reasoning models"
+          });
+        }
       }
       if (baseArgs.frequency_penalty != null) {
         baseArgs.frequency_penalty = void 0;
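Net effect of the reworked guard: sampling parameters are still stripped for reasoning models unless the caller sets reasoningEffort to "none" on a model that supports non-reasoning parameters (per the helper above, the gpt-5.1 prefix). A minimal sketch, assuming the usual AI SDK call shape (generateText from the ai package, chat model via openai.chat):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// temperature survives: effort "none" + supportsNonReasoningParameters (gpt-5.1)
await generateText({
  model: openai.chat("gpt-5.1"),
  prompt: "One-line summary, please.",
  temperature: 0.3,
  providerOptions: { openai: { reasoningEffort: "none" } }
});

// temperature is cleared with an "unsupported-setting" warning: o3-mini is a
// reasoning model and does not support non-reasoning parameters
await generateText({
  model: openai.chat("o3-mini"),
  prompt: "One-line summary, please.",
  temperature: 0.3
});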
@@ -700,13 +726,6 @@ var OpenAIChatLanguageModel = class {
           message: "logitBias is not supported for reasoning models"
         });
       }
-      if (baseArgs.logprobs != null) {
-        baseArgs.logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "logprobs is not supported for reasoning models"
-        });
-      }
       if (baseArgs.top_logprobs != null) {
         baseArgs.top_logprobs = void 0;
         warnings.push({
@@ -730,7 +749,7 @@ var OpenAIChatLanguageModel = class {
         });
       }
     }
-    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+    if (openaiOptions.serviceTier === "flex" && !modelCapabilities.supportsFlexProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
@@ -738,7 +757,7 @@ var OpenAIChatLanguageModel = class {
       });
       baseArgs.service_tier = void 0;
     }
-    if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+    if (openaiOptions.serviceTier === "priority" && !modelCapabilities.supportsPriorityProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
@@ -1059,42 +1078,6 @@ var OpenAIChatLanguageModel = class {
     };
   }
 };
-function isReasoningModel(modelId) {
-  return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
-}
-function supportsFlexProcessing(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
-}
-function supportsPriorityProcessing(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-}
-function getSystemMessageMode(modelId) {
-  var _a, _b;
-  if (!isReasoningModel(modelId)) {
-    return "system";
-  }
-  return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
-}
-var reasoningModels = {
-  o3: {
-    systemMessageMode: "developer"
-  },
-  "o3-2025-04-16": {
-    systemMessageMode: "developer"
-  },
-  "o3-mini": {
-    systemMessageMode: "developer"
-  },
-  "o3-mini-2025-01-31": {
-    systemMessageMode: "developer"
-  },
-  "o4-mini": {
-    systemMessageMode: "developer"
-  },
-  "o4-mini-2025-04-16": {
-    systemMessageMode: "developer"
-  }
-};
 
 // src/completion/openai-completion-language-model.ts
 import {
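The deleted reasoningModels table only ever mapped o3*/o4-mini* IDs to "developer", which is also the fallback getSystemMessageMode returned for every other reasoning model, so the lookup collapses into the ternary in getOpenAILanguageModelCapabilities. Spot checks, traced by hand from both versions:

// old: getSystemMessageMode("o3-mini") -> "developer" (table hit)
// old: getSystemMessageMode("o1")      -> "developer" (fallback for reasoning models)
// old: getSystemMessageMode("gpt-4.1") -> "system"    (non-reasoning)
getOpenAILanguageModelCapabilities("o3-mini").systemMessageMode; // "developer"
getOpenAILanguageModelCapabilities("o1").systemMessageMode;      // "developer"
getOpenAILanguageModelCapabilities("gpt-4.1").systemMessageMode; // "system"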
@@ -2004,12 +1987,12 @@ var webSearchOutputSchema = lazySchema5(
         }),
         z13.object({
           type: z13.literal("openPage"),
-          url: z13.string()
+          url: z13.string().nullish()
         }),
         z13.object({
           type: z13.literal("find"),
-          url: z13.string(),
-          pattern: z13.string()
+          url: z13.string().nullish(),
+          pattern: z13.string().nullish()
         })
       ]),
       sources: z13.array(
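The same relaxation (url and pattern become nullish) repeats in the three schema hunks below. A self-contained sketch of what .nullish() changes, written against plain zod (z13/z14/z16 are the diff's aliased imports):

import { z } from "zod";

const findAction = z.object({
  type: z.literal("find"),
  url: z.string().nullish(),     // .nullish() = .nullable().optional()
  pattern: z.string().nullish()
});

findAction.parse({ type: "find" });            // ok: fields may be absent
findAction.parse({ type: "find", url: null }); // ok: null is accepted
// before this change, both calls threw a ZodError for the missing/null fields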
@@ -2063,12 +2046,12 @@ var webSearchPreviewOutputSchema = lazySchema6(
         }),
         z14.object({
           type: z14.literal("openPage"),
-          url: z14.string()
+          url: z14.string().nullish()
         }),
         z14.object({
           type: z14.literal("find"),
-          url: z14.string(),
-          pattern: z14.string()
+          url: z14.string().nullish(),
+          pattern: z14.string().nullish()
         })
       ])
     })
@@ -2630,12 +2613,12 @@ var openaiResponsesChunkSchema = lazyValidator8(
         }),
         z16.object({
           type: z16.literal("open_page"),
-          url: z16.string()
+          url: z16.string().nullish()
         }),
         z16.object({
           type: z16.literal("find"),
-          url: z16.string(),
-          pattern: z16.string()
+          url: z16.string().nullish(),
+          pattern: z16.string().nullish()
         })
       ])
     }),
@@ -2840,12 +2823,12 @@ var openaiResponsesResponseSchema = lazyValidator8(
         }),
         z16.object({
           type: z16.literal("open_page"),
-          url: z16.string()
+          url: z16.string().nullish()
         }),
         z16.object({
           type: z16.literal("find"),
-          url: z16.string(),
-          pattern: z16.string()
+          url: z16.string().nullish(),
+          pattern: z16.string().nullish()
         })
       ])
     }),
@@ -3237,7 +3220,7 @@ var OpenAIResponsesLanguageModel = class {
   }) {
     var _a, _b, _c, _d;
     const warnings = [];
-    const modelConfig = getResponsesModelConfig(this.modelId);
+    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
       warnings.push({ type: "unsupported-setting", setting: "topK" });
     }
@@ -3273,7 +3256,7 @@ var OpenAIResponsesLanguageModel = class {
     }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
-      systemMessageMode: modelConfig.systemMessageMode,
+      systemMessageMode: modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
       store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell")
@@ -3307,7 +3290,7 @@ var OpenAIResponsesLanguageModel = class {
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false && modelConfig.isReasoningModel) {
+    if (store === false && modelCapabilities.isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
@@ -3349,7 +3332,7 @@ var OpenAIResponsesLanguageModel = class {
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+      ...modelCapabilities.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
             effort: openaiOptions.reasoningEffort
@@ -3360,7 +3343,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     };
-    if (modelConfig.isReasoningModel) {
+    if (modelCapabilities.isReasoningModel || (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters) {
       if (baseArgs.temperature != null) {
         baseArgs.temperature = void 0;
         warnings.push({
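Note the asymmetry with the chat path: here reasoningEffort: "none" extends the stripping branch rather than gating it, so a gpt-5.1 responses model still loses its sampling parameters. A sketch, assuming the same AI SDK call shape as above (responses model via openai.responses):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { warnings } = await generateText({
  model: openai.responses("gpt-5.1"),
  prompt: "Hello",
  temperature: 0.3,
  providerOptions: { openai: { reasoningEffort: "none" } }
});
// temperature is removed and warnings gains an "unsupported-setting" entry:
// isReasoningModel is already true for gpt-5.1, and the added clause also
// matches when reasoningEffort is "none" and supportsNonReasoningParameters holds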
@@ -3393,7 +3376,7 @@ var OpenAIResponsesLanguageModel = class {
         });
       }
     }
-    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelCapabilities.supportsFlexProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
@@ -3401,7 +3384,7 @@ var OpenAIResponsesLanguageModel = class {
       });
       delete baseArgs.service_tier;
     }
-    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelCapabilities.supportsPriorityProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
@@ -4258,32 +4241,6 @@ function isResponseAnnotationAddedChunk(chunk) {
 function isErrorChunk(chunk) {
   return chunk.type === "error";
 }
-function getResponsesModelConfig(modelId) {
-  const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
-  const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const defaults = {
-    systemMessageMode: "system",
-    supportsFlexProcessing: supportsFlexProcessing2,
-    supportsPriorityProcessing: supportsPriorityProcessing2
-  };
-  if (modelId.startsWith("gpt-5-chat")) {
-    return {
-      ...defaults,
-      isReasoningModel: false
-    };
-  }
-  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
-    return {
-      ...defaults,
-      isReasoningModel: true,
-      systemMessageMode: "developer"
-    };
-  }
-  return {
-    ...defaults,
-    isReasoningModel: false
-  };
-}
 function mapWebSearchOutput(action) {
   var _a;
   switch (action.type) {
@@ -4673,7 +4630,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.76" : "0.0.0-test";
+var VERSION = true ? "2.0.78" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {