@ai-sdk/openai 2.0.77 → 2.0.79
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.js +70 -111
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +70 -111
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +69 -110
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +69 -110
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs CHANGED
@@ -39,6 +39,22 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
+// src/openai-language-model-capabilities.ts
+function getOpenAILanguageModelCapabilities(modelId) {
+  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+  const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
+  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+  const systemMessageMode = isReasoningModel ? "developer" : "system";
+  return {
+    supportsFlexProcessing,
+    supportsPriorityProcessing,
+    isReasoningModel,
+    systemMessageMode,
+    supportsNonReasoningParameters
+  };
+}
+
 // src/chat/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
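For reference, this is what the new internal helper derives for a few representative model ids. A minimal sketch that reproduces the function body from the added lines above (the helper is not exported from the package, and the return object is condensed to one line here); the expected values follow directly from its `startsWith` checks:

```js
// Reproduced from the diff above for illustration (internal helper, not public API).
function getOpenAILanguageModelCapabilities(modelId) {
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
  const systemMessageMode = isReasoningModel ? "developer" : "system";
  return { supportsFlexProcessing, supportsPriorityProcessing, isReasoningModel, systemMessageMode, supportsNonReasoningParameters };
}

// "gpt-5.1"    -> flex: true,  priority: true,  reasoning: true,  systemMessageMode: "developer", non-reasoning params: true
// "gpt-5-nano" -> flex: true,  priority: false, reasoning: true,  systemMessageMode: "developer", non-reasoning params: false
// "gpt-4.1"    -> flex: false, priority: true,  reasoning: false, systemMessageMode: "system",    non-reasoning params: false
console.log(getOpenAILanguageModelCapabilities("gpt-5.1"));
```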
@@ -598,6 +614,7 @@ var OpenAIChatLanguageModel = class {
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
     const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
+    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
@@ -614,7 +631,7 @@ var OpenAIChatLanguageModel = class {
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        systemMessageMode: getSystemMessageMode(this.modelId)
+        systemMessageMode: modelCapabilities.systemMessageMode
       }
     );
     warnings.push(...messageWarnings);
@@ -660,22 +677,31 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages
     };
-    if (isReasoningModel(this.modelId)) {
-      if (baseArgs.temperature != null) {
-        baseArgs.temperature = void 0;
-        warnings.push({
-          type: "unsupported-setting",
-          setting: "temperature",
-          details: "temperature is not supported for reasoning models"
-        });
-      }
-      if (baseArgs.top_p != null) {
-        baseArgs.top_p = void 0;
-        warnings.push({
-          type: "unsupported-setting",
-          setting: "topP",
-          details: "topP is not supported for reasoning models"
-        });
+    if (modelCapabilities.isReasoningModel) {
+      if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
+        if (baseArgs.temperature != null) {
+          baseArgs.temperature = void 0;
+          warnings.push({
+            type: "unsupported-setting",
+            setting: "temperature",
+            details: "temperature is not supported for reasoning models"
+          });
+        }
+        if (baseArgs.top_p != null) {
+          baseArgs.top_p = void 0;
+          warnings.push({
+            type: "unsupported-setting",
+            setting: "topP",
+            details: "topP is not supported for reasoning models"
+          });
+        }
+        if (baseArgs.logprobs != null) {
+          baseArgs.logprobs = void 0;
+          warnings.push({
+            type: "other",
+            message: "logprobs is not supported for reasoning models"
+          });
+        }
       }
       if (baseArgs.frequency_penalty != null) {
         baseArgs.frequency_penalty = void 0;
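The practical effect on the Chat Completions path: temperature, topP, and logprobs are still stripped with warnings for reasoning models, except when reasoningEffort is "none" on a model that supports non-reasoning parameters (ids starting with "gpt-5.1"). A usage sketch, assuming the AI SDK v5 `generateText` API and the `openai.chat` factory; the model id and prompt are illustrative:

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// With reasoningEffort "none" on a gpt-5.1 model, temperature is forwarded.
// On other reasoning models (or other effort values) it would be removed
// and an "unsupported-setting" warning added to result.warnings.
const result = await generateText({
  model: openai.chat('gpt-5.1'),
  temperature: 0.3,
  providerOptions: { openai: { reasoningEffort: 'none' } },
  prompt: 'Summarize the changelog entry in one sentence.',
});
console.log(result.warnings);
```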
@@ -700,13 +726,6 @@ var OpenAIChatLanguageModel = class {
           message: "logitBias is not supported for reasoning models"
         });
       }
-      if (baseArgs.logprobs != null) {
-        baseArgs.logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "logprobs is not supported for reasoning models"
-        });
-      }
       if (baseArgs.top_logprobs != null) {
         baseArgs.top_logprobs = void 0;
         warnings.push({
@@ -730,7 +749,7 @@ var OpenAIChatLanguageModel = class {
         });
       }
     }
-    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+    if (openaiOptions.serviceTier === "flex" && !modelCapabilities.supportsFlexProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
@@ -738,7 +757,7 @@ var OpenAIChatLanguageModel = class {
       });
       baseArgs.service_tier = void 0;
     }
-    if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+    if (openaiOptions.serviceTier === "priority" && !modelCapabilities.supportsPriorityProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
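Both serviceTier checks now read the shared capability flags instead of the standalone helpers. As a hedged illustration (model ids chosen to match the `startsWith` checks above; the AI SDK v5 API is assumed):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// supportsFlexProcessing covers o3, o4-mini, and non-chat gpt-5 models,
// so the service tier is passed through here...
await generateText({
  model: openai.chat('o4-mini'),
  providerOptions: { openai: { serviceTier: 'flex' } },
  prompt: 'ping',
});

// ...while gpt-4o-mini does not match, so service_tier is dropped from the
// request and an "unsupported-setting" warning for serviceTier is emitted.
const { warnings } = await generateText({
  model: openai.chat('gpt-4o-mini'),
  providerOptions: { openai: { serviceTier: 'flex' } },
  prompt: 'ping',
});
console.log(warnings);
```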
@@ -1059,42 +1078,6 @@ var OpenAIChatLanguageModel = class {
     };
   }
 };
-function isReasoningModel(modelId) {
-  return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
-}
-function supportsFlexProcessing(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
-}
-function supportsPriorityProcessing(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-}
-function getSystemMessageMode(modelId) {
-  var _a, _b;
-  if (!isReasoningModel(modelId)) {
-    return "system";
-  }
-  return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
-}
-var reasoningModels = {
-  o3: {
-    systemMessageMode: "developer"
-  },
-  "o3-2025-04-16": {
-    systemMessageMode: "developer"
-  },
-  "o3-mini": {
-    systemMessageMode: "developer"
-  },
-  "o3-mini-2025-01-31": {
-    systemMessageMode: "developer"
-  },
-  "o4-mini": {
-    systemMessageMode: "developer"
-  },
-  "o4-mini-2025-04-16": {
-    systemMessageMode: "developer"
-  }
-};
 
 // src/completion/openai-completion-language-model.ts
 import {
@@ -3237,7 +3220,7 @@ var OpenAIResponsesLanguageModel = class {
   }) {
     var _a, _b, _c, _d;
     const warnings = [];
-    const modelConfig = getResponsesModelConfig(this.modelId);
+    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
       warnings.push({ type: "unsupported-setting", setting: "topK" });
     }
@@ -3273,7 +3256,7 @@ var OpenAIResponsesLanguageModel = class {
     }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
-      systemMessageMode: modelConfig.systemMessageMode,
+      systemMessageMode: modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
       store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell")
@@ -3307,7 +3290,7 @@ var OpenAIResponsesLanguageModel = class {
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false && modelConfig.isReasoningModel) {
+    if (store === false && modelCapabilities.isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
@@ -3349,7 +3332,7 @@ var OpenAIResponsesLanguageModel = class {
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+      ...modelCapabilities.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
             effort: openaiOptions.reasoningEffort
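On the Responses API path, the reasoning request field is still only attached for reasoning-capable models, now gated by `modelCapabilities.isReasoningModel`. A sketch of the corresponding provider options (AI SDK v5 API assumed; model id illustrative):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// reasoningEffort / reasoningSummary are sent as the `reasoning` field of the
// Responses API request only when the model id is classified as reasoning-capable.
const { text } = await generateText({
  model: openai.responses('o4-mini'),
  providerOptions: {
    openai: { reasoningEffort: 'low', reasoningSummary: 'auto' },
  },
  prompt: 'Why is the sky blue?',
});
```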
@@ -3360,22 +3343,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     };
-    if (modelConfig.isReasoningModel) {
-      if (baseArgs.temperature != null) {
-        baseArgs.temperature = void 0;
-        warnings.push({
-          type: "unsupported-setting",
-          setting: "temperature",
-          details: "temperature is not supported for reasoning models"
-        });
-      }
-      if (baseArgs.top_p != null) {
-        baseArgs.top_p = void 0;
-        warnings.push({
-          type: "unsupported-setting",
-          setting: "topP",
-          details: "topP is not supported for reasoning models"
-        });
+    if (modelCapabilities.isReasoningModel) {
+      if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
+        if (baseArgs.temperature != null) {
+          baseArgs.temperature = void 0;
+          warnings.push({
+            type: "unsupported-setting",
+            setting: "temperature",
+            details: "temperature is not supported for reasoning models"
+          });
+        }
+        if (baseArgs.top_p != null) {
+          baseArgs.top_p = void 0;
+          warnings.push({
+            type: "unsupported-setting",
+            setting: "topP",
+            details: "topP is not supported for reasoning models"
+          });
+        }
       }
     } else {
       if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
@@ -3393,7 +3378,7 @@ var OpenAIResponsesLanguageModel = class {
         });
       }
     }
-    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelCapabilities.supportsFlexProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
@@ -3401,7 +3386,7 @@ var OpenAIResponsesLanguageModel = class {
       });
       delete baseArgs.service_tier;
     }
-    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelCapabilities.supportsPriorityProcessing) {
       warnings.push({
         type: "unsupported-setting",
         setting: "serviceTier",
@@ -4258,32 +4243,6 @@ function isResponseAnnotationAddedChunk(chunk) {
 function isErrorChunk(chunk) {
   return chunk.type === "error";
 }
-function getResponsesModelConfig(modelId) {
-  const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
-  const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const defaults = {
-    systemMessageMode: "system",
-    supportsFlexProcessing: supportsFlexProcessing2,
-    supportsPriorityProcessing: supportsPriorityProcessing2
-  };
-  if (modelId.startsWith("gpt-5-chat")) {
-    return {
-      ...defaults,
-      isReasoningModel: false
-    };
-  }
-  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
-    return {
-      ...defaults,
-      isReasoningModel: true,
-      systemMessageMode: "developer"
-    };
-  }
-  return {
-    ...defaults,
-    isReasoningModel: false
-  };
-}
 function mapWebSearchOutput(action) {
   var _a;
   switch (action.type) {
@@ -4673,7 +4632,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.77" : "0.0.0-test";
+var VERSION = true ? "2.0.79" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {