@ai-sdk/openai 3.0.0-beta.100 → 3.0.0-beta.102
This diff compares the published contents of the two package versions as they appear in their public registries and is provided for informational purposes only.
- package/CHANGELOG.md +14 -0
- package/dist/index.js +5 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +5 -5
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +4 -4
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +4 -4
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,19 @@
 # @ai-sdk/openai
 
+## 3.0.0-beta.102
+
+### Patch Changes
+
+- Updated dependencies [763d04a]
+  - @ai-sdk/provider-utils@4.0.0-beta.52
+
+## 3.0.0-beta.101
+
+### Patch Changes
+
+- 3220329: fix openai responses input: process all provider tool outputs (shell/apply_patch) so parallel tool results aren’t dropped and apply_patch outputs are forwarded.
+- 5648ec0: Add GPT-5.2 support for non-reasoning parameters (temperature, topP, logProbs) when reasoningEffort is none.
+
 ## 3.0.0-beta.100
 
 ### Patch Changes
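
The 5648ec0 entry means GPT-5.2 now accepts the standard sampling parameters whenever reasoning is switched off. A minimal usage sketch with the AI SDK, assuming the `gpt-5.2` model id and the `reasoningEffort` provider option named in the changelog (exact option shapes may differ across beta releases):

```ts
// Sketch only (not from this package): exercising the non-reasoning parameters
// that the 5648ec0 change enables for GPT-5.2 when reasoningEffort is "none".
// The model id "gpt-5.2" and the chosen values are assumptions for illustration.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const { text } = await generateText({
    model: openai('gpt-5.2'),
    prompt: 'Summarize this release in one sentence.',
    temperature: 0.3, // forwarded only when reasoning is disabled
    topP: 0.9,
    providerOptions: {
      openai: { reasoningEffort: 'none' },
    },
  });
  console.log(text);
}

main().catch(console.error);
```

The matching capability flag appears in the first `package/dist/index.js` hunk below as `supportsNonReasoningParameters`.
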
package/dist/index.js
CHANGED
@@ -57,7 +57,7 @@ function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
   const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
-  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
     supportsFlexProcessing,
@@ -2619,7 +2619,7 @@ async function convertToOpenAIResponsesInput({
           call_id: part.toolCallId,
           output: parsedOutput.output
         });
-
+        continue;
       }
       if (hasShellTool && resolvedToolName === "shell" && output.type === "json") {
         const parsedOutput = await (0, import_provider_utils23.validateTypes)({
@@ -2638,7 +2638,7 @@ async function convertToOpenAIResponsesInput({
             }
           }))
         });
-
+        continue;
       }
       if (hasApplyPatchTool && part.toolName === "apply_patch" && output.type === "json") {
         const parsedOutput = await (0, import_provider_utils23.validateTypes)({
@@ -2651,7 +2651,7 @@
           status: parsedOutput.status,
           output: parsedOutput.output
         });
-
+        continue;
       }
       let contentValue;
       switch (output.type) {
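
The three `continue;` additions above correspond to the 3220329 changelog entry: each provider-executed tool result (shell, apply_patch) is converted once, and the loop then moves straight to the next part instead of falling through to the generic conversion that follows. A simplified, hypothetical sketch of that control flow (the types and item shapes are placeholders, not the package's internals):

```ts
// Hypothetical sketch of the loop pattern behind the added `continue;` lines:
// each provider-executed tool result gets its own conversion, and `continue`
// skips the generic fallback so the next result in the same message is still
// handled on its own.
type ToolResultPart = { toolName: string; toolCallId: string; output: unknown };

function convertToolResultParts(parts: ToolResultPart[]): unknown[] {
  const items: unknown[] = [];
  for (const part of parts) {
    if (part.toolName === 'shell') {
      // placeholder item shape; the real Responses API item differs
      items.push({ kind: 'shell-output', callId: part.toolCallId, output: part.output });
      continue; // skip the fallback below and move to the next part
    }
    if (part.toolName === 'apply_patch') {
      items.push({ kind: 'apply-patch-output', callId: part.toolCallId, output: part.output });
      continue;
    }
    // generic handling for ordinary tool results
    items.push({ kind: 'function-output', callId: part.toolCallId, output: part.output });
  }
  return items;
}
```

The patch itself only adds the early `continue;` in each branch; the sketch just shows where that statement sits in the surrounding loop.
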
@@ -5493,7 +5493,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.100" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.102" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {