@ai-sdk/openai 3.0.0-beta.56 → 3.0.0-beta.58
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +3 -3
- package/dist/index.d.ts +3 -3
- package/dist/index.js +23 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +23 -5
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -3
- package/dist/internal/index.d.ts +3 -3
- package/dist/internal/index.js +22 -4
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +22 -4
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -435,7 +435,7 @@ var openaiChatLanguageModelOptions = lazySchema2(
       /**
        * Reasoning effort for reasoning models. Defaults to `medium`.
        */
-      reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+      reasoningEffort: z3.enum(["none", "minimal", "low", "medium", "high"]).optional(),
       /**
        * Maximum number of completion tokens to generate. Useful for reasoning models.
        */
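The chat options schema now accepts "none" as a reasoning effort value. Below is a minimal sketch of passing it through provider options, assuming the standard generateText API from the ai package; the model id and prompt are placeholders, not part of this diff.

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: "none" is now accepted by the chat options schema shown above.
// Model id and prompt are placeholders.
const { text } = await generateText({
  model: openai.chat("gpt-5.1"),
  prompt: "Give me a one-line summary of this change.",
  providerOptions: {
    openai: { reasoningEffort: "none" },
  },
});
console.log(text);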
@@ -1644,7 +1644,16 @@ var openaiImageResponseSchema = lazySchema7(
           b64_json: z8.string(),
           revised_prompt: z8.string().nullish()
         })
-      )
+      ),
+      usage: z8.object({
+        input_tokens: z8.number().nullish(),
+        output_tokens: z8.number().nullish(),
+        total_tokens: z8.number().nullish(),
+        input_tokens_details: z8.object({
+          image_tokens: z8.number().nullish(),
+          text_tokens: z8.number().nullish()
+        }).nullish()
+      }).nullish()
     })
   )
 );
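For reference, the updated schema accepts an image response whose usage block looks roughly like the object below. Only fields visible in this hunk are shown and the numbers are made up; every usage field is nullish, so responses without usage (or without input_tokens_details) still parse.

// Made-up response fragment matching the fields parsed above.
export const sampleImageResponse = {
  data: [{ b64_json: "<base64-encoded image>", revised_prompt: null }],
  usage: {
    input_tokens: 123,
    output_tokens: 1056,
    total_tokens: 1179,
    input_tokens_details: { image_tokens: 0, text_tokens: 123 },
  },
};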
@@ -1685,7 +1694,7 @@ var OpenAIImageModel = class {
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f, _g;
     const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
@@ -1722,6 +1731,11 @@ var OpenAIImageModel = class {
     return {
       images: response.data.map((item) => item.b64_json),
       warnings,
+      usage: response.usage != null ? {
+        inputTokens: (_e = response.usage.input_tokens) != null ? _e : void 0,
+        outputTokens: (_f = response.usage.output_tokens) != null ? _f : void 0,
+        totalTokens: (_g = response.usage.total_tokens) != null ? _g : void 0
+      } : void 0,
       response: {
         timestamp: currentDate,
         modelId: this.modelId,
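The provider's image result now carries camelCase token counts mapped from the parsed usage block. Whether they surface on the generateImage result depends on the paired ai release; the sketch below assumes the helper is still exported as experimental_generateImage and that it forwards the provider's usage object as result.usage. Model id and prompt are placeholders.

import { experimental_generateImage as generateImage } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch only: assumes result.usage is forwarded by the ai core. Each count is
// undefined when the API omits it (see the null checks in the diff above).
const result = await generateImage({
  model: openai.image("gpt-image-1"),
  prompt: "a watercolor fox in the rain",
});
console.log(result.usage?.inputTokens, result.usage?.outputTokens, result.usage?.totalTokens);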
@@ -3143,7 +3157,11 @@ var openaiResponsesReasoningModelIds = [
   "gpt-5-nano",
   "gpt-5-nano-2025-08-07",
   "gpt-5-pro",
-  "gpt-5-pro-2025-10-06"
+  "gpt-5-pro-2025-10-06",
+  "gpt-5.1",
+  "gpt-5.1-chat-latest",
+  "gpt-5.1-codex-mini",
+  "gpt-5.1-codex"
 ];
 var openaiResponsesModelIds = [
   "gpt-4.1",
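The gpt-5.1 ids are added to openaiResponsesReasoningModelIds, so the provider applies its reasoning-model handling to them. They can be used as ordinary Responses API model ids; a minimal sketch with a placeholder prompt:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: the new ids resolve through the Responses API model factory.
const { text } = await generateText({
  model: openai.responses("gpt-5.1-codex-mini"),
  prompt: "Write a haiku about semantic versioning.",
});
console.log(text);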
@@ -4992,7 +5010,7 @@ var OpenAITranscriptionModel = class {
 };

 // src/version.ts
-var VERSION = true ? "3.0.0-beta.56" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.58" : "0.0.0-test";

 // src/openai-provider.ts
 function createOpenAI(options = {}) {