@ai-sdk/openai 3.0.0-beta.56 → 3.0.0-beta.58

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -427,7 +427,7 @@ var openaiChatLanguageModelOptions = lazySchema2(
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+ reasoningEffort: z3.enum(["none", "minimal", "low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
@@ -1636,7 +1636,16 @@ var openaiImageResponseSchema = lazySchema7(
  b64_json: z8.string(),
  revised_prompt: z8.string().nullish()
  })
- )
+ ),
+ usage: z8.object({
+ input_tokens: z8.number().nullish(),
+ output_tokens: z8.number().nullish(),
+ total_tokens: z8.number().nullish(),
+ input_tokens_details: z8.object({
+ image_tokens: z8.number().nullish(),
+ text_tokens: z8.number().nullish()
+ }).nullish()
+ }).nullish()
  })
  )
  );
@@ -1677,7 +1686,7 @@ var OpenAIImageModel = class {
  headers,
  abortSignal
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c, _d, _e, _f, _g;
  const warnings = [];
  if (aspectRatio != null) {
  warnings.push({
@@ -1714,6 +1723,11 @@ var OpenAIImageModel = class {
  return {
  images: response.data.map((item) => item.b64_json),
  warnings,
+ usage: response.usage != null ? {
+ inputTokens: (_e = response.usage.input_tokens) != null ? _e : void 0,
+ outputTokens: (_f = response.usage.output_tokens) != null ? _f : void 0,
+ totalTokens: (_g = response.usage.total_tokens) != null ? _g : void 0
+ } : void 0,
  response: {
  timestamp: currentDate,
  modelId: this.modelId,
@@ -3062,7 +3076,11 @@ var openaiResponsesReasoningModelIds = [
  "gpt-5-nano",
  "gpt-5-nano-2025-08-07",
  "gpt-5-pro",
- "gpt-5-pro-2025-10-06"
+ "gpt-5-pro-2025-10-06",
+ "gpt-5.1",
+ "gpt-5.1-chat-latest",
+ "gpt-5.1-codex-mini",
+ "gpt-5.1-codex"
  ];
  var openaiResponsesModelIds = [
  "gpt-4.1",