@ai-sdk/openai 2.0.0-canary.7 → 2.0.0-canary.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -515,13 +515,13 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
- } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;
  warnings.push({
  type: "unsupported-setting",
  setting: "temperature",
- details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+ details: "temperature is not supported for the search preview models and has been removed."
  });
  }
  }
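
With the broadened check, gpt-4o-mini-search-preview now gets the same treatment as gpt-4o-search-preview: temperature is stripped and surfaced as a warning instead of being sent to the API. A minimal consumer-side sketch of that behavior, assuming a standard AI SDK setup (generateText and the callable provider are outside this diff):

import { createOpenAI } from "@ai-sdk/openai";
import { generateText } from "ai";

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// temperature is removed for both search preview models; the call
// succeeds and an "unsupported-setting" warning is reported instead.
const { text, warnings } = await generateText({
  model: openai("gpt-4o-mini-search-preview"),
  prompt: "Summarize today's AI news.",
  temperature: 0.7,
});
console.log(warnings);
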
@@ -544,7 +544,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
@@ -564,10 +564,23 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
@@ -582,21 +595,11 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: choice.message.content != null ? { type: "text", text: choice.message.content } : void 0,
- toolCalls: (_c = choice.message.tool_calls) == null ? void 0 : _c.map((toolCall) => {
- var _a2;
- return {
- type: "tool-call",
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.prompt_tokens) != null ? _e : void 0,
- outputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.completion_tokens) != null ? _g : void 0
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
  },
  request: { body },
  response: {
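
doGenerate now returns one ordered content array instead of separate text and toolCalls fields. A sketch of the shape consumers can expect; the ContentPart type name is illustrative, not an export of this bundle:

type ContentPart =
  | { type: "text"; text: string }
  | {
      type: "tool-call";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      args: string; // JSON-encoded function arguments
    };

// Recovering the old `text` field from the new unified array:
function extractText(content: ContentPart[]): string {
  return content
    .flatMap((part) => (part.type === "text" ? [part.text] : []))
    .join("");
}
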
@@ -644,6 +647,9 @@ var OpenAIChatLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
@@ -801,8 +807,7 @@ var OpenAIChatLanguageModel = class {
  })
  ),
  request: { body },
- response: { headers: responseHeaders },
- warnings
+ response: { headers: responseHeaders }
  };
  }
  };
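
Warnings moved off the doStream return value and into the stream itself, as a leading "stream-start" part. A hedged sketch of picking them back up; the part type here is simplified from the SDK's stream-part union:

async function readWarnings(
  stream: ReadableStream<{ type: string; warnings?: unknown[] }>,
): Promise<unknown[] | undefined> {
  const reader = stream.getReader();
  const { value: first } = await reader.read();
  reader.releaseLock();
  // previously: const { warnings } = await model.doStream(...)
  return first?.type === "stream-start" ? first.warnings : undefined;
}
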
@@ -903,7 +908,7 @@ var openaiChatChunkSchema = z3.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
+ return modelId.startsWith("o");
  }
  function isAudioModel(modelId) {
  return modelId.startsWith("gpt-4o-audio-preview");
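
isReasoningModel is now a plain prefix test: it still matches the o1 and o3 ids, plus any future o-series id, but note it is deliberately broad and would match any model id that begins with "o". For illustration:

const isReasoningModel = (modelId: string) => modelId.startsWith("o");

console.log(isReasoningModel("o1-mini")); // true (matched before as well)
console.log(isReasoningModel("o4-mini")); // true (newly matched)
console.log(isReasoningModel("gpt-4o")); // false
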
@@ -1134,7 +1139,7 @@ var OpenAICompletionLanguageModel = class {
  });
  const choice = response.choices[0];
  return {
- text: { type: "text", text: choice.text },
+ content: [{ type: "text", text: choice.text }],
  usage: {
  inputTokens: response.usage.prompt_tokens,
  outputTokens: response.usage.completion_tokens
@@ -1182,6 +1187,9 @@ var OpenAICompletionLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  if (!chunk.success) {
  finishReason = "error";
@@ -1233,9 +1241,8 @@ var OpenAICompletionLanguageModel = class {
  }
  })
  ),
- response: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
@@ -1624,16 +1631,120 @@ var openaiTranscriptionResponseSchema = z7.object({
  ).nullish()
  });

- // src/responses/openai-responses-language-model.ts
+ // src/openai-speech-model.ts
  import {
  combineHeaders as combineHeaders6,
- createEventSourceResponseHandler as createEventSourceResponseHandler3,
- createJsonResponseHandler as createJsonResponseHandler6,
- generateId as generateId2,
+ createBinaryResponseHandler,
  parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
  import { z as z8 } from "zod";
+ var OpenAIProviderOptionsSchema = z8.object({
+ instructions: z8.string().nullish(),
+ speed: z8.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ text,
+ voice = "alloy",
+ outputFormat = "mp3",
+ speed,
+ instructions,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = parseProviderOptions3({
+ provider: "openai",
+ providerOptions,
+ schema: OpenAIProviderOptionsSchema
+ });
+ const requestBody = {
+ model: this.modelId,
+ input: text,
+ voice,
+ response_format: "mp3",
+ speed,
+ instructions
+ };
+ if (outputFormat) {
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+ requestBody.response_format = outputFormat;
+ } else {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "outputFormat",
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+ });
+ }
+ }
+ if (openAIOptions) {
+ const speechModelOptions = {};
+ for (const key in speechModelOptions) {
+ const value = speechModelOptions[key];
+ if (value !== void 0) {
+ requestBody[key] = value;
+ }
+ }
+ }
+ return {
+ requestBody,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { requestBody, warnings } = this.getArgs(options);
+ const {
+ value: audio,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postJsonToApi5({
+ url: this.config.url({
+ path: "/audio/speech",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders6(this.config.headers(), options.headers),
+ body: requestBody,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createBinaryResponseHandler(),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ audio,
+ warnings,
+ request: {
+ body: JSON.stringify(requestBody)
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+
+ // src/responses/openai-responses-language-model.ts
+ import {
+ combineHeaders as combineHeaders7,
+ createEventSourceResponseHandler as createEventSourceResponseHandler3,
+ createJsonResponseHandler as createJsonResponseHandler6,
+ generateId as generateId2,
+ parseProviderOptions as parseProviderOptions4,
+ postJsonToApi as postJsonToApi6
+ } from "@ai-sdk/provider-utils";
+ import { z as z9 } from "zod";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
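
The new OpenAISpeechModel posts to /audio/speech and returns the binary audio via createBinaryResponseHandler. (As written, getArgs parses the instructions/speed provider options but then copies keys from an empty speechModelOptions object, so only the top-level speed and instructions call options reach the request body.) A hedged usage sketch constructing the exported class directly; the config literal, import path, and the tts-1 model id are assumptions, not part of this diff:

import { OpenAISpeechModel } from "@ai-sdk/openai/internal"; // export path is an assumption

const model = new OpenAISpeechModel("tts-1", {
  provider: "openai.speech",
  url: ({ path }) => `https://api.openai.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
});

// Unsupported formats fall back to mp3 with an "unsupported-setting" warning.
const { audio, warnings } = await model.doGenerate({
  text: "Hello from the canary release.",
  voice: "nova",
  outputFormat: "wav",
});
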
@@ -1847,6 +1958,7 @@ var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
  this.defaultObjectGenerationMode = "json";
+ this.supportsStructuredOutputs = true;
  this.modelId = modelId;
  this.config = config;
  }
@@ -1897,7 +2009,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions3({
+ const openaiOptions = parseProviderOptions4({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1972,100 +2084,109 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await postJsonToApi5({
+ } = await postJsonToApi6({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: combineHeaders6(this.config.headers(), options.headers),
+ headers: combineHeaders7(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z8.object({
- id: z8.string(),
- created_at: z8.number(),
- model: z8.string(),
- output: z8.array(
- z8.discriminatedUnion("type", [
- z8.object({
- type: z8.literal("message"),
- role: z8.literal("assistant"),
- content: z8.array(
- z8.object({
- type: z8.literal("output_text"),
- text: z8.string(),
- annotations: z8.array(
- z8.object({
- type: z8.literal("url_citation"),
- start_index: z8.number(),
- end_index: z8.number(),
- url: z8.string(),
- title: z8.string()
+ z9.object({
+ id: z9.string(),
+ created_at: z9.number(),
+ model: z9.string(),
+ output: z9.array(
+ z9.discriminatedUnion("type", [
+ z9.object({
+ type: z9.literal("message"),
+ role: z9.literal("assistant"),
+ content: z9.array(
+ z9.object({
+ type: z9.literal("output_text"),
+ text: z9.string(),
+ annotations: z9.array(
+ z9.object({
+ type: z9.literal("url_citation"),
+ start_index: z9.number(),
+ end_index: z9.number(),
+ url: z9.string(),
+ title: z9.string()
  })
  )
  })
  )
  }),
- z8.object({
- type: z8.literal("function_call"),
- call_id: z8.string(),
- name: z8.string(),
- arguments: z8.string()
+ z9.object({
+ type: z9.literal("function_call"),
+ call_id: z9.string(),
+ name: z9.string(),
+ arguments: z9.string()
  }),
- z8.object({
- type: z8.literal("web_search_call")
+ z9.object({
+ type: z9.literal("web_search_call")
  }),
- z8.object({
- type: z8.literal("computer_call")
+ z9.object({
+ type: z9.literal("computer_call")
  }),
- z8.object({
- type: z8.literal("reasoning")
+ z9.object({
+ type: z9.literal("reasoning")
  })
  ])
  ),
- incomplete_details: z8.object({ reason: z8.string() }).nullable(),
+ incomplete_details: z9.object({ reason: z9.string() }).nullable(),
  usage: usageSchema
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: output.call_id,
- toolName: output.name,
- args: output.arguments
- }));
+ const content = [];
+ for (const part of response.output) {
+ switch (part.type) {
+ case "message": {
+ for (const contentPart of part.content) {
+ content.push({
+ type: "text",
+ text: contentPart.text
+ });
+ for (const annotation of contentPart.annotations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
+ break;
+ }
+ case "function_call": {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: part.call_id,
+ toolName: part.name,
+ args: part.arguments
+ });
+ break;
+ }
+ }
+ }
  return {
- text: {
- type: "text",
- text: outputTextElements.map((content) => content.text).join("\n")
- },
- sources: outputTextElements.flatMap(
- (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2;
- return {
- type: "source",
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
- url: annotation.url,
- title: annotation.title
- };
- })
- ),
+ content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
- hasToolCalls: toolCalls.length > 0
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens
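
In the Responses model, url_citation annotations are now emitted inline as "source" parts next to the text they annotate, replacing the separate sources field (and toolCalls, as above). A sketch of splitting the unified array back into the old shape; the ResponsesPart type is illustrative:

type ResponsesPart =
  | { type: "text"; text: string }
  | { type: "source"; sourceType: "url"; id: string; url: string; title: string }
  | { type: "tool-call"; toolCallType: "function"; toolCallId: string; toolName: string; args: string };

function splitContent(content: ResponsesPart[]) {
  return {
    text: content.flatMap((p) => (p.type === "text" ? [p.text] : [])).join("\n"),
    sources: content.filter((p) => p.type === "source"),
    toolCalls: content.filter((p) => p.type === "tool-call"),
  };
}
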
@@ -2081,8 +2202,8 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  responseId: response.id,
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
+ reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
  }
  },
  warnings
@@ -2090,12 +2211,12 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi5({
+ const { responseHeaders, value: response } = await postJsonToApi6({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: combineHeaders6(this.config.headers(), options.headers),
+ headers: combineHeaders7(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
@@ -2121,6 +2242,9 @@ var OpenAIResponsesLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  if (!chunk.success) {
@@ -2215,84 +2339,83 @@ var OpenAIResponsesLanguageModel = class {
  })
  ),
  request: { body },
- response: { headers: responseHeaders },
- warnings
+ response: { headers: responseHeaders }
  };
  }
  };
- var usageSchema = z8.object({
- input_tokens: z8.number(),
- input_tokens_details: z8.object({ cached_tokens: z8.number().nullish() }).nullish(),
- output_tokens: z8.number(),
- output_tokens_details: z8.object({ reasoning_tokens: z8.number().nullish() }).nullish()
+ var usageSchema = z9.object({
+ input_tokens: z9.number(),
+ input_tokens_details: z9.object({ cached_tokens: z9.number().nullish() }).nullish(),
+ output_tokens: z9.number(),
+ output_tokens_details: z9.object({ reasoning_tokens: z9.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z8.object({
- type: z8.literal("response.output_text.delta"),
- delta: z8.string()
+ var textDeltaChunkSchema = z9.object({
+ type: z9.literal("response.output_text.delta"),
+ delta: z9.string()
  });
- var responseFinishedChunkSchema = z8.object({
- type: z8.enum(["response.completed", "response.incomplete"]),
- response: z8.object({
- incomplete_details: z8.object({ reason: z8.string() }).nullish(),
+ var responseFinishedChunkSchema = z9.object({
+ type: z9.enum(["response.completed", "response.incomplete"]),
+ response: z9.object({
+ incomplete_details: z9.object({ reason: z9.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = z8.object({
- type: z8.literal("response.created"),
- response: z8.object({
- id: z8.string(),
- created_at: z8.number(),
- model: z8.string()
+ var responseCreatedChunkSchema = z9.object({
+ type: z9.literal("response.created"),
+ response: z9.object({
+ id: z9.string(),
+ created_at: z9.number(),
+ model: z9.string()
  })
  });
- var responseOutputItemDoneSchema = z8.object({
- type: z8.literal("response.output_item.done"),
- output_index: z8.number(),
- item: z8.discriminatedUnion("type", [
- z8.object({
- type: z8.literal("message")
+ var responseOutputItemDoneSchema = z9.object({
+ type: z9.literal("response.output_item.done"),
+ output_index: z9.number(),
+ item: z9.discriminatedUnion("type", [
+ z9.object({
+ type: z9.literal("message")
  }),
- z8.object({
- type: z8.literal("function_call"),
- id: z8.string(),
- call_id: z8.string(),
- name: z8.string(),
- arguments: z8.string(),
- status: z8.literal("completed")
+ z9.object({
+ type: z9.literal("function_call"),
+ id: z9.string(),
+ call_id: z9.string(),
+ name: z9.string(),
+ arguments: z9.string(),
+ status: z9.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z8.object({
- type: z8.literal("response.function_call_arguments.delta"),
- item_id: z8.string(),
- output_index: z8.number(),
- delta: z8.string()
+ var responseFunctionCallArgumentsDeltaSchema = z9.object({
+ type: z9.literal("response.function_call_arguments.delta"),
+ item_id: z9.string(),
+ output_index: z9.number(),
+ delta: z9.string()
  });
- var responseOutputItemAddedSchema = z8.object({
- type: z8.literal("response.output_item.added"),
- output_index: z8.number(),
- item: z8.discriminatedUnion("type", [
- z8.object({
- type: z8.literal("message")
+ var responseOutputItemAddedSchema = z9.object({
+ type: z9.literal("response.output_item.added"),
+ output_index: z9.number(),
+ item: z9.discriminatedUnion("type", [
+ z9.object({
+ type: z9.literal("message")
  }),
- z8.object({
- type: z8.literal("function_call"),
- id: z8.string(),
- call_id: z8.string(),
- name: z8.string(),
- arguments: z8.string()
+ z9.object({
+ type: z9.literal("function_call"),
+ id: z9.string(),
+ call_id: z9.string(),
+ name: z9.string(),
+ arguments: z9.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z8.object({
- type: z8.literal("response.output_text.annotation.added"),
- annotation: z8.object({
- type: z8.literal("url_citation"),
- url: z8.string(),
- title: z8.string()
+ var responseAnnotationAddedSchema = z9.object({
+ type: z9.literal("response.output_text.annotation.added"),
+ annotation: z9.object({
+ type: z9.literal("url_citation"),
+ url: z9.string(),
+ title: z9.string()
  })
  });
- var openaiResponsesChunkSchema = z8.union([
+ var openaiResponsesChunkSchema = z9.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2300,7 +2423,7 @@ var openaiResponsesChunkSchema = z8.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- z8.object({ type: z8.string() }).passthrough()
+ z9.object({ type: z9.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2345,15 +2468,15 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z8.object({
- metadata: z8.any().nullish(),
- parallelToolCalls: z8.boolean().nullish(),
- previousResponseId: z8.string().nullish(),
- store: z8.boolean().nullish(),
- user: z8.string().nullish(),
- reasoningEffort: z8.string().nullish(),
- strictSchemas: z8.boolean().nullish(),
- instructions: z8.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z9.object({
+ metadata: z9.any().nullish(),
+ parallelToolCalls: z9.boolean().nullish(),
+ previousResponseId: z9.string().nullish(),
+ store: z9.boolean().nullish(),
+ user: z9.string().nullish(),
+ reasoningEffort: z9.string().nullish(),
+ strictSchemas: z9.boolean().nullish(),
+ instructions: z9.string().nullish()
  });
  export {
  OpenAIChatLanguageModel,
@@ -2361,6 +2484,7 @@ export {
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
+ OpenAISpeechModel,
  OpenAITranscriptionModel,
  modelMaxImagesPerCall,
  openaiProviderOptions