@ai-sdk/openai 2.0.0-canary.6 → 2.0.0-canary.8

This diff shows the published contents of two package versions as they appear in their public registries. It is provided for informational purposes only.
@@ -564,10 +564,23 @@ var OpenAIChatLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-   const { messages: rawPrompt, ...rawSettings } = body;
    const choice = response.choices[0];
-   const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
-   const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+   const content = [];
+   const text = choice.message.content;
+   if (text != null && text.length > 0) {
+     content.push({ type: "text", text });
+   }
+   for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+     content.push({
+       type: "tool-call",
+       toolCallType: "function",
+       toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+       toolName: toolCall.function.name,
+       args: toolCall.function.arguments
+     });
+   }
+   const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+   const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
    const providerMetadata = { openai: {} };
    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
@@ -582,16 +595,7 @@ var OpenAIChatLanguageModel = class {
      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
    }
    return {
-     text: (_c = choice.message.content) != null ? _c : void 0,
-     toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
-       var _a2;
-       return {
-         toolCallType: "function",
-         toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
-         toolName: toolCall.function.name,
-         args: toolCall.function.arguments
-       };
-     }),
+     content,
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      usage: {
        inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
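
Note: the two hunks above replace the separate text and toolCalls fields on the chat model's doGenerate result with a single ordered content array of typed parts. A minimal consumer sketch; the part types are reconstructed from the diff, not imported from the SDK:

    type ContentPart =
      | { type: "text"; text: string }
      | {
          type: "tool-call";
          toolCallType: "function";
          toolCallId: string;
          toolName: string;
          args: string; // JSON-encoded arguments string, as pushed above
        };

    // Walk the ordered parts instead of reading result.text / result.toolCalls.
    function summarize(content: ContentPart[]): string {
      return content
        .map((part) =>
          part.type === "text"
            ? part.text
            : `[tool-call ${part.toolName}(${part.args})]`
        )
        .join("\n");
    }
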
@@ -643,6 +647,9 @@ var OpenAIChatLanguageModel = class {
    return {
      stream: response.pipeThrough(
        new TransformStream({
+         start(controller) {
+           controller.enqueue({ type: "stream-start", warnings });
+         },
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
            if (!chunk.success) {
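
Note: doStream now emits warnings as the first part of the stream itself, via a stream-start part enqueued in the TransformStream's start() callback. A minimal sketch of the pattern, with part shapes assumed from the diff:

    type Warning = { type: string; [key: string]: unknown };
    type StreamPart =
      | { type: "stream-start"; warnings: Warning[] }
      | { type: "text"; text: string };

    function withStreamStart(warnings: Warning[]) {
      return new TransformStream<string, StreamPart>({
        start(controller) {
          // Runs once, before any transform() call, so stream-start is
          // guaranteed to be the first part a consumer sees.
          controller.enqueue({ type: "stream-start", warnings });
        },
        transform(chunk, controller) {
          controller.enqueue({ type: "text", text: chunk });
        },
      });
    }
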
@@ -695,8 +702,8 @@ var OpenAIChatLanguageModel = class {
          const delta = choice.delta;
          if (delta.content != null) {
            controller.enqueue({
-             type: "text-delta",
-             textDelta: delta.content
+             type: "text",
+             text: delta.content
            });
          }
          const mappedLogprobs = mapOpenAIChatLogProbsOutput(
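
Note: streamed text parts are renamed from { type: "text-delta", textDelta } to { type: "text", text }. A hypothetical consumer updated for the rename:

    async function collectText(
      stream: ReadableStream<{ type: string; text?: string }>
    ): Promise<string> {
      let result = "";
      const reader = stream.getReader();
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        if (value && value.type === "text" && value.text != null) {
          result += value.text; // previously: value.textDelta
        }
      }
      return result;
    }
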
@@ -800,8 +807,7 @@ var OpenAIChatLanguageModel = class {
        })
      ),
      request: { body },
-     response: { headers: responseHeaders },
-     warnings
+     response: { headers: responseHeaders }
    };
  }
};
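
Note: with this hunk, warnings disappear from the object returned by doStream and are only reachable through the stream-start part shown earlier. A sketch of recovering them on the consumer side; this consumes the first part, so real code would buffer it:

    async function readWarnings(
      stream: ReadableStream<{ type: string; warnings?: unknown[] }>
    ): Promise<unknown[]> {
      const reader = stream.getReader();
      const { value } = await reader.read(); // consumes the stream-start part
      reader.releaseLock();
      return value && value.type === "stream-start" ? value.warnings ?? [] : [];
    }
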
@@ -1133,7 +1139,7 @@ var OpenAICompletionLanguageModel = class {
    });
    const choice = response.choices[0];
    return {
-     text: choice.text,
+     content: [{ type: "text", text: choice.text }],
      usage: {
        inputTokens: response.usage.prompt_tokens,
        outputTokens: response.usage.completion_tokens
@@ -1181,6 +1187,9 @@ var OpenAICompletionLanguageModel = class {
    return {
      stream: response.pipeThrough(
        new TransformStream({
+         start(controller) {
+           controller.enqueue({ type: "stream-start", warnings });
+         },
          transform(chunk, controller) {
            if (!chunk.success) {
              finishReason = "error";
@@ -1210,8 +1219,8 @@ var OpenAICompletionLanguageModel = class {
          }
          if ((choice == null ? void 0 : choice.text) != null) {
            controller.enqueue({
-             type: "text-delta",
-             textDelta: choice.text
+             type: "text",
+             text: choice.text
            });
          }
          const mappedLogprobs = mapOpenAICompletionLogProbs(
@@ -1232,9 +1241,8 @@ var OpenAICompletionLanguageModel = class {
          }
        })
      ),
-     response: { headers: responseHeaders },
-     warnings,
-     request: { body: JSON.stringify(body) }
+     request: { body },
+     response: { headers: responseHeaders }
    };
  }
};
@@ -1295,7 +1303,7 @@ import {
  import { z as z5 } from "zod";
  var OpenAIEmbeddingModel = class {
    constructor(modelId, settings, config) {
-     this.specificationVersion = "v1";
+     this.specificationVersion = "v2";
      this.modelId = modelId;
      this.settings = settings;
      this.config = config;
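
Note: the embedding model now declares specification version "v2", which callers coded against the v1 interface should treat as a breaking signal. A defensive check, purely illustrative and not part of the SDK:

    function assertEmbeddingV2(model: { specificationVersion: string }): void {
      if (model.specificationVersion !== "v2") {
        throw new Error(
          `expected embedding model spec v2, got ${model.specificationVersion}`
        );
      }
    }
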
@@ -1324,7 +1332,11 @@ var OpenAIEmbeddingModel = class {
        values
      });
    }
-   const { responseHeaders, value: response } = await postJsonToApi3({
+   const {
+     responseHeaders,
+     value: response,
+     rawValue
+   } = await postJsonToApi3({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
@@ -1347,7 +1359,7 @@ var OpenAIEmbeddingModel = class {
    return {
      embeddings: response.data.map((item) => item.embedding),
      usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-     rawResponse: { headers: responseHeaders }
+     response: { headers: responseHeaders, body: rawValue }
    };
  }
};
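
Note: the embed result's rawResponse field is replaced by response, which now also carries the raw provider body (postJsonToApi's rawValue). A sketch of the new result shape, reconstructed from the hunk:

    interface EmbedResult {
      embeddings: number[][];
      usage?: { tokens: number };
      response?: { headers?: Record<string, string>; body?: unknown };
    }

    function logRawBody(result: EmbedResult): void {
      // previously: result.rawResponse.headers; headers and body now live together
      console.log(result.response?.headers, result.response?.body);
    }
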
@@ -1453,7 +1465,7 @@ import {
    postFormDataToApi
  } from "@ai-sdk/provider-utils";
  import { z as z7 } from "zod";
- var OpenAIProviderOptionsSchema = z7.object({
+ var openAIProviderOptionsSchema = z7.object({
    include: z7.array(z7.string()).nullish(),
    language: z7.string().nullish(),
    prompt: z7.string().nullish(),
@@ -1538,7 +1550,7 @@ var OpenAITranscriptionModel = class {
    const openAIOptions = parseProviderOptions2({
      provider: "openai",
      providerOptions,
-     schema: OpenAIProviderOptionsSchema
+     schema: openAIProviderOptionsSchema
    });
    const formData = new FormData();
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
@@ -1619,16 +1631,120 @@ var openaiTranscriptionResponseSchema = z7.object({
    ).nullish()
  });

- // src/responses/openai-responses-language-model.ts
+ // src/openai-speech-model.ts
  import {
    combineHeaders as combineHeaders6,
-   createEventSourceResponseHandler as createEventSourceResponseHandler3,
-   createJsonResponseHandler as createJsonResponseHandler6,
-   generateId as generateId2,
+   createBinaryResponseHandler,
    parseProviderOptions as parseProviderOptions3,
    postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
  import { z as z8 } from "zod";
+ var OpenAIProviderOptionsSchema = z8.object({
+   instructions: z8.string().nullish(),
+   speed: z8.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+   constructor(modelId, config) {
+     this.modelId = modelId;
+     this.config = config;
+     this.specificationVersion = "v1";
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     text,
+     voice = "alloy",
+     outputFormat = "mp3",
+     speed,
+     instructions,
+     providerOptions
+   }) {
+     const warnings = [];
+     const openAIOptions = parseProviderOptions3({
+       provider: "openai",
+       providerOptions,
+       schema: OpenAIProviderOptionsSchema
+     });
+     const requestBody = {
+       model: this.modelId,
+       input: text,
+       voice,
+       response_format: "mp3",
+       speed,
+       instructions
+     };
+     if (outputFormat) {
+       if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+         requestBody.response_format = outputFormat;
+       } else {
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "outputFormat",
+           details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+         });
+       }
+     }
+     if (openAIOptions) {
+       const speechModelOptions = {};
+       for (const key in speechModelOptions) {
+         const value = speechModelOptions[key];
+         if (value !== void 0) {
+           requestBody[key] = value;
+         }
+       }
+     }
+     return {
+       requestBody,
+       warnings
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c;
+     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+     const { requestBody, warnings } = this.getArgs(options);
+     const {
+       value: audio,
+       responseHeaders,
+       rawValue: rawResponse
+     } = await postJsonToApi5({
+       url: this.config.url({
+         path: "/audio/speech",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders6(this.config.headers(), options.headers),
+       body: requestBody,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createBinaryResponseHandler(),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       audio,
+       warnings,
+       request: {
+         body: JSON.stringify(requestBody)
+       },
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders,
+         body: rawResponse
+       }
+     };
+   }
+ };
+
+ // src/responses/openai-responses-language-model.ts
+ import {
+   combineHeaders as combineHeaders7,
+   createEventSourceResponseHandler as createEventSourceResponseHandler3,
+   createJsonResponseHandler as createJsonResponseHandler6,
+   generateId as generateId2,
+   parseProviderOptions as parseProviderOptions4,
+   postJsonToApi as postJsonToApi6
+ } from "@ai-sdk/provider-utils";
+ import { z as z9 } from "zod";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
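
Note: this hunk introduces OpenAISpeechModel (src/openai-speech-model.ts), which posts to /audio/speech and returns binary audio. Observe that the options-merging loop iterates the empty speechModelOptions object, so in this version the parsed openAIOptions values never reach the request body; speed and instructions still flow in through the top-level call options. A hedged usage sketch; the openai.speech() factory and the experimental_generateSpeech helper are assumptions based on the AI SDK's usual wiring, not shown in this diff:

    import { experimental_generateSpeech as generateSpeech } from "ai";
    import { openai } from "@ai-sdk/openai";

    async function speak() {
      // Assumed call shape; the diff only shows the provider-side model class.
      const { audio } = await generateSpeech({
        model: openai.speech("tts-1"), // assumed factory method
        text: "Hello from the canary build.",
        voice: "alloy", // matches the default in getArgs above
        outputFormat: "mp3", // unsupported formats fall back to mp3 with a warning
      });
      return audio;
    }
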
@@ -1892,7 +2008,7 @@ var OpenAIResponsesLanguageModel = class {
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
-   const openaiOptions = parseProviderOptions3({
+   const openaiOptions = parseProviderOptions4({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
@@ -1967,95 +2083,109 @@ var OpenAIResponsesLanguageModel = class {
    };
  }
  async doGenerate(options) {
-   var _a, _b, _c, _d, _e;
+   var _a, _b, _c, _d, _e, _f, _g, _h;
    const { args: body, warnings } = this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
-   } = await postJsonToApi5({
+   } = await postJsonToApi6({
      url: this.config.url({
        path: "/responses",
        modelId: this.modelId
      }),
-     headers: combineHeaders6(this.config.headers(), options.headers),
+     headers: combineHeaders7(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler6(
-       z8.object({
-         id: z8.string(),
-         created_at: z8.number(),
-         model: z8.string(),
-         output: z8.array(
-           z8.discriminatedUnion("type", [
-             z8.object({
-               type: z8.literal("message"),
-               role: z8.literal("assistant"),
-               content: z8.array(
-                 z8.object({
-                   type: z8.literal("output_text"),
-                   text: z8.string(),
-                   annotations: z8.array(
-                     z8.object({
-                       type: z8.literal("url_citation"),
-                       start_index: z8.number(),
-                       end_index: z8.number(),
-                       url: z8.string(),
-                       title: z8.string()
+       z9.object({
+         id: z9.string(),
+         created_at: z9.number(),
+         model: z9.string(),
+         output: z9.array(
+           z9.discriminatedUnion("type", [
+             z9.object({
+               type: z9.literal("message"),
+               role: z9.literal("assistant"),
+               content: z9.array(
+                 z9.object({
+                   type: z9.literal("output_text"),
+                   text: z9.string(),
+                   annotations: z9.array(
+                     z9.object({
+                       type: z9.literal("url_citation"),
+                       start_index: z9.number(),
+                       end_index: z9.number(),
+                       url: z9.string(),
+                       title: z9.string()
                      })
                    )
                  })
               )
             }),
-           z8.object({
-             type: z8.literal("function_call"),
-             call_id: z8.string(),
-             name: z8.string(),
-             arguments: z8.string()
+           z9.object({
+             type: z9.literal("function_call"),
+             call_id: z9.string(),
+             name: z9.string(),
+             arguments: z9.string()
            }),
-           z8.object({
-             type: z8.literal("web_search_call")
+           z9.object({
+             type: z9.literal("web_search_call")
            }),
-           z8.object({
-             type: z8.literal("computer_call")
+           z9.object({
+             type: z9.literal("computer_call")
            }),
-           z8.object({
-             type: z8.literal("reasoning")
+           z9.object({
+             type: z9.literal("reasoning")
            })
          ])
        ),
-       incomplete_details: z8.object({ reason: z8.string() }).nullable(),
+       incomplete_details: z9.object({ reason: z9.string() }).nullable(),
        usage: usageSchema
      })
    ),
    abortSignal: options.abortSignal,
    fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
-   toolCallType: "function",
-   toolCallId: output.call_id,
-   toolName: output.name,
-   args: output.arguments
- }));
+ const content = [];
+ for (const part of response.output) {
+   switch (part.type) {
+     case "message": {
+       for (const contentPart of part.content) {
+         content.push({
+           type: "text",
+           text: contentPart.text
+         });
+         for (const annotation of contentPart.annotations) {
+           content.push({
+             type: "source",
+             sourceType: "url",
+             id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+             url: annotation.url,
+             title: annotation.title
+           });
+         }
+       }
+       break;
+     }
+     case "function_call": {
+       content.push({
+         type: "tool-call",
+         toolCallType: "function",
+         toolCallId: part.call_id,
+         toolName: part.name,
+         args: part.arguments
+       });
+       break;
+     }
+   }
+ }
  return {
-   text: outputTextElements.map((content) => content.text).join("\n"),
-   sources: outputTextElements.flatMap(
-     (content) => content.annotations.map((annotation) => {
-       var _a2, _b2, _c2;
-       return {
-         sourceType: "url",
-         id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
-         url: annotation.url,
-         title: annotation.title
-       };
-     })
-   ),
+   content,
    finishReason: mapOpenAIResponseFinishReason({
-     finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
-     hasToolCalls: toolCalls.length > 0
+     finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+     hasToolCalls: content.some((part) => part.type === "tool-call")
    }),
-   toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
    usage: {
      inputTokens: response.usage.input_tokens,
      outputTokens: response.usage.output_tokens
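
Note: like the chat model, the responses model's doGenerate now returns one ordered content array; url citations surface as "source" parts immediately after the text part they annotate, and tool calls become "tool-call" parts. A sketch of filtering citations, with part types reconstructed from the loop above:

    type ResponsesPart =
      | { type: "text"; text: string }
      | { type: "source"; sourceType: "url"; id: string; url: string; title: string }
      | { type: "tool-call"; toolCallId: string; toolName: string; args: string };

    function extractCitations(content: ResponsesPart[]): string[] {
      return content
        .filter(
          (part): part is Extract<ResponsesPart, { type: "source" }> =>
            part.type === "source"
        )
        .map((part) => `${part.title} <${part.url}>`);
    }
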
@@ -2071,8 +2201,8 @@ var OpenAIResponsesLanguageModel = class {
      providerMetadata: {
        openai: {
          responseId: response.id,
-         cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
-         reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+         cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
+         reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
        }
      },
      warnings
@@ -2080,12 +2210,12 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
    const { args: body, warnings } = this.getArgs(options);
-   const { responseHeaders, value: response } = await postJsonToApi5({
+   const { responseHeaders, value: response } = await postJsonToApi6({
      url: this.config.url({
        path: "/responses",
        modelId: this.modelId
      }),
-     headers: combineHeaders6(this.config.headers(), options.headers),
+     headers: combineHeaders7(this.config.headers(), options.headers),
      body: {
        ...body,
        stream: true
@@ -2111,6 +2241,9 @@ var OpenAIResponsesLanguageModel = class {
    return {
      stream: response.pipeThrough(
        new TransformStream({
+         start(controller) {
+           controller.enqueue({ type: "stream-start", warnings });
+         },
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h;
            if (!chunk.success) {
@@ -2154,8 +2287,8 @@ var OpenAIResponsesLanguageModel = class {
            });
          } else if (isTextDeltaChunk(value)) {
            controller.enqueue({
-             type: "text-delta",
-             textDelta: value.delta
+             type: "text",
+             text: value.delta
            });
          } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
            ongoingToolCalls[value.output_index] = void 0;
@@ -2179,12 +2312,10 @@ var OpenAIResponsesLanguageModel = class {
          } else if (isResponseAnnotationAddedChunk(value)) {
            controller.enqueue({
              type: "source",
-             source: {
-               sourceType: "url",
-               id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
-               url: value.annotation.url,
-               title: value.annotation.title
-             }
+             sourceType: "url",
+             id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+             url: value.annotation.url,
+             title: value.annotation.title
            });
          }
        },
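
Note: streamed source parts are flattened here: sourceType, id, url, and title now sit directly on the part instead of under a nested source object. A hypothetical consumer updated accordingly:

    function handleSourcePart(part: {
      type: "source";
      sourceType: "url";
      id: string;
      url: string;
      title: string;
    }): string {
      // previously: part.source.url and part.source.title
      return `${part.title} <${part.url}>`;
    }
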
@@ -2207,84 +2338,83 @@ var OpenAIResponsesLanguageModel = class {
        })
      ),
      request: { body },
-     response: { headers: responseHeaders },
-     warnings
+     response: { headers: responseHeaders }
    };
  }
};
- var usageSchema = z8.object({
-   input_tokens: z8.number(),
-   input_tokens_details: z8.object({ cached_tokens: z8.number().nullish() }).nullish(),
-   output_tokens: z8.number(),
-   output_tokens_details: z8.object({ reasoning_tokens: z8.number().nullish() }).nullish()
+ var usageSchema = z9.object({
+   input_tokens: z9.number(),
+   input_tokens_details: z9.object({ cached_tokens: z9.number().nullish() }).nullish(),
+   output_tokens: z9.number(),
+   output_tokens_details: z9.object({ reasoning_tokens: z9.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z8.object({
-   type: z8.literal("response.output_text.delta"),
-   delta: z8.string()
+ var textDeltaChunkSchema = z9.object({
+   type: z9.literal("response.output_text.delta"),
+   delta: z9.string()
  });
- var responseFinishedChunkSchema = z8.object({
-   type: z8.enum(["response.completed", "response.incomplete"]),
-   response: z8.object({
-     incomplete_details: z8.object({ reason: z8.string() }).nullish(),
+ var responseFinishedChunkSchema = z9.object({
+   type: z9.enum(["response.completed", "response.incomplete"]),
+   response: z9.object({
+     incomplete_details: z9.object({ reason: z9.string() }).nullish(),
      usage: usageSchema
    })
  });
- var responseCreatedChunkSchema = z8.object({
-   type: z8.literal("response.created"),
-   response: z8.object({
-     id: z8.string(),
-     created_at: z8.number(),
-     model: z8.string()
+ var responseCreatedChunkSchema = z9.object({
+   type: z9.literal("response.created"),
+   response: z9.object({
+     id: z9.string(),
+     created_at: z9.number(),
+     model: z9.string()
    })
  });
- var responseOutputItemDoneSchema = z8.object({
-   type: z8.literal("response.output_item.done"),
-   output_index: z8.number(),
-   item: z8.discriminatedUnion("type", [
-     z8.object({
-       type: z8.literal("message")
+ var responseOutputItemDoneSchema = z9.object({
+   type: z9.literal("response.output_item.done"),
+   output_index: z9.number(),
+   item: z9.discriminatedUnion("type", [
+     z9.object({
+       type: z9.literal("message")
      }),
-     z8.object({
-       type: z8.literal("function_call"),
-       id: z8.string(),
-       call_id: z8.string(),
-       name: z8.string(),
-       arguments: z8.string(),
-       status: z8.literal("completed")
+     z9.object({
+       type: z9.literal("function_call"),
+       id: z9.string(),
+       call_id: z9.string(),
+       name: z9.string(),
+       arguments: z9.string(),
+       status: z9.literal("completed")
      })
    ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z8.object({
-   type: z8.literal("response.function_call_arguments.delta"),
-   item_id: z8.string(),
-   output_index: z8.number(),
-   delta: z8.string()
+ var responseFunctionCallArgumentsDeltaSchema = z9.object({
+   type: z9.literal("response.function_call_arguments.delta"),
+   item_id: z9.string(),
+   output_index: z9.number(),
+   delta: z9.string()
  });
- var responseOutputItemAddedSchema = z8.object({
-   type: z8.literal("response.output_item.added"),
-   output_index: z8.number(),
-   item: z8.discriminatedUnion("type", [
-     z8.object({
-       type: z8.literal("message")
+ var responseOutputItemAddedSchema = z9.object({
+   type: z9.literal("response.output_item.added"),
+   output_index: z9.number(),
+   item: z9.discriminatedUnion("type", [
+     z9.object({
+       type: z9.literal("message")
      }),
-     z8.object({
-       type: z8.literal("function_call"),
-       id: z8.string(),
-       call_id: z8.string(),
-       name: z8.string(),
-       arguments: z8.string()
+     z9.object({
+       type: z9.literal("function_call"),
+       id: z9.string(),
+       call_id: z9.string(),
+       name: z9.string(),
+       arguments: z9.string()
      })
    ])
  });
- var responseAnnotationAddedSchema = z8.object({
-   type: z8.literal("response.output_text.annotation.added"),
-   annotation: z8.object({
-     type: z8.literal("url_citation"),
-     url: z8.string(),
-     title: z8.string()
+ var responseAnnotationAddedSchema = z9.object({
+   type: z9.literal("response.output_text.annotation.added"),
+   annotation: z9.object({
+     type: z9.literal("url_citation"),
+     url: z9.string(),
+     title: z9.string()
    })
  });
- var openaiResponsesChunkSchema = z8.union([
+ var openaiResponsesChunkSchema = z9.union([
    textDeltaChunkSchema,
    responseFinishedChunkSchema,
    responseCreatedChunkSchema,
@@ -2292,7 +2422,7 @@ var openaiResponsesChunkSchema = z8.union([
    responseFunctionCallArgumentsDeltaSchema,
    responseOutputItemAddedSchema,
    responseAnnotationAddedSchema,
-   z8.object({ type: z8.string() }).passthrough()
+   z9.object({ type: z9.string() }).passthrough()
    // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2337,15 +2467,15 @@ function getResponsesModelConfig(modelId) {
    requiredAutoTruncation: false
  };
}
- var openaiResponsesProviderOptionsSchema = z8.object({
-   metadata: z8.any().nullish(),
-   parallelToolCalls: z8.boolean().nullish(),
-   previousResponseId: z8.string().nullish(),
-   store: z8.boolean().nullish(),
-   user: z8.string().nullish(),
-   reasoningEffort: z8.string().nullish(),
-   strictSchemas: z8.boolean().nullish(),
-   instructions: z8.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z9.object({
+   metadata: z9.any().nullish(),
+   parallelToolCalls: z9.boolean().nullish(),
+   previousResponseId: z9.string().nullish(),
+   store: z9.boolean().nullish(),
+   user: z9.string().nullish(),
+   reasoningEffort: z9.string().nullish(),
+   strictSchemas: z9.boolean().nullish(),
+   instructions: z9.string().nullish()
  });
  export {
    OpenAIChatLanguageModel,
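
Note: the schema above defines the keys the responses model accepts under the "openai" provider-options namespace. A hedged usage sketch with the standard generateText call shape; the option semantics themselves are not shown in this diff:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    async function run() {
      const { text } = await generateText({
        model: openai.responses("gpt-4o-mini"),
        prompt: "Summarize the latest change.",
        providerOptions: {
          openai: {
            store: false,
            reasoningEffort: "low",
            parallelToolCalls: false,
          },
        },
      });
      return text;
    }
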
@@ -2353,6 +2483,7 @@ export {
    OpenAIEmbeddingModel,
    OpenAIImageModel,
    OpenAIResponsesLanguageModel,
+   OpenAISpeechModel,
    OpenAITranscriptionModel,
    modelMaxImagesPerCall,
    openaiProviderOptions