@ai-sdk/openai 2.0.0-canary.0 → 2.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
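The main API-surface change in this range: per-request OpenAI options (logitBias, logprobs, parallelToolCalls, user, reasoningEffort, maxCompletionTokens, store, metadata, prediction) move out of model settings and providerMetadata into providerOptions.openai, validated by the new openaiProviderOptions zod schema, and the chat, completion, and embedding models report specificationVersion "v2". A minimal caller-side sketch of the new shape — the generateText call and model id below are illustrative assumptions, not part of this diff:

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai("o3-mini"), // hypothetical model id for illustration
  prompt: "Explain the change in one sentence.",
  providerOptions: {
    openai: {
      // parsed and validated against openaiProviderOptions (see diff below)
      reasoningEffort: "low",
      user: "user-1234",
      store: false,
    },
  },
});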
@@ -1,7 +1,6 @@
  // src/openai-chat-language-model.ts
  import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ InvalidResponseDataError
  } from "@ai-sdk/provider";
  import {
  combineHeaders,
@@ -9,18 +8,18 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
+ parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/convert-to-openai-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -61,55 +60,71 @@ function convertToOpenAIChatMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- }
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError({
- functionality: "'File content parts with URL data' functionality not supported."
- });
- }
- switch (part.mimeType) {
- case "audio/wav": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
- };
- }
- case "audio/mp3":
- case "audio/mpeg": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
- };
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ }
+ };
+ } else if (part.mediaType.startsWith("audio/")) {
+ if (part.data instanceof URL) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "audio file parts with URLs"
+ });
  }
- case "application/pdf": {
- return {
- type: "file",
- file: {
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- }
- };
+ switch (part.mediaType) {
+ case "audio/wav": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "wav"
+ }
+ };
+ }
+ case "audio/mp3":
+ case "audio/mpeg": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "mp3"
+ }
+ };
+ }
+ default: {
+ throw new UnsupportedFunctionalityError({
+ functionality: `audio content parts with media type ${part.mediaType}`
+ });
+ }
  }
- default: {
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
  throw new UnsupportedFunctionalityError({
- functionality: `File content part type ${part.mimeType} in user messages`
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "file",
+ file: {
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ }
+ };
+ } else {
+ throw new UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
@@ -139,41 +154,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -186,6 +180,19 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
+ }
+
  // src/map-openai-chat-logprobs.ts
  function mapOpenAIChatLogProbsOutput(logprobs) {
  var _a, _b;
@@ -216,18 +223,69 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  import { z } from "zod";
+ var openaiProviderOptions = z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: z.record(z.coerce.number(), z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: z.record(z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: z.record(z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ import { z as z2 } from "zod";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- var openaiErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
+ var openaiErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: z.string().nullish(),
- param: z.any().nullish(),
- code: z.union([z.string(), z.number()]).nullish()
+ type: z2.string().nullish(),
+ param: z2.any().nullish(),
+ code: z2.union([z2.string(), z2.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = createJsonErrorResponseHandler({
@@ -235,76 +293,19 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
  function prepareTools({
- mode,
- useLegacyFunctionCalling = false,
+ tools,
+ toolChoice,
  structuredOutputs
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
- }
- const toolChoice = mode.toolChoice;
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new UnsupportedFunctionalityError2({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools = [];
  for (const tool of tools) {
@@ -323,18 +324,18 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools, tool_choice: type, toolWarnings };
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
  case "tool":
  return {
  tools: openaiTools,
- tool_choice: {
+ toolChoice: {
  type: "function",
  function: {
  name: toolChoice.toolName
@@ -345,7 +346,7 @@ function prepareTools({
  default: {
  const _exhaustiveCheck = type;
  throw new UnsupportedFunctionalityError2({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -354,31 +355,22 @@ function prepareTools({
  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -387,39 +379,33 @@ var OpenAIChatLanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const type = mode.type;
+ var _a, _b, _c;
  const warnings = [];
+ const openaiOptions = (_a = parseProviderOptions({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new UnsupportedFunctionalityError3({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new UnsupportedFunctionalityError3({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -428,35 +414,38 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+ // TODO convert into provider option
+ this.settings.structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_b = responseFormat.name) != null ? _b : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" }
+ ) : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -520,82 +509,33 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
- }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
- mode,
- useLegacyFunctionCalling,
- structuredOutputs: this.supportsStructuredOutputs
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the search preview models and has been removed."
  });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice,
- functions,
- function_call
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: this.supportsStructuredOutputs && mode.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: mode.schema,
- strict: true,
- name: (_h = mode.name) != null ? _h : "response",
- description: mode.description
- }
- } : { type: "json_object" }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: useLegacyFunctionCalling ? {
- ...baseArgs,
- function_call: {
- name: mode.tool.name
- },
- functions: [
- {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- ]
- } : {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: this.supportsStructuredOutputs ? true : void 0
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
  }
  }
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice,
+ structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
@@ -618,10 +558,23 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
@@ -636,81 +589,24 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: generateId(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ },
+ request: { body },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- request: { body: JSON.stringify(body) },
- response: getResponseMetadata(response),
  warnings,
  logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
@@ -735,17 +631,19 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
@@ -773,10 +671,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -800,8 +696,8 @@ var OpenAIChatLanguageModel = class {
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  const mappedLogprobs = mapOpenAIChatLogProbsOutput(
@@ -811,16 +707,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: generateId(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -902,125 +790,111 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiTokenUsageSchema = z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish(),
- prompt_tokens_details: z2.object({
- cached_tokens: z2.number().nullish()
+ var openaiTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z2.object({
- reasoning_tokens: z2.number().nullish(),
- accepted_prediction_tokens: z2.number().nullish(),
- rejected_prediction_tokens: z2.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- role: z2.literal("assistant").nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- arguments: z2.string(),
- name: z2.string()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var openaiChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- index: z2.number(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ index: z3.number(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- role: z2.enum(["assistant"]).nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- name: z2.string().optional(),
- arguments: z2.string().optional()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var openaiChatChunkSchema = z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").optional(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: z2.string().nullable().optional(),
- index: z2.number()
+ finish_reason: z3.string().nullable().optional(),
+ index: z3.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1028,10 +902,7 @@ var openaiChatChunkSchema = z2.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
- }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ return modelId.startsWith("o");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -1062,21 +933,18 @@ var reasoningModels = {
  };

  // src/openai-completion-language-model.ts
- import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError5
- } from "@ai-sdk/provider";
  import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
  createJsonResponseHandler as createJsonResponseHandler2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z4 } from "zod";

  // src/convert-to-openai-completion-prompt.ts
  import {
  InvalidPromptError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
  } from "@ai-sdk/provider";
  function convertToOpenAICompletionPrompt({
  prompt,
@@ -1108,13 +976,8 @@ function convertToOpenAICompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new UnsupportedFunctionalityError4({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
${userMessage}

@@ -1128,7 +991,7 @@ ${userMessage}
  return part.text;
  }
  case "tool-call": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool-call messages"
  });
  }
@@ -1141,7 +1004,7 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool messages"
  });
  }
@@ -1177,8 +1040,7 @@ function mapOpenAICompletionLogProbs(logprobs) {
  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = void 0;
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1186,11 +1048,15 @@ var OpenAICompletionLanguageModel = class {
  get provider() {
  return this.config.provider;
  }
+ async getSupportedUrls() {
+ return {
+ // no supported urls for completion models
+ };
+ }
  getArgs({
- mode,
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1198,16 +1064,19 @@ var OpenAICompletionLanguageModel = class {
  presencePenalty,
  stopSequences: userStopSequences,
  responseFormat,
+ tools,
+ toolChoice,
  seed
  }) {
- var _a;
- const type = mode.type;
  const warnings = [];
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
+ }
+ if (tools == null ? void 0 : tools.length) {
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
+ }
+ if (toolChoice != null) {
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
  }
  if (responseFormat != null && responseFormat.type !== "text") {
  warnings.push({
@@ -1218,56 +1087,30 @@ var OpenAICompletionLanguageModel = class {
  }
  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- seed,
- // prompt:
- prompt: completionPrompt,
- // stop sequences:
- stop: stop.length > 0 ? stop : void 0
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ echo: this.settings.echo,
+ logit_bias: this.settings.logitBias,
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+ suffix: this.settings.suffix,
+ user: this.settings.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ seed,
+ // prompt:
+ prompt: completionPrompt,
+ // stop sequences:
+ stop: stop.length > 0 ? stop : void 0
+ },
+ warnings
  };
- switch (type) {
- case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new UnsupportedFunctionalityError5({
- functionality: "tools"
- });
- }
- if (mode.toolChoice) {
- throw new UnsupportedFunctionalityError5({
- functionality: "toolChoice"
- });
- }
- return { args: baseArgs, warnings };
- }
- case "object-json": {
- throw new UnsupportedFunctionalityError5({
- functionality: "object-json mode"
- });
- }
- case "object-tool": {
- throw new UnsupportedFunctionalityError5({
- functionality: "object-tool mode"
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
  const { args, warnings } = this.getArgs(options);
@@ -1289,21 +1132,22 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
  return {
- text: choice.text,
+ content: [{ type: "text", text: choice.text }],
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
- warnings,
- request: { body: JSON.stringify(args) }
+ request: { body: args },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
@@ -1328,17 +1172,19 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  if (!chunk.success) {
  finishReason = "error";
@@ -1359,10 +1205,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1370,8 +1214,8 @@ var OpenAICompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  const mappedLogprobs = mapOpenAICompletionLogProbs(
@@ -1392,53 +1236,51 @@ var OpenAICompletionLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionResponseSchema = z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  })
  });
- var openaiCompletionChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionChunkSchema = z4.union([
+ z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string().nullish(),
+ index: z4.number(),
+ logprobs: z4.object({
+ tokens: z4.array(z4.string()),
+ token_logprobs: z4.array(z4.number()),
+ top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1451,12 +1293,30 @@ import {
  import {
  combineHeaders as combineHeaders3,
  createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z6 } from "zod";
+
+ // src/openai-embedding-options.ts
+ import { z as z5 } from "zod";
+ var openaiEmbeddingProviderOptions = z5.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: z5.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: z5.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1475,8 +1335,10 @@ var OpenAIEmbeddingModel = class {
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1485,7 +1347,16 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await postJsonToApi3({
+ const openaiOptions = (_a = parseProviderOptions2({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi3({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1495,8 +1366,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler3(
@@ -1508,13 +1379,13 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z6.object({
+ data: z6.array(z6.object({ embedding: z6.array(z6.number()) })),
+ usage: z6.object({ prompt_tokens: z6.number() }).nullish()
  });

  // src/openai-image-model.ts
@@ -1523,7 +1394,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z7 } from "zod";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1601,26 +1472,304 @@ var OpenAIImageModel = class {
1601
1472
  };
1602
1473
  }
1603
1474
  };
1604
- var openaiImageResponseSchema = z5.object({
1605
- data: z5.array(z5.object({ b64_json: z5.string() }))
1475
+ var openaiImageResponseSchema = z7.object({
1476
+ data: z7.array(z7.object({ b64_json: z7.string() }))
1606
1477
  });
1607
1478
 
1608
- // src/responses/openai-responses-language-model.ts
1479
+ // src/openai-transcription-model.ts
1609
1480
  import {
1610
1481
  combineHeaders as combineHeaders5,
1611
- createEventSourceResponseHandler as createEventSourceResponseHandler3,
1482
+ convertBase64ToUint8Array,
1612
1483
  createJsonResponseHandler as createJsonResponseHandler5,
1613
- generateId as generateId2,
1614
- parseProviderOptions,
1484
+ parseProviderOptions as parseProviderOptions3,
1485
+ postFormDataToApi
1486
+ } from "@ai-sdk/provider-utils";
1487
+ import { z as z8 } from "zod";
1488
+ var openAIProviderOptionsSchema = z8.object({
1489
+ include: z8.array(z8.string()).nullish(),
1490
+ language: z8.string().nullish(),
+ prompt: z8.string().nullish(),
+ temperature: z8.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: z8.array(z8.enum(["word", "segment"])).nullish().default(["segment"])
+ });
+ var languageMap = {
+ afrikaans: "af",
+ arabic: "ar",
+ armenian: "hy",
+ azerbaijani: "az",
+ belarusian: "be",
+ bosnian: "bs",
+ bulgarian: "bg",
+ catalan: "ca",
+ chinese: "zh",
+ croatian: "hr",
+ czech: "cs",
+ danish: "da",
+ dutch: "nl",
+ english: "en",
+ estonian: "et",
+ finnish: "fi",
+ french: "fr",
+ galician: "gl",
+ german: "de",
+ greek: "el",
+ hebrew: "he",
+ hindi: "hi",
+ hungarian: "hu",
+ icelandic: "is",
+ indonesian: "id",
+ italian: "it",
+ japanese: "ja",
+ kannada: "kn",
+ kazakh: "kk",
+ korean: "ko",
+ latvian: "lv",
+ lithuanian: "lt",
+ macedonian: "mk",
+ malay: "ms",
+ marathi: "mr",
+ maori: "mi",
+ nepali: "ne",
+ norwegian: "no",
+ persian: "fa",
+ polish: "pl",
+ portuguese: "pt",
+ romanian: "ro",
+ russian: "ru",
+ serbian: "sr",
+ slovak: "sk",
+ slovenian: "sl",
+ spanish: "es",
+ swahili: "sw",
+ swedish: "sv",
+ tagalog: "tl",
+ tamil: "ta",
+ thai: "th",
+ turkish: "tr",
+ ukrainian: "uk",
+ urdu: "ur",
+ vietnamese: "vi",
+ welsh: "cy"
+ };
+ var OpenAITranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ var _a, _b, _c, _d, _e;
+ const warnings = [];
+ const openAIOptions = parseProviderOptions3({
+ provider: "openai",
+ providerOptions,
+ schema: openAIProviderOptionsSchema
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (openAIOptions) {
+ const transcriptionModelOptions = {
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ };
+ for (const key in transcriptionModelOptions) {
+ const value = transcriptionModelOptions[key];
+ if (value !== void 0) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postFormDataToApi({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders5(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler5(
+ openaiTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ text: word.word,
+ startSecond: word.start,
+ endSecond: word.end
+ }))) != null ? _e : [],
+ language,
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ var openaiTranscriptionResponseSchema = z8.object({
+ text: z8.string(),
+ language: z8.string().nullish(),
+ duration: z8.number().nullish(),
+ words: z8.array(
+ z8.object({
+ word: z8.string(),
+ start: z8.number(),
+ end: z8.number()
+ })
+ ).nullish()
+ });
+
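
For orientation, a minimal sketch of driving the new transcription model directly (not part of the diff). The config shape mirrors what the class reads above (provider, url({ path, modelId }), headers(), and an optional fetch), but the endpoint and environment variable here are illustrative assumptions; in the published package the provider factory assembles this config.

// sketch only: the config wiring below is hypothetical, not the package's actual factory
import { readFile } from "node:fs/promises";

const transcriptionModel = new OpenAITranscriptionModel("whisper-1", {
  provider: "openai.transcription",
  url: ({ path }) => `https://api.openai.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` })
});

const result = await transcriptionModel.doGenerate({
  audio: new Uint8Array(await readFile("clip.wav")), // Uint8Array or base64 string
  mediaType: "audio/wav",
  // parsed against the zod provider-options schema above
  providerOptions: { openai: { temperature: 0, timestampGranularities: ["word"] } }
});

console.log(result.text, result.segments.length, result.durationInSeconds);
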
+ // src/openai-speech-model.ts
+ import {
+ combineHeaders as combineHeaders6,
+ createBinaryResponseHandler,
+ parseProviderOptions as parseProviderOptions4,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
+ import { z as z9 } from "zod";
+ var OpenAIProviderOptionsSchema = z9.object({
+ instructions: z9.string().nullish(),
+ speed: z9.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ text,
+ voice = "alloy",
+ outputFormat = "mp3",
+ speed,
+ instructions,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = parseProviderOptions4({
+ provider: "openai",
+ providerOptions,
+ schema: OpenAIProviderOptionsSchema
+ });
+ const requestBody = {
+ model: this.modelId,
+ input: text,
+ voice,
+ response_format: "mp3",
+ speed,
+ instructions
+ };
+ if (outputFormat) {
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+ requestBody.response_format = outputFormat;
+ } else {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "outputFormat",
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+ });
+ }
+ }
+ if (openAIOptions) {
+ const speechModelOptions = {};
+ for (const key in speechModelOptions) {
+ const value = speechModelOptions[key];
+ if (value !== void 0) {
+ requestBody[key] = value;
+ }
+ }
+ }
+ return {
+ requestBody,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { requestBody, warnings } = this.getArgs(options);
+ const {
+ value: audio,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postJsonToApi5({
+ url: this.config.url({
+ path: "/audio/speech",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders6(this.config.headers(), options.headers),
+ body: requestBody,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createBinaryResponseHandler(),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ audio,
+ warnings,
+ request: {
+ body: JSON.stringify(requestBody)
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+
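
The speech model follows the same pattern. A hedged sketch of a direct call, reusing the assumed config shape from the transcription sketch; note that an unsupported output format is not an error: the request falls back to mp3 and a warning is surfaced instead.

const speechModel = new OpenAISpeechModel("tts-1", {
  provider: "openai.speech",
  url: ({ path }) => `https://api.openai.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` })
});

const { audio, warnings } = await speechModel.doGenerate({
  text: "Hello from the canary build.",
  voice: "nova",
  outputFormat: "ogg", // not in ["mp3", "opus", "aac", "flac", "wav", "pcm"]: mp3 is used and a warning is pushed
  speed: 1.25 // sent as-is in the request body
});

One detail worth flagging: getArgs validates providerOptions.openai against OpenAIProviderOptionsSchema, but speechModelOptions is an empty object in this version, so the parsed options are validated without ever being copied into the request.
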
+ // src/responses/openai-responses-language-model.ts
+ import {
+ combineHeaders as combineHeaders7,
+ createEventSourceResponseHandler as createEventSourceResponseHandler3,
+ createJsonResponseHandler as createJsonResponseHandler6,
+ generateId as generateId2,
+ parseProviderOptions as parseProviderOptions5,
+ postJsonToApi as postJsonToApi6
+ } from "@ai-sdk/provider-utils";
+ import { z as z10 } from "zod";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
  function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
@@ -1659,38 +1808,35 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
  }
- case "image": {
- return {
- type: "input_image",
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError6({
- functionality: "File URLs in user messages"
- });
- }
- switch (part.mimeType) {
- case "application/pdf": {
- return {
- type: "input_file",
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- };
- }
- default: {
- throw new UnsupportedFunctionalityError6({
- functionality: "Only PDF files are supported in user messages"
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "input_image",
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ };
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
+ throw new UnsupportedFunctionalityError4({
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "input_file",
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ } else {
+ throw new UnsupportedFunctionalityError4({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
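
The net effect of this hunk: dedicated image parts are gone, and the Responses converter receives a single file part type that it branches on by mediaType. A hedged sketch of the accepted inputs and the shapes they map to (values illustrative; base64Pdf stands in for a base64 string):

const userContent = [
  { type: "text", text: "Compare the chart with the report." },
  {
    type: "file",
    mediaType: "image/png", // a generic "image/*" is normalized to image/jpeg
    data: new URL("https://example.com/chart.png"), // URLs pass through for images
    providerOptions: { openai: { imageDetail: "high" } }
  },
  {
    type: "file",
    mediaType: "application/pdf",
    filename: "report.pdf",
    data: base64Pdf // must be base64 data; a URL here throws UnsupportedFunctionalityError
  }
];
// maps to input_text, input_image (with detail: "high"), and input_file parts;
// any other mediaType throws "file part media type ..."
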
@@ -1760,19 +1906,18 @@ function mapOpenAIResponseFinishReason({

  // src/responses/openai-responses-prepare-tools.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError7
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
  } from "@ai-sdk/provider";
  function prepareResponsesTools({
- mode,
+ tools,
+ toolChoice,
  strict
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
  const openaiTools = [];
  for (const tool of tools) {
  switch (tool.type) {
@@ -1805,37 +1950,24 @@ function prepareResponsesTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools, tool_choice: type, toolWarnings };
- case "tool": {
- if (toolChoice.toolName === "web_search_preview") {
- return {
- tools: openaiTools,
- tool_choice: {
- type: "web_search_preview"
- },
- toolWarnings
- };
- }
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
+ case "tool":
  return {
  tools: openaiTools,
- tool_choice: {
- type: "function",
- name: toolChoice.toolName
- },
+ toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
- }
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError7({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ throw new UnsupportedFunctionalityError5({
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
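
prepareResponsesTools now receives tools and toolChoice as direct arguments instead of unpacking a mode object, and returns a camelCase toolChoice. A sketch of the mapping; the function-tool shape here is an assumption taken from the v2 call options, since this hunk elides the per-tool switch:

const { tools, toolChoice, toolWarnings } = prepareResponsesTools({
  tools: [{
    type: "function", // assumed v2 tool shape
    name: "get_weather",
    description: "Look up current weather",
    parameters: { type: "object", properties: { city: { type: "string" } } }
  }],
  toolChoice: { type: "tool", toolName: "get_weather" },
  strict: true
});
// toolChoice -> { type: "function", name: "get_weather" }
// "web_search_preview" is special-cased to { type: "web_search_preview" }
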
@@ -1844,17 +1976,20 @@ function prepareResponsesTools({
  // src/responses/openai-responses-language-model.ts
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = "json";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.config = config;
  }
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https?:\/\/.*$/]
+ };
+ }
  get provider() {
  return this.config.provider;
  }
  getArgs({
- mode,
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1863,24 +1998,19 @@ var OpenAIResponsesLanguageModel = class {
  frequencyPenalty,
  seed,
  prompt,
- providerMetadata,
+ providerOptions,
+ tools,
+ toolChoice,
  responseFormat
  }) {
- var _a, _b, _c;
+ var _a, _b;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
- const type = mode.type;
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
  if (seed != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "seed"
- });
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  if (presencePenalty != null) {
  warnings.push({
@@ -1895,19 +2025,16 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  if (stopSequences != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "stopSequences"
- });
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions({
+ const openaiOptions = parseProviderOptions5({
  provider: "openai",
- providerOptions: providerMetadata,
+ providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -1916,7 +2043,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -1961,178 +2088,145 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
- mode,
- strict: isStrict
- // TODO support provider options on tools
- });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- text: {
- format: mode.schema != null ? {
- type: "json_schema",
- strict: isStrict,
- name: (_c = mode.name) != null ? _c : "response",
- description: mode.description,
- schema: mode.schema
- } : { type: "json_object" }
- }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: { type: "function", name: mode.tool.name },
- tools: [
- {
- type: "function",
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: isStrict
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareResponsesTools({
+ tools,
+ toolChoice,
+ strict: isStrict
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
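
With the mode switch removed, a single flattened v2 options object carries everything getArgs consumes. A sketch of the new shape (values illustrative; prompt, schema, and tools are assumed to be in scope):

const { args, warnings } = model.getArgs({
  prompt, // LanguageModelV2 prompt
  maxOutputTokens: 1024, // renamed from maxTokens
  temperature: 0.2,
  responseFormat: { type: "json", schema }, // replaces the old object-json mode
  tools, // top-level now, instead of mode.tools
  toolChoice: { type: "auto" },
  providerOptions: { openai: { strictSchemas: true } } // renamed from providerMetadata
});
// args is the /responses request body; warnings collects unsupported settings
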
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await postJsonToApi5({
+ } = await postJsonToApi6({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: combineHeaders5(this.config.headers(), options.headers),
+ headers: combineHeaders7(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: createJsonResponseHandler5(
- z6.object({
- id: z6.string(),
- created_at: z6.number(),
- model: z6.string(),
- output: z6.array(
- z6.discriminatedUnion("type", [
- z6.object({
- type: z6.literal("message"),
- role: z6.literal("assistant"),
- content: z6.array(
- z6.object({
- type: z6.literal("output_text"),
- text: z6.string(),
- annotations: z6.array(
- z6.object({
- type: z6.literal("url_citation"),
- start_index: z6.number(),
- end_index: z6.number(),
- url: z6.string(),
- title: z6.string()
+ successfulResponseHandler: createJsonResponseHandler6(
+ z10.object({
+ id: z10.string(),
+ created_at: z10.number(),
+ model: z10.string(),
+ output: z10.array(
+ z10.discriminatedUnion("type", [
+ z10.object({
+ type: z10.literal("message"),
+ role: z10.literal("assistant"),
+ content: z10.array(
+ z10.object({
+ type: z10.literal("output_text"),
+ text: z10.string(),
+ annotations: z10.array(
+ z10.object({
+ type: z10.literal("url_citation"),
+ start_index: z10.number(),
+ end_index: z10.number(),
+ url: z10.string(),
+ title: z10.string()
  })
  )
  })
  )
  }),
- z6.object({
- type: z6.literal("function_call"),
- call_id: z6.string(),
- name: z6.string(),
- arguments: z6.string()
+ z10.object({
+ type: z10.literal("function_call"),
+ call_id: z10.string(),
+ name: z10.string(),
+ arguments: z10.string()
  }),
- z6.object({
- type: z6.literal("web_search_call")
+ z10.object({
+ type: z10.literal("web_search_call")
  }),
- z6.object({
- type: z6.literal("computer_call")
+ z10.object({
+ type: z10.literal("computer_call")
  }),
- z6.object({
- type: z6.literal("reasoning")
+ z10.object({
+ type: z10.literal("reasoning")
  })
  ])
  ),
- incomplete_details: z6.object({ reason: z6.string() }).nullable(),
+ incomplete_details: z10.object({ reason: z10.string() }).nullable(),
  usage: usageSchema
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
- toolCallType: "function",
- toolCallId: output.call_id,
- toolName: output.name,
- args: output.arguments
- }));
+ const content = [];
+ for (const part of response.output) {
+ switch (part.type) {
+ case "message": {
+ for (const contentPart of part.content) {
+ content.push({
+ type: "text",
+ text: contentPart.text
+ });
+ for (const annotation of contentPart.annotations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
+ break;
+ }
+ case "function_call": {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: part.call_id,
+ toolName: part.name,
+ args: part.arguments
+ });
+ break;
+ }
+ }
+ }
  return {
- text: outputTextElements.map((content) => content.text).join("\n"),
- sources: outputTextElements.flatMap(
- (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2;
- return {
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
- url: annotation.url,
- title: annotation.title
- };
- })
- ),
+ content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
- hasToolCalls: toolCalls.length > 0
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
- modelId: response.model
+ modelId: response.model,
+ headers: responseHeaders,
+ body: rawResponse
  },
  providerMetadata: {
  openai: {
  responseId: response.id,
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
+ reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
  }
  },
  warnings
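
doGenerate no longer returns separate text, sources, and toolCalls fields; everything arrives in one ordered content array, built as shown above. A sketch of consuming the v2 result:

const result = await model.doGenerate(options); // options as in the earlier sketch
const text = result.content
  .filter((part) => part.type === "text")
  .map((part) => part.text)
  .join("");
const toolCalls = result.content.filter((part) => part.type === "tool-call");
console.log(text, toolCalls.length, result.usage.inputTokens, result.usage.outputTokens);
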
@@ -2140,12 +2234,12 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi5({
+ const { responseHeaders, value: response } = await postJsonToApi6({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: combineHeaders5(this.config.headers(), options.headers),
+ headers: combineHeaders7(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
@@ -2159,8 +2253,10 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
+ };
  let cachedPromptTokens = null;
  let reasoningTokens = null;
  let responseId = null;
@@ -2169,6 +2265,9 @@ var OpenAIResponsesLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  if (!chunk.success) {
@@ -2212,8 +2311,8 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ type: "text",
+ text: value.delta
  });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2230,19 +2329,17 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
- url: value.annotation.url,
- title: value.annotation.title
- }
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+ url: value.annotation.url,
+ title: value.annotation.title
  });
  }
  },
@@ -2250,7 +2347,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
@@ -2264,89 +2361,84 @@ var OpenAIResponsesLanguageModel = class {
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
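
The stream now opens with an explicit stream-start part carrying the warnings, and text and source parts are flattened. A hedged sketch of reading it, assuming a runtime where ReadableStream is async-iterable (e.g. Node):

const { stream } = await model.doStream(options);
for await (const part of stream) {
  switch (part.type) {
    case "stream-start": console.warn(part.warnings); break;
    case "text": process.stdout.write(part.text); break; // was text-delta/textDelta
    case "source": console.log(part.url, part.title); break; // no longer nested under a `source` object
    case "finish": console.log(part.finishReason, part.usage); break;
  }
}
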
- var usageSchema = z6.object({
- input_tokens: z6.number(),
- input_tokens_details: z6.object({ cached_tokens: z6.number().nullish() }).nullish(),
- output_tokens: z6.number(),
- output_tokens_details: z6.object({ reasoning_tokens: z6.number().nullish() }).nullish()
+ var usageSchema = z10.object({
+ input_tokens: z10.number(),
+ input_tokens_details: z10.object({ cached_tokens: z10.number().nullish() }).nullish(),
+ output_tokens: z10.number(),
+ output_tokens_details: z10.object({ reasoning_tokens: z10.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z6.object({
- type: z6.literal("response.output_text.delta"),
- delta: z6.string()
+ var textDeltaChunkSchema = z10.object({
+ type: z10.literal("response.output_text.delta"),
+ delta: z10.string()
  });
- var responseFinishedChunkSchema = z6.object({
- type: z6.enum(["response.completed", "response.incomplete"]),
- response: z6.object({
- incomplete_details: z6.object({ reason: z6.string() }).nullish(),
+ var responseFinishedChunkSchema = z10.object({
+ type: z10.enum(["response.completed", "response.incomplete"]),
+ response: z10.object({
+ incomplete_details: z10.object({ reason: z10.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = z6.object({
- type: z6.literal("response.created"),
- response: z6.object({
- id: z6.string(),
- created_at: z6.number(),
- model: z6.string()
+ var responseCreatedChunkSchema = z10.object({
+ type: z10.literal("response.created"),
+ response: z10.object({
+ id: z10.string(),
+ created_at: z10.number(),
+ model: z10.string()
  })
  });
- var responseOutputItemDoneSchema = z6.object({
- type: z6.literal("response.output_item.done"),
- output_index: z6.number(),
- item: z6.discriminatedUnion("type", [
- z6.object({
- type: z6.literal("message")
+ var responseOutputItemDoneSchema = z10.object({
+ type: z10.literal("response.output_item.done"),
+ output_index: z10.number(),
+ item: z10.discriminatedUnion("type", [
+ z10.object({
+ type: z10.literal("message")
  }),
- z6.object({
- type: z6.literal("function_call"),
- id: z6.string(),
- call_id: z6.string(),
- name: z6.string(),
- arguments: z6.string(),
- status: z6.literal("completed")
+ z10.object({
+ type: z10.literal("function_call"),
+ id: z10.string(),
+ call_id: z10.string(),
+ name: z10.string(),
+ arguments: z10.string(),
+ status: z10.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z6.object({
- type: z6.literal("response.function_call_arguments.delta"),
- item_id: z6.string(),
- output_index: z6.number(),
- delta: z6.string()
+ var responseFunctionCallArgumentsDeltaSchema = z10.object({
+ type: z10.literal("response.function_call_arguments.delta"),
+ item_id: z10.string(),
+ output_index: z10.number(),
+ delta: z10.string()
  });
- var responseOutputItemAddedSchema = z6.object({
- type: z6.literal("response.output_item.added"),
- output_index: z6.number(),
- item: z6.discriminatedUnion("type", [
- z6.object({
- type: z6.literal("message")
+ var responseOutputItemAddedSchema = z10.object({
+ type: z10.literal("response.output_item.added"),
+ output_index: z10.number(),
+ item: z10.discriminatedUnion("type", [
+ z10.object({
+ type: z10.literal("message")
  }),
- z6.object({
- type: z6.literal("function_call"),
- id: z6.string(),
- call_id: z6.string(),
- name: z6.string(),
- arguments: z6.string()
+ z10.object({
+ type: z10.literal("function_call"),
+ id: z10.string(),
+ call_id: z10.string(),
+ name: z10.string(),
+ arguments: z10.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z6.object({
- type: z6.literal("response.output_text.annotation.added"),
- annotation: z6.object({
- type: z6.literal("url_citation"),
- url: z6.string(),
- title: z6.string()
+ var responseAnnotationAddedSchema = z10.object({
+ type: z10.literal("response.output_text.annotation.added"),
+ annotation: z10.object({
+ type: z10.literal("url_citation"),
+ url: z10.string(),
+ title: z10.string()
  })
  });
- var openaiResponsesChunkSchema = z6.union([
+ var openaiResponsesChunkSchema = z10.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2354,7 +2446,7 @@ var openaiResponsesChunkSchema = z6.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- z6.object({ type: z6.string() }).passthrough()
+ z10.object({ type: z10.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2399,15 +2491,15 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z6.object({
- metadata: z6.any().nullish(),
- parallelToolCalls: z6.boolean().nullish(),
- previousResponseId: z6.string().nullish(),
- store: z6.boolean().nullish(),
- user: z6.string().nullish(),
- reasoningEffort: z6.string().nullish(),
- strictSchemas: z6.boolean().nullish(),
- instructions: z6.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z10.object({
+ metadata: z10.any().nullish(),
+ parallelToolCalls: z10.boolean().nullish(),
+ previousResponseId: z10.string().nullish(),
+ store: z10.boolean().nullish(),
+ user: z10.string().nullish(),
+ reasoningEffort: z10.string().nullish(),
+ strictSchemas: z10.boolean().nullish(),
+ instructions: z10.string().nullish()
  });
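
These provider options surface per call under providerOptions.openai; a sketch with illustrative values (the schema above is the source of the field names, while their exact effect on the request body is elided from this hunk):

const { args } = model.getArgs({
  prompt,
  providerOptions: {
    openai: {
      parallelToolCalls: false,
      previousResponseId: "resp_123", // illustrative id
      reasoningEffort: "low",
      store: false,
      user: "user-1234",
      instructions: "Answer concisely."
    }
  }
});
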
  export {
  OpenAIChatLanguageModel,
@@ -2415,6 +2507,10 @@ export {
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
- modelMaxImagesPerCall
+ OpenAISpeechModel,
+ OpenAITranscriptionModel,
+ modelMaxImagesPerCall,
+ openaiEmbeddingProviderOptions,
+ openaiProviderOptions
  };
  //# sourceMappingURL=index.mjs.map