@ai-sdk/openai 2.0.0-canary.1 → 2.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,6 @@
  // src/openai-chat-language-model.ts
  import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ InvalidResponseDataError
  } from "@ai-sdk/provider";
  import {
  combineHeaders,
@@ -9,18 +8,18 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
+ parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/convert-to-openai-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -61,55 +60,71 @@ function convertToOpenAIChatMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- }
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError({
- functionality: "'File content parts with URL data' functionality not supported."
- });
- }
- switch (part.mimeType) {
- case "audio/wav": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
- };
- }
- case "audio/mp3":
- case "audio/mpeg": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
- };
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ }
+ };
+ } else if (part.mediaType.startsWith("audio/")) {
+ if (part.data instanceof URL) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "audio file parts with URLs"
+ });
  }
- case "application/pdf": {
- return {
- type: "file",
- file: {
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- }
- };
+ switch (part.mediaType) {
+ case "audio/wav": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "wav"
+ }
+ };
+ }
+ case "audio/mp3":
+ case "audio/mpeg": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: convertToBase64(part.data),
+ format: "mp3"
+ }
+ };
+ }
+ default: {
+ throw new UnsupportedFunctionalityError({
+ functionality: `audio content parts with media type ${part.mediaType}`
+ });
+ }
  }
- default: {
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
  throw new UnsupportedFunctionalityError({
- functionality: `File content part type ${part.mimeType} in user messages`
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "file",
+ file: {
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ }
+ };
+ } else {
+ throw new UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
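
Note on this hunk: dedicated `image` parts and the `mimeType` field are gone; every user attachment now arrives as a `file` part with a `mediaType`, and the converter branches on that media type. A minimal sketch of the shapes involved, assuming the LanguageModelV2-style prompt part types used elsewhere in the AI SDK (illustrative values, not part of the diff):

```ts
// An image is now just a file part with an image/* media type:
const imagePart = {
  type: 'file' as const,
  mediaType: 'image/png', // replaces the removed `mimeType`; 'image/*' falls back to image/jpeg
  data: new URL('https://example.com/cat.png'), // URL or binary data
};
// -> mapped to { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }

// Audio and PDF parts must carry inline data; URL data now throws
// UnsupportedFunctionalityError ('audio file parts with URLs' / 'PDF file parts with URLs').
const audioPart = {
  type: 'file' as const,
  mediaType: 'audio/wav',
  data: new Uint8Array([/* wav bytes */]), // base64-encoded into input_audio.data
};
```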
@@ -139,41 +154,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -186,17 +180,17 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

  // src/map-openai-finish-reason.ts
@@ -216,18 +210,59 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  import { z } from "zod";
+ var openaiProviderOptions = z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: z.record(z.coerce.number(), z.number()).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: z.record(z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: z.record(z.any()).optional()
+ });
+
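
Note on this hunk: the options above used to be constructor settings; they are now read per request. A hedged usage sketch, assuming the AI SDK's `generateText` entry point forwards `providerOptions` in this canary (values illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Validated against openaiProviderOptions via parseProviderOptions in getArgs.
const result = await generateText({
  model: openai('o3-mini'),
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',    // z.enum(['low', 'medium', 'high'])
      maxCompletionTokens: 1024, // sent as max_completion_tokens
      user: 'user-1234',         // end-user identifier for abuse monitoring
    },
  },
});
```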
+ // src/openai-error.ts
+ import { z as z2 } from "zod";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- var openaiErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
+ var openaiErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: z.string().nullish(),
- param: z.any().nullish(),
- code: z.union([z.string(), z.number()]).nullish()
+ type: z2.string().nullish(),
+ param: z2.any().nullish(),
+ code: z2.union([z2.string(), z2.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = createJsonErrorResponseHandler({
@@ -235,76 +270,19 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
  function prepareTools({
- mode,
- useLegacyFunctionCalling = false,
+ tools,
+ toolChoice,
  structuredOutputs
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
- }
- const toolChoice = mode.toolChoice;
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new UnsupportedFunctionalityError2({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools = [];
  for (const tool of tools) {
@@ -323,18 +301,18 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools, tool_choice: type, toolWarnings };
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
  case "tool":
  return {
  tools: openaiTools,
- tool_choice: {
+ toolChoice: {
  type: "function",
  function: {
  name: toolChoice.toolName
@@ -345,7 +323,7 @@ function prepareTools({
  default: {
  const _exhaustiveCheck = type;
  throw new UnsupportedFunctionalityError2({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
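
Note on this hunk: `prepareTools` now receives `tools` and `toolChoice` directly instead of digging them out of a `mode` object, and it returns a camelCase `toolChoice` that the caller maps to the snake_case `tool_choice` wire field. Sketch of the new contract (types simplified, values illustrative):

```ts
const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [{
    type: 'function',
    name: 'getWeather',
    description: 'Look up the weather for a city',
    parameters: { type: 'object', properties: { city: { type: 'string' } } },
  }],
  toolChoice: { type: 'tool', toolName: 'getWeather' },
  structuredOutputs: false,
});
// toolChoice -> { type: 'function', function: { name: 'getWeather' } }
// getArgs() then emits it as the `tool_choice` request field.
```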
@@ -359,26 +337,17 @@ var OpenAIChatLanguageModel = class {
  this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  }
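
Note on this hunk: the boolean `supportsImageUrls` / `downloadImages` pair is replaced by `getSupportedUrls`, which declares, per media type, the URL patterns the model accepts natively; anything else is expected to be downloaded and inlined by the caller. A sketch of how a host might consult it (an assumption about how the v2 spec consumes the map):

```ts
const supported = await model.getSupportedUrls();
// -> { 'image/*': [/^https?:\/\/.*$/] }

const url = new URL('https://example.com/cat.png');
const canPassThrough = (supported['image/*'] ?? []).some((re) => re.test(url.href));
// true: send the URL as-is; false: fetch the bytes and inline them as base64.
```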
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -387,39 +356,33 @@ var OpenAIChatLanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const type = mode.type;
+ var _a, _b, _c;
  const warnings = [];
+ const openaiOptions = (_a = parseProviderOptions({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new UnsupportedFunctionalityError3({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new UnsupportedFunctionalityError3({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -428,35 +391,36 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+ // TODO convert into provider option
+ this.settings.structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_b = responseFormat.name) != null ? _b : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" }
+ ) : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
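
Note on this hunk: `maxTokens` becomes `maxOutputTokens` at the call-option level, and every OpenAI-specific knob is now read from the parsed `openaiOptions` rather than constructor settings. An illustrative sketch of the request body this produces (field values are examples only):

```ts
// getArgs({ maxOutputTokens: 1000, providerOptions: { openai: { user: 'user-1234',
//   parallelToolCalls: false, reasoningEffort: 'low' } }, ... }) yields roughly:
const baseArgs = {
  model: 'gpt-4o',
  max_tokens: 1000,           // from maxOutputTokens (remapped for reasoning models below)
  user: 'user-1234',          // openaiOptions.user
  parallel_tool_calls: false, // openaiOptions.parallelToolCalls
  reasoning_effort: 'low',    // openaiOptions.reasoningEffort
  messages: [/* converted prompt */],
};
```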
@@ -500,102 +464,39 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
- if (baseArgs.logprobs != null) {
- baseArgs.logprobs = void 0;
- warnings.push({
- type: "other",
- message: "logprobs is not supported for reasoning models"
- });
- }
- if (baseArgs.top_logprobs != null) {
- baseArgs.top_logprobs = void 0;
- warnings.push({
- type: "other",
- message: "topLogprobs is not supported for reasoning models"
- });
- }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
  }
  baseArgs.max_tokens = void 0;
  }
- }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
- mode,
- useLegacyFunctionCalling,
- structuredOutputs: this.supportsStructuredOutputs
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the search preview models and has been removed."
  });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice,
- functions,
- function_call
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: this.supportsStructuredOutputs && mode.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: mode.schema,
- strict: true,
- name: (_h = mode.name) != null ? _h : "response",
- description: mode.description
- }
- } : { type: "json_object" }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: useLegacyFunctionCalling ? {
- ...baseArgs,
- function_call: {
- name: mode.tool.name
- },
- functions: [
- {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- ]
- } : {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: this.supportsStructuredOutputs ? true : void 0
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
  }
  }
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice,
+ structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
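
Note on this hunk: the v1 mode switch (`regular` / `object-json` / `object-tool`) is deleted. Tools, tool choice, and JSON output are now plain call options on one flat shape, which is what the new `prepareTools` call above consumes. A hedged sketch of the v2-style options, assuming the spec shape this diff implies:

```ts
declare const model: { doGenerate(options: unknown): Promise<unknown> };
declare const prompt: unknown;
declare const schema: Record<string, unknown>;

await model.doGenerate({
  prompt,
  maxOutputTokens: 500,
  responseFormat: { type: 'json', schema },     // replaces object-json mode
  tools: [/* function tools */],                // replaces mode.tools
  toolChoice: { type: 'auto' },                 // replaces mode.toolChoice
  providerOptions: { openai: { store: true } },
});
```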
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
@@ -618,10 +519,23 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
@@ -636,81 +550,23 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: generateId(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ },
+ request: { body },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- request: { body: JSON.stringify(body) },
- response: getResponseMetadata(response),
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
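
Note on this hunk: `doGenerate` no longer returns separate `text` / `toolCalls` / `logprobs` fields; it returns a single ordered `content` array, and usage is renamed to `inputTokens` / `outputTokens` (undefined instead of NaN when the API omits them). Sketch of consuming the new result shape:

```ts
declare const model: { doGenerate(options: unknown): Promise<any> };
declare const options: unknown;

const result = await model.doGenerate(options);
for (const part of result.content) {
  if (part.type === 'text') {
    process.stdout.write(part.text);
  } else if (part.type === 'tool-call') {
    console.log(part.toolCallId, part.toolName, part.args);
  }
}
console.log(result.usage); // e.g. { inputTokens: 42, outputTokens: 7 }
```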
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
@@ -735,17 +591,18 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
@@ -773,10 +630,8 @@
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -800,27 +655,12 @@
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: generateId(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -902,125 +742,82 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
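
Note on the streaming changes in the hunks above: `simulateStreaming` is gone, warnings now travel inside the stream as an initial `stream-start` part, text deltas are `{ type: 'text', text }` instead of `{ type: 'text-delta', textDelta }`, and the finish part carries the `inputTokens`/`outputTokens` usage object. Sketch of reading the new stream (assumes a runtime where ReadableStream is async-iterable, e.g. Node 18+):

```ts
declare const model: { doStream(options: unknown): Promise<{ stream: any }> };
declare const options: unknown;

const { stream } = await model.doStream(options);
for await (const part of stream) {
  switch (part.type) {
    case 'stream-start': console.warn(part.warnings); break;  // was a top-level field
    case 'text':         process.stdout.write(part.text); break;
    case 'finish':       console.log(part.finishReason, part.usage); break;
  }
}
```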
- var openaiTokenUsageSchema = z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish(),
- prompt_tokens_details: z2.object({
- cached_tokens: z2.number().nullish()
+ var openaiTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z2.object({
- reasoning_tokens: z2.number().nullish(),
- accepted_prediction_tokens: z2.number().nullish(),
- rejected_prediction_tokens: z2.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- role: z2.literal("assistant").nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- arguments: z2.string(),
- name: z2.string()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var openaiChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- index: z2.number(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
- finish_reason: z2.string().nullish()
+ index: z3.number(),
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- role: z2.enum(["assistant"]).nullish(),
- content: z2.string().nullish(),
- function_call: z2.object({
- name: z2.string().optional(),
- arguments: z2.string().optional()
- }).nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var openaiChatChunkSchema = z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").optional(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z2.object({
- content: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number(),
- top_logprobs: z2.array(
- z2.object({
- token: z2.string(),
- logprob: z2.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
- finish_reason: z2.string().nullable().optional(),
- index: z2.number()
+ finish_reason: z3.string().nullable().optional(),
+ index: z3.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1028,10 +825,7 @@ var openaiChatChunkSchema = z2.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
- }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ return modelId.startsWith("o");
  }
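
Note on this hunk: the explicit o1/o3 checks collapse into a single prefix test, so any current or future "o"-series ID is treated as a reasoning model:

```ts
isReasoningModel('o1-mini'); // true, as before
isReasoningModel('o4-mini'); // true, newly covered by the prefix check
isReasoningModel('gpt-4o');  // false, does not start with 'o'
```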
  function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -1062,21 +856,18 @@ var reasoningModels = {
  };

  // src/openai-completion-language-model.ts
- import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError5
- } from "@ai-sdk/provider";
  import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
  createJsonResponseHandler as createJsonResponseHandler2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z4 } from "zod";

  // src/convert-to-openai-completion-prompt.ts
  import {
  InvalidPromptError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
  } from "@ai-sdk/provider";
  function convertToOpenAICompletionPrompt({
  prompt,
@@ -1108,13 +899,8 @@ function convertToOpenAICompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new UnsupportedFunctionalityError4({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
${userMessage}

@@ -1128,7 +914,7 @@ ${userMessage}
  return part.text;
  }
  case "tool-call": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool-call messages"
  });
  }
@@ -1141,7 +927,7 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError3({
  functionality: "tool messages"
  });
  }
@@ -1160,25 +946,10 @@ ${user}:`]
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
-
  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1186,11 +957,15 @@ var OpenAICompletionLanguageModel = class {
  get provider() {
  return this.config.provider;
  }
+ async getSupportedUrls() {
+ return {
+ // no supported urls for completion models
+ };
+ }
  getArgs({
- mode,
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1198,16 +973,19 @@ var OpenAICompletionLanguageModel = class {
  presencePenalty,
  stopSequences: userStopSequences,
  responseFormat,
+ tools,
+ toolChoice,
  seed
  }) {
- var _a;
- const type = mode.type;
  const warnings = [];
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
+ }
+ if (tools == null ? void 0 : tools.length) {
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
+ }
+ if (toolChoice != null) {
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
  }
  if (responseFormat != null && responseFormat.type !== "text") {
  warnings.push({
@@ -1218,56 +996,29 @@ var OpenAICompletionLanguageModel = class {
  }
  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- seed,
- // prompt:
- prompt: completionPrompt,
- // stop sequences:
- stop: stop.length > 0 ? stop : void 0
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ echo: this.settings.echo,
+ logit_bias: this.settings.logitBias,
+ suffix: this.settings.suffix,
+ user: this.settings.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ seed,
+ // prompt:
+ prompt: completionPrompt,
+ // stop sequences:
+ stop: stop.length > 0 ? stop : void 0
+ },
+ warnings
  };
- switch (type) {
- case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new UnsupportedFunctionalityError5({
- functionality: "tools"
- });
- }
- if (mode.toolChoice) {
- throw new UnsupportedFunctionalityError5({
- functionality: "toolChoice"
- });
- }
- return { args: baseArgs, warnings };
- }
- case "object-json": {
- throw new UnsupportedFunctionalityError5({
- functionality: "object-json mode"
- });
- }
- case "object-tool": {
- throw new UnsupportedFunctionalityError5({
- functionality: "object-tool mode"
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
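
Note on this hunk: the completion model drops mode handling too, and features it cannot support (tools, toolChoice, non-text responseFormat) now produce `unsupported-setting` warnings instead of throwing `UnsupportedFunctionalityError`. Illustrative sketch:

```ts
declare const completionModel: {
  getArgs(opts: unknown): { args: unknown; warnings: unknown[] };
};

const { warnings } = completionModel.getArgs({
  inputFormat: 'prompt',
  prompt: [/* converted messages */],
  maxOutputTokens: 128,          // renamed from maxTokens
  tools: [{ /* ... */ }],        // previously threw, now warns
  toolChoice: { type: 'auto' },  // previously threw, now warns
});
// warnings -> [
//   { type: 'unsupported-setting', setting: 'tools' },
//   { type: 'unsupported-setting', setting: 'toolChoice' },
// ]
```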
  async doGenerate(options) {
  const { args, warnings } = this.getArgs(options);
@@ -1289,21 +1040,21 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
  return {
- text: choice.text,
+ content: [{ type: "text", text: choice.text }],
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
- warnings,
- request: { body: JSON.stringify(args) }
+ request: { body: args },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
@@ -1328,17 +1079,18 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  if (!chunk.success) {
  finishReason = "error";
@@ -1359,10 +1111,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1370,75 +1120,55 @@ var OpenAICompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
- }).nullish()
+ var openaiCompletionResponseSchema = z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  })
  });
- var openaiCompletionChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
- }).nullish()
+ var openaiCompletionChunkSchema = z4.union([
+ z4.object({
+ id: z4.string().nullish(),
+ created: z4.number().nullish(),
+ model: z4.string().nullish(),
+ choices: z4.array(
+ z4.object({
+ text: z4.string(),
+ finish_reason: z4.string().nullish(),
+ index: z4.number()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z4.object({
+ prompt_tokens: z4.number(),
+ completion_tokens: z4.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1451,12 +1181,30 @@ import {
  import {
  combineHeaders as combineHeaders3,
  createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z6 } from "zod";
+
+ // src/openai-embedding-options.ts
+ import { z as z5 } from "zod";
+ var openaiEmbeddingProviderOptions = z5.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: z5.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: z5.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1475,8 +1223,10 @@ var OpenAIEmbeddingModel = class {
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1485,7 +1235,16 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await postJsonToApi3({
+ const openaiOptions = (_a = parseProviderOptions2({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi3({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1495,8 +1254,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler3(
@@ -1508,13 +1267,13 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z6.object({
+ data: z6.array(z6.object({ embedding: z6.array(z6.number()) })),
+ usage: z6.object({ prompt_tokens: z6.number() }).nullish()
  });
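
Note on these hunks: embedding `dimensions` and `user` move from constructor settings to per-call provider options validated by `openaiEmbeddingProviderOptions`, and the model is bumped to specification v2 with the raw response body exposed. Hedged usage sketch, assuming the AI SDK `embed` helper forwards `providerOptions` in this canary:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    openai: { dimensions: 512, user: 'user-1234' }, // formerly constructor settings
  },
});
```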

  // src/openai-image-model.ts
@@ -1523,7 +1282,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z7 } from "zod";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1601,26 +1360,304 @@ var OpenAIImageModel = class {
1601
1360
  };
1602
1361
  }
1603
1362
  };
1604
- var openaiImageResponseSchema = z5.object({
1605
- data: z5.array(z5.object({ b64_json: z5.string() }))
1363
+ var openaiImageResponseSchema = z7.object({
1364
+ data: z7.array(z7.object({ b64_json: z7.string() }))
1606
1365
  });
1607
1366
 
1608
- // src/responses/openai-responses-language-model.ts
1367
+ // src/openai-transcription-model.ts
1609
1368
  import {
1610
1369
  combineHeaders as combineHeaders5,
1611
- createEventSourceResponseHandler as createEventSourceResponseHandler3,
1370
+ convertBase64ToUint8Array,
1612
1371
  createJsonResponseHandler as createJsonResponseHandler5,
1613
- generateId as generateId2,
1614
- parseProviderOptions,
1372
+ parseProviderOptions as parseProviderOptions3,
1373
+ postFormDataToApi
1374
+ } from "@ai-sdk/provider-utils";
1375
+ import { z as z8 } from "zod";
1376
+ var openAIProviderOptionsSchema = z8.object({
1377
+ include: z8.array(z8.string()).nullish(),
1378
+ language: z8.string().nullish(),
1379
+ prompt: z8.string().nullish(),
1380
+ temperature: z8.number().min(0).max(1).nullish().default(0),
1381
+ timestampGranularities: z8.array(z8.enum(["word", "segment"])).nullish().default(["segment"])
1382
+ });
1383
+ var languageMap = {
1384
+ afrikaans: "af",
1385
+ arabic: "ar",
1386
+ armenian: "hy",
1387
+ azerbaijani: "az",
1388
+ belarusian: "be",
1389
+ bosnian: "bs",
1390
+ bulgarian: "bg",
1391
+ catalan: "ca",
1392
+ chinese: "zh",
1393
+ croatian: "hr",
1394
+ czech: "cs",
1395
+ danish: "da",
1396
+ dutch: "nl",
1397
+ english: "en",
1398
+ estonian: "et",
1399
+ finnish: "fi",
1400
+ french: "fr",
1401
+ galician: "gl",
1402
+ german: "de",
1403
+ greek: "el",
1404
+ hebrew: "he",
1405
+ hindi: "hi",
1406
+ hungarian: "hu",
1407
+ icelandic: "is",
1408
+ indonesian: "id",
1409
+ italian: "it",
1410
+ japanese: "ja",
1411
+ kannada: "kn",
1412
+ kazakh: "kk",
1413
+ korean: "ko",
1414
+ latvian: "lv",
1415
+ lithuanian: "lt",
1416
+ macedonian: "mk",
1417
+ malay: "ms",
1418
+ marathi: "mr",
1419
+ maori: "mi",
1420
+ nepali: "ne",
1421
+ norwegian: "no",
1422
+ persian: "fa",
1423
+ polish: "pl",
1424
+ portuguese: "pt",
1425
+ romanian: "ro",
1426
+ russian: "ru",
1427
+ serbian: "sr",
1428
+ slovak: "sk",
1429
+ slovenian: "sl",
1430
+ spanish: "es",
1431
+ swahili: "sw",
1432
+ swedish: "sv",
1433
+ tagalog: "tl",
1434
+ tamil: "ta",
1435
+ thai: "th",
1436
+ turkish: "tr",
1437
+ ukrainian: "uk",
1438
+ urdu: "ur",
1439
+ vietnamese: "vi",
1440
+ welsh: "cy"
1441
+ };
1442
+ var OpenAITranscriptionModel = class {
1443
+ constructor(modelId, config) {
1444
+ this.modelId = modelId;
1445
+ this.config = config;
1446
+ this.specificationVersion = "v1";
1447
+ }
1448
+ get provider() {
1449
+ return this.config.provider;
1450
+ }
1451
+ getArgs({
1452
+ audio,
1453
+ mediaType,
1454
+ providerOptions
1455
+ }) {
1456
+ var _a, _b, _c, _d, _e;
1457
+ const warnings = [];
1458
+ const openAIOptions = parseProviderOptions3({
1459
+ provider: "openai",
1460
+ providerOptions,
1461
+ schema: openAIProviderOptionsSchema
1462
+ });
1463
+ const formData = new FormData();
1464
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
1465
+ formData.append("model", this.modelId);
1466
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
1467
+ if (openAIOptions) {
1468
+ const transcriptionModelOptions = {
1469
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
1470
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
1471
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
1472
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
1473
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
1474
+ };
1475
+ for (const key in transcriptionModelOptions) {
1476
+ const value = transcriptionModelOptions[key];
1477
+ if (value !== void 0) {
1478
+ formData.append(key, String(value));
1479
+ }
1480
+ }
1481
+ }
1482
+ return {
1483
+ formData,
1484
+ warnings
1485
+ };
1486
+ }
1487
+ async doGenerate(options) {
1488
+ var _a, _b, _c, _d, _e, _f;
1489
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1490
+ const { formData, warnings } = this.getArgs(options);
1491
+ const {
1492
+ value: response,
1493
+ responseHeaders,
1494
+ rawValue: rawResponse
1495
+ } = await postFormDataToApi({
1496
+ url: this.config.url({
1497
+ path: "/audio/transcriptions",
1498
+ modelId: this.modelId
1499
+ }),
1500
+ headers: combineHeaders5(this.config.headers(), options.headers),
1501
+ formData,
1502
+ failedResponseHandler: openaiFailedResponseHandler,
1503
+ successfulResponseHandler: createJsonResponseHandler5(
1504
+ openaiTranscriptionResponseSchema
1505
+ ),
1506
+ abortSignal: options.abortSignal,
1507
+ fetch: this.config.fetch
1508
+ });
1509
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1510
+ return {
1511
+ text: response.text,
1512
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
1513
+ text: word.word,
1514
+ startSecond: word.start,
1515
+ endSecond: word.end
1516
+ }))) != null ? _e : [],
1517
+ language,
1518
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
1519
+ warnings,
1520
+ response: {
1521
+ timestamp: currentDate,
1522
+ modelId: this.modelId,
1523
+ headers: responseHeaders,
1524
+ body: rawResponse
1525
+ }
1526
+ };
1527
+ }
1528
+ };
1529
+ var openaiTranscriptionResponseSchema = z8.object({
1530
+ text: z8.string(),
1531
+ language: z8.string().nullish(),
1532
+ duration: z8.number().nullish(),
1533
+ words: z8.array(
1534
+ z8.object({
1535
+ word: z8.string(),
1536
+ start: z8.number(),
1537
+ end: z8.number()
1538
+ })
1539
+ ).nullish()
1540
+ });
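
Together, openAIProviderOptionsSchema, languageMap, OpenAITranscriptionModel, and the response schema above add speech-to-text support to the provider. A hedged usage sketch, assuming the canary AI SDK exposes experimental_transcribe and an openai.transcription() factory (both assumptions; this prerelease API may differ):

import { experimental_transcribe as transcribe } from 'ai'; // assumed entry point
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const { text, segments, language, durationInSeconds } = await transcribe({
  model: openai.transcription('whisper-1'), // model id is an assumption
  audio: await readFile('meeting.wav'),
  providerOptions: {
    openai: {
      // field names from openAIProviderOptionsSchema above
      language: 'en',
      temperature: 0,
      timestampGranularities: ['word'], // per-word segments via response.words
    },
  },
});

Note that getArgs serializes these options onto a multipart form (snake-cased as timestamp_granularities) and sends the audio as a File named "audio" with the caller's media type.
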
1541
+
1542
+ // src/openai-speech-model.ts
1543
+ import {
1544
+ combineHeaders as combineHeaders6,
1545
+ createBinaryResponseHandler,
1546
+ parseProviderOptions as parseProviderOptions4,
1615
1547
  postJsonToApi as postJsonToApi5
1616
1548
  } from "@ai-sdk/provider-utils";
1617
- import { z as z6 } from "zod";
1549
+ import { z as z9 } from "zod";
1550
+ var OpenAIProviderOptionsSchema = z9.object({
1551
+ instructions: z9.string().nullish(),
1552
+ speed: z9.number().min(0.25).max(4).default(1).nullish()
1553
+ });
1554
+ var OpenAISpeechModel = class {
1555
+ constructor(modelId, config) {
1556
+ this.modelId = modelId;
1557
+ this.config = config;
1558
+ this.specificationVersion = "v1";
1559
+ }
1560
+ get provider() {
1561
+ return this.config.provider;
1562
+ }
1563
+ getArgs({
1564
+ text,
1565
+ voice = "alloy",
1566
+ outputFormat = "mp3",
1567
+ speed,
1568
+ instructions,
1569
+ providerOptions
1570
+ }) {
1571
+ const warnings = [];
1572
+ const openAIOptions = parseProviderOptions4({
1573
+ provider: "openai",
1574
+ providerOptions,
1575
+ schema: OpenAIProviderOptionsSchema
1576
+ });
1577
+ const requestBody = {
1578
+ model: this.modelId,
1579
+ input: text,
1580
+ voice,
1581
+ response_format: "mp3",
1582
+ speed,
1583
+ instructions
1584
+ };
1585
+ if (outputFormat) {
1586
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
1587
+ requestBody.response_format = outputFormat;
1588
+ } else {
1589
+ warnings.push({
1590
+ type: "unsupported-setting",
1591
+ setting: "outputFormat",
1592
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
1593
+ });
1594
+ }
1595
+ }
1596
+ if (openAIOptions) {
1597
+ const speechModelOptions = {};
1598
+ for (const key in speechModelOptions) {
1599
+ const value = speechModelOptions[key];
1600
+ if (value !== void 0) {
1601
+ requestBody[key] = value;
1602
+ }
1603
+ }
1604
+ }
1605
+ return {
1606
+ requestBody,
1607
+ warnings
1608
+ };
1609
+ }
1610
+ async doGenerate(options) {
1611
+ var _a, _b, _c;
1612
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1613
+ const { requestBody, warnings } = this.getArgs(options);
1614
+ const {
1615
+ value: audio,
1616
+ responseHeaders,
1617
+ rawValue: rawResponse
1618
+ } = await postJsonToApi5({
1619
+ url: this.config.url({
1620
+ path: "/audio/speech",
1621
+ modelId: this.modelId
1622
+ }),
1623
+ headers: combineHeaders6(this.config.headers(), options.headers),
1624
+ body: requestBody,
1625
+ failedResponseHandler: openaiFailedResponseHandler,
1626
+ successfulResponseHandler: createBinaryResponseHandler(),
1627
+ abortSignal: options.abortSignal,
1628
+ fetch: this.config.fetch
1629
+ });
1630
+ return {
1631
+ audio,
1632
+ warnings,
1633
+ request: {
1634
+ body: JSON.stringify(requestBody)
1635
+ },
1636
+ response: {
1637
+ timestamp: currentDate,
1638
+ modelId: this.modelId,
1639
+ headers: responseHeaders,
1640
+ body: rawResponse
1641
+ }
1642
+ };
1643
+ }
1644
+ };
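
A matching sketch for the new speech model. The experimental_generateSpeech entry point and openai.speech() factory are assumptions; the voice and format defaults and the mp3 fallback warning come from getArgs above. Note that speed and instructions are read from the top-level call options, while the parsed openai provider options (OpenAIProviderOptionsSchema, with its 0.25-4 speed bounds) are currently copied through an empty speechModelOptions object and so never reach the request body in this canary.

import { experimental_generateSpeech as generateSpeech } from 'ai'; // assumed entry point
import { openai } from '@ai-sdk/openai';

const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'), // factory and model id are assumptions
  text: 'Hello from the canary provider.',
  voice: 'alloy', // same as the default applied in getArgs
  outputFormat: 'wav', // unsupported values fall back to mp3 with a warning
  speed: 1.25, // forwarded into the request body as-is
  instructions: 'Speak slowly and clearly.',
});
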
1645
+
1646
+ // src/responses/openai-responses-language-model.ts
1647
+ import {
1648
+ combineHeaders as combineHeaders7,
1649
+ createEventSourceResponseHandler as createEventSourceResponseHandler3,
1650
+ createJsonResponseHandler as createJsonResponseHandler6,
1651
+ generateId as generateId2,
1652
+ parseProviderOptions as parseProviderOptions5,
1653
+ postJsonToApi as postJsonToApi6
1654
+ } from "@ai-sdk/provider-utils";
1655
+ import { z as z10 } from "zod";
1618
1656
 
1619
1657
  // src/responses/convert-to-openai-responses-messages.ts
1620
1658
  import {
1621
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
1659
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
1622
1660
  } from "@ai-sdk/provider";
1623
- import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
1624
1661
  function convertToOpenAIResponsesMessages({
1625
1662
  prompt,
1626
1663
  systemMessageMode
@@ -1659,38 +1696,35 @@ function convertToOpenAIResponsesMessages({
1659
1696
  messages.push({
1660
1697
  role: "user",
1661
1698
  content: content.map((part, index) => {
1662
- var _a, _b, _c, _d;
1699
+ var _a, _b, _c;
1663
1700
  switch (part.type) {
1664
1701
  case "text": {
1665
1702
  return { type: "input_text", text: part.text };
1666
1703
  }
1667
- case "image": {
1668
- return {
1669
- type: "input_image",
1670
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
1671
- // OpenAI specific extension: image detail
1672
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
1673
- };
1674
- }
1675
1704
  case "file": {
1676
- if (part.data instanceof URL) {
1677
- throw new UnsupportedFunctionalityError6({
1678
- functionality: "File URLs in user messages"
1679
- });
1680
- }
1681
- switch (part.mimeType) {
1682
- case "application/pdf": {
1683
- return {
1684
- type: "input_file",
1685
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
1686
- file_data: `data:application/pdf;base64,${part.data}`
1687
- };
1688
- }
1689
- default: {
1690
- throw new UnsupportedFunctionalityError6({
1691
- functionality: "Only PDF files are supported in user messages"
1705
+ if (part.mediaType.startsWith("image/")) {
1706
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
1707
+ return {
1708
+ type: "input_image",
1709
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
1710
+ // OpenAI specific extension: image detail
1711
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
1712
+ };
1713
+ } else if (part.mediaType === "application/pdf") {
1714
+ if (part.data instanceof URL) {
1715
+ throw new UnsupportedFunctionalityError4({
1716
+ functionality: "PDF file parts with URLs"
1692
1717
  });
1693
1718
  }
1719
+ return {
1720
+ type: "input_file",
1721
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
1722
+ file_data: `data:application/pdf;base64,${part.data}`
1723
+ };
1724
+ } else {
1725
+ throw new UnsupportedFunctionalityError4({
1726
+ functionality: `file part media type ${part.mediaType}`
1727
+ });
1694
1728
  }
1695
1729
  }
1696
1730
  }
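
As in the chat converter, the dedicated image part is gone: the Responses converter now keys everything off a file part's mediaType, with image/* defaulting to JPEG, application/pdf mapping to input_file, and anything else rejected. Image URLs can pass through untouched because getSupportedUrls (added below) advertises http(s) support for image/*. A sketch of the user content shapes this accepts (the surrounding v2 prompt type is assumed):

declare const base64Pdf: string; // base64-encoded PDF bytes

const content = [
  { type: 'text', text: 'Summarize the attached material.' },
  {
    type: 'file',
    mediaType: 'image/png', // 'image/*' is coerced to image/jpeg
    data: new URL('https://example.com/chart.png'),
    providerOptions: { openai: { imageDetail: 'low' } },
  },
  {
    type: 'file',
    mediaType: 'application/pdf', // URL data throws UnsupportedFunctionalityError
    data: base64Pdf,
    filename: 'report.pdf', // defaults to part-<index>.pdf when omitted
  },
];
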
@@ -1760,19 +1794,18 @@ function mapOpenAIResponseFinishReason({
1760
1794
 
1761
1795
  // src/responses/openai-responses-prepare-tools.ts
1762
1796
  import {
1763
- UnsupportedFunctionalityError as UnsupportedFunctionalityError7
1797
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
1764
1798
  } from "@ai-sdk/provider";
1765
1799
  function prepareResponsesTools({
1766
- mode,
1800
+ tools,
1801
+ toolChoice,
1767
1802
  strict
1768
1803
  }) {
1769
- var _a;
1770
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
1804
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
1771
1805
  const toolWarnings = [];
1772
1806
  if (tools == null) {
1773
- return { tools: void 0, tool_choice: void 0, toolWarnings };
1807
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
1774
1808
  }
1775
- const toolChoice = mode.toolChoice;
1776
1809
  const openaiTools = [];
1777
1810
  for (const tool of tools) {
1778
1811
  switch (tool.type) {
@@ -1805,37 +1838,24 @@ function prepareResponsesTools({
1805
1838
  }
1806
1839
  }
1807
1840
  if (toolChoice == null) {
1808
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
1841
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
1809
1842
  }
1810
1843
  const type = toolChoice.type;
1811
1844
  switch (type) {
1812
1845
  case "auto":
1813
1846
  case "none":
1814
1847
  case "required":
1815
- return { tools: openaiTools, tool_choice: type, toolWarnings };
1816
- case "tool": {
1817
- if (toolChoice.toolName === "web_search_preview") {
1818
- return {
1819
- tools: openaiTools,
1820
- tool_choice: {
1821
- type: "web_search_preview"
1822
- },
1823
- toolWarnings
1824
- };
1825
- }
1848
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
1849
+ case "tool":
1826
1850
  return {
1827
1851
  tools: openaiTools,
1828
- tool_choice: {
1829
- type: "function",
1830
- name: toolChoice.toolName
1831
- },
1852
+ toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
1832
1853
  toolWarnings
1833
1854
  };
1834
- }
1835
1855
  default: {
1836
1856
  const _exhaustiveCheck = type;
1837
- throw new UnsupportedFunctionalityError7({
1838
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
1857
+ throw new UnsupportedFunctionalityError5({
1858
+ functionality: `tool choice type: ${_exhaustiveCheck}`
1839
1859
  });
1840
1860
  }
1841
1861
  }
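
prepareResponsesTools now receives tools and toolChoice directly instead of digging them out of a mode object, and returns a camelCase toolChoice. A sketch of the mapping; the function-tool fields are inferred from the loop elided between hunks, so treat them as assumptions:

const { tools, toolChoice, toolWarnings } = prepareResponsesTools({
  tools: [
    {
      type: 'function',
      name: 'getWeather',
      description: 'Look up current weather',
      parameters: { type: 'object', properties: { city: { type: 'string' } } },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'getWeather' },
  strict: true,
});
// toolChoice here becomes { type: 'function', name: 'getWeather' };
// toolName 'web_search_preview' maps to { type: 'web_search_preview' } instead.
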
@@ -1845,16 +1865,19 @@ function prepareResponsesTools({
1845
1865
  var OpenAIResponsesLanguageModel = class {
1846
1866
  constructor(modelId, config) {
1847
1867
  this.specificationVersion = "v2";
1848
- this.defaultObjectGenerationMode = "json";
1849
1868
  this.modelId = modelId;
1850
1869
  this.config = config;
1851
1870
  }
1871
+ async getSupportedUrls() {
1872
+ return {
1873
+ "image/*": [/^https?:\/\/.*$/]
1874
+ };
1875
+ }
1852
1876
  get provider() {
1853
1877
  return this.config.provider;
1854
1878
  }
1855
1879
  getArgs({
1856
- mode,
1857
- maxTokens,
1880
+ maxOutputTokens,
1858
1881
  temperature,
1859
1882
  stopSequences,
1860
1883
  topP,
@@ -1863,24 +1886,19 @@ var OpenAIResponsesLanguageModel = class {
1863
1886
  frequencyPenalty,
1864
1887
  seed,
1865
1888
  prompt,
1866
- providerMetadata,
1889
+ providerOptions,
1890
+ tools,
1891
+ toolChoice,
1867
1892
  responseFormat
1868
1893
  }) {
1869
- var _a, _b, _c;
1894
+ var _a, _b;
1870
1895
  const warnings = [];
1871
1896
  const modelConfig = getResponsesModelConfig(this.modelId);
1872
- const type = mode.type;
1873
1897
  if (topK != null) {
1874
- warnings.push({
1875
- type: "unsupported-setting",
1876
- setting: "topK"
1877
- });
1898
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
1878
1899
  }
1879
1900
  if (seed != null) {
1880
- warnings.push({
1881
- type: "unsupported-setting",
1882
- setting: "seed"
1883
- });
1901
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
1884
1902
  }
1885
1903
  if (presencePenalty != null) {
1886
1904
  warnings.push({
@@ -1895,19 +1913,16 @@ var OpenAIResponsesLanguageModel = class {
1895
1913
  });
1896
1914
  }
1897
1915
  if (stopSequences != null) {
1898
- warnings.push({
1899
- type: "unsupported-setting",
1900
- setting: "stopSequences"
1901
- });
1916
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
1902
1917
  }
1903
1918
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
1904
1919
  prompt,
1905
1920
  systemMessageMode: modelConfig.systemMessageMode
1906
1921
  });
1907
1922
  warnings.push(...messageWarnings);
1908
- const openaiOptions = parseProviderOptions({
1923
+ const openaiOptions = parseProviderOptions5({
1909
1924
  provider: "openai",
1910
- providerOptions: providerMetadata,
1925
+ providerOptions,
1911
1926
  schema: openaiResponsesProviderOptionsSchema
1912
1927
  });
1913
1928
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -1916,7 +1931,7 @@ var OpenAIResponsesLanguageModel = class {
1916
1931
  input: messages,
1917
1932
  temperature,
1918
1933
  top_p: topP,
1919
- max_output_tokens: maxTokens,
1934
+ max_output_tokens: maxOutputTokens,
1920
1935
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
1921
1936
  text: {
1922
1937
  format: responseFormat.schema != null ? {
@@ -1936,8 +1951,15 @@ var OpenAIResponsesLanguageModel = class {
1936
1951
  user: openaiOptions == null ? void 0 : openaiOptions.user,
1937
1952
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
1938
1953
  // model-specific settings:
1939
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
1940
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
1954
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
1955
+ reasoning: {
1956
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
1957
+ effort: openaiOptions.reasoningEffort
1958
+ },
1959
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
1960
+ summary: openaiOptions.reasoningSummary
1961
+ }
1962
+ }
1941
1963
  },
1942
1964
  ...modelConfig.requiredAutoTruncation && {
1943
1965
  truncation: "auto"
@@ -1961,178 +1983,159 @@ var OpenAIResponsesLanguageModel = class {
1961
1983
  });
1962
1984
  }
1963
1985
  }
1964
- switch (type) {
1965
- case "regular": {
1966
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
1967
- mode,
1968
- strict: isStrict
1969
- // TODO support provider options on tools
1970
- });
1971
- return {
1972
- args: {
1973
- ...baseArgs,
1974
- tools,
1975
- tool_choice
1976
- },
1977
- warnings: [...warnings, ...toolWarnings]
1978
- };
1979
- }
1980
- case "object-json": {
1981
- return {
1982
- args: {
1983
- ...baseArgs,
1984
- text: {
1985
- format: mode.schema != null ? {
1986
- type: "json_schema",
1987
- strict: isStrict,
1988
- name: (_c = mode.name) != null ? _c : "response",
1989
- description: mode.description,
1990
- schema: mode.schema
1991
- } : { type: "json_object" }
1992
- }
1993
- },
1994
- warnings
1995
- };
1996
- }
1997
- case "object-tool": {
1998
- return {
1999
- args: {
2000
- ...baseArgs,
2001
- tool_choice: { type: "function", name: mode.tool.name },
2002
- tools: [
2003
- {
2004
- type: "function",
2005
- name: mode.tool.name,
2006
- description: mode.tool.description,
2007
- parameters: mode.tool.parameters,
2008
- strict: isStrict
2009
- }
2010
- ]
2011
- },
2012
- warnings
2013
- };
2014
- }
2015
- default: {
2016
- const _exhaustiveCheck = type;
2017
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
2018
- }
2019
- }
1986
+ const {
1987
+ tools: openaiTools,
1988
+ toolChoice: openaiToolChoice,
1989
+ toolWarnings
1990
+ } = prepareResponsesTools({
1991
+ tools,
1992
+ toolChoice,
1993
+ strict: isStrict
1994
+ });
1995
+ return {
1996
+ args: {
1997
+ ...baseArgs,
1998
+ tools: openaiTools,
1999
+ tool_choice: openaiToolChoice
2000
+ },
2001
+ warnings: [...warnings, ...toolWarnings]
2002
+ };
2020
2003
  }
2021
2004
  async doGenerate(options) {
2022
- var _a, _b, _c, _d, _e;
2005
+ var _a, _b, _c, _d, _e, _f, _g, _h;
2023
2006
  const { args: body, warnings } = this.getArgs(options);
2024
2007
  const {
2025
2008
  responseHeaders,
2026
2009
  value: response,
2027
2010
  rawValue: rawResponse
2028
- } = await postJsonToApi5({
2011
+ } = await postJsonToApi6({
2029
2012
  url: this.config.url({
2030
2013
  path: "/responses",
2031
2014
  modelId: this.modelId
2032
2015
  }),
2033
- headers: combineHeaders5(this.config.headers(), options.headers),
2016
+ headers: combineHeaders7(this.config.headers(), options.headers),
2034
2017
  body,
2035
2018
  failedResponseHandler: openaiFailedResponseHandler,
2036
- successfulResponseHandler: createJsonResponseHandler5(
2037
- z6.object({
2038
- id: z6.string(),
2039
- created_at: z6.number(),
2040
- model: z6.string(),
2041
- output: z6.array(
2042
- z6.discriminatedUnion("type", [
2043
- z6.object({
2044
- type: z6.literal("message"),
2045
- role: z6.literal("assistant"),
2046
- content: z6.array(
2047
- z6.object({
2048
- type: z6.literal("output_text"),
2049
- text: z6.string(),
2050
- annotations: z6.array(
2051
- z6.object({
2052
- type: z6.literal("url_citation"),
2053
- start_index: z6.number(),
2054
- end_index: z6.number(),
2055
- url: z6.string(),
2056
- title: z6.string()
2019
+ successfulResponseHandler: createJsonResponseHandler6(
2020
+ z10.object({
2021
+ id: z10.string(),
2022
+ created_at: z10.number(),
2023
+ model: z10.string(),
2024
+ output: z10.array(
2025
+ z10.discriminatedUnion("type", [
2026
+ z10.object({
2027
+ type: z10.literal("message"),
2028
+ role: z10.literal("assistant"),
2029
+ content: z10.array(
2030
+ z10.object({
2031
+ type: z10.literal("output_text"),
2032
+ text: z10.string(),
2033
+ annotations: z10.array(
2034
+ z10.object({
2035
+ type: z10.literal("url_citation"),
2036
+ start_index: z10.number(),
2037
+ end_index: z10.number(),
2038
+ url: z10.string(),
2039
+ title: z10.string()
2057
2040
  })
2058
2041
  )
2059
2042
  })
2060
2043
  )
2061
2044
  }),
2062
- z6.object({
2063
- type: z6.literal("function_call"),
2064
- call_id: z6.string(),
2065
- name: z6.string(),
2066
- arguments: z6.string()
2045
+ z10.object({
2046
+ type: z10.literal("function_call"),
2047
+ call_id: z10.string(),
2048
+ name: z10.string(),
2049
+ arguments: z10.string()
2067
2050
  }),
2068
- z6.object({
2069
- type: z6.literal("web_search_call")
2051
+ z10.object({
2052
+ type: z10.literal("web_search_call")
2070
2053
  }),
2071
- z6.object({
2072
- type: z6.literal("computer_call")
2054
+ z10.object({
2055
+ type: z10.literal("computer_call")
2073
2056
  }),
2074
- z6.object({
2075
- type: z6.literal("reasoning")
2057
+ z10.object({
2058
+ type: z10.literal("reasoning"),
2059
+ summary: z10.array(
2060
+ z10.object({
2061
+ type: z10.literal("summary_text"),
2062
+ text: z10.string()
2063
+ })
2064
+ )
2076
2065
  })
2077
2066
  ])
2078
2067
  ),
2079
- incomplete_details: z6.object({ reason: z6.string() }).nullable(),
2068
+ incomplete_details: z10.object({ reason: z10.string() }).nullable(),
2080
2069
  usage: usageSchema
2081
2070
  })
2082
2071
  ),
2083
2072
  abortSignal: options.abortSignal,
2084
2073
  fetch: this.config.fetch
2085
2074
  });
2086
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
2087
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
2088
- toolCallType: "function",
2089
- toolCallId: output.call_id,
2090
- toolName: output.name,
2091
- args: output.arguments
2092
- }));
2075
+ const content = [];
2076
+ for (const part of response.output) {
2077
+ switch (part.type) {
2078
+ case "reasoning": {
2079
+ content.push({
2080
+ type: "reasoning",
2081
+ reasoningType: "text",
2082
+ text: part.summary.map((summary) => summary.text).join()
2083
+ });
2084
+ break;
2085
+ }
2086
+ case "message": {
2087
+ for (const contentPart of part.content) {
2088
+ content.push({
2089
+ type: "text",
2090
+ text: contentPart.text
2091
+ });
2092
+ for (const annotation of contentPart.annotations) {
2093
+ content.push({
2094
+ type: "source",
2095
+ sourceType: "url",
2096
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
2097
+ url: annotation.url,
2098
+ title: annotation.title
2099
+ });
2100
+ }
2101
+ }
2102
+ break;
2103
+ }
2104
+ case "function_call": {
2105
+ content.push({
2106
+ type: "tool-call",
2107
+ toolCallType: "function",
2108
+ toolCallId: part.call_id,
2109
+ toolName: part.name,
2110
+ args: part.arguments
2111
+ });
2112
+ break;
2113
+ }
2114
+ }
2115
+ }
2093
2116
  return {
2094
- text: outputTextElements.map((content) => content.text).join("\n"),
2095
- sources: outputTextElements.flatMap(
2096
- (content) => content.annotations.map((annotation) => {
2097
- var _a2, _b2, _c2;
2098
- return {
2099
- sourceType: "url",
2100
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
2101
- url: annotation.url,
2102
- title: annotation.title
2103
- };
2104
- })
2105
- ),
2117
+ content,
2106
2118
  finishReason: mapOpenAIResponseFinishReason({
2107
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
2108
- hasToolCalls: toolCalls.length > 0
2119
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
2120
+ hasToolCalls: content.some((part) => part.type === "tool-call")
2109
2121
  }),
2110
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2111
2122
  usage: {
2112
- promptTokens: response.usage.input_tokens,
2113
- completionTokens: response.usage.output_tokens
2114
- },
2115
- rawCall: {
2116
- rawPrompt: void 0,
2117
- rawSettings: {}
2118
- },
2119
- rawResponse: {
2120
- headers: responseHeaders,
2121
- body: rawResponse
2122
- },
2123
- request: {
2124
- body: JSON.stringify(body)
2123
+ inputTokens: response.usage.input_tokens,
2124
+ outputTokens: response.usage.output_tokens
2125
2125
  },
2126
+ request: { body },
2126
2127
  response: {
2127
2128
  id: response.id,
2128
2129
  timestamp: new Date(response.created_at * 1e3),
2129
- modelId: response.model
2130
+ modelId: response.model,
2131
+ headers: responseHeaders,
2132
+ body: rawResponse
2130
2133
  },
2131
2134
  providerMetadata: {
2132
2135
  openai: {
2133
2136
  responseId: response.id,
2134
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
2135
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
2137
+ cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
2138
+ reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
2136
2139
  }
2137
2140
  },
2138
2141
  warnings
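
The v2 doGenerate above replaces the separate text/sources/toolCalls fields with one ordered content array, and usage is renamed to inputTokens/outputTokens. A sketch of consuming the new result shape (the part fields are exactly those pushed in the loop above):

declare const model: InstanceType<typeof OpenAIResponsesLanguageModel>;
declare const callOptions: Parameters<typeof model.doGenerate>[0];

const result = await model.doGenerate(callOptions);
for (const part of result.content) {
  switch (part.type) {
    case 'reasoning': console.log('[reasoning]', part.text); break;
    case 'text':      console.log(part.text); break;
    case 'source':    console.log('source:', part.url, part.title); break;
    case 'tool-call': console.log('call', part.toolName, part.args); break;
  }
}
console.log('tokens:', result.usage.inputTokens, result.usage.outputTokens);
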
@@ -2140,12 +2143,12 @@ var OpenAIResponsesLanguageModel = class {
2140
2143
  }
2141
2144
  async doStream(options) {
2142
2145
  const { args: body, warnings } = this.getArgs(options);
2143
- const { responseHeaders, value: response } = await postJsonToApi5({
2146
+ const { responseHeaders, value: response } = await postJsonToApi6({
2144
2147
  url: this.config.url({
2145
2148
  path: "/responses",
2146
2149
  modelId: this.modelId
2147
2150
  }),
2148
- headers: combineHeaders5(this.config.headers(), options.headers),
2151
+ headers: combineHeaders7(this.config.headers(), options.headers),
2149
2152
  body: {
2150
2153
  ...body,
2151
2154
  stream: true
@@ -2159,8 +2162,10 @@ var OpenAIResponsesLanguageModel = class {
2159
2162
  });
2160
2163
  const self = this;
2161
2164
  let finishReason = "unknown";
2162
- let promptTokens = NaN;
2163
- let completionTokens = NaN;
2165
+ const usage = {
2166
+ inputTokens: void 0,
2167
+ outputTokens: void 0
2168
+ };
2164
2169
  let cachedPromptTokens = null;
2165
2170
  let reasoningTokens = null;
2166
2171
  let responseId = null;
@@ -2169,6 +2174,9 @@ var OpenAIResponsesLanguageModel = class {
2169
2174
  return {
2170
2175
  stream: response.pipeThrough(
2171
2176
  new TransformStream({
2177
+ start(controller) {
2178
+ controller.enqueue({ type: "stream-start", warnings });
2179
+ },
2172
2180
  transform(chunk, controller) {
2173
2181
  var _a, _b, _c, _d, _e, _f, _g, _h;
2174
2182
  if (!chunk.success) {
@@ -2212,8 +2220,14 @@ var OpenAIResponsesLanguageModel = class {
2212
2220
  });
2213
2221
  } else if (isTextDeltaChunk(value)) {
2214
2222
  controller.enqueue({
2215
- type: "text-delta",
2216
- textDelta: value.delta
2223
+ type: "text",
2224
+ text: value.delta
2225
+ });
2226
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2227
+ controller.enqueue({
2228
+ type: "reasoning",
2229
+ reasoningType: "text",
2230
+ text: value.delta
2217
2231
  });
2218
2232
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
2219
2233
  ongoingToolCalls[value.output_index] = void 0;
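
Streaming changes in the same spirit: the stream now opens with a { type: 'stream-start', warnings } part, text deltas arrive as { type: 'text', text } instead of 'text-delta'/textDelta, the new reasoning-summary deltas surface as { type: 'reasoning', reasoningType: 'text', text }, and source parts are flattened (sourceType/id/url/title at the top level instead of nested under source, as the next hunk shows). A consumer sketch over the parts enqueued above:

declare const model: InstanceType<typeof OpenAIResponsesLanguageModel>;
declare const callOptions: Parameters<typeof model.doStream>[0];

const { stream } = await model.doStream(callOptions);
const reader = stream.getReader();
for (;;) {
  const { done, value: part } = await reader.read();
  if (done) break;
  switch (part.type) {
    case 'stream-start': console.warn(part.warnings); break;
    case 'text':         process.stdout.write(part.text); break;
    case 'reasoning':    console.log('\n[reasoning]', part.text); break;
    case 'source':       console.log('\nsource:', part.url); break;
    case 'finish':       console.log('\nusage:', part.usage); break;
  }
}
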
@@ -2230,19 +2244,17 @@ var OpenAIResponsesLanguageModel = class {
2230
2244
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2231
2245
  hasToolCalls
2232
2246
  });
2233
- promptTokens = value.response.usage.input_tokens;
2234
- completionTokens = value.response.usage.output_tokens;
2247
+ usage.inputTokens = value.response.usage.input_tokens;
2248
+ usage.outputTokens = value.response.usage.output_tokens;
2235
2249
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2236
2250
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2237
2251
  } else if (isResponseAnnotationAddedChunk(value)) {
2238
2252
  controller.enqueue({
2239
2253
  type: "source",
2240
- source: {
2241
- sourceType: "url",
2242
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
2243
- url: value.annotation.url,
2244
- title: value.annotation.title
2245
- }
2254
+ sourceType: "url",
2255
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
2256
+ url: value.annotation.url,
2257
+ title: value.annotation.title
2246
2258
  });
2247
2259
  }
2248
2260
  },
@@ -2250,7 +2262,7 @@ var OpenAIResponsesLanguageModel = class {
2250
2262
  controller.enqueue({
2251
2263
  type: "finish",
2252
2264
  finishReason,
2253
- usage: { promptTokens, completionTokens },
2265
+ usage,
2254
2266
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
2255
2267
  providerMetadata: {
2256
2268
  openai: {
@@ -2264,89 +2276,91 @@ var OpenAIResponsesLanguageModel = class {
2264
2276
  }
2265
2277
  })
2266
2278
  ),
2267
- rawCall: {
2268
- rawPrompt: void 0,
2269
- rawSettings: {}
2270
- },
2271
- rawResponse: { headers: responseHeaders },
2272
- request: { body: JSON.stringify(body) },
2273
- warnings
2279
+ request: { body },
2280
+ response: { headers: responseHeaders }
2274
2281
  };
2275
2282
  }
2276
2283
  };
2277
- var usageSchema = z6.object({
2278
- input_tokens: z6.number(),
2279
- input_tokens_details: z6.object({ cached_tokens: z6.number().nullish() }).nullish(),
2280
- output_tokens: z6.number(),
2281
- output_tokens_details: z6.object({ reasoning_tokens: z6.number().nullish() }).nullish()
2284
+ var usageSchema = z10.object({
2285
+ input_tokens: z10.number(),
2286
+ input_tokens_details: z10.object({ cached_tokens: z10.number().nullish() }).nullish(),
2287
+ output_tokens: z10.number(),
2288
+ output_tokens_details: z10.object({ reasoning_tokens: z10.number().nullish() }).nullish()
2282
2289
  });
2283
- var textDeltaChunkSchema = z6.object({
2284
- type: z6.literal("response.output_text.delta"),
2285
- delta: z6.string()
2290
+ var textDeltaChunkSchema = z10.object({
2291
+ type: z10.literal("response.output_text.delta"),
2292
+ delta: z10.string()
2286
2293
  });
2287
- var responseFinishedChunkSchema = z6.object({
2288
- type: z6.enum(["response.completed", "response.incomplete"]),
2289
- response: z6.object({
2290
- incomplete_details: z6.object({ reason: z6.string() }).nullish(),
2294
+ var responseFinishedChunkSchema = z10.object({
2295
+ type: z10.enum(["response.completed", "response.incomplete"]),
2296
+ response: z10.object({
2297
+ incomplete_details: z10.object({ reason: z10.string() }).nullish(),
2291
2298
  usage: usageSchema
2292
2299
  })
2293
2300
  });
2294
- var responseCreatedChunkSchema = z6.object({
2295
- type: z6.literal("response.created"),
2296
- response: z6.object({
2297
- id: z6.string(),
2298
- created_at: z6.number(),
2299
- model: z6.string()
2301
+ var responseCreatedChunkSchema = z10.object({
2302
+ type: z10.literal("response.created"),
2303
+ response: z10.object({
2304
+ id: z10.string(),
2305
+ created_at: z10.number(),
2306
+ model: z10.string()
2300
2307
  })
2301
2308
  });
2302
- var responseOutputItemDoneSchema = z6.object({
2303
- type: z6.literal("response.output_item.done"),
2304
- output_index: z6.number(),
2305
- item: z6.discriminatedUnion("type", [
2306
- z6.object({
2307
- type: z6.literal("message")
2309
+ var responseOutputItemDoneSchema = z10.object({
2310
+ type: z10.literal("response.output_item.done"),
2311
+ output_index: z10.number(),
2312
+ item: z10.discriminatedUnion("type", [
2313
+ z10.object({
2314
+ type: z10.literal("message")
2308
2315
  }),
2309
- z6.object({
2310
- type: z6.literal("function_call"),
2311
- id: z6.string(),
2312
- call_id: z6.string(),
2313
- name: z6.string(),
2314
- arguments: z6.string(),
2315
- status: z6.literal("completed")
2316
+ z10.object({
2317
+ type: z10.literal("function_call"),
2318
+ id: z10.string(),
2319
+ call_id: z10.string(),
2320
+ name: z10.string(),
2321
+ arguments: z10.string(),
2322
+ status: z10.literal("completed")
2316
2323
  })
2317
2324
  ])
2318
2325
  });
2319
- var responseFunctionCallArgumentsDeltaSchema = z6.object({
2320
- type: z6.literal("response.function_call_arguments.delta"),
2321
- item_id: z6.string(),
2322
- output_index: z6.number(),
2323
- delta: z6.string()
2326
+ var responseFunctionCallArgumentsDeltaSchema = z10.object({
2327
+ type: z10.literal("response.function_call_arguments.delta"),
2328
+ item_id: z10.string(),
2329
+ output_index: z10.number(),
2330
+ delta: z10.string()
2324
2331
  });
2325
- var responseOutputItemAddedSchema = z6.object({
2326
- type: z6.literal("response.output_item.added"),
2327
- output_index: z6.number(),
2328
- item: z6.discriminatedUnion("type", [
2329
- z6.object({
2330
- type: z6.literal("message")
2332
+ var responseOutputItemAddedSchema = z10.object({
2333
+ type: z10.literal("response.output_item.added"),
2334
+ output_index: z10.number(),
2335
+ item: z10.discriminatedUnion("type", [
2336
+ z10.object({
2337
+ type: z10.literal("message")
2331
2338
  }),
2332
- z6.object({
2333
- type: z6.literal("function_call"),
2334
- id: z6.string(),
2335
- call_id: z6.string(),
2336
- name: z6.string(),
2337
- arguments: z6.string()
2339
+ z10.object({
2340
+ type: z10.literal("function_call"),
2341
+ id: z10.string(),
2342
+ call_id: z10.string(),
2343
+ name: z10.string(),
2344
+ arguments: z10.string()
2338
2345
  })
2339
2346
  ])
2340
2347
  });
2341
- var responseAnnotationAddedSchema = z6.object({
2342
- type: z6.literal("response.output_text.annotation.added"),
2343
- annotation: z6.object({
2344
- type: z6.literal("url_citation"),
2345
- url: z6.string(),
2346
- title: z6.string()
2348
+ var responseAnnotationAddedSchema = z10.object({
2349
+ type: z10.literal("response.output_text.annotation.added"),
2350
+ annotation: z10.object({
2351
+ type: z10.literal("url_citation"),
2352
+ url: z10.string(),
2353
+ title: z10.string()
2347
2354
  })
2348
2355
  });
2349
- var openaiResponsesChunkSchema = z6.union([
2356
+ var responseReasoningSummaryTextDeltaSchema = z10.object({
2357
+ type: z10.literal("response.reasoning_summary_text.delta"),
2358
+ item_id: z10.string(),
2359
+ output_index: z10.number(),
2360
+ summary_index: z10.number(),
2361
+ delta: z10.string()
2362
+ });
2363
+ var openaiResponsesChunkSchema = z10.union([
2350
2364
  textDeltaChunkSchema,
2351
2365
  responseFinishedChunkSchema,
2352
2366
  responseCreatedChunkSchema,
@@ -2354,7 +2368,8 @@ var openaiResponsesChunkSchema = z6.union([
2354
2368
  responseFunctionCallArgumentsDeltaSchema,
2355
2369
  responseOutputItemAddedSchema,
2356
2370
  responseAnnotationAddedSchema,
2357
- z6.object({ type: z6.string() }).passthrough()
2371
+ responseReasoningSummaryTextDeltaSchema,
2372
+ z10.object({ type: z10.string() }).passthrough()
2358
2373
  // fallback for unknown chunks
2359
2374
  ]);
2360
2375
  function isTextDeltaChunk(chunk) {
@@ -2378,6 +2393,9 @@ function isResponseOutputItemAddedChunk(chunk) {
2378
2393
  function isResponseAnnotationAddedChunk(chunk) {
2379
2394
  return chunk.type === "response.output_text.annotation.added";
2380
2395
  }
2396
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
2397
+ return chunk.type === "response.reasoning_summary_text.delta";
2398
+ }
2381
2399
  function getResponsesModelConfig(modelId) {
2382
2400
  if (modelId.startsWith("o")) {
2383
2401
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2399,15 +2417,16 @@ function getResponsesModelConfig(modelId) {
2399
2417
  requiredAutoTruncation: false
2400
2418
  };
2401
2419
  }
2402
- var openaiResponsesProviderOptionsSchema = z6.object({
2403
- metadata: z6.any().nullish(),
2404
- parallelToolCalls: z6.boolean().nullish(),
2405
- previousResponseId: z6.string().nullish(),
2406
- store: z6.boolean().nullish(),
2407
- user: z6.string().nullish(),
2408
- reasoningEffort: z6.string().nullish(),
2409
- strictSchemas: z6.boolean().nullish(),
2410
- instructions: z6.string().nullish()
2420
+ var openaiResponsesProviderOptionsSchema = z10.object({
2421
+ metadata: z10.any().nullish(),
2422
+ parallelToolCalls: z10.boolean().nullish(),
2423
+ previousResponseId: z10.string().nullish(),
2424
+ store: z10.boolean().nullish(),
2425
+ user: z10.string().nullish(),
2426
+ reasoningEffort: z10.string().nullish(),
2427
+ strictSchemas: z10.boolean().nullish(),
2428
+ instructions: z10.string().nullish(),
2429
+ reasoningSummary: z10.string().nullish()
2411
2430
  });
2412
2431
  export {
2413
2432
  OpenAIChatLanguageModel,
@@ -2415,6 +2434,10 @@ export {
2415
2434
  OpenAIEmbeddingModel,
2416
2435
  OpenAIImageModel,
2417
2436
  OpenAIResponsesLanguageModel,
2418
- modelMaxImagesPerCall
2437
+ OpenAISpeechModel,
2438
+ OpenAITranscriptionModel,
2439
+ modelMaxImagesPerCall,
2440
+ openaiEmbeddingProviderOptions,
2441
+ openaiProviderOptions
2419
2442
  };
2420
2443
  //# sourceMappingURL=index.mjs.map
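
Finally, the export list grows by the two new model classes plus the chat and embedding provider-option objects. A direct-construction sketch, assuming the config fields the classes read above (provider, url, headers, optional fetch); the import specifier is an assumption:

import { OpenAITranscriptionModel } from '@ai-sdk/openai/internal'; // path is an assumption

const model = new OpenAITranscriptionModel('whisper-1', {
  provider: 'openai.transcription',
  url: ({ path }: { path: string; modelId: string }) =>
    `https://api.openai.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
});
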