@ai-sdk/openai 2.0.0-canary.2 → 2.0.0-canary.20

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
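The recurring change below is that per-model settings (the old constructor `settings` argument) move to per-call provider options parsed with `parseProviderOptions`, and standardized call options are renamed (for example `maxTokens` becomes `maxOutputTokens`). As a rough, hedged sketch only — the calling pattern assumes the AI SDK v5 `generateText` API, and the model id and option values are illustrative, not taken from this diff:

// a minimal sketch, assuming AI SDK v5 ("ai" package) and an illustrative model id
import { createOpenAI } from "@ai-sdk/openai";
import { generateText } from "ai";

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const result = await generateText({
  // settings are no longer passed when constructing the model
  model: openai("gpt-4o"),
  maxOutputTokens: 1024, // renamed from maxTokens
  providerOptions: {
    openai: {
      // formerly constructor settings, now validated against openaiProviderOptions
      logitBias: { "50256": -100 },
      parallelToolCalls: false,
      user: "user-1234", // hypothetical end-user id
    },
  },
  prompt: "Hello!",
});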
@@ -1,7 +1,6 @@
1
1
  // src/openai-chat-language-model.ts
2
2
  import {
3
- InvalidResponseDataError,
4
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
3
+ InvalidResponseDataError
5
4
  } from "@ai-sdk/provider";
6
5
  import {
7
6
  combineHeaders,
@@ -9,18 +8,18 @@ import {
9
8
  createJsonResponseHandler,
10
9
  generateId,
11
10
  isParsableJson,
11
+ parseProviderOptions,
12
12
  postJsonToApi
13
13
  } from "@ai-sdk/provider-utils";
14
- import { z as z2 } from "zod";
14
+ import { z as z3 } from "zod";
15
15
 
16
16
  // src/convert-to-openai-chat-messages.ts
17
17
  import {
18
18
  UnsupportedFunctionalityError
19
19
  } from "@ai-sdk/provider";
20
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
20
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
21
21
  function convertToOpenAIChatMessages({
22
22
  prompt,
23
- useLegacyFunctionCalling = false,
24
23
  systemMessageMode = "system"
25
24
  }) {
26
25
  const messages = [];
@@ -61,55 +60,71 @@ function convertToOpenAIChatMessages({
61
60
  messages.push({
62
61
  role: "user",
63
62
  content: content.map((part, index) => {
64
- var _a, _b, _c, _d;
63
+ var _a, _b, _c;
65
64
  switch (part.type) {
66
65
  case "text": {
67
66
  return { type: "text", text: part.text };
68
67
  }
69
- case "image": {
70
- return {
71
- type: "image_url",
72
- image_url: {
73
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
74
- // OpenAI specific extension: image detail
75
- detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
76
- }
77
- };
78
- }
79
68
  case "file": {
80
- if (part.data instanceof URL) {
81
- throw new UnsupportedFunctionalityError({
82
- functionality: "'File content parts with URL data' functionality not supported."
83
- });
84
- }
85
- switch (part.mimeType) {
86
- case "audio/wav": {
87
- return {
88
- type: "input_audio",
89
- input_audio: { data: part.data, format: "wav" }
90
- };
91
- }
92
- case "audio/mp3":
93
- case "audio/mpeg": {
94
- return {
95
- type: "input_audio",
96
- input_audio: { data: part.data, format: "mp3" }
97
- };
69
+ if (part.mediaType.startsWith("image/")) {
70
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
71
+ return {
72
+ type: "image_url",
73
+ image_url: {
74
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
75
+ // OpenAI specific extension: image detail
76
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
77
+ }
78
+ };
79
+ } else if (part.mediaType.startsWith("audio/")) {
80
+ if (part.data instanceof URL) {
81
+ throw new UnsupportedFunctionalityError({
82
+ functionality: "audio file parts with URLs"
83
+ });
98
84
  }
99
- case "application/pdf": {
100
- return {
101
- type: "file",
102
- file: {
103
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
104
- file_data: `data:application/pdf;base64,${part.data}`
105
- }
106
- };
85
+ switch (part.mediaType) {
86
+ case "audio/wav": {
87
+ return {
88
+ type: "input_audio",
89
+ input_audio: {
90
+ data: convertToBase64(part.data),
91
+ format: "wav"
92
+ }
93
+ };
94
+ }
95
+ case "audio/mp3":
96
+ case "audio/mpeg": {
97
+ return {
98
+ type: "input_audio",
99
+ input_audio: {
100
+ data: convertToBase64(part.data),
101
+ format: "mp3"
102
+ }
103
+ };
104
+ }
105
+ default: {
106
+ throw new UnsupportedFunctionalityError({
107
+ functionality: `audio content parts with media type ${part.mediaType}`
108
+ });
109
+ }
107
110
  }
108
- default: {
111
+ } else if (part.mediaType === "application/pdf") {
112
+ if (part.data instanceof URL) {
109
113
  throw new UnsupportedFunctionalityError({
110
- functionality: `File content part type ${part.mimeType} in user messages`
114
+ functionality: "PDF file parts with URLs"
111
115
  });
112
116
  }
117
+ return {
118
+ type: "file",
119
+ file: {
120
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
121
+ file_data: `data:application/pdf;base64,${part.data}`
122
+ }
123
+ };
124
+ } else {
125
+ throw new UnsupportedFunctionalityError({
126
+ functionality: `file part media type ${part.mediaType}`
127
+ });
113
128
  }
114
129
  }
115
130
  }
@@ -139,41 +154,20 @@ function convertToOpenAIChatMessages({
139
154
  }
140
155
  }
141
156
  }
142
- if (useLegacyFunctionCalling) {
143
- if (toolCalls.length > 1) {
144
- throw new UnsupportedFunctionalityError({
145
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
146
- });
147
- }
148
- messages.push({
149
- role: "assistant",
150
- content: text,
151
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
152
- });
153
- } else {
154
- messages.push({
155
- role: "assistant",
156
- content: text,
157
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
158
- });
159
- }
157
+ messages.push({
158
+ role: "assistant",
159
+ content: text,
160
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
161
+ });
160
162
  break;
161
163
  }
162
164
  case "tool": {
163
165
  for (const toolResponse of content) {
164
- if (useLegacyFunctionCalling) {
165
- messages.push({
166
- role: "function",
167
- name: toolResponse.toolName,
168
- content: JSON.stringify(toolResponse.result)
169
- });
170
- } else {
171
- messages.push({
172
- role: "tool",
173
- tool_call_id: toolResponse.toolCallId,
174
- content: JSON.stringify(toolResponse.result)
175
- });
176
- }
166
+ messages.push({
167
+ role: "tool",
168
+ tool_call_id: toolResponse.toolCallId,
169
+ content: JSON.stringify(toolResponse.result)
170
+ });
177
171
  }
178
172
  break;
179
173
  }
@@ -186,17 +180,17 @@ function convertToOpenAIChatMessages({
186
180
  return { messages, warnings };
187
181
  }
188
182
 
189
- // src/map-openai-chat-logprobs.ts
190
- function mapOpenAIChatLogProbsOutput(logprobs) {
191
- var _a, _b;
192
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
193
- token,
194
- logprob,
195
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
196
- token: token2,
197
- logprob: logprob2
198
- })) : []
199
- }))) != null ? _b : void 0;
183
+ // src/get-response-metadata.ts
184
+ function getResponseMetadata({
185
+ id,
186
+ model,
187
+ created
188
+ }) {
189
+ return {
190
+ id: id != null ? id : void 0,
191
+ modelId: model != null ? model : void 0,
192
+ timestamp: created != null ? new Date(created * 1e3) : void 0
193
+ };
200
194
  }
201
195
 
202
196
  // src/map-openai-finish-reason.ts
@@ -216,18 +210,75 @@ function mapOpenAIFinishReason(finishReason) {
216
210
  }
217
211
  }
218
212
 
219
- // src/openai-error.ts
213
+ // src/openai-chat-options.ts
220
214
  import { z } from "zod";
215
+ var openaiProviderOptions = z.object({
216
+ /**
217
+ * Modify the likelihood of specified tokens appearing in the completion.
218
+ *
219
+ * Accepts a JSON object that maps tokens (specified by their token ID in
220
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
221
+ */
222
+ logitBias: z.record(z.coerce.number(), z.number()).optional(),
223
+ /**
224
+ * Return the log probabilities of the tokens.
225
+ *
226
+ * Setting to true will return the log probabilities of the tokens that
227
+ * were generated.
228
+ *
229
+ * Setting to a number will return the log probabilities of the top n
230
+ * tokens that were generated.
231
+ */
232
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
233
+ /**
234
+ * Whether to enable parallel function calling during tool use. Default to true.
235
+ */
236
+ parallelToolCalls: z.boolean().optional(),
237
+ /**
238
+ * A unique identifier representing your end-user, which can help OpenAI to
239
+ * monitor and detect abuse.
240
+ */
241
+ user: z.string().optional(),
242
+ /**
243
+ * Reasoning effort for reasoning models. Defaults to `medium`.
244
+ */
245
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
246
+ /**
247
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
248
+ */
249
+ maxCompletionTokens: z.number().optional(),
250
+ /**
251
+ * Whether to enable persistence in responses API.
252
+ */
253
+ store: z.boolean().optional(),
254
+ /**
255
+ * Metadata to associate with the request.
256
+ */
257
+ metadata: z.record(z.string()).optional(),
258
+ /**
259
+ * Parameters for prediction mode.
260
+ */
261
+ prediction: z.record(z.any()).optional(),
262
+ /**
263
+ * Whether to use structured outputs.
264
+ *
265
+ * @default true
266
+ */
267
+ structuredOutputs: z.boolean().optional()
268
+ });
269
+
270
+ // src/openai-error.ts
271
+ import { z as z2 } from "zod";
221
272
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
222
- var openaiErrorDataSchema = z.object({
223
- error: z.object({
224
- message: z.string(),
273
+ var openaiErrorDataSchema = z2.object({
274
+ error: z2.object({
275
+ message: z2.string(),
225
276
  // The additional information below is handled loosely to support
226
277
  // OpenAI-compatible providers that have slightly different error
227
278
  // responses:
228
- type: z.string().nullish(),
229
- param: z.any().nullish(),
230
- code: z.union([z.string(), z.number()]).nullish()
279
+ type: z2.string().nullish(),
280
+ param: z2.any().nullish(),
281
+ code: z2.union([z2.string(), z2.number()]).nullish()
231
282
  })
232
283
  });
233
284
  var openaiFailedResponseHandler = createJsonErrorResponseHandler({
@@ -235,19 +286,6 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
235
286
  errorToMessage: (data) => data.error.message
236
287
  });
237
288
 
238
- // src/get-response-metadata.ts
239
- function getResponseMetadata({
240
- id,
241
- model,
242
- created
243
- }) {
244
- return {
245
- id: id != null ? id : void 0,
246
- modelId: model != null ? model : void 0,
247
- timestamp: created != null ? new Date(created * 1e3) : void 0
248
- };
249
- }
250
-
251
289
  // src/openai-prepare-tools.ts
252
290
  import {
253
291
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
@@ -255,7 +293,6 @@ import {
255
293
  function prepareTools({
256
294
  tools,
257
295
  toolChoice,
258
- useLegacyFunctionCalling = false,
259
296
  structuredOutputs
260
297
  }) {
261
298
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -263,48 +300,6 @@ function prepareTools({
263
300
  if (tools == null) {
264
301
  return { tools: void 0, toolChoice: void 0, toolWarnings };
265
302
  }
266
- if (useLegacyFunctionCalling) {
267
- const openaiFunctions = [];
268
- for (const tool of tools) {
269
- if (tool.type === "provider-defined") {
270
- toolWarnings.push({ type: "unsupported-tool", tool });
271
- } else {
272
- openaiFunctions.push({
273
- name: tool.name,
274
- description: tool.description,
275
- parameters: tool.parameters
276
- });
277
- }
278
- }
279
- if (toolChoice == null) {
280
- return {
281
- functions: openaiFunctions,
282
- function_call: void 0,
283
- toolWarnings
284
- };
285
- }
286
- const type2 = toolChoice.type;
287
- switch (type2) {
288
- case "auto":
289
- case "none":
290
- case void 0:
291
- return {
292
- functions: openaiFunctions,
293
- function_call: void 0,
294
- toolWarnings
295
- };
296
- case "required":
297
- throw new UnsupportedFunctionalityError2({
298
- functionality: "useLegacyFunctionCalling and toolChoice: required"
299
- });
300
- default:
301
- return {
302
- functions: openaiFunctions,
303
- function_call: { name: toolChoice.toolName },
304
- toolWarnings
305
- };
306
- }
307
- }
308
303
  const openaiTools = [];
309
304
  for (const tool of tools) {
310
305
  if (tool.type === "provider-defined") {
@@ -344,7 +339,7 @@ function prepareTools({
344
339
  default: {
345
340
  const _exhaustiveCheck = type;
346
341
  throw new UnsupportedFunctionalityError2({
347
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
342
+ functionality: `tool choice type: ${_exhaustiveCheck}`
348
343
  });
349
344
  }
350
345
  }
@@ -352,31 +347,20 @@ function prepareTools({
352
347
 
353
348
  // src/openai-chat-language-model.ts
354
349
  var OpenAIChatLanguageModel = class {
355
- constructor(modelId, settings, config) {
350
+ constructor(modelId, config) {
356
351
  this.specificationVersion = "v2";
352
+ this.supportedUrls = {
353
+ "image/*": [/^https?:\/\/.*$/]
354
+ };
357
355
  this.modelId = modelId;
358
- this.settings = settings;
359
356
  this.config = config;
360
357
  }
361
- get supportsStructuredOutputs() {
362
- var _a;
363
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
364
- }
365
- get defaultObjectGenerationMode() {
366
- if (isAudioModel(this.modelId)) {
367
- return "tool";
368
- }
369
- return this.supportsStructuredOutputs ? "json" : "tool";
370
- }
371
358
  get provider() {
372
359
  return this.config.provider;
373
360
  }
374
- get supportsImageUrls() {
375
- return !this.settings.downloadImages;
376
- }
377
- getArgs({
361
+ async getArgs({
378
362
  prompt,
379
- maxTokens,
363
+ maxOutputTokens,
380
364
  temperature,
381
365
  topP,
382
366
  topK,
@@ -389,36 +373,30 @@ var OpenAIChatLanguageModel = class {
389
373
  toolChoice,
390
374
  providerOptions
391
375
  }) {
392
- var _a, _b, _c, _d, _e, _f, _g;
376
+ var _a, _b, _c;
393
377
  const warnings = [];
378
+ const openaiOptions = (_a = await parseProviderOptions({
379
+ provider: "openai",
380
+ providerOptions,
381
+ schema: openaiProviderOptions
382
+ })) != null ? _a : {};
383
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
394
384
  if (topK != null) {
395
385
  warnings.push({
396
386
  type: "unsupported-setting",
397
387
  setting: "topK"
398
388
  });
399
389
  }
400
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
390
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
401
391
  warnings.push({
402
392
  type: "unsupported-setting",
403
393
  setting: "responseFormat",
404
394
  details: "JSON response format schema is only supported with structuredOutputs"
405
395
  });
406
396
  }
407
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
408
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
409
- throw new UnsupportedFunctionalityError3({
410
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
411
- });
412
- }
413
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
414
- throw new UnsupportedFunctionalityError3({
415
- functionality: "structuredOutputs with useLegacyFunctionCalling"
416
- });
417
- }
418
397
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
419
398
  {
420
399
  prompt,
421
- useLegacyFunctionCalling,
422
400
  systemMessageMode: getSystemMessageMode(this.modelId)
423
401
  }
424
402
  );
@@ -427,36 +405,38 @@ var OpenAIChatLanguageModel = class {
427
405
  // model id:
428
406
  model: this.modelId,
429
407
  // model specific settings:
430
- logit_bias: this.settings.logitBias,
431
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
432
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
433
- user: this.settings.user,
434
- parallel_tool_calls: this.settings.parallelToolCalls,
408
+ logit_bias: openaiOptions.logitBias,
409
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
410
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
411
+ user: openaiOptions.user,
412
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
435
413
  // standardized settings:
436
- max_tokens: maxTokens,
414
+ max_tokens: maxOutputTokens,
437
415
  temperature,
438
416
  top_p: topP,
439
417
  frequency_penalty: frequencyPenalty,
440
418
  presence_penalty: presencePenalty,
441
- // TODO improve below:
442
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
443
- type: "json_schema",
444
- json_schema: {
445
- schema: responseFormat.schema,
446
- strict: true,
447
- name: (_a = responseFormat.name) != null ? _a : "response",
448
- description: responseFormat.description
449
- }
450
- } : { type: "json_object" } : void 0,
419
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
420
+ // TODO convert into provider option
421
+ structuredOutputs && responseFormat.schema != null ? {
422
+ type: "json_schema",
423
+ json_schema: {
424
+ schema: responseFormat.schema,
425
+ strict: true,
426
+ name: (_c = responseFormat.name) != null ? _c : "response",
427
+ description: responseFormat.description
428
+ }
429
+ } : { type: "json_object" }
430
+ ) : void 0,
451
431
  stop: stopSequences,
452
432
  seed,
453
433
  // openai specific settings:
454
- // TODO remove in next major version; we auto-map maxTokens now
455
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
456
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
457
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
458
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
459
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
434
+ // TODO remove in next major version; we auto-map maxOutputTokens now
435
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
436
+ store: openaiOptions.store,
437
+ metadata: openaiOptions.metadata,
438
+ prediction: openaiOptions.prediction,
439
+ reasoning_effort: openaiOptions.reasoningEffort,
460
440
  // messages:
461
441
  messages
462
442
  };
@@ -520,33 +500,37 @@ var OpenAIChatLanguageModel = class {
520
500
  }
521
501
  baseArgs.max_tokens = void 0;
522
502
  }
503
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
504
+ if (baseArgs.temperature != null) {
505
+ baseArgs.temperature = void 0;
506
+ warnings.push({
507
+ type: "unsupported-setting",
508
+ setting: "temperature",
509
+ details: "temperature is not supported for the search preview models and has been removed."
510
+ });
511
+ }
523
512
  }
524
513
  const {
525
514
  tools: openaiTools,
526
515
  toolChoice: openaiToolChoice,
527
- functions,
528
- function_call,
529
516
  toolWarnings
530
517
  } = prepareTools({
531
518
  tools,
532
519
  toolChoice,
533
- useLegacyFunctionCalling,
534
- structuredOutputs: this.supportsStructuredOutputs
520
+ structuredOutputs
535
521
  });
536
522
  return {
537
523
  args: {
538
524
  ...baseArgs,
539
525
  tools: openaiTools,
540
- tool_choice: openaiToolChoice,
541
- functions,
542
- function_call
526
+ tool_choice: openaiToolChoice
543
527
  },
544
528
  warnings: [...warnings, ...toolWarnings]
545
529
  };
546
530
  }
547
531
  async doGenerate(options) {
548
- var _a, _b, _c, _d, _e, _f, _g, _h;
549
- const { args: body, warnings } = this.getArgs(options);
532
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
533
+ const { args: body, warnings } = await this.getArgs(options);
550
534
  const {
551
535
  responseHeaders,
552
536
  value: response,
@@ -565,105 +549,61 @@ var OpenAIChatLanguageModel = class {
565
549
  abortSignal: options.abortSignal,
566
550
  fetch: this.config.fetch
567
551
  });
568
- const { messages: rawPrompt, ...rawSettings } = body;
569
552
  const choice = response.choices[0];
570
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
571
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
572
- const providerMetadata = { openai: {} };
573
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
574
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
553
+ const content = [];
554
+ const text = choice.message.content;
555
+ if (text != null && text.length > 0) {
556
+ content.push({ type: "text", text });
575
557
  }
558
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
559
+ content.push({
560
+ type: "tool-call",
561
+ toolCallType: "function",
562
+ toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
563
+ toolName: toolCall.function.name,
564
+ args: toolCall.function.arguments
565
+ });
566
+ }
567
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
568
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
569
+ const providerMetadata = { openai: {} };
576
570
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
577
571
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
578
572
  }
579
573
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
580
574
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
581
575
  }
582
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
583
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
576
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
577
+ providerMetadata.openai.logprobs = choice.logprobs.content;
584
578
  }
585
579
  return {
586
- text: (_c = choice.message.content) != null ? _c : void 0,
587
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
588
- {
589
- toolCallType: "function",
590
- toolCallId: generateId(),
591
- toolName: choice.message.function_call.name,
592
- args: choice.message.function_call.arguments
593
- }
594
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
595
- var _a2;
596
- return {
597
- toolCallType: "function",
598
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
599
- toolName: toolCall.function.name,
600
- args: toolCall.function.arguments
601
- };
602
- }),
580
+ content,
603
581
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
604
582
  usage: {
605
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
606
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
583
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
584
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
585
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
586
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
587
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
588
+ },
589
+ request: { body },
590
+ response: {
591
+ ...getResponseMetadata(response),
592
+ headers: responseHeaders,
593
+ body: rawResponse
607
594
  },
608
- rawCall: { rawPrompt, rawSettings },
609
- rawResponse: { headers: responseHeaders, body: rawResponse },
610
- request: { body: JSON.stringify(body) },
611
- response: getResponseMetadata(response),
612
595
  warnings,
613
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
614
596
  providerMetadata
615
597
  };
616
598
  }
617
599
  async doStream(options) {
618
- if (this.settings.simulateStreaming) {
619
- const result = await this.doGenerate(options);
620
- const simulatedStream = new ReadableStream({
621
- start(controller) {
622
- controller.enqueue({ type: "response-metadata", ...result.response });
623
- if (result.text) {
624
- controller.enqueue({
625
- type: "text-delta",
626
- textDelta: result.text
627
- });
628
- }
629
- if (result.toolCalls) {
630
- for (const toolCall of result.toolCalls) {
631
- controller.enqueue({
632
- type: "tool-call-delta",
633
- toolCallType: "function",
634
- toolCallId: toolCall.toolCallId,
635
- toolName: toolCall.toolName,
636
- argsTextDelta: toolCall.args
637
- });
638
- controller.enqueue({
639
- type: "tool-call",
640
- ...toolCall
641
- });
642
- }
643
- }
644
- controller.enqueue({
645
- type: "finish",
646
- finishReason: result.finishReason,
647
- usage: result.usage,
648
- logprobs: result.logprobs,
649
- providerMetadata: result.providerMetadata
650
- });
651
- controller.close();
652
- }
653
- });
654
- return {
655
- stream: simulatedStream,
656
- rawCall: result.rawCall,
657
- rawResponse: result.rawResponse,
658
- warnings: result.warnings
659
- };
660
- }
661
- const { args, warnings } = this.getArgs(options);
600
+ const { args, warnings } = await this.getArgs(options);
662
601
  const body = {
663
602
  ...args,
664
603
  stream: true,
665
- // only include stream_options when in strict compatibility mode:
666
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
604
+ stream_options: {
605
+ include_usage: true
606
+ }
667
607
  };
668
608
  const { responseHeaders, value: response } = await postJsonToApi({
669
609
  url: this.config.url({
@@ -679,22 +619,23 @@ var OpenAIChatLanguageModel = class {
679
619
  abortSignal: options.abortSignal,
680
620
  fetch: this.config.fetch
681
621
  });
682
- const { messages: rawPrompt, ...rawSettings } = args;
683
622
  const toolCalls = [];
684
623
  let finishReason = "unknown";
685
- let usage = {
686
- promptTokens: void 0,
687
- completionTokens: void 0
624
+ const usage = {
625
+ inputTokens: void 0,
626
+ outputTokens: void 0,
627
+ totalTokens: void 0
688
628
  };
689
- let logprobs;
690
629
  let isFirstChunk = true;
691
- const { useLegacyFunctionCalling } = this.settings;
692
630
  const providerMetadata = { openai: {} };
693
631
  return {
694
632
  stream: response.pipeThrough(
695
633
  new TransformStream({
634
+ start(controller) {
635
+ controller.enqueue({ type: "stream-start", warnings });
636
+ },
696
637
  transform(chunk, controller) {
697
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
638
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
698
639
  if (!chunk.success) {
699
640
  finishReason = "error";
700
641
  controller.enqueue({ type: "error", error: chunk.error });
@@ -714,60 +655,37 @@ var OpenAIChatLanguageModel = class {
714
655
  });
715
656
  }
716
657
  if (value.usage != null) {
717
- const {
718
- prompt_tokens,
719
- completion_tokens,
720
- prompt_tokens_details,
721
- completion_tokens_details
722
- } = value.usage;
723
- usage = {
724
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
725
- completionTokens: completion_tokens != null ? completion_tokens : void 0
726
- };
727
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
728
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
729
- }
730
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
731
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
658
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
659
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
660
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
661
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
662
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
663
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
664
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
732
665
  }
733
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
734
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
735
- }
736
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
737
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
666
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
667
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
738
668
  }
739
669
  }
740
670
  const choice = value.choices[0];
741
671
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
742
672
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
743
673
  }
674
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
675
+ providerMetadata.openai.logprobs = choice.logprobs.content;
676
+ }
744
677
  if ((choice == null ? void 0 : choice.delta) == null) {
745
678
  return;
746
679
  }
747
680
  const delta = choice.delta;
748
681
  if (delta.content != null) {
749
682
  controller.enqueue({
750
- type: "text-delta",
751
- textDelta: delta.content
683
+ type: "text",
684
+ text: delta.content
752
685
  });
753
686
  }
754
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
755
- choice == null ? void 0 : choice.logprobs
756
- );
757
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
758
- if (logprobs === void 0) logprobs = [];
759
- logprobs.push(...mappedLogprobs);
760
- }
761
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
762
- {
763
- type: "function",
764
- id: generateId(),
765
- function: delta.function_call,
766
- index: 0
767
- }
768
- ] : delta.tool_calls;
769
- if (mappedToolCalls != null) {
770
- for (const toolCallDelta of mappedToolCalls) {
687
+ if (delta.tool_calls != null) {
688
+ for (const toolCallDelta of delta.tool_calls) {
771
689
  const index = toolCallDelta.index;
772
690
  if (toolCalls[index] == null) {
773
691
  if (toolCallDelta.type !== "function") {
@@ -782,7 +700,7 @@ var OpenAIChatLanguageModel = class {
782
700
  message: `Expected 'id' to be a string.`
783
701
  });
784
702
  }
785
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
703
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
786
704
  throw new InvalidResponseDataError({
787
705
  data: toolCallDelta,
788
706
  message: `Expected 'function.name' to be a string.`
@@ -793,12 +711,12 @@ var OpenAIChatLanguageModel = class {
793
711
  type: "function",
794
712
  function: {
795
713
  name: toolCallDelta.function.name,
796
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
714
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
797
715
  },
798
716
  hasFinished: false
799
717
  };
800
718
  const toolCall2 = toolCalls[index];
801
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
719
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
802
720
  if (toolCall2.function.arguments.length > 0) {
803
721
  controller.enqueue({
804
722
  type: "tool-call-delta",
@@ -812,7 +730,7 @@ var OpenAIChatLanguageModel = class {
812
730
  controller.enqueue({
813
731
  type: "tool-call",
814
732
  toolCallType: "function",
815
- toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
733
+ toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
816
734
  toolName: toolCall2.function.name,
817
735
  args: toolCall2.function.arguments
818
736
  });
@@ -825,21 +743,21 @@ var OpenAIChatLanguageModel = class {
825
743
  if (toolCall.hasFinished) {
826
744
  continue;
827
745
  }
828
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
829
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
746
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
747
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
830
748
  }
831
749
  controller.enqueue({
832
750
  type: "tool-call-delta",
833
751
  toolCallType: "function",
834
752
  toolCallId: toolCall.id,
835
753
  toolName: toolCall.function.name,
836
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
754
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
837
755
  });
838
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
756
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
839
757
  controller.enqueue({
840
758
  type: "tool-call",
841
759
  toolCallType: "function",
842
- toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
760
+ toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
843
761
  toolName: toolCall.function.name,
844
762
  args: toolCall.function.arguments
845
763
  });
@@ -849,125 +767,111 @@ var OpenAIChatLanguageModel = class {
849
767
  }
850
768
  },
851
769
  flush(controller) {
852
- var _a, _b;
853
770
  controller.enqueue({
854
771
  type: "finish",
855
772
  finishReason,
856
- logprobs,
857
- usage: {
858
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
859
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
860
- },
773
+ usage,
861
774
  ...providerMetadata != null ? { providerMetadata } : {}
862
775
  });
863
776
  }
864
777
  })
865
778
  ),
866
- rawCall: { rawPrompt, rawSettings },
867
- rawResponse: { headers: responseHeaders },
868
- request: { body: JSON.stringify(body) },
869
- warnings
779
+ request: { body },
780
+ response: { headers: responseHeaders }
870
781
  };
871
782
  }
872
783
  };
873
- var openaiTokenUsageSchema = z2.object({
874
- prompt_tokens: z2.number().nullish(),
875
- completion_tokens: z2.number().nullish(),
876
- prompt_tokens_details: z2.object({
877
- cached_tokens: z2.number().nullish()
784
+ var openaiTokenUsageSchema = z3.object({
785
+ prompt_tokens: z3.number().nullish(),
786
+ completion_tokens: z3.number().nullish(),
787
+ total_tokens: z3.number().nullish(),
788
+ prompt_tokens_details: z3.object({
789
+ cached_tokens: z3.number().nullish()
878
790
  }).nullish(),
879
- completion_tokens_details: z2.object({
880
- reasoning_tokens: z2.number().nullish(),
881
- accepted_prediction_tokens: z2.number().nullish(),
882
- rejected_prediction_tokens: z2.number().nullish()
791
+ completion_tokens_details: z3.object({
792
+ reasoning_tokens: z3.number().nullish(),
793
+ accepted_prediction_tokens: z3.number().nullish(),
794
+ rejected_prediction_tokens: z3.number().nullish()
883
795
  }).nullish()
884
796
  }).nullish();
885
- var openaiChatResponseSchema = z2.object({
886
- id: z2.string().nullish(),
887
- created: z2.number().nullish(),
888
- model: z2.string().nullish(),
889
- choices: z2.array(
890
- z2.object({
891
- message: z2.object({
892
- role: z2.literal("assistant").nullish(),
893
- content: z2.string().nullish(),
894
- function_call: z2.object({
895
- arguments: z2.string(),
896
- name: z2.string()
897
- }).nullish(),
898
- tool_calls: z2.array(
899
- z2.object({
900
- id: z2.string().nullish(),
901
- type: z2.literal("function"),
902
- function: z2.object({
903
- name: z2.string(),
904
- arguments: z2.string()
797
+ var openaiChatResponseSchema = z3.object({
798
+ id: z3.string().nullish(),
799
+ created: z3.number().nullish(),
800
+ model: z3.string().nullish(),
801
+ choices: z3.array(
802
+ z3.object({
803
+ message: z3.object({
804
+ role: z3.literal("assistant").nullish(),
805
+ content: z3.string().nullish(),
806
+ tool_calls: z3.array(
807
+ z3.object({
808
+ id: z3.string().nullish(),
809
+ type: z3.literal("function"),
810
+ function: z3.object({
811
+ name: z3.string(),
812
+ arguments: z3.string()
905
813
  })
906
814
  })
907
815
  ).nullish()
908
816
  }),
909
- index: z2.number(),
910
- logprobs: z2.object({
911
- content: z2.array(
912
- z2.object({
913
- token: z2.string(),
914
- logprob: z2.number(),
915
- top_logprobs: z2.array(
916
- z2.object({
917
- token: z2.string(),
918
- logprob: z2.number()
817
+ index: z3.number(),
818
+ logprobs: z3.object({
819
+ content: z3.array(
820
+ z3.object({
821
+ token: z3.string(),
822
+ logprob: z3.number(),
823
+ top_logprobs: z3.array(
824
+ z3.object({
825
+ token: z3.string(),
826
+ logprob: z3.number()
919
827
  })
920
828
  )
921
829
  })
922
- ).nullable()
830
+ ).nullish()
923
831
  }).nullish(),
924
- finish_reason: z2.string().nullish()
832
+ finish_reason: z3.string().nullish()
925
833
  })
926
834
  ),
927
835
  usage: openaiTokenUsageSchema
928
836
  });
929
- var openaiChatChunkSchema = z2.union([
930
- z2.object({
931
- id: z2.string().nullish(),
932
- created: z2.number().nullish(),
933
- model: z2.string().nullish(),
934
- choices: z2.array(
935
- z2.object({
936
- delta: z2.object({
937
- role: z2.enum(["assistant"]).nullish(),
938
- content: z2.string().nullish(),
939
- function_call: z2.object({
940
- name: z2.string().optional(),
941
- arguments: z2.string().optional()
942
- }).nullish(),
943
- tool_calls: z2.array(
944
- z2.object({
945
- index: z2.number(),
946
- id: z2.string().nullish(),
947
- type: z2.literal("function").optional(),
948
- function: z2.object({
949
- name: z2.string().nullish(),
950
- arguments: z2.string().nullish()
837
+ var openaiChatChunkSchema = z3.union([
838
+ z3.object({
839
+ id: z3.string().nullish(),
840
+ created: z3.number().nullish(),
841
+ model: z3.string().nullish(),
842
+ choices: z3.array(
843
+ z3.object({
844
+ delta: z3.object({
845
+ role: z3.enum(["assistant"]).nullish(),
846
+ content: z3.string().nullish(),
847
+ tool_calls: z3.array(
848
+ z3.object({
849
+ index: z3.number(),
850
+ id: z3.string().nullish(),
851
+ type: z3.literal("function").nullish(),
852
+ function: z3.object({
853
+ name: z3.string().nullish(),
854
+ arguments: z3.string().nullish()
951
855
  })
952
856
  })
953
857
  ).nullish()
954
858
  }).nullish(),
955
- logprobs: z2.object({
956
- content: z2.array(
957
- z2.object({
958
- token: z2.string(),
959
- logprob: z2.number(),
960
- top_logprobs: z2.array(
961
- z2.object({
962
- token: z2.string(),
963
- logprob: z2.number()
859
+ logprobs: z3.object({
860
+ content: z3.array(
861
+ z3.object({
862
+ token: z3.string(),
863
+ logprob: z3.number(),
864
+ top_logprobs: z3.array(
865
+ z3.object({
866
+ token: z3.string(),
867
+ logprob: z3.number()
964
868
  })
965
869
  )
966
870
  })
967
- ).nullable()
871
+ ).nullish()
968
872
  }).nullish(),
969
- finish_reason: z2.string().nullable().optional(),
970
- index: z2.number()
873
+ finish_reason: z3.string().nullish(),
874
+ index: z3.number()
971
875
  })
972
876
  ),
973
877
  usage: openaiTokenUsageSchema
@@ -975,10 +879,7 @@ var openaiChatChunkSchema = z2.union([
975
879
  openaiErrorDataSchema
976
880
  ]);
977
881
  function isReasoningModel(modelId) {
978
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
979
- }
980
- function isAudioModel(modelId) {
981
- return modelId.startsWith("gpt-4o-audio-preview");
882
+ return modelId.startsWith("o");
982
883
  }
983
884
  function getSystemMessageMode(modelId) {
984
885
  var _a, _b;
@@ -1000,11 +901,23 @@ var reasoningModels = {
1000
901
  "o1-preview-2024-09-12": {
1001
902
  systemMessageMode: "remove"
1002
903
  },
904
+ o3: {
905
+ systemMessageMode: "developer"
906
+ },
907
+ "o3-2025-04-16": {
908
+ systemMessageMode: "developer"
909
+ },
1003
910
  "o3-mini": {
1004
911
  systemMessageMode: "developer"
1005
912
  },
1006
913
  "o3-mini-2025-01-31": {
1007
914
  systemMessageMode: "developer"
915
+ },
916
+ "o4-mini": {
917
+ systemMessageMode: "developer"
918
+ },
919
+ "o4-mini-2025-04-16": {
920
+ systemMessageMode: "developer"
1008
921
  }
1009
922
  };
1010
923
 
@@ -1013,24 +926,21 @@ import {
1013
926
  combineHeaders as combineHeaders2,
1014
927
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
1015
928
  createJsonResponseHandler as createJsonResponseHandler2,
929
+ parseProviderOptions as parseProviderOptions2,
1016
930
  postJsonToApi as postJsonToApi2
1017
931
  } from "@ai-sdk/provider-utils";
1018
- import { z as z3 } from "zod";
932
+ import { z as z5 } from "zod";
1019
933
 
1020
934
  // src/convert-to-openai-completion-prompt.ts
1021
935
  import {
1022
936
  InvalidPromptError,
1023
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
937
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
1024
938
  } from "@ai-sdk/provider";
1025
939
  function convertToOpenAICompletionPrompt({
1026
940
  prompt,
1027
- inputFormat,
1028
941
  user = "user",
1029
942
  assistant = "assistant"
1030
943
  }) {
1031
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
1032
- return { prompt: prompt[0].content[0].text };
1033
- }
1034
944
  let text = "";
1035
945
  if (prompt[0].role === "system") {
1036
946
  text += `${prompt[0].content}
@@ -1052,13 +962,8 @@ function convertToOpenAICompletionPrompt({
1052
962
  case "text": {
1053
963
  return part.text;
1054
964
  }
1055
- case "image": {
1056
- throw new UnsupportedFunctionalityError4({
1057
- functionality: "images"
1058
- });
1059
- }
1060
965
  }
1061
- }).join("");
966
+ }).filter(Boolean).join("");
1062
967
  text += `${user}:
1063
968
  ${userMessage}
1064
969
 
@@ -1072,7 +977,7 @@ ${userMessage}
1072
977
  return part.text;
1073
978
  }
1074
979
  case "tool-call": {
1075
- throw new UnsupportedFunctionalityError4({
980
+ throw new UnsupportedFunctionalityError3({
1076
981
  functionality: "tool-call messages"
1077
982
  });
1078
983
  }
@@ -1085,7 +990,7 @@ ${assistantMessage}
1085
990
  break;
1086
991
  }
1087
992
  case "tool": {
1088
- throw new UnsupportedFunctionalityError4({
993
+ throw new UnsupportedFunctionalityError3({
1089
994
  functionality: "tool messages"
1090
995
  });
1091
996
  }
@@ -1104,36 +1009,68 @@ ${user}:`]
1104
1009
  };
1105
1010
  }
1106
1011
 
1107
- // src/map-openai-completion-logprobs.ts
1108
- function mapOpenAICompletionLogProbs(logprobs) {
1109
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
1110
- token,
1111
- logprob: logprobs.token_logprobs[index],
1112
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
1113
- ([token2, logprob]) => ({
1114
- token: token2,
1115
- logprob
1116
- })
1117
- ) : []
1118
- }));
1119
- }
1012
+ // src/openai-completion-options.ts
1013
+ import { z as z4 } from "zod";
1014
+ var openaiCompletionProviderOptions = z4.object({
1015
+ /**
1016
+ Echo back the prompt in addition to the completion.
1017
+ */
1018
+ echo: z4.boolean().optional(),
1019
+ /**
1020
+ Modify the likelihood of specified tokens appearing in the completion.
1021
+
1022
+ Accepts a JSON object that maps tokens (specified by their token ID in
1023
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
1024
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
1025
+ the bias is added to the logits generated by the model prior to sampling.
1026
+ The exact effect will vary per model, but values between -1 and 1 should
1027
+ decrease or increase likelihood of selection; values like -100 or 100
1028
+ should result in a ban or exclusive selection of the relevant token.
1029
+
1030
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1031
+ token from being generated.
1032
+ */
1033
+ logitBias: z4.record(z4.string(), z4.number()).optional(),
1034
+ /**
1035
+ The suffix that comes after a completion of inserted text.
1036
+ */
1037
+ suffix: z4.string().optional(),
1038
+ /**
1039
+ A unique identifier representing your end-user, which can help OpenAI to
1040
+ monitor and detect abuse. Learn more.
1041
+ */
1042
+ user: z4.string().optional(),
1043
+ /**
1044
+ Return the log probabilities of the tokens. Including logprobs will increase
1045
+ the response size and can slow down response times. However, it can
1046
+ be useful to better understand how the model is behaving.
1047
+ Setting to true will return the log probabilities of the tokens that
1048
+ were generated.
1049
+ Setting to a number will return the log probabilities of the top n
1050
+ tokens that were generated.
1051
+ */
1052
+ logprobs: z4.union([z4.boolean(), z4.number()]).optional()
1053
+ });
1120
1054
 
1121
1055
  // src/openai-completion-language-model.ts
1122
1056
  var OpenAICompletionLanguageModel = class {
1123
- constructor(modelId, settings, config) {
1057
+ constructor(modelId, config) {
1124
1058
  this.specificationVersion = "v2";
1125
- this.defaultObjectGenerationMode = void 0;
1059
+ this.supportedUrls = {
1060
+ // No URLs are supported for completion models.
1061
+ };
1126
1062
  this.modelId = modelId;
1127
- this.settings = settings;
1128
1063
  this.config = config;
1129
1064
  }
1065
+ get providerOptionsName() {
1066
+ return this.config.provider.split(".")[0].trim();
1067
+ }
1130
1068
  get provider() {
1131
1069
  return this.config.provider;
1132
1070
  }
1133
- getArgs({
1134
- inputFormat,
1071
+ async getArgs({
1135
1072
  prompt,
1136
- maxTokens,
1073
+ maxOutputTokens,
1137
1074
  temperature,
1138
1075
  topP,
1139
1076
  topK,
@@ -1143,9 +1080,22 @@ var OpenAICompletionLanguageModel = class {
1143
1080
  responseFormat,
1144
1081
  tools,
1145
1082
  toolChoice,
1146
- seed
1083
+ seed,
1084
+ providerOptions
1147
1085
  }) {
1148
1086
  const warnings = [];
1087
+ const openaiOptions = {
1088
+ ...await parseProviderOptions2({
1089
+ provider: "openai",
1090
+ providerOptions,
1091
+ schema: openaiCompletionProviderOptions
1092
+ }),
1093
+ ...await parseProviderOptions2({
1094
+ provider: this.providerOptionsName,
1095
+ providerOptions,
1096
+ schema: openaiCompletionProviderOptions
1097
+ })
1098
+ };
1149
1099
  if (topK != null) {
1150
1100
  warnings.push({ type: "unsupported-setting", setting: "topK" });
1151
1101
  }
@@ -1162,20 +1112,20 @@ var OpenAICompletionLanguageModel = class {
1162
1112
  details: "JSON response format is not supported."
1163
1113
  });
1164
1114
  }
1165
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
1115
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
1166
1116
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1167
1117
  return {
1168
1118
  args: {
1169
1119
  // model id:
1170
1120
  model: this.modelId,
1171
1121
  // model specific settings:
1172
- echo: this.settings.echo,
1173
- logit_bias: this.settings.logitBias,
1174
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
1175
- suffix: this.settings.suffix,
1176
- user: this.settings.user,
1122
+ echo: openaiOptions.echo,
1123
+ logit_bias: openaiOptions.logitBias,
1124
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
1125
+ suffix: openaiOptions.suffix,
1126
+ user: openaiOptions.user,
1177
1127
  // standardized settings:
1178
- max_tokens: maxTokens,
1128
+ max_tokens: maxOutputTokens,
1179
1129
  temperature,
1180
1130
  top_p: topP,
1181
1131
  frequency_penalty: frequencyPenalty,
@@ -1190,7 +1140,8 @@ var OpenAICompletionLanguageModel = class {
1190
1140
  };
1191
1141
  }
1192
1142
  async doGenerate(options) {
1193
- const { args, warnings } = this.getArgs(options);
1143
+ var _a, _b, _c;
1144
+ const { args, warnings } = await this.getArgs(options);
1194
1145
  const {
1195
1146
  responseHeaders,
1196
1147
  value: response,
@@ -1209,30 +1160,37 @@ var OpenAICompletionLanguageModel = class {
1209
1160
  abortSignal: options.abortSignal,
1210
1161
  fetch: this.config.fetch
1211
1162
  });
1212
- const { prompt: rawPrompt, ...rawSettings } = args;
1213
1163
  const choice = response.choices[0];
1164
+ const providerMetadata = { openai: {} };
1165
+ if (choice.logprobs != null) {
1166
+ providerMetadata.openai.logprobs = choice.logprobs;
1167
+ }
1214
1168
  return {
1215
- text: choice.text,
1169
+ content: [{ type: "text", text: choice.text }],
1216
1170
  usage: {
1217
- promptTokens: response.usage.prompt_tokens,
1218
- completionTokens: response.usage.completion_tokens
1171
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
1172
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
1173
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
1219
1174
  },
1220
1175
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
1221
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
1222
- rawCall: { rawPrompt, rawSettings },
1223
- rawResponse: { headers: responseHeaders, body: rawResponse },
1224
- response: getResponseMetadata(response),
1225
- warnings,
1226
- request: { body: JSON.stringify(args) }
1176
+ request: { body: args },
1177
+ response: {
1178
+ ...getResponseMetadata(response),
1179
+ headers: responseHeaders,
1180
+ body: rawResponse
1181
+ },
1182
+ providerMetadata,
1183
+ warnings
1227
1184
  };
1228
1185
  }
1229
1186
  async doStream(options) {
1230
- const { args, warnings } = this.getArgs(options);
1187
+ const { args, warnings } = await this.getArgs(options);
1231
1188
  const body = {
1232
1189
  ...args,
1233
1190
  stream: true,
1234
- // only include stream_options when in strict compatibility mode:
1235
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1191
+ stream_options: {
1192
+ include_usage: true
1193
+ }
1236
1194
  };
1237
1195
  const { responseHeaders, value: response } = await postJsonToApi2({
1238
1196
  url: this.config.url({
@@ -1248,17 +1206,20 @@ var OpenAICompletionLanguageModel = class {
1248
1206
  abortSignal: options.abortSignal,
1249
1207
  fetch: this.config.fetch
1250
1208
  });
1251
- const { prompt: rawPrompt, ...rawSettings } = args;
1252
1209
  let finishReason = "unknown";
1253
- let usage = {
1254
- promptTokens: Number.NaN,
1255
- completionTokens: Number.NaN
1210
+ const providerMetadata = { openai: {} };
1211
+ const usage = {
1212
+ inputTokens: void 0,
1213
+ outputTokens: void 0,
1214
+ totalTokens: void 0
1256
1215
  };
1257
- let logprobs;
1258
1216
  let isFirstChunk = true;
1259
1217
  return {
1260
1218
  stream: response.pipeThrough(
1261
1219
  new TransformStream({
1220
+ start(controller) {
1221
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  if (!chunk.success) {
  finishReason = "error";
@@ -1279,87 +1240,79 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
+ providerMetadata,
  usage
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var usageSchema = z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number(),
+ total_tokens: z5.number()
+ });
+ var openaiCompletionResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
- })
+ usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number(),
- logprobs: z3.object({
- tokens: z3.array(z3.string()),
- token_logprobs: z3.array(z3.number()),
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+ var openaiCompletionChunkSchema = z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string().nullish(),
+ index: z5.number(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
  }).nullish()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
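
The completion model's streaming path above switches usage to the v2 `inputTokens`/`outputTokens`/`totalTokens` shape and forwards raw OpenAI logprobs through `providerMetadata.openai.logprobs` instead of the removed mapped `logprobs` array. A minimal consumer sketch, assuming the v5-canary `ai` core (`streamText` and the `onFinish` field names may shift between canaries):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Write a haiku about diffs.',
  onFinish({ usage, providerMetadata }) {
    console.log(usage?.totalTokens); // total_tokens now passed through
    console.log(providerMetadata?.openai?.logprobs); // raw logprobs, when requested
  },
});

for await (const text of result.textStream) process.stdout.write(text);
```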
@@ -1371,32 +1324,45 @@ import {
  import {
  combineHeaders as combineHeaders3,
  createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z7 } from "zod";
+
+ // src/openai-embedding-options.ts
+ import { z as z6 } from "zod";
+ var openaiEmbeddingProviderOptions = z6.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: z6.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: z6.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1405,7 +1371,16 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await postJsonToApi3({
+ const openaiOptions = (_a = await parseProviderOptions3({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi3({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1415,8 +1390,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler3(
@@ -1428,13 +1403,13 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
  });
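
The embedding model drops constructor settings: `dimensions` and `user` now arrive as per-call provider options validated by `openaiEmbeddingProviderOptions`, and the raw response body is returned under `response.body`. A sketch, assuming the v5-canary `ai` core forwards `providerOptions` to `doEmbed`:

```ts
import { openai } from '@ai-sdk/openai';
import { embed } from 'ai';

const { embedding, usage } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    // validated against openaiEmbeddingProviderOptions above
    openai: { dimensions: 512, user: 'user-123' },
  },
});
console.log(embedding.length, usage?.tokens);
```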

  // src/openai-image-model.ts
@@ -1443,25 +1418,26 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z8 } from "zod";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
  "dall-e-3": 1,
- "dall-e-2": 10
+ "dall-e-2": 10,
+ "gpt-image-1": 10
  };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);

  // src/openai-image-model.ts
  var OpenAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
- var _a, _b;
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
  return this.config.provider;
@@ -1501,7 +1477,7 @@ var OpenAIImageModel = class {
  n,
  size,
  ...(_d = providerOptions.openai) != null ? _d : {},
- response_format: "b64_json"
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler4(
@@ -1517,30 +1493,339 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
- var openaiImageResponseSchema = z5.object({
- data: z5.array(z5.object({ b64_json: z5.string() }))
+ var openaiImageResponseSchema = z8.object({
+ data: z8.array(
+ z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+ )
  });
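
`gpt-image-1` joins `modelMaxImagesPerCall` and the new `hasDefaultResponseFormat` set, so the provider omits `response_format` for it (that endpoint always returns base64), and `revised_prompt` values now surface as `providerMetadata.openai.images`. A sketch using the core's `experimental_generateImage`; whether `providerMetadata` is exposed depends on the core canary in use:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { image, providerMetadata } = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'A watercolor lighthouse at dusk',
  size: '1024x1024',
});
console.log(image.base64.slice(0, 32));
// revisedPrompt entries are populated for models that return revised_prompt
console.log(providerMetadata?.openai?.images);
```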

- // src/responses/openai-responses-language-model.ts
+ // src/openai-transcription-model.ts
  import {
  combineHeaders as combineHeaders5,
- createEventSourceResponseHandler as createEventSourceResponseHandler3,
+ convertBase64ToUint8Array,
  createJsonResponseHandler as createJsonResponseHandler5,
- generateId as generateId2,
- parseProviderOptions,
+ parseProviderOptions as parseProviderOptions4,
+ postFormDataToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z10 } from "zod";
+
+ // src/openai-transcription-options.ts
+ import { z as z9 } from "zod";
+ var openAITranscriptionProviderOptions = z9.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
+ include: z9.array(z9.string()).optional(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
+ language: z9.string().optional(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
+ prompt: z9.string().optional(),
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: z9.number().min(0).max(1).default(0).optional(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
+ });
+
+ // src/openai-transcription-model.ts
+ var languageMap = {
+ afrikaans: "af",
+ arabic: "ar",
+ armenian: "hy",
+ azerbaijani: "az",
+ belarusian: "be",
+ bosnian: "bs",
+ bulgarian: "bg",
+ catalan: "ca",
+ chinese: "zh",
+ croatian: "hr",
+ czech: "cs",
+ danish: "da",
+ dutch: "nl",
+ english: "en",
+ estonian: "et",
+ finnish: "fi",
+ french: "fr",
+ galician: "gl",
+ german: "de",
+ greek: "el",
+ hebrew: "he",
+ hindi: "hi",
+ hungarian: "hu",
+ icelandic: "is",
+ indonesian: "id",
+ italian: "it",
+ japanese: "ja",
+ kannada: "kn",
+ kazakh: "kk",
+ korean: "ko",
+ latvian: "lv",
+ lithuanian: "lt",
+ macedonian: "mk",
+ malay: "ms",
+ marathi: "mr",
+ maori: "mi",
+ nepali: "ne",
+ norwegian: "no",
+ persian: "fa",
+ polish: "pl",
+ portuguese: "pt",
+ romanian: "ro",
+ russian: "ru",
+ serbian: "sr",
+ slovak: "sk",
+ slovenian: "sl",
+ spanish: "es",
+ swahili: "sw",
+ swedish: "sv",
+ tagalog: "tl",
+ tamil: "ta",
+ thai: "th",
+ turkish: "tr",
+ ukrainian: "uk",
+ urdu: "ur",
+ vietnamese: "vi",
+ welsh: "cy"
+ };
+ var OpenAITranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = await parseProviderOptions4({
+ provider: "openai",
+ providerOptions,
+ schema: openAITranscriptionProviderOptions
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (openAIOptions) {
+ const transcriptionModelOptions = {
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
+ };
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = await this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postFormDataToApi({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders5(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler5(
+ openaiTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ text: word.word,
+ startSecond: word.start,
+ endSecond: word.end
+ }))) != null ? _e : [],
+ language,
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ var openaiTranscriptionResponseSchema = z10.object({
+ text: z10.string(),
+ language: z10.string().nullish(),
+ duration: z10.number().nullish(),
+ words: z10.array(
+ z10.object({
+ word: z10.string(),
+ start: z10.number(),
+ end: z10.number()
+ })
+ ).nullish()
+ });
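
The new `OpenAITranscriptionModel` posts multipart form data to `/audio/transcriptions`, validates OpenAI-specific options against `openAITranscriptionProviderOptions`, and maps spelled-out language names in the response back to ISO-639-1 codes via `languageMap`. A sketch, assuming the core's `experimental_transcribe`:

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';

const { text, segments, language, durationInSeconds } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.mp3'),
  providerOptions: {
    openai: { language: 'en', timestampGranularities: ['word'] },
  },
});
console.log(text, language, durationInSeconds, segments.length);
```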
+
+ // src/openai-speech-model.ts
+ import {
+ combineHeaders as combineHeaders6,
+ createBinaryResponseHandler,
+ parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
+ import { z as z11 } from "zod";
+ var OpenAIProviderOptionsSchema = z11.object({
+ instructions: z11.string().nullish(),
+ speed: z11.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async getArgs({
+ text,
+ voice = "alloy",
+ outputFormat = "mp3",
+ speed,
+ instructions,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = await parseProviderOptions5({
+ provider: "openai",
+ providerOptions,
+ schema: OpenAIProviderOptionsSchema
+ });
+ const requestBody = {
+ model: this.modelId,
+ input: text,
+ voice,
+ response_format: "mp3",
+ speed,
+ instructions
+ };
+ if (outputFormat) {
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+ requestBody.response_format = outputFormat;
+ } else {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "outputFormat",
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+ });
+ }
+ }
+ if (openAIOptions) {
+ const speechModelOptions = {};
+ for (const key in speechModelOptions) {
+ const value = speechModelOptions[key];
+ if (value !== void 0) {
+ requestBody[key] = value;
+ }
+ }
+ }
+ return {
+ requestBody,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { requestBody, warnings } = await this.getArgs(options);
+ const {
+ value: audio,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postJsonToApi5({
+ url: this.config.url({
+ path: "/audio/speech",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders6(this.config.headers(), options.headers),
+ body: requestBody,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createBinaryResponseHandler(),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ audio,
+ warnings,
+ request: {
+ body: JSON.stringify(requestBody)
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
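
The new `OpenAISpeechModel` posts to `/audio/speech` and downgrades unsupported `outputFormat` values to mp3 with an `unsupported-setting` warning. Note that `speechModelOptions` is an empty object in this build, so the parsed `instructions`/`speed` provider options only reach the request through the top-level call arguments. A sketch, assuming the core's `experimental_generateSpeech`:

```ts
import { writeFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';
import { experimental_generateSpeech as generateSpeech } from 'ai';

const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'),
  text: 'The canary build now speaks.',
  voice: 'alloy',
  outputFormat: 'wav', // in the supported list above; anything else falls back to mp3
});
await writeFile('speech.wav', audio.uint8Array);
console.log(warnings);
```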
+
+ // src/responses/openai-responses-language-model.ts
+ import {
+ combineHeaders as combineHeaders7,
+ createEventSourceResponseHandler as createEventSourceResponseHandler3,
+ createJsonResponseHandler as createJsonResponseHandler6,
+ generateId as generateId2,
+ parseProviderOptions as parseProviderOptions6,
+ postJsonToApi as postJsonToApi6
+ } from "@ai-sdk/provider-utils";
+ import { z as z12 } from "zod";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError5
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
  function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
@@ -1579,38 +1864,35 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
  }
- case "image": {
- return {
- type: "input_image",
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError5({
- functionality: "File URLs in user messages"
- });
- }
- switch (part.mimeType) {
- case "application/pdf": {
- return {
- type: "input_file",
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- };
- }
- default: {
- throw new UnsupportedFunctionalityError5({
- functionality: "Only PDF files are supported in user messages"
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "input_image",
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ };
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
+ throw new UnsupportedFunctionalityError4({
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "input_file",
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ } else {
+ throw new UnsupportedFunctionalityError4({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
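
The Responses converter now branches on `mediaType` instead of the removed `image` part type: image parts become `input_image` (URL data passes through, matching the model's `supportedUrls` below), PDFs become `input_file`, and any other media type throws. A message-shape sketch, assuming the v5-canary file-part fields:

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.responses('gpt-4o'),
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Summarize this report.' },
        {
          type: 'file',
          mediaType: 'application/pdf', // converted to an input_file part
          data: await readFile('report.pdf'),
          filename: 'report.pdf',
        },
      ],
    },
  ],
});
```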
@@ -1680,7 +1962,7 @@ function mapOpenAIResponseFinishReason({

  // src/responses/openai-responses-prepare-tools.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
  } from "@ai-sdk/provider";
  function prepareResponsesTools({
  tools,
@@ -1740,8 +2022,8 @@ function prepareResponsesTools({
  };
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError6({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ throw new UnsupportedFunctionalityError5({
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -1751,15 +2033,17 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "json";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- maxTokens,
+ async getArgs({
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1802,7 +2086,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions({
+ const openaiOptions = await parseProviderOptions6({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1813,7 +2097,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -1833,8 +2117,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -1877,133 +2168,153 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await postJsonToApi5({
+ } = await postJsonToApi6({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: combineHeaders5(this.config.headers(), options.headers),
+ headers: combineHeaders7(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: createJsonResponseHandler5(
- z6.object({
- id: z6.string(),
- created_at: z6.number(),
- model: z6.string(),
- output: z6.array(
- z6.discriminatedUnion("type", [
- z6.object({
- type: z6.literal("message"),
- role: z6.literal("assistant"),
- content: z6.array(
- z6.object({
- type: z6.literal("output_text"),
- text: z6.string(),
- annotations: z6.array(
- z6.object({
- type: z6.literal("url_citation"),
- start_index: z6.number(),
- end_index: z6.number(),
- url: z6.string(),
- title: z6.string()
+ successfulResponseHandler: createJsonResponseHandler6(
+ z12.object({
+ id: z12.string(),
+ created_at: z12.number(),
+ model: z12.string(),
+ output: z12.array(
+ z12.discriminatedUnion("type", [
+ z12.object({
+ type: z12.literal("message"),
+ role: z12.literal("assistant"),
+ content: z12.array(
+ z12.object({
+ type: z12.literal("output_text"),
+ text: z12.string(),
+ annotations: z12.array(
+ z12.object({
+ type: z12.literal("url_citation"),
+ start_index: z12.number(),
+ end_index: z12.number(),
+ url: z12.string(),
+ title: z12.string()
  })
  )
  })
  )
  }),
- z6.object({
- type: z6.literal("function_call"),
- call_id: z6.string(),
- name: z6.string(),
- arguments: z6.string()
+ z12.object({
+ type: z12.literal("function_call"),
+ call_id: z12.string(),
+ name: z12.string(),
+ arguments: z12.string()
  }),
- z6.object({
- type: z6.literal("web_search_call")
+ z12.object({
+ type: z12.literal("web_search_call")
  }),
- z6.object({
- type: z6.literal("computer_call")
+ z12.object({
+ type: z12.literal("computer_call")
  }),
- z6.object({
- type: z6.literal("reasoning")
+ z12.object({
+ type: z12.literal("reasoning"),
+ summary: z12.array(
+ z12.object({
+ type: z12.literal("summary_text"),
+ text: z12.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details: z6.object({ reason: z6.string() }).nullable(),
- usage: usageSchema
+ incomplete_details: z12.object({ reason: z12.string() }).nullable(),
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
- toolCallType: "function",
- toolCallId: output.call_id,
- toolName: output.name,
- args: output.arguments
- }));
+ const content = [];
+ for (const part of response.output) {
+ switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
+ case "message": {
+ for (const contentPart of part.content) {
+ content.push({
+ type: "text",
+ text: contentPart.text
+ });
+ for (const annotation of contentPart.annotations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
+ break;
+ }
+ case "function_call": {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: part.call_id,
+ toolName: part.name,
+ args: part.arguments
+ });
+ break;
+ }
+ }
+ }
  return {
- text: outputTextElements.map((content) => content.text).join("\n"),
- sources: outputTextElements.flatMap(
- (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2;
- return {
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
- url: annotation.url,
- title: annotation.title
- };
- })
- ),
+ content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
- hasToolCalls: toolCalls.length > 0
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
- modelId: response.model
+ modelId: response.model,
+ headers: responseHeaders,
+ body: rawResponse
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ responseId: response.id
  }
  },
  warnings
  };
  }
  async doStream(options) {
- const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi5({
+ const { args: body, warnings } = await this.getArgs(options);
+ const { responseHeaders, value: response } = await postJsonToApi6({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: combineHeaders5(this.config.headers(), options.headers),
+ headers: combineHeaders7(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
@@ -2017,16 +2328,20 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
- let cachedPromptTokens = null;
- let reasoningTokens = null;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
+ };
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  if (!chunk.success) {
@@ -2070,8 +2385,13 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ type: "text",
+ text: value.delta
+ });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ text: value.delta
  });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2088,19 +2408,18 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
- url: value.annotation.url,
- title: value.annotation.title
- }
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+ url: value.annotation.url,
+ title: value.annotation.title
  });
  }
  },
@@ -2108,103 +2427,101 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ usage,
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var usageSchema = z6.object({
- input_tokens: z6.number(),
- input_tokens_details: z6.object({ cached_tokens: z6.number().nullish() }).nullish(),
- output_tokens: z6.number(),
- output_tokens_details: z6.object({ reasoning_tokens: z6.number().nullish() }).nullish()
+ var usageSchema2 = z12.object({
+ input_tokens: z12.number(),
+ input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
+ output_tokens: z12.number(),
+ output_tokens_details: z12.object({ reasoning_tokens: z12.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z6.object({
- type: z6.literal("response.output_text.delta"),
- delta: z6.string()
+ var textDeltaChunkSchema = z12.object({
+ type: z12.literal("response.output_text.delta"),
+ delta: z12.string()
  });
- var responseFinishedChunkSchema = z6.object({
- type: z6.enum(["response.completed", "response.incomplete"]),
- response: z6.object({
- incomplete_details: z6.object({ reason: z6.string() }).nullish(),
- usage: usageSchema
+ var responseFinishedChunkSchema = z12.object({
+ type: z12.enum(["response.completed", "response.incomplete"]),
+ response: z12.object({
+ incomplete_details: z12.object({ reason: z12.string() }).nullish(),
+ usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = z6.object({
- type: z6.literal("response.created"),
- response: z6.object({
- id: z6.string(),
- created_at: z6.number(),
- model: z6.string()
+ var responseCreatedChunkSchema = z12.object({
+ type: z12.literal("response.created"),
+ response: z12.object({
+ id: z12.string(),
+ created_at: z12.number(),
+ model: z12.string()
  })
  });
- var responseOutputItemDoneSchema = z6.object({
- type: z6.literal("response.output_item.done"),
- output_index: z6.number(),
- item: z6.discriminatedUnion("type", [
- z6.object({
- type: z6.literal("message")
+ var responseOutputItemDoneSchema = z12.object({
+ type: z12.literal("response.output_item.done"),
+ output_index: z12.number(),
+ item: z12.discriminatedUnion("type", [
+ z12.object({
+ type: z12.literal("message")
  }),
- z6.object({
- type: z6.literal("function_call"),
- id: z6.string(),
- call_id: z6.string(),
- name: z6.string(),
- arguments: z6.string(),
- status: z6.literal("completed")
+ z12.object({
+ type: z12.literal("function_call"),
+ id: z12.string(),
+ call_id: z12.string(),
+ name: z12.string(),
+ arguments: z12.string(),
+ status: z12.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z6.object({
- type: z6.literal("response.function_call_arguments.delta"),
- item_id: z6.string(),
- output_index: z6.number(),
- delta: z6.string()
+ var responseFunctionCallArgumentsDeltaSchema = z12.object({
+ type: z12.literal("response.function_call_arguments.delta"),
+ item_id: z12.string(),
+ output_index: z12.number(),
+ delta: z12.string()
  });
- var responseOutputItemAddedSchema = z6.object({
- type: z6.literal("response.output_item.added"),
- output_index: z6.number(),
- item: z6.discriminatedUnion("type", [
- z6.object({
- type: z6.literal("message")
+ var responseOutputItemAddedSchema = z12.object({
+ type: z12.literal("response.output_item.added"),
+ output_index: z12.number(),
+ item: z12.discriminatedUnion("type", [
+ z12.object({
+ type: z12.literal("message")
  }),
- z6.object({
- type: z6.literal("function_call"),
- id: z6.string(),
- call_id: z6.string(),
- name: z6.string(),
- arguments: z6.string()
+ z12.object({
+ type: z12.literal("function_call"),
+ id: z12.string(),
+ call_id: z12.string(),
+ name: z12.string(),
+ arguments: z12.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z6.object({
- type: z6.literal("response.output_text.annotation.added"),
- annotation: z6.object({
- type: z6.literal("url_citation"),
- url: z6.string(),
- title: z6.string()
+ var responseAnnotationAddedSchema = z12.object({
+ type: z12.literal("response.output_text.annotation.added"),
+ annotation: z12.object({
+ type: z12.literal("url_citation"),
+ url: z12.string(),
+ title: z12.string()
  })
  });
- var openaiResponsesChunkSchema = z6.union([
+ var responseReasoningSummaryTextDeltaSchema = z12.object({
+ type: z12.literal("response.reasoning_summary_text.delta"),
+ item_id: z12.string(),
+ output_index: z12.number(),
+ summary_index: z12.number(),
+ delta: z12.string()
+ });
+ var openaiResponsesChunkSchema = z12.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2212,7 +2529,8 @@ var openaiResponsesChunkSchema = z6.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- z6.object({ type: z6.string() }).passthrough()
+ responseReasoningSummaryTextDeltaSchema,
+ z12.object({ type: z12.string() }).passthrough()
  // fallback for unknown chunks
  ]);
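
The stream now opens with a `stream-start` part carrying warnings, and `response.reasoning_summary_text.delta` chunks (schema above) are emitted as `reasoning` parts. A consumption sketch, assuming the core's `fullStream` forwards the provider part names shown in this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('o3-mini'),
  prompt: 'Plan a three-step data migration.',
  providerOptions: { openai: { reasoningSummary: 'auto' } },
});

for await (const part of result.fullStream) {
  if (part.type === 'reasoning') process.stdout.write(`[reasoning] ${part.text}`); // summary deltas
  if (part.type === 'text') process.stdout.write(part.text);
}
```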
  function isTextDeltaChunk(chunk) {
@@ -2236,6 +2554,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2257,15 +2578,16 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z6.object({
- metadata: z6.any().nullish(),
- parallelToolCalls: z6.boolean().nullish(),
- previousResponseId: z6.string().nullish(),
- store: z6.boolean().nullish(),
- user: z6.string().nullish(),
- reasoningEffort: z6.string().nullish(),
- strictSchemas: z6.boolean().nullish(),
- instructions: z6.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z12.object({
+ metadata: z12.any().nullish(),
+ parallelToolCalls: z12.boolean().nullish(),
+ previousResponseId: z12.string().nullish(),
+ store: z12.boolean().nullish(),
+ user: z12.string().nullish(),
+ reasoningEffort: z12.string().nullish(),
+ strictSchemas: z12.boolean().nullish(),
+ instructions: z12.string().nullish(),
+ reasoningSummary: z12.string().nullish()
  });
  export {
  OpenAIChatLanguageModel,
@@ -2273,6 +2595,13 @@ export {
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
- modelMaxImagesPerCall
+ OpenAISpeechModel,
+ OpenAITranscriptionModel,
+ hasDefaultResponseFormat,
+ modelMaxImagesPerCall,
+ openAITranscriptionProviderOptions,
+ openaiCompletionProviderOptions,
+ openaiEmbeddingProviderOptions,
+ openaiProviderOptions
  };
 //# sourceMappingURL=index.mjs.map