@ai-sdk/openai 2.0.0-canary.9 → 2.0.1

This diff shows the content of publicly released versions of this package as published to their respective public registries, and is provided for informational purposes only.
@@ -8,10 +8,10 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
- postJsonToApi,
- parseProviderOptions
+ parseProviderOptions,
+ postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z5 } from "zod/v4";

  // src/convert-to-openai-chat-messages.ts
  import {
@@ -118,7 +118,7 @@ function convertToOpenAIChatMessages({
  type: "file",
  file: {
  filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
  }
  };
  } else {
@@ -147,7 +147,7 @@ function convertToOpenAIChatMessages({
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -163,10 +163,23 @@ function convertToOpenAIChatMessages({
  }
  case "tool": {
  for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
+ content: contentValue
  });
  }
  break;
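
Put differently: tool results are no longer unconditionally `JSON.stringify`-ed; they now arrive as a discriminated `output` union, and only the structured variants get serialized. A minimal TypeScript sketch of that mapping (sample values are illustrative):

```ts
// The output union handled by the switch above (shapes per the diff):
type ToolOutput =
  | { type: "text" | "error-text"; value: string }
  | { type: "content" | "json" | "error-json"; value: unknown };

function toMessageContent(output: ToolOutput): string {
  // Text-like outputs pass through; structured outputs are serialized.
  return output.type === "text" || output.type === "error-text"
    ? output.value
    : JSON.stringify(output.value);
}

console.log(toMessageContent({ type: "text", value: "72°F and sunny" }));
console.log(toMessageContent({ type: "json", value: { tempF: 72 } }));
```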
@@ -180,17 +193,17 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

  // src/map-openai-finish-reason.ts
@@ -211,7 +224,7 @@ function mapOpenAIFinishReason(finishReason) {
  }

  // src/openai-chat-options.ts
- import { z } from "zod";
+ import { z } from "zod/v4";
  var openaiProviderOptions = z.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
@@ -254,15 +267,36 @@ var openaiProviderOptions = z.object({
  /**
  * Metadata to associate with the request.
  */
- metadata: z.record(z.string()).optional(),
+ metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
  /**
  * Parameters for prediction mode.
  */
- prediction: z.record(z.any()).optional()
+ prediction: z.record(z.string(), z.any()).optional(),
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: z.boolean().optional(),
+ /**
+ * Service tier for the request.
+ * - 'auto': Default service tier
+ * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+ *
+ * @default 'auto'
+ */
+ serviceTier: z.enum(["auto", "flex", "priority"]).optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ *
+ * @default false
+ */
+ strictJsonSchema: z.boolean().optional()
  });

  // src/openai-error.ts
- import { z as z2 } from "zod";
+ import { z as z2 } from "zod/v4";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
  var openaiErrorDataSchema = z2.object({
  error: z2.object({
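
The new chat options are reachable through `providerOptions.openai`. A hedged sketch of a call that exercises them (the model id and metadata values are illustrative):

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai("o4-mini"),
  prompt: "Summarize the release notes.",
  providerOptions: {
    openai: {
      serviceTier: "flex",          // cheaper, higher-latency processing
      strictJsonSchema: false,      // default per the schema above
      metadata: { run: "nightly" }, // keys <= 64 chars, values <= 512 chars
    },
  },
});
```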
@@ -280,27 +314,103 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
+
+ // src/tool/file-search.ts
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod/v4";
+ var comparisonFilterSchema = z3.object({
+ key: z3.string(),
+ type: z3.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+ value: z3.union([z3.string(), z3.number(), z3.boolean()])
+ });
+ var compoundFilterSchema = z3.object({
+ type: z3.enum(["and", "or"]),
+ filters: z3.array(
+ z3.union([comparisonFilterSchema, z3.lazy(() => compoundFilterSchema)])
+ )
+ });
+ var filtersSchema = z3.union([comparisonFilterSchema, compoundFilterSchema]);
+ var fileSearchArgsSchema = z3.object({
+ /**
+ * List of vector store IDs to search through. If not provided, searches all available vector stores.
+ */
+ vectorStoreIds: z3.array(z3.string()).optional(),
+ /**
+ * Maximum number of search results to return. Defaults to 10.
+ */
+ maxNumResults: z3.number().optional(),
+ /**
+ * Ranking options for the search.
+ */
+ ranking: z3.object({
+ ranker: z3.enum(["auto", "default-2024-08-21"]).optional()
+ }).optional(),
+ /**
+ * A filter to apply based on file attributes.
+ */
+ filters: filtersSchema.optional()
+ });
+ var fileSearch = createProviderDefinedToolFactory({
+ id: "openai.file_search",
+ name: "file_search",
+ inputSchema: z3.object({
+ query: z3.string()
+ })
+ });
+
+ // src/tool/web-search-preview.ts
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod/v4";
+ var webSearchPreviewArgsSchema = z4.object({
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize: z4.enum(["low", "medium", "high"]).optional(),
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation: z4.object({
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: z4.literal("approximate"),
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country: z4.string().optional(),
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city: z4.string().optional(),
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region: z4.string().optional(),
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone: z4.string().optional()
+ }).optional()
+ });
+ var webSearchPreview = createProviderDefinedToolFactory2({
+ id: "openai.web_search_preview",
+ name: "web_search_preview",
+ inputSchema: z4.object({})
+ });
+
+ // src/openai-prepare-tools.ts
  function prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
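
Assuming the provider re-exports these factories as `openai.tools.fileSearch` and `openai.tools.webSearchPreview` (the tool ids above suggest so), wiring them up could look like this sketch; the vector store id and filter are illustrative:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai("gpt-4o-mini"),
  prompt: "What does the handbook say about PTO?",
  tools: {
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ["vs_123"], // hypothetical vector store id
      maxNumResults: 5,
      filters: { key: "team", type: "eq", value: "hr" },
    }),
    web_search_preview: openai.tools.webSearchPreview({
      searchContextSize: "medium",
    }),
  },
});
```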
@@ -309,18 +419,48 @@ function prepareTools({
  }
  const openaiTools = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiTools.push({
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters,
- strict: structuredOutputs ? true : void 0
+ switch (tool.type) {
+ case "function":
+ openaiTools.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.inputSchema,
+ strict: structuredOutputs ? strictJsonSchema : void 0
+ }
+ });
+ break;
+ case "provider-defined":
+ switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_num_results: args.maxNumResults,
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+ filters: args.filters
+ });
+ break;
+ }
+ case "openai.web_search_preview": {
+ const args = webSearchPreviewArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "web_search_preview",
+ search_context_size: args.searchContextSize,
+ user_location: args.userLocation
+ });
+ break;
+ }
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
- });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
@@ -354,29 +494,18 @@ function prepareTools({

  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
- }
- getArgs({
+ async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
@@ -391,20 +520,21 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b;
+ var _a, _b, _c, _d;
  const warnings = [];
- const openaiOptions = (_a = parseProviderOptions({
+ const openaiOptions = (_a = await parseProviderOptions({
  provider: "openai",
  providerOptions,
  schema: openaiProviderOptions
  })) != null ? _a : {};
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
@@ -418,6 +548,7 @@ var OpenAIChatLanguageModel = class {
  }
  );
  warnings.push(...messageWarnings);
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -433,13 +564,12 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- // TODO improve below:
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
  type: "json_schema",
  json_schema: {
  schema: responseFormat.schema,
- strict: true,
- name: (_b = responseFormat.name) != null ? _b : "response",
+ strict: strictJsonSchema,
+ name: (_d = responseFormat.name) != null ? _d : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
@@ -452,6 +582,7 @@ var OpenAIChatLanguageModel = class {
  metadata: openaiOptions.metadata,
  prediction: openaiOptions.prediction,
  reasoning_effort: openaiOptions.reasoningEffort,
+ service_tier: openaiOptions.serviceTier,
  // messages:
  messages
  };
@@ -525,6 +656,22 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ baseArgs.service_tier = void 0;
+ }
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ });
+ baseArgs.service_tier = void 0;
+ }
  const {
  tools: openaiTools,
  toolChoice: openaiToolChoice,
@@ -532,7 +679,8 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs: this.supportsStructuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  });
  return {
  args: {
@@ -544,8 +692,8 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -573,33 +721,32 @@ var OpenAIChatLanguageModel = class {
  for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  }
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
  const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
- outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
  },
  request: { body },
  response: {
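
Token accounting thus moves out of `providerMetadata.openai` into the standard `usage` object, which gains `totalTokens`, `reasoningTokens`, and `cachedInputTokens` (the latter replacing `cachedPromptTokens`). A sketch of reading it, with an illustrative model id:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { usage } = await generateText({
  model: openai("o3-mini"),
  prompt: "Explain beam search in one paragraph.",
});

console.log(usage.totalTokens);       // new: reported directly
console.log(usage.reasoningTokens);   // was providerMetadata.openai.reasoningTokens
console.log(usage.cachedInputTokens); // was providerMetadata.openai.cachedPromptTokens
```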
@@ -608,17 +755,17 @@ var OpenAIChatLanguageModel = class {
  body: rawResponse
  },
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
  const { responseHeaders, value: response } = await postJsonToApi({
  url: this.config.url({
@@ -634,15 +781,15 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
+ let isActiveText = false;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -651,7 +798,10 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -671,48 +821,40 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
- }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
  const delta = choice.delta;
  if (delta.content != null) {
+ if (!isActiveText) {
+ controller.enqueue({ type: "text-start", id: "0" });
+ isActiveText = true;
+ }
  controller.enqueue({
- type: "text",
- text: delta.content
+ type: "text-delta",
+ id: "0",
+ delta: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  if (delta.tool_calls != null) {
  for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
@@ -729,39 +871,45 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
  });
  }
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallDelta.id,
+ toolName: toolCallDelta.function.name
+ });
  toolCalls[index] = {
  id: toolCallDelta.id,
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-delta",
+ id: toolCall2.id,
+ delta: toolCall2.function.arguments
  });
  }
  if (isParsableJson(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.hasFinished = true;
  }
@@ -772,23 +920,24 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+ toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.hasFinished = true;
  }
@@ -796,10 +945,12 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
+ if (isActiveText) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
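
Streaming now brackets assistant text with explicit `text-start`/`text-delta`/`text-end` parts and tool arguments with `tool-input-start`/`-delta`/`-end`, replacing the old bare `text` and `tool-call-delta` parts (and `input` replaces `args` on the final `tool-call`). An illustrative part sequence for a response that streams "Hi" and then calls a hypothetical `weather` tool — the shapes match the transform above, the values are invented:

```ts
const emitted = [
  { type: "stream-start", warnings: [] },
  { type: "text-start", id: "0" },
  { type: "text-delta", id: "0", delta: "Hi" },
  { type: "tool-input-start", id: "call_1", toolName: "weather" },
  { type: "tool-input-delta", id: "call_1", delta: '{"city":"SF"}' },
  { type: "tool-input-end", id: "call_1" },
  { type: "tool-call", toolCallId: "call_1", toolName: "weather", input: '{"city":"SF"}' },
  { type: "text-end", id: "0" }, // enqueued in flush() while text is active
  { type: "finish", finishReason: "tool-calls", usage: { inputTokens: 9, outputTokens: 6, totalTokens: 15 } },
];
console.log(emitted.map((part) => part.type).join(" -> "));
```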
@@ -811,96 +962,97 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = z3.object({
- prompt_tokens: z3.number().nullish(),
- completion_tokens: z3.number().nullish(),
- prompt_tokens_details: z3.object({
- cached_tokens: z3.number().nullish()
+ var openaiTokenUsageSchema = z5.object({
+ prompt_tokens: z5.number().nullish(),
+ completion_tokens: z5.number().nullish(),
+ total_tokens: z5.number().nullish(),
+ prompt_tokens_details: z5.object({
+ cached_tokens: z5.number().nullish()
  }).nullish(),
- completion_tokens_details: z3.object({
- reasoning_tokens: z3.number().nullish(),
- accepted_prediction_tokens: z3.number().nullish(),
- rejected_prediction_tokens: z3.number().nullish()
+ completion_tokens_details: z5.object({
+ reasoning_tokens: z5.number().nullish(),
+ accepted_prediction_tokens: z5.number().nullish(),
+ rejected_prediction_tokens: z5.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- message: z3.object({
- role: z3.literal("assistant").nullish(),
- content: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- id: z3.string().nullish(),
- type: z3.literal("function"),
- function: z3.object({
- name: z3.string(),
- arguments: z3.string()
+ var openaiChatResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ message: z5.object({
+ role: z5.literal("assistant").nullish(),
+ content: z5.string().nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ id: z5.string().nullish(),
+ type: z5.literal("function"),
+ function: z5.object({
+ name: z5.string(),
+ arguments: z5.string()
  })
  })
  ).nullish()
  }),
- index: z3.number(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
+ index: z5.number(),
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullish()
+ finish_reason: z5.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- delta: z3.object({
- role: z3.enum(["assistant"]).nullish(),
- content: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- index: z3.number(),
- id: z3.string().nullish(),
- type: z3.literal("function").optional(),
- function: z3.object({
- name: z3.string().nullish(),
- arguments: z3.string().nullish()
+ var openaiChatChunkSchema = z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ delta: z5.object({
+ role: z5.enum(["assistant"]).nullish(),
+ content: z5.string().nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ index: z5.number(),
+ id: z5.string().nullish(),
+ type: z5.literal("function").nullish(),
+ function: z5.object({
+ name: z5.string().nullish(),
+ arguments: z5.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullable().optional(),
- index: z3.number()
+ finish_reason: z5.string().nullish(),
+ index: z5.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -910,8 +1062,11 @@ var openaiChatChunkSchema = z3.union([
  function isReasoningModel(modelId) {
  return modelId.startsWith("o");
  }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ function supportsFlexProcessing(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ function supportsPriorityProcessing(modelId) {
+ return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
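
A standalone restatement of those gating predicates, to make the model coverage concrete (the ids are examples):

```ts
const supportsFlex = (id: string) =>
  id.startsWith("o3") || id.startsWith("o4-mini");
const supportsPriority = (id: string) =>
  id.startsWith("gpt-4") || id.startsWith("o3") || id.startsWith("o4-mini");

console.log(supportsFlex("o4-mini-2025-04-16")); // true
console.log(supportsFlex("gpt-4o"));             // false: serviceTier is dropped with a warning
console.log(supportsPriority("gpt-4o"));         // true
```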
@@ -933,11 +1088,23 @@ var reasoningModels = {
  "o1-preview-2024-09-12": {
  systemMessageMode: "remove"
  },
+ o3: {
+ systemMessageMode: "developer"
+ },
+ "o3-2025-04-16": {
+ systemMessageMode: "developer"
+ },
  "o3-mini": {
  systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
  systemMessageMode: "developer"
+ },
+ "o4-mini": {
+ systemMessageMode: "developer"
+ },
+ "o4-mini-2025-04-16": {
+ systemMessageMode: "developer"
  }
  };

@@ -946,9 +1113,10 @@ import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
  createJsonResponseHandler as createJsonResponseHandler2,
+ parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z7 } from "zod/v4";

  // src/convert-to-openai-completion-prompt.ts
  import {
@@ -957,13 +1125,9 @@ import {
  } from "@ai-sdk/provider";
  function convertToOpenAICompletionPrompt({
  prompt,
- inputFormat,
  user = "user",
  assistant = "assistant"
  }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
  let text = "";
  if (prompt[0].role === "system") {
  text += `${prompt[0].content}
@@ -1032,34 +1196,66 @@ ${user}:`]
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
+ // src/openai-completion-options.ts
+ import { z as z6 } from "zod/v4";
+ var openaiCompletionProviderOptions = z6.object({
+ /**
+ Echo back the prompt in addition to the completion.
+ */
+ echo: z6.boolean().optional(),
+ /**
+ Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
+ the bias is added to the logits generated by the model prior to sampling.
+ The exact effect will vary per model, but values between -1 and 1 should
+ decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+ token from being generated.
+ */
+ logitBias: z6.record(z6.string(), z6.number()).optional(),
+ /**
+ The suffix that comes after a completion of inserted text.
+ */
+ suffix: z6.string().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: z6.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: z6.union([z6.boolean(), z6.number()]).optional()
+ });

  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
+ this.supportedUrls = {
+ // No URLs are supported for completion models.
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- inputFormat,
+ async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
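
Completion-model settings likewise move off the removed `settings` constructor argument onto `providerOptions`. A hedged sketch (values illustrative):

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai.completion("gpt-3.5-turbo-instruct"),
  prompt: "Once upon a time",
  providerOptions: {
    openai: {
      echo: false,
      logprobs: 3,                  // top-3 logprobs per generated token
      logitBias: { "50256": -100 }, // ban <|endoftext|>, per the docs above
    },
  },
});
```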
@@ -1071,9 +1267,22 @@ var OpenAICompletionLanguageModel = class {
  responseFormat,
  tools,
  toolChoice,
- seed
+ seed,
+ providerOptions
  }) {
  const warnings = [];
+ const openaiOptions = {
+ ...await parseProviderOptions2({
+ provider: "openai",
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ }),
+ ...await parseProviderOptions2({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ })
+ };
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -1090,18 +1299,18 @@ var OpenAICompletionLanguageModel = class {
  details: "JSON response format is not supported."
  });
  }
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  return {
  args: {
  // model id:
  model: this.modelId,
  // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
+ echo: openaiOptions.echo,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+ suffix: openaiOptions.suffix,
+ user: openaiOptions.user,
  // standardized settings:
  max_tokens: maxOutputTokens,
  temperature,
@@ -1118,7 +1327,8 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
- const { args, warnings } = this.getArgs(options);
+ var _a, _b, _c;
+ const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -1138,30 +1348,36 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  return {
  content: [{ type: "text", text: choice.text }],
  usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  request: { body: args },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
  body: rawResponse
  },
+ providerMetadata,
  warnings
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
  const { responseHeaders, value: response } = await postJsonToApi2({
  url: this.config.url({
@@ -1178,11 +1394,12 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
+ const providerMetadata = { openai: {} };
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
@@ -1191,6 +1408,9 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1208,34 +1428,36 @@ var OpenAICompletionLanguageModel = class {
  type: "response-metadata",
  ...getResponseMetadata(value)
  });
+ controller.enqueue({ type: "text-start", id: "0" });
  }
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
  usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
- if ((choice == null ? void 0 : choice.text) != null) {
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
  controller.enqueue({
- type: "text",
- text: choice.text
+ type: "text-delta",
+ id: "0",
+ delta: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
+ if (!isFirstChunk) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
+ providerMetadata,
  usage
  });
  }
@@ -1246,47 +1468,46 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = z4.object({
- id: z4.string().nullish(),
- created: z4.number().nullish(),
- model: z4.string().nullish(),
- choices: z4.array(
- z4.object({
- text: z4.string(),
- finish_reason: z4.string(),
- logprobs: z4.object({
- tokens: z4.array(z4.string()),
- token_logprobs: z4.array(z4.number()),
- top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
+ var usageSchema = z7.object({
+ prompt_tokens: z7.number(),
+ completion_tokens: z7.number(),
+ total_tokens: z7.number()
+ });
+ var openaiCompletionResponseSchema = z7.object({
+ id: z7.string().nullish(),
+ created: z7.number().nullish(),
+ model: z7.string().nullish(),
+ choices: z7.array(
+ z7.object({
+ text: z7.string(),
+ finish_reason: z7.string(),
+ logprobs: z7.object({
+ tokens: z7.array(z7.string()),
+ token_logprobs: z7.array(z7.number()),
+ top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
  }).nullish()
  })
  ),
- usage: z4.object({
- prompt_tokens: z4.number(),
- completion_tokens: z4.number()
- })
+ usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = z4.union([
- z4.object({
- id: z4.string().nullish(),
- created: z4.number().nullish(),
- model: z4.string().nullish(),
- choices: z4.array(
- z4.object({
- text: z4.string(),
- finish_reason: z4.string().nullish(),
- index: z4.number(),
- logprobs: z4.object({
- tokens: z4.array(z4.string()),
- token_logprobs: z4.array(z4.number()),
- top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
+ var openaiCompletionChunkSchema = z7.union([
+ z7.object({
+ id: z7.string().nullish(),
+ created: z7.number().nullish(),
+ model: z7.string().nullish(),
+ choices: z7.array(
+ z7.object({
+ text: z7.string(),
+ finish_reason: z7.string().nullish(),
+ index: z7.number(),
+ logprobs: z7.object({
+ tokens: z7.array(z7.string()),
+ token_logprobs: z7.array(z7.number()),
+ top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
  }).nullish()
  })
  ),
- usage: z4.object({
- prompt_tokens: z4.number(),
- completion_tokens: z4.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
@@ -1298,32 +1519,45 @@ import {
  import {
  combineHeaders as combineHeaders3,
  createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z9 } from "zod/v4";
+
+ // src/openai-embedding-options.ts
+ import { z as z8 } from "zod/v4";
+ var openaiEmbeddingProviderOptions = z8.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: z8.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: z8.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1332,6 +1566,11 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
+ const openaiOptions = (_a = await parseProviderOptions3({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
  const {
  responseHeaders,
  value: response,
@@ -1346,8 +1585,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler3(
@@ -1363,9 +1602,9 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z5.object({
- data: z5.array(z5.object({ embedding: z5.array(z5.number()) })),
- usage: z5.object({ prompt_tokens: z5.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z9.object({
+ data: z9.array(z9.object({ embedding: z9.array(z9.number()) })),
+ usage: z9.object({ prompt_tokens: z9.number() }).nullish()
  });

  // src/openai-image-model.ts
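
Embedding options follow the same migration from constructor settings to `providerOptions`. A sketch, assuming `openai.embedding(...)` as the model factory (the dimension count is illustrative):

```ts
import { openai } from "@ai-sdk/openai";
import { embed } from "ai";

const { embedding } = await embed({
  model: openai.embedding("text-embedding-3-small"),
  value: "sunny day at the beach",
  providerOptions: {
    openai: { dimensions: 512, user: "user-1234" },
  },
});
console.log(embedding.length); // 512 when the model honors `dimensions`
```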
@@ -1374,25 +1613,26 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
+ import { z as z10 } from "zod/v4";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
  "dall-e-3": 1,
- "dall-e-2": 10
+ "dall-e-2": 10,
+ "gpt-image-1": 10
  };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);

  // src/openai-image-model.ts
  var OpenAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
- var _a, _b;
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
  return this.config.provider;
@@ -1432,7 +1672,7 @@ var OpenAIImageModel = class {
  n,
  size,
  ...(_d = providerOptions.openai) != null ? _d : {},
- response_format: "b64_json"
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler4(
@@ -1448,12 +1688,23 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
- var openaiImageResponseSchema = z6.object({
- data: z6.array(z6.object({ b64_json: z6.string() }))
+ var openaiImageResponseSchema = z10.object({
+ data: z10.array(
+ z10.object({ b64_json: z10.string(), revised_prompt: z10.string().optional() })
+ )
  });

  // src/openai-transcription-model.ts
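
gpt-image-1 is now a known image model (up to 10 images per call), `response_format` is omitted for it since that API returns base64 by default, and revised prompts, when returned, surface per-image under `providerMetadata.openai.images`. A hedged sketch:

```ts
import { openai } from "@ai-sdk/openai";
import { experimental_generateImage as generateImage } from "ai";

const { image, providerMetadata } = await generateImage({
  model: openai.image("gpt-image-1"),
  prompt: "A watercolor lighthouse at dusk",
});

console.log(image.base64.slice(0, 32));        // base64 payload from b64_json
console.log(providerMetadata?.openai?.images); // [{ revisedPrompt }] or [null]
```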
@@ -1461,17 +1712,39 @@ import {
1461
1712
  combineHeaders as combineHeaders5,
1462
1713
  convertBase64ToUint8Array,
1463
1714
  createJsonResponseHandler as createJsonResponseHandler5,
1464
- parseProviderOptions as parseProviderOptions2,
1715
+ parseProviderOptions as parseProviderOptions4,
1465
1716
  postFormDataToApi
1466
1717
  } from "@ai-sdk/provider-utils";
1467
- import { z as z7 } from "zod";
1468
- var openAIProviderOptionsSchema = z7.object({
1469
- include: z7.array(z7.string()).nullish(),
1470
- language: z7.string().nullish(),
1471
- prompt: z7.string().nullish(),
1472
- temperature: z7.number().min(0).max(1).nullish().default(0),
1473
- timestampGranularities: z7.array(z7.enum(["word", "segment"])).nullish().default(["segment"])
1718
+ import { z as z12 } from "zod/v4";
1719
+
1720
+ // src/openai-transcription-options.ts
1721
+ import { z as z11 } from "zod/v4";
1722
+ var openAITranscriptionProviderOptions = z11.object({
1723
+ /**
1724
+ * Additional information to include in the transcription response.
1725
+ */
1726
+ include: z11.array(z11.string()).optional(),
1727
+ /**
1728
+ * The language of the input audio in ISO-639-1 format.
1729
+ */
1730
+ language: z11.string().optional(),
1731
+ /**
1732
+ * An optional text to guide the model's style or continue a previous audio segment.
1733
+ */
1734
+ prompt: z11.string().optional(),
1735
+ /**
1736
+ * The sampling temperature, between 0 and 1.
1737
+ * @default 0
1738
+ */
1739
+ temperature: z11.number().min(0).max(1).default(0).optional(),
1740
+ /**
1741
+ * The timestamp granularities to populate for this transcription.
1742
+ * @default ['segment']
1743
+ */
1744
+ timestampGranularities: z11.array(z11.enum(["word", "segment"])).default(["segment"]).optional()
1474
1745
  });
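
These transcription options travel under the openai key of providerOptions and are appended to the multipart form one field at a time. A sketch, assuming the AI SDK v5 experimental_transcribe entry point:

    import { openai } from '@ai-sdk/openai';
    import { experimental_transcribe as transcribe } from 'ai';
    import { readFile } from 'node:fs/promises';

    const { text } = await transcribe({
      model: openai.transcription('whisper-1'),
      audio: await readFile('meeting.mp3'),
      providerOptions: {
        openai: {
          language: 'en',                   // ISO-639-1
          timestampGranularities: ['word'], // default is ['segment']
        },
      },
    });
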
1746
+
1747
+ // src/openai-transcription-model.ts
1475
1748
  var languageMap = {
1476
1749
  afrikaans: "af",
1477
1750
  arabic: "ar",
@@ -1535,22 +1808,21 @@ var OpenAITranscriptionModel = class {
1535
1808
  constructor(modelId, config) {
1536
1809
  this.modelId = modelId;
1537
1810
  this.config = config;
1538
- this.specificationVersion = "v1";
1811
+ this.specificationVersion = "v2";
1539
1812
  }
1540
1813
  get provider() {
1541
1814
  return this.config.provider;
1542
1815
  }
1543
- getArgs({
1816
+ async getArgs({
1544
1817
  audio,
1545
1818
  mediaType,
1546
1819
  providerOptions
1547
1820
  }) {
1548
- var _a, _b, _c, _d, _e;
1549
1821
  const warnings = [];
1550
- const openAIOptions = parseProviderOptions2({
1822
+ const openAIOptions = await parseProviderOptions4({
1551
1823
  provider: "openai",
1552
1824
  providerOptions,
1553
- schema: openAIProviderOptionsSchema
1825
+ schema: openAITranscriptionProviderOptions
1554
1826
  });
1555
1827
  const formData = new FormData();
1556
1828
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
@@ -1558,15 +1830,14 @@ var OpenAITranscriptionModel = class {
1558
1830
  formData.append("file", new File([blob], "audio", { type: mediaType }));
1559
1831
  if (openAIOptions) {
1560
1832
  const transcriptionModelOptions = {
1561
- include: (_a = openAIOptions.include) != null ? _a : void 0,
1562
- language: (_b = openAIOptions.language) != null ? _b : void 0,
1563
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
1564
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
1565
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
1833
+ include: openAIOptions.include,
1834
+ language: openAIOptions.language,
1835
+ prompt: openAIOptions.prompt,
1836
+ temperature: openAIOptions.temperature,
1837
+ timestamp_granularities: openAIOptions.timestampGranularities
1566
1838
  };
1567
- for (const key in transcriptionModelOptions) {
1568
- const value = transcriptionModelOptions[key];
1569
- if (value !== void 0) {
1839
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
1840
+ if (value != null) {
1570
1841
  formData.append(key, String(value));
1571
1842
  }
1572
1843
  }
@@ -1579,7 +1850,7 @@ var OpenAITranscriptionModel = class {
1579
1850
  async doGenerate(options) {
1580
1851
  var _a, _b, _c, _d, _e, _f;
1581
1852
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1582
- const { formData, warnings } = this.getArgs(options);
1853
+ const { formData, warnings } = await this.getArgs(options);
1583
1854
  const {
1584
1855
  value: response,
1585
1856
  responseHeaders,
@@ -1618,15 +1889,15 @@ var OpenAITranscriptionModel = class {
1618
1889
  };
1619
1890
  }
1620
1891
  };
1621
- var openaiTranscriptionResponseSchema = z7.object({
1622
- text: z7.string(),
1623
- language: z7.string().nullish(),
1624
- duration: z7.number().nullish(),
1625
- words: z7.array(
1626
- z7.object({
1627
- word: z7.string(),
1628
- start: z7.number(),
1629
- end: z7.number()
1892
+ var openaiTranscriptionResponseSchema = z12.object({
1893
+ text: z12.string(),
1894
+ language: z12.string().nullish(),
1895
+ duration: z12.number().nullish(),
1896
+ words: z12.array(
1897
+ z12.object({
1898
+ word: z12.string(),
1899
+ start: z12.number(),
1900
+ end: z12.number()
1630
1901
  })
1631
1902
  ).nullish()
1632
1903
  });
@@ -1635,33 +1906,34 @@ var openaiTranscriptionResponseSchema = z7.object({
1635
1906
  import {
1636
1907
  combineHeaders as combineHeaders6,
1637
1908
  createBinaryResponseHandler,
1638
- parseProviderOptions as parseProviderOptions3,
1909
+ parseProviderOptions as parseProviderOptions5,
1639
1910
  postJsonToApi as postJsonToApi5
1640
1911
  } from "@ai-sdk/provider-utils";
1641
- import { z as z8 } from "zod";
1642
- var OpenAIProviderOptionsSchema = z8.object({
1643
- instructions: z8.string().nullish(),
1644
- speed: z8.number().min(0.25).max(4).default(1).nullish()
1912
+ import { z as z13 } from "zod/v4";
1913
+ var OpenAIProviderOptionsSchema = z13.object({
1914
+ instructions: z13.string().nullish(),
1915
+ speed: z13.number().min(0.25).max(4).default(1).nullish()
1645
1916
  });
1646
1917
  var OpenAISpeechModel = class {
1647
1918
  constructor(modelId, config) {
1648
1919
  this.modelId = modelId;
1649
1920
  this.config = config;
1650
- this.specificationVersion = "v1";
1921
+ this.specificationVersion = "v2";
1651
1922
  }
1652
1923
  get provider() {
1653
1924
  return this.config.provider;
1654
1925
  }
1655
- getArgs({
1926
+ async getArgs({
1656
1927
  text,
1657
1928
  voice = "alloy",
1658
1929
  outputFormat = "mp3",
1659
1930
  speed,
1660
1931
  instructions,
1932
+ language,
1661
1933
  providerOptions
1662
1934
  }) {
1663
1935
  const warnings = [];
1664
- const openAIOptions = parseProviderOptions3({
1936
+ const openAIOptions = await parseProviderOptions5({
1665
1937
  provider: "openai",
1666
1938
  providerOptions,
1667
1939
  schema: OpenAIProviderOptionsSchema
@@ -1694,6 +1966,13 @@ var OpenAISpeechModel = class {
1694
1966
  }
1695
1967
  }
1696
1968
  }
1969
+ if (language) {
1970
+ warnings.push({
1971
+ type: "unsupported-setting",
1972
+ setting: "language",
1973
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
1974
+ });
1975
+ }
1697
1976
  return {
1698
1977
  requestBody,
1699
1978
  warnings
@@ -1702,7 +1981,7 @@ var OpenAISpeechModel = class {
1702
1981
  async doGenerate(options) {
1703
1982
  var _a, _b, _c;
1704
1983
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1705
- const { requestBody, warnings } = this.getArgs(options);
1984
+ const { requestBody, warnings } = await this.getArgs(options);
1706
1985
  const {
1707
1986
  value: audio,
1708
1987
  responseHeaders,
@@ -1736,24 +2015,30 @@ var OpenAISpeechModel = class {
1736
2015
  };
1737
2016
 
1738
2017
  // src/responses/openai-responses-language-model.ts
2018
+ import {
2019
+ APICallError
2020
+ } from "@ai-sdk/provider";
1739
2021
  import {
1740
2022
  combineHeaders as combineHeaders7,
1741
2023
  createEventSourceResponseHandler as createEventSourceResponseHandler3,
1742
2024
  createJsonResponseHandler as createJsonResponseHandler6,
1743
2025
  generateId as generateId2,
1744
- parseProviderOptions as parseProviderOptions4,
2026
+ parseProviderOptions as parseProviderOptions7,
1745
2027
  postJsonToApi as postJsonToApi6
1746
2028
  } from "@ai-sdk/provider-utils";
1747
- import { z as z9 } from "zod";
2029
+ import { z as z15 } from "zod/v4";
1748
2030
 
1749
2031
  // src/responses/convert-to-openai-responses-messages.ts
1750
2032
  import {
1751
2033
  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
1752
2034
  } from "@ai-sdk/provider";
1753
- function convertToOpenAIResponsesMessages({
2035
+ import { parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
2036
+ import { z as z14 } from "zod/v4";
2037
+ async function convertToOpenAIResponsesMessages({
1754
2038
  prompt,
1755
2039
  systemMessageMode
1756
2040
  }) {
2041
+ var _a, _b, _c, _d, _e, _f;
1757
2042
  const messages = [];
1758
2043
  const warnings = [];
1759
2044
  for (const { role, content } of prompt) {
@@ -1788,7 +2073,7 @@ function convertToOpenAIResponsesMessages({
1788
2073
  messages.push({
1789
2074
  role: "user",
1790
2075
  content: content.map((part, index) => {
1791
- var _a, _b, _c;
2076
+ var _a2, _b2, _c2;
1792
2077
  switch (part.type) {
1793
2078
  case "text": {
1794
2079
  return { type: "input_text", text: part.text };
@@ -1800,7 +2085,7 @@ function convertToOpenAIResponsesMessages({
1800
2085
  type: "input_image",
1801
2086
  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
1802
2087
  // OpenAI specific extension: image detail
1803
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
2088
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
1804
2089
  };
1805
2090
  } else if (part.mediaType === "application/pdf") {
1806
2091
  if (part.data instanceof URL) {
@@ -1810,7 +2095,7 @@ function convertToOpenAIResponsesMessages({
1810
2095
  }
1811
2096
  return {
1812
2097
  type: "input_file",
1813
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
2098
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
1814
2099
  file_data: `data:application/pdf;base64,${part.data}`
1815
2100
  };
1816
2101
  } else {
@@ -1825,22 +2110,72 @@ function convertToOpenAIResponsesMessages({
1825
2110
  break;
1826
2111
  }
1827
2112
  case "assistant": {
2113
+ const reasoningMessages = {};
1828
2114
  for (const part of content) {
1829
2115
  switch (part.type) {
1830
2116
  case "text": {
1831
2117
  messages.push({
1832
2118
  role: "assistant",
1833
- content: [{ type: "output_text", text: part.text }]
2119
+ content: [{ type: "output_text", text: part.text }],
2120
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
1834
2121
  });
1835
2122
  break;
1836
2123
  }
1837
2124
  case "tool-call": {
2125
+ if (part.providerExecuted) {
2126
+ break;
2127
+ }
1838
2128
  messages.push({
1839
2129
  type: "function_call",
1840
2130
  call_id: part.toolCallId,
1841
2131
  name: part.toolName,
1842
- arguments: JSON.stringify(part.args)
2132
+ arguments: JSON.stringify(part.input),
2133
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
2134
+ });
2135
+ break;
2136
+ }
2137
+ case "tool-result": {
2138
+ warnings.push({
2139
+ type: "other",
2140
+ message: `tool result parts in assistant messages are not supported for OpenAI responses`
2141
+ });
2142
+ break;
2143
+ }
2144
+ case "reasoning": {
2145
+ const providerOptions = await parseProviderOptions6({
2146
+ provider: "openai",
2147
+ providerOptions: part.providerOptions,
2148
+ schema: openaiResponsesReasoningProviderOptionsSchema
1843
2149
  });
2150
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2151
+ if (reasoningId != null) {
2152
+ const existingReasoningMessage = reasoningMessages[reasoningId];
2153
+ const summaryParts = [];
2154
+ if (part.text.length > 0) {
2155
+ summaryParts.push({ type: "summary_text", text: part.text });
2156
+ } else if (existingReasoningMessage !== void 0) {
2157
+ warnings.push({
2158
+ type: "other",
2159
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2160
+ });
2161
+ }
2162
+ if (existingReasoningMessage === void 0) {
2163
+ reasoningMessages[reasoningId] = {
2164
+ type: "reasoning",
2165
+ id: reasoningId,
2166
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2167
+ summary: summaryParts
2168
+ };
2169
+ messages.push(reasoningMessages[reasoningId]);
2170
+ } else {
2171
+ existingReasoningMessage.summary.push(...summaryParts);
2172
+ }
2173
+ } else {
2174
+ warnings.push({
2175
+ type: "other",
2176
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
2177
+ });
2178
+ }
1844
2179
  break;
1845
2180
  }
1846
2181
  }
@@ -1849,10 +2184,23 @@ function convertToOpenAIResponsesMessages({
1849
2184
  }
1850
2185
  case "tool": {
1851
2186
  for (const part of content) {
2187
+ const output = part.output;
2188
+ let contentValue;
2189
+ switch (output.type) {
2190
+ case "text":
2191
+ case "error-text":
2192
+ contentValue = output.value;
2193
+ break;
2194
+ case "content":
2195
+ case "json":
2196
+ case "error-json":
2197
+ contentValue = JSON.stringify(output.value);
2198
+ break;
2199
+ }
1852
2200
  messages.push({
1853
2201
  type: "function_call_output",
1854
2202
  call_id: part.toolCallId,
1855
- output: JSON.stringify(part.result)
2203
+ output: contentValue
1856
2204
  });
1857
2205
  }
1858
2206
  break;
@@ -1865,6 +2213,10 @@ function convertToOpenAIResponsesMessages({
1865
2213
  }
1866
2214
  return { messages, warnings };
1867
2215
  }
2216
+ var openaiResponsesReasoningProviderOptionsSchema = z14.object({
2217
+ itemId: z14.string().nullish(),
2218
+ reasoningEncryptedContent: z14.string().nullish()
2219
+ });
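
This schema is what the converter reads off assistant reasoning parts when a prior Responses turn is replayed: itemId groups consecutive summary parts back into one reasoning item, and reasoningEncryptedContent restores encrypted reasoning. A hand-written sketch of such a part (the ids are placeholders; in practice these parts come back verbatim from an earlier response):

    const assistantTurn = {
      role: 'assistant' as const,
      content: [
        {
          type: 'reasoning' as const,
          text: 'Compared both options on cost and latency.',
          providerOptions: {
            openai: {
              itemId: 'rs_123',                // placeholder Responses item id
              reasoningEncryptedContent: null, // populated when include: ['reasoning.encrypted_content'] was requested
            },
          },
        },
        { type: 'text' as const, text: 'Option B wins.' },
      ],
    };
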
1868
2220
 
1869
2221
  // src/responses/map-openai-responses-finish-reason.ts
1870
2222
  function mapOpenAIResponseFinishReason({
@@ -1891,7 +2243,7 @@ import {
1891
2243
  function prepareResponsesTools({
1892
2244
  tools,
1893
2245
  toolChoice,
1894
- strict
2246
+ strictJsonSchema
1895
2247
  }) {
1896
2248
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
1897
2249
  const toolWarnings = [];
@@ -1906,12 +2258,23 @@ function prepareResponsesTools({
1906
2258
  type: "function",
1907
2259
  name: tool.name,
1908
2260
  description: tool.description,
1909
- parameters: tool.parameters,
1910
- strict: strict ? true : void 0
2261
+ parameters: tool.inputSchema,
2262
+ strict: strictJsonSchema
1911
2263
  });
1912
2264
  break;
1913
2265
  case "provider-defined":
1914
2266
  switch (tool.id) {
2267
+ case "openai.file_search": {
2268
+ const args = fileSearchArgsSchema.parse(tool.args);
2269
+ openaiTools.push({
2270
+ type: "file_search",
2271
+ vector_store_ids: args.vectorStoreIds,
2272
+ max_num_results: args.maxNumResults,
2273
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
2274
+ filters: args.filters
2275
+ });
2276
+ break;
2277
+ }
1915
2278
  case "openai.web_search_preview":
1916
2279
  openaiTools.push({
1917
2280
  type: "web_search_preview",
@@ -1941,7 +2304,7 @@ function prepareResponsesTools({
1941
2304
  case "tool":
1942
2305
  return {
1943
2306
  tools: openaiTools,
1944
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
2307
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
1945
2308
  toolWarnings
1946
2309
  };
1947
2310
  default: {
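
file_search arrives as a provider-defined tool (validated against fileSearchArgsSchema) and gets its own toolChoice mapping alongside web_search_preview. A sketch of forcing it, assuming the provider exposes a matching tool factory; the openai.tools.fileSearch name and the vector store id are assumptions:

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';

    const result = await generateText({
      model: openai.responses('gpt-4o-mini'),
      prompt: 'What does the onboarding doc say about VPN access?',
      tools: {
        file_search: openai.tools.fileSearch({
          vectorStoreIds: ['vs_123'], // placeholder
          maxNumResults: 5,
        }),
      },
      // resolves to { type: 'file_search' } in the request body
      toolChoice: { type: 'tool', toolName: 'file_search' },
    });
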
@@ -1957,15 +2320,16 @@ function prepareResponsesTools({
1957
2320
  var OpenAIResponsesLanguageModel = class {
1958
2321
  constructor(modelId, config) {
1959
2322
  this.specificationVersion = "v2";
1960
- this.defaultObjectGenerationMode = "json";
1961
- this.supportsStructuredOutputs = true;
2323
+ this.supportedUrls = {
2324
+ "image/*": [/^https?:\/\/.*$/]
2325
+ };
1962
2326
  this.modelId = modelId;
1963
2327
  this.config = config;
1964
2328
  }
1965
2329
  get provider() {
1966
2330
  return this.config.provider;
1967
2331
  }
1968
- getArgs({
2332
+ async getArgs({
1969
2333
  maxOutputTokens,
1970
2334
  temperature,
1971
2335
  stopSequences,
@@ -2004,17 +2368,17 @@ var OpenAIResponsesLanguageModel = class {
2004
2368
  if (stopSequences != null) {
2005
2369
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
2006
2370
  }
2007
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
2371
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
2008
2372
  prompt,
2009
2373
  systemMessageMode: modelConfig.systemMessageMode
2010
2374
  });
2011
2375
  warnings.push(...messageWarnings);
2012
- const openaiOptions = parseProviderOptions4({
2376
+ const openaiOptions = await parseProviderOptions7({
2013
2377
  provider: "openai",
2014
2378
  providerOptions,
2015
2379
  schema: openaiResponsesProviderOptionsSchema
2016
2380
  });
2017
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
2381
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
2018
2382
  const baseArgs = {
2019
2383
  model: this.modelId,
2020
2384
  input: messages,
@@ -2025,7 +2389,7 @@ var OpenAIResponsesLanguageModel = class {
2025
2389
  text: {
2026
2390
  format: responseFormat.schema != null ? {
2027
2391
  type: "json_schema",
2028
- strict: isStrict,
2392
+ strict: strictJsonSchema,
2029
2393
  name: (_b = responseFormat.name) != null ? _b : "response",
2030
2394
  description: responseFormat.description,
2031
2395
  schema: responseFormat.schema
@@ -2039,9 +2403,18 @@ var OpenAIResponsesLanguageModel = class {
2039
2403
  store: openaiOptions == null ? void 0 : openaiOptions.store,
2040
2404
  user: openaiOptions == null ? void 0 : openaiOptions.user,
2041
2405
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2406
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
2407
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
2042
2408
  // model-specific settings:
2043
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2044
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
2409
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
2410
+ reasoning: {
2411
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2412
+ effort: openaiOptions.reasoningEffort
2413
+ },
2414
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
2415
+ summary: openaiOptions.reasoningSummary
2416
+ }
2417
+ }
2045
2418
  },
2046
2419
  ...modelConfig.requiredAutoTruncation && {
2047
2420
  truncation: "auto"
@@ -2064,6 +2437,37 @@ var OpenAIResponsesLanguageModel = class {
2064
2437
  details: "topP is not supported for reasoning models"
2065
2438
  });
2066
2439
  }
2440
+ } else {
2441
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
2442
+ warnings.push({
2443
+ type: "unsupported-setting",
2444
+ setting: "reasoningEffort",
2445
+ details: "reasoningEffort is not supported for non-reasoning models"
2446
+ });
2447
+ }
2448
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
2449
+ warnings.push({
2450
+ type: "unsupported-setting",
2451
+ setting: "reasoningSummary",
2452
+ details: "reasoningSummary is not supported for non-reasoning models"
2453
+ });
2454
+ }
2455
+ }
2456
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
2457
+ warnings.push({
2458
+ type: "unsupported-setting",
2459
+ setting: "serviceTier",
2460
+ details: "flex processing is only available for o3 and o4-mini models"
2461
+ });
2462
+ delete baseArgs.service_tier;
2463
+ }
2464
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
2465
+ warnings.push({
2466
+ type: "unsupported-setting",
2467
+ setting: "serviceTier",
2468
+ details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
2469
+ });
2470
+ delete baseArgs.service_tier;
2067
2471
  }
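
serviceTier is now validated against the model id before the request goes out: flex survives only on o3/o4-mini, priority only on gpt-4-class, o3 and o4-mini models; otherwise the field is deleted and an unsupported-setting warning is pushed instead of failing the call. A sketch:

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';

    const { text, warnings } = await generateText({
      model: openai.responses('o4-mini'),
      prompt: 'Summarize the incident report.',
      providerOptions: {
        // on e.g. gpt-4o this would be dropped, with a warning in `warnings`
        openai: { serviceTier: 'flex' },
      },
    });
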
2068
2472
  const {
2069
2473
  tools: openaiTools,
@@ -2072,7 +2476,7 @@ var OpenAIResponsesLanguageModel = class {
2072
2476
  } = prepareResponsesTools({
2073
2477
  tools,
2074
2478
  toolChoice,
2075
- strict: isStrict
2479
+ strictJsonSchema
2076
2480
  });
2077
2481
  return {
2078
2482
  args: {
@@ -2084,84 +2488,142 @@ var OpenAIResponsesLanguageModel = class {
2084
2488
  };
2085
2489
  }
2086
2490
  async doGenerate(options) {
2087
- var _a, _b, _c, _d, _e, _f, _g, _h;
2088
- const { args: body, warnings } = this.getArgs(options);
2491
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
2492
+ const { args: body, warnings } = await this.getArgs(options);
2493
+ const url = this.config.url({
2494
+ path: "/responses",
2495
+ modelId: this.modelId
2496
+ });
2089
2497
  const {
2090
2498
  responseHeaders,
2091
2499
  value: response,
2092
2500
  rawValue: rawResponse
2093
2501
  } = await postJsonToApi6({
2094
- url: this.config.url({
2095
- path: "/responses",
2096
- modelId: this.modelId
2097
- }),
2502
+ url,
2098
2503
  headers: combineHeaders7(this.config.headers(), options.headers),
2099
2504
  body,
2100
2505
  failedResponseHandler: openaiFailedResponseHandler,
2101
2506
  successfulResponseHandler: createJsonResponseHandler6(
2102
- z9.object({
2103
- id: z9.string(),
2104
- created_at: z9.number(),
2105
- model: z9.string(),
2106
- output: z9.array(
2107
- z9.discriminatedUnion("type", [
2108
- z9.object({
2109
- type: z9.literal("message"),
2110
- role: z9.literal("assistant"),
2111
- content: z9.array(
2112
- z9.object({
2113
- type: z9.literal("output_text"),
2114
- text: z9.string(),
2115
- annotations: z9.array(
2116
- z9.object({
2117
- type: z9.literal("url_citation"),
2118
- start_index: z9.number(),
2119
- end_index: z9.number(),
2120
- url: z9.string(),
2121
- title: z9.string()
2507
+ z15.object({
2508
+ id: z15.string(),
2509
+ created_at: z15.number(),
2510
+ error: z15.object({
2511
+ code: z15.string(),
2512
+ message: z15.string()
2513
+ }).nullish(),
2514
+ model: z15.string(),
2515
+ output: z15.array(
2516
+ z15.discriminatedUnion("type", [
2517
+ z15.object({
2518
+ type: z15.literal("message"),
2519
+ role: z15.literal("assistant"),
2520
+ id: z15.string(),
2521
+ content: z15.array(
2522
+ z15.object({
2523
+ type: z15.literal("output_text"),
2524
+ text: z15.string(),
2525
+ annotations: z15.array(
2526
+ z15.object({
2527
+ type: z15.literal("url_citation"),
2528
+ start_index: z15.number(),
2529
+ end_index: z15.number(),
2530
+ url: z15.string(),
2531
+ title: z15.string()
2122
2532
  })
2123
2533
  )
2124
2534
  })
2125
2535
  )
2126
2536
  }),
2127
- z9.object({
2128
- type: z9.literal("function_call"),
2129
- call_id: z9.string(),
2130
- name: z9.string(),
2131
- arguments: z9.string()
2537
+ z15.object({
2538
+ type: z15.literal("function_call"),
2539
+ call_id: z15.string(),
2540
+ name: z15.string(),
2541
+ arguments: z15.string(),
2542
+ id: z15.string()
2543
+ }),
2544
+ z15.object({
2545
+ type: z15.literal("web_search_call"),
2546
+ id: z15.string(),
2547
+ status: z15.string().optional()
2132
2548
  }),
2133
- z9.object({
2134
- type: z9.literal("web_search_call")
2549
+ z15.object({
2550
+ type: z15.literal("computer_call"),
2551
+ id: z15.string(),
2552
+ status: z15.string().optional()
2135
2553
  }),
2136
- z9.object({
2137
- type: z9.literal("computer_call")
2554
+ z15.object({
2555
+ type: z15.literal("file_search_call"),
2556
+ id: z15.string(),
2557
+ status: z15.string().optional()
2138
2558
  }),
2139
- z9.object({
2140
- type: z9.literal("reasoning")
2559
+ z15.object({
2560
+ type: z15.literal("reasoning"),
2561
+ id: z15.string(),
2562
+ encrypted_content: z15.string().nullish(),
2563
+ summary: z15.array(
2564
+ z15.object({
2565
+ type: z15.literal("summary_text"),
2566
+ text: z15.string()
2567
+ })
2568
+ )
2141
2569
  })
2142
2570
  ])
2143
2571
  ),
2144
- incomplete_details: z9.object({ reason: z9.string() }).nullable(),
2145
- usage: usageSchema
2572
+ incomplete_details: z15.object({ reason: z15.string() }).nullable(),
2573
+ usage: usageSchema2
2146
2574
  })
2147
2575
  ),
2148
2576
  abortSignal: options.abortSignal,
2149
2577
  fetch: this.config.fetch
2150
2578
  });
2579
+ if (response.error) {
2580
+ throw new APICallError({
2581
+ message: response.error.message,
2582
+ url,
2583
+ requestBodyValues: body,
2584
+ statusCode: 400,
2585
+ responseHeaders,
2586
+ responseBody: rawResponse,
2587
+ isRetryable: false
2588
+ });
2589
+ }
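
Error payloads that arrive inside an otherwise-successful /responses body are now rethrown as APICallError rather than passed through as output. A sketch of handling them, using APICallError.isInstance from @ai-sdk/provider:

    import { APICallError } from '@ai-sdk/provider';
    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';

    try {
      await generateText({ model: openai.responses('gpt-4o'), prompt: 'Hello' });
    } catch (error) {
      if (APICallError.isInstance(error)) {
        // in-body response errors surface with statusCode 400 and isRetryable: false
        console.error(error.message, error.statusCode);
      } else {
        throw error;
      }
    }
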
2151
2590
  const content = [];
2152
2591
  for (const part of response.output) {
2153
2592
  switch (part.type) {
2593
+ case "reasoning": {
2594
+ if (part.summary.length === 0) {
2595
+ part.summary.push({ type: "summary_text", text: "" });
2596
+ }
2597
+ for (const summary of part.summary) {
2598
+ content.push({
2599
+ type: "reasoning",
2600
+ text: summary.text,
2601
+ providerMetadata: {
2602
+ openai: {
2603
+ itemId: part.id,
2604
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
2605
+ }
2606
+ }
2607
+ });
2608
+ }
2609
+ break;
2610
+ }
2154
2611
  case "message": {
2155
2612
  for (const contentPart of part.content) {
2156
2613
  content.push({
2157
2614
  type: "text",
2158
- text: contentPart.text
2615
+ text: contentPart.text,
2616
+ providerMetadata: {
2617
+ openai: {
2618
+ itemId: part.id
2619
+ }
2620
+ }
2159
2621
  });
2160
2622
  for (const annotation of contentPart.annotations) {
2161
2623
  content.push({
2162
2624
  type: "source",
2163
2625
  sourceType: "url",
2164
- id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
2626
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
2165
2627
  url: annotation.url,
2166
2628
  title: annotation.title
2167
2629
  });
@@ -2172,10 +2634,71 @@ var OpenAIResponsesLanguageModel = class {
2172
2634
  case "function_call": {
2173
2635
  content.push({
2174
2636
  type: "tool-call",
2175
- toolCallType: "function",
2176
2637
  toolCallId: part.call_id,
2177
2638
  toolName: part.name,
2178
- args: part.arguments
2639
+ input: part.arguments,
2640
+ providerMetadata: {
2641
+ openai: {
2642
+ itemId: part.id
2643
+ }
2644
+ }
2645
+ });
2646
+ break;
2647
+ }
2648
+ case "web_search_call": {
2649
+ content.push({
2650
+ type: "tool-call",
2651
+ toolCallId: part.id,
2652
+ toolName: "web_search_preview",
2653
+ input: "",
2654
+ providerExecuted: true
2655
+ });
2656
+ content.push({
2657
+ type: "tool-result",
2658
+ toolCallId: part.id,
2659
+ toolName: "web_search_preview",
2660
+ result: { status: part.status || "completed" },
2661
+ providerExecuted: true
2662
+ });
2663
+ break;
2664
+ }
2665
+ case "computer_call": {
2666
+ content.push({
2667
+ type: "tool-call",
2668
+ toolCallId: part.id,
2669
+ toolName: "computer_use",
2670
+ input: "",
2671
+ providerExecuted: true
2672
+ });
2673
+ content.push({
2674
+ type: "tool-result",
2675
+ toolCallId: part.id,
2676
+ toolName: "computer_use",
2677
+ result: {
2678
+ type: "computer_use_tool_result",
2679
+ status: part.status || "completed"
2680
+ },
2681
+ providerExecuted: true
2682
+ });
2683
+ break;
2684
+ }
2685
+ case "file_search_call": {
2686
+ content.push({
2687
+ type: "tool-call",
2688
+ toolCallId: part.id,
2689
+ toolName: "file_search",
2690
+ input: "",
2691
+ providerExecuted: true
2692
+ });
2693
+ content.push({
2694
+ type: "tool-result",
2695
+ toolCallId: part.id,
2696
+ toolName: "file_search",
2697
+ result: {
2698
+ type: "file_search_tool_result",
2699
+ status: part.status || "completed"
2700
+ },
2701
+ providerExecuted: true
2179
2702
  });
2180
2703
  break;
2181
2704
  }
@@ -2184,12 +2707,15 @@ var OpenAIResponsesLanguageModel = class {
2184
2707
  return {
2185
2708
  content,
2186
2709
  finishReason: mapOpenAIResponseFinishReason({
2187
- finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
2710
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
2188
2711
  hasToolCalls: content.some((part) => part.type === "tool-call")
2189
2712
  }),
2190
2713
  usage: {
2191
2714
  inputTokens: response.usage.input_tokens,
2192
- outputTokens: response.usage.output_tokens
2715
+ outputTokens: response.usage.output_tokens,
2716
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
2717
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
2718
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
2193
2719
  },
2194
2720
  request: { body },
2195
2721
  response: {
@@ -2201,16 +2727,14 @@ var OpenAIResponsesLanguageModel = class {
2201
2727
  },
2202
2728
  providerMetadata: {
2203
2729
  openai: {
2204
- responseId: response.id,
2205
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
2206
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
2730
+ responseId: response.id
2207
2731
  }
2208
2732
  },
2209
2733
  warnings
2210
2734
  };
2211
2735
  }
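
Token accounting moves out of providerMetadata: reasoningTokens and cachedInputTokens are now first-class usage fields, totalTokens is computed as input plus output, and providerMetadata.openai keeps only responseId. A sketch of reading the new shape:

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';

    const result = await generateText({
      model: openai.responses('o3-mini'),
      prompt: 'Outline the migration plan.',
    });

    const { inputTokens, outputTokens, totalTokens, reasoningTokens, cachedInputTokens } = result.usage;
    console.log({ inputTokens, outputTokens, totalTokens, reasoningTokens, cachedInputTokens });
    console.log(result.providerMetadata?.openai?.responseId);
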
2212
2736
  async doStream(options) {
2213
- const { args: body, warnings } = this.getArgs(options);
2737
+ const { args: body, warnings } = await this.getArgs(options);
2214
2738
  const { responseHeaders, value: response } = await postJsonToApi6({
2215
2739
  url: this.config.url({
2216
2740
  path: "/responses",
@@ -2232,13 +2756,13 @@ var OpenAIResponsesLanguageModel = class {
2232
2756
  let finishReason = "unknown";
2233
2757
  const usage = {
2234
2758
  inputTokens: void 0,
2235
- outputTokens: void 0
2759
+ outputTokens: void 0,
2760
+ totalTokens: void 0
2236
2761
  };
2237
- let cachedPromptTokens = null;
2238
- let reasoningTokens = null;
2239
2762
  let responseId = null;
2240
2763
  const ongoingToolCalls = {};
2241
2764
  let hasToolCalls = false;
2765
+ const activeReasoning = {};
2242
2766
  return {
2243
2767
  stream: response.pipeThrough(
2244
2768
  new TransformStream({
@@ -2246,7 +2770,10 @@ var OpenAIResponsesLanguageModel = class {
2246
2770
  controller.enqueue({ type: "stream-start", warnings });
2247
2771
  },
2248
2772
  transform(chunk, controller) {
2249
- var _a, _b, _c, _d, _e, _f, _g, _h;
2773
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2774
+ if (options.includeRawChunks) {
2775
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2776
+ }
2250
2777
  if (!chunk.success) {
2251
2778
  finishReason = "error";
2252
2779
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2260,22 +2787,151 @@ var OpenAIResponsesLanguageModel = class {
2260
2787
  toolCallId: value.item.call_id
2261
2788
  };
2262
2789
  controller.enqueue({
2263
- type: "tool-call-delta",
2264
- toolCallType: "function",
2790
+ type: "tool-input-start",
2791
+ id: value.item.call_id,
2792
+ toolName: value.item.name
2793
+ });
2794
+ } else if (value.item.type === "web_search_call") {
2795
+ ongoingToolCalls[value.output_index] = {
2796
+ toolName: "web_search_preview",
2797
+ toolCallId: value.item.id
2798
+ };
2799
+ controller.enqueue({
2800
+ type: "tool-input-start",
2801
+ id: value.item.id,
2802
+ toolName: "web_search_preview"
2803
+ });
2804
+ } else if (value.item.type === "computer_call") {
2805
+ ongoingToolCalls[value.output_index] = {
2806
+ toolName: "computer_use",
2807
+ toolCallId: value.item.id
2808
+ };
2809
+ controller.enqueue({
2810
+ type: "tool-input-start",
2811
+ id: value.item.id,
2812
+ toolName: "computer_use"
2813
+ });
2814
+ } else if (value.item.type === "message") {
2815
+ controller.enqueue({
2816
+ type: "text-start",
2817
+ id: value.item.id,
2818
+ providerMetadata: {
2819
+ openai: {
2820
+ itemId: value.item.id
2821
+ }
2822
+ }
2823
+ });
2824
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
2825
+ activeReasoning[value.item.id] = {
2826
+ encryptedContent: value.item.encrypted_content,
2827
+ summaryParts: [0]
2828
+ };
2829
+ controller.enqueue({
2830
+ type: "reasoning-start",
2831
+ id: `${value.item.id}:0`,
2832
+ providerMetadata: {
2833
+ openai: {
2834
+ itemId: value.item.id,
2835
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
2836
+ }
2837
+ }
2838
+ });
2839
+ }
2840
+ } else if (isResponseOutputItemDoneChunk(value)) {
2841
+ if (value.item.type === "function_call") {
2842
+ ongoingToolCalls[value.output_index] = void 0;
2843
+ hasToolCalls = true;
2844
+ controller.enqueue({
2845
+ type: "tool-input-end",
2846
+ id: value.item.call_id
2847
+ });
2848
+ controller.enqueue({
2849
+ type: "tool-call",
2265
2850
  toolCallId: value.item.call_id,
2266
2851
  toolName: value.item.name,
2267
- argsTextDelta: value.item.arguments
2852
+ input: value.item.arguments,
2853
+ providerMetadata: {
2854
+ openai: {
2855
+ itemId: value.item.id
2856
+ }
2857
+ }
2858
+ });
2859
+ } else if (value.item.type === "web_search_call") {
2860
+ ongoingToolCalls[value.output_index] = void 0;
2861
+ hasToolCalls = true;
2862
+ controller.enqueue({
2863
+ type: "tool-input-end",
2864
+ id: value.item.id
2865
+ });
2866
+ controller.enqueue({
2867
+ type: "tool-call",
2868
+ toolCallId: value.item.id,
2869
+ toolName: "web_search_preview",
2870
+ input: "",
2871
+ providerExecuted: true
2268
2872
  });
2873
+ controller.enqueue({
2874
+ type: "tool-result",
2875
+ toolCallId: value.item.id,
2876
+ toolName: "web_search_preview",
2877
+ result: {
2878
+ type: "web_search_tool_result",
2879
+ status: value.item.status || "completed"
2880
+ },
2881
+ providerExecuted: true
2882
+ });
2883
+ } else if (value.item.type === "computer_call") {
2884
+ ongoingToolCalls[value.output_index] = void 0;
2885
+ hasToolCalls = true;
2886
+ controller.enqueue({
2887
+ type: "tool-input-end",
2888
+ id: value.item.id
2889
+ });
2890
+ controller.enqueue({
2891
+ type: "tool-call",
2892
+ toolCallId: value.item.id,
2893
+ toolName: "computer_use",
2894
+ input: "",
2895
+ providerExecuted: true
2896
+ });
2897
+ controller.enqueue({
2898
+ type: "tool-result",
2899
+ toolCallId: value.item.id,
2900
+ toolName: "computer_use",
2901
+ result: {
2902
+ type: "computer_use_tool_result",
2903
+ status: value.item.status || "completed"
2904
+ },
2905
+ providerExecuted: true
2906
+ });
2907
+ } else if (value.item.type === "message") {
2908
+ controller.enqueue({
2909
+ type: "text-end",
2910
+ id: value.item.id
2911
+ });
2912
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
2913
+ const activeReasoningPart = activeReasoning[value.item.id];
2914
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
2915
+ controller.enqueue({
2916
+ type: "reasoning-end",
2917
+ id: `${value.item.id}:${summaryIndex}`,
2918
+ providerMetadata: {
2919
+ openai: {
2920
+ itemId: value.item.id,
2921
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
2922
+ }
2923
+ }
2924
+ });
2925
+ }
2926
+ delete activeReasoning[value.item.id];
2269
2927
  }
2270
2928
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2271
2929
  const toolCall = ongoingToolCalls[value.output_index];
2272
2930
  if (toolCall != null) {
2273
2931
  controller.enqueue({
2274
- type: "tool-call-delta",
2275
- toolCallType: "function",
2276
- toolCallId: toolCall.toolCallId,
2277
- toolName: toolCall.toolName,
2278
- argsTextDelta: value.delta
2932
+ type: "tool-input-delta",
2933
+ id: toolCall.toolCallId,
2934
+ delta: value.delta
2279
2935
  });
2280
2936
  }
2281
2937
  } else if (isResponseCreatedChunk(value)) {
@@ -2288,36 +2944,57 @@ var OpenAIResponsesLanguageModel = class {
2288
2944
  });
2289
2945
  } else if (isTextDeltaChunk(value)) {
2290
2946
  controller.enqueue({
2291
- type: "text",
2292
- text: value.delta
2947
+ type: "text-delta",
2948
+ id: value.item_id,
2949
+ delta: value.delta
2293
2950
  });
2294
- } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
2295
- ongoingToolCalls[value.output_index] = void 0;
2296
- hasToolCalls = true;
2951
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
2952
+ if (value.summary_index > 0) {
2953
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
2954
+ value.summary_index
2955
+ );
2956
+ controller.enqueue({
2957
+ type: "reasoning-start",
2958
+ id: `${value.item_id}:${value.summary_index}`,
2959
+ providerMetadata: {
2960
+ openai: {
2961
+ itemId: value.item_id,
2962
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
2963
+ }
2964
+ }
2965
+ });
2966
+ }
2967
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2297
2968
  controller.enqueue({
2298
- type: "tool-call",
2299
- toolCallType: "function",
2300
- toolCallId: value.item.call_id,
2301
- toolName: value.item.name,
2302
- args: value.item.arguments
2969
+ type: "reasoning-delta",
2970
+ id: `${value.item_id}:${value.summary_index}`,
2971
+ delta: value.delta,
2972
+ providerMetadata: {
2973
+ openai: {
2974
+ itemId: value.item_id
2975
+ }
2976
+ }
2303
2977
  });
2304
2978
  } else if (isResponseFinishedChunk(value)) {
2305
2979
  finishReason = mapOpenAIResponseFinishReason({
2306
- finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2980
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
2307
2981
  hasToolCalls
2308
2982
  });
2309
2983
  usage.inputTokens = value.response.usage.input_tokens;
2310
2984
  usage.outputTokens = value.response.usage.output_tokens;
2311
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2312
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2985
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
2986
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
2987
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
2313
2988
  } else if (isResponseAnnotationAddedChunk(value)) {
2314
2989
  controller.enqueue({
2315
2990
  type: "source",
2316
2991
  sourceType: "url",
2317
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
2992
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
2318
2993
  url: value.annotation.url,
2319
2994
  title: value.annotation.title
2320
2995
  });
2996
+ } else if (isErrorChunk(value)) {
2997
+ controller.enqueue({ type: "error", error: value });
2321
2998
  }
2322
2999
  },
2323
3000
  flush(controller) {
@@ -2325,13 +3002,9 @@ var OpenAIResponsesLanguageModel = class {
2325
3002
  type: "finish",
2326
3003
  finishReason,
2327
3004
  usage,
2328
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
2329
- providerMetadata: {
2330
- openai: {
2331
- responseId,
2332
- cachedPromptTokens,
2333
- reasoningTokens
2334
- }
3005
+ providerMetadata: {
3006
+ openai: {
3007
+ responseId
2335
3008
  }
2336
3009
  }
2337
3010
  });
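
The stream protocol changes from monolithic text and tool-call-delta parts to start/delta/end triples (text-*, reasoning-*, tool-input-*), with reasoning part ids shaped as `${itemId}:${summaryIndex}`. Grounded in the enqueue calls above, a consumer of the raw model stream might look like this (a sketch at the LanguageModelV2 level rather than the higher-level streamText API; Node 18+ assumed so the ReadableStream is async-iterable):

    const { stream } = await model.doStream({ prompt, includeRawChunks: false }); // other call options elided

    for await (const part of stream) {
      switch (part.type) {
        case 'text-delta':
          process.stdout.write(part.delta);
          break;
        case 'reasoning-delta': // id is `${itemId}:${summaryIndex}`
          break;
        case 'tool-input-delta': // incremental JSON arguments for part.id
          break;
        case 'finish':
          console.log(part.usage.totalTokens);
          break;
      }
    }
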
@@ -2343,87 +3016,151 @@ var OpenAIResponsesLanguageModel = class {
2343
3016
  };
2344
3017
  }
2345
3018
  };
2346
- var usageSchema = z9.object({
2347
- input_tokens: z9.number(),
2348
- input_tokens_details: z9.object({ cached_tokens: z9.number().nullish() }).nullish(),
2349
- output_tokens: z9.number(),
2350
- output_tokens_details: z9.object({ reasoning_tokens: z9.number().nullish() }).nullish()
3019
+ var usageSchema2 = z15.object({
3020
+ input_tokens: z15.number(),
3021
+ input_tokens_details: z15.object({ cached_tokens: z15.number().nullish() }).nullish(),
3022
+ output_tokens: z15.number(),
3023
+ output_tokens_details: z15.object({ reasoning_tokens: z15.number().nullish() }).nullish()
3024
+ });
3025
+ var textDeltaChunkSchema = z15.object({
3026
+ type: z15.literal("response.output_text.delta"),
3027
+ item_id: z15.string(),
3028
+ delta: z15.string()
2351
3029
  });
2352
- var textDeltaChunkSchema = z9.object({
2353
- type: z9.literal("response.output_text.delta"),
2354
- delta: z9.string()
3030
+ var errorChunkSchema = z15.object({
3031
+ type: z15.literal("error"),
3032
+ code: z15.string(),
3033
+ message: z15.string(),
3034
+ param: z15.string().nullish(),
3035
+ sequence_number: z15.number()
2355
3036
  });
2356
- var responseFinishedChunkSchema = z9.object({
2357
- type: z9.enum(["response.completed", "response.incomplete"]),
2358
- response: z9.object({
2359
- incomplete_details: z9.object({ reason: z9.string() }).nullish(),
2360
- usage: usageSchema
3037
+ var responseFinishedChunkSchema = z15.object({
3038
+ type: z15.enum(["response.completed", "response.incomplete"]),
3039
+ response: z15.object({
3040
+ incomplete_details: z15.object({ reason: z15.string() }).nullish(),
3041
+ usage: usageSchema2
2361
3042
  })
2362
3043
  });
2363
- var responseCreatedChunkSchema = z9.object({
2364
- type: z9.literal("response.created"),
2365
- response: z9.object({
2366
- id: z9.string(),
2367
- created_at: z9.number(),
2368
- model: z9.string()
3044
+ var responseCreatedChunkSchema = z15.object({
3045
+ type: z15.literal("response.created"),
3046
+ response: z15.object({
3047
+ id: z15.string(),
3048
+ created_at: z15.number(),
3049
+ model: z15.string()
2369
3050
  })
2370
3051
  });
2371
- var responseOutputItemDoneSchema = z9.object({
2372
- type: z9.literal("response.output_item.done"),
2373
- output_index: z9.number(),
2374
- item: z9.discriminatedUnion("type", [
2375
- z9.object({
2376
- type: z9.literal("message")
3052
+ var responseOutputItemAddedSchema = z15.object({
3053
+ type: z15.literal("response.output_item.added"),
3054
+ output_index: z15.number(),
3055
+ item: z15.discriminatedUnion("type", [
3056
+ z15.object({
3057
+ type: z15.literal("message"),
3058
+ id: z15.string()
3059
+ }),
3060
+ z15.object({
3061
+ type: z15.literal("reasoning"),
3062
+ id: z15.string(),
3063
+ encrypted_content: z15.string().nullish()
3064
+ }),
3065
+ z15.object({
3066
+ type: z15.literal("function_call"),
3067
+ id: z15.string(),
3068
+ call_id: z15.string(),
3069
+ name: z15.string(),
3070
+ arguments: z15.string()
3071
+ }),
3072
+ z15.object({
3073
+ type: z15.literal("web_search_call"),
3074
+ id: z15.string(),
3075
+ status: z15.string()
2377
3076
  }),
2378
- z9.object({
2379
- type: z9.literal("function_call"),
2380
- id: z9.string(),
2381
- call_id: z9.string(),
2382
- name: z9.string(),
2383
- arguments: z9.string(),
2384
- status: z9.literal("completed")
3077
+ z15.object({
3078
+ type: z15.literal("computer_call"),
3079
+ id: z15.string(),
3080
+ status: z15.string()
3081
+ }),
3082
+ z15.object({
3083
+ type: z15.literal("file_search_call"),
3084
+ id: z15.string(),
3085
+ status: z15.string()
2385
3086
  })
2386
3087
  ])
2387
3088
  });
2388
- var responseFunctionCallArgumentsDeltaSchema = z9.object({
2389
- type: z9.literal("response.function_call_arguments.delta"),
2390
- item_id: z9.string(),
2391
- output_index: z9.number(),
2392
- delta: z9.string()
2393
- });
2394
- var responseOutputItemAddedSchema = z9.object({
2395
- type: z9.literal("response.output_item.added"),
2396
- output_index: z9.number(),
2397
- item: z9.discriminatedUnion("type", [
2398
- z9.object({
2399
- type: z9.literal("message")
3089
+ var responseOutputItemDoneSchema = z15.object({
3090
+ type: z15.literal("response.output_item.done"),
3091
+ output_index: z15.number(),
3092
+ item: z15.discriminatedUnion("type", [
3093
+ z15.object({
3094
+ type: z15.literal("message"),
3095
+ id: z15.string()
3096
+ }),
3097
+ z15.object({
3098
+ type: z15.literal("reasoning"),
3099
+ id: z15.string(),
3100
+ encrypted_content: z15.string().nullish()
3101
+ }),
3102
+ z15.object({
3103
+ type: z15.literal("function_call"),
3104
+ id: z15.string(),
3105
+ call_id: z15.string(),
3106
+ name: z15.string(),
3107
+ arguments: z15.string(),
3108
+ status: z15.literal("completed")
3109
+ }),
3110
+ z15.object({
3111
+ type: z15.literal("web_search_call"),
3112
+ id: z15.string(),
3113
+ status: z15.literal("completed")
3114
+ }),
3115
+ z15.object({
3116
+ type: z15.literal("computer_call"),
3117
+ id: z15.string(),
3118
+ status: z15.literal("completed")
2400
3119
  }),
2401
- z9.object({
2402
- type: z9.literal("function_call"),
2403
- id: z9.string(),
2404
- call_id: z9.string(),
2405
- name: z9.string(),
2406
- arguments: z9.string()
3120
+ z15.object({
3121
+ type: z15.literal("file_search_call"),
3122
+ id: z15.string(),
3123
+ status: z15.literal("completed")
2407
3124
  })
2408
3125
  ])
2409
3126
  });
2410
- var responseAnnotationAddedSchema = z9.object({
2411
- type: z9.literal("response.output_text.annotation.added"),
2412
- annotation: z9.object({
2413
- type: z9.literal("url_citation"),
2414
- url: z9.string(),
2415
- title: z9.string()
3127
+ var responseFunctionCallArgumentsDeltaSchema = z15.object({
3128
+ type: z15.literal("response.function_call_arguments.delta"),
3129
+ item_id: z15.string(),
3130
+ output_index: z15.number(),
3131
+ delta: z15.string()
3132
+ });
3133
+ var responseAnnotationAddedSchema = z15.object({
3134
+ type: z15.literal("response.output_text.annotation.added"),
3135
+ annotation: z15.object({
3136
+ type: z15.literal("url_citation"),
3137
+ url: z15.string(),
3138
+ title: z15.string()
2416
3139
  })
2417
3140
  });
2418
- var openaiResponsesChunkSchema = z9.union([
3141
+ var responseReasoningSummaryPartAddedSchema = z15.object({
3142
+ type: z15.literal("response.reasoning_summary_part.added"),
3143
+ item_id: z15.string(),
3144
+ summary_index: z15.number()
3145
+ });
3146
+ var responseReasoningSummaryTextDeltaSchema = z15.object({
3147
+ type: z15.literal("response.reasoning_summary_text.delta"),
3148
+ item_id: z15.string(),
3149
+ summary_index: z15.number(),
3150
+ delta: z15.string()
3151
+ });
3152
+ var openaiResponsesChunkSchema = z15.union([
2419
3153
  textDeltaChunkSchema,
2420
3154
  responseFinishedChunkSchema,
2421
3155
  responseCreatedChunkSchema,
3156
+ responseOutputItemAddedSchema,
2422
3157
  responseOutputItemDoneSchema,
2423
3158
  responseFunctionCallArgumentsDeltaSchema,
2424
- responseOutputItemAddedSchema,
2425
3159
  responseAnnotationAddedSchema,
2426
- z9.object({ type: z9.string() }).passthrough()
3160
+ responseReasoningSummaryPartAddedSchema,
3161
+ responseReasoningSummaryTextDeltaSchema,
3162
+ errorChunkSchema,
3163
+ z15.object({ type: z15.string() }).loose()
2427
3164
  // fallback for unknown chunks
2428
3165
  ]);
2429
3166
  function isTextDeltaChunk(chunk) {
@@ -2432,6 +3169,9 @@ function isTextDeltaChunk(chunk) {
2432
3169
  function isResponseOutputItemDoneChunk(chunk) {
2433
3170
  return chunk.type === "response.output_item.done";
2434
3171
  }
3172
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
3173
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
3174
+ }
2435
3175
  function isResponseFinishedChunk(chunk) {
2436
3176
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
2437
3177
  }
@@ -2444,11 +3184,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
2444
3184
  function isResponseOutputItemAddedChunk(chunk) {
2445
3185
  return chunk.type === "response.output_item.added";
2446
3186
  }
3187
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
3188
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
3189
+ }
2447
3190
  function isResponseAnnotationAddedChunk(chunk) {
2448
3191
  return chunk.type === "response.output_text.annotation.added";
2449
3192
  }
3193
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
3194
+ return chunk.type === "response.reasoning_summary_part.added";
3195
+ }
3196
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
3197
+ return chunk.type === "response.reasoning_summary_text.delta";
3198
+ }
3199
+ function isErrorChunk(chunk) {
3200
+ return chunk.type === "error";
3201
+ }
2450
3202
  function getResponsesModelConfig(modelId) {
2451
- if (modelId.startsWith("o")) {
3203
+ if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
2452
3204
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
2453
3205
  return {
2454
3206
  isReasoningModel: true,
@@ -2468,15 +3220,24 @@ function getResponsesModelConfig(modelId) {
2468
3220
  requiredAutoTruncation: false
2469
3221
  };
2470
3222
  }
2471
- var openaiResponsesProviderOptionsSchema = z9.object({
2472
- metadata: z9.any().nullish(),
2473
- parallelToolCalls: z9.boolean().nullish(),
2474
- previousResponseId: z9.string().nullish(),
2475
- store: z9.boolean().nullish(),
2476
- user: z9.string().nullish(),
2477
- reasoningEffort: z9.string().nullish(),
2478
- strictSchemas: z9.boolean().nullish(),
2479
- instructions: z9.string().nullish()
3223
+ function supportsFlexProcessing2(modelId) {
3224
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3225
+ }
3226
+ function supportsPriorityProcessing2(modelId) {
3227
+ return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3228
+ }
3229
+ var openaiResponsesProviderOptionsSchema = z15.object({
3230
+ metadata: z15.any().nullish(),
3231
+ parallelToolCalls: z15.boolean().nullish(),
3232
+ previousResponseId: z15.string().nullish(),
3233
+ store: z15.boolean().nullish(),
3234
+ user: z15.string().nullish(),
3235
+ reasoningEffort: z15.string().nullish(),
3236
+ strictJsonSchema: z15.boolean().nullish(),
3237
+ instructions: z15.string().nullish(),
3238
+ reasoningSummary: z15.string().nullish(),
3239
+ serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
3240
+ include: z15.array(z15.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
2480
3241
  });
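
Taken together with the schema above: strictJsonSchema replaces strictSchemas and now defaults to false, reasoningSummary requests summarized reasoning from reasoning models, and include opts into extra response payloads such as encrypted reasoning content. A combined sketch:

    import { openai } from '@ai-sdk/openai';
    import { streamText } from 'ai';

    const result = streamText({
      model: openai.responses('o4-mini'),
      prompt: 'Plan a three-step rollout.',
      providerOptions: {
        openai: {
          reasoningEffort: 'medium',
          reasoningSummary: 'auto',                 // surfaces reasoning-* stream parts
          include: ['reasoning.encrypted_content'], // round-trips via providerOptions on reasoning parts
          store: false,
        },
      },
    });
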
2481
3242
  export {
2482
3243
  OpenAIChatLanguageModel,
@@ -2486,7 +3247,11 @@ export {
2486
3247
  OpenAIResponsesLanguageModel,
2487
3248
  OpenAISpeechModel,
2488
3249
  OpenAITranscriptionModel,
3250
+ hasDefaultResponseFormat,
2489
3251
  modelMaxImagesPerCall,
3252
+ openAITranscriptionProviderOptions,
3253
+ openaiCompletionProviderOptions,
3254
+ openaiEmbeddingProviderOptions,
2490
3255
  openaiProviderOptions
2491
3256
  };
2492
3257
  //# sourceMappingURL=index.mjs.map