@ai-sdk/openai 2.0.0-canary.9 → 2.0.1

This diff shows the changes between the two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -27,15 +27,19 @@ __export(internal_exports, {
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
  OpenAISpeechModel: () => OpenAISpeechModel,
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
+ hasDefaultResponseFormat: () => hasDefaultResponseFormat,
  modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+ openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
+ openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
+ openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
  openaiProviderOptions: () => openaiProviderOptions
  });
  module.exports = __toCommonJS(internal_exports);

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_v45 = require("zod/v4");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -140,7 +144,7 @@ function convertToOpenAIChatMessages({
  type: "file",
  file: {
  filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
+ file_data: `data:application/pdf;base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
  }
  };
  } else {
@@ -169,7 +173,7 @@ function convertToOpenAIChatMessages({
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -185,10 +189,23 @@ function convertToOpenAIChatMessages({
  }
  case "tool": {
  for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
+ content: contentValue
  });
  }
  break;
@@ -202,17 +219,17 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

  // src/map-openai-finish-reason.ts
@@ -233,15 +250,15 @@ function mapOpenAIFinishReason(finishReason) {
  }

  // src/openai-chat-options.ts
- var import_zod = require("zod");
- var openaiProviderOptions = import_zod.z.object({
+ var import_v4 = require("zod/v4");
+ var openaiProviderOptions = import_v4.z.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
  * Accepts a JSON object that maps tokens (specified by their token ID in
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
- logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ logitBias: import_v4.z.record(import_v4.z.coerce.number(), import_v4.z.number()).optional(),
  /**
  * Return the log probabilities of the tokens.
  *
@@ -251,50 +268,71 @@ var openaiProviderOptions = import_zod.z.object({
  * Setting to a number will return the log probabilities of the top n
  * tokens that were generated.
  */
- logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ logprobs: import_v4.z.union([import_v4.z.boolean(), import_v4.z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
- parallelToolCalls: import_zod.z.boolean().optional(),
+ parallelToolCalls: import_v4.z.boolean().optional(),
  /**
  * A unique identifier representing your end-user, which can help OpenAI to
  * monitor and detect abuse.
  */
- user: import_zod.z.string().optional(),
+ user: import_v4.z.string().optional(),
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
- maxCompletionTokens: import_zod.z.number().optional(),
+ maxCompletionTokens: import_v4.z.number().optional(),
  /**
  * Whether to enable persistence in responses API.
  */
- store: import_zod.z.boolean().optional(),
+ store: import_v4.z.boolean().optional(),
  /**
  * Metadata to associate with the request.
  */
- metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ metadata: import_v4.z.record(import_v4.z.string().max(64), import_v4.z.string().max(512)).optional(),
  /**
  * Parameters for prediction mode.
  */
- prediction: import_zod.z.record(import_zod.z.any()).optional()
+ prediction: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: import_v4.z.boolean().optional(),
+ /**
+ * Service tier for the request.
+ * - 'auto': Default service tier
+ * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+ *
+ * @default 'auto'
+ */
+ serviceTier: import_v4.z.enum(["auto", "flex", "priority"]).optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ *
+ * @default false
+ */
+ strictJsonSchema: import_v4.z.boolean().optional()
  });

  // src/openai-error.ts
- var import_zod2 = require("zod");
+ var import_v42 = require("zod/v4");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod2.z.object({
- error: import_zod2.z.object({
- message: import_zod2.z.string(),
+ var openaiErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod2.z.string().nullish(),
- param: import_zod2.z.any().nullish(),
- code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
+ type: import_v42.z.string().nullish(),
+ param: import_v42.z.any().nullish(),
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
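
The options added above surface at call sites through the `openai` key of `providerOptions`, which `getArgs` parses against `openaiProviderOptions` (see the `parseProviderOptions` call later in this file). A minimal call-site sketch, assuming the public `generateText` function from the `ai` package and the default `openai` provider export, neither of which is part of this diff:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("o4-mini"),
  prompt: "Summarize the changelog.",
  providerOptions: {
    openai: {
      serviceTier: "flex", // 50% cheaper, higher latency; o3 and o4-mini only
      strictJsonSchema: true, // strict JSON schema validation (default: false)
      structuredOutputs: true, // default: true
    },
  },
});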
@@ -302,25 +340,101 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
+
+ // src/tool/file-search.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_v43 = require("zod/v4");
+ var comparisonFilterSchema = import_v43.z.object({
+ key: import_v43.z.string(),
+ type: import_v43.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+ value: import_v43.z.union([import_v43.z.string(), import_v43.z.number(), import_v43.z.boolean()])
+ });
+ var compoundFilterSchema = import_v43.z.object({
+ type: import_v43.z.enum(["and", "or"]),
+ filters: import_v43.z.array(
+ import_v43.z.union([comparisonFilterSchema, import_v43.z.lazy(() => compoundFilterSchema)])
+ )
+ });
+ var filtersSchema = import_v43.z.union([comparisonFilterSchema, compoundFilterSchema]);
+ var fileSearchArgsSchema = import_v43.z.object({
+ /**
+ * List of vector store IDs to search through. If not provided, searches all available vector stores.
+ */
+ vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
+ /**
+ * Maximum number of search results to return. Defaults to 10.
+ */
+ maxNumResults: import_v43.z.number().optional(),
+ /**
+ * Ranking options for the search.
+ */
+ ranking: import_v43.z.object({
+ ranker: import_v43.z.enum(["auto", "default-2024-08-21"]).optional()
+ }).optional(),
+ /**
+ * A filter to apply based on file attributes.
+ */
+ filters: filtersSchema.optional()
+ });
+ var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
+ id: "openai.file_search",
+ name: "file_search",
+ inputSchema: import_v43.z.object({
+ query: import_v43.z.string()
+ })
+ });
+
+ // src/tool/web-search-preview.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_v44 = require("zod/v4");
+ var webSearchPreviewArgsSchema = import_v44.z.object({
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize: import_v44.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation: import_v44.z.object({
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: import_v44.z.literal("approximate"),
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country: import_v44.z.string().optional(),
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city: import_v44.z.string().optional(),
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region: import_v44.z.string().optional(),
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone: import_v44.z.string().optional()
+ }).optional()
+ });
+ var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+ id: "openai.web_search_preview",
+ name: "web_search_preview",
+ inputSchema: import_v44.z.object({})
+ });
+
+ // src/openai-prepare-tools.ts
  function prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
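
The two tool factories added above register OpenAI's provider-defined `file_search` and `web_search_preview` tools; `prepareTools` below translates their parsed args into the wire format. A sketch of the args the schemas accept, assuming the factories are re-exported on the provider's public surface (for example as `openai.tools.fileSearch`), which this diff does not show:

const tools = {
  file_search: openai.tools.fileSearch({
    vectorStoreIds: ["vs_123"], // optional; searches all stores when omitted
    maxNumResults: 5, // defaults to 10
    filters: { key: "type", type: "eq", value: "pdf" }, // comparison filter
  }),
  web_search_preview: openai.tools.webSearchPreview({
    searchContextSize: "low",
    userLocation: { type: "approximate", country: "US", city: "Minneapolis" },
  }),
};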
@@ -329,18 +443,48 @@ function prepareTools({
  }
  const openaiTools = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiTools.push({
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters,
- strict: structuredOutputs ? true : void 0
+ switch (tool.type) {
+ case "function":
+ openaiTools.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.inputSchema,
+ strict: structuredOutputs ? strictJsonSchema : void 0
+ }
+ });
+ break;
+ case "provider-defined":
+ switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_num_results: args.maxNumResults,
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+ filters: args.filters
+ });
+ break;
+ }
+ case "openai.web_search_preview": {
+ const args = webSearchPreviewArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "web_search_preview",
+ search_context_size: args.searchContextSize,
+ user_location: args.userLocation
+ });
+ break;
+ }
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
- });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
@@ -374,29 +518,18 @@ function prepareTools({

  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
- }
- getArgs({
+ async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
@@ -411,20 +544,21 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b;
+ var _a, _b, _c, _d;
  const warnings = [];
- const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiProviderOptions
  })) != null ? _a : {};
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
@@ -438,6 +572,7 @@ var OpenAIChatLanguageModel = class {
  }
  );
  warnings.push(...messageWarnings);
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -453,13 +588,12 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- // TODO improve below:
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
  type: "json_schema",
  json_schema: {
  schema: responseFormat.schema,
- strict: true,
- name: (_b = responseFormat.name) != null ? _b : "response",
+ strict: strictJsonSchema,
+ name: (_d = responseFormat.name) != null ? _d : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
@@ -472,6 +606,7 @@ var OpenAIChatLanguageModel = class {
  metadata: openaiOptions.metadata,
  prediction: openaiOptions.prediction,
  reasoning_effort: openaiOptions.reasoningEffort,
+ service_tier: openaiOptions.serviceTier,
  // messages:
  messages
  };
@@ -545,6 +680,22 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ baseArgs.service_tier = void 0;
+ }
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ });
+ baseArgs.service_tier = void 0;
+ }
  const {
  tools: openaiTools,
  toolChoice: openaiToolChoice,
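
Note that an unsupported `serviceTier` is downgraded rather than rejected: the guards above clear `service_tier` from the request body and emit an `unsupported-setting` warning. A sketch of the observable behavior, assuming warnings are surfaced on the result as in the v2 specification:

// serviceTier: "flex" on a model other than o3/o4-mini does not throw;
// the option is stripped and a warning is returned instead.
const { warnings } = await generateText({
  model: openai("gpt-4o"),
  prompt: "Hello",
  providerOptions: { openai: { serviceTier: "flex" } },
});
// warnings includes: { type: "unsupported-setting", setting: "serviceTier",
//   details: "flex processing is only available for o3 and o4-mini models" }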
@@ -552,7 +703,8 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs: this.supportsStructuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  });
  return {
  args: {
@@ -564,21 +716,21 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
  openaiChatResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -593,33 +745,32 @@ var OpenAIChatLanguageModel = class {
  for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  }
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
  const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
- outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
  },
  request: { body },
  response: {
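
This hunk also changes where token accounting lives: `reasoning_tokens` and `cached_tokens` move out of `providerMetadata.openai` into the standardized `usage` fields (`reasoningTokens`, `cachedInputTokens`, plus the new `totalTokens`), while raw logprobs now travel as `providerMetadata.openai.logprobs`. A sketch of reading both on a result, assuming the same `generateText` call shape as above:

const { usage, providerMetadata } = await generateText({
  model: openai("o3-mini"),
  prompt: "...",
});
console.log(usage.totalTokens, usage.reasoningTokens, usage.cachedInputTokens);
console.log(providerMetadata?.openai?.logprobs); // raw logprob content, when requested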
@@ -628,41 +779,41 @@ var OpenAIChatLanguageModel = class {
  body: rawResponse
  },
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
  openaiChatChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
+ let isActiveText = false;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -671,7 +822,10 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -691,48 +845,40 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
- }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
  const delta = choice.delta;
  if (delta.content != null) {
+ if (!isActiveText) {
+ controller.enqueue({ type: "text-start", id: "0" });
+ isActiveText = true;
+ }
  controller.enqueue({
- type: "text",
- text: delta.content
+ type: "text-delta",
+ id: "0",
+ delta: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  if (delta.tool_calls != null) {
  for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
@@ -749,39 +895,45 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
  });
  }
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallDelta.id,
+ toolName: toolCallDelta.function.name
+ });
  toolCalls[index] = {
  id: toolCallDelta.id,
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-delta",
+ id: toolCall2.id,
+ delta: toolCall2.function.arguments
  });
  }
- if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
+ if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.hasFinished = true;
  }
@@ -792,23 +944,24 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.hasFinished = true;
  }
@@ -816,10 +969,12 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
+ if (isActiveText) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
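
The streaming changes in this class replace the flat `text` and `tool-call-delta` parts with explicit lifecycles: `text-start` / `text-delta` / `text-end` and `tool-input-start` / `tool-input-delta` / `tool-input-end`, with `raw` chunks available behind `includeRawChunks`. A consumer sketch over the v2 part stream, assuming direct use of the model's `doStream` (applications would normally go through `streamText`) and a runtime where `ReadableStream` is async-iterable:

const { stream } = await model.doStream({ prompt, includeRawChunks: false });
for await (const part of stream) {
  switch (part.type) {
    case "text-start": // a text block with part.id begins
      break;
    case "text-delta":
      process.stdout.write(part.delta);
      break;
    case "text-end": // the block with part.id is complete
      break;
    case "tool-input-start": // part.id, part.toolName
      break;
    case "tool-input-delta": // incremental JSON arrives in part.delta
      break;
    case "tool-call": // complete call: part.toolCallId, part.input
      break;
    case "finish": // part.finishReason, part.usage
      break;
  }
}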
@@ -831,96 +986,97 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = import_zod3.z.object({
- prompt_tokens: import_zod3.z.number().nullish(),
- completion_tokens: import_zod3.z.number().nullish(),
- prompt_tokens_details: import_zod3.z.object({
- cached_tokens: import_zod3.z.number().nullish()
+ var openaiTokenUsageSchema = import_v45.z.object({
+ prompt_tokens: import_v45.z.number().nullish(),
+ completion_tokens: import_v45.z.number().nullish(),
+ total_tokens: import_v45.z.number().nullish(),
+ prompt_tokens_details: import_v45.z.object({
+ cached_tokens: import_v45.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod3.z.object({
- reasoning_tokens: import_zod3.z.number().nullish(),
- accepted_prediction_tokens: import_zod3.z.number().nullish(),
- rejected_prediction_tokens: import_zod3.z.number().nullish()
+ completion_tokens_details: import_v45.z.object({
+ reasoning_tokens: import_v45.z.number().nullish(),
+ accepted_prediction_tokens: import_v45.z.number().nullish(),
+ rejected_prediction_tokens: import_v45.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- message: import_zod3.z.object({
- role: import_zod3.z.literal("assistant").nullish(),
- content: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function"),
- function: import_zod3.z.object({
- name: import_zod3.z.string(),
- arguments: import_zod3.z.string()
+ var openaiChatResponseSchema = import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ message: import_v45.z.object({
+ role: import_v45.z.literal("assistant").nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function"),
+ function: import_v45.z.object({
+ name: import_v45.z.string(),
+ arguments: import_v45.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
+ index: import_v45.z.number(),
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullish()
+ finish_reason: import_v45.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- delta: import_zod3.z.object({
- role: import_zod3.z.enum(["assistant"]).nullish(),
- content: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- index: import_zod3.z.number(),
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function").optional(),
- function: import_zod3.z.object({
- name: import_zod3.z.string().nullish(),
- arguments: import_zod3.z.string().nullish()
+ var openaiChatChunkSchema = import_v45.z.union([
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ delta: import_v45.z.object({
+ role: import_v45.z.enum(["assistant"]).nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ index: import_v45.z.number(),
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function").nullish(),
+ function: import_v45.z.object({
+ name: import_v45.z.string().nullish(),
+ arguments: import_v45.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullable().optional(),
- index: import_zod3.z.number()
+ finish_reason: import_v45.z.string().nullish(),
+ index: import_v45.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -930,8 +1086,11 @@ var openaiChatChunkSchema = import_zod3.z.union([
  function isReasoningModel(modelId) {
  return modelId.startsWith("o");
  }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ function supportsFlexProcessing(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ function supportsPriorityProcessing(modelId) {
+ return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -953,29 +1112,37 @@ var reasoningModels = {
  "o1-preview-2024-09-12": {
  systemMessageMode: "remove"
  },
+ o3: {
+ systemMessageMode: "developer"
+ },
+ "o3-2025-04-16": {
+ systemMessageMode: "developer"
+ },
  "o3-mini": {
  systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
  systemMessageMode: "developer"
+ },
+ "o4-mini": {
+ systemMessageMode: "developer"
+ },
+ "o4-mini-2025-04-16": {
+ systemMessageMode: "developer"
  }
  };

  // src/openai-completion-language-model.ts
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_v47 = require("zod/v4");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
  prompt,
- inputFormat,
  user = "user",
  assistant = "assistant"
  }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
  let text = "";
  if (prompt[0].role === "system") {
  text += `${prompt[0].content}
@@ -1044,34 +1211,66 @@ ${user}:`]
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
+ // src/openai-completion-options.ts
+ var import_v46 = require("zod/v4");
+ var openaiCompletionProviderOptions = import_v46.z.object({
+ /**
+ Echo back the prompt in addition to the completion.
+ */
+ echo: import_v46.z.boolean().optional(),
+ /**
+ Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
+ the bias is added to the logits generated by the model prior to sampling.
+ The exact effect will vary per model, but values between -1 and 1 should
+ decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+ token from being generated.
+ */
+ logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
+ /**
+ The suffix that comes after a completion of inserted text.
+ */
+ suffix: import_v46.z.string().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_v46.z.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
+ });

  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
+ this.supportedUrls = {
+ // No URLs are supported for completion models.
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- inputFormat,
+ async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
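
With this change the completion model's constructor no longer takes a settings object; `echo`, `logitBias`, `suffix`, `user`, and `logprobs` move into provider options parsed per request. A migration sketch, assuming the public `openai.completion` factory, which is not shown in this diff:

const result = await generateText({
  model: openai.completion("gpt-3.5-turbo-instruct"),
  prompt: "Write a haiku.",
  providerOptions: {
    openai: {
      echo: false,
      suffix: "\n---",
      logitBias: { "50256": -100 }, // ban <|endoftext|>
      logprobs: 3, // top-3 logprobs, surfaced via providerMetadata.openai.logprobs
    },
  },
});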
@@ -1083,9 +1282,22 @@ var OpenAICompletionLanguageModel = class {
  responseFormat,
  tools,
  toolChoice,
- seed
+ seed,
+ providerOptions
  }) {
  const warnings = [];
+ const openaiOptions = {
+ ...await (0, import_provider_utils6.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ }),
+ ...await (0, import_provider_utils6.parseProviderOptions)({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ })
+ };
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -1102,18 +1314,18 @@ var OpenAICompletionLanguageModel = class {
  details: "JSON response format is not supported."
  });
  }
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  return {
  args: {
  // model id:
  model: this.modelId,
  // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
+ echo: openaiOptions.echo,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+ suffix: openaiOptions.suffix,
+ user: openaiOptions.user,
  // standardized settings:
  max_tokens: maxOutputTokens,
  temperature,
@@ -1130,71 +1342,79 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
- const { args, warnings } = this.getArgs(options);
+ var _a, _b, _c;
+ const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils4.postJsonToApi)({
+ } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
  openaiCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  return {
  content: [{ type: "text", text: choice.text }],
  usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  request: { body: args },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
  body: rawResponse
  },
+ providerMetadata,
  warnings
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(
  openaiCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
+ const providerMetadata = { openai: {} };
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
@@ -1203,6 +1423,9 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1220,34 +1443,36 @@ var OpenAICompletionLanguageModel = class {
  type: "response-metadata",
  ...getResponseMetadata(value)
  });
+ controller.enqueue({ type: "text-start", id: "0" });
  }
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
  usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
- if ((choice == null ? void 0 : choice.text) != null) {
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
  controller.enqueue({
- type: "text",
- text: choice.text
+ type: "text-delta",
+ id: "0",
+ delta: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
+ if (!isFirstChunk) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
+ providerMetadata,
  usage
  });
@@ -1258,78 +1483,89 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod4.z.object({
- id: import_zod4.z.string().nullish(),
- created: import_zod4.z.number().nullish(),
- model: import_zod4.z.string().nullish(),
- choices: import_zod4.z.array(
- import_zod4.z.object({
- text: import_zod4.z.string(),
- finish_reason: import_zod4.z.string(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
+ var usageSchema = import_v47.z.object({
+ prompt_tokens: import_v47.z.number(),
+ completion_tokens: import_v47.z.number(),
+ total_tokens: import_v47.z.number()
+ });
+ var openaiCompletionResponseSchema = import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
- usage: import_zod4.z.object({
- prompt_tokens: import_zod4.z.number(),
- completion_tokens: import_zod4.z.number()
- })
+ usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = import_zod4.z.union([
- import_zod4.z.object({
- id: import_zod4.z.string().nullish(),
- created: import_zod4.z.number().nullish(),
- model: import_zod4.z.string().nullish(),
- choices: import_zod4.z.array(
- import_zod4.z.object({
- text: import_zod4.z.string(),
- finish_reason: import_zod4.z.string().nullish(),
- index: import_zod4.z.number(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_v47.z.union([
+ import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string().nullish(),
+ index: import_v47.z.number(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
- usage: import_zod4.z.object({
- prompt_tokens: import_zod4.z.number(),
- completion_tokens: import_zod4.z.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
 
  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_v49 = require("zod/v4");
+
+ // src/openai-embedding-options.ts
+ var import_v48 = require("zod/v4");
+ var openaiEmbeddingProviderOptions = import_v48.z.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: import_v48.z.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_v48.z.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1338,25 +1574,30 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
+ const openaiOptions = (_a = await (0, import_provider_utils7.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
  const {
  responseHeaders,
  value: response,
  rawValue
- } = await (0, import_provider_utils5.postJsonToApi)({
+ } = await (0, import_provider_utils7.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
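
`dimensions` and `user` move from constructor settings to per-call provider options validated against `openaiEmbeddingProviderOptions`. A minimal sketch of the resulting request body, assuming the options were already parsed (the helper name is illustrative, not part of the package):

// Assemble the /embeddings request body the way doEmbed now does.
interface OpenAIEmbeddingOptions {
  dimensions?: number;
  user?: string;
}

function buildEmbeddingBody(
  modelId: string,
  values: string[],
  openaiOptions: OpenAIEmbeddingOptions = {},
) {
  return {
    model: modelId,
    input: values,
    encoding_format: "float" as const,
    dimensions: openaiOptions.dimensions, // was this.settings.dimensions
    user: openaiOptions.user, // was this.settings.user
  };
}
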
@@ -1369,32 +1610,33 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ embedding: import_zod5.z.array(import_zod5.z.number()) })),
- usage: import_zod5.z.object({ prompt_tokens: import_zod5.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_v49.z.object({
+ data: import_v49.z.array(import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })),
+ usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
  });
 
  // src/openai-image-model.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod6 = require("zod");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_v410 = require("zod/v4");
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
  "dall-e-3": 1,
- "dall-e-2": 10
+ "dall-e-2": 10,
+ "gpt-image-1": 10
  };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
  // src/openai-image-model.ts
  var OpenAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
- var _a, _b;
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
  return this.config.provider;
@@ -1422,22 +1664,22 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
+ const { value: response, responseHeaders } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
  n,
  size,
  ...(_d = providerOptions.openai) != null ? _d : {},
- response_format: "b64_json"
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
  openaiImageResponseSchema
  ),
  abortSignal,
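
`gpt-image-1` joins `modelMaxImagesPerCall` and is listed in `hasDefaultResponseFormat`, so the explicit `response_format: "b64_json"` is only sent for models that accept it. A standalone sketch of that gate:

// Models in this set always return base64 and must not receive response_format.
const hasDefaultResponseFormat = new Set(["gpt-image-1"]);

function responseFormatFor(modelId: string): { response_format?: "b64_json" } {
  return hasDefaultResponseFormat.has(modelId) ? {} : { response_format: "b64_json" };
}
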
@@ -1450,24 +1692,57 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
- var openaiImageResponseSchema = import_zod6.z.object({
- data: import_zod6.z.array(import_zod6.z.object({ b64_json: import_zod6.z.string() }))
+ var openaiImageResponseSchema = import_v410.z.object({
+ data: import_v410.z.array(
+ import_v410.z.object({ b64_json: import_v410.z.string(), revised_prompt: import_v410.z.string().optional() })
+ )
  });
 
  // src/openai-transcription-model.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
- var openAIProviderOptionsSchema = import_zod7.z.object({
- include: import_zod7.z.array(import_zod7.z.string()).nullish(),
- language: import_zod7.z.string().nullish(),
- prompt: import_zod7.z.string().nullish(),
- temperature: import_zod7.z.number().min(0).max(1).nullish().default(0),
- timestampGranularities: import_zod7.z.array(import_zod7.z.enum(["word", "segment"])).nullish().default(["segment"])
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_v412 = require("zod/v4");
+
+ // src/openai-transcription-options.ts
+ var import_v411 = require("zod/v4");
+ var openAITranscriptionProviderOptions = import_v411.z.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
+ include: import_v411.z.array(import_v411.z.string()).optional(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
+ language: import_v411.z.string().optional(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
+ prompt: import_v411.z.string().optional(),
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
  });
+
+ // src/openai-transcription-model.ts
  var languageMap = {
  afrikaans: "af",
  arabic: "ar",
@@ -1531,38 +1806,36 @@ var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  audio,
  mediaType,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+ const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema: openAIProviderOptionsSchema
+ schema: openAITranscriptionProviderOptions
  });
  const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils9.convertBase64ToUint8Array)(audio)]);
  formData.append("model", this.modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: (_a = openAIOptions.include) != null ? _a : void 0,
- language: (_b = openAIOptions.language) != null ? _b : void 0,
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
  };
- for (const key in transcriptionModelOptions) {
- const value = transcriptionModelOptions[key];
- if (value !== void 0) {
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
  formData.append(key, String(value));
  }
  }
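
The option loop switches to `Object.entries` with a `!= null` check, so both `null` and `undefined` values are skipped and only populated fields reach the form body. A standalone sketch of that assembly step:

// Append only populated transcription options to the multipart form body.
function appendTranscriptionOptions(
  formData: FormData,
  options: Record<string, unknown>,
): void {
  for (const [key, value] of Object.entries(options)) {
    if (value != null) {
      formData.append(key, String(value));
    }
  }
}
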
@@ -1575,20 +1848,20 @@ var OpenAITranscriptionModel = class {
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { formData, warnings } = this.getArgs(options);
+ const { formData, warnings } = await this.getArgs(options);
  const {
  value: response,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils7.postFormDataToApi)({
+ } = await (0, import_provider_utils9.postFormDataToApi)({
  url: this.config.url({
  path: "/audio/transcriptions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
  formData,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
  openaiTranscriptionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1614,45 +1887,46 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod7.z.object({
- text: import_zod7.z.string(),
- language: import_zod7.z.string().nullish(),
- duration: import_zod7.z.number().nullish(),
- words: import_zod7.z.array(
- import_zod7.z.object({
- word: import_zod7.z.string(),
- start: import_zod7.z.number(),
- end: import_zod7.z.number()
+ var openaiTranscriptionResponseSchema = import_v412.z.object({
+ text: import_v412.z.string(),
+ language: import_v412.z.string().nullish(),
+ duration: import_v412.z.number().nullish(),
+ words: import_v412.z.array(
+ import_v412.z.object({
+ word: import_v412.z.string(),
+ start: import_v412.z.number(),
+ end: import_v412.z.number()
  })
  ).nullish()
  });
 
  // src/openai-speech-model.ts
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod8 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod8.z.object({
- instructions: import_zod8.z.string().nullish(),
- speed: import_zod8.z.number().min(0.25).max(4).default(1).nullish()
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_v413 = require("zod/v4");
+ var OpenAIProviderOptionsSchema = import_v413.z.object({
+ instructions: import_v413.z.string().nullish(),
+ speed: import_v413.z.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  text,
  voice = "alloy",
  outputFormat = "mp3",
  speed,
  instructions,
+ language,
  providerOptions
  }) {
  const warnings = [];
- const openAIOptions = (0, import_provider_utils8.parseProviderOptions)({
+ const openAIOptions = await (0, import_provider_utils10.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -1685,6 +1959,13 @@ var OpenAISpeechModel = class {
  }
  }
  }
+ if (language) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "language",
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
+ });
+ }
  return {
  requestBody,
  warnings
@@ -1693,20 +1974,20 @@ var OpenAISpeechModel = class {
  async doGenerate(options) {
  var _a, _b, _c;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { requestBody, warnings } = this.getArgs(options);
+ const { requestBody, warnings } = await this.getArgs(options);
  const {
  value: audio,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils8.postJsonToApi)({
+ } = await (0, import_provider_utils10.postJsonToApi)({
  url: this.config.url({
  path: "/audio/speech",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
  body: requestBody,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils8.createBinaryResponseHandler)(),
+ successfulResponseHandler: (0, import_provider_utils10.createBinaryResponseHandler)(),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
@@ -1727,15 +2008,19 @@ var OpenAISpeechModel = class {
  };
 
  // src/responses/openai-responses-language-model.ts
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
- var import_zod9 = require("zod");
+ var import_provider8 = require("@ai-sdk/provider");
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_v415 = require("zod/v4");
 
  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
- function convertToOpenAIResponsesMessages({
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_v414 = require("zod/v4");
+ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
+ var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -1770,7 +2055,7 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c;
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1782,7 +2067,7 @@ function convertToOpenAIResponsesMessages({
  type: "input_image",
  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
  // OpenAI specific extension: image detail
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
@@ -1792,7 +2077,7 @@ function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -1807,34 +2092,97 @@ function convertToOpenAIResponsesMessages({
  break;
  }
  case "assistant": {
+ const reasoningMessages = {};
  for (const part of content) {
  switch (part.type) {
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
  case "tool-call": {
+ if (part.providerExecuted) {
+ break;
+ }
  messages.push({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
+ });
+ break;
+ }
+ case "tool-result": {
+ warnings.push({
+ type: "other",
+ message: `tool result parts in assistant messages are not supported for OpenAI responses`
  });
  break;
  }
+ case "reasoning": {
+ const providerOptions = await (0, import_provider_utils11.parseProviderOptions)({
+ provider: "openai",
+ providerOptions: part.providerOptions,
+ schema: openaiResponsesReasoningProviderOptionsSchema
+ });
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+ if (reasoningId != null) {
+ const existingReasoningMessage = reasoningMessages[reasoningId];
+ const summaryParts = [];
+ if (part.text.length > 0) {
+ summaryParts.push({ type: "summary_text", text: part.text });
+ } else if (existingReasoningMessage !== void 0) {
+ warnings.push({
+ type: "other",
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ if (existingReasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+ summary: summaryParts
+ };
+ messages.push(reasoningMessages[reasoningId]);
+ } else {
+ existingReasoningMessage.summary.push(...summaryParts);
+ }
+ } else {
+ warnings.push({
+ type: "other",
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ break;
+ }
  }
  }
  break;
  }
  case "tool": {
  for (const part of content) {
+ const output = part.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  type: "function_call_output",
  call_id: part.toolCallId,
- output: JSON.stringify(part.result)
+ output: contentValue
  });
  }
  break;
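
Tool results are no longer unconditionally `JSON.stringify`-ed: the new `output` discriminated union passes text-like values through and JSON-encodes structured ones. A standalone sketch of that normalization (the union shape mirrors the diff; the function name is illustrative):

// Normalize a tool output part into the string the OpenAI API expects.
type ToolOutput =
  | { type: "text" | "error-text"; value: string }
  | { type: "content" | "json" | "error-json"; value: unknown };

function toolOutputToString(output: ToolOutput): string {
  switch (output.type) {
    case "text":
    case "error-text":
      return output.value; // already a string
    case "content":
    case "json":
    case "error-json":
      return JSON.stringify(output.value); // structured values are JSON-encoded
  }
}
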
@@ -1847,6 +2195,10 @@ function convertToOpenAIResponsesMessages({
  }
  return { messages, warnings };
  }
+ var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
+ itemId: import_v414.z.string().nullish(),
+ reasoningEncryptedContent: import_v414.z.string().nullish()
+ });
 
  // src/responses/map-openai-responses-finish-reason.ts
  function mapOpenAIResponseFinishReason({
@@ -1871,7 +2223,7 @@ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
  tools,
  toolChoice,
- strict
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -1886,12 +2238,23 @@ function prepareResponsesTools({
  type: "function",
  name: tool.name,
  description: tool.description,
- parameters: tool.parameters,
- strict: strict ? true : void 0
+ parameters: tool.inputSchema,
+ strict: strictJsonSchema
  });
  break;
  case "provider-defined":
  switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_num_results: args.maxNumResults,
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+ filters: args.filters
+ });
+ break;
+ }
  case "openai.web_search_preview":
  openaiTools.push({
  type: "web_search_preview",
@@ -1921,7 +2284,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools,
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -1937,15 +2300,16 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "json";
- this.supportsStructuredOutputs = true;
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  maxOutputTokens,
  temperature,
  stopSequences,
@@ -1984,17 +2348,17 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils9.parseProviderOptions)({
+ const openaiOptions = await (0, import_provider_utils12.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2005,7 +2369,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict: isStrict,
+ strict: strictJsonSchema,
  name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -2019,9 +2383,18 @@ var OpenAIResponsesLanguageModel = class {
  store: openaiOptions == null ? void 0 : openaiOptions.store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -2044,6 +2417,37 @@ var OpenAIResponsesLanguageModel = class {
  details: "topP is not supported for reasoning models"
  });
  }
+ } else {
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningEffort",
+ details: "reasoningEffort is not supported for non-reasoning models"
+ });
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningSummary",
+ details: "reasoningSummary is not supported for non-reasoning models"
+ });
+ }
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ delete baseArgs.service_tier;
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ });
+ delete baseArgs.service_tier;
  }
  const {
  tools: openaiTools,
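
A hedged usage sketch of the provider options wired up above; the option names (`strictJsonSchema`, `reasoningEffort`, `reasoningSummary`, `serviceTier`, `include`) come from the diff, the example values do not:

// Per-call provider options for the responses model (example values only).
const providerOptions = {
  openai: {
    strictJsonSchema: true, // note: the default flipped from true to false
    reasoningEffort: "medium", // reasoning models only, otherwise a warning
    reasoningSummary: "auto", // reasoning models only, otherwise a warning
    serviceTier: "flex", // dropped with a warning on unsupported models
  },
};
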
@@ -2052,7 +2456,7 @@ var OpenAIResponsesLanguageModel = class {
  } = prepareResponsesTools({
  tools,
  toolChoice,
- strict: isStrict
+ strictJsonSchema
  });
  return {
  args: {
@@ -2064,84 +2468,142 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ const { args: body, warnings } = await this.getArgs(options);
+ const url = this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ });
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils9.postJsonToApi)({
- url: this.config.url({
- path: "/responses",
- modelId: this.modelId
- }),
- headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
+ } = await (0, import_provider_utils12.postJsonToApi)({
+ url,
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
- import_zod9.z.object({
- id: import_zod9.z.string(),
- created_at: import_zod9.z.number(),
- model: import_zod9.z.string(),
- output: import_zod9.z.array(
- import_zod9.z.discriminatedUnion("type", [
- import_zod9.z.object({
- type: import_zod9.z.literal("message"),
- role: import_zod9.z.literal("assistant"),
- content: import_zod9.z.array(
- import_zod9.z.object({
- type: import_zod9.z.literal("output_text"),
- text: import_zod9.z.string(),
- annotations: import_zod9.z.array(
- import_zod9.z.object({
- type: import_zod9.z.literal("url_citation"),
- start_index: import_zod9.z.number(),
- end_index: import_zod9.z.number(),
- url: import_zod9.z.string(),
- title: import_zod9.z.string()
+ successfulResponseHandler: (0, import_provider_utils12.createJsonResponseHandler)(
+ import_v415.z.object({
+ id: import_v415.z.string(),
+ created_at: import_v415.z.number(),
+ error: import_v415.z.object({
+ code: import_v415.z.string(),
+ message: import_v415.z.string()
+ }).nullish(),
+ model: import_v415.z.string(),
+ output: import_v415.z.array(
+ import_v415.z.discriminatedUnion("type", [
+ import_v415.z.object({
+ type: import_v415.z.literal("message"),
+ role: import_v415.z.literal("assistant"),
+ id: import_v415.z.string(),
+ content: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("output_text"),
+ text: import_v415.z.string(),
+ annotations: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("url_citation"),
+ start_index: import_v415.z.number(),
+ end_index: import_v415.z.number(),
+ url: import_v415.z.string(),
+ title: import_v415.z.string()
  })
  )
  })
  )
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("function_call"),
- call_id: import_zod9.z.string(),
- name: import_zod9.z.string(),
- arguments: import_zod9.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("function_call"),
+ call_id: import_v415.z.string(),
+ name: import_v415.z.string(),
+ arguments: import_v415.z.string(),
+ id: import_v415.z.string()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("web_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("web_search_call")
+ import_v415.z.object({
+ type: import_v415.z.literal("computer_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("computer_call")
+ import_v415.z.object({
+ type: import_v415.z.literal("file_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("reasoning")
+ import_v415.z.object({
+ type: import_v415.z.literal("reasoning"),
+ id: import_v415.z.string(),
+ encrypted_content: import_v415.z.string().nullish(),
+ summary: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("summary_text"),
+ text: import_v415.z.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullable(),
- usage: usageSchema
+ incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullable(),
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
+ if (response.error) {
+ throw new import_provider8.APICallError({
+ message: response.error.message,
+ url,
+ requestBodyValues: body,
+ statusCode: 400,
+ responseHeaders,
+ responseBody: rawResponse,
+ isRetryable: false
+ });
+ }
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
+ case "reasoning": {
+ if (part.summary.length === 0) {
+ part.summary.push({ type: "summary_text", text: "" });
+ }
+ for (const summary of part.summary) {
+ content.push({
+ type: "reasoning",
+ text: summary.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id,
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
  type: "source",
  sourceType: "url",
- id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils9.generateId)(),
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils12.generateId)(),
  url: annotation.url,
  title: annotation.title
  });
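
The response schema now tolerates an in-band `error` object, which `doGenerate` converts into a thrown `APICallError` with `statusCode: 400`. A simplified guard sketch (a plain `Error` stands in for `APICallError` from `@ai-sdk/provider`):

// Fail fast when /responses returns HTTP 200 with an error payload.
function assertNoInBandError(response: {
  error?: { code: string; message: string } | null;
}): void {
  if (response.error) {
    throw new Error(
      `OpenAI responses API error ${response.error.code}: ${response.error.message}`,
    );
  }
}
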
@@ -2152,10 +2614,71 @@ var OpenAIResponsesLanguageModel = class {
  case "function_call": {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: part.call_id,
  toolName: part.name,
- args: part.arguments
+ input: part.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
+ });
+ break;
+ }
+ case "web_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ result: { status: part.status || "completed" },
+ providerExecuted: true
+ });
+ break;
+ }
+ case "computer_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
+ });
+ break;
+ }
+ case "file_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "file_search",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "file_search",
+ result: {
+ type: "file_search_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
  });
  break;
  }
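
Provider-executed built-ins (`web_search_preview`, `computer_use`, `file_search`) now surface as a `tool-call` part immediately followed by a matching `tool-result`, both flagged `providerExecuted`. A standalone sketch of that pairing (result shape abbreviated):

// Emit the paired parts for a provider-executed built-in tool invocation.
function builtinToolParts(toolName: string, id: string, status?: string) {
  return [
    { type: "tool-call", toolCallId: id, toolName, input: "", providerExecuted: true },
    {
      type: "tool-result",
      toolCallId: id,
      toolName,
      result: { status: status ?? "completed" },
      providerExecuted: true,
    },
  ] as const;
}
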
@@ -2164,12 +2687,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
  },
  request: { body },
  response: {
@@ -2181,28 +2707,26 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
  }
  },
  warnings
  };
  }
  async doStream(options) {
- const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils9.postJsonToApi)({
+ const { args: body, warnings } = await this.getArgs(options);
+ const { responseHeaders, value: response } = await (0, import_provider_utils12.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils9.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils12.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -2212,13 +2736,13 @@ var OpenAIResponsesLanguageModel = class {
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
+ const activeReasoning = {};
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -2226,7 +2750,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2240,22 +2767,151 @@ var OpenAIResponsesLanguageModel = class {
  toolCallId: value.item.call_id
  };
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
+ type: "tool-input-start",
+ id: value.item.call_id,
+ toolName: value.item.name
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "web_search_preview",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "web_search_preview"
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "computer_use",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "computer_use"
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-start",
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ activeReasoning[value.item.id] = {
+ encryptedContent: value.item.encrypted_content,
+ summaryParts: [0]
+ };
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item.id}:0`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ } else if (isResponseOutputItemDoneChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.call_id
+ });
+ controller.enqueue({
+ type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- argsTextDelta: value.item.arguments
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
  });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ result: {
+ type: "web_search_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-end",
+ id: value.item.id
+ });
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ const activeReasoningPart = activeReasoning[value.item.id];
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item.id}:${summaryIndex}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ }
+ }
+ });
+ }
+ delete activeReasoning[value.item.id];
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
  if (toolCall != null) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: value.delta
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: value.delta
  });
  }
  } else if (isResponseCreatedChunk(value)) {
@@ -2268,36 +2924,57 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text",
- text: value.delta
+ type: "text-delta",
+ id: value.item_id,
+ delta: value.delta
  });
- } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ if (value.summary_index > 0) {
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ value.summary_index
+ );
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id,
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ }
+ }
+ });
+ }
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.call_id,
- toolName: value.item.name,
- args: value.item.arguments
+ type: "reasoning-delta",
+ id: `${value.item_id}:${value.summary_index}`,
+ delta: value.delta,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id
+ }
+ }
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils9.generateId)(),
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils12.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
+ } else if (isErrorChunk(value)) {
+ controller.enqueue({ type: "error", error: value });
  }
  },
  flush(controller) {
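
Streamed reasoning summaries are keyed `${itemId}:${summaryIndex}`, so each summary part forms its own reasoning stream delimited by `reasoning-start` and `reasoning-end`. A hedged accumulator sketch over those parts, with types simplified from the diff:

// Accumulate reasoning text per `${itemId}:${summaryIndex}` stream id.
type ReasoningEvent =
  | { type: "reasoning-start" | "reasoning-end"; id: string }
  | { type: "reasoning-delta"; id: string; delta: string };

function collectReasoning(events: ReasoningEvent[]): Map<string, string> {
  const texts = new Map<string, string>();
  for (const event of events) {
    if (event.type === "reasoning-start") {
      texts.set(event.id, "");
    } else if (event.type === "reasoning-delta") {
      texts.set(event.id, (texts.get(event.id) ?? "") + event.delta);
    }
  }
  return texts;
}
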
@@ -2305,13 +2982,9 @@ var OpenAIResponsesLanguageModel = class {
  type: "finish",
  finishReason,
  usage,
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
@@ -2323,87 +2996,151 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = import_zod9.z.object({
- input_tokens: import_zod9.z.number(),
- input_tokens_details: import_zod9.z.object({ cached_tokens: import_zod9.z.number().nullish() }).nullish(),
- output_tokens: import_zod9.z.number(),
- output_tokens_details: import_zod9.z.object({ reasoning_tokens: import_zod9.z.number().nullish() }).nullish()
+ var usageSchema2 = import_v415.z.object({
+ input_tokens: import_v415.z.number(),
+ input_tokens_details: import_v415.z.object({ cached_tokens: import_v415.z.number().nullish() }).nullish(),
+ output_tokens: import_v415.z.number(),
+ output_tokens_details: import_v415.z.object({ reasoning_tokens: import_v415.z.number().nullish() }).nullish()
+ });
+ var textDeltaChunkSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_text.delta"),
+ item_id: import_v415.z.string(),
+ delta: import_v415.z.string()
  });
- var textDeltaChunkSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.output_text.delta"),
- delta: import_zod9.z.string()
+ var errorChunkSchema = import_v415.z.object({
+ type: import_v415.z.literal("error"),
+ code: import_v415.z.string(),
+ message: import_v415.z.string(),
+ param: import_v415.z.string().nullish(),
+ sequence_number: import_v415.z.number()
  });
- var responseFinishedChunkSchema = import_zod9.z.object({
- type: import_zod9.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod9.z.object({
- incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullish(),
- usage: usageSchema
+ var responseFinishedChunkSchema = import_v415.z.object({
+ type: import_v415.z.enum(["response.completed", "response.incomplete"]),
+ response: import_v415.z.object({
+ incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullish(),
+ usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.created"),
- response: import_zod9.z.object({
- id: import_zod9.z.string(),
- created_at: import_zod9.z.number(),
- model: import_zod9.z.string()
+ var responseCreatedChunkSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.created"),
+ response: import_v415.z.object({
+ id: import_v415.z.string(),
+ created_at: import_v415.z.number(),
+ model: import_v415.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.output_item.done"),
- output_index: import_zod9.z.number(),
- item: import_zod9.z.discriminatedUnion("type", [
- import_zod9.z.object({
- type: import_zod9.z.literal("message")
+ var responseOutputItemAddedSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_item.added"),
+ output_index: import_v415.z.number(),
+ item: import_v415.z.discriminatedUnion("type", [
+ import_v415.z.object({
+ type: import_v415.z.literal("message"),
+ id: import_v415.z.string()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("reasoning"),
+ id: import_v415.z.string(),
+ encrypted_content: import_v415.z.string().nullish()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("function_call"),
+ id: import_v415.z.string(),
+ call_id: import_v415.z.string(),
+ name: import_v415.z.string(),
+ arguments: import_v415.z.string()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("web_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string()
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("function_call"),
- id: import_zod9.z.string(),
- call_id: import_zod9.z.string(),
- name: import_zod9.z.string(),
- arguments: import_zod9.z.string(),
- status: import_zod9.z.literal("completed")
+ import_v415.z.object({
+ type: import_v415.z.literal("computer_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("file_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string()
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod9.z.string(),
- output_index: import_zod9.z.number(),
- delta: import_zod9.z.string()
- });
- var responseOutputItemAddedSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.output_item.added"),
- output_index: import_zod9.z.number(),
- item: import_zod9.z.discriminatedUnion("type", [
- import_zod9.z.object({
- type: import_zod9.z.literal("message")
+ var responseOutputItemDoneSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_item.done"),
+ output_index: import_v415.z.number(),
+ item: import_v415.z.discriminatedUnion("type", [
+ import_v415.z.object({
+ type: import_v415.z.literal("message"),
+ id: import_v415.z.string()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("reasoning"),
+ id: import_v415.z.string(),
+ encrypted_content: import_v415.z.string().nullish()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("function_call"),
+ id: import_v415.z.string(),
+ call_id: import_v415.z.string(),
+ name: import_v415.z.string(),
+ arguments: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("web_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("computer_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("function_call"),
- id: import_zod9.z.string(),
- call_id: import_zod9.z.string(),
- name: import_zod9.z.string(),
- arguments: import_zod9.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("file_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.output_text.annotation.added"),
- annotation: import_zod9.z.object({
- type: import_zod9.z.literal("url_citation"),
- url: import_zod9.z.string(),
- title: import_zod9.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.function_call_arguments.delta"),
+ item_id: import_v415.z.string(),
+ output_index: import_v415.z.number(),
+ delta: import_v415.z.string()
+ });
+ var responseAnnotationAddedSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_text.annotation.added"),
+ annotation: import_v415.z.object({
+ type: import_v415.z.literal("url_citation"),
+ url: import_v415.z.string(),
+ title: import_v415.z.string()
  })
  });
- var openaiResponsesChunkSchema = import_zod9.z.union([
+ var responseReasoningSummaryPartAddedSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.reasoning_summary_part.added"),
+ item_id: import_v415.z.string(),
+ summary_index: import_v415.z.number()
+ });
+ var responseReasoningSummaryTextDeltaSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_v415.z.string(),
+ summary_index: import_v415.z.number(),
+ delta: import_v415.z.string()
+ });
+ var openaiResponsesChunkSchema = import_v415.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
+ responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
- responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- import_zod9.z.object({ type: import_zod9.z.string() }).passthrough()
+ responseReasoningSummaryPartAddedSchema,
+ responseReasoningSummaryTextDeltaSchema,
+ errorChunkSchema,
+ import_v415.z.object({ type: import_v415.z.string() }).loose()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
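
Note: the chunk schemas move from zod to zod/v4, gain item_id fields and new chunk types (error, reasoning summary parts, plus reasoning / web_search_call / computer_call / file_search_call items), and the unknown-chunk fallback switches from .passthrough() to zod v4's .loose(). A standalone sketch of the union-with-loose-fallback pattern (schema names and the sample event are illustrative):

import { z } from 'zod/v4';

const errorChunk = z.object({
  type: z.literal('error'),
  code: z.string(),
  message: z.string(),
});

const chunkSchema = z.union([
  errorChunk,
  // fallback: unknown chunk types still parse, retaining extra keys
  z.object({ type: z.string() }).loose(),
]);

// an event type the schema has never seen parses via the fallback
const parsed = chunkSchema.parse({ type: 'response.future_event', extra: 1 });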
@@ -2412,6 +3149,9 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
  }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
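
Note: the new reasoning-chunk predicates narrow twice, first on the chunk's type and then on the nested item type. A self-contained TypeScript sketch of the pattern (the types are illustrative, not the package's):

type OutputItemDoneChunk = {
  type: 'response.output_item.done';
  item: { type: 'reasoning'; id: string } | { type: 'message'; id: string };
};
type ErrorChunk = { type: 'error'; message: string };
type Chunk = OutputItemDoneChunk | ErrorChunk;

// first narrowing step: a user-defined type guard on the chunk type
function isOutputItemDone(chunk: Chunk): chunk is OutputItemDoneChunk {
  return chunk.type === 'response.output_item.done';
}

// second narrowing step: only reasoning items pass
function isOutputItemDoneReasoning(chunk: Chunk): boolean {
  return isOutputItemDone(chunk) && chunk.item.type === 'reasoning';
}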
@@ -2424,11 +3164,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_part.added";
+ }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
+ function isErrorChunk(chunk) {
+ return chunk.type === "error";
+ }
  function getResponsesModelConfig(modelId) {
- if (modelId.startsWith("o")) {
+ if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
  isReasoningModel: true,
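
Note: model routing for the Responses path now also treats codex- and computer-use model ids as reasoning models. Restated as a small standalone predicate for clarity (the function name is illustrative):

function isReasoningModelId(modelId: string): boolean {
  return (
    modelId.startsWith('o') ||            // o1, o3, o4-mini, ...
    modelId.startsWith('codex-') ||       // new in 2.0.1
    modelId.startsWith('computer-use')    // new in 2.0.1
  );
}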
@@ -2448,15 +3200,24 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod9.z.object({
- metadata: import_zod9.z.any().nullish(),
- parallelToolCalls: import_zod9.z.boolean().nullish(),
- previousResponseId: import_zod9.z.string().nullish(),
- store: import_zod9.z.boolean().nullish(),
- user: import_zod9.z.string().nullish(),
- reasoningEffort: import_zod9.z.string().nullish(),
- strictSchemas: import_zod9.z.boolean().nullish(),
- instructions: import_zod9.z.string().nullish()
+ function supportsFlexProcessing2(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ function supportsPriorityProcessing2(modelId) {
+ return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ var openaiResponsesProviderOptionsSchema = import_v415.z.object({
+ metadata: import_v415.z.any().nullish(),
+ parallelToolCalls: import_v415.z.boolean().nullish(),
+ previousResponseId: import_v415.z.string().nullish(),
+ store: import_v415.z.boolean().nullish(),
+ user: import_v415.z.string().nullish(),
+ reasoningEffort: import_v415.z.string().nullish(),
+ strictJsonSchema: import_v415.z.boolean().nullish(),
+ instructions: import_v415.z.string().nullish(),
+ reasoningSummary: import_v415.z.string().nullish(),
+ serviceTier: import_v415.z.enum(["auto", "flex", "priority"]).nullish(),
+ include: import_v415.z.array(import_v415.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
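
Note: the Responses provider options rename strictSchemas to strictJsonSchema and add reasoningSummary, serviceTier (gated per model by the supportsFlexProcessing2 / supportsPriorityProcessing2 checks above), and include. A hedged usage sketch, assuming the AI SDK v5 call shape; the prompt and option values are illustrative:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize the migration plan.',
  providerOptions: {
    openai: {
      serviceTier: 'flex',                      // 'flex' is only accepted on o3 / o4-mini
      reasoningSummary: 'auto',                 // value is an assumption, schema allows any string
      include: ['reasoning.encrypted_content'],
      strictJsonSchema: false,                  // renamed from strictSchemas
    },
  },
});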
@@ -2467,7 +3228,11 @@ var openaiResponsesProviderOptionsSchema = import_zod9.z.object({
  OpenAIResponsesLanguageModel,
  OpenAISpeechModel,
  OpenAITranscriptionModel,
+ hasDefaultResponseFormat,
  modelMaxImagesPerCall,
+ openAITranscriptionProviderOptions,
+ openaiCompletionProviderOptions,
+ openaiEmbeddingProviderOptions,
  openaiProviderOptions
  });
  //# sourceMappingURL=index.js.map
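
Note: the internal entry point now also exports hasDefaultResponseFormat and the completion / embedding / transcription provider option schemas. A sketch of importing them, assuming the package's internal subpath export is available; internal APIs carry no semver guarantees and may change between patch releases:

import {
  hasDefaultResponseFormat,
  openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions,
  openAITranscriptionProviderOptions,
} from '@ai-sdk/openai/internal';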