@ai-sdk/openai 2.0.0-canary.9 → 2.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -26,12 +26,12 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);

  // src/openai-provider.ts
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_v45 = require("zod/v4");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -136,7 +136,7 @@ function convertToOpenAIChatMessages({
  type: "file",
  file: {
  filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
+ file_data: `data:application/pdf;base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
  }
  };
  } else {
@@ -165,7 +165,7 @@ function convertToOpenAIChatMessages({
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -181,10 +181,23 @@ function convertToOpenAIChatMessages({
  }
  case "tool": {
  for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
+ content: contentValue
  });
  }
  break;
@@ -198,17 +211,17 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

  // src/map-openai-finish-reason.ts
@@ -229,15 +242,15 @@ function mapOpenAIFinishReason(finishReason) {
  }

  // src/openai-chat-options.ts
- var import_zod = require("zod");
- var openaiProviderOptions = import_zod.z.object({
+ var import_v4 = require("zod/v4");
+ var openaiProviderOptions = import_v4.z.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
  * Accepts a JSON object that maps tokens (specified by their token ID in
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
- logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ logitBias: import_v4.z.record(import_v4.z.coerce.number(), import_v4.z.number()).optional(),
  /**
  * Return the log probabilities of the tokens.
  *
@@ -247,50 +260,71 @@ var openaiProviderOptions = import_zod.z.object({
  * Setting to a number will return the log probabilities of the top n
  * tokens that were generated.
  */
- logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ logprobs: import_v4.z.union([import_v4.z.boolean(), import_v4.z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
- parallelToolCalls: import_zod.z.boolean().optional(),
+ parallelToolCalls: import_v4.z.boolean().optional(),
  /**
  * A unique identifier representing your end-user, which can help OpenAI to
  * monitor and detect abuse.
  */
- user: import_zod.z.string().optional(),
+ user: import_v4.z.string().optional(),
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
- maxCompletionTokens: import_zod.z.number().optional(),
+ maxCompletionTokens: import_v4.z.number().optional(),
  /**
  * Whether to enable persistence in responses API.
  */
- store: import_zod.z.boolean().optional(),
+ store: import_v4.z.boolean().optional(),
  /**
  * Metadata to associate with the request.
  */
- metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ metadata: import_v4.z.record(import_v4.z.string().max(64), import_v4.z.string().max(512)).optional(),
  /**
  * Parameters for prediction mode.
  */
- prediction: import_zod.z.record(import_zod.z.any()).optional()
+ prediction: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: import_v4.z.boolean().optional(),
+ /**
+ * Service tier for the request.
+ * - 'auto': Default service tier
+ * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+ *
+ * @default 'auto'
+ */
+ serviceTier: import_v4.z.enum(["auto", "flex", "priority"]).optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ *
+ * @default false
+ */
+ strictJsonSchema: import_v4.z.boolean().optional()
  });

  // src/openai-error.ts
- var import_zod2 = require("zod");
+ var import_v42 = require("zod/v4");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod2.z.object({
- error: import_zod2.z.object({
- message: import_zod2.z.string(),
+ var openaiErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod2.z.string().nullish(),
- param: import_zod2.z.any().nullish(),
- code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
+ type: import_v42.z.string().nullish(),
+ param: import_v42.z.any().nullish(),
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
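Note on usage: the hunk above turns what used to be constructor-level model settings into per-request provider options, and adds three new fields (`structuredOutputs`, `serviceTier`, `strictJsonSchema`). A minimal sketch of exercising them through the AI SDK's `generateText`; the model id and prompt are placeholders, and the `ai` entry point is assumed from the SDK's public API rather than shown in this diff:

    const { generateText } = require("ai");
    const { openai } = require("@ai-sdk/openai");

    async function main() {
      const { text, warnings } = await generateText({
        model: openai("gpt-4o"), // placeholder model id
        prompt: "Hello!",        // placeholder prompt
        providerOptions: {
          openai: {
            serviceTier: "flex",    // cleared with a warning unless the model supports flex processing
            strictJsonSchema: true, // forwarded as `strict` on json_schema response formats and tool schemas
          },
        },
      });
      console.log(text, warnings);
    }

    main().catch(console.error);

Unsupported combinations do not throw; later hunks show them surfacing as `unsupported-setting` entries in `warnings` instead.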
@@ -298,25 +332,101 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
+
+ // src/tool/file-search.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_v43 = require("zod/v4");
+ var comparisonFilterSchema = import_v43.z.object({
+ key: import_v43.z.string(),
+ type: import_v43.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+ value: import_v43.z.union([import_v43.z.string(), import_v43.z.number(), import_v43.z.boolean()])
+ });
+ var compoundFilterSchema = import_v43.z.object({
+ type: import_v43.z.enum(["and", "or"]),
+ filters: import_v43.z.array(
+ import_v43.z.union([comparisonFilterSchema, import_v43.z.lazy(() => compoundFilterSchema)])
+ )
+ });
+ var filtersSchema = import_v43.z.union([comparisonFilterSchema, compoundFilterSchema]);
+ var fileSearchArgsSchema = import_v43.z.object({
+ /**
+ * List of vector store IDs to search through. If not provided, searches all available vector stores.
+ */
+ vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
+ /**
+ * Maximum number of search results to return. Defaults to 10.
+ */
+ maxNumResults: import_v43.z.number().optional(),
+ /**
+ * Ranking options for the search.
+ */
+ ranking: import_v43.z.object({
+ ranker: import_v43.z.enum(["auto", "default-2024-08-21"]).optional()
+ }).optional(),
+ /**
+ * A filter to apply based on file attributes.
+ */
+ filters: filtersSchema.optional()
+ });
+ var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
+ id: "openai.file_search",
+ name: "file_search",
+ inputSchema: import_v43.z.object({
+ query: import_v43.z.string()
+ })
+ });
+
+ // src/tool/web-search-preview.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_v44 = require("zod/v4");
+ var webSearchPreviewArgsSchema = import_v44.z.object({
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize: import_v44.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation: import_v44.z.object({
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: import_v44.z.literal("approximate"),
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country: import_v44.z.string().optional(),
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city: import_v44.z.string().optional(),
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region: import_v44.z.string().optional(),
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone: import_v44.z.string().optional()
+ }).optional()
+ });
+ var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+ id: "openai.web_search_preview",
+ name: "web_search_preview",
+ inputSchema: import_v44.z.object({})
+ });
+
+ // src/openai-prepare-tools.ts
  function prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -325,18 +435,48 @@ function prepareTools({
  }
  const openaiTools2 = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiTools2.push({
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters,
- strict: structuredOutputs ? true : void 0
+ switch (tool.type) {
+ case "function":
+ openaiTools2.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.inputSchema,
+ strict: structuredOutputs ? strictJsonSchema : void 0
+ }
+ });
+ break;
+ case "provider-defined":
+ switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_num_results: args.maxNumResults,
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+ filters: args.filters
+ });
+ break;
+ }
+ case "openai.web_search_preview": {
+ const args = webSearchPreviewArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "web_search_preview",
+ search_context_size: args.searchContextSize,
+ user_location: args.userLocation
+ });
+ break;
+ }
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
- });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
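Note on usage: `prepareTools` now recognizes the provider-defined `openai.file_search` and `openai.web_search_preview` tools introduced earlier in this diff and maps their camelCase args onto OpenAI's snake_case wire format; unknown tool ids still fall through to an `unsupported-tool` warning. A hedged sketch of supplying these tools; the `openai.tools` accessor and the vector store id are assumptions, since the export path is not part of this hunk:

    const { generateText } = require("ai");
    const { openai } = require("@ai-sdk/openai");

    async function searchDocs() {
      return generateText({
        model: openai("gpt-4o"), // placeholder model id
        prompt: "Find the deployment checklist.",
        tools: {
          // args are validated against fileSearchArgsSchema / webSearchPreviewArgsSchema above
          file_search: openai.tools.fileSearch({
            vectorStoreIds: ["vs_123"], // hypothetical vector store id
            maxNumResults: 5,
            ranking: { ranker: "auto" },
          }),
          web_search_preview: openai.tools.webSearchPreview({
            searchContextSize: "low",
          }),
        },
      });
    }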
@@ -370,29 +510,18 @@ function prepareTools({

  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
- }
- getArgs({
+ async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
@@ -407,20 +536,21 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b;
+ var _a, _b, _c, _d;
  const warnings = [];
- const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiProviderOptions
  })) != null ? _a : {};
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
@@ -434,6 +564,7 @@ var OpenAIChatLanguageModel = class {
  }
  );
  warnings.push(...messageWarnings);
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -449,13 +580,12 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- // TODO improve below:
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
  type: "json_schema",
  json_schema: {
  schema: responseFormat.schema,
- strict: true,
- name: (_b = responseFormat.name) != null ? _b : "response",
+ strict: strictJsonSchema,
+ name: (_d = responseFormat.name) != null ? _d : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
@@ -468,6 +598,7 @@ var OpenAIChatLanguageModel = class {
  metadata: openaiOptions.metadata,
  prediction: openaiOptions.prediction,
  reasoning_effort: openaiOptions.reasoningEffort,
+ service_tier: openaiOptions.serviceTier,
  // messages:
  messages
  };
@@ -541,6 +672,22 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ baseArgs.service_tier = void 0;
+ }
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ });
+ baseArgs.service_tier = void 0;
+ }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
@@ -548,7 +695,8 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs: this.supportsStructuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  });
  return {
  args: {
@@ -560,21 +708,21 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
  openaiChatResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -589,33 +737,32 @@ var OpenAIChatLanguageModel = class {
  for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  }
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
  const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
- outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
  },
  request: { body },
  response: {
@@ -624,41 +771,41 @@ var OpenAIChatLanguageModel = class {
  body: rawResponse
  },
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
  openaiChatChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
+ let isActiveText = false;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -667,7 +814,10 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -687,48 +837,40 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
- }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
  const delta = choice.delta;
  if (delta.content != null) {
+ if (!isActiveText) {
+ controller.enqueue({ type: "text-start", id: "0" });
+ isActiveText = true;
+ }
  controller.enqueue({
- type: "text",
- text: delta.content
+ type: "text-delta",
+ id: "0",
+ delta: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  if (delta.tool_calls != null) {
  for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
@@ -745,39 +887,45 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
  });
  }
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallDelta.id,
+ toolName: toolCallDelta.function.name
+ });
  toolCalls[index] = {
  id: toolCallDelta.id,
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-delta",
+ id: toolCall2.id,
+ delta: toolCall2.function.arguments
  });
  }
- if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
+ if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.hasFinished = true;
  }
@@ -788,23 +936,24 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.hasFinished = true;
  }
@@ -812,10 +961,12 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
+ if (isActiveText) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
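Note on the stream shape: the hunks above replace the flat `text` and `tool-call-delta` parts with id-scoped block events (`text-start`/`text-delta`/`text-end` and `tool-input-start`/`tool-input-delta`/`tool-input-end`), and logprobs now ride along in `providerMetadata.openai.logprobs` instead of a top-level `logprobs` field. A small consumer sketch, assuming `parts` is the async-iterable stream of parts produced by `doStream`:

    // reassemble full text from the id-scoped block events
    async function collectText(parts) {
      const open = new Map(); // block id -> accumulated text
      const done = [];
      for await (const part of parts) {
        if (part.type === "text-start") open.set(, "");
        else if (part.type === "text-delta") open.set(, (open.get( ?? "") + part.delta);
        else if (part.type === "text-end") done.push(open.get(;
      }
      return done.join("");
    }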
@@ -827,96 +978,97 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = import_zod3.z.object({
- prompt_tokens: import_zod3.z.number().nullish(),
- completion_tokens: import_zod3.z.number().nullish(),
- prompt_tokens_details: import_zod3.z.object({
- cached_tokens: import_zod3.z.number().nullish()
+ var openaiTokenUsageSchema = import_v45.z.object({
+ prompt_tokens: import_v45.z.number().nullish(),
+ completion_tokens: import_v45.z.number().nullish(),
+ total_tokens: import_v45.z.number().nullish(),
+ prompt_tokens_details: import_v45.z.object({
+ cached_tokens: import_v45.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod3.z.object({
- reasoning_tokens: import_zod3.z.number().nullish(),
- accepted_prediction_tokens: import_zod3.z.number().nullish(),
- rejected_prediction_tokens: import_zod3.z.number().nullish()
+ completion_tokens_details: import_v45.z.object({
+ reasoning_tokens: import_v45.z.number().nullish(),
+ accepted_prediction_tokens: import_v45.z.number().nullish(),
+ rejected_prediction_tokens: import_v45.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- message: import_zod3.z.object({
- role: import_zod3.z.literal("assistant").nullish(),
- content: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function"),
- function: import_zod3.z.object({
- name: import_zod3.z.string(),
- arguments: import_zod3.z.string()
+ var openaiChatResponseSchema = import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ message: import_v45.z.object({
+ role: import_v45.z.literal("assistant").nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function"),
+ function: import_v45.z.object({
+ name: import_v45.z.string(),
+ arguments: import_v45.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
+ index: import_v45.z.number(),
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullish()
+ finish_reason: import_v45.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- delta: import_zod3.z.object({
- role: import_zod3.z.enum(["assistant"]).nullish(),
- content: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- index: import_zod3.z.number(),
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function").optional(),
- function: import_zod3.z.object({
- name: import_zod3.z.string().nullish(),
- arguments: import_zod3.z.string().nullish()
+ var openaiChatChunkSchema = import_v45.z.union([
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ delta: import_v45.z.object({
+ role: import_v45.z.enum(["assistant"]).nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ index: import_v45.z.number(),
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function").nullish(),
+ function: import_v45.z.object({
+ name: import_v45.z.string().nullish(),
+ arguments: import_v45.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullable().optional(),
- index: import_zod3.z.number()
+ finish_reason: import_v45.z.string().nullish(),
+ index: import_v45.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -926,8 +1078,11 @@ var openaiChatChunkSchema = import_zod3.z.union([
  function isReasoningModel(modelId) {
  return modelId.startsWith("o");
  }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ function supportsFlexProcessing(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ function supportsPriorityProcessing(modelId) {
+ return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -949,29 +1104,37 @@ var reasoningModels = {
  "o1-preview-2024-09-12": {
  systemMessageMode: "remove"
  },
+ o3: {
+ systemMessageMode: "developer"
+ },
+ "o3-2025-04-16": {
+ systemMessageMode: "developer"
+ },
  "o3-mini": {
  systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
  systemMessageMode: "developer"
+ },
+ "o4-mini": {
+ systemMessageMode: "developer"
+ },
+ "o4-mini-2025-04-16": {
+ systemMessageMode: "developer"
  }
  };

  // src/openai-completion-language-model.ts
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_v47 = require("zod/v4");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
  prompt,
- inputFormat,
  user = "user",
  assistant = "assistant"
  }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
  let text = "";
  if (prompt[0].role === "system") {
  text += `${prompt[0].content}
@@ -1040,34 +1203,66 @@ ${user}:`]
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
+ // src/openai-completion-options.ts
+ var import_v46 = require("zod/v4");
+ var openaiCompletionProviderOptions = import_v46.z.object({
+ /**
+ Echo back the prompt in addition to the completion.
+ */
+ echo: import_v46.z.boolean().optional(),
+ /**
+ Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
+ the bias is added to the logits generated by the model prior to sampling.
+ The exact effect will vary per model, but values between -1 and 1 should
+ decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+ token from being generated.
+ */
+ logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
+ /**
+ The suffix that comes after a completion of inserted text.
+ */
+ suffix: import_v46.z.string().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_v46.z.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
+ });

  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
+ this.supportedUrls = {
+ // No URLs are supported for completion models.
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- inputFormat,
+ async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
@@ -1079,9 +1274,22 @@ var OpenAICompletionLanguageModel = class {
  responseFormat,
  tools,
  toolChoice,
- seed
+ seed,
+ providerOptions
  }) {
  const warnings = [];
+ const openaiOptions = {
+ ...await (0, import_provider_utils6.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ }),
+ ...await (0, import_provider_utils6.parseProviderOptions)({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ })
+ };
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -1098,18 +1306,18 @@ var OpenAICompletionLanguageModel = class {
  details: "JSON response format is not supported."
  });
  }
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  return {
  args: {
  // model id:
  model: this.modelId,
  // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
+ echo: openaiOptions.echo,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+ suffix: openaiOptions.suffix,
+ user: openaiOptions.user,
  // standardized settings:
  max_tokens: maxOutputTokens,
  temperature,
@@ -1126,71 +1334,79 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
- const { args, warnings } = this.getArgs(options);
+ var _a, _b, _c;
+ const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils4.postJsonToApi)({
+ } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
  openaiCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  return {
  content: [{ type: "text", text: choice.text }],
  usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  request: { body: args },
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
  body: rawResponse
  },
+ providerMetadata,
  warnings
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(
  openaiCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
+ const providerMetadata = { openai: {} };
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
@@ -1199,6 +1415,9 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1216,34 +1435,36 @@ var OpenAICompletionLanguageModel = class {
  type: "response-metadata",
  ...getResponseMetadata(value)
  });
+ controller.enqueue({ type: "text-start", id: "0" });
  }
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
  usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
- if ((choice == null ? void 0 : choice.text) != null) {
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
  controller.enqueue({
- type: "text",
- text: choice.text
+ type: "text-delta",
+ id: "0",
+ delta: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
+ if (!isFirstChunk) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
+ providerMetadata,
  usage
  });
  }
@@ -1254,78 +1475,89 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod4.z.object({
- id: import_zod4.z.string().nullish(),
- created: import_zod4.z.number().nullish(),
- model: import_zod4.z.string().nullish(),
- choices: import_zod4.z.array(
- import_zod4.z.object({
- text: import_zod4.z.string(),
- finish_reason: import_zod4.z.string(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
+ var usageSchema = import_v47.z.object({
+ prompt_tokens: import_v47.z.number(),
+ completion_tokens: import_v47.z.number(),
+ total_tokens: import_v47.z.number()
+ });
+ var openaiCompletionResponseSchema = import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
- usage: import_zod4.z.object({
- prompt_tokens: import_zod4.z.number(),
- completion_tokens: import_zod4.z.number()
- })
+ usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = import_zod4.z.union([
- import_zod4.z.object({
- id: import_zod4.z.string().nullish(),
- created: import_zod4.z.number().nullish(),
- model: import_zod4.z.string().nullish(),
- choices: import_zod4.z.array(
- import_zod4.z.object({
- text: import_zod4.z.string(),
- finish_reason: import_zod4.z.string().nullish(),
- index: import_zod4.z.number(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_v47.z.union([
+ import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string().nullish(),
+ index: import_v47.z.number(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
- usage: import_zod4.z.object({
- prompt_tokens: import_zod4.z.number(),
- completion_tokens: import_zod4.z.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
 
  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_v49 = require("zod/v4");
+
+ // src/openai-embedding-options.ts
+ var import_v48 = require("zod/v4");
+ var openaiEmbeddingProviderOptions = import_v48.z.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: import_v48.z.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_v48.z.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1334,25 +1566,30 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
+ const openaiOptions = (_a = await (0, import_provider_utils7.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
  const {
  responseHeaders,
  value: response,
  rawValue
- } = await (0, import_provider_utils5.postJsonToApi)({
+ } = await (0, import_provider_utils7.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
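Note (editorial): embedding settings that used to live on the model instance (`dimensions`, `user`) are now passed per call via `providerOptions` and validated against `openaiEmbeddingProviderOptions`. A minimal sketch, assuming the `embed` helper from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { embed } from 'ai';

const { embedding } = await embed({
  model: openai.textEmbeddingModel('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    // forwarded as `dimensions` in the /embeddings request body
    openai: { dimensions: 512 },
  },
});
```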
@@ -1365,32 +1602,33 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ embedding: import_zod5.z.array(import_zod5.z.number()) })),
- usage: import_zod5.z.object({ prompt_tokens: import_zod5.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_v49.z.object({
+ data: import_v49.z.array(import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })),
+ usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
  });
 
  // src/openai-image-model.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod6 = require("zod");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_v410 = require("zod/v4");
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
  "dall-e-3": 1,
- "dall-e-2": 10
+ "dall-e-2": 10,
+ "gpt-image-1": 10
  };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
  // src/openai-image-model.ts
  var OpenAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
- var _a, _b;
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
  return this.config.provider;
@@ -1418,22 +1656,22 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
+ const { value: response, responseHeaders } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
  n,
  size,
  ...(_d = providerOptions.openai) != null ? _d : {},
- response_format: "b64_json"
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
  openaiImageResponseSchema
  ),
  abortSignal,
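Note (editorial): gpt-image-1 is now registered in modelMaxImagesPerCall and in hasDefaultResponseFormat, so the provider no longer forces `response_format: "b64_json"` for it (that model always returns base64). A minimal sketch, assuming the `experimental_generateImage` helper from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  model: openai.image('gpt-image-1'), // up to 10 images per call
  prompt: 'A watercolor fox in the snow',
});
```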
@@ -1446,45 +1684,63 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
- var openaiImageResponseSchema = import_zod6.z.object({
- data: import_zod6.z.array(import_zod6.z.object({ b64_json: import_zod6.z.string() }))
+ var openaiImageResponseSchema = import_v410.z.object({
+ data: import_v410.z.array(
+ import_v410.z.object({ b64_json: import_v410.z.string(), revised_prompt: import_v410.z.string().optional() })
+ )
  });
 
  // src/openai-tools.ts
- var import_zod7 = require("zod");
- var WebSearchPreviewParameters = import_zod7.z.object({});
- function webSearchPreviewTool({
- searchContextSize,
- userLocation
- } = {}) {
- return {
- type: "provider-defined",
- id: "openai.web_search_preview",
- args: {
- searchContextSize,
- userLocation
- },
- parameters: WebSearchPreviewParameters
- };
- }
  var openaiTools = {
- webSearchPreview: webSearchPreviewTool
+ fileSearch,
+ webSearchPreview
  };
 
  // src/openai-transcription-model.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod8 = require("zod");
- var openAIProviderOptionsSchema = import_zod8.z.object({
- include: import_zod8.z.array(import_zod8.z.string()).nullish(),
- language: import_zod8.z.string().nullish(),
- prompt: import_zod8.z.string().nullish(),
- temperature: import_zod8.z.number().min(0).max(1).nullish().default(0),
- timestampGranularities: import_zod8.z.array(import_zod8.z.enum(["word", "segment"])).nullish().default(["segment"])
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_v412 = require("zod/v4");
+
+ // src/openai-transcription-options.ts
+ var import_v411 = require("zod/v4");
+ var openAITranscriptionProviderOptions = import_v411.z.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
+ include: import_v411.z.array(import_v411.z.string()).optional(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
+ language: import_v411.z.string().optional(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
+ prompt: import_v411.z.string().optional(),
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
  });
+
+ // src/openai-transcription-model.ts
  var languageMap = {
  afrikaans: "af",
  arabic: "ar",
@@ -1548,38 +1804,36 @@ var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  audio,
  mediaType,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+ const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema: openAIProviderOptionsSchema
+ schema: openAITranscriptionProviderOptions
  });
  const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils9.convertBase64ToUint8Array)(audio)]);
  formData.append("model", this.modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: (_a = openAIOptions.include) != null ? _a : void 0,
- language: (_b = openAIOptions.language) != null ? _b : void 0,
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
  };
- for (const key in transcriptionModelOptions) {
- const value = transcriptionModelOptions[key];
- if (value !== void 0) {
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
  formData.append(key, String(value));
  }
  }
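Note (editorial): the transcription options moved to a dedicated openAITranscriptionProviderOptions schema, and null/undefined values are now skipped via `value != null`. A minimal sketch, assuming the `experimental_transcribe` helper from the `ai` package and a local audio file:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'),
  providerOptions: {
    openai: {
      language: 'en',                   // ISO-639-1
      timestampGranularities: ['word'], // default is ['segment']
    },
  },
});
```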
@@ -1592,20 +1846,20 @@ var OpenAITranscriptionModel = class {
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { formData, warnings } = this.getArgs(options);
+ const { formData, warnings } = await this.getArgs(options);
  const {
  value: response,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils7.postFormDataToApi)({
+ } = await (0, import_provider_utils9.postFormDataToApi)({
  url: this.config.url({
  path: "/audio/transcriptions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
  formData,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
  openaiTranscriptionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1631,29 +1885,33 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod8.z.object({
- text: import_zod8.z.string(),
- language: import_zod8.z.string().nullish(),
- duration: import_zod8.z.number().nullish(),
- words: import_zod8.z.array(
- import_zod8.z.object({
- word: import_zod8.z.string(),
- start: import_zod8.z.number(),
- end: import_zod8.z.number()
+ var openaiTranscriptionResponseSchema = import_v412.z.object({
+ text: import_v412.z.string(),
+ language: import_v412.z.string().nullish(),
+ duration: import_v412.z.number().nullish(),
+ words: import_v412.z.array(
+ import_v412.z.object({
+ word: import_v412.z.string(),
+ start: import_v412.z.number(),
+ end: import_v412.z.number()
  })
  ).nullish()
  });
 
  // src/responses/openai-responses-language-model.ts
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod9 = require("zod");
+ var import_provider8 = require("@ai-sdk/provider");
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_v414 = require("zod/v4");
 
  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
- function convertToOpenAIResponsesMessages({
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_v413 = require("zod/v4");
+ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
+ var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -1688,7 +1946,7 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c;
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1700,7 +1958,7 @@ function convertToOpenAIResponsesMessages({
  type: "input_image",
  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
  // OpenAI specific extension: image detail
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
@@ -1710,7 +1968,7 @@ function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -1725,34 +1983,97 @@ function convertToOpenAIResponsesMessages({
  break;
  }
  case "assistant": {
+ const reasoningMessages = {};
  for (const part of content) {
  switch (part.type) {
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
  case "tool-call": {
+ if (part.providerExecuted) {
+ break;
+ }
  messages.push({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
  });
  break;
  }
+ case "tool-result": {
+ warnings.push({
+ type: "other",
+ message: `tool result parts in assistant messages are not supported for OpenAI responses`
+ });
+ break;
+ }
+ case "reasoning": {
+ const providerOptions = await (0, import_provider_utils10.parseProviderOptions)({
+ provider: "openai",
+ providerOptions: part.providerOptions,
+ schema: openaiResponsesReasoningProviderOptionsSchema
+ });
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+ if (reasoningId != null) {
+ const existingReasoningMessage = reasoningMessages[reasoningId];
+ const summaryParts = [];
+ if (part.text.length > 0) {
+ summaryParts.push({ type: "summary_text", text: part.text });
+ } else if (existingReasoningMessage !== void 0) {
+ warnings.push({
+ type: "other",
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ if (existingReasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+ summary: summaryParts
+ };
+ messages.push(reasoningMessages[reasoningId]);
+ } else {
+ existingReasoningMessage.summary.push(...summaryParts);
+ }
+ } else {
+ warnings.push({
+ type: "other",
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ break;
+ }
  }
  }
  break;
  }
  case "tool": {
  for (const part of content) {
+ const output = part.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  type: "function_call_output",
  call_id: part.toolCallId,
- output: JSON.stringify(part.result)
+ output: contentValue
  });
  }
  break;
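Note (editorial): tool results are no longer blindly `JSON.stringify(part.result)`; the converter switches on `output.type` ('text' | 'error-text' pass through, 'json' | 'error-json' | 'content' are serialized once). A minimal sketch of a tool whose result flows through as a 'json' output, assuming the `tool` helper from the `ai` package (the diff itself references `tool.inputSchema`, so the v5 field name is confirmed):

```ts
import { tool } from 'ai';
import { z } from 'zod';

const weather = tool({
  description: 'Get the weather for a city',
  inputSchema: z.object({ city: z.string() }),
  // An object result becomes an output of type 'json' and is serialized
  // exactly once, instead of being double-stringified as before.
  execute: async ({ city }) => ({ city, tempC: 21 }),
});
```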
@@ -1765,6 +2086,10 @@ function convertToOpenAIResponsesMessages({
  }
  return { messages, warnings };
  }
+ var openaiResponsesReasoningProviderOptionsSchema = import_v413.z.object({
+ itemId: import_v413.z.string().nullish(),
+ reasoningEncryptedContent: import_v413.z.string().nullish()
+ });
 
  // src/responses/map-openai-responses-finish-reason.ts
  function mapOpenAIResponseFinishReason({
@@ -1789,7 +2114,7 @@ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
  tools,
  toolChoice,
- strict
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -1804,12 +2129,23 @@ function prepareResponsesTools({
  type: "function",
  name: tool.name,
  description: tool.description,
- parameters: tool.parameters,
- strict: strict ? true : void 0
+ parameters: tool.inputSchema,
+ strict: strictJsonSchema
  });
  break;
  case "provider-defined":
  switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_num_results: args.maxNumResults,
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+ filters: args.filters
+ });
+ break;
+ }
  case "openai.web_search_preview":
  openaiTools2.push({
  type: "web_search_preview",
@@ -1839,7 +2175,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools2,
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -1855,15 +2191,16 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "json";
- this.supportsStructuredOutputs = true;
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  maxOutputTokens,
  temperature,
  stopSequences,
@@ -1902,17 +2239,17 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
+ const openaiOptions = await (0, import_provider_utils11.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
  const baseArgs = {
  model: this.modelId,
  input: messages,
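Note (editorial): `strictSchemas` (default true) was replaced by `strictJsonSchema` (default false); the flag is forwarded both to `text.format.strict` and to function tools. A minimal sketch of opting back in, assuming `generateObject` from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';

const { object } = await generateObject({
  model: openai.responses('gpt-4o-2024-08-06'),
  schema: z.object({ name: z.string(), age: z.number() }),
  prompt: 'Generate a fictional user.',
  providerOptions: {
    openai: { strictJsonSchema: true }, // strict JSON schema validation
  },
});
```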
@@ -1923,7 +2260,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict: isStrict,
+ strict: strictJsonSchema,
  name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -1937,9 +2274,18 @@ var OpenAIResponsesLanguageModel = class {
  store: openaiOptions == null ? void 0 : openaiOptions.store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -1962,6 +2308,37 @@ var OpenAIResponsesLanguageModel = class {
  details: "topP is not supported for reasoning models"
  });
  }
+ } else {
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningEffort",
+ details: "reasoningEffort is not supported for non-reasoning models"
+ });
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningSummary",
+ details: "reasoningSummary is not supported for non-reasoning models"
+ });
+ }
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ delete baseArgs.service_tier;
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ });
+ delete baseArgs.service_tier;
  }
  const {
  tools: openaiTools2,
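Note (editorial): `reasoningSummary`, `include`, and `serviceTier` are new Responses provider options; `service_tier` is dropped from the request (with a warning) when the model does not support flex or priority processing. A hedged sketch combining them, assuming `generateText` from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Prove that the square root of 2 is irrational.',
  providerOptions: {
    openai: {
      reasoningEffort: 'high',
      reasoningSummary: 'auto',                 // stream reasoning summaries
      serviceTier: 'flex',                      // only honored for o3 / o4-mini
      include: ['reasoning.encrypted_content'], // round-trip encrypted reasoning
    },
  },
});
```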
@@ -1970,7 +2347,7 @@ var OpenAIResponsesLanguageModel = class {
  } = prepareResponsesTools({
  tools,
  toolChoice,
- strict: isStrict
+ strictJsonSchema
  });
  return {
  args: {
@@ -1982,84 +2359,142 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ const { args: body, warnings } = await this.getArgs(options);
+ const url = this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ });
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils8.postJsonToApi)({
- url: this.config.url({
- path: "/responses",
- modelId: this.modelId
- }),
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ } = await (0, import_provider_utils11.postJsonToApi)({
+ url,
+ headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
- import_zod9.z.object({
- id: import_zod9.z.string(),
- created_at: import_zod9.z.number(),
- model: import_zod9.z.string(),
- output: import_zod9.z.array(
- import_zod9.z.discriminatedUnion("type", [
- import_zod9.z.object({
- type: import_zod9.z.literal("message"),
- role: import_zod9.z.literal("assistant"),
- content: import_zod9.z.array(
- import_zod9.z.object({
- type: import_zod9.z.literal("output_text"),
- text: import_zod9.z.string(),
- annotations: import_zod9.z.array(
- import_zod9.z.object({
- type: import_zod9.z.literal("url_citation"),
- start_index: import_zod9.z.number(),
- end_index: import_zod9.z.number(),
- url: import_zod9.z.string(),
- title: import_zod9.z.string()
+ successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
+ import_v414.z.object({
+ id: import_v414.z.string(),
+ created_at: import_v414.z.number(),
+ error: import_v414.z.object({
+ code: import_v414.z.string(),
+ message: import_v414.z.string()
+ }).nullish(),
+ model: import_v414.z.string(),
+ output: import_v414.z.array(
+ import_v414.z.discriminatedUnion("type", [
+ import_v414.z.object({
+ type: import_v414.z.literal("message"),
+ role: import_v414.z.literal("assistant"),
+ id: import_v414.z.string(),
+ content: import_v414.z.array(
+ import_v414.z.object({
+ type: import_v414.z.literal("output_text"),
+ text: import_v414.z.string(),
+ annotations: import_v414.z.array(
+ import_v414.z.object({
+ type: import_v414.z.literal("url_citation"),
+ start_index: import_v414.z.number(),
+ end_index: import_v414.z.number(),
+ url: import_v414.z.string(),
+ title: import_v414.z.string()
  })
  )
  })
  )
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("function_call"),
- call_id: import_zod9.z.string(),
- name: import_zod9.z.string(),
- arguments: import_zod9.z.string()
+ import_v414.z.object({
+ type: import_v414.z.literal("function_call"),
+ call_id: import_v414.z.string(),
+ name: import_v414.z.string(),
+ arguments: import_v414.z.string(),
+ id: import_v414.z.string()
+ }),
+ import_v414.z.object({
+ type: import_v414.z.literal("web_search_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.string().optional()
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("web_search_call")
+ import_v414.z.object({
+ type: import_v414.z.literal("computer_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.string().optional()
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("computer_call")
+ import_v414.z.object({
+ type: import_v414.z.literal("file_search_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.string().optional()
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("reasoning")
+ import_v414.z.object({
+ type: import_v414.z.literal("reasoning"),
+ id: import_v414.z.string(),
+ encrypted_content: import_v414.z.string().nullish(),
+ summary: import_v414.z.array(
+ import_v414.z.object({
+ type: import_v414.z.literal("summary_text"),
+ text: import_v414.z.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullable(),
- usage: usageSchema
+ incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullable(),
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
+ if (response.error) {
+ throw new import_provider8.APICallError({
+ message: response.error.message,
+ url,
+ requestBodyValues: body,
+ statusCode: 400,
+ responseHeaders,
+ responseBody: rawResponse,
+ isRetryable: false
+ });
+ }
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
+ case "reasoning": {
+ if (part.summary.length === 0) {
+ part.summary.push({ type: "summary_text", text: "" });
+ }
+ for (const summary of part.summary) {
+ content.push({
+ type: "reasoning",
+ text: summary.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id,
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
  type: "source",
  sourceType: "url",
- id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils8.generateId)(),
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils11.generateId)(),
  url: annotation.url,
  title: annotation.title
  });
@@ -2070,10 +2505,71 @@ var OpenAIResponsesLanguageModel = class {
  case "function_call": {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: part.call_id,
  toolName: part.name,
- args: part.arguments
+ input: part.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
+ });
+ break;
+ }
+ case "web_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ result: { status: part.status || "completed" },
+ providerExecuted: true
+ });
+ break;
+ }
+ case "computer_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
+ });
+ break;
+ }
+ case "file_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "file_search",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "file_search",
+ result: {
+ type: "file_search_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
  });
  break;
  }
@@ -2082,12 +2578,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
  },
  request: { body },
  response: {
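Note (editorial): `cachedPromptTokens` and `reasoningTokens` moved out of providerMetadata.openai into the standard usage object (as `cachedInputTokens` and `reasoningTokens`), and `totalTokens` is now always populated. A minimal sketch of reading the new fields, assuming `generateText` from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { usage, providerMetadata } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize the Responses API in one sentence.',
});

console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens, usage.cachedInputTokens); // may be undefined
console.log(providerMetadata?.openai.responseId); // still provider-specific
```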
@@ -2099,28 +2598,26 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
  }
  },
  warnings
  };
  }
  async doStream(options) {
- const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
+ const { args: body, warnings } = await this.getArgs(options);
+ const { responseHeaders, value: response } = await (0, import_provider_utils11.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils11.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -2130,13 +2627,13 @@ var OpenAIResponsesLanguageModel = class {
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
+ const activeReasoning = {};
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -2144,7 +2641,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2158,22 +2658,151 @@ var OpenAIResponsesLanguageModel = class {
  toolCallId: value.item.call_id
  };
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
+ type: "tool-input-start",
+ id: value.item.call_id,
+ toolName: value.item.name
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "web_search_preview",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "web_search_preview"
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "computer_use",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "computer_use"
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-start",
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ activeReasoning[value.item.id] = {
+ encryptedContent: value.item.encrypted_content,
+ summaryParts: [0]
+ };
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item.id}:0`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ } else if (isResponseOutputItemDoneChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.call_id
+ });
+ controller.enqueue({
+ type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- argsTextDelta: value.item.arguments
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ result: {
+ type: "web_search_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
  });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-end",
+ id: value.item.id
+ });
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ const activeReasoningPart = activeReasoning[value.item.id];
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item.id}:${summaryIndex}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ }
+ }
+ });
+ }
+ delete activeReasoning[value.item.id];
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
  if (toolCall != null) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: value.delta
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: value.delta
  });
  }
  } else if (isResponseCreatedChunk(value)) {
@@ -2186,36 +2815,57 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text",
- text: value.delta
+ type: "text-delta",
+ id: value.item_id,
+ delta: value.delta
  });
- } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ if (value.summary_index > 0) {
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ value.summary_index
+ );
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id,
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ }
+ }
+ });
+ }
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.call_id,
- toolName: value.item.name,
- args: value.item.arguments
+ type: "reasoning-delta",
+ id: `${value.item_id}:${value.summary_index}`,
+ delta: value.delta,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id
+ }
+ }
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils11.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
+ } else if (isErrorChunk(value)) {
+ controller.enqueue({ type: "error", error: value });
  }
  },
  flush(controller) {
@@ -2223,13 +2873,9 @@ var OpenAIResponsesLanguageModel = class {
  type: "finish",
  finishReason,
  usage,
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
@@ -2241,87 +2887,151 @@ var OpenAIResponsesLanguageModel = class {
2241
2887
  };
2242
2888
  }
2243
2889
  };
2244
- var usageSchema = import_zod9.z.object({
2245
- input_tokens: import_zod9.z.number(),
2246
- input_tokens_details: import_zod9.z.object({ cached_tokens: import_zod9.z.number().nullish() }).nullish(),
2247
- output_tokens: import_zod9.z.number(),
2248
- output_tokens_details: import_zod9.z.object({ reasoning_tokens: import_zod9.z.number().nullish() }).nullish()
2890
+ var usageSchema2 = import_v414.z.object({
2891
+ input_tokens: import_v414.z.number(),
2892
+ input_tokens_details: import_v414.z.object({ cached_tokens: import_v414.z.number().nullish() }).nullish(),
2893
+ output_tokens: import_v414.z.number(),
2894
+ output_tokens_details: import_v414.z.object({ reasoning_tokens: import_v414.z.number().nullish() }).nullish()
2895
+ });
2896
+ var textDeltaChunkSchema = import_v414.z.object({
2897
+ type: import_v414.z.literal("response.output_text.delta"),
2898
+ item_id: import_v414.z.string(),
2899
+ delta: import_v414.z.string()
2249
2900
  });
2250
- var textDeltaChunkSchema = import_zod9.z.object({
2251
- type: import_zod9.z.literal("response.output_text.delta"),
2252
- delta: import_zod9.z.string()
2901
+ var errorChunkSchema = import_v414.z.object({
2902
+ type: import_v414.z.literal("error"),
2903
+ code: import_v414.z.string(),
2904
+ message: import_v414.z.string(),
2905
+ param: import_v414.z.string().nullish(),
2906
+ sequence_number: import_v414.z.number()
2253
2907
  });
2254
- var responseFinishedChunkSchema = import_zod9.z.object({
2255
- type: import_zod9.z.enum(["response.completed", "response.incomplete"]),
2256
- response: import_zod9.z.object({
2257
- incomplete_details: import_zod9.z.object({ reason: import_zod9.z.string() }).nullish(),
2258
- usage: usageSchema
2908
+ var responseFinishedChunkSchema = import_v414.z.object({
2909
+ type: import_v414.z.enum(["response.completed", "response.incomplete"]),
2910
+ response: import_v414.z.object({
2911
+ incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullish(),
2912
+ usage: usageSchema2
2259
2913
  })
2260
2914
  });
2261
- var responseCreatedChunkSchema = import_zod9.z.object({
2262
- type: import_zod9.z.literal("response.created"),
2263
- response: import_zod9.z.object({
2264
- id: import_zod9.z.string(),
2265
- created_at: import_zod9.z.number(),
2266
- model: import_zod9.z.string()
2915
+ var responseCreatedChunkSchema = import_v414.z.object({
2916
+ type: import_v414.z.literal("response.created"),
2917
+ response: import_v414.z.object({
2918
+ id: import_v414.z.string(),
2919
+ created_at: import_v414.z.number(),
2920
+ model: import_v414.z.string()
2267
2921
  })
2268
2922
  });
2269
- var responseOutputItemDoneSchema = import_zod9.z.object({
2270
- type: import_zod9.z.literal("response.output_item.done"),
2271
- output_index: import_zod9.z.number(),
2272
- item: import_zod9.z.discriminatedUnion("type", [
2273
- import_zod9.z.object({
2274
- type: import_zod9.z.literal("message")
2923
+ var responseOutputItemAddedSchema = import_v414.z.object({
2924
+ type: import_v414.z.literal("response.output_item.added"),
2925
+ output_index: import_v414.z.number(),
2926
+ item: import_v414.z.discriminatedUnion("type", [
2927
+ import_v414.z.object({
2928
+ type: import_v414.z.literal("message"),
2929
+ id: import_v414.z.string()
2930
+ }),
2931
+ import_v414.z.object({
2932
+ type: import_v414.z.literal("reasoning"),
2933
+ id: import_v414.z.string(),
2934
+ encrypted_content: import_v414.z.string().nullish()
2275
2935
  }),
2276
- import_zod9.z.object({
2277
- type: import_zod9.z.literal("function_call"),
2278
- id: import_zod9.z.string(),
2279
- call_id: import_zod9.z.string(),
2280
- name: import_zod9.z.string(),
2281
- arguments: import_zod9.z.string(),
2282
- status: import_zod9.z.literal("completed")
2936
+ import_v414.z.object({
2937
+ type: import_v414.z.literal("function_call"),
2938
+ id: import_v414.z.string(),
2939
+ call_id: import_v414.z.string(),
2940
+ name: import_v414.z.string(),
2941
+ arguments: import_v414.z.string()
2942
+ }),
2943
+ import_v414.z.object({
2944
+ type: import_v414.z.literal("web_search_call"),
2945
+ id: import_v414.z.string(),
2946
+ status: import_v414.z.string()
2947
+ }),
2948
+ import_v414.z.object({
2949
+ type: import_v414.z.literal("computer_call"),
2950
+ id: import_v414.z.string(),
2951
+ status: import_v414.z.string()
2952
+ }),
2953
+ import_v414.z.object({
2954
+ type: import_v414.z.literal("file_search_call"),
2955
+ id: import_v414.z.string(),
2956
+ status: import_v414.z.string()
2283
2957
  })
2284
2958
  ])
2285
2959
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod9.z.string(),
- output_index: import_zod9.z.number(),
- delta: import_zod9.z.string()
- });
- var responseOutputItemAddedSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.output_item.added"),
- output_index: import_zod9.z.number(),
- item: import_zod9.z.discriminatedUnion("type", [
- import_zod9.z.object({
- type: import_zod9.z.literal("message")
+ var responseOutputItemDoneSchema = import_v414.z.object({
+ type: import_v414.z.literal("response.output_item.done"),
+ output_index: import_v414.z.number(),
+ item: import_v414.z.discriminatedUnion("type", [
+ import_v414.z.object({
+ type: import_v414.z.literal("message"),
+ id: import_v414.z.string()
+ }),
+ import_v414.z.object({
+ type: import_v414.z.literal("reasoning"),
+ id: import_v414.z.string(),
+ encrypted_content: import_v414.z.string().nullish()
+ }),
+ import_v414.z.object({
+ type: import_v414.z.literal("function_call"),
+ id: import_v414.z.string(),
+ call_id: import_v414.z.string(),
+ name: import_v414.z.string(),
+ arguments: import_v414.z.string(),
+ status: import_v414.z.literal("completed")
  }),
- import_zod9.z.object({
- type: import_zod9.z.literal("function_call"),
- id: import_zod9.z.string(),
- call_id: import_zod9.z.string(),
- name: import_zod9.z.string(),
- arguments: import_zod9.z.string()
+ import_v414.z.object({
+ type: import_v414.z.literal("web_search_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.literal("completed")
+ }),
+ import_v414.z.object({
+ type: import_v414.z.literal("computer_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.literal("completed")
+ }),
+ import_v414.z.object({
+ type: import_v414.z.literal("file_search_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.literal("completed")
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod9.z.object({
- type: import_zod9.z.literal("response.output_text.annotation.added"),
- annotation: import_zod9.z.object({
- type: import_zod9.z.literal("url_citation"),
- url: import_zod9.z.string(),
- title: import_zod9.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_v414.z.object({
+ type: import_v414.z.literal("response.function_call_arguments.delta"),
+ item_id: import_v414.z.string(),
+ output_index: import_v414.z.number(),
+ delta: import_v414.z.string()
+ });
+ var responseAnnotationAddedSchema = import_v414.z.object({
+ type: import_v414.z.literal("response.output_text.annotation.added"),
+ annotation: import_v414.z.object({
+ type: import_v414.z.literal("url_citation"),
+ url: import_v414.z.string(),
+ title: import_v414.z.string()
  })
  });
- var openaiResponsesChunkSchema = import_zod9.z.union([
+ var responseReasoningSummaryPartAddedSchema = import_v414.z.object({
+ type: import_v414.z.literal("response.reasoning_summary_part.added"),
+ item_id: import_v414.z.string(),
+ summary_index: import_v414.z.number()
+ });
+ var responseReasoningSummaryTextDeltaSchema = import_v414.z.object({
+ type: import_v414.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_v414.z.string(),
+ summary_index: import_v414.z.number(),
+ delta: import_v414.z.string()
+ });
+ var openaiResponsesChunkSchema = import_v414.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
+ responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
- responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- import_zod9.z.object({ type: import_zod9.z.string() }).passthrough()
+ responseReasoningSummaryPartAddedSchema,
+ responseReasoningSummaryTextDeltaSchema,
+ errorChunkSchema,
+ import_v414.z.object({ type: import_v414.z.string() }).loose()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
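
The union above now ends with a loose object fallback: in zod v4, .loose() keeps unrecognized keys (replacing v3's .passthrough()), so stream events the SDK does not yet model still parse instead of failing. A minimal sketch of the same pattern, with illustrative schema names that are not from this package:

    import { z } from "zod/v4";

    // One modeled chunk type plus a loose fallback for everything else.
    const annotationChunk = z.object({
      type: z.literal("response.output_text.annotation.added"),
    });
    const unknownChunk = z.object({ type: z.string() }).loose();
    const chunkSchema = z.union([annotationChunk, unknownChunk]);

    // An event type that is not modeled still parses and keeps its payload:
    const parsed = chunkSchema.parse({ type: "response.some_future_event", detail: 42 });
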
@@ -2330,6 +3040,9 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
  }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
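
isResponseOutputItemDoneReasoningChunk composes the outer event-type guard with a check on the item discriminator. A sketch of the same narrowing pattern in TypeScript, with types assumed for illustration:

    type DoneChunk = {
      type: "response.output_item.done";
      item: { type: "reasoning"; id: string } | { type: "message"; id: string };
    };
    type Chunk = DoneChunk | { type: string };

    // The outer guard narrows the union to the done-chunk shape...
    function isDoneChunk(chunk: Chunk): chunk is DoneChunk {
      return chunk.type === "response.output_item.done";
    }

    // ...so the inner check can safely read chunk.item.
    function isDoneReasoningChunk(chunk: Chunk): boolean {
      return isDoneChunk(chunk) && chunk.item.type === "reasoning";
    }
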
@@ -2342,11 +3055,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_part.added";
+ }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
+ function isErrorChunk(chunk) {
+ return chunk.type === "error";
+ }
  function getResponsesModelConfig(modelId) {
- if (modelId.startsWith("o")) {
+ if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
  isReasoningModel: true,
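
The reasoning-model check now matches the codex- and computer-use prefixes in addition to the o series, so those models receive the reasoning configuration. A sketch of the widened branch (logic taken from the diff; model ids are illustrative):

    function isReasoningModelId(modelId: string): boolean {
      return (
        modelId.startsWith("o") ||           // o1, o3, o4-mini, ...
        modelId.startsWith("codex-") ||      // new in this release
        modelId.startsWith("computer-use")   // new in this release
      );
    }

    isReasoningModelId("o4-mini");     // true
    isReasoningModelId("codex-mini");  // true (illustrative id)
    isReasoningModelId("gpt-4o");      // false
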
@@ -2366,43 +3091,53 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod9.z.object({
- metadata: import_zod9.z.any().nullish(),
- parallelToolCalls: import_zod9.z.boolean().nullish(),
- previousResponseId: import_zod9.z.string().nullish(),
- store: import_zod9.z.boolean().nullish(),
- user: import_zod9.z.string().nullish(),
- reasoningEffort: import_zod9.z.string().nullish(),
- strictSchemas: import_zod9.z.boolean().nullish(),
- instructions: import_zod9.z.string().nullish()
+ function supportsFlexProcessing2(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ function supportsPriorityProcessing2(modelId) {
+ return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ var openaiResponsesProviderOptionsSchema = import_v414.z.object({
+ metadata: import_v414.z.any().nullish(),
+ parallelToolCalls: import_v414.z.boolean().nullish(),
+ previousResponseId: import_v414.z.string().nullish(),
+ store: import_v414.z.boolean().nullish(),
+ user: import_v414.z.string().nullish(),
+ reasoningEffort: import_v414.z.string().nullish(),
+ strictJsonSchema: import_v414.z.boolean().nullish(),
+ instructions: import_v414.z.string().nullish(),
+ reasoningSummary: import_v414.z.string().nullish(),
+ serviceTier: import_v414.z.enum(["auto", "flex", "priority"]).nullish(),
+ include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
  });
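
The responses provider options gain reasoningSummary, serviceTier (with flex limited to o3/o4-mini and priority to gpt-4/o3/o4-mini per the helpers above), and include; strictSchemas is renamed to strictJsonSchema. A hedged sketch of passing the new keys through the AI SDK's per-call provider options; the option names come from the schema above, while the surrounding generateText call is the usual AI SDK shape:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const { text } = await generateText({
      model: openai("o4-mini"),
      prompt: "Summarize the release.",
      providerOptions: {
        openai: {
          serviceTier: "flex",                      // flex: o3 / o4-mini only
          reasoningSummary: "auto",                 // assumed value; schema allows any string
          include: ["reasoning.encrypted_content"], // opt into extra response fields
          strictJsonSchema: true,                   // renamed from strictSchemas
        },
      },
    });
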
 
  // src/openai-speech-model.ts
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
- var import_zod10 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod10.z.object({
- instructions: import_zod10.z.string().nullish(),
- speed: import_zod10.z.number().min(0.25).max(4).default(1).nullish()
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_v415 = require("zod/v4");
+ var OpenAIProviderOptionsSchema = import_v415.z.object({
+ instructions: import_v415.z.string().nullish(),
+ speed: import_v415.z.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  text,
  voice = "alloy",
  outputFormat = "mp3",
  speed,
  instructions,
+ language,
  providerOptions
  }) {
  const warnings = [];
- const openAIOptions = (0, import_provider_utils9.parseProviderOptions)({
+ const openAIOptions = await (0, import_provider_utils12.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -2435,6 +3170,13 @@ var OpenAISpeechModel = class {
  }
  }
  }
+ if (language) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "language",
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
+ });
+ }
  return {
  requestBody,
  warnings
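
The speech model now accepts a language argument but reports it as an unsupported setting instead of throwing. A sketch, assuming the AI SDK's experimental speech API as the call site:

    import { experimental_generateSpeech as generateSpeech } from "ai";
    import { openai } from "@ai-sdk/openai";

    const { audio, warnings } = await generateSpeech({
      model: openai.speech("tts-1"),
      text: "Guten Tag!",
      language: "de", // accepted by the call, but ignored by OpenAI speech models
    });

    console.log(warnings);
    // [{ type: "unsupported-setting", setting: "language", details: "..." }]
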
@@ -2443,20 +3185,20 @@ async doGenerate(options) {
  async doGenerate(options) {
  var _a, _b, _c;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { requestBody, warnings } = this.getArgs(options);
+ const { requestBody, warnings } = await this.getArgs(options);
  const {
  value: audio,
  responseHeaders,
  rawValue: rawResponse
- } = await (0, import_provider_utils9.postJsonToApi)({
+ } = await (0, import_provider_utils12.postJsonToApi)({
  url: this.config.url({
  path: "/audio/speech",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
  body: requestBody,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils9.createBinaryResponseHandler)(),
+ successfulResponseHandler: (0, import_provider_utils12.createBinaryResponseHandler)(),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
@@ -2478,12 +3220,11 @@ var OpenAISpeechModel = class {
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
- var _a, _b, _c;
- const baseURL = (_a = (0, import_provider_utils10.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
- const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
- const providerName = (_c = options.name) != null ? _c : "openai";
+ var _a, _b;
+ const baseURL = (_a = (0, import_provider_utils13.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
+ const providerName = (_b = options.name) != null ? _b : "openai";
  const getHeaders = () => ({
- Authorization: `Bearer ${(0, import_provider_utils10.loadApiKey)({
+ Authorization: `Bearer ${(0, import_provider_utils13.loadApiKey)({
  apiKey: options.apiKey,
  environmentVariableName: "OPENAI_API_KEY",
  description: "OpenAI"
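
createOpenAI no longer reads a compatibility option; strict behavior is the only mode, so the flag must simply be dropped. A sketch of a custom instance using only fields the function reads above:

    import { createOpenAI } from "@ai-sdk/openai";

    const myOpenAI = createOpenAI({
      baseURL: "https://api.openai.com/v1", // default, shown for clarity
      apiKey: process.env.OPENAI_API_KEY,   // falls back to OPENAI_API_KEY
      name: "openai",                       // provider name prefix
      headers: { "X-Debug": "1" },          // merged into every request
      // compatibility: "strict"            // removed in 2.0.x; delete this line
    });
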
@@ -2492,27 +3233,25 @@ function createOpenAI(options = {}) {
  "OpenAI-Project": options.project,
  ...options.headers
  });
- const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
+ const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
  provider: `${providerName}.chat`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
- compatibility,
  fetch: options.fetch
  });
- const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
+ const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
  provider: `${providerName}.completion`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
- compatibility,
  fetch: options.fetch
  });
- const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
+ const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
  provider: `${providerName}.embedding`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
  fetch: options.fetch
  });
- const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
+ const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
  provider: `${providerName}.image`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
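
All model factories drop their second settings parameter; per-model settings now travel via per-call providerOptions instead. A sketch of the new factory signatures, assuming the usual provider accessors:

    import { openai } from "@ai-sdk/openai";

    // 2.0.x: factories take only the model id.
    const chat = openai.chat("gpt-4o");
    const embedding = openai.embedding("text-embedding-3-small");
    const image = openai.image("dall-e-3");

    // canary: openai.chat("gpt-4o", { logitBias: ... }); settings
    // objects like this are no longer accepted at construction time.
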
@@ -2530,19 +3269,13 @@ function createOpenAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createLanguageModel = (modelId, settings) => {
+ const createLanguageModel = (modelId) => {
  if (new.target) {
  throw new Error(
  "The OpenAI model function cannot be called with the new keyword."
  );
  }
- if (modelId === "gpt-3.5-turbo-instruct") {
- return createCompletionModel(
- modelId,
- settings
- );
- }
- return createChatModel(modelId, settings);
+ return createResponsesModel(modelId);
  };
  const createResponsesModel = (modelId) => {
  return new OpenAIResponsesLanguageModel(modelId, {
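
Calling the provider directly now returns a Responses API model, and the gpt-3.5-turbo-instruct special case is gone, so completion and chat models must be requested explicitly. A sketch:

    import { openai } from "@ai-sdk/openai";

    const model = openai("gpt-4o");           // Responses API (new default)
    const chatModel = openai.chat("gpt-4o");  // Chat Completions, now opt-in
    const instruct = openai.completion("gpt-3.5-turbo-instruct"); // no longer automatic
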
@@ -2552,8 +3285,8 @@ function createOpenAI(options = {}) {
  fetch: options.fetch
  });
  };
- const provider = function(modelId, settings) {
- return createLanguageModel(modelId, settings);
+ const provider = function(modelId) {
+ return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
@@ -2571,10 +3304,7 @@ function createOpenAI(options = {}) {
  provider.tools = openaiTools;
  return provider;
  }
- var openai = createOpenAI({
- compatibility: "strict"
- // strict for OpenAI API
- });
+ var openai = createOpenAI();
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  createOpenAI,