@ai-sdk/openai 1.3.21 → 2.0.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -6,8 +6,7 @@ import {
 
 // src/openai-chat-language-model.ts
 import {
-  InvalidResponseDataError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+  InvalidResponseDataError
 } from "@ai-sdk/provider";
 import {
   combineHeaders,
@@ -15,18 +14,18 @@ import {
   createJsonResponseHandler,
   generateId,
   isParsableJson,
+  parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";
 
 // src/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+import { convertToBase64 } from "@ai-sdk/provider-utils";
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false,
   systemMessageMode = "system"
 }) {
   const messages = [];
@@ -67,55 +66,71 @@ function convertToOpenAIChatMessages({
         messages.push({
           role: "user",
           content: content.map((part, index) => {
-            var _a, _b, _c, _d;
+            var _a, _b, _c;
             switch (part.type) {
               case "text": {
                 return { type: "text", text: part.text };
               }
-              case "image": {
-                return {
-                  type: "image_url",
-                  image_url: {
-                    url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
-                    // OpenAI specific extension: image detail
-                    detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-                  }
-                };
-              }
               case "file": {
-                if (part.data instanceof URL) {
-                  throw new UnsupportedFunctionalityError({
-                    functionality: "'File content parts with URL data' functionality not supported."
-                  });
-                }
-                switch (part.mimeType) {
-                  case "audio/wav": {
-                    return {
-                      type: "input_audio",
-                      input_audio: { data: part.data, format: "wav" }
-                    };
-                  }
-                  case "audio/mp3":
-                  case "audio/mpeg": {
-                    return {
-                      type: "input_audio",
-                      input_audio: { data: part.data, format: "mp3" }
-                    };
+                if (part.mediaType.startsWith("image/")) {
+                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                  return {
+                    type: "image_url",
+                    image_url: {
+                      url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+                      // OpenAI specific extension: image detail
+                      detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                    }
+                  };
+                } else if (part.mediaType.startsWith("audio/")) {
+                  if (part.data instanceof URL) {
+                    throw new UnsupportedFunctionalityError({
+                      functionality: "audio file parts with URLs"
+                    });
                   }
-                  case "application/pdf": {
-                    return {
-                      type: "file",
-                      file: {
-                        filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
-                        file_data: `data:application/pdf;base64,${part.data}`
-                      }
-                    };
+                  switch (part.mediaType) {
+                    case "audio/wav": {
+                      return {
+                        type: "input_audio",
+                        input_audio: {
+                          data: convertToBase64(part.data),
+                          format: "wav"
+                        }
+                      };
+                    }
+                    case "audio/mp3":
+                    case "audio/mpeg": {
+                      return {
+                        type: "input_audio",
+                        input_audio: {
+                          data: convertToBase64(part.data),
+                          format: "mp3"
+                        }
+                      };
+                    }
+                    default: {
+                      throw new UnsupportedFunctionalityError({
+                        functionality: `audio content parts with media type ${part.mediaType}`
+                      });
+                    }
                   }
-                  default: {
+                } else if (part.mediaType === "application/pdf") {
+                  if (part.data instanceof URL) {
                     throw new UnsupportedFunctionalityError({
-                      functionality: `File content part type ${part.mimeType} in user messages`
+                      functionality: "PDF file parts with URLs"
                     });
                   }
+                  return {
+                    type: "file",
+                    file: {
+                      filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                      file_data: `data:application/pdf;base64,${part.data}`
+                    }
+                  };
+                } else {
+                  throw new UnsupportedFunctionalityError({
+                    functionality: `file part media type ${part.mediaType}`
+                  });
                 }
               }
             }
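
Note on the hunk above: the dedicated `image` part type is gone; images, audio, and PDFs all arrive as `file` parts selected by `mediaType`. A minimal sketch of the v2 prompt shape this converter now consumes (field names taken from the diff; the message itself is illustrative only):

    // Sketch: a LanguageModelV2-style user message with an image file part.
    const message = {
      role: "user",
      content: [
        { type: "text", text: "Describe this image." },
        {
          type: "file",               // replaces the removed `image` part type
          mediaType: "image/png",     // was `mimeType`; "image/*" falls back to image/jpeg
          data: new URL("https://example.com/photo.png"), // or a Uint8Array, encoded via convertToBase64
          providerOptions: { openai: { imageDetail: "low" } } // was `providerMetadata`
        }
      ]
    };
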
@@ -145,41 +160,20 @@ function convertToOpenAIChatMessages({
            }
          }
        }
-        if (useLegacyFunctionCalling) {
-          if (toolCalls.length > 1) {
-            throw new UnsupportedFunctionalityError({
-              functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
-            });
-          }
-          messages.push({
-            role: "assistant",
-            content: text,
-            function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
-          });
-        } else {
-          messages.push({
-            role: "assistant",
-            content: text,
-            tool_calls: toolCalls.length > 0 ? toolCalls : void 0
-          });
-        }
+        messages.push({
+          role: "assistant",
+          content: text,
+          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+        });
        break;
      }
      case "tool": {
        for (const toolResponse of content) {
-          if (useLegacyFunctionCalling) {
-            messages.push({
-              role: "function",
-              name: toolResponse.toolName,
-              content: JSON.stringify(toolResponse.result)
-            });
-          } else {
-            messages.push({
-              role: "tool",
-              tool_call_id: toolResponse.toolCallId,
-              content: JSON.stringify(toolResponse.result)
-            });
-          }
+          messages.push({
+            role: "tool",
+            tool_call_id: toolResponse.toolCallId,
+            content: JSON.stringify(toolResponse.result)
+          });
        }
        break;
      }
@@ -192,17 +186,17 @@ function convertToOpenAIChatMessages({
   return { messages, warnings };
 }
 
-// src/map-openai-chat-logprobs.ts
-function mapOpenAIChatLogProbsOutput(logprobs) {
-  var _a, _b;
-  return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
-    token,
-    logprob,
-    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
-      token: token2,
-      logprob: logprob2
-    })) : []
-  }))) != null ? _b : void 0;
+// src/get-response-metadata.ts
+function getResponseMetadata({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
 }
 
 // src/map-openai-finish-reason.ts
@@ -222,18 +216,75 @@ function mapOpenAIFinishReason(finishReason) {
   }
 }
 
-// src/openai-error.ts
+// src/openai-chat-options.ts
 import { z } from "zod";
+var openaiProviderOptions = z.object({
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: z.record(z.coerce.number(), z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: z.union([z.boolean(), z.number()]).optional(),
+  /**
+   * Whether to enable parallel function calling during tool use. Default to true.
+   */
+  parallelToolCalls: z.boolean().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to
+   * monitor and detect abuse.
+   */
+  user: z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+  /**
+   * Maximum number of completion tokens to generate. Useful for reasoning models.
+   */
+  maxCompletionTokens: z.number().optional(),
+  /**
+   * Whether to enable persistence in responses API.
+   */
+  store: z.boolean().optional(),
+  /**
+   * Metadata to associate with the request.
+   */
+  metadata: z.record(z.string()).optional(),
+  /**
+   * Parameters for prediction mode.
+   */
+  prediction: z.record(z.any()).optional(),
+  /**
+   * Whether to use structured outputs.
+   *
+   * @default true
+   */
+  structuredOutputs: z.boolean().optional()
+});
+
+// src/openai-error.ts
+import { z as z2 } from "zod";
 import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
-var openaiErrorDataSchema = z.object({
-  error: z.object({
-    message: z.string(),
+var openaiErrorDataSchema = z2.object({
+  error: z2.object({
+    message: z2.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type: z.string().nullish(),
-    param: z.any().nullish(),
-    code: z.union([z.string(), z.number()]).nullish()
+    type: z2.string().nullish(),
+    param: z2.any().nullish(),
+    code: z2.union([z2.string(), z2.number()]).nullish()
   })
 });
 var openaiFailedResponseHandler = createJsonErrorResponseHandler({
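
The schema above replaces the old per-model constructor settings: options are now validated per call via `parseProviderOptions`. A hedged sketch of supplying them, assuming the AI SDK v5 `generateText` entry point and a hypothetical model id:

    import { createOpenAI } from "@ai-sdk/openai";
    import { generateText } from "ai";

    const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

    const { text } = await generateText({
      model: openai("gpt-4o"),          // note: no per-model settings object anymore
      prompt: "Hello!",
      providerOptions: {
        openai: {
          logitBias: { "50256": -100 }, // keys are coerced to token ids by the schema
          parallelToolCalls: false,
          user: "user-1234",
          reasoningEffort: "low"        // only meaningful on reasoning models
        }
      }
    });
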
@@ -241,76 +292,19 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
-// src/get-response-metadata.ts
-function getResponseMetadata({
-  id,
-  model,
-  created
-}) {
-  return {
-    id: id != null ? id : void 0,
-    modelId: model != null ? model : void 0,
-    timestamp: created != null ? new Date(created * 1e3) : void 0
-  };
-}
-
 // src/openai-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
 function prepareTools({
-  mode,
-  useLegacyFunctionCalling = false,
+  tools,
+  toolChoice,
   structuredOutputs
 }) {
-  var _a;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0, tool_choice: void 0, toolWarnings };
-  }
-  const toolChoice = mode.toolChoice;
-  if (useLegacyFunctionCalling) {
-    const openaiFunctions = [];
-    for (const tool of tools) {
-      if (tool.type === "provider-defined") {
-        toolWarnings.push({ type: "unsupported-tool", tool });
-      } else {
-        openaiFunctions.push({
-          name: tool.name,
-          description: tool.description,
-          parameters: tool.parameters
-        });
-      }
-    }
-    if (toolChoice == null) {
-      return {
-        functions: openaiFunctions,
-        function_call: void 0,
-        toolWarnings
-      };
-    }
-    const type2 = toolChoice.type;
-    switch (type2) {
-      case "auto":
-      case "none":
-      case void 0:
-        return {
-          functions: openaiFunctions,
-          function_call: void 0,
-          toolWarnings
-        };
-      case "required":
-        throw new UnsupportedFunctionalityError2({
-          functionality: "useLegacyFunctionCalling and toolChoice: required"
-        });
-      default:
-        return {
-          functions: openaiFunctions,
-          function_call: { name: toolChoice.toolName },
-          toolWarnings
-        };
-    }
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
   const openaiTools2 = [];
   for (const tool of tools) {
@@ -329,18 +323,18 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+    return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: openaiTools2, tool_choice: type, toolWarnings };
+      return { tools: openaiTools2, toolChoice: type, toolWarnings };
     case "tool":
       return {
         tools: openaiTools2,
-        tool_choice: {
+        toolChoice: {
           type: "function",
           function: {
             name: toolChoice.toolName
@@ -351,7 +345,7 @@ function prepareTools({
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
      });
    }
  }
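
With the legacy function-calling branch removed, `prepareTools` receives `tools` and `toolChoice` directly from the V2 call options instead of digging them out of `mode`. A sketch of the mapping (internal, unexported function; tool shape per the V2 spec, values illustrative):

    // In: V2 call options.
    const { tools, toolChoice } = prepareTools({
      tools: [{
        type: "function",
        name: "getWeather",
        description: "Get the weather for a city",
        parameters: { type: "object", properties: { city: { type: "string" } } }
      }],
      toolChoice: { type: "tool", toolName: "getWeather" },
      structuredOutputs: true
    });
    // Out: OpenAI request fields.
    // tools      -> [{ type: "function", function: { name: "getWeather", ... } }]
    // toolChoice -> { type: "function", function: { name: "getWeather" } }
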
@@ -359,32 +353,20 @@ function prepareTools({
 
 // src/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
-  }
-  get defaultObjectGenerationMode() {
-    if (isAudioModel(this.modelId)) {
-      return "tool";
-    }
-    return this.supportsStructuredOutputs ? "json" : "tool";
-  }
   get provider() {
     return this.config.provider;
   }
-  get supportsImageUrls() {
-    return !this.settings.downloadImages;
-  }
-  getArgs({
-    mode,
+  async getArgs({
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -393,39 +375,34 @@ var OpenAIChatLanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
-    const type = mode.type;
+    var _a, _b, _c;
     const warnings = [];
+    const openaiOptions = (_a = await parseProviderOptions({
+      provider: "openai",
+      providerOptions,
+      schema: openaiProviderOptions
+    })) != null ? _a : {};
+    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
         setting: "topK"
       });
     }
-    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
       warnings.push({
         type: "unsupported-setting",
         setting: "responseFormat",
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
-    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
-    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "useLegacyFunctionCalling with parallelToolCalls"
-      });
-    }
-    if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "structuredOutputs with useLegacyFunctionCalling"
-      });
-    }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        useLegacyFunctionCalling,
         systemMessageMode: getSystemMessageMode(this.modelId)
       }
     );
@@ -434,35 +411,38 @@ var OpenAIChatLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      logit_bias: this.settings.logitBias,
-      logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-      user: this.settings.user,
-      parallel_tool_calls: this.settings.parallelToolCalls,
+      logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+      user: openaiOptions.user,
+      parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
-        type: "json_schema",
-        json_schema: {
-          schema: responseFormat.schema,
-          strict: true,
-          name: (_a = responseFormat.name) != null ? _a : "response",
-          description: responseFormat.description
-        }
-      } : { type: "json_object" } : void 0,
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+        // TODO convert into provider option
+        structuredOutputs && responseFormat.schema != null ? {
+          type: "json_schema",
+          json_schema: {
+            schema: responseFormat.schema,
+            strict: true,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description
+          }
+        } : { type: "json_object" }
+      ) : void 0,
       stop: stopSequences,
       seed,
       // openai specific settings:
-      // TODO remove in next major version; we auto-map maxTokens now
-      max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
-      store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
-      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
-      prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
-      reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+      // TODO remove in next major version; we auto-map maxOutputTokens now
+      max_completion_tokens: openaiOptions.maxCompletionTokens,
+      store: openaiOptions.store,
+      metadata: openaiOptions.metadata,
+      prediction: openaiOptions.prediction,
+      reasoning_effort: openaiOptions.reasoningEffort,
       // messages:
       messages
     };
@@ -536,85 +516,27 @@ var OpenAIChatLanguageModel = class {
       });
     }
   }
-    switch (type) {
-      case "regular": {
-        const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
-          mode,
-          useLegacyFunctionCalling,
-          structuredOutputs: this.supportsStructuredOutputs
-        });
-        return {
-          args: {
-            ...baseArgs,
-            tools,
-            tool_choice,
-            functions,
-            function_call
-          },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: this.supportsStructuredOutputs && mode.schema != null ? {
-              type: "json_schema",
-              json_schema: {
-                schema: mode.schema,
-                strict: true,
-                name: (_h = mode.name) != null ? _h : "response",
-                description: mode.description
-              }
-            } : { type: "json_object" }
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: useLegacyFunctionCalling ? {
-            ...baseArgs,
-            function_call: {
-              name: mode.tool.name
-            },
-            functions: [
-              {
-                name: mode.tool.name,
-                description: mode.tool.description,
-                parameters: mode.tool.parameters
-              }
-            ]
-          } : {
-            ...baseArgs,
-            tool_choice: {
-              type: "function",
-              function: { name: mode.tool.name }
-            },
-            tools: [
-              {
-                type: "function",
-                function: {
-                  name: mode.tool.name,
-                  description: mode.tool.description,
-                  parameters: mode.tool.parameters,
-                  strict: this.supportsStructuredOutputs ? true : void 0
-                }
-              }
-            ]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
+    const {
+      tools: openaiTools2,
+      toolChoice: openaiToolChoice,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      structuredOutputs
+    });
+    return {
+      args: {
+        ...baseArgs,
+        tools: openaiTools2,
+        tool_choice: openaiToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -633,105 +555,61 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = body;
     const choice = response.choices[0];
-    const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
-    const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
-    const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
+    }
+    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+      content.push({
+        type: "tool-call",
+        toolCallType: "function",
+        toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+        toolName: toolCall.function.name,
+        args: toolCall.function.arguments
+      });
     }
+    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
-      text: (_c = choice.message.content) != null ? _c : void 0,
-      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
-        {
-          toolCallType: "function",
-          toolCallId: generateId(),
-          toolName: choice.message.function_call.name,
-          args: choice.message.function_call.arguments
-        }
-      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
-        var _a2;
-        return {
-          toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
-          toolName: toolCall.function.name,
-          args: toolCall.function.arguments
-        };
-      }),
+      content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
-        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+      },
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      request: { body: JSON.stringify(body) },
-      response: getResponseMetadata(response),
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
   async doStream(options) {
-    if (this.settings.simulateStreaming) {
-      const result = await this.doGenerate(options);
-      const simulatedStream = new ReadableStream({
-        start(controller) {
-          controller.enqueue({ type: "response-metadata", ...result.response });
-          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call-delta",
-                toolCallType: "function",
-                toolCallId: toolCall.toolCallId,
-                toolName: toolCall.toolName,
-                argsTextDelta: toolCall.args
-              });
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
-            }
-          }
-          controller.enqueue({
-            type: "finish",
-            finishReason: result.finishReason,
-            usage: result.usage,
-            logprobs: result.logprobs,
-            providerMetadata: result.providerMetadata
-          });
-          controller.close();
-        }
-      });
-      return {
-        stream: simulatedStream,
-        rawCall: result.rawCall,
-        rawResponse: result.rawResponse,
-        warnings: result.warnings
-      };
-    }
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
-      // only include stream_options when in strict compatibility mode:
-      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
@@ -747,22 +625,23 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: void 0,
-      completionTokens: void 0
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
-    const { useLegacyFunctionCalling } = this.settings;
     const providerMetadata = { openai: {} };
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -782,60 +661,37 @@ var OpenAIChatLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              const {
-                prompt_tokens,
-                completion_tokens,
-                prompt_tokens_details,
-                completion_tokens_details
-              } = value.usage;
-              usage = {
-                promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
-                completionTokens: completion_tokens != null ? completion_tokens : void 0
-              };
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-                providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
               }
-              if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-                providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
               }
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
               finishReason = mapOpenAIFinishReason(choice.finish_reason);
             }
+            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+              providerMetadata.openai.logprobs = choice.logprobs.content;
+            }
             if ((choice == null ? void 0 : choice.delta) == null) {
               return;
             }
             const delta = choice.delta;
             if (delta.content != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: delta.content
+                type: "text",
+                text: delta.content
               });
             }
-            const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-              choice == null ? void 0 : choice.logprobs
-            );
-            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-              if (logprobs === void 0) logprobs = [];
-              logprobs.push(...mappedLogprobs);
-            }
-            const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
-              {
-                type: "function",
-                id: generateId(),
-                function: delta.function_call,
-                index: 0
-              }
-            ] : delta.tool_calls;
-            if (mappedToolCalls != null) {
-              for (const toolCallDelta of mappedToolCalls) {
+            if (delta.tool_calls != null) {
+              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;
                if (toolCalls[index] == null) {
                  if (toolCallDelta.type !== "function") {
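
Two stream changes show up here: the stream now opens with a `stream-start` part carrying the warnings, and text chunks are `{ type: "text", text }` rather than `{ type: "text-delta", textDelta }`. A sketch of consuming the v2 stream (reader-based to stay runtime-neutral; options elided as above):

    const { stream } = await model.doStream(options);
    const reader = stream.getReader();
    for (;;) {
      const { done, value: part } = await reader.read();
      if (done) break;
      switch (part.type) {
        case "stream-start": console.warn(part.warnings); break;   // new in v2
        case "text": process.stdout.write(part.text); break;       // was "text-delta"
        case "finish": console.log(part.usage.totalTokens); break; // usage now includes totals
      }
    }
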
@@ -850,7 +706,7 @@ var OpenAIChatLanguageModel = class {
                     message: `Expected 'id' to be a string.`
                   });
                 }
-                if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+                if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                   throw new InvalidResponseDataError({
                     data: toolCallDelta,
                     message: `Expected 'function.name' to be a string.`
@@ -861,12 +717,12 @@ var OpenAIChatLanguageModel = class {
                   type: "function",
                   function: {
                     name: toolCallDelta.function.name,
-                    arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                    arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                   },
                   hasFinished: false
                 };
                 const toolCall2 = toolCalls[index];
-                if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+                if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                   if (toolCall2.function.arguments.length > 0) {
                     controller.enqueue({
                       type: "tool-call-delta",
@@ -880,7 +736,7 @@ var OpenAIChatLanguageModel = class {
                     controller.enqueue({
                       type: "tool-call",
                       toolCallType: "function",
-                      toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+                      toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                       toolName: toolCall2.function.name,
                       args: toolCall2.function.arguments
                     });
@@ -893,21 +749,21 @@ var OpenAIChatLanguageModel = class {
                 if (toolCall.hasFinished) {
                   continue;
                 }
-                if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
-                  toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+                if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+                  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
                 }
                 controller.enqueue({
                   type: "tool-call-delta",
                   toolCallType: "function",
                   toolCallId: toolCall.id,
                   toolName: toolCall.function.name,
-                  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+                  argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
                 });
-                if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+                    toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                     toolName: toolCall.function.name,
                     args: toolCall.function.arguments
                   });
@@ -917,125 +773,111 @@ var OpenAIChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a, _b;
             controller.enqueue({
               type: "finish",
               finishReason,
-              logprobs,
-              usage: {
-                promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
-                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-              },
+              usage,
               ...providerMetadata != null ? { providerMetadata } : {}
             });
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
-      warnings
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiTokenUsageSchema = z2.object({
-  prompt_tokens: z2.number().nullish(),
-  completion_tokens: z2.number().nullish(),
-  prompt_tokens_details: z2.object({
-    cached_tokens: z2.number().nullish()
+var openaiTokenUsageSchema = z3.object({
+  prompt_tokens: z3.number().nullish(),
+  completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
+  prompt_tokens_details: z3.object({
+    cached_tokens: z3.number().nullish()
   }).nullish(),
-  completion_tokens_details: z2.object({
-    reasoning_tokens: z2.number().nullish(),
-    accepted_prediction_tokens: z2.number().nullish(),
-    rejected_prediction_tokens: z2.number().nullish()
+  completion_tokens_details: z3.object({
+    reasoning_tokens: z3.number().nullish(),
+    accepted_prediction_tokens: z3.number().nullish(),
+    rejected_prediction_tokens: z3.number().nullish()
   }).nullish()
 }).nullish();
-var openaiChatResponseSchema = z2.object({
-  id: z2.string().nullish(),
-  created: z2.number().nullish(),
-  model: z2.string().nullish(),
-  choices: z2.array(
-    z2.object({
-      message: z2.object({
-        role: z2.literal("assistant").nullish(),
-        content: z2.string().nullish(),
-        function_call: z2.object({
-          arguments: z2.string(),
-          name: z2.string()
-        }).nullish(),
-        tool_calls: z2.array(
-          z2.object({
-            id: z2.string().nullish(),
-            type: z2.literal("function"),
-            function: z2.object({
-              name: z2.string(),
-              arguments: z2.string()
+var openaiChatResponseSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      message: z3.object({
+        role: z3.literal("assistant").nullish(),
+        content: z3.string().nullish(),
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string().nullish(),
+            type: z3.literal("function"),
+            function: z3.object({
+              name: z3.string(),
+              arguments: z3.string()
             })
           })
         ).nullish()
       }),
-      index: z2.number(),
-      logprobs: z2.object({
-        content: z2.array(
-          z2.object({
-            token: z2.string(),
-            logprob: z2.number(),
-            top_logprobs: z2.array(
-              z2.object({
-                token: z2.string(),
-                logprob: z2.number()
+      index: z3.number(),
+      logprobs: z3.object({
+        content: z3.array(
+          z3.object({
+            token: z3.string(),
+            logprob: z3.number(),
+            top_logprobs: z3.array(
+              z3.object({
+                token: z3.string(),
+                logprob: z3.number()
               })
             )
           })
-        ).nullable()
+        ).nullish()
      }).nullish(),
-      finish_reason: z2.string().nullish()
+      finish_reason: z3.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
 });
-var openaiChatChunkSchema = z2.union([
-  z2.object({
-    id: z2.string().nullish(),
-    created: z2.number().nullish(),
-    model: z2.string().nullish(),
-    choices: z2.array(
-      z2.object({
-        delta: z2.object({
-          role: z2.enum(["assistant"]).nullish(),
-          content: z2.string().nullish(),
-          function_call: z2.object({
-            name: z2.string().optional(),
-            arguments: z2.string().optional()
-          }).nullish(),
-          tool_calls: z2.array(
-            z2.object({
-              index: z2.number(),
-              id: z2.string().nullish(),
-              type: z2.literal("function").nullish(),
-              function: z2.object({
-                name: z2.string().nullish(),
-                arguments: z2.string().nullish()
+var openaiChatChunkSchema = z3.union([
+  z3.object({
+    id: z3.string().nullish(),
+    created: z3.number().nullish(),
+    model: z3.string().nullish(),
+    choices: z3.array(
+      z3.object({
+        delta: z3.object({
+          role: z3.enum(["assistant"]).nullish(),
+          content: z3.string().nullish(),
+          tool_calls: z3.array(
+            z3.object({
+              index: z3.number(),
+              id: z3.string().nullish(),
+              type: z3.literal("function").nullish(),
+              function: z3.object({
+                name: z3.string().nullish(),
+                arguments: z3.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-        logprobs: z2.object({
-          content: z2.array(
-            z2.object({
-              token: z2.string(),
-              logprob: z2.number(),
-              top_logprobs: z2.array(
-                z2.object({
-                  token: z2.string(),
-                  logprob: z2.number()
+        logprobs: z3.object({
+          content: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number(),
+              top_logprobs: z3.array(
+                z3.object({
+                  token: z3.string(),
+                  logprob: z3.number()
                })
              )
            })
-          ).nullable()
+          ).nullish()
        }).nullish(),
-        finish_reason: z2.string().nullish(),
-        index: z2.number()
+        finish_reason: z3.string().nullish(),
+        index: z3.number()
      })
    ),
    usage: openaiTokenUsageSchema
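
The `logprobs` result field and `mapOpenAIChatLogProbsOutput` are gone; the raw OpenAI logprobs content now travels in provider metadata instead. A hedged sketch, assuming the v5 `generateText` surface and a hypothetical model id:

    const { providerMetadata } = await generateText({
      model: openai("gpt-4o"),
      prompt: "Hi",
      providerOptions: { openai: { logprobs: 2 } }   // top-2 logprobs per token
    });
    console.log(providerMetadata?.openai?.logprobs); // raw `choices[0].logprobs.content`
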
@@ -1045,9 +887,6 @@ var openaiChatChunkSchema = z2.union([
 function isReasoningModel(modelId) {
   return modelId.startsWith("o");
 }
-function isAudioModel(modelId) {
-  return modelId.startsWith("gpt-4o-audio-preview");
-}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -1089,31 +928,25 @@ var reasoningModels = {
 };
 
 // src/openai-completion-language-model.ts
-import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError5
-} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders2,
   createEventSourceResponseHandler as createEventSourceResponseHandler2,
   createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z5 } from "zod";
 
 // src/convert-to-openai-completion-prompt.ts
 import {
   InvalidPromptError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
 } from "@ai-sdk/provider";
 function convertToOpenAICompletionPrompt({
   prompt,
-  inputFormat,
   user = "user",
   assistant = "assistant"
 }) {
-  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-    return { prompt: prompt[0].content[0].text };
-  }
   let text = "";
   if (prompt[0].role === "system") {
     text += `${prompt[0].content}
@@ -1135,13 +968,8 @@ function convertToOpenAICompletionPrompt({
           case "text": {
             return part.text;
           }
-          case "image": {
-            throw new UnsupportedFunctionalityError4({
-              functionality: "images"
-            });
-          }
         }
-      }).join("");
+      }).filter(Boolean).join("");
       text += `${user}:
 ${userMessage}
 
@@ -1155,7 +983,7 @@ ${userMessage}
             return part.text;
           }
           case "tool-call": {
-            throw new UnsupportedFunctionalityError4({
+            throw new UnsupportedFunctionalityError3({
              functionality: "tool-call messages"
            });
          }
@@ -1168,7 +996,7 @@ ${assistantMessage}
        break;
      }
      case "tool": {
-        throw new UnsupportedFunctionalityError4({
+        throw new UnsupportedFunctionalityError3({
          functionality: "tool messages"
        });
      }
@@ -1187,37 +1015,68 @@ ${user}:`]
   };
 }
 
-// src/map-openai-completion-logprobs.ts
-function mapOpenAICompletionLogProbs(logprobs) {
-  return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
-    token,
-    logprob: logprobs.token_logprobs[index],
-    topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
-      ([token2, logprob]) => ({
-        token: token2,
-        logprob
-      })
-    ) : []
-  }));
-}
+// src/openai-completion-options.ts
+import { z as z4 } from "zod";
+var openaiCompletionProviderOptions = z4.object({
+  /**
+  Echo back the prompt in addition to the completion.
+   */
+  echo: z4.boolean().optional(),
+  /**
+  Modify the likelihood of specified tokens appearing in the completion.
+
+  Accepts a JSON object that maps tokens (specified by their token ID in
+  the GPT tokenizer) to an associated bias value from -100 to 100. You
+  can use this tokenizer tool to convert text to token IDs. Mathematically,
+  the bias is added to the logits generated by the model prior to sampling.
+  The exact effect will vary per model, but values between -1 and 1 should
+  decrease or increase likelihood of selection; values like -100 or 100
+  should result in a ban or exclusive selection of the relevant token.
+
+  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+  token from being generated.
+   */
+  logitBias: z4.record(z4.string(), z4.number()).optional(),
+  /**
+  The suffix that comes after a completion of inserted text.
+   */
+  suffix: z4.string().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+   */
+  user: z4.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+   */
+  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
+});
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.defaultObjectGenerationMode = void 0;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   get provider() {
     return this.config.provider;
   }
-  getArgs({
-    mode,
-    inputFormat,
+  async getArgs({
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
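
Because `getArgs` (next hunk) merges options parsed under both the `openai` key and `this.providerOptionsName`, OpenAI-compatible providers built on this class can pass the same completion options under their own name. A sketch for the stock provider, assuming `openai.completion` works as in 1.x and the v5 `generateText` surface:

    const { text } = await generateText({
      model: openai.completion("gpt-3.5-turbo-instruct"),
      prompt: "Write a haiku about code review.",
      providerOptions: {
        openai: { echo: false, suffix: "\n", logprobs: 3, user: "user-1234" }
      }
    });
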
@@ -1225,16 +1084,32 @@ var OpenAICompletionLanguageModel = class {
     presencePenalty,
     stopSequences: userStopSequences,
     responseFormat,
-    seed
+    tools,
+    toolChoice,
+    seed,
+    providerOptions
   }) {
-    var _a;
-    const type = mode.type;
     const warnings = [];
+    const openaiOptions = {
+      ...await parseProviderOptions2({
+        provider: "openai",
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      }),
+      ...await parseProviderOptions2({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      })
+    };
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
+    }
+    if (tools == null ? void 0 : tools.length) {
+      warnings.push({ type: "unsupported-setting", setting: "tools" });
+    }
+    if (toolChoice != null) {
+      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
     }
     if (responseFormat != null && responseFormat.type !== "text") {
       warnings.push({
@@ -1243,61 +1118,36 @@ var OpenAICompletionLanguageModel = class {
         details: "JSON response format is not supported."
       });
     }
-    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-      // model specific settings:
-      echo: this.settings.echo,
-      logit_bias: this.settings.logitBias,
-      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-      suffix: this.settings.suffix,
-      user: this.settings.user,
-      // standardized settings:
-      max_tokens: maxTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
-      seed,
-      // prompt:
-      prompt: completionPrompt,
-      // stop sequences:
-      stop: stop.length > 0 ? stop : void 0
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        echo: openaiOptions.echo,
+        logit_bias: openaiOptions.logitBias,
+        logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+        suffix: openaiOptions.suffix,
+        user: openaiOptions.user,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        seed,
+        // prompt:
+        prompt: completionPrompt,
+        // stop sequences:
+        stop: stop.length > 0 ? stop : void 0
+      },
+      warnings
     };
-    switch (type) {
-      case "regular": {
-        if ((_a = mode.tools) == null ? void 0 : _a.length) {
-          throw new UnsupportedFunctionalityError5({
-            functionality: "tools"
-          });
-        }
-        if (mode.toolChoice) {
-          throw new UnsupportedFunctionalityError5({
-            functionality: "toolChoice"
-          });
-        }
-        return { args: baseArgs, warnings };
-      }
-      case "object-json": {
-        throw new UnsupportedFunctionalityError5({
-          functionality: "object-json mode"
-        });
-      }
-      case "object-tool": {
-        throw new UnsupportedFunctionalityError5({
-          functionality: "object-tool mode"
-        });
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
-    const { args, warnings } = this.getArgs(options);
+    var _a, _b, _c;
+    const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -1316,30 +1166,37 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
-      text: choice.text,
+      content: [{ type: "text", text: choice.text }],
       usage: {
-        promptTokens: response.usage.prompt_tokens,
-        completionTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(response),
-      warnings,
-      request: { body: JSON.stringify(args) }
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      providerMetadata,
+      warnings
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
-      // only include stream_options when in strict compatibility mode:
-      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await postJsonToApi2({
       url: this.config.url({
@@ -1355,17 +1212,20 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const providerMetadata = { openai: {} };
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
          transform(chunk, controller) {
            if (!chunk.success) {
              finishReason = "error";
@@ -1386,87 +1246,79 @@ var OpenAICompletionLanguageModel = class {
1386
1246
  });
1387
1247
  }
1388
1248
  if (value.usage != null) {
1389
- usage = {
1390
- promptTokens: value.usage.prompt_tokens,
1391
- completionTokens: value.usage.completion_tokens
1392
- };
1249
+ usage.inputTokens = value.usage.prompt_tokens;
1250
+ usage.outputTokens = value.usage.completion_tokens;
1251
+ usage.totalTokens = value.usage.total_tokens;
1393
1252
  }
1394
1253
  const choice = value.choices[0];
1395
1254
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
1396
1255
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
1397
1256
  }
1257
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
1258
+ providerMetadata.openai.logprobs = choice.logprobs;
1259
+ }
1398
1260
  if ((choice == null ? void 0 : choice.text) != null) {
1399
1261
  controller.enqueue({
1400
- type: "text-delta",
1401
- textDelta: choice.text
1262
+ type: "text",
1263
+ text: choice.text
1402
1264
  });
1403
1265
  }
1404
- const mappedLogprobs = mapOpenAICompletionLogProbs(
1405
- choice == null ? void 0 : choice.logprobs
1406
- );
1407
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
1408
- if (logprobs === void 0) logprobs = [];
1409
- logprobs.push(...mappedLogprobs);
1410
- }
1411
1266
  },
1412
1267
  flush(controller) {
1413
1268
  controller.enqueue({
1414
1269
  type: "finish",
1415
1270
  finishReason,
1416
- logprobs,
1271
+ providerMetadata,
1417
1272
  usage
1418
1273
  });
1419
1274
  }
1420
1275
  })
1421
1276
  ),
1422
- rawCall: { rawPrompt, rawSettings },
1423
- rawResponse: { headers: responseHeaders },
1424
- warnings,
1425
- request: { body: JSON.stringify(body) }
1277
+ request: { body },
1278
+ response: { headers: responseHeaders }
1426
1279
  };
1427
1280
  }
1428
1281
  };
1429
- var openaiCompletionResponseSchema = z3.object({
1430
- id: z3.string().nullish(),
1431
- created: z3.number().nullish(),
1432
- model: z3.string().nullish(),
1433
- choices: z3.array(
1434
- z3.object({
1435
- text: z3.string(),
1436
- finish_reason: z3.string(),
1437
- logprobs: z3.object({
1438
- tokens: z3.array(z3.string()),
1439
- token_logprobs: z3.array(z3.number()),
1440
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
1282
+ var usageSchema = z5.object({
1283
+ prompt_tokens: z5.number(),
1284
+ completion_tokens: z5.number(),
1285
+ total_tokens: z5.number()
1286
+ });
1287
+ var openaiCompletionResponseSchema = z5.object({
1288
+ id: z5.string().nullish(),
1289
+ created: z5.number().nullish(),
1290
+ model: z5.string().nullish(),
1291
+ choices: z5.array(
1292
+ z5.object({
1293
+ text: z5.string(),
1294
+ finish_reason: z5.string(),
1295
+ logprobs: z5.object({
1296
+ tokens: z5.array(z5.string()),
1297
+ token_logprobs: z5.array(z5.number()),
1298
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
1441
1299
  }).nullish()
1442
1300
  })
1443
1301
  ),
1444
- usage: z3.object({
1445
- prompt_tokens: z3.number(),
1446
- completion_tokens: z3.number()
1447
- })
1302
+ usage: usageSchema.nullish()
1448
1303
  });
1449
- var openaiCompletionChunkSchema = z3.union([
1450
- z3.object({
1451
- id: z3.string().nullish(),
1452
- created: z3.number().nullish(),
1453
- model: z3.string().nullish(),
1454
- choices: z3.array(
1455
- z3.object({
1456
- text: z3.string(),
1457
- finish_reason: z3.string().nullish(),
1458
- index: z3.number(),
1459
- logprobs: z3.object({
1460
- tokens: z3.array(z3.string()),
1461
- token_logprobs: z3.array(z3.number()),
1462
- top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
1304
+ var openaiCompletionChunkSchema = z5.union([
1305
+ z5.object({
1306
+ id: z5.string().nullish(),
1307
+ created: z5.number().nullish(),
1308
+ model: z5.string().nullish(),
1309
+ choices: z5.array(
1310
+ z5.object({
1311
+ text: z5.string(),
1312
+ finish_reason: z5.string().nullish(),
1313
+ index: z5.number(),
1314
+ logprobs: z5.object({
1315
+ tokens: z5.array(z5.string()),
1316
+ token_logprobs: z5.array(z5.number()),
1317
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
1463
1318
  }).nullish()
1464
1319
  })
1465
1320
  ),
1466
- usage: z3.object({
1467
- prompt_tokens: z3.number(),
1468
- completion_tokens: z3.number()
1469
- }).nullish()
1321
+ usage: usageSchema.nullish()
1470
1322
  }),
1471
1323
  openaiErrorDataSchema
1472
1324
  ]);
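The stream now opens with a stream-start part carrying the warnings, emits text parts instead of text-delta, and closes with a finish part that includes usage and providerMetadata; stream_options.include_usage is always sent now that the compatibility mode is gone. A sketch of consuming the v2 stream, assuming the part names enqueued above (the async-iteration of ReadableStream is a runtime assumption, supported in Node 18+):

// Sketch: reading the v2 completion stream assembled above.
const { stream } = await model.doStream(options);
for await (const part of stream) { // assumes ReadableStream async iteration
  switch (part.type) {
    case 'stream-start':
      console.warn(part.warnings); // warnings moved from the return value into the stream
      break;
    case 'text':
      process.stdout.write(part.text);
      break;
    case 'finish':
      console.log(part.finishReason, part.usage, part.providerMetadata?.openai.logprobs);
      break;
  }
}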
@@ -1478,32 +1330,45 @@ import {
  import {
  combineHeaders as combineHeaders3,
  createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z7 } from "zod";
+
+ // src/openai-embedding-options.ts
+ import { z as z6 } from "zod";
+ var openaiEmbeddingProviderOptions = z6.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: z6.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: z6.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1512,7 +1377,16 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await postJsonToApi3({
+ const openaiOptions = (_a = await parseProviderOptions3({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi3({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1522,8 +1396,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler3(
@@ -1535,13 +1409,13 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
  });
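The embedding model no longer takes a settings object; dimensions and user move to per-call provider options validated against openaiEmbeddingProviderOptions above. A migration sketch, assuming the embed helper from the 'ai' package (only the providerOptions shape comes from this diff):

// Sketch of the settings -> providerOptions migration for embeddings.
import { openai } from '@ai-sdk/openai';
import { embed } from 'ai'; // assumed harness

// before (1.x): openai.embedding('text-embedding-3-small', { dimensions: 512 })
const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'), // no settings argument anymore
  value: 'sunny day at the beach',
  providerOptions: {
    // validated against openaiEmbeddingProviderOptions
    openai: { dimensions: 512, user: 'user-123' },
  },
});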
 
  // src/openai-image-model.ts
@@ -1550,7 +1424,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z8 } from "zod";
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1562,15 +1436,14 @@ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
  // src/openai-image-model.ts
  var OpenAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
- var _a, _b;
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
  return this.config.provider;
@@ -1626,30 +1499,84 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
- var openaiImageResponseSchema = z5.object({
- data: z5.array(z5.object({ b64_json: z5.string() }))
+ var openaiImageResponseSchema = z8.object({
+ data: z8.array(
+ z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+ )
  });
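The image model now surfaces one provider-metadata entry per generated image, carrying the API's revised_prompt when present. A sketch of reading it, assuming experimental_generateImage from the 'ai' package (the providerMetadata shape is taken from the mapping above):

// Sketch: reading the new per-image provider metadata.
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai'; // assumed harness

const { providerMetadata } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor fox',
});
// one entry per generated image; null when the API returned no revised prompt
console.log(providerMetadata.openai.images?.[0]?.revisedPrompt);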
 
+ // src/openai-tools.ts
+ import { z as z9 } from "zod";
+ var WebSearchPreviewParameters = z9.object({});
+ function webSearchPreviewTool({
+ searchContextSize,
+ userLocation
+ } = {}) {
+ return {
+ type: "provider-defined",
+ id: "openai.web_search_preview",
+ args: {
+ searchContextSize,
+ userLocation
+ },
+ parameters: WebSearchPreviewParameters
+ };
+ }
+ var openaiTools = {
+ webSearchPreview: webSearchPreviewTool
+ };
+
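The provider-defined web search tool above is exposed as openai.tools.webSearchPreview (see provider.tools later in this file). A usage sketch, assuming generateText from the 'ai' package and a responses model; the tool id and args come from the diff, the 'high' context size value is an assumption about the OpenAI API:

// Sketch: wiring the provider-defined web search tool into a responses-model call.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai'; // assumed harness

const result = await generateText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'What happened in tech news today?',
  tools: {
    web_search_preview: openai.tools.webSearchPreview({ searchContextSize: 'high' }),
  },
});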
  // src/openai-transcription-model.ts
  import {
  combineHeaders as combineHeaders5,
  convertBase64ToUint8Array,
  createJsonResponseHandler as createJsonResponseHandler5,
- parseProviderOptions,
+ parseProviderOptions as parseProviderOptions4,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
- var openAIProviderOptionsSchema = z6.object({
- include: z6.array(z6.string()).nullish(),
- language: z6.string().nullish(),
- prompt: z6.string().nullish(),
- temperature: z6.number().min(0).max(1).nullish().default(0),
- timestampGranularities: z6.array(z6.enum(["word", "segment"])).nullish().default(["segment"])
+ import { z as z11 } from "zod";
+
+ // src/openai-transcription-options.ts
+ import { z as z10 } from "zod";
+ var openAITranscriptionProviderOptions = z10.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
+ include: z10.array(z10.string()).optional(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
+ language: z10.string().optional(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
+ prompt: z10.string().optional(),
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: z10.number().min(0).max(1).default(0).optional(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
  });
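A sketch of supplying the transcription options defined above via providerOptions, assuming experimental_transcribe from the 'ai' package (option names are exactly those of openAITranscriptionProviderOptions; the file path is illustrative):

// Sketch: passing the new transcription provider options.
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai'; // assumed harness
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.wav'),
  providerOptions: {
    openai: { language: 'en', temperature: 0, timestampGranularities: ['word'] },
  },
});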
+
+ // src/openai-transcription-model.ts
  var languageMap = {
  afrikaans: "af",
  arabic: "ar",
@@ -1718,17 +1645,16 @@ var OpenAITranscriptionModel = class {
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  audio,
  mediaType,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = parseProviderOptions({
+ const openAIOptions = await parseProviderOptions4({
  provider: "openai",
  providerOptions,
- schema: openAIProviderOptionsSchema
+ schema: openAITranscriptionProviderOptions
  });
  const formData = new FormData();
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
@@ -1736,15 +1662,14 @@ var OpenAITranscriptionModel = class {
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: (_a = openAIOptions.include) != null ? _a : void 0,
- language: (_b = openAIOptions.language) != null ? _b : void 0,
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
  };
- for (const key in transcriptionModelOptions) {
- const value = transcriptionModelOptions[key];
- if (value !== void 0) {
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
  formData.append(key, String(value));
  }
  }
@@ -1757,7 +1682,7 @@ var OpenAITranscriptionModel = class {
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { formData, warnings } = this.getArgs(options);
+ const { formData, warnings } = await this.getArgs(options);
  const {
  value: response,
  responseHeaders,
@@ -1796,15 +1721,15 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z6.object({
- text: z6.string(),
- language: z6.string().nullish(),
- duration: z6.number().nullish(),
- words: z6.array(
- z6.object({
- word: z6.string(),
- start: z6.number(),
- end: z6.number()
+ var openaiTranscriptionResponseSchema = z11.object({
+ text: z11.string(),
+ language: z11.string().nullish(),
+ duration: z11.number().nullish(),
+ words: z11.array(
+ z11.object({
+ word: z11.string(),
+ start: z11.number(),
+ end: z11.number()
  })
  ).nullish()
  });
@@ -1815,16 +1740,15 @@ import {
  createEventSourceResponseHandler as createEventSourceResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler6,
  generateId as generateId2,
- parseProviderOptions as parseProviderOptions2,
+ parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
+ import { z as z12 } from "zod";
 
  // src/responses/convert-to-openai-responses-messages.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
  function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
@@ -1863,38 +1787,35 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
  }
- case "image": {
- return {
- type: "input_image",
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError6({
- functionality: "File URLs in user messages"
- });
- }
- switch (part.mimeType) {
- case "application/pdf": {
- return {
- type: "input_file",
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- };
- }
- default: {
- throw new UnsupportedFunctionalityError6({
- functionality: "Only PDF files are supported in user messages"
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "input_image",
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ };
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
+ throw new UnsupportedFunctionalityError4({
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "input_file",
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ } else {
+ throw new UnsupportedFunctionalityError4({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
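Image and PDF inputs are now both 'file' parts keyed by mediaType (the old 'image' part type and mimeType field are gone), and per the conversion above PDFs must be inline data because URLs throw. A message-shape sketch, assuming generateText from the 'ai' package as the harness:

// Sketch: a v2 user message with a PDF file part.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai'; // assumed harness
import { readFile } from 'node:fs/promises';

const result = await generateText({
  model: openai.responses('gpt-4o'),
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Summarize this document.' },
        {
          type: 'file', // replaces the old image/mimeType part shapes
          mediaType: 'application/pdf',
          data: await readFile('report.pdf'), // inline data; URL data would throw
          filename: 'report.pdf',
        },
      ],
    },
  ],
});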
@@ -1964,19 +1885,18 @@ function mapOpenAIResponseFinishReason({
 
  // src/responses/openai-responses-prepare-tools.ts
  import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError7
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
  } from "@ai-sdk/provider";
  function prepareResponsesTools({
- mode,
+ tools,
+ toolChoice,
  strict
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
  const openaiTools2 = [];
  for (const tool of tools) {
  switch (tool.type) {
@@ -2009,37 +1929,24 @@ function prepareResponsesTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools2, tool_choice: type, toolWarnings };
- case "tool": {
- if (toolChoice.toolName === "web_search_preview") {
- return {
- tools: openaiTools2,
- tool_choice: {
- type: "web_search_preview"
- },
- toolWarnings
- };
- }
+ return { tools: openaiTools2, toolChoice: type, toolWarnings };
+ case "tool":
  return {
  tools: openaiTools2,
- tool_choice: {
- type: "function",
- name: toolChoice.toolName
- },
+ toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
- }
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError7({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ throw new UnsupportedFunctionalityError5({
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
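The 'tool' branch above maps the name web_search_preview to { type: 'web_search_preview' } rather than a function tool choice; any other name becomes { type: 'function', name }. A sketch of forcing that tool from the caller's side, again assuming generateText from the 'ai' package:

// Sketch: forcing the web search tool via toolChoice.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai'; // assumed harness

const result = await generateText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'Search for the latest AI SDK release notes.',
  tools: {
    web_search_preview: openai.tools.webSearchPreview({}),
  },
  // mapped to { type: 'web_search_preview' } by prepareResponsesTools
  toolChoice: { type: 'tool', toolName: 'web_search_preview' },
});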
@@ -2048,18 +1955,18 @@ function prepareResponsesTools({
  // src/responses/openai-responses-language-model.ts
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = "json";
- this.supportsStructuredOutputs = true;
+ this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- mode,
- maxTokens,
+ async getArgs({
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -2068,24 +1975,19 @@ var OpenAIResponsesLanguageModel = class {
  frequencyPenalty,
  seed,
  prompt,
- providerMetadata,
+ providerOptions,
+ tools,
+ toolChoice,
  responseFormat
  }) {
- var _a, _b, _c;
+ var _a, _b;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
- const type = mode.type;
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
  if (seed != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "seed"
- });
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  if (presencePenalty != null) {
  warnings.push({
@@ -2100,19 +2002,16 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  if (stopSequences != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "stopSequences"
- });
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions2({
+ const openaiOptions = await parseProviderOptions5({
  provider: "openai",
- providerOptions: providerMetadata,
+ providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -2121,7 +2020,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -2173,66 +2072,27 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
- mode,
- strict: isStrict
- // TODO support provider options on tools
- });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- text: {
- format: mode.schema != null ? {
- type: "json_schema",
- strict: isStrict,
- name: (_c = mode.name) != null ? _c : "response",
- description: mode.description,
- schema: mode.schema
- } : { type: "json_object" }
- }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: { type: "function", name: mode.tool.name },
- tools: [
- {
- type: "function",
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: isStrict
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
+ const {
+ tools: openaiTools2,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareResponsesTools({
+ tools,
+ toolChoice,
+ strict: isStrict
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools2,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -2246,123 +2106,132 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z7.object({
- id: z7.string(),
- created_at: z7.number(),
- model: z7.string(),
- output: z7.array(
- z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message"),
- role: z7.literal("assistant"),
- content: z7.array(
- z7.object({
- type: z7.literal("output_text"),
- text: z7.string(),
- annotations: z7.array(
- z7.object({
- type: z7.literal("url_citation"),
- start_index: z7.number(),
- end_index: z7.number(),
- url: z7.string(),
- title: z7.string()
+ z12.object({
+ id: z12.string(),
+ created_at: z12.number(),
+ model: z12.string(),
+ output: z12.array(
+ z12.discriminatedUnion("type", [
+ z12.object({
+ type: z12.literal("message"),
+ role: z12.literal("assistant"),
+ content: z12.array(
+ z12.object({
+ type: z12.literal("output_text"),
+ text: z12.string(),
+ annotations: z12.array(
+ z12.object({
+ type: z12.literal("url_citation"),
+ start_index: z12.number(),
+ end_index: z12.number(),
+ url: z12.string(),
+ title: z12.string()
  })
  )
  })
  )
  }),
- z7.object({
- type: z7.literal("function_call"),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string()
+ z12.object({
+ type: z12.literal("function_call"),
+ call_id: z12.string(),
+ name: z12.string(),
+ arguments: z12.string()
  }),
- z7.object({
- type: z7.literal("web_search_call")
+ z12.object({
+ type: z12.literal("web_search_call")
  }),
- z7.object({
- type: z7.literal("computer_call")
+ z12.object({
+ type: z12.literal("computer_call")
  }),
- z7.object({
- type: z7.literal("reasoning"),
- summary: z7.array(
- z7.object({
- type: z7.literal("summary_text"),
- text: z7.string()
+ z12.object({
+ type: z12.literal("reasoning"),
+ summary: z12.array(
+ z12.object({
+ type: z12.literal("summary_text"),
+ text: z12.string()
  })
  )
  })
  ])
  ),
- incomplete_details: z7.object({ reason: z7.string() }).nullable(),
- usage: usageSchema
+ incomplete_details: z12.object({ reason: z12.string() }).nullable(),
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
- toolCallType: "function",
- toolCallId: output.call_id,
- toolName: output.name,
- args: output.arguments
- }));
- const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
+ const content = [];
+ for (const part of response.output) {
+ switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
+ case "message": {
+ for (const contentPart of part.content) {
+ content.push({
+ type: "text",
+ text: contentPart.text
+ });
+ for (const annotation of contentPart.annotations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
+ break;
+ }
+ case "function_call": {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: part.call_id,
+ toolName: part.name,
+ args: part.arguments
+ });
+ break;
+ }
+ }
+ }
  return {
- text: outputTextElements.map((content) => content.text).join("\n"),
- sources: outputTextElements.flatMap(
- (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2;
- return {
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
- url: annotation.url,
- title: annotation.title
- };
- })
- ),
+ content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
- hasToolCalls: toolCalls.length > 0
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
- reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
- type: "text",
- text: summary.text
- })) : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
- modelId: response.model
+ modelId: response.model,
+ headers: responseHeaders,
+ body: rawResponse
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
- reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
+ responseId: response.id
  }
  },
  warnings
  };
  }
  async doStream(options) {
- const { args: body, warnings } = this.getArgs(options);
+ const { args: body, warnings } = await this.getArgs(options);
  const { responseHeaders, value: response } = await postJsonToApi5({
  url: this.config.url({
  path: "/responses",
@@ -2382,16 +2251,20 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
- let cachedPromptTokens = null;
- let reasoningTokens = null;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
+ };
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  if (!chunk.success) {
@@ -2435,13 +2308,13 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ type: "text",
+ text: value.delta
  });
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
  type: "reasoning",
- textDelta: value.delta
+ text: value.delta
  });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2458,19 +2331,18 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
- url: value.annotation.url,
- title: value.annotation.title
- }
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+ url: value.annotation.url,
+ title: value.annotation.title
  });
  }
  },
@@ -2478,110 +2350,101 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ usage,
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var usageSchema = z7.object({
- input_tokens: z7.number(),
- input_tokens_details: z7.object({ cached_tokens: z7.number().nullish() }).nullish(),
- output_tokens: z7.number(),
- output_tokens_details: z7.object({ reasoning_tokens: z7.number().nullish() }).nullish()
+ var usageSchema2 = z12.object({
+ input_tokens: z12.number(),
+ input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
+ output_tokens: z12.number(),
+ output_tokens_details: z12.object({ reasoning_tokens: z12.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z7.object({
- type: z7.literal("response.output_text.delta"),
- delta: z7.string()
+ var textDeltaChunkSchema = z12.object({
+ type: z12.literal("response.output_text.delta"),
+ delta: z12.string()
  });
- var responseFinishedChunkSchema = z7.object({
- type: z7.enum(["response.completed", "response.incomplete"]),
- response: z7.object({
- incomplete_details: z7.object({ reason: z7.string() }).nullish(),
- usage: usageSchema
+ var responseFinishedChunkSchema = z12.object({
+ type: z12.enum(["response.completed", "response.incomplete"]),
+ response: z12.object({
+ incomplete_details: z12.object({ reason: z12.string() }).nullish(),
+ usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = z7.object({
- type: z7.literal("response.created"),
- response: z7.object({
- id: z7.string(),
- created_at: z7.number(),
- model: z7.string()
+ var responseCreatedChunkSchema = z12.object({
+ type: z12.literal("response.created"),
+ response: z12.object({
+ id: z12.string(),
+ created_at: z12.number(),
+ model: z12.string()
  })
  });
- var responseOutputItemDoneSchema = z7.object({
- type: z7.literal("response.output_item.done"),
- output_index: z7.number(),
- item: z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message")
+ var responseOutputItemDoneSchema = z12.object({
+ type: z12.literal("response.output_item.done"),
+ output_index: z12.number(),
+ item: z12.discriminatedUnion("type", [
+ z12.object({
+ type: z12.literal("message")
  }),
- z7.object({
- type: z7.literal("function_call"),
- id: z7.string(),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string(),
- status: z7.literal("completed")
+ z12.object({
+ type: z12.literal("function_call"),
+ id: z12.string(),
+ call_id: z12.string(),
+ name: z12.string(),
+ arguments: z12.string(),
+ status: z12.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z7.object({
- type: z7.literal("response.function_call_arguments.delta"),
- item_id: z7.string(),
- output_index: z7.number(),
- delta: z7.string()
+ var responseFunctionCallArgumentsDeltaSchema = z12.object({
+ type: z12.literal("response.function_call_arguments.delta"),
+ item_id: z12.string(),
+ output_index: z12.number(),
+ delta: z12.string()
  });
- var responseOutputItemAddedSchema = z7.object({
- type: z7.literal("response.output_item.added"),
- output_index: z7.number(),
- item: z7.discriminatedUnion("type", [
- z7.object({
- type: z7.literal("message")
+ var responseOutputItemAddedSchema = z12.object({
+ type: z12.literal("response.output_item.added"),
+ output_index: z12.number(),
+ item: z12.discriminatedUnion("type", [
+ z12.object({
+ type: z12.literal("message")
  }),
- z7.object({
- type: z7.literal("function_call"),
- id: z7.string(),
- call_id: z7.string(),
- name: z7.string(),
- arguments: z7.string()
+ z12.object({
+ type: z12.literal("function_call"),
+ id: z12.string(),
+ call_id: z12.string(),
+ name: z12.string(),
+ arguments: z12.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z7.object({
- type: z7.literal("response.output_text.annotation.added"),
- annotation: z7.object({
- type: z7.literal("url_citation"),
- url: z7.string(),
- title: z7.string()
+ var responseAnnotationAddedSchema = z12.object({
+ type: z12.literal("response.output_text.annotation.added"),
+ annotation: z12.object({
+ type: z12.literal("url_citation"),
+ url: z12.string(),
+ title: z12.string()
  })
  });
- var responseReasoningSummaryTextDeltaSchema = z7.object({
- type: z7.literal("response.reasoning_summary_text.delta"),
- item_id: z7.string(),
- output_index: z7.number(),
- summary_index: z7.number(),
- delta: z7.string()
+ var responseReasoningSummaryTextDeltaSchema = z12.object({
+ type: z12.literal("response.reasoning_summary_text.delta"),
+ item_id: z12.string(),
+ output_index: z12.number(),
+ summary_index: z12.number(),
+ delta: z12.string()
  });
- var openaiResponsesChunkSchema = z7.union([
+ var openaiResponsesChunkSchema = z12.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2590,7 +2453,7 @@ var openaiResponsesChunkSchema = z7.union([
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- z7.object({ type: z7.string() }).passthrough()
+ z12.object({ type: z12.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2638,50 +2501,29 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z7.object({
- metadata: z7.any().nullish(),
- parallelToolCalls: z7.boolean().nullish(),
- previousResponseId: z7.string().nullish(),
- store: z7.boolean().nullish(),
- user: z7.string().nullish(),
- reasoningEffort: z7.string().nullish(),
- strictSchemas: z7.boolean().nullish(),
- instructions: z7.string().nullish(),
- reasoningSummary: z7.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z12.object({
+ metadata: z12.any().nullish(),
+ parallelToolCalls: z12.boolean().nullish(),
+ previousResponseId: z12.string().nullish(),
+ store: z12.boolean().nullish(),
+ user: z12.string().nullish(),
+ reasoningEffort: z12.string().nullish(),
+ strictSchemas: z12.boolean().nullish(),
+ instructions: z12.string().nullish(),
+ reasoningSummary: z12.string().nullish()
  });
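A sketch of supplying the responses provider options validated above (in v2 they are read from providerOptions rather than providerMetadata). The generateText harness is assumed; the option names mirror openaiResponsesProviderOptionsSchema, and the specific values are illustrative:

// Sketch: responses-model provider options under the v2 API.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai'; // assumed harness

const result = await generateText({
  model: openai.responses('o3-mini'),
  prompt: 'Prove that the square root of 2 is irrational.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      reasoningSummary: 'auto',
      store: false,
      strictSchemas: false,
    },
  },
});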
 
- // src/openai-tools.ts
- import { z as z8 } from "zod";
- var WebSearchPreviewParameters = z8.object({});
- function webSearchPreviewTool({
- searchContextSize,
- userLocation
- } = {}) {
- return {
- type: "provider-defined",
- id: "openai.web_search_preview",
- args: {
- searchContextSize,
- userLocation
- },
- parameters: WebSearchPreviewParameters
- };
- }
- var openaiTools = {
- webSearchPreview: webSearchPreviewTool
- };
-
  // src/openai-speech-model.ts
  import {
  combineHeaders as combineHeaders7,
  createBinaryResponseHandler,
- parseProviderOptions as parseProviderOptions3,
+ parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z9 } from "zod";
- var OpenAIProviderOptionsSchema = z9.object({
- instructions: z9.string().nullish(),
- speed: z9.number().min(0.25).max(4).default(1).nullish()
+ import { z as z13 } from "zod";
+ var OpenAIProviderOptionsSchema = z13.object({
+ instructions: z13.string().nullish(),
+ speed: z13.number().min(0.25).max(4).default(1).nullish()
  });
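A sketch of the speech provider options defined above in use. experimental_generateSpeech from the 'ai' package is an assumed harness; instructions and speed (0.25 to 4, default 1) come from OpenAIProviderOptionsSchema:

// Sketch: speech generation with the OpenAI provider options.
import { openai } from '@ai-sdk/openai';
import { experimental_generateSpeech as generateSpeech } from 'ai'; // assumed harness

const { audio } = await generateSpeech({
  model: openai.speech('tts-1'),
  text: 'Hello from the v2 speech model.',
  voice: 'alloy',
  providerOptions: {
    openai: { instructions: 'Speak slowly and calmly.', speed: 0.8 },
  },
});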
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -2692,7 +2534,7 @@ var OpenAISpeechModel = class {
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  text,
  voice = "alloy",
  outputFormat = "mp3",
@@ -2701,7 +2543,7 @@ var OpenAISpeechModel = class {
  providerOptions
  }) {
  const warnings = [];
- const openAIOptions = parseProviderOptions3({
+ const openAIOptions = await parseProviderOptions6({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -2742,7 +2584,7 @@ var OpenAISpeechModel = class {
  async doGenerate(options) {
  var _a, _b, _c;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { requestBody, warnings } = this.getArgs(options);
+ const { requestBody, warnings } = await this.getArgs(options);
  const {
  value: audio,
  responseHeaders,
@@ -2777,10 +2619,9 @@
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
- var _a, _b, _c;
+ var _a, _b;
  const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
- const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
- const providerName = (_c = options.name) != null ? _c : "openai";
+ const providerName = (_b = options.name) != null ? _b : "openai";
  const getHeaders = () => ({
  Authorization: `Bearer ${loadApiKey({
  apiKey: options.apiKey,
@@ -2791,27 +2632,25 @@ function createOpenAI(options = {}) {
  "OpenAI-Project": options.project,
  ...options.headers
  });
- const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
+ const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
  provider: `${providerName}.chat`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
- compatibility,
  fetch: options.fetch
  });
- const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
+ const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
  provider: `${providerName}.completion`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
- compatibility,
  fetch: options.fetch
  });
- const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
+ const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
  provider: `${providerName}.embedding`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
  fetch: options.fetch
  });
- const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
+ const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
  provider: `${providerName}.image`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
@@ -2829,19 +2668,16 @@ function createOpenAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createLanguageModel = (modelId, settings) => {
+ const createLanguageModel = (modelId) => {
  if (new.target) {
  throw new Error(
  "The OpenAI model function cannot be called with the new keyword."
  );
  }
  if (modelId === "gpt-3.5-turbo-instruct") {
- return createCompletionModel(
- modelId,
- settings
- );
+ return createCompletionModel(modelId);
  }
- return createChatModel(modelId, settings);
+ return createChatModel(modelId);
  };
  const createResponsesModel = (modelId) => {
  return new OpenAIResponsesLanguageModel(modelId, {
@@ -2851,8 +2687,8 @@ function createOpenAI(options = {}) {
  fetch: options.fetch
  });
  };
- const provider = function(modelId, settings) {
- return createLanguageModel(modelId, settings);
+ const provider = function(modelId) {
+ return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
@@ -2870,10 +2706,7 @@ function createOpenAI(options = {}) {
  provider.tools = openaiTools;
  return provider;
  }
- var openai = createOpenAI({
- compatibility: "strict"
- // strict for OpenAI API
- });
+ var openai = createOpenAI();
  export {
  createOpenAI,
  openai
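With the compatibility option removed, createOpenAI no longer distinguishes a 'strict' from a 'compatible' mode: stream_options.include_usage is always sent, and per-model settings objects are gone in favor of providerOptions. A sketch of creating a custom provider instance after this change (the baseURL and name values are illustrative defaults):

// Sketch: v2 provider creation without the removed compatibility flag.
import { createOpenAI } from '@ai-sdk/openai';

const provider = createOpenAI({
  baseURL: 'https://api.openai.com/v1', // the default
  name: 'openai',
  apiKey: process.env.OPENAI_API_KEY,
});

const model = provider('gpt-4o'); // model functions no longer accept a settings argument

// Note: third-party OpenAI-compatible endpoints that reject stream_options
// previously relied on compatibility: 'compatible'; they may need a dedicated
// compatible-provider package under the v2 line instead.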