@ai-sdk/openai 0.0.36 → 0.0.38

package/dist/index.d.mts CHANGED
@@ -1,6 +1,6 @@
  import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';

- type OpenAIChatModelId = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k-0613' | (string & {});
+ type OpenAIChatModelId = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
  interface OpenAIChatSettings {
  /**
  Modify the likelihood of specified tokens appearing in the completion.
@@ -34,6 +34,18 @@ interface OpenAIChatSettings {
  */
  parallelToolCalls?: boolean;
  /**
+ Whether to use legacy function calling. Defaults to false.
+
+ Required by some open source inference engines which do not support the `tools` API. May also
+ provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
+ which causes `streamObject` to be non-streaming.
+
+ Prefer setting `parallelToolCalls: false` over this option.
+
+ @deprecated this API is supported but deprecated by OpenAI.
+ */
+ useLegacyFunctionCalling?: boolean;
+ /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
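For context, `useLegacyFunctionCalling` is an opt-in chat setting, and `gpt-4o-mini` / `gpt-4o-mini-2024-07-18` are the newly accepted model ids. A minimal usage sketch, assuming the standard `createOpenAI`/`generateText` wiring from the AI SDK (the surrounding app code is not part of this diff):

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// 'gpt-4o-mini' is one of the model ids added in this release.
// useLegacyFunctionCalling switches tool use from the `tools` API to the
// deprecated `functions`/`function_call` API (see the setting's JSDoc above).
const model = openai.chat('gpt-4o-mini', { useLegacyFunctionCalling: true });

const { text } = await generateText({ model, prompt: 'Say hello.' });
```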
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
  import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';

- type OpenAIChatModelId = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k-0613' | (string & {});
+ type OpenAIChatModelId = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
  interface OpenAIChatSettings {
  /**
  Modify the likelihood of specified tokens appearing in the completion.
@@ -34,6 +34,18 @@ interface OpenAIChatSettings {
  */
  parallelToolCalls?: boolean;
  /**
+ Whether to use legacy function calling. Defaults to false.
+
+ Required by some open source inference engines which do not support the `tools` API. May also
+ provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
+ which causes `streamObject` to be non-streaming.
+
+ Prefer setting `parallelToolCalls: false` over this option.
+
+ @deprecated this API is supported but deprecated by OpenAI.
+ */
+ useLegacyFunctionCalling?: boolean;
+ /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
package/dist/index.js CHANGED
@@ -30,13 +30,17 @@ module.exports = __toCommonJS(src_exports);
  var import_provider_utils5 = require("@ai-sdk/provider-utils");

  // src/openai-chat-language-model.ts
- var import_provider = require("@ai-sdk/provider");
+ var import_provider2 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
  var import_zod2 = require("zod");

  // src/convert-to-openai-chat-messages.ts
+ var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
- function convertToOpenAIChatMessages(prompt) {
+ function convertToOpenAIChatMessages({
+ prompt,
+ useLegacyFunctionCalling = false
+ }) {
  const messages = [];
  for (const { role, content } of prompt) {
  switch (role) {
@@ -96,20 +100,41 @@ function convertToOpenAIChatMessages(prompt) {
  }
  }
  }
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
+ if (useLegacyFunctionCalling) {
+ if (toolCalls.length > 1) {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
+ });
+ }
+ messages.push({
+ role: "assistant",
+ content: text,
+ function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
+ });
+ } else {
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
+ }
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
+ if (useLegacyFunctionCalling) {
+ messages.push({
+ role: "function",
+ name: toolResponse.toolName,
+ content: JSON.stringify(toolResponse.result)
+ });
+ } else {
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
+ }
  }
  break;
  }
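The conversion above changes the wire format of assistant tool calls and tool results when the legacy flag is set. A sketch of the two message shapes it produces (tool name, call id, and payloads are hypothetical):

```ts
// Default `tools` API shape emitted by convertToOpenAIChatMessages:
const toolsShape = [
  {
    role: 'assistant',
    content: null,
    tool_calls: [
      {
        id: 'call_1', // hypothetical id
        type: 'function',
        function: { name: 'getWeather', arguments: '{"city":"Berlin"}' },
      },
    ],
  },
  { role: 'tool', tool_call_id: 'call_1', content: '{"temperature":18}' },
];

// Legacy shape with useLegacyFunctionCalling (at most one call per message,
// and results travel as role "function" keyed by tool name):
const legacyShape = [
  {
    role: 'assistant',
    content: null,
    function_call: { name: 'getWeather', arguments: '{"city":"Berlin"}' },
  },
  { role: 'function', name: 'getWeather', content: '{"temperature":18}' },
];
```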
@@ -186,11 +211,34 @@ var OpenAIChatLanguageModel = class {
  maxTokens,
  temperature,
  topP,
+ topK,
  frequencyPenalty,
  presencePenalty,
+ stopSequences,
+ responseFormat,
  seed
  }) {
  const type = mode.type;
+ const warnings = [];
+ if (topK != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topK"
+ });
+ }
+ if (responseFormat != null && responseFormat.type === "json" && responseFormat.schema != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "responseFormat",
+ details: "JSON response format schema is not supported"
+ });
+ }
+ const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
+ if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
+ throw new import_provider2.UnsupportedFunctionalityError({
+ functionality: "useLegacyFunctionCalling with parallelToolCalls"
+ });
+ }
  const baseArgs = {
  // model id:
  model: this.modelId,
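`getArgs` now returns `{ args, warnings }` instead of the bare request body, so unsupported settings such as `topK` are reported instead of silently dropped. A sketch of how a caller might observe this, assuming the standard `generateText` result shape:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Write a haiku.',
  topK: 40, // the OpenAI chat API has no top_k parameter
});

// Expected to contain: { type: 'unsupported-setting', setting: 'topK' }
console.log(result.warnings);
```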
@@ -206,41 +254,69 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
+ stop: stopSequences,
  seed,
+ // response format:
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
  // messages:
- messages: convertToOpenAIChatMessages(prompt)
+ messages: convertToOpenAIChatMessages({
+ prompt,
+ useLegacyFunctionCalling
+ })
  };
  switch (type) {
  case "regular": {
- return { ...baseArgs, ...prepareToolsAndToolChoice(mode) };
+ return {
+ args: {
+ ...baseArgs,
+ ...prepareToolsAndToolChoice({ mode, useLegacyFunctionCalling })
+ },
+ warnings
+ };
  }
  case "object-json": {
  return {
- ...baseArgs,
- response_format: { type: "json_object" }
+ args: {
+ ...baseArgs,
+ response_format: { type: "json_object" }
+ },
+ warnings
  };
  }
  case "object-tool": {
  return {
- ...baseArgs,
- tool_choice: { type: "function", function: { name: mode.tool.name } },
- tools: [
- {
- type: "function",
- function: {
+ args: useLegacyFunctionCalling ? {
+ ...baseArgs,
+ function_call: {
+ name: mode.tool.name
+ },
+ functions: [
+ {
  name: mode.tool.name,
  description: mode.tool.description,
  parameters: mode.tool.parameters
  }
- }
- ]
+ ]
+ } : {
+ ...baseArgs,
+ tool_choice: {
+ type: "function",
+ function: { name: mode.tool.name }
+ },
+ tools: [
+ {
+ type: "function",
+ function: {
+ name: mode.tool.name,
+ description: mode.tool.description,
+ parameters: mode.tool.parameters
+ }
+ }
+ ]
+ },
+ warnings
  };
  }
- case "object-grammar": {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "object-grammar mode"
- });
- }
  default: {
  const _exhaustiveCheck = type;
  throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
@@ -249,7 +325,7 @@ var OpenAIChatLanguageModel = class {
  }
  async doGenerate(options) {
  var _a, _b;
- const args = this.getArgs(options);
+ const { args, warnings } = this.getArgs(options);
  const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
@@ -268,7 +344,14 @@ var OpenAIChatLanguageModel = class {
  const choice = response.choices[0];
  return {
  text: (_a = choice.message.content) != null ? _a : void 0,
- toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+ toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
+ {
+ toolCallType: "function",
+ toolCallId: (0, import_provider_utils3.generateId)(),
+ toolName: choice.message.function_call.name,
+ args: choice.message.function_call.arguments
+ }
+ ] : (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
  var _a2;
  return {
  toolCallType: "function",
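In legacy mode, `doGenerate` normalizes the single `function_call` in the response into the SDK's `toolCalls` array; the legacy API returns no call id, so one is generated. An illustrative sketch (all values hypothetical):

```ts
// Raw legacy response fragment:
const message = {
  function_call: { name: 'getWeather', arguments: '{"city":"Berlin"}' },
};

// Normalized shape returned to the SDK core:
const toolCalls = [
  {
    toolCallType: 'function',
    toolCallId: 'gen_abc123', // stands in for generateId(); OpenAI sends none
    toolName: message.function_call.name,
    args: message.function_call.arguments,
  },
];
```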
@@ -284,12 +367,12 @@ var OpenAIChatLanguageModel = class {
  },
  rawCall: { rawPrompt, rawSettings },
  rawResponse: { headers: responseHeaders },
- warnings: [],
+ warnings,
  logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs)
  };
  }
  async doStream(options) {
- const args = this.getArgs(options);
+ const { args, warnings } = this.getArgs(options);
  const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
@@ -317,6 +400,7 @@ var OpenAIChatLanguageModel = class {
  completionTokens: Number.NaN
  };
  let logprobs;
+ const { useLegacyFunctionCalling } = this.settings;
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -361,24 +445,32 @@ var OpenAIChatLanguageModel = class {
  logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- if (delta.tool_calls != null) {
- for (const toolCallDelta of delta.tool_calls) {
+ const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
+ {
+ type: "function",
+ id: (0, import_provider_utils3.generateId)(),
+ function: delta.function_call,
+ index: 0
+ }
+ ] : delta.tool_calls;
+ if (mappedToolCalls != null) {
+ for (const toolCallDelta of mappedToolCalls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
- throw new import_provider.InvalidResponseDataError({
+ throw new import_provider2.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function' type.`
  });
  }
  if (toolCallDelta.id == null) {
- throw new import_provider.InvalidResponseDataError({
+ throw new import_provider2.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'id' to be a string.`
  });
  }
  if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
- throw new import_provider.InvalidResponseDataError({
+ throw new import_provider2.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
  });
@@ -445,7 +537,7 @@ var OpenAIChatLanguageModel = class {
  ),
  rawCall: { rawPrompt, rawSettings },
  rawResponse: { headers: responseHeaders },
- warnings: []
+ warnings
  };
  }
  };
@@ -454,17 +546,21 @@ var openAIChatResponseSchema = import_zod2.z.object({
  import_zod2.z.object({
  message: import_zod2.z.object({
  role: import_zod2.z.literal("assistant"),
- content: import_zod2.z.string().nullable().optional(),
+ content: import_zod2.z.string().nullish(),
+ function_call: import_zod2.z.object({
+ arguments: import_zod2.z.string(),
+ name: import_zod2.z.string()
+ }).nullish(),
  tool_calls: import_zod2.z.array(
  import_zod2.z.object({
- id: import_zod2.z.string().optional().nullable(),
+ id: import_zod2.z.string().nullish(),
  type: import_zod2.z.literal("function"),
  function: import_zod2.z.object({
  name: import_zod2.z.string(),
  arguments: import_zod2.z.string()
  })
  })
- ).optional()
+ ).nullish()
  }),
  index: import_zod2.z.number(),
  logprobs: import_zod2.z.object({
@@ -480,8 +576,8 @@ var openAIChatResponseSchema = import_zod2.z.object({
  )
  })
  ).nullable()
- }).nullable().optional(),
- finish_reason: import_zod2.z.string().optional().nullable()
+ }).nullish(),
+ finish_reason: import_zod2.z.string().nullish()
  })
  ),
  usage: import_zod2.z.object({
@@ -496,6 +592,10 @@ var openaiChatChunkSchema = import_zod2.z.union([
  delta: import_zod2.z.object({
  role: import_zod2.z.enum(["assistant"]).optional(),
  content: import_zod2.z.string().nullish(),
+ function_call: import_zod2.z.object({
+ name: import_zod2.z.string().optional(),
+ arguments: import_zod2.z.string().optional()
+ }).nullish(),
  tool_calls: import_zod2.z.array(
  import_zod2.z.object({
  index: import_zod2.z.number(),
@@ -533,12 +633,45 @@ var openaiChatChunkSchema = import_zod2.z.union([
  }),
  openAIErrorDataSchema
  ]);
- function prepareToolsAndToolChoice(mode) {
+ function prepareToolsAndToolChoice({
+ mode,
+ useLegacyFunctionCalling = false
+ }) {
  var _a;
  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
  if (tools == null) {
  return { tools: void 0, tool_choice: void 0 };
  }
+ const toolChoice = mode.toolChoice;
+ if (useLegacyFunctionCalling) {
+ const mappedFunctions = tools.map((tool) => ({
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.parameters
+ }));
+ if (toolChoice == null) {
+ return { functions: mappedFunctions, function_call: void 0 };
+ }
+ const type2 = toolChoice.type;
+ switch (type2) {
+ case "auto":
+ case "none":
+ case void 0:
+ return {
+ functions: mappedFunctions,
+ function_call: void 0
+ };
+ case "required":
+ throw new import_provider2.UnsupportedFunctionalityError({
+ functionality: "useLegacyFunctionCalling and toolChoice: required"
+ });
+ default:
+ return {
+ functions: mappedFunctions,
+ function_call: { name: toolChoice.toolName }
+ };
+ }
+ }
  const mappedTools = tools.map((tool) => ({
  type: "function",
  function: {
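In legacy mode the tool list collapses into `functions`, and a named `toolChoice` becomes `function_call: { name }`; `toolChoice: 'required'` has no legacy equivalent and throws. A sketch of the mapping for a hypothetical tool:

```ts
// Hypothetical mode input (tool name and schema are illustrative):
const mode = {
  tools: [
    {
      name: 'getWeather',
      description: 'Look up weather',
      parameters: { type: 'object', properties: {} },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'getWeather' },
};

// prepareToolsAndToolChoice({ mode, useLegacyFunctionCalling: true }) yields:
// {
//   functions: [{ name: 'getWeather', description: 'Look up weather', parameters: {...} }],
//   function_call: { name: 'getWeather' },
// }
// whereas toolChoice { type: 'required' } throws UnsupportedFunctionalityError.
```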
@@ -547,7 +680,6 @@ function prepareToolsAndToolChoice(mode) {
  parameters: tool.parameters
  }
  }));
- const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
  return { tools: mappedTools, tool_choice: void 0 };
  }
@@ -575,12 +707,12 @@ function prepareToolsAndToolChoice(mode) {
  }

  // src/openai-completion-language-model.ts
- var import_provider3 = require("@ai-sdk/provider");
+ var import_provider4 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
  var import_zod3 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
- var import_provider2 = require("@ai-sdk/provider");
+ var import_provider3 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
  prompt,
  inputFormat,
@@ -600,7 +732,7 @@ function convertToOpenAICompletionPrompt({
  for (const { role, content } of prompt) {
  switch (role) {
  case "system": {
- throw new import_provider2.InvalidPromptError({
+ throw new import_provider3.InvalidPromptError({
  message: "Unexpected system message in prompt: ${content}",
  prompt
  });
@@ -612,7 +744,7 @@ function convertToOpenAICompletionPrompt({
  return part.text;
  }
  case "image": {
- throw new import_provider2.UnsupportedFunctionalityError({
+ throw new import_provider3.UnsupportedFunctionalityError({
  functionality: "images"
  });
  }
@@ -631,7 +763,7 @@ ${userMessage}
  return part.text;
  }
  case "tool-call": {
- throw new import_provider2.UnsupportedFunctionalityError({
+ throw new import_provider3.UnsupportedFunctionalityError({
  functionality: "tool-call messages"
  });
  }
@@ -644,7 +776,7 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new import_provider2.UnsupportedFunctionalityError({
+ throw new import_provider3.UnsupportedFunctionalityError({
  functionality: "tool messages"
  });
  }
@@ -696,13 +828,31 @@ var OpenAICompletionLanguageModel = class {
  maxTokens,
  temperature,
  topP,
+ topK,
  frequencyPenalty,
  presencePenalty,
+ stopSequences: userStopSequences,
+ responseFormat,
  seed
  }) {
  var _a;
  const type = mode.type;
+ const warnings = [];
+ if (topK != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topK"
+ });
+ }
+ if (responseFormat != null && responseFormat.type !== "text") {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "responseFormat",
+ details: "JSON response format is not supported."
+ });
+ }
  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  const baseArgs = {
  // model id:
  model: this.modelId,
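The completion model now merges prompt-derived stop sequences (such as the chat-turn markers produced by `convertToOpenAICompletionPrompt`) with the caller's `stopSequences`, and only sends `stop` when the merged list is non-empty. A small sketch, with illustrative sequences:

```ts
const promptStops = ['\nuser:']; // derived from the prompt format
const userStops = ['END'];       // from the stopSequences call option

const stop = [...promptStops, ...userStops];
// request body: stop === ['\nuser:', 'END']; an empty merge is sent as undefined
```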
@@ -722,37 +872,32 @@ var OpenAICompletionLanguageModel = class {
  // prompt:
  prompt: completionPrompt,
  // stop sequences:
- stop: stopSequences
+ stop: stop.length > 0 ? stop : void 0
  };
  switch (type) {
  case "regular": {
  if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new import_provider3.UnsupportedFunctionalityError({
+ throw new import_provider4.UnsupportedFunctionalityError({
  functionality: "tools"
  });
  }
  if (mode.toolChoice) {
- throw new import_provider3.UnsupportedFunctionalityError({
+ throw new import_provider4.UnsupportedFunctionalityError({
  functionality: "toolChoice"
  });
  }
- return baseArgs;
+ return { args: baseArgs, warnings };
  }
  case "object-json": {
- throw new import_provider3.UnsupportedFunctionalityError({
+ throw new import_provider4.UnsupportedFunctionalityError({
  functionality: "object-json mode"
  });
  }
  case "object-tool": {
- throw new import_provider3.UnsupportedFunctionalityError({
+ throw new import_provider4.UnsupportedFunctionalityError({
  functionality: "object-tool mode"
  });
  }
- case "object-grammar": {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "object-grammar mode"
- });
- }
  default: {
  const _exhaustiveCheck = type;
  throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
@@ -760,7 +905,7 @@ var OpenAICompletionLanguageModel = class {
  }
  }
  async doGenerate(options) {
- const args = this.getArgs(options);
+ const { args, warnings } = this.getArgs(options);
  const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
@@ -787,11 +932,11 @@ var OpenAICompletionLanguageModel = class {
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  rawCall: { rawPrompt, rawSettings },
  rawResponse: { headers: responseHeaders },
- warnings: []
+ warnings
  };
  }
  async doStream(options) {
- const args = this.getArgs(options);
+ const { args, warnings } = this.getArgs(options);
  const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
@@ -799,7 +944,7 @@ var OpenAICompletionLanguageModel = class {
  }),
  headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
  body: {
- ...this.getArgs(options),
+ ...args,
  stream: true,
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
@@ -870,7 +1015,7 @@ var OpenAICompletionLanguageModel = class {
  ),
  rawCall: { rawPrompt, rawSettings },
  rawResponse: { headers: responseHeaders },
- warnings: []
+ warnings
  };
  }
  };
@@ -964,7 +1109,7 @@ var OpenAI = class {
  var import_provider_utils7 = require("@ai-sdk/provider-utils");

  // src/openai-embedding-model.ts
- var import_provider4 = require("@ai-sdk/provider");
+ var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
  var import_zod4 = require("zod");
  var OpenAIEmbeddingModel = class {
@@ -991,7 +1136,7 @@ var OpenAIEmbeddingModel = class {
  abortSignal
  }) {
  if (values.length > this.maxEmbeddingsPerCall) {
- throw new import_provider4.TooManyEmbeddingValuesForCallError({
+ throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
  modelId: this.modelId,
  maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,