@ai-sdk/openai 0.0.37 → 0.0.38

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -1,7 +1,7 @@
 // src/openai-chat-language-model.ts
 import {
   InvalidResponseDataError,
-  UnsupportedFunctionalityError
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
 import {
   combineHeaders,
@@ -14,8 +14,14 @@ import {
 import { z as z2 } from "zod";

 // src/convert-to-openai-chat-messages.ts
+import {
+  UnsupportedFunctionalityError
+} from "@ai-sdk/provider";
 import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
-function convertToOpenAIChatMessages(prompt) {
+function convertToOpenAIChatMessages({
+  prompt,
+  useLegacyFunctionCalling = false
+}) {
   const messages = [];
   for (const { role, content } of prompt) {
     switch (role) {
@@ -75,20 +81,41 @@ function convertToOpenAIChatMessages(prompt) {
             }
           }
         }
-        messages.push({
-          role: "assistant",
-          content: text,
-          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
-        });
+        if (useLegacyFunctionCalling) {
+          if (toolCalls.length > 1) {
+            throw new UnsupportedFunctionalityError({
+              functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
+            });
+          }
+          messages.push({
+            role: "assistant",
+            content: text,
+            function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
+          });
+        } else {
+          messages.push({
+            role: "assistant",
+            content: text,
+            tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+          });
+        }
         break;
       }
       case "tool": {
         for (const toolResponse of content) {
-          messages.push({
-            role: "tool",
-            tool_call_id: toolResponse.toolCallId,
-            content: JSON.stringify(toolResponse.result)
-          });
+          if (useLegacyFunctionCalling) {
+            messages.push({
+              role: "function",
+              name: toolResponse.toolName,
+              content: JSON.stringify(toolResponse.result)
+            });
+          } else {
+            messages.push({
+              role: "tool",
+              tool_call_id: toolResponse.toolCallId,
+              content: JSON.stringify(toolResponse.result)
+            });
+          }
         }
         break;
       }
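The legacy branch emits OpenAI's deprecated `function_call` message format, which allows at most one call per assistant message; anything beyond that throws. A minimal sketch of the two shapes this switch selects between (all field values are hypothetical):

```ts
// Default mode: modern tool-calls API.
const modernAssistantMessage = {
  role: "assistant",
  content: "Checking the weather.",
  tool_calls: [
    {
      id: "call_abc123", // hypothetical id from the model
      type: "function",
      function: { name: "getWeather", arguments: '{"city":"Berlin"}' },
    },
  ],
};

// useLegacyFunctionCalling: true, deprecated single-call format.
const legacyAssistantMessage = {
  role: "assistant",
  content: "Checking the weather.",
  function_call: { name: "getWeather", arguments: '{"city":"Berlin"}' },
};

// Tool results likewise become role "function" (keyed by name)
// instead of role "tool" (keyed by tool_call_id).
```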
@@ -165,11 +192,34 @@ var OpenAIChatLanguageModel = class {
     maxTokens,
     temperature,
     topP,
+    topK,
     frequencyPenalty,
     presencePenalty,
+    stopSequences,
+    responseFormat,
     seed
   }) {
     const type = mode.type;
+    const warnings = [];
+    if (topK != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "topK"
+      });
+    }
+    if (responseFormat != null && responseFormat.type === "json" && responseFormat.schema != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "responseFormat",
+        details: "JSON response format schema is not supported"
+      });
+    }
+    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
+    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
+      throw new UnsupportedFunctionalityError2({
+        functionality: "useLegacyFunctionCalling with parallelToolCalls"
+      });
+    }
     const baseArgs = {
       // model id:
       model: this.modelId,
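`getArgs` now reports unsupported settings as call warnings instead of silently ignoring them. A sketch of what a caller would get back when passing both `topK` and a JSON response format with a schema (shapes taken from the diff above):

```ts
const warnings = [
  { type: "unsupported-setting", setting: "topK" },
  {
    type: "unsupported-setting",
    setting: "responseFormat",
    details: "JSON response format schema is not supported",
  },
];
```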
@@ -185,41 +235,69 @@ var OpenAIChatLanguageModel = class {
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
+      stop: stopSequences,
       seed,
+      // response format:
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
       // messages:
-      messages: convertToOpenAIChatMessages(prompt)
+      messages: convertToOpenAIChatMessages({
+        prompt,
+        useLegacyFunctionCalling
+      })
     };
     switch (type) {
       case "regular": {
-        return { ...baseArgs, ...prepareToolsAndToolChoice(mode) };
+        return {
+          args: {
+            ...baseArgs,
+            ...prepareToolsAndToolChoice({ mode, useLegacyFunctionCalling })
+          },
+          warnings
+        };
       }
       case "object-json": {
         return {
-          ...baseArgs,
-          response_format: { type: "json_object" }
+          args: {
+            ...baseArgs,
+            response_format: { type: "json_object" }
+          },
+          warnings
         };
       }
       case "object-tool": {
         return {
-          ...baseArgs,
-          tool_choice: { type: "function", function: { name: mode.tool.name } },
-          tools: [
-            {
-              type: "function",
-              function: {
+          args: useLegacyFunctionCalling ? {
+            ...baseArgs,
+            function_call: {
+              name: mode.tool.name
+            },
+            functions: [
+              {
                 name: mode.tool.name,
                 description: mode.tool.description,
                 parameters: mode.tool.parameters
               }
-            }
-          ]
+            ]
+          } : {
+            ...baseArgs,
+            tool_choice: {
+              type: "function",
+              function: { name: mode.tool.name }
+            },
+            tools: [
+              {
+                type: "function",
+                function: {
+                  name: mode.tool.name,
+                  description: mode.tool.description,
+                  parameters: mode.tool.parameters
+                }
+              }
+            ]
+          },
+          warnings
         };
       }
-      case "object-grammar": {
-        throw new UnsupportedFunctionalityError({
-          functionality: "object-grammar mode"
-        });
-      }
       default: {
         const _exhaustiveCheck = type;
         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
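Note the changed return contract: every branch of the switch now yields `{ args, warnings }` rather than the bare request body. An illustrative type for the new shape (the type names here are ours, not the package's):

```ts
type GetArgsResult = {
  // JSON body for POST /chat/completions
  args: Record<string, unknown>;
  // non-fatal notices collected while building the body
  warnings: Array<{
    type: "unsupported-setting";
    setting: string;
    details?: string;
  }>;
};
```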
@@ -228,7 +306,7 @@ var OpenAIChatLanguageModel = class {
   }
   async doGenerate(options) {
     var _a, _b;
-    const args = this.getArgs(options);
+    const { args, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
         path: "/chat/completions",
@@ -247,7 +325,14 @@ var OpenAIChatLanguageModel = class {
     const choice = response.choices[0];
     return {
       text: (_a = choice.message.content) != null ? _a : void 0,
-      toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
+        {
+          toolCallType: "function",
+          toolCallId: generateId(),
+          toolName: choice.message.function_call.name,
+          args: choice.message.function_call.arguments
+        }
+      ] : (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
         var _a2;
         return {
           toolCallType: "function",
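A legacy response carries a single `message.function_call` with no id, so `doGenerate` mints one to fit the SDK's uniform tool-call shape. A sketch of that normalization; the response fragment is hypothetical, and we assume `generateId` is the helper exported by `@ai-sdk/provider-utils`:

```ts
import { generateId } from "@ai-sdk/provider-utils";

// Hypothetical legacy completion fragment:
const message = {
  role: "assistant",
  content: null,
  function_call: { name: "getWeather", arguments: '{"city":"Berlin"}' },
};

// Normalized into the SDK's tool-call shape:
const toolCalls = [
  {
    toolCallType: "function",
    toolCallId: generateId(), // legacy API returns no id, so one is minted
    toolName: message.function_call.name,
    args: message.function_call.arguments,
  },
];
```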
@@ -263,12 +348,12 @@ var OpenAIChatLanguageModel = class {
       },
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
-      warnings: [],
+      warnings,
       logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs)
     };
   }
   async doStream(options) {
-    const args = this.getArgs(options);
+    const { args, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
         path: "/chat/completions",
@@ -296,6 +381,7 @@ var OpenAIChatLanguageModel = class {
       completionTokens: Number.NaN
     };
     let logprobs;
+    const { useLegacyFunctionCalling } = this.settings;
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -340,8 +426,16 @@ var OpenAIChatLanguageModel = class {
             logprobs = [];
             logprobs.push(...mappedLogprobs);
           }
-          if (delta.tool_calls != null) {
-            for (const toolCallDelta of delta.tool_calls) {
+          const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
+            {
+              type: "function",
+              id: generateId(),
+              function: delta.function_call,
+              index: 0
+            }
+          ] : delta.tool_calls;
+          if (mappedToolCalls != null) {
+            for (const toolCallDelta of mappedToolCalls) {
               const index = toolCallDelta.index;
               if (toolCalls[index] == null) {
                 if (toolCallDelta.type !== "function") {
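Streaming takes the same route: each legacy `delta.function_call` chunk is wrapped as a tool-call delta pinned to index 0, so the existing accumulation loop consumes both wire formats unchanged. A sketch with a hypothetical stream chunk (again assuming `generateId` from `@ai-sdk/provider-utils`):

```ts
import { generateId } from "@ai-sdk/provider-utils";

// Hypothetical partial chunk from a legacy function-calling stream:
const delta = { function_call: { name: "getWeather", arguments: '{"ci' } };

// Wrapped into the shape the tool_calls loop already understands:
const mappedToolCalls = [
  {
    type: "function",
    id: generateId(), // the legacy stream carries no id
    function: delta.function_call,
    index: 0, // legacy mode permits only one call, so the index is fixed
  },
];
```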
@@ -424,7 +518,7 @@ var OpenAIChatLanguageModel = class {
       ),
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
-      warnings: []
+      warnings
     };
   }
 };
@@ -433,17 +527,21 @@ var openAIChatResponseSchema = z2.object({
   z2.object({
     message: z2.object({
       role: z2.literal("assistant"),
-      content: z2.string().nullable().optional(),
+      content: z2.string().nullish(),
+      function_call: z2.object({
+        arguments: z2.string(),
+        name: z2.string()
+      }).nullish(),
       tool_calls: z2.array(
         z2.object({
-          id: z2.string().optional().nullable(),
+          id: z2.string().nullish(),
           type: z2.literal("function"),
           function: z2.object({
             name: z2.string(),
             arguments: z2.string()
           })
         })
-      ).optional()
+      ).nullish()
     }),
     index: z2.number(),
     logprobs: z2.object({
@@ -459,8 +557,8 @@ var openAIChatResponseSchema = z2.object({
           )
         })
       ).nullable()
-    }).nullable().optional(),
-    finish_reason: z2.string().optional().nullable()
+    }).nullish(),
+    finish_reason: z2.string().nullish()
   })
 ),
 usage: z2.object({
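The schema cleanups swap `.nullable().optional()` chains for Zod's `.nullish()`, which accepts the same inputs. Shown in isolation:

```ts
import { z } from "zod";

// Both accept string | null | undefined.
const verbose = z.string().nullable().optional();
const concise = z.string().nullish();

concise.parse(null);      // ok
concise.parse(undefined); // ok
concise.parse("hello");   // ok
```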
@@ -475,6 +573,10 @@ var openaiChatChunkSchema = z2.union([
     delta: z2.object({
       role: z2.enum(["assistant"]).optional(),
       content: z2.string().nullish(),
+      function_call: z2.object({
+        name: z2.string().optional(),
+        arguments: z2.string().optional()
+      }).nullish(),
       tool_calls: z2.array(
         z2.object({
           index: z2.number(),
@@ -512,12 +614,45 @@ var openaiChatChunkSchema = z2.union([
   }),
   openAIErrorDataSchema
 ]);
-function prepareToolsAndToolChoice(mode) {
+function prepareToolsAndToolChoice({
+  mode,
+  useLegacyFunctionCalling = false
+}) {
   var _a;
   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
   if (tools == null) {
     return { tools: void 0, tool_choice: void 0 };
   }
+  const toolChoice = mode.toolChoice;
+  if (useLegacyFunctionCalling) {
+    const mappedFunctions = tools.map((tool) => ({
+      name: tool.name,
+      description: tool.description,
+      parameters: tool.parameters
+    }));
+    if (toolChoice == null) {
+      return { functions: mappedFunctions, function_call: void 0 };
+    }
+    const type2 = toolChoice.type;
+    switch (type2) {
+      case "auto":
+      case "none":
+      case void 0:
+        return {
+          functions: mappedFunctions,
+          function_call: void 0
+        };
+      case "required":
+        throw new UnsupportedFunctionalityError2({
+          functionality: "useLegacyFunctionCalling and toolChoice: required"
+        });
+      default:
+        return {
+          functions: mappedFunctions,
+          function_call: { name: toolChoice.toolName }
+        };
+    }
+  }
   const mappedTools = tools.map((tool) => ({
     type: "function",
     function: {
@@ -526,7 +661,6 @@ function prepareToolsAndToolChoice(mode) {
       parameters: tool.parameters
     }
   }));
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return { tools: mappedTools, tool_choice: void 0 };
   }
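In legacy mode, `prepareToolsAndToolChoice` maps the SDK's `toolChoice` onto the old `functions`/`function_call` request fields; `required` has no legacy equivalent and throws. A hypothetical direct call illustrating the forced-tool case:

```ts
const legacyArgs = prepareToolsAndToolChoice({
  mode: {
    type: "regular",
    tools: [{ name: "getWeather", description: "Weather lookup", parameters: {} }],
    toolChoice: { type: "tool", toolName: "getWeather" },
  },
  useLegacyFunctionCalling: true,
});
// → {
//     functions: [{ name: "getWeather", description: "Weather lookup", parameters: {} }],
//     function_call: { name: "getWeather" }
//   }
```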
@@ -555,7 +689,7 @@ function prepareToolsAndToolChoice(mode) {

 // src/openai-completion-language-model.ts
 import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders2,
@@ -568,7 +702,7 @@ import { z as z3 } from "zod";
 // src/convert-to-openai-completion-prompt.ts
 import {
   InvalidPromptError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
 } from "@ai-sdk/provider";
 function convertToOpenAICompletionPrompt({
   prompt,
@@ -601,7 +735,7 @@ function convertToOpenAICompletionPrompt({
           return part.text;
         }
         case "image": {
-          throw new UnsupportedFunctionalityError2({
+          throw new UnsupportedFunctionalityError3({
            functionality: "images"
          });
        }
@@ -620,7 +754,7 @@ ${userMessage}
          return part.text;
        }
        case "tool-call": {
-          throw new UnsupportedFunctionalityError2({
+          throw new UnsupportedFunctionalityError3({
           functionality: "tool-call messages"
         });
       }
@@ -633,7 +767,7 @@ ${assistantMessage}
       break;
     }
     case "tool": {
-      throw new UnsupportedFunctionalityError2({
+      throw new UnsupportedFunctionalityError3({
         functionality: "tool messages"
       });
     }
@@ -685,13 +819,31 @@ var OpenAICompletionLanguageModel = class {
     maxTokens,
     temperature,
     topP,
+    topK,
     frequencyPenalty,
     presencePenalty,
+    stopSequences: userStopSequences,
+    responseFormat,
     seed
   }) {
     var _a;
     const type = mode.type;
+    const warnings = [];
+    if (topK != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "topK"
+      });
+    }
+    if (responseFormat != null && responseFormat.type !== "text") {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "responseFormat",
+        details: "JSON response format is not supported."
+      });
+    }
     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+    const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
     const baseArgs = {
       // model id:
       model: this.modelId,
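The completion model now merges caller-provided `stopSequences` with the stop sequences derived from the chat-style prompt, and sends `stop` only when the combined list is non-empty. A sketch of the merge (values hypothetical):

```ts
const stopSequences = ["\nuser:"]; // derived from the prompt format
const userStopSequences = ["END"]; // passed in the call settings
const stop = [
  ...(stopSequences ?? []),
  ...(userStopSequences ?? []),
];
// → ["\nuser:", "END"]; omitted from the request when empty
```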
@@ -711,37 +863,32 @@ var OpenAICompletionLanguageModel = class {
       // prompt:
       prompt: completionPrompt,
       // stop sequences:
-      stop: stopSequences
+      stop: stop.length > 0 ? stop : void 0
     };
     switch (type) {
       case "regular": {
         if ((_a = mode.tools) == null ? void 0 : _a.length) {
-          throw new UnsupportedFunctionalityError3({
+          throw new UnsupportedFunctionalityError4({
             functionality: "tools"
           });
         }
         if (mode.toolChoice) {
-          throw new UnsupportedFunctionalityError3({
+          throw new UnsupportedFunctionalityError4({
             functionality: "toolChoice"
           });
         }
-        return baseArgs;
+        return { args: baseArgs, warnings };
       }
       case "object-json": {
-        throw new UnsupportedFunctionalityError3({
+        throw new UnsupportedFunctionalityError4({
           functionality: "object-json mode"
         });
       }
       case "object-tool": {
-        throw new UnsupportedFunctionalityError3({
+        throw new UnsupportedFunctionalityError4({
           functionality: "object-tool mode"
         });
       }
-      case "object-grammar": {
-        throw new UnsupportedFunctionalityError3({
-          functionality: "object-grammar mode"
-        });
-      }
       default: {
         const _exhaustiveCheck = type;
         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
@@ -749,7 +896,7 @@ var OpenAICompletionLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    const args = this.getArgs(options);
+    const { args, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi2({
       url: this.config.url({
         path: "/completions",
@@ -776,11 +923,11 @@ var OpenAICompletionLanguageModel = class {
       logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
-      warnings: []
+      warnings
     };
   }
   async doStream(options) {
-    const args = this.getArgs(options);
+    const { args, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi2({
       url: this.config.url({
         path: "/completions",
@@ -788,7 +935,7 @@ var OpenAICompletionLanguageModel = class {
       }),
       headers: combineHeaders2(this.config.headers(), options.headers),
       body: {
-        ...this.getArgs(options),
+        ...args,
         stream: true,
         // only include stream_options when in strict compatibility mode:
         stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
@@ -859,7 +1006,7 @@ var OpenAICompletionLanguageModel = class {
       ),
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
-      warnings: []
+      warnings
     };
   }
 };