@ai-sdk/openai-compatible 1.0.0-alpha.9 → 1.0.0-beta.2

This diff reflects the changes between publicly released versions of this package as they appear in their respective public registries, and is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -31,7 +31,7 @@ module.exports = __toCommonJS(src_exports);
  // src/openai-compatible-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_v43 = require("zod/v4");

  // src/convert-to-openai-compatible-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -103,7 +103,7 @@ function convertToOpenAICompatibleChatMessages(prompt) {
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  },
  ...partMetadata
  });
@@ -121,11 +121,24 @@ function convertToOpenAICompatibleChatMessages(prompt) {
  }
  case "tool": {
  for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  const toolResponseMetadata = getOpenAIMetadata(toolResponse);
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result),
+ content: contentValue,
  ...toolResponseMetadata
  });
  }
@@ -171,30 +184,30 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }

  // src/openai-compatible-chat-options.ts
- var import_zod = require("zod");
- var openaiCompatibleProviderOptions = import_zod.z.object({
+ var import_v4 = require("zod/v4");
+ var openaiCompatibleProviderOptions = import_v4.z.object({
  /**
  * A unique identifier representing your end-user, which can help the provider to
  * monitor and detect abuse.
  */
- user: import_zod.z.string().optional(),
+ user: import_v4.z.string().optional(),
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional()
+ reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional()
  });

  // src/openai-compatible-error.ts
- var import_zod2 = require("zod");
- var openaiCompatibleErrorDataSchema = import_zod2.z.object({
- error: import_zod2.z.object({
- message: import_zod2.z.string(),
+ var import_v42 = require("zod/v4");
+ var openaiCompatibleErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod2.z.string().nullish(),
- param: import_zod2.z.any().nullish(),
- code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
+ type: import_v42.z.string().nullish(),
+ param: import_v42.z.any().nullish(),
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
  });
  var defaultOpenAICompatibleErrorStructure = {
@@ -223,7 +236,7 @@ function prepareTools({
  function: {
  name: tool.name,
  description: tool.description,
- parameters: tool.parameters
+ parameters: tool.inputSchema
  }
  });
  }
@@ -399,10 +412,9 @@ var OpenAICompatibleChatLanguageModel = class {
  for (const toolCall of choice.message.tool_calls) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_a = toolCall.id) != null ? _a : (0, import_provider_utils.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  }
  }
@@ -480,6 +492,8 @@ var OpenAICompatibleChatLanguageModel = class {
  };
  let isFirstChunk = true;
  const providerOptionsName = this.providerOptionsName;
+ let isActiveReasoning = false;
+ let isActiveText = false;
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -489,6 +503,9 @@ var OpenAICompatibleChatLanguageModel = class {
  // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
  transform(chunk, controller) {
  var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -543,15 +560,28 @@ var OpenAICompatibleChatLanguageModel = class {
  }
  const delta = choice.delta;
  if (delta.reasoning_content != null) {
+ if (!isActiveReasoning) {
+ controller.enqueue({
+ type: "reasoning-start",
+ id: "reasoning-0"
+ });
+ isActiveReasoning = true;
+ }
  controller.enqueue({
- type: "reasoning",
- text: delta.reasoning_content
+ type: "reasoning-delta",
+ id: "reasoning-0",
+ delta: delta.reasoning_content
  });
  }
  if (delta.content != null) {
+ if (!isActiveText) {
+ controller.enqueue({ type: "text-start", id: "txt-0" });
+ isActiveText = true;
+ }
  controller.enqueue({
- type: "text",
- text: delta.content
+ type: "text-delta",
+ id: "txt-0",
+ delta: delta.content
  });
  }
  if (delta.tool_calls != null) {
@@ -576,6 +606,11 @@ var OpenAICompatibleChatLanguageModel = class {
  message: `Expected 'function.name' to be a string.`
  });
  }
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallDelta.id,
+ toolName: toolCallDelta.function.name
+ });
  toolCalls[index] = {
  id: toolCallDelta.id,
  type: "function",
@@ -589,20 +624,21 @@
  if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-start",
+ id: toolCall2.id,
+ toolName: toolCall2.function.name
  });
  }
  if ((0, import_provider_utils.isParsableJson)(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils.generateId)(),
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.hasFinished = true;
  }
@@ -617,19 +653,20 @@
  toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
  if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils.isParsableJson)(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.hasFinished = true;
  }
@@ -638,6 +675,12 @@
  },
  flush(controller) {
  var _a2, _b, _c, _d, _e;
+ if (isActiveReasoning) {
+ controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
+ }
+ if (isActiveText) {
+ controller.enqueue({ type: "text-end", id: "txt-0" });
+ }
  const providerMetadata = {
  [providerOptionsName]: {},
  ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
@@ -668,69 +711,69 @@ var OpenAICompatibleChatLanguageModel = class {
  };
  }
  };
- var openaiCompatibleTokenUsageSchema = import_zod3.z.object({
- prompt_tokens: import_zod3.z.number().nullish(),
- completion_tokens: import_zod3.z.number().nullish(),
- total_tokens: import_zod3.z.number().nullish(),
- prompt_tokens_details: import_zod3.z.object({
- cached_tokens: import_zod3.z.number().nullish()
+ var openaiCompatibleTokenUsageSchema = import_v43.z.object({
+ prompt_tokens: import_v43.z.number().nullish(),
+ completion_tokens: import_v43.z.number().nullish(),
+ total_tokens: import_v43.z.number().nullish(),
+ prompt_tokens_details: import_v43.z.object({
+ cached_tokens: import_v43.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod3.z.object({
- reasoning_tokens: import_zod3.z.number().nullish(),
- accepted_prediction_tokens: import_zod3.z.number().nullish(),
- rejected_prediction_tokens: import_zod3.z.number().nullish()
+ completion_tokens_details: import_v43.z.object({
+ reasoning_tokens: import_v43.z.number().nullish(),
+ accepted_prediction_tokens: import_v43.z.number().nullish(),
+ rejected_prediction_tokens: import_v43.z.number().nullish()
  }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- message: import_zod3.z.object({
- role: import_zod3.z.literal("assistant").nullish(),
- content: import_zod3.z.string().nullish(),
- reasoning_content: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function"),
- function: import_zod3.z.object({
- name: import_zod3.z.string(),
- arguments: import_zod3.z.string()
+ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ message: import_v43.z.object({
+ role: import_v43.z.literal("assistant").nullish(),
+ content: import_v43.z.string().nullish(),
+ reasoning_content: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ type: import_v43.z.literal("function"),
+ function: import_v43.z.object({
+ name: import_v43.z.string(),
+ arguments: import_v43.z.string()
  })
  })
  ).nullish()
  }),
- finish_reason: import_zod3.z.string().nullish()
+ finish_reason: import_v43.z.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- delta: import_zod3.z.object({
- role: import_zod3.z.enum(["assistant"]).nullish(),
- content: import_zod3.z.string().nullish(),
- reasoning_content: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- index: import_zod3.z.number(),
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function").nullish(),
- function: import_zod3.z.object({
- name: import_zod3.z.string().nullish(),
- arguments: import_zod3.z.string().nullish()
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([
+ import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ delta: import_v43.z.object({
+ role: import_v43.z.enum(["assistant"]).nullish(),
+ content: import_v43.z.string().nullish(),
+ reasoning_content: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ index: import_v43.z.number(),
+ id: import_v43.z.string().nullish(),
+ type: import_v43.z.literal("function").nullish(),
+ function: import_v43.z.object({
+ name: import_v43.z.string().nullish(),
+ arguments: import_v43.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullish()
+ finish_reason: import_v43.z.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
@@ -740,7 +783,7 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod3.z.union

  // src/openai-compatible-completion-language-model.ts
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_v45 = require("zod/v4");

  // src/convert-to-openai-compatible-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -818,28 +861,28 @@ ${user}:`]
  }

  // src/openai-compatible-completion-options.ts
- var import_zod4 = require("zod");
- var openaiCompatibleCompletionProviderOptions = import_zod4.z.object({
+ var import_v44 = require("zod/v4");
+ var openaiCompatibleCompletionProviderOptions = import_v44.z.object({
  /**
  * Echo back the prompt in addition to the completion.
  */
- echo: import_zod4.z.boolean().optional(),
+ echo: import_v44.z.boolean().optional(),
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
  * Accepts a JSON object that maps tokens (specified by their token ID in
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
- logitBias: import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number()).optional(),
+ logitBias: import_v44.z.record(import_v44.z.string(), import_v44.z.number()).optional(),
  /**
  * The suffix that comes after a completion of inserted text.
  */
- suffix: import_zod4.z.string().optional(),
+ suffix: import_v44.z.string().optional(),
  /**
  * A unique identifier representing your end-user, which can help providers to
  * monitor and detect abuse.
  */
- user: import_zod4.z.string().optional()
+ user: import_v44.z.string().optional()
  });

  // src/openai-compatible-completion-language-model.ts
@@ -1011,6 +1054,9 @@ var OpenAICompatibleCompletionLanguageModel = class {
  },
  transform(chunk, controller) {
  var _a, _b, _c;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1028,6 +1074,10 @@ var OpenAICompatibleCompletionLanguageModel = class {
  type: "response-metadata",
  ...getResponseMetadata(value)
  });
+ controller.enqueue({
+ type: "text-start",
+ id: "0"
+ });
  }
  if (value.usage != null) {
  usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
@@ -1042,12 +1092,16 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text",
- text: choice.text
+ type: "text-delta",
+ id: "0",
+ delta: choice.text
  });
  }
  },
  flush(controller) {
+ if (!isFirstChunk) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -1061,33 +1115,33 @@
  };
  }
  };
- var usageSchema = import_zod5.z.object({
- prompt_tokens: import_zod5.z.number(),
- completion_tokens: import_zod5.z.number(),
- total_tokens: import_zod5.z.number()
+ var usageSchema = import_v45.z.object({
+ prompt_tokens: import_v45.z.number(),
+ completion_tokens: import_v45.z.number(),
+ total_tokens: import_v45.z.number()
  });
- var openaiCompatibleCompletionResponseSchema = import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- created: import_zod5.z.number().nullish(),
- model: import_zod5.z.string().nullish(),
- choices: import_zod5.z.array(
- import_zod5.z.object({
- text: import_zod5.z.string(),
- finish_reason: import_zod5.z.string()
+ var openaiCompatibleCompletionResponseSchema = import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ text: import_v45.z.string(),
+ finish_reason: import_v45.z.string()
  })
  ),
  usage: usageSchema.nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_zod5.z.union([
- import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- created: import_zod5.z.number().nullish(),
- model: import_zod5.z.string().nullish(),
- choices: import_zod5.z.array(
- import_zod5.z.object({
- text: import_zod5.z.string(),
- finish_reason: import_zod5.z.string().nullish(),
- index: import_zod5.z.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.union([
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ text: import_v45.z.string(),
+ finish_reason: import_v45.z.string().nullish(),
+ index: import_v45.z.number()
  })
  ),
  usage: usageSchema.nullish()
@@ -1098,21 +1152,21 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_zod5.z
  // src/openai-compatible-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
+ var import_v47 = require("zod/v4");

  // src/openai-compatible-embedding-options.ts
- var import_zod6 = require("zod");
- var openaiCompatibleEmbeddingProviderOptions = import_zod6.z.object({
+ var import_v46 = require("zod/v4");
+ var openaiCompatibleEmbeddingProviderOptions = import_v46.z.object({
  /**
  * The number of dimensions the resulting output embeddings should have.
  * Only supported in text-embedding-3 and later models.
  */
- dimensions: import_zod6.z.number().optional(),
+ dimensions: import_v46.z.number().optional(),
  /**
  * A unique identifier representing your end-user, which can help providers to
  * monitor and detect abuse.
  */
- user: import_zod6.z.string().optional()
+ user: import_v46.z.string().optional()
  });

  // src/openai-compatible-embedding-model.ts
@@ -1196,14 +1250,14 @@ var OpenAICompatibleEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
- data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
- usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_v47.z.object({
+ data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
+ usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
  });

  // src/openai-compatible-image-model.ts
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod8 = require("zod");
+ var import_v48 = require("zod/v4");
  var OpenAICompatibleImageModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
@@ -1271,8 +1325,8 @@ var OpenAICompatibleImageModel = class {
  };
  }
  };
- var openaiCompatibleImageResponseSchema = import_zod8.z.object({
- data: import_zod8.z.array(import_zod8.z.object({ b64_json: import_zod8.z.string() }))
+ var openaiCompatibleImageResponseSchema = import_v48.z.object({
+ data: import_v48.z.array(import_v48.z.object({ b64_json: import_v48.z.string() }))
  });

  // src/openai-compatible-provider.ts