@ai-sdk/openai-compatible 1.0.0-alpha.9 → 1.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +86 -0
- package/dist/index.d.mts +13 -86
- package/dist/index.d.ts +13 -86
- package/dist/index.js +179 -125
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +87 -33
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -2
- package/dist/internal/index.d.ts +2 -2
- package/dist/internal/index.js +15 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +15 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.js  CHANGED
@@ -31,7 +31,7 @@ module.exports = __toCommonJS(src_exports);
  31   31 |   // src/openai-compatible-chat-language-model.ts
  32   32 |   var import_provider3 = require("@ai-sdk/provider");
  33   33 |   var import_provider_utils = require("@ai-sdk/provider-utils");
  34      | - var
       34 | + var import_v43 = require("zod/v4");
  35   35 |
  36   36 |   // src/convert-to-openai-compatible-chat-messages.ts
  37   37 |   var import_provider = require("@ai-sdk/provider");
@@ -103,7 +103,7 @@ function convertToOpenAICompatibleChatMessages(prompt) {
 103  103 |   type: "function",
 104  104 |   function: {
 105  105 |   name: part.toolName,
 106      | - arguments: JSON.stringify(part.
      106 | + arguments: JSON.stringify(part.input)
 107  107 |   },
 108  108 |   ...partMetadata
 109  109 |   });
@@ -121,11 +121,24 @@ function convertToOpenAICompatibleChatMessages(prompt) {
 121  121 |   }
 122  122 |   case "tool": {
 123  123 |   for (const toolResponse of content) {
      124 | + const output = toolResponse.output;
      125 | + let contentValue;
      126 | + switch (output.type) {
      127 | + case "text":
      128 | + case "error-text":
      129 | + contentValue = output.value;
      130 | + break;
      131 | + case "content":
      132 | + case "json":
      133 | + case "error-json":
      134 | + contentValue = JSON.stringify(output.value);
      135 | + break;
      136 | + }
 124  137 |   const toolResponseMetadata = getOpenAIMetadata(toolResponse);
 125  138 |   messages.push({
 126  139 |   role: "tool",
 127  140 |   tool_call_id: toolResponse.toolCallId,
 128      | - content:
      141 | + content: contentValue,
 129  142 |   ...toolResponseMetadata
 130  143 |   });
 131  144 |   }
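Note: the two hunks above change how tool data flows into the OpenAI-compatible payload. Tool-call arguments are now read from `part.input`, and tool results arrive as a discriminated `output` union that is flattened into the string `content` of the `role: "tool"` message. A minimal sketch of the prompt parts this converter now handles (the part shapes are assumed from the AI SDK v5 prompt format; values are illustrative):

  // Assistant tool-call part: `input` is serialized via JSON.stringify(part.input).
  const toolCallPart = {
    type: "tool-call",
    toolCallId: "call_1",
    toolName: "getWeather",
    input: { city: "Berlin" },
  };

  // Tool result part: "text"/"error-text" values pass through unchanged,
  // while "content", "json" and "error-json" values are JSON.stringify-ed.
  const toolResultPart = {
    type: "tool-result",
    toolCallId: "call_1",
    toolName: "getWeather",
    output: { type: "json", value: { tempC: 21 } },
  };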
@@ -171,30 +184,30 @@ function mapOpenAICompatibleFinishReason(finishReason) {
 171  184 |   }
 172  185 |
 173  186 |   // src/openai-compatible-chat-options.ts
 174      | - var
 175      | - var openaiCompatibleProviderOptions =
      187 | + var import_v4 = require("zod/v4");
      188 | + var openaiCompatibleProviderOptions = import_v4.z.object({
 176  189 |   /**
 177  190 |   * A unique identifier representing your end-user, which can help the provider to
 178  191 |   * monitor and detect abuse.
 179  192 |   */
 180      | - user:
      193 | + user: import_v4.z.string().optional(),
 181  194 |   /**
 182  195 |   * Reasoning effort for reasoning models. Defaults to `medium`.
 183  196 |   */
 184      | - reasoningEffort:
      197 | + reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional()
 185  198 |   });
 186  199 |
 187  200 |   // src/openai-compatible-error.ts
 188      | - var
 189      | - var openaiCompatibleErrorDataSchema =
 190      | - error:
 191      | - message:
      201 | + var import_v42 = require("zod/v4");
      202 | + var openaiCompatibleErrorDataSchema = import_v42.z.object({
      203 | + error: import_v42.z.object({
      204 | + message: import_v42.z.string(),
 192  205 |   // The additional information below is handled loosely to support
 193  206 |   // OpenAI-compatible providers that have slightly different error
 194  207 |   // responses:
 195      | - type:
 196      | - param:
 197      | - code:
      208 | + type: import_v42.z.string().nullish(),
      209 | + param: import_v42.z.any().nullish(),
      210 | + code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
 198  211 |   })
 199  212 |   });
 200  213 |   var defaultOpenAICompatibleErrorStructure = {
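The provider-option schemas are now built with `zod/v4`; the accepted fields are unchanged. As a sketch, the options validated by `openaiCompatibleProviderOptions` are supplied per call under the provider's options key (the key name `openaiCompatible` below is illustrative and depends on how the provider instance is configured):

  const providerOptions = {
    openaiCompatible: {
      user: "user-1234",       // optional string
      reasoningEffort: "low",  // optional: "low" | "medium" | "high"
    },
  };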
@@ -223,7 +236,7 @@ function prepareTools({
 223  236 |   function: {
 224  237 |   name: tool.name,
 225  238 |   description: tool.description,
 226      | - parameters: tool.
      239 | + parameters: tool.inputSchema
 227  240 |   }
 228  241 |   });
 229  242 |   }
@@ -399,10 +412,9 @@ var OpenAICompatibleChatLanguageModel = class {
 399  412 |   for (const toolCall of choice.message.tool_calls) {
 400  413 |   content.push({
 401  414 |   type: "tool-call",
 402      | - toolCallType: "function",
 403  415 |   toolCallId: (_a = toolCall.id) != null ? _a : (0, import_provider_utils.generateId)(),
 404  416 |   toolName: toolCall.function.name,
 405      | -
      417 | + input: toolCall.function.arguments
 406  418 |   });
 407  419 |   }
 408  420 |   }
@@ -480,6 +492,8 @@ var OpenAICompatibleChatLanguageModel = class {
 480  492 |   };
 481  493 |   let isFirstChunk = true;
 482  494 |   const providerOptionsName = this.providerOptionsName;
      495 | + let isActiveReasoning = false;
      496 | + let isActiveText = false;
 483  497 |   return {
 484  498 |   stream: response.pipeThrough(
 485  499 |   new TransformStream({
@@ -489,6 +503,9 @@ var OpenAICompatibleChatLanguageModel = class {
 489  503 |   // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
 490  504 |   transform(chunk, controller) {
 491  505 |   var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
      506 | + if (options.includeRawChunks) {
      507 | + controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
      508 | + }
 492  509 |   if (!chunk.success) {
 493  510 |   finishReason = "error";
 494  511 |   controller.enqueue({ type: "error", error: chunk.error });
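New in this hunk: when the call options set `includeRawChunks`, every parsed provider chunk is also forwarded untouched as a `raw` stream part before normal processing. A sketch of the emitted part shape, taken directly from the enqueue above:

  // Emitted once per provider chunk when includeRawChunks is enabled.
  type RawChunkPart = {
    type: "raw";
    rawValue: unknown; // the unmodified chunk payload from the provider
  };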
@@ -543,15 +560,28 @@ var OpenAICompatibleChatLanguageModel = class {
 543  560 |   }
 544  561 |   const delta = choice.delta;
 545  562 |   if (delta.reasoning_content != null) {
      563 | + if (!isActiveReasoning) {
      564 | + controller.enqueue({
      565 | + type: "reasoning-start",
      566 | + id: "reasoning-0"
      567 | + });
      568 | + isActiveReasoning = true;
      569 | + }
 546  570 |   controller.enqueue({
 547      | - type: "reasoning",
 548      | -
      571 | + type: "reasoning-delta",
      572 | + id: "reasoning-0",
      573 | + delta: delta.reasoning_content
 549  574 |   });
 550  575 |   }
 551  576 |   if (delta.content != null) {
      577 | + if (!isActiveText) {
      578 | + controller.enqueue({ type: "text-start", id: "txt-0" });
      579 | + isActiveText = true;
      580 | + }
 552  581 |   controller.enqueue({
 553      | - type: "text",
 554      | -
      582 | + type: "text-delta",
      583 | + id: "txt-0",
      584 | + delta: delta.content
 555  585 |   });
 556  586 |   }
 557  587 |   if (delta.tool_calls != null) {
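Reasoning and text deltas are now wrapped in start/delta/end parts with fixed ids instead of the former single `reasoning` and `text` parts. A sketch of the part sequence for a response that streams reasoning followed by text (the ids `"reasoning-0"` and `"txt-0"` are the constants used above; the matching end parts are emitted later, in the stream's flush, as shown further below):

  const observedParts = [
    { type: "reasoning-start", id: "reasoning-0" },
    { type: "reasoning-delta", id: "reasoning-0", delta: "Considering the question..." },
    { type: "text-start", id: "txt-0" },
    { type: "text-delta", id: "txt-0", delta: "Hello" },
    { type: "text-delta", id: "txt-0", delta: " world" },
    // emitted in flush():
    { type: "reasoning-end", id: "reasoning-0" },
    { type: "text-end", id: "txt-0" },
  ];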
@@ -576,6 +606,11 @@ var OpenAICompatibleChatLanguageModel = class {
 576  606 |   message: `Expected 'function.name' to be a string.`
 577  607 |   });
 578  608 |   }
      609 | + controller.enqueue({
      610 | + type: "tool-input-start",
      611 | + id: toolCallDelta.id,
      612 | + toolName: toolCallDelta.function.name
      613 | + });
 579  614 |   toolCalls[index] = {
 580  615 |   id: toolCallDelta.id,
 581  616 |   type: "function",
@@ -589,20 +624,21 @@ var OpenAICompatibleChatLanguageModel = class {
 589  624 |   if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
 590  625 |   if (toolCall2.function.arguments.length > 0) {
 591  626 |   controller.enqueue({
 592      | - type: "tool-
 593      | -
 594      | -
 595      | - toolName: toolCall2.function.name,
 596      | - argsTextDelta: toolCall2.function.arguments
      627 | + type: "tool-input-start",
      628 | + id: toolCall2.id,
      629 | + toolName: toolCall2.function.name
 597  630 |   });
 598  631 |   }
 599  632 |   if ((0, import_provider_utils.isParsableJson)(toolCall2.function.arguments)) {
      633 | + controller.enqueue({
      634 | + type: "tool-input-end",
      635 | + id: toolCall2.id
      636 | + });
 600  637 |   controller.enqueue({
 601  638 |   type: "tool-call",
 602      | - toolCallType: "function",
 603  639 |   toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils.generateId)(),
 604  640 |   toolName: toolCall2.function.name,
 605      | -
      641 | + input: toolCall2.function.arguments
 606  642 |   });
 607  643 |   toolCall2.hasFinished = true;
 608  644 |   }
@@ -617,19 +653,20 @@ var OpenAICompatibleChatLanguageModel = class {
 617  653 |   toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
 618  654 |   }
 619  655 |   controller.enqueue({
 620      | - type: "tool-
 621      | -
 622      | -
 623      | - toolName: toolCall.function.name,
 624      | - argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
      656 | + type: "tool-input-delta",
      657 | + id: toolCall.id,
      658 | + delta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
 625  659 |   });
 626  660 |   if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils.isParsableJson)(toolCall.function.arguments)) {
      661 | + controller.enqueue({
      662 | + type: "tool-input-end",
      663 | + id: toolCall.id
      664 | + });
 627  665 |   controller.enqueue({
 628  666 |   type: "tool-call",
 629      | - toolCallType: "function",
 630  667 |   toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils.generateId)(),
 631  668 |   toolName: toolCall.function.name,
 632      | -
      669 | + input: toolCall.function.arguments
 633  670 |   });
 634  671 |   toolCall.hasFinished = true;
 635  672 |   }
@@ -638,6 +675,12 @@ var OpenAICompatibleChatLanguageModel = class {
 638  675 |   },
 639  676 |   flush(controller) {
 640  677 |   var _a2, _b, _c, _d, _e;
      678 | + if (isActiveReasoning) {
      679 | + controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
      680 | + }
      681 | + if (isActiveText) {
      682 | + controller.enqueue({ type: "text-end", id: "txt-0" });
      683 | + }
 641  684 |   const providerMetadata = {
 642  685 |   [providerOptionsName]: {},
 643  686 |   ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
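Streamed tool calls follow the same lifecycle in the hunks above: `tool-input-start` when a tool call id and name first appear, `tool-input-delta` for each chunk of argument text, `tool-input-end` once the accumulated arguments parse as JSON, and finally a `tool-call` part whose `input` is the raw argument string (the former `toolCallType` and `argsTextDelta` fields are gone). A sketch of the sequence for one streamed call, with illustrative values:

  const toolCallParts = [
    { type: "tool-input-start", id: "call_1", toolName: "getWeather" },
    { type: "tool-input-delta", id: "call_1", delta: '{"city":' },
    { type: "tool-input-delta", id: "call_1", delta: '"Berlin"}' },
    { type: "tool-input-end", id: "call_1" },
    {
      type: "tool-call",
      toolCallId: "call_1",
      toolName: "getWeather",
      input: '{"city":"Berlin"}', // JSON arguments as a string, not pre-parsed
    },
  ];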
@@ -668,69 +711,69 @@ var OpenAICompatibleChatLanguageModel = class {
 668  711 |   };
 669  712 |   }
 670  713 |   };
 671      | - var openaiCompatibleTokenUsageSchema =
 672      | - prompt_tokens:
 673      | - completion_tokens:
 674      | - total_tokens:
 675      | - prompt_tokens_details:
 676      | - cached_tokens:
      714 | + var openaiCompatibleTokenUsageSchema = import_v43.z.object({
      715 | + prompt_tokens: import_v43.z.number().nullish(),
      716 | + completion_tokens: import_v43.z.number().nullish(),
      717 | + total_tokens: import_v43.z.number().nullish(),
      718 | + prompt_tokens_details: import_v43.z.object({
      719 | + cached_tokens: import_v43.z.number().nullish()
 677  720 |   }).nullish(),
 678      | - completion_tokens_details:
 679      | - reasoning_tokens:
 680      | - accepted_prediction_tokens:
 681      | - rejected_prediction_tokens:
      721 | + completion_tokens_details: import_v43.z.object({
      722 | + reasoning_tokens: import_v43.z.number().nullish(),
      723 | + accepted_prediction_tokens: import_v43.z.number().nullish(),
      724 | + rejected_prediction_tokens: import_v43.z.number().nullish()
 682  725 |   }).nullish()
 683  726 |   }).nullish();
 684      | - var OpenAICompatibleChatResponseSchema =
 685      | - id:
 686      | - created:
 687      | - model:
 688      | - choices:
 689      | -
 690      | - message:
 691      | - role:
 692      | - content:
 693      | - reasoning_content:
 694      | - tool_calls:
 695      | -
 696      | - id:
 697      | - type:
 698      | - function:
 699      | - name:
 700      | - arguments:
      727 | + var OpenAICompatibleChatResponseSchema = import_v43.z.object({
      728 | + id: import_v43.z.string().nullish(),
      729 | + created: import_v43.z.number().nullish(),
      730 | + model: import_v43.z.string().nullish(),
      731 | + choices: import_v43.z.array(
      732 | + import_v43.z.object({
      733 | + message: import_v43.z.object({
      734 | + role: import_v43.z.literal("assistant").nullish(),
      735 | + content: import_v43.z.string().nullish(),
      736 | + reasoning_content: import_v43.z.string().nullish(),
      737 | + tool_calls: import_v43.z.array(
      738 | + import_v43.z.object({
      739 | + id: import_v43.z.string().nullish(),
      740 | + type: import_v43.z.literal("function"),
      741 | + function: import_v43.z.object({
      742 | + name: import_v43.z.string(),
      743 | + arguments: import_v43.z.string()
 701  744 |   })
 702  745 |   })
 703  746 |   ).nullish()
 704  747 |   }),
 705      | - finish_reason:
      748 | + finish_reason: import_v43.z.string().nullish()
 706  749 |   })
 707  750 |   ),
 708  751 |   usage: openaiCompatibleTokenUsageSchema
 709  752 |   });
 710      | - var createOpenAICompatibleChatChunkSchema = (errorSchema) =>
 711      | -
 712      | - id:
 713      | - created:
 714      | - model:
 715      | - choices:
 716      | -
 717      | - delta:
 718      | - role:
 719      | - content:
 720      | - reasoning_content:
 721      | - tool_calls:
 722      | -
 723      | - index:
 724      | - id:
 725      | - type:
 726      | - function:
 727      | - name:
 728      | - arguments:
      753 | + var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([
      754 | + import_v43.z.object({
      755 | + id: import_v43.z.string().nullish(),
      756 | + created: import_v43.z.number().nullish(),
      757 | + model: import_v43.z.string().nullish(),
      758 | + choices: import_v43.z.array(
      759 | + import_v43.z.object({
      760 | + delta: import_v43.z.object({
      761 | + role: import_v43.z.enum(["assistant"]).nullish(),
      762 | + content: import_v43.z.string().nullish(),
      763 | + reasoning_content: import_v43.z.string().nullish(),
      764 | + tool_calls: import_v43.z.array(
      765 | + import_v43.z.object({
      766 | + index: import_v43.z.number(),
      767 | + id: import_v43.z.string().nullish(),
      768 | + type: import_v43.z.literal("function").nullish(),
      769 | + function: import_v43.z.object({
      770 | + name: import_v43.z.string().nullish(),
      771 | + arguments: import_v43.z.string().nullish()
 729  772 |   })
 730  773 |   })
 731  774 |   ).nullish()
 732  775 |   }).nullish(),
 733      | - finish_reason:
      776 | + finish_reason: import_v43.z.string().nullish()
 734  777 |   })
 735  778 |   ),
 736  779 |   usage: openaiCompatibleTokenUsageSchema
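The response and chunk schemas keep their field layout; only the zod import changes. For reference, a streaming chunk like the following (values illustrative) is accepted by the object branch of `createOpenAICompatibleChatChunkSchema`:

  const exampleChunk = {
    id: "chatcmpl-123",
    created: 1700000000,
    model: "my-model",
    choices: [
      {
        delta: {
          role: "assistant",
          content: "Hel",
          tool_calls: [
            {
              index: 0,
              id: "call_1",
              type: "function",
              function: { name: "getWeather", arguments: '{"ci' },
            },
          ],
        },
        finish_reason: null,
      },
    ],
    usage: { prompt_tokens: 10, completion_tokens: 2, total_tokens: 12 },
  };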
@@ -740,7 +783,7 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod3.z.union
 740  783 |
 741  784 |   // src/openai-compatible-completion-language-model.ts
 742  785 |   var import_provider_utils2 = require("@ai-sdk/provider-utils");
 743      | - var
      786 | + var import_v45 = require("zod/v4");
 744  787 |
 745  788 |   // src/convert-to-openai-compatible-completion-prompt.ts
 746  789 |   var import_provider4 = require("@ai-sdk/provider");
@@ -818,28 +861,28 @@ ${user}:`]
 818  861 |   }
 819  862 |
 820  863 |   // src/openai-compatible-completion-options.ts
 821      | - var
 822      | - var openaiCompatibleCompletionProviderOptions =
      864 | + var import_v44 = require("zod/v4");
      865 | + var openaiCompatibleCompletionProviderOptions = import_v44.z.object({
 823  866 |   /**
 824  867 |   * Echo back the prompt in addition to the completion.
 825  868 |   */
 826      | - echo:
      869 | + echo: import_v44.z.boolean().optional(),
 827  870 |   /**
 828  871 |   * Modify the likelihood of specified tokens appearing in the completion.
 829  872 |   *
 830  873 |   * Accepts a JSON object that maps tokens (specified by their token ID in
 831  874 |   * the GPT tokenizer) to an associated bias value from -100 to 100.
 832  875 |   */
 833      | - logitBias:
      876 | + logitBias: import_v44.z.record(import_v44.z.string(), import_v44.z.number()).optional(),
 834  877 |   /**
 835  878 |   * The suffix that comes after a completion of inserted text.
 836  879 |   */
 837      | - suffix:
      880 | + suffix: import_v44.z.string().optional(),
 838  881 |   /**
 839  882 |   * A unique identifier representing your end-user, which can help providers to
 840  883 |   * monitor and detect abuse.
 841  884 |   */
 842      | - user:
      885 | + user: import_v44.z.string().optional()
 843  886 |   });
 844  887 |
 845  888 |   // src/openai-compatible-completion-language-model.ts
@@ -1011,6 +1054,9 @@ var OpenAICompatibleCompletionLanguageModel = class {
1011 1054 |   },
1012 1055 |   transform(chunk, controller) {
1013 1056 |   var _a, _b, _c;
     1057 | + if (options.includeRawChunks) {
     1058 | + controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
     1059 | + }
1014 1060 |   if (!chunk.success) {
1015 1061 |   finishReason = "error";
1016 1062 |   controller.enqueue({ type: "error", error: chunk.error });
@@ -1028,6 +1074,10 @@ var OpenAICompatibleCompletionLanguageModel = class {
1028 1074 |   type: "response-metadata",
1029 1075 |   ...getResponseMetadata(value)
1030 1076 |   });
     1077 | + controller.enqueue({
     1078 | + type: "text-start",
     1079 | + id: "0"
     1080 | + });
1031 1081 |   }
1032 1082 |   if (value.usage != null) {
1033 1083 |   usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
@@ -1042,12 +1092,16 @@ var OpenAICompatibleCompletionLanguageModel = class {
1042 1092 |   }
1043 1093 |   if ((choice == null ? void 0 : choice.text) != null) {
1044 1094 |   controller.enqueue({
1045      | - type: "text",
1046      | -
     1095 | + type: "text-delta",
     1096 | + id: "0",
     1097 | + delta: choice.text
1047 1098 |   });
1048 1099 |   }
1049 1100 |   },
1050 1101 |   flush(controller) {
     1102 | + if (!isFirstChunk) {
     1103 | + controller.enqueue({ type: "text-end", id: "0" });
     1104 | + }
1051 1105 |   controller.enqueue({
1052 1106 |   type: "finish",
1053 1107 |   finishReason,
@@ -1061,33 +1115,33 @@ var OpenAICompatibleCompletionLanguageModel = class {
1061 1115 |   };
1062 1116 |   }
1063 1117 |   };
1064      | - var usageSchema =
1065      | - prompt_tokens:
1066      | - completion_tokens:
1067      | - total_tokens:
     1118 | + var usageSchema = import_v45.z.object({
     1119 | + prompt_tokens: import_v45.z.number(),
     1120 | + completion_tokens: import_v45.z.number(),
     1121 | + total_tokens: import_v45.z.number()
1068 1122 |   });
1069      | - var openaiCompatibleCompletionResponseSchema =
1070      | - id:
1071      | - created:
1072      | - model:
1073      | - choices:
1074      | -
1075      | - text:
1076      | - finish_reason:
     1123 | + var openaiCompatibleCompletionResponseSchema = import_v45.z.object({
     1124 | + id: import_v45.z.string().nullish(),
     1125 | + created: import_v45.z.number().nullish(),
     1126 | + model: import_v45.z.string().nullish(),
     1127 | + choices: import_v45.z.array(
     1128 | + import_v45.z.object({
     1129 | + text: import_v45.z.string(),
     1130 | + finish_reason: import_v45.z.string()
1077 1131 |   })
1078 1132 |   ),
1079 1133 |   usage: usageSchema.nullish()
1080 1134 |   });
1081      | - var createOpenAICompatibleCompletionChunkSchema = (errorSchema) =>
1082      | -
1083      | - id:
1084      | - created:
1085      | - model:
1086      | - choices:
1087      | -
1088      | - text:
1089      | - finish_reason:
1090      | - index:
     1135 | + var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.union([
     1136 | + import_v45.z.object({
     1137 | + id: import_v45.z.string().nullish(),
     1138 | + created: import_v45.z.number().nullish(),
     1139 | + model: import_v45.z.string().nullish(),
     1140 | + choices: import_v45.z.array(
     1141 | + import_v45.z.object({
     1142 | + text: import_v45.z.string(),
     1143 | + finish_reason: import_v45.z.string().nullish(),
     1144 | + index: import_v45.z.number()
1091 1145 |   })
1092 1146 |   ),
1093 1147 |   usage: usageSchema.nullish()
@@ -1098,21 +1152,21 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_zod5.z
1098 1152 |   // src/openai-compatible-embedding-model.ts
1099 1153 |   var import_provider5 = require("@ai-sdk/provider");
1100 1154 |   var import_provider_utils3 = require("@ai-sdk/provider-utils");
1101      | - var
     1155 | + var import_v47 = require("zod/v4");
1102 1156 |
1103 1157 |   // src/openai-compatible-embedding-options.ts
1104      | - var
1105      | - var openaiCompatibleEmbeddingProviderOptions =
     1158 | + var import_v46 = require("zod/v4");
     1159 | + var openaiCompatibleEmbeddingProviderOptions = import_v46.z.object({
1106 1160 |   /**
1107 1161 |   * The number of dimensions the resulting output embeddings should have.
1108 1162 |   * Only supported in text-embedding-3 and later models.
1109 1163 |   */
1110      | - dimensions:
     1164 | + dimensions: import_v46.z.number().optional(),
1111 1165 |   /**
1112 1166 |   * A unique identifier representing your end-user, which can help providers to
1113 1167 |   * monitor and detect abuse.
1114 1168 |   */
1115      | - user:
     1169 | + user: import_v46.z.string().optional()
1116 1170 |   });
1117 1171 |
1118 1172 |   // src/openai-compatible-embedding-model.ts
@@ -1196,14 +1250,14 @@ var OpenAICompatibleEmbeddingModel = class {
1196 1250 |   };
1197 1251 |   }
1198 1252 |   };
1199      | - var openaiTextEmbeddingResponseSchema =
1200      | - data:
1201      | - usage:
     1253 | + var openaiTextEmbeddingResponseSchema = import_v47.z.object({
     1254 | + data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
     1255 | + usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish()
1202 1256 |   });
1203 1257 |
1204 1258 |   // src/openai-compatible-image-model.ts
1205 1259 |   var import_provider_utils4 = require("@ai-sdk/provider-utils");
1206      | - var
     1260 | + var import_v48 = require("zod/v4");
1207 1261 |   var OpenAICompatibleImageModel = class {
1208 1262 |   constructor(modelId, config) {
1209 1263 |   this.modelId = modelId;
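The embedding and image response schemas change only their zod import. For reference, a response body matching `openaiTextEmbeddingResponseSchema` looks like this (values illustrative):

  const embeddingResponse = {
    data: [{ embedding: [0.0023, -0.0175, 0.0091] }],
    usage: { prompt_tokens: 8 },
  };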
@@ -1271,8 +1325,8 @@ var OpenAICompatibleImageModel = class {
1271 1325 |   };
1272 1326 |   }
1273 1327 |   };
1274      | - var openaiCompatibleImageResponseSchema =
1275      | - data:
     1328 | + var openaiCompatibleImageResponseSchema = import_v48.z.object({
     1329 | + data: import_v48.z.array(import_v48.z.object({ b64_json: import_v48.z.string() }))
1276 1330 |   });
1277 1331 |
1278 1332 |   // src/openai-compatible-provider.ts