@roo-code/types 1.105.0 → 1.108.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +485 -705
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1623 -2502
- package/dist/index.d.ts +1623 -2502
- package/dist/index.js +473 -696
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
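The main API-visible changes in this diff are two new `clineSays` message types ("too_many_tools_warning", "tool"), two new tool names ("read_command_output", "skill") with "fetch_instructions" removed, and the removal of the `TOOL_PROTOCOL` helpers plus the per-model `supportsNativeTools`/`defaultToolProtocol` fields. The sketch below re-creates the updated enums with zod purely to illustrate the change; the values are copied from the diff, but whether these schemas are re-exported from the package entry point is not shown here, so treat the names as an assumption.

```ts
// Illustrative sketch only (not the package's documented public API): enum values
// are copied from the 1.108.0 diff below; schema names mirror the bundled source.
import { z } from "zod";

const clineSays = [
  "condense_context_error",
  "sliding_window_truncation",
  "codebase_search_result",
  "user_edit_todos",
  "too_many_tools_warning", // new in 1.108.0
  "tool", // new in 1.108.0
] as const;
const clineSaySchema = z.enum(clineSays);

// Subset of toolNames for illustration: "read_command_output" and "skill" are added,
// while "fetch_instructions" no longer exists in 1.108.0.
const toolNameSchema = z.enum(["execute_command", "read_file", "read_command_output", "skill"]);

console.log(clineSaySchema.parse("too_many_tools_warning")); // passes
console.log(toolNameSchema.safeParse("fetch_instructions").success); // false
```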
package/dist/index.js
CHANGED
@@ -78,7 +78,9 @@ var clineSays = [
   "condense_context_error",
   "sliding_window_truncation",
   "codebase_search_result",
-  "user_edit_todos"
+  "user_edit_todos",
+  "too_many_tools_warning",
+  "tool"
 ];
 var clineSaySchema = z.enum(clineSays);
 var toolProgressStatusSchema = z.object({
@@ -146,6 +148,7 @@ var toolGroupsSchema = z2.enum(toolGroups);
 var toolNames = [
   "execute_command",
   "read_file",
+  "read_command_output",
   "write_to_file",
   "apply_diff",
   "search_and_replace",
@@ -161,10 +164,10 @@ var toolNames = [
   "attempt_completion",
   "switch_mode",
   "new_task",
-  "fetch_instructions",
   "codebase_search",
   "update_todo_list",
   "run_slash_command",
+  "skill",
   "generate_image",
   "custom_tool"
 ];
@@ -176,20 +179,6 @@ var toolUsageSchema = z2.record(
     failures: z2.number()
   })
 );
-var TOOL_PROTOCOL = {
-  XML: "xml",
-  NATIVE: "native"
-};
-var NATIVE_TOOL_DEFAULTS = {
-  supportsNativeTools: true,
-  defaultToolProtocol: TOOL_PROTOCOL.NATIVE
-};
-function isNativeProtocol(protocol) {
-  return protocol === TOOL_PROTOCOL.NATIVE;
-}
-function getEffectiveProtocol(toolProtocol) {
-  return toolProtocol || TOOL_PROTOCOL.XML;
-}
 
 // src/events.ts
 var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
@@ -477,10 +466,6 @@ var modelInfoSchema = z5.object({
   isStealthModel: z5.boolean().optional(),
   // Flag to indicate if the model is free (no cost)
   isFree: z5.boolean().optional(),
-  // Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
-  supportsNativeTools: z5.boolean().optional(),
-  // Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
-  defaultToolProtocol: z5.enum(["xml", "native"]).optional(),
   // Exclude specific native tools from being available (only applies to native protocol)
   // These tools will be removed from the set of tools available to the model
   excludedTools: z5.array(z5.string()).optional(),
@@ -577,8 +562,6 @@ var anthropicModels = {
   // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   // $3 per million input tokens (≤200K context)
   outputPrice: 15,
@@ -611,8 +594,6 @@ var anthropicModels = {
   // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   // $3 per million input tokens (≤200K context)
   outputPrice: 15,
@@ -644,8 +625,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 5,
   // $5 per million input tokens
   outputPrice: 25,
@@ -662,8 +641,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 15,
   // $15 per million input tokens
   outputPrice: 75,
@@ -680,8 +657,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 15,
   // $15 per million input tokens
   outputPrice: 75,
@@ -698,8 +673,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   // $3 per million input tokens
   outputPrice: 15,
@@ -717,8 +690,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   // $3 per million input tokens
   outputPrice: 15,
@@ -733,8 +704,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   // $3 per million input tokens
   outputPrice: 15,
@@ -749,8 +718,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: false,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 1,
   outputPrice: 5,
   cacheWritesPrice: 1.25,
@@ -761,8 +728,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 15,
   outputPrice: 75,
   cacheWritesPrice: 18.75,
@@ -773,8 +738,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.25,
   outputPrice: 1.25,
   cacheWritesPrice: 0.3,
@@ -785,8 +748,6 @@ var anthropicModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 1,
   outputPrice: 5,
   cacheWritesPrice: 1.25,
@@ -804,7 +765,6 @@ var basetenModels = {
   contextWindow: 262e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.6,
   outputPrice: 2.5,
   cacheWritesPrice: 0,
@@ -816,7 +776,6 @@ var basetenModels = {
   contextWindow: 2e5,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.6,
   outputPrice: 2.2,
   cacheWritesPrice: 0,
@@ -828,7 +787,6 @@ var basetenModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 2.55,
   outputPrice: 5.95,
   cacheWritesPrice: 0,
@@ -840,7 +798,6 @@ var basetenModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 2.55,
   outputPrice: 5.95,
   cacheWritesPrice: 0,
@@ -852,7 +809,6 @@ var basetenModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.77,
   outputPrice: 0.77,
   cacheWritesPrice: 0,
@@ -864,7 +820,6 @@ var basetenModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.5,
   outputPrice: 1.5,
   cacheWritesPrice: 0,
@@ -876,7 +831,6 @@ var basetenModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.3,
   outputPrice: 0.45,
   cacheWritesPrice: 0,
@@ -888,7 +842,6 @@ var basetenModels = {
   contextWindow: 128072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.1,
   outputPrice: 0.5,
   cacheWritesPrice: 0,
@@ -900,7 +853,6 @@ var basetenModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.22,
   outputPrice: 0.8,
   cacheWritesPrice: 0,
@@ -912,7 +864,6 @@ var basetenModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.38,
   outputPrice: 1.53,
   cacheWritesPrice: 0,
@@ -924,7 +875,6 @@ var basetenModels = {
   contextWindow: 262e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.6,
   outputPrice: 2.5,
   cacheWritesPrice: 0,
@@ -944,8 +894,6 @@ var bedrockModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningBudget: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -959,7 +907,6 @@ var bedrockModels = {
   contextWindow: 3e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
   inputPrice: 0.8,
   outputPrice: 3.2,
   cacheWritesPrice: 0.8,
@@ -975,7 +922,6 @@ var bedrockModels = {
   contextWindow: 3e5,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 1,
   outputPrice: 4,
   cacheWritesPrice: 1,
@@ -989,7 +935,6 @@ var bedrockModels = {
   contextWindow: 3e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
   inputPrice: 0.06,
   outputPrice: 0.24,
   cacheWritesPrice: 0.06,
@@ -1005,7 +950,6 @@ var bedrockModels = {
   contextWindow: 1e6,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
   inputPrice: 0.33,
   outputPrice: 2.75,
   cacheWritesPrice: 0,
@@ -1021,7 +965,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: true,
-  supportsNativeTools: true,
   inputPrice: 0.035,
   outputPrice: 0.14,
   cacheWritesPrice: 0.035,
@@ -1038,8 +981,6 @@ var bedrockModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningBudget: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -1054,8 +995,6 @@ var bedrockModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningBudget: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 15,
   outputPrice: 75,
   cacheWritesPrice: 18.75,
@@ -1070,8 +1009,6 @@ var bedrockModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningBudget: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 5,
   outputPrice: 25,
   cacheWritesPrice: 6.25,
@@ -1086,8 +1023,6 @@ var bedrockModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningBudget: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 15,
   outputPrice: 75,
   cacheWritesPrice: 18.75,
@@ -1102,8 +1037,6 @@ var bedrockModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningBudget: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -1117,8 +1050,6 @@ var bedrockModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -1132,8 +1063,6 @@ var bedrockModels = {
   contextWindow: 2e5,
   supportsImages: false,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.8,
   outputPrice: 4,
   cacheWritesPrice: 1,
@@ -1148,8 +1077,6 @@ var bedrockModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningBudget: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 1,
   outputPrice: 5,
   cacheWritesPrice: 1.25,
@@ -1165,8 +1092,6 @@ var bedrockModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15
 },
@@ -1175,8 +1100,6 @@ var bedrockModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 15,
   outputPrice: 75
 },
@@ -1185,8 +1108,6 @@ var bedrockModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15
 },
@@ -1195,8 +1116,6 @@ var bedrockModels = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.25,
   outputPrice: 1.25
 },
@@ -1205,7 +1124,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 1.35,
   outputPrice: 5.4
 },
@@ -1214,7 +1132,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.5,
   outputPrice: 1.5,
   description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases"
@@ -1224,7 +1141,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 2,
   outputPrice: 6,
   description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model"
@@ -1234,7 +1150,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.72,
   outputPrice: 0.72,
   description: "Llama 3.3 Instruct (70B)"
@@ -1244,7 +1159,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.72,
   outputPrice: 0.72,
   description: "Llama 3.2 Instruct (90B)"
@@ -1254,7 +1168,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.16,
   outputPrice: 0.16,
   description: "Llama 3.2 Instruct (11B)"
@@ -1264,7 +1177,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.15,
   outputPrice: 0.15,
   description: "Llama 3.2 Instruct (3B)"
@@ -1274,7 +1186,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.1,
   outputPrice: 0.1,
   description: "Llama 3.2 Instruct (1B)"
@@ -1284,7 +1195,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 2.4,
   outputPrice: 2.4,
   description: "Llama 3.1 Instruct (405B)"
@@ -1294,7 +1204,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.72,
   outputPrice: 0.72,
   description: "Llama 3.1 Instruct (70B)"
@@ -1304,7 +1213,6 @@ var bedrockModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.9,
   outputPrice: 0.9,
   description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
@@ -1314,7 +1222,6 @@ var bedrockModels = {
   contextWindow: 8e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.22,
   outputPrice: 0.22,
   description: "Llama 3.1 Instruct (8B)"
@@ -1324,7 +1231,6 @@ var bedrockModels = {
   contextWindow: 8e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 2.65,
   outputPrice: 3.5
 },
@@ -1333,7 +1239,6 @@ var bedrockModels = {
   contextWindow: 4e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.3,
   outputPrice: 0.6
 },
@@ -1342,7 +1247,6 @@ var bedrockModels = {
   contextWindow: 8e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.15,
   outputPrice: 0.2,
   description: "Amazon Titan Text Lite"
@@ -1352,7 +1256,6 @@ var bedrockModels = {
   contextWindow: 8e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.2,
   outputPrice: 0.6,
   description: "Amazon Titan Text Express"
@@ -1362,8 +1265,6 @@ var bedrockModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   preserveReasoning: true,
   inputPrice: 0.6,
   outputPrice: 2.5,
@@ -1374,8 +1275,6 @@ var bedrockModels = {
   contextWindow: 196608,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   preserveReasoning: true,
   inputPrice: 0.3,
   outputPrice: 1.2,
@@ -1386,8 +1285,6 @@ var bedrockModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.15,
   outputPrice: 1.2,
   description: "Qwen3 Next 80B (MoE model with 3B active parameters)"
@@ -1397,8 +1294,6 @@ var bedrockModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.45,
   outputPrice: 1.8,
   description: "Qwen3 Coder 480B (MoE model with 35B active parameters)"
@@ -1489,26 +1384,14 @@ var BEDROCK_SERVICE_TIER_PRICING = {
 // src/providers/cerebras.ts
 var cerebrasDefaultModelId = "gpt-oss-120b";
 var cerebrasModels = {
-"zai-glm-4.6": {
-  maxTokens: 16384,
-  // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
-  contextWindow: 131072,
-  supportsImages: false,
-  supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
-  inputPrice: 0,
-  outputPrice: 0,
-  description: "Fast general-purpose model on Cerebras (up to 1,000 tokens/s). To be deprecated soon."
-},
 "zai-glm-4.7": {
   maxTokens: 16384,
   // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
   contextWindow: 131072,
   supportsImages: false,
-  supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
+  supportsPromptCache: true,
+  supportsTemperature: true,
+  defaultTemperature: 1,
   inputPrice: 0,
   outputPrice: 0,
   description: "Highly capable general-purpose model on Cerebras (up to 1,000 tokens/s), competitive with leading proprietary models on coding tasks."
@@ -1519,8 +1402,6 @@ var cerebrasModels = {
   contextWindow: 64e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Intelligent model with ~1400 tokens/s"
@@ -1531,8 +1412,6 @@ var cerebrasModels = {
   contextWindow: 64e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Powerful model with ~2600 tokens/s"
@@ -1543,8 +1422,6 @@ var cerebrasModels = {
   contextWindow: 64e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "SOTA coding performance with ~2500 tokens/s"
@@ -1555,8 +1432,6 @@ var cerebrasModels = {
   contextWindow: 64e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -1571,8 +1446,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek R1 0528 model."
@@ -1582,8 +1455,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek R1 model."
@@ -1593,8 +1464,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek V3 model."
@@ -1604,8 +1473,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek V3.1 model."
@@ -1615,8 +1482,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.23,
   outputPrice: 0.9,
   description: "DeepSeek\u2011V3.1\u2011Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix\u2011ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance."
@@ -1626,8 +1491,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 1,
   outputPrice: 3,
   description: "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2\xD7 quota per request and not intended for bulk workloads."
@@ -1637,8 +1500,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.25,
   outputPrice: 0.35,
   description: "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long\u2011context training and inference efficiency while maintaining performance comparable to V3.1\u2011Terminus."
@@ -1650,8 +1511,6 @@ var chutesModels = {
   // From Groq
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Unsloth Llama 3.3 70B Instruct model."
@@ -1661,8 +1520,6 @@ var chutesModels = {
   contextWindow: 512e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context."
@@ -1672,8 +1529,6 @@ var chutesModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Unsloth Mistral Nemo Instruct model."
@@ -1683,8 +1538,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Unsloth Gemma 3 12B IT model."
@@ -1694,8 +1547,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Nous DeepHermes 3 Llama 3 8B Preview model."
@@ -1705,8 +1556,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Unsloth Gemma 3 4B IT model."
@@ -1716,8 +1565,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Nvidia Llama 3.3 Nemotron Super 49B model."
@@ -1727,8 +1574,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Nvidia Llama 3.1 Nemotron Ultra 253B model."
@@ -1738,8 +1583,6 @@ var chutesModels = {
   contextWindow: 256e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model."
@@ -1749,8 +1592,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek V3 Base model."
@@ -1760,8 +1601,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek R1 Zero model."
@@ -1771,8 +1610,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek V3 (0324) model."
@@ -1782,8 +1619,6 @@ var chutesModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 235B A22B Instruct 2507 model with 262K context window."
@@ -1793,8 +1628,6 @@ var chutesModels = {
   contextWindow: 40960,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 235B A22B model."
@@ -1804,8 +1637,6 @@ var chutesModels = {
   contextWindow: 40960,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 32B model."
@@ -1815,8 +1646,6 @@ var chutesModels = {
   contextWindow: 40960,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 30B A3B model."
@@ -1826,8 +1655,6 @@ var chutesModels = {
   contextWindow: 40960,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 14B model."
@@ -1837,8 +1664,6 @@ var chutesModels = {
   contextWindow: 40960,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 8B model."
@@ -1848,8 +1673,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Microsoft MAI-DS-R1 FP8 model."
@@ -1859,8 +1682,6 @@ var chutesModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "TNGTech DeepSeek R1T Chimera model."
@@ -1870,8 +1691,6 @@ var chutesModels = {
   contextWindow: 151329,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "GLM-4.5-Air model with 151,329 token context window and 106B total parameters with 12B activated."
@@ -1881,8 +1700,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture."
@@ -1892,8 +1709,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 1,
   outputPrice: 3,
   description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference."
@@ -1903,8 +1718,6 @@ var chutesModels = {
   contextWindow: 202752,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios."
@@ -1915,8 +1728,6 @@ var chutesModels = {
   contextWindow: 202752,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 1.15,
   outputPrice: 3.25,
   description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference."
@@ -1926,8 +1737,6 @@ var chutesModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "LongCat Flash Thinking FP8 model with 128K context window, optimized for complex reasoning and coding tasks."
@@ -1937,8 +1746,6 @@ var chutesModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks."
@@ -1948,8 +1755,6 @@ var chutesModels = {
   contextWindow: 75e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.1481,
   outputPrice: 0.5926,
   description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
@@ -1959,8 +1764,6 @@ var chutesModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.1999,
   outputPrice: 0.8001,
   description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
@@ -1970,8 +1773,6 @@ var chutesModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.077968332,
   outputPrice: 0.31202496,
   description: "Qwen3 235B A22B Thinking 2507 model with 262K context window."
@@ -1981,8 +1782,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Fast, stable instruction-tuned model optimized for complex tasks, RAG, and tool use without thinking traces."
@@ -1992,8 +1791,6 @@ var chutesModels = {
   contextWindow: 131072,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   description: "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis."
@@ -2003,8 +1800,6 @@ var chutesModels = {
   contextWindow: 262144,
   supportsImages: true,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.16,
   outputPrice: 0.65,
   description: "Qwen3\u2011VL\u2011235B\u2011A22B\u2011Thinking is an open\u2011weight MoE vision\u2011language model (235B total, ~22B activated) optimized for deliberate multi\u2011step reasoning with strong text\u2011image\u2011video understanding and long\u2011context capabilities."
@@ -2012,73 +1807,6 @@ var chutesModels = {
 };
 var chutesDefaultModelInfo = chutesModels[chutesDefaultModelId];
 
-// src/providers/claude-code.ts
-var DATE_SUFFIX_PATTERN = /-\d{8}$/;
-var claudeCodeModels = {
-  "claude-haiku-4-5": {
-    maxTokens: 32768,
-    contextWindow: 2e5,
-    supportsImages: true,
-    supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
-    supportsReasoningEffort: ["disable", "low", "medium", "high"],
-    reasoningEffort: "medium",
-    description: "Claude Haiku 4.5 - Fast and efficient with thinking"
-  },
-  "claude-sonnet-4-5": {
-    maxTokens: 32768,
-    contextWindow: 2e5,
-    supportsImages: true,
-    supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
-    supportsReasoningEffort: ["disable", "low", "medium", "high"],
-    reasoningEffort: "medium",
-    description: "Claude Sonnet 4.5 - Balanced performance with thinking"
-  },
-  "claude-opus-4-5": {
-    maxTokens: 32768,
-    contextWindow: 2e5,
-    supportsImages: true,
-    supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
-    supportsReasoningEffort: ["disable", "low", "medium", "high"],
-    reasoningEffort: "medium",
-    description: "Claude Opus 4.5 - Most capable with thinking"
-  }
-};
-var claudeCodeDefaultModelId = "claude-sonnet-4-5";
-var MODEL_FAMILY_PATTERNS = [
-  // Opus models (any version) → claude-opus-4-5
-  { pattern: /opus/i, target: "claude-opus-4-5" },
-  // Haiku models (any version) → claude-haiku-4-5
-  { pattern: /haiku/i, target: "claude-haiku-4-5" },
-  // Sonnet models (any version) → claude-sonnet-4-5
-  { pattern: /sonnet/i, target: "claude-sonnet-4-5" }
-];
-function normalizeClaudeCodeModelId(modelId) {
-  if (Object.hasOwn(claudeCodeModels, modelId)) {
-    return modelId;
-  }
-  const withoutDate = modelId.replace(DATE_SUFFIX_PATTERN, "");
-  if (Object.hasOwn(claudeCodeModels, withoutDate)) {
-    return withoutDate;
-  }
-  for (const { pattern, target } of MODEL_FAMILY_PATTERNS) {
-    if (pattern.test(modelId)) {
-      return target;
-    }
-  }
-  return claudeCodeDefaultModelId;
-}
-var claudeCodeReasoningConfig = {
-  low: { budgetTokens: 16e3 },
-  medium: { budgetTokens: 32e3 },
-  high: { budgetTokens: 64e3 }
-};
-
 // src/providers/deepseek.ts
 var deepSeekDefaultModelId = "deepseek-chat";
 var deepSeekModels = {
@@ -2088,8 +1816,6 @@ var deepSeekModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.28,
   // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
   outputPrice: 0.42,
@@ -2106,8 +1832,6 @@ var deepSeekModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   preserveReasoning: true,
   inputPrice: 0.28,
   // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
@@ -2130,8 +1854,6 @@ var doubaoModels = {
   contextWindow: 128e3,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 1e-4,
   // $0.0001 per million tokens (cache miss)
   outputPrice: 4e-4,
@@ -2147,8 +1869,6 @@ var doubaoModels = {
   contextWindow: 128e3,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 2e-4,
   // $0.0002 per million tokens
   outputPrice: 8e-4,
@@ -2164,8 +1884,6 @@ var doubaoModels = {
   contextWindow: 128e3,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 15e-5,
   // $0.00015 per million tokens
   outputPrice: 6e-4,
@@ -2188,7 +1906,6 @@ var featherlessModels = {
   contextWindow: 32678,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek V3 0324 model."
@@ -2198,7 +1915,6 @@ var featherlessModels = {
   contextWindow: 32678,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0,
   outputPrice: 0,
   description: "DeepSeek R1 0528 model."
@@ -2208,7 +1924,6 @@ var featherlessModels = {
   contextWindow: 32678,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0,
   outputPrice: 0,
   description: "Kimi K2 Instruct model."
@@ -2218,7 +1933,6 @@ var featherlessModels = {
   contextWindow: 32678,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0,
   outputPrice: 0,
   description: "GPT-OSS 120B model."
@@ -2228,7 +1942,6 @@ var featherlessModels = {
   contextWindow: 32678,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0,
   outputPrice: 0,
   description: "Qwen3 Coder 480B A35B Instruct model."
@@ -2244,8 +1957,6 @@ var fireworksModels = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: true,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 2.5,
   cacheReadsPrice: 0.15,
@@ -2256,8 +1967,6 @@ var fireworksModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 2.5,
   description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -2267,7 +1976,6 @@ var fireworksModels = {
   contextWindow: 256e3,
   supportsImages: false,
   supportsPromptCache: true,
-  supportsNativeTools: true,
   supportsTemperature: true,
   preserveReasoning: true,
   defaultTemperature: 1,
@@ -2281,8 +1989,6 @@ var fireworksModels = {
   contextWindow: 204800,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.3,
   outputPrice: 1.2,
   description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -2292,8 +1998,6 @@ var fireworksModels = {
   contextWindow: 256e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.22,
   outputPrice: 0.88,
   description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -2303,8 +2007,6 @@ var fireworksModels = {
   contextWindow: 256e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.45,
   outputPrice: 1.8,
   description: "Qwen3's most agentic code model to date."
@@ -2314,8 +2016,6 @@ var fireworksModels = {
   contextWindow: 16e4,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 8,
   description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2325,8 +2025,6 @@ var fireworksModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.9,
   outputPrice: 0.9,
   description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2336,8 +2034,6 @@ var fireworksModels = {
   contextWindow: 163840,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.56,
   outputPrice: 1.68,
   description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
@@ -2347,8 +2043,6 @@ var fireworksModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.55,
   outputPrice: 2.19,
   description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2358,8 +2052,6 @@ var fireworksModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.55,
   outputPrice: 2.19,
   description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2369,8 +2061,6 @@ var fireworksModels = {
   contextWindow: 198e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.55,
   outputPrice: 2.19,
   description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
@@ -2380,8 +2070,6 @@ var fireworksModels = {
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.07,
   outputPrice: 0.3,
   description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
@@ -2391,11 +2079,63 @@ var fireworksModels =
   contextWindow: 128e3,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
-  defaultToolProtocol: "native",
   inputPrice: 0.15,
   outputPrice: 0.6,
   description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
+},
+"accounts/fireworks/models/minimax-m2p1": {
+  maxTokens: 4096,
+  contextWindow: 204800,
+  supportsImages: false,
+  supportsPromptCache: false,
+  inputPrice: 0.3,
+  outputPrice: 1.2,
+  description: "MiniMax M2.1 is an upgraded version of M2 with improved performance on complex reasoning, coding, and long-context understanding tasks."
+},
+"accounts/fireworks/models/deepseek-v3p2": {
+  maxTokens: 16384,
+  contextWindow: 163840,
+  supportsImages: false,
+  supportsPromptCache: false,
+  inputPrice: 0.56,
+  outputPrice: 1.68,
+  description: "DeepSeek V3.2 is the latest iteration of the V3 model family with enhanced reasoning capabilities, improved code generation, and better instruction following."
+},
+"accounts/fireworks/models/glm-4p7": {
+  maxTokens: 25344,
+  contextWindow: 198e3,
+  supportsImages: false,
+  supportsPromptCache: false,
+  inputPrice: 0.55,
+  outputPrice: 2.19,
+  description: "Z.ai GLM-4.7 is the latest coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality."
+},
+"accounts/fireworks/models/llama-v3p3-70b-instruct": {
+  maxTokens: 16384,
+  contextWindow: 131072,
+  supportsImages: false,
+  supportsPromptCache: false,
+  inputPrice: 0.9,
+  outputPrice: 0.9,
+  description: "Meta Llama 3.3 70B Instruct is a highly capable instruction-tuned model with strong reasoning, coding, and general task performance."
+},
+"accounts/fireworks/models/llama4-maverick-instruct-basic": {
+  maxTokens: 16384,
+  contextWindow: 131072,
|
|
2125
|
+
supportsImages: true,
|
|
2126
|
+
supportsPromptCache: false,
|
|
2127
|
+
inputPrice: 0.22,
|
|
2128
|
+
outputPrice: 0.88,
|
|
2129
|
+
description: "Llama 4 Maverick is Meta's latest multimodal model with vision capabilities, optimized for instruction following and coding tasks."
|
|
2130
|
+
},
|
|
2131
|
+
"accounts/fireworks/models/llama4-scout-instruct-basic": {
|
|
2132
|
+
maxTokens: 16384,
|
|
2133
|
+
contextWindow: 131072,
|
|
2134
|
+
supportsImages: true,
|
|
2135
|
+
supportsPromptCache: false,
|
|
2136
|
+
inputPrice: 0.15,
|
|
2137
|
+
outputPrice: 0.6,
|
|
2138
|
+
description: "Llama 4 Scout is a smaller, faster variant of Llama 4 with multimodal capabilities, ideal for quick iterations and cost-effective deployments."
|
|
2399
2139
|
}
|
|
2400
2140
|
};
|
|
2401
2141
|
|
|
@@ -2406,8 +2146,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
@@ -2415,16 +2153,19 @@ var geminiModels = {
  defaultTemperature: 1,
  inputPrice: 4,
  outputPrice: 18,
+ cacheReadsPrice: 0.4,
  tiers: [
  {
  contextWindow: 2e5,
  inputPrice: 2,
- outputPrice: 12
+ outputPrice: 12,
+ cacheReadsPrice: 0.2
  },
  {
  contextWindow: Infinity,
  inputPrice: 4,
- outputPrice: 18
+ outputPrice: 18,
+ cacheReadsPrice: 0.4
  }
  ]
  },
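
The hunk above adds a top-level cacheReadsPrice plus per-tier cacheReadsPrice values next to the existing tiered inputPrice/outputPrice entries. A minimal TypeScript sketch of how such tiers could be resolved for a given prompt size follows; the PricingTier shape and resolveTier helper are illustrative assumptions, not exports of this package.

    // Illustrative only: tiers are assumed to be ordered smallest contextWindow
    // first and to end with a catch-all tier whose contextWindow is Infinity,
    // matching the shape shown in the diff above.
    interface PricingTier {
        contextWindow: number
        inputPrice?: number
        outputPrice?: number
        cacheReadsPrice?: number
    }

    function resolveTier(tiers: PricingTier[], promptTokens: number): PricingTier | undefined {
        // The first tier large enough to hold the prompt wins; the Infinity tier
        // guarantees a match for any prompt size.
        return tiers.find((tier) => promptTokens <= tier.contextWindow)
    }
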
@@ -2432,25 +2173,20 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  supportsTemperature: true,
  defaultTemperature: 1,
- inputPrice: 0.
- outputPrice:
- cacheReadsPrice: 0.
- cacheWritesPrice: 1
+ inputPrice: 0.5,
+ outputPrice: 3,
+ cacheReadsPrice: 0.05
  },
  // 2.5 Pro models
  "gemini-2.5-pro": {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2479,8 +2215,6 @@ var geminiModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2508,8 +2242,6 @@ var geminiModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2535,8 +2267,6 @@ var geminiModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2565,8 +2295,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -2579,8 +2307,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -2593,8 +2319,6 @@ var geminiModels = {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -2608,8 +2332,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.1,
  outputPrice: 0.4,
@@ -2622,8 +2344,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.1,
  outputPrice: 0.4,
@@ -2643,8 +2363,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.05,
  outputPrice: 0.08,
  description: "Meta Llama 3.1 8B Instant model, 128K context."
@@ -2654,8 +2372,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.59,
  outputPrice: 0.79,
  description: "Meta Llama 3.3 70B Versatile model, 128K context."
@@ -2665,8 +2381,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.11,
  outputPrice: 0.34,
  description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
@@ -2676,8 +2390,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.29,
  outputPrice: 0.59,
  description: "Alibaba Qwen 3 32B model, 128K context."
@@ -2687,8 +2399,6 @@ var groqModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -2699,8 +2409,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.15,
  outputPrice: 0.75,
  description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
@@ -2710,8 +2418,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1,
  outputPrice: 0.5,
  description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
@@ -2738,7 +2444,6 @@ var ioIntelligenceModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "DeepSeek R1 reasoning model"
  },
  "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
@@ -2746,7 +2451,6 @@ var ioIntelligenceModels = {
  contextWindow: 43e4,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "Llama 4 Maverick 17B model"
  },
  "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
@@ -2754,7 +2458,6 @@ var ioIntelligenceModels = {
  contextWindow: 106e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "Qwen3 Coder 480B specialized for coding"
  },
  "openai/gpt-oss-120b": {
@@ -2762,7 +2465,6 @@ var ioIntelligenceModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "OpenAI GPT-OSS 120B model"
  }
  };
@@ -2774,8 +2476,6 @@ var litellmDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -2790,8 +2490,6 @@ var lMStudioDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -2807,8 +2505,6 @@ var mistralModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2,
  outputPrice: 5
  },
@@ -2817,8 +2513,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.4,
  outputPrice: 2
  },
@@ -2827,8 +2521,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.4,
  outputPrice: 2
  },
@@ -2837,8 +2529,6 @@ var mistralModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.3,
  outputPrice: 0.9
  },
@@ -2847,8 +2537,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2,
  outputPrice: 6
  },
@@ -2857,8 +2545,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1,
  outputPrice: 0.1
  },
@@ -2867,8 +2553,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.04,
  outputPrice: 0.04
  },
@@ -2877,8 +2561,6 @@ var mistralModels = {
  contextWindow: 32e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.2,
  outputPrice: 0.6
  },
@@ -2887,8 +2569,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2,
  outputPrice: 6
  }
@@ -2903,8 +2583,6 @@ var moonshotModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  // $0.60 per million tokens (cache miss)
  outputPrice: 2.5,
@@ -2920,8 +2598,6 @@ var moonshotModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -2932,8 +2608,6 @@ var moonshotModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2.4,
  // $2.40 per million tokens (cache miss)
  outputPrice: 10,
@@ -2952,8 +2626,6 @@ var moonshotModels = {
  supportsImages: false,
  // Text-only (no image/vision support)
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  // $0.60 per million tokens (cache miss)
  outputPrice: 2.5,
@@ -2967,6 +2639,21 @@ var moonshotModels = {
  preserveReasoning: true,
  defaultTemperature: 1,
  description: `The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems.`
+ },
+ "kimi-k2.5": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ // $0.60 per million tokens (cache miss)
+ outputPrice: 3,
+ // $3.00 per million tokens
+ cacheReadsPrice: 0.1,
+ // $0.10 per million tokens (cache hit)
+ supportsTemperature: true,
+ defaultTemperature: 1,
+ description: "Kimi K2.5 is the latest generation of Moonshot AI's Kimi series, featuring improved reasoning capabilities and enhanced performance across diverse tasks."
  }
  };
  var MOONSHOT_DEFAULT_TEMPERATURE = 0.6;
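
The kimi-k2.5 entry added above prices input at $0.60 per million tokens on a cache miss, cache reads at $0.10 per million tokens, and output at $3.00 per million tokens. A back-of-the-envelope cost estimate using those numbers could look like the sketch below; the helper is illustrative and not part of this package.

    // Illustrative cost estimate for the kimi-k2.5 prices above (USD per million tokens).
    function estimateKimiK25CostUSD(inputTokens: number, cachedInputTokens: number, outputTokens: number): number {
        const INPUT_PER_M = 0.6 // cache miss
        const CACHE_READ_PER_M = 0.1 // cache hit
        const OUTPUT_PER_M = 3
        const uncached = Math.max(inputTokens - cachedInputTokens, 0)
        return (uncached * INPUT_PER_M + cachedInputTokens * CACHE_READ_PER_M + outputTokens * OUTPUT_PER_M) / 1e6
    }
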
@@ -2978,7 +2665,6 @@ var ollamaDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -2992,8 +2678,6 @@ var openAiNativeModels = {
  "gpt-5.1-codex-max": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3011,8 +2695,6 @@ var openAiNativeModels = {
  "gpt-5.2": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3031,11 +2713,26 @@ var openAiNativeModels = {
  ],
  description: "GPT-5.2: Our flagship model for coding and agentic tasks across industries"
  },
+ "gpt-5.2-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ promptCacheRetention: "24h",
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 1.75,
+ outputPrice: 14,
+ cacheReadsPrice: 0.175,
+ supportsTemperature: false,
+ tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 3.5, outputPrice: 28, cacheReadsPrice: 0.35 }],
+ description: "GPT-5.2 Codex: Our most intelligent coding model optimized for long-horizon, agentic coding tasks"
+ },
  "gpt-5.2-chat-latest": {
  maxTokens: 16384,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3048,8 +2745,6 @@ var openAiNativeModels = {
  "gpt-5.1": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3071,8 +2766,6 @@ var openAiNativeModels = {
  "gpt-5.1-codex": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3090,8 +2783,6 @@ var openAiNativeModels = {
  "gpt-5.1-codex-mini": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3108,8 +2799,6 @@ var openAiNativeModels = {
  "gpt-5": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3130,8 +2819,6 @@ var openAiNativeModels = {
  "gpt-5-mini": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3152,8 +2839,6 @@ var openAiNativeModels = {
  "gpt-5-codex": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3170,8 +2855,6 @@ var openAiNativeModels = {
  "gpt-5-nano": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3189,8 +2872,6 @@ var openAiNativeModels = {
  "gpt-5-chat-latest": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3203,8 +2884,6 @@ var openAiNativeModels = {
  "gpt-4.1": {
  maxTokens: 32768,
  contextWindow: 1047576,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3220,8 +2899,6 @@ var openAiNativeModels = {
  "gpt-4.1-mini": {
  maxTokens: 32768,
  contextWindow: 1047576,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3237,8 +2914,6 @@ var openAiNativeModels = {
  "gpt-4.1-nano": {
  maxTokens: 32768,
  contextWindow: 1047576,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3254,8 +2929,6 @@ var openAiNativeModels = {
  o3: {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -3272,8 +2945,6 @@ var openAiNativeModels = {
  "o3-high": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -3285,8 +2956,6 @@ var openAiNativeModels = {
  "o3-low": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -3298,8 +2967,6 @@ var openAiNativeModels = {
  "o4-mini": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3316,8 +2983,6 @@ var openAiNativeModels = {
  "o4-mini-high": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3329,8 +2994,6 @@ var openAiNativeModels = {
  "o4-mini-low": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3342,8 +3005,6 @@ var openAiNativeModels = {
  "o3-mini": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3356,8 +3017,6 @@ var openAiNativeModels = {
  "o3-mini-high": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3369,8 +3028,6 @@ var openAiNativeModels = {
  "o3-mini-low": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3382,8 +3039,6 @@ var openAiNativeModels = {
  o1: {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 15,
@@ -3394,8 +3049,6 @@ var openAiNativeModels = {
  "o1-preview": {
  maxTokens: 32768,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 15,
@@ -3406,8 +3059,6 @@ var openAiNativeModels = {
  "o1-mini": {
  maxTokens: 65536,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3418,8 +3069,6 @@ var openAiNativeModels = {
  "gpt-4o": {
  maxTokens: 16384,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2.5,
@@ -3433,8 +3082,6 @@ var openAiNativeModels = {
  "gpt-4o-mini": {
  maxTokens: 16384,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 0.15,
@@ -3448,8 +3095,6 @@ var openAiNativeModels = {
  "codex-mini-latest": {
  maxTokens: 16384,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: false,
  inputPrice: 1.5,
@@ -3462,8 +3107,6 @@ var openAiNativeModels = {
  "gpt-5-2025-08-07": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3484,8 +3127,6 @@ var openAiNativeModels = {
  "gpt-5-mini-2025-08-07": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3506,8 +3147,6 @@ var openAiNativeModels = {
  "gpt-5-nano-2025-08-07": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3529,14 +3168,151 @@ var openAiModelInfoSaneDefaults = {
  supportsImages: true,
  supportsPromptCache: false,
  inputPrice: 0,
- outputPrice: 0
- supportsNativeTools: true,
- defaultToolProtocol: "native"
+ outputPrice: 0
  };
  var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
  var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
  var OPENAI_AZURE_AI_INFERENCE_PATH = "/models/chat/completions";

+ // src/providers/openai-codex.ts
+ var openAiCodexDefaultModelId = "gpt-5.2-codex";
+ var openAiCodexModels = {
+ "gpt-5.1-codex-max": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "xhigh",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.1 Codex Max: Maximum capability coding model via ChatGPT subscription"
+ },
+ "gpt-5.1-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.1 Codex: GPT-5.1 optimized for agentic coding via ChatGPT subscription"
+ },
+ "gpt-5.2-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.2 Codex: OpenAI's flagship coding model via ChatGPT subscription"
+ },
+ "gpt-5.1": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["none", "low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsVerbosity: true,
+ supportsTemperature: false,
+ description: "GPT-5.1: General GPT-5.1 model via ChatGPT subscription"
+ },
+ "gpt-5": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsVerbosity: true,
+ supportsTemperature: false,
+ description: "GPT-5: General GPT-5 model via ChatGPT subscription"
+ },
+ "gpt-5-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5 Codex: GPT-5 optimized for agentic coding via ChatGPT subscription"
+ },
+ "gpt-5-codex-mini": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5 Codex Mini: Faster coding model via ChatGPT subscription"
+ },
+ "gpt-5.1-codex-mini": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.1 Codex Mini: Faster version for coding tasks via ChatGPT subscription"
+ },
+ "gpt-5.2": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.2: Latest GPT model via ChatGPT subscription"
+ }
+ };
+
  // src/providers/openrouter.ts
  var openRouterDefaultModelId = "anthropic/claude-sonnet-4.5";
  var openRouterDefaultModelInfo = {
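
The new src/providers/openai-codex.ts section above defines openAiCodexModels keyed by model id, with openAiCodexDefaultModelId set to "gpt-5.2-codex". Assuming both values are exported by the package (the lookup helper itself is hypothetical), resolving a model with a fallback to the default could look like:

    import { openAiCodexDefaultModelId, openAiCodexModels } from "@roo-code/types"

    // Fall back to the default id when the requested id is not a key of the map.
    function getOpenAiCodexModel(id?: string) {
        const modelId = id && id in openAiCodexModels ? id : openAiCodexDefaultModelId
        return { id: modelId, info: openAiCodexModels[modelId as keyof typeof openAiCodexModels] }
    }
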
@@ -3544,7 +3320,6 @@ var openRouterDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -3618,8 +3393,6 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3631,8 +3404,6 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3648,8 +3419,6 @@ var requestyDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -3705,8 +3474,6 @@ var sambaNovaModels = {
  contextWindow: 16384,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1,
  outputPrice: 0.2,
  description: "Meta Llama 3.1 8B Instruct model with 16K context window."
@@ -3716,8 +3483,6 @@ var sambaNovaModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 1.2,
  description: "Meta Llama 3.3 70B Instruct model with 128K context window."
@@ -3728,8 +3493,6 @@ var sambaNovaModels = {
  supportsImages: false,
  supportsPromptCache: false,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 5,
  outputPrice: 7,
  description: "DeepSeek R1 reasoning model with 32K context window."
@@ -3739,8 +3502,6 @@ var sambaNovaModels = {
  contextWindow: 32768,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 4.5,
  description: "DeepSeek V3 model with 32K context window."
@@ -3750,8 +3511,6 @@ var sambaNovaModels = {
  contextWindow: 32768,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 4.5,
  description: "DeepSeek V3.1 model with 32K context window."
@@ -3761,8 +3520,6 @@ var sambaNovaModels = {
  contextWindow: 131072,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.63,
  outputPrice: 1.8,
  description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
@@ -3772,8 +3529,6 @@ var sambaNovaModels = {
  contextWindow: 8192,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.4,
  outputPrice: 0.8,
  description: "Alibaba Qwen 3 32B model with 8K context window."
@@ -3783,8 +3538,6 @@ var sambaNovaModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.22,
  outputPrice: 0.59,
  description: "OpenAI gpt oss 120b model with 128k context window."
@@ -3798,7 +3551,6 @@ var unboundDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -3812,8 +3564,6 @@ var vertexModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
@@ -3821,16 +3571,19 @@ var vertexModels = {
  defaultTemperature: 1,
  inputPrice: 4,
  outputPrice: 18,
+ cacheReadsPrice: 0.4,
  tiers: [
  {
  contextWindow: 2e5,
  inputPrice: 2,
- outputPrice: 12
+ outputPrice: 12,
+ cacheReadsPrice: 0.2
  },
  {
  contextWindow: Infinity,
  inputPrice: 4,
- outputPrice: 18
+ outputPrice: 18,
+ cacheReadsPrice: 0.4
  }
  ]
  },
@@ -3838,24 +3591,19 @@ var vertexModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  supportsTemperature: true,
  defaultTemperature: 1,
- inputPrice: 0.
- outputPrice:
- cacheReadsPrice: 0.
- cacheWritesPrice: 1
+ inputPrice: 0.5,
+ outputPrice: 3,
+ cacheReadsPrice: 0.05
  },
  "gemini-2.5-flash-preview-05-20:thinking": {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.15,
  outputPrice: 3.5,
@@ -3867,8 +3615,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.15,
  outputPrice: 0.6
@@ -3877,8 +3623,6 @@ var vertexModels = {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -3891,8 +3635,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0.15,
  outputPrice: 3.5,
@@ -3904,8 +3646,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0.15,
  outputPrice: 0.6
@@ -3914,8 +3654,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15
@@ -3924,8 +3662,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15
@@ -3934,8 +3670,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15,
@@ -3946,8 +3680,6 @@ var vertexModels = {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15,
@@ -3973,8 +3705,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0,
  outputPrice: 0
@@ -3983,8 +3713,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 2097152,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0,
  outputPrice: 0
@@ -3993,8 +3721,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.15,
  outputPrice: 0.6
@@ -4003,8 +3729,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0.075,
  outputPrice: 0.3
@@ -4013,8 +3737,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 32768,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0,
  outputPrice: 0
@@ -4023,8 +3745,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.075,
  outputPrice: 0.3
@@ -4033,8 +3753,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 2097152,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 1.25,
  outputPrice: 5
@@ -4045,8 +3763,6 @@ var vertexModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -4078,8 +3794,6 @@ var vertexModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -4110,8 +3824,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -4123,8 +3835,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 5,
  outputPrice: 25,
  cacheWritesPrice: 6.25,
@@ -4136,8 +3846,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4149,8 +3857,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4161,8 +3867,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4175,8 +3879,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4187,8 +3889,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4199,8 +3899,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4211,8 +3909,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -4223,8 +3919,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4235,8 +3929,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.25,
  outputPrice: 1.25,
  cacheWritesPrice: 0.3,
@@ -4246,8 +3938,6 @@ var vertexModels = {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.1,
  outputPrice: 0.4,
@@ -4261,7 +3951,6 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.35,
  outputPrice: 1.15,
  description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
@@ -4271,7 +3960,6 @@ var vertexModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 1.35,
  outputPrice: 5.4,
  description: "DeepSeek R1 (0528). Available in us-central1"
@@ -4281,7 +3969,6 @@ var vertexModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 1.7,
  description: "DeepSeek V3.1. Available in us-west2"
@@ -4291,7 +3978,6 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.6,
  description: "OpenAI gpt-oss 120B. Available in us-central1"
@@ -4301,7 +3987,6 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.075,
  outputPrice: 0.3,
  description: "OpenAI gpt-oss 20B. Available in us-central1"
@@ -4311,7 +3996,6 @@ var vertexModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 4,
  description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
@@ -4321,10 +4005,18 @@ var vertexModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.25,
  outputPrice: 1,
  description: "Qwen3 235B A22B Instruct. Available in us-south1"
+ },
+ "moonshotai/kimi-k2-thinking-maas": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsPromptCache: false,
|
|
4016
|
+
supportsImages: false,
|
|
4017
|
+
inputPrice: 0.6,
|
|
4018
|
+
outputPrice: 2.5,
|
|
4019
|
+
description: "Kimi K2 Thinking Model with 256K context window."
|
|
4328
4020
|
}
|
|
4329
4021
|
};
|
|
4330
4022
|
var VERTEX_1M_CONTEXT_MODEL_IDS = ["claude-sonnet-4@20250514", "claude-sonnet-4-5@20250929"];
|
|
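Usage sketch for the Vertex catalog change above, assuming vertexModels and VERTEX_1M_CONTEXT_MODEL_IDS are still exported as in earlier releases:

```ts
// Sketch only: assumes vertexModels and VERTEX_1M_CONTEXT_MODEL_IDS remain exported.
import { vertexModels, VERTEX_1M_CONTEXT_MODEL_IDS } from "@roo-code/types"

// New in this release: Kimi K2 Thinking on Vertex, 256K context.
const kimi = vertexModels["moonshotai/kimi-k2-thinking-maas"]
console.log(kimi.contextWindow) // 262144

// The 1M-context opt-in list is unchanged by this diff.
VERTEX_1M_CONTEXT_MODEL_IDS.includes("claude-sonnet-4-5@20250929") // true
```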
@@ -4557,8 +4249,6 @@ var xaiModels = {
     contextWindow: 256e3,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.2,
     outputPrice: 1.5,
     cacheWritesPrice: 0.02,
@@ -4572,8 +4262,6 @@ var xaiModels = {
     contextWindow: 2e6,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.2,
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
@@ -4587,8 +4275,6 @@ var xaiModels = {
     contextWindow: 2e6,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.2,
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
@@ -4602,8 +4288,6 @@ var xaiModels = {
     contextWindow: 2e6,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.2,
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
@@ -4617,8 +4301,6 @@ var xaiModels = {
     contextWindow: 2e6,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.2,
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
@@ -4632,8 +4314,6 @@ var xaiModels = {
     contextWindow: 256e3,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 0.75,
@@ -4647,8 +4327,6 @@ var xaiModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.3,
     outputPrice: 0.5,
     cacheWritesPrice: 0.07,
@@ -4664,8 +4342,6 @@ var xaiModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 0.75,
@@ -4762,7 +4438,6 @@ var vercelAiGatewayDefaultModelInfo = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
-  supportsNativeTools: true,
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -4779,8 +4454,6 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.6,
     outputPrice: 2.2,
     cacheWritesPrice: 0,
@@ -4792,8 +4465,6 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.2,
     outputPrice: 1.1,
     cacheWritesPrice: 0,
@@ -4805,8 +4476,6 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 2.2,
     outputPrice: 8.9,
     cacheWritesPrice: 0,
@@ -4818,8 +4487,6 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 1.1,
     outputPrice: 4.5,
     cacheWritesPrice: 0,
@@ -4831,8 +4498,6 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0,
     outputPrice: 0,
     cacheWritesPrice: 0,
@@ -4844,21 +4509,28 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.6,
     outputPrice: 1.8,
     cacheWritesPrice: 0,
     cacheReadsPrice: 0.11,
     description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
   },
+  "glm-4.6v": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 0.3,
+    outputPrice: 0.9,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0.05,
+    description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
+  },
   "glm-4.6": {
     maxTokens: 16384,
     contextWindow: 2e5,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.6,
     outputPrice: 2.2,
     cacheWritesPrice: 0,
@@ -4870,8 +4542,6 @@ var internationalZAiModels = {
     contextWindow: 2e5,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     supportsReasoningEffort: ["disable", "medium"],
     reasoningEffort: "medium",
     preserveReasoning: true,
@@ -4881,13 +4551,55 @@ var internationalZAiModels = {
     cacheReadsPrice: 0.11,
     description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
   },
+  "glm-4.7-flash": {
+    maxTokens: 16384,
+    contextWindow: 2e5,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0,
+    outputPrice: 0,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
+  },
+  "glm-4.7-flashx": {
+    maxTokens: 16384,
+    contextWindow: 2e5,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.07,
+    outputPrice: 0.4,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0.01,
+    description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
+  },
+  "glm-4.6v-flash": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 0,
+    outputPrice: 0,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
+  },
+  "glm-4.6v-flashx": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 0.04,
+    outputPrice: 0.4,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 4e-3,
+    description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
+  },
   "glm-4-32b-0414-128k": {
     maxTokens: 16384,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.1,
     outputPrice: 0.1,
     cacheWritesPrice: 0,
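Usage sketch for the new international Z.AI entries, assuming internationalZAiModels is still exported as in earlier releases:

```ts
// Sketch only: assumes internationalZAiModels remains exported.
import { internationalZAiModels } from "@roo-code/types"

// New GLM vision tier on the international Z.AI endpoint.
const glm46v = internationalZAiModels["glm-4.6v"]
glm46v.supportsImages // true
glm46v.inputPrice     // 0.3 (USD per million input tokens)
glm46v.outputPrice    // 0.9
```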
@@ -4902,8 +4614,6 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.29,
     outputPrice: 1.14,
     cacheWritesPrice: 0,
@@ -4915,8 +4625,6 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.1,
     outputPrice: 0.6,
     cacheWritesPrice: 0,
@@ -4928,8 +4636,6 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.29,
     outputPrice: 1.14,
     cacheWritesPrice: 0,
@@ -4941,8 +4647,6 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.1,
     outputPrice: 0.6,
     cacheWritesPrice: 0,
@@ -4954,8 +4658,6 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0,
     outputPrice: 0,
     cacheWritesPrice: 0,
@@ -4967,8 +4669,6 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.29,
     outputPrice: 0.93,
     cacheWritesPrice: 0,
@@ -4980,8 +4680,6 @@ var mainlandZAiModels = {
     contextWindow: 204800,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     inputPrice: 0.29,
     outputPrice: 1.14,
     cacheWritesPrice: 0,
@@ -4993,8 +4691,6 @@ var mainlandZAiModels = {
     contextWindow: 204800,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     supportsReasoningEffort: ["disable", "medium"],
     reasoningEffort: "medium",
     preserveReasoning: true,
@@ -5003,6 +4699,61 @@ var mainlandZAiModels = {
     cacheWritesPrice: 0,
     cacheReadsPrice: 0.057,
     description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
+  },
+  "glm-4.7-flash": {
+    maxTokens: 16384,
+    contextWindow: 204800,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0,
+    outputPrice: 0,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
+  },
+  "glm-4.7-flashx": {
+    maxTokens: 16384,
+    contextWindow: 204800,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.035,
+    outputPrice: 0.2,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 5e-3,
+    description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
+  },
+  "glm-4.6v": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 0.15,
+    outputPrice: 0.45,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0.025,
+    description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
+  },
+  "glm-4.6v-flash": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 0,
+    outputPrice: 0,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
+  },
+  "glm-4.6v-flashx": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 0.02,
+    outputPrice: 0.2,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 2e-3,
+    description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
   }
 };
 var ZAI_DEFAULT_TEMPERATURE = 0.6;
@@ -5036,7 +4787,6 @@ var deepInfraDefaultModelInfo = {
   contextWindow: 262144,
   supportsImages: false,
   supportsPromptCache: false,
-  supportsNativeTools: true,
   inputPrice: 0.3,
   outputPrice: 1.2,
   description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
@@ -5050,8 +4800,6 @@ var minimaxModels = {
     contextWindow: 192e3,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     includedTools: ["search_and_replace"],
     excludedTools: ["apply_diff"],
     preserveReasoning: true,
@@ -5066,8 +4814,6 @@ var minimaxModels = {
     contextWindow: 192e3,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     includedTools: ["search_and_replace"],
     excludedTools: ["apply_diff"],
     preserveReasoning: true,
@@ -5082,8 +4828,6 @@ var minimaxModels = {
     contextWindow: 192e3,
     supportsImages: false,
     supportsPromptCache: true,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
     includedTools: ["search_and_replace"],
     excludedTools: ["apply_diff"],
     preserveReasoning: true,
@@ -5138,6 +4882,8 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
     case "openai-native":
       return "gpt-4o";
     // Based on openai-native patterns
+    case "openai-codex":
+      return openAiCodexDefaultModelId;
     case "mistral":
       return mistralDefaultModelId;
     case "openai":
@@ -5153,8 +4899,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
       return deepInfraDefaultModelId;
     case "vscode-lm":
       return vscodeLlmDefaultModelId;
-    case "claude-code":
-      return claudeCodeDefaultModelId;
     case "cerebras":
       return cerebrasDefaultModelId;
     case "sambanova":
@@ -5212,7 +4956,6 @@ var providerNames = [
   "bedrock",
   "baseten",
   "cerebras",
-  "claude-code",
   "doubao",
   "deepseek",
   "featherless",
@@ -5223,6 +4966,7 @@ var providerNames = [
   "mistral",
   "moonshot",
   "minimax",
+  "openai-codex",
   "openai-native",
   "qwen-code",
   "roo",
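Usage sketch for the new default-model branch; it assumes getProviderDefaultModelId is exported from the package (the function is defined in this module, but its export is not shown in this diff):

```ts
// Sketch only: assumes getProviderDefaultModelId is exported; the call shape
// mirrors the definition above, where options defaults to { isChina: false }.
import { getProviderDefaultModelId } from "@roo-code/types"

getProviderDefaultModelId("openai-codex") // resolves to openAiCodexDefaultModelId
// "claude-code" no longer has a case of its own after this release.
```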
@@ -5241,9 +4985,7 @@ var providerSettingsEntrySchema = z8.object({
 });
 var baseProviderSettingsSchema = z8.object({
   includeMaxTokens: z8.boolean().optional(),
-  diffEnabled: z8.boolean().optional(),
   todoListEnabled: z8.boolean().optional(),
-  fuzzyMatchThreshold: z8.number().optional(),
   modelTemperature: z8.number().nullish(),
   rateLimitSeconds: z8.number().optional(),
   consecutiveMistakeLimit: z8.number().min(0).optional(),
@@ -5253,9 +4995,7 @@ var baseProviderSettingsSchema = z8.object({
   modelMaxTokens: z8.number().optional(),
   modelMaxThinkingTokens: z8.number().optional(),
   // Model verbosity.
-  verbosity: verbosityLevelsSchema.optional()
-  // Tool protocol override for this profile.
-  toolProtocol: z8.enum(["xml", "native"]).optional()
+  verbosity: verbosityLevelsSchema.optional()
 });
 var apiModelIdProviderModelSchema = baseProviderSettingsSchema.extend({
   apiModelId: z8.string().optional()
@@ -5267,7 +5007,6 @@ var anthropicSchema = apiModelIdProviderModelSchema.extend({
   anthropicBeta1MContext: z8.boolean().optional()
   // Enable 'context-1m-2025-08-07' beta for 1M context window.
 });
-var claudeCodeSchema = apiModelIdProviderModelSchema.extend({});
 var openRouterSchema = baseProviderSettingsSchema.extend({
   openRouterApiKey: z8.string().optional(),
   openRouterModelId: z8.string().optional(),
@@ -5349,6 +5088,9 @@ var geminiCliSchema = apiModelIdProviderModelSchema.extend({
   geminiCliOAuthPath: z8.string().optional(),
   geminiCliProjectId: z8.string().optional()
 });
+var openAiCodexSchema = apiModelIdProviderModelSchema.extend({
+  // No additional settings needed - uses OAuth authentication
+});
 var openAiNativeSchema = apiModelIdProviderModelSchema.extend({
   openAiNativeApiKey: z8.string().optional(),
   openAiNativeBaseUrl: z8.string().optional(),
@@ -5453,7 +5195,6 @@ var defaultSchema = z8.object({
 });
 var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
   anthropicSchema.merge(z8.object({ apiProvider: z8.literal("anthropic") })),
-  claudeCodeSchema.merge(z8.object({ apiProvider: z8.literal("claude-code") })),
   openRouterSchema.merge(z8.object({ apiProvider: z8.literal("openrouter") })),
   bedrockSchema.merge(z8.object({ apiProvider: z8.literal("bedrock") })),
   vertexSchema.merge(z8.object({ apiProvider: z8.literal("vertex") })),
@@ -5463,6 +5204,7 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
   lmStudioSchema.merge(z8.object({ apiProvider: z8.literal("lmstudio") })),
   geminiSchema.merge(z8.object({ apiProvider: z8.literal("gemini") })),
   geminiCliSchema.merge(z8.object({ apiProvider: z8.literal("gemini-cli") })),
+  openAiCodexSchema.merge(z8.object({ apiProvider: z8.literal("openai-codex") })),
   openAiNativeSchema.merge(z8.object({ apiProvider: z8.literal("openai-native") })),
   mistralSchema.merge(z8.object({ apiProvider: z8.literal("mistral") })),
   deepSeekSchema.merge(z8.object({ apiProvider: z8.literal("deepseek") })),
@@ -5493,7 +5235,6 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
 var providerSettingsSchema = z8.object({
   apiProvider: providerNamesSchema.optional(),
   ...anthropicSchema.shape,
-  ...claudeCodeSchema.shape,
   ...openRouterSchema.shape,
   ...bedrockSchema.shape,
   ...vertexSchema.shape,
@@ -5503,6 +5244,7 @@ var providerSettingsSchema = z8.object({
   ...lmStudioSchema.shape,
   ...geminiSchema.shape,
   ...geminiCliSchema.shape,
+  ...openAiCodexSchema.shape,
   ...openAiNativeSchema.shape,
   ...mistralSchema.shape,
   ...deepSeekSchema.shape,
@@ -5557,10 +5299,10 @@ var getModelId = (settings) => {
 var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key) && !isCustomProvider(key) && !isFauxProvider(key);
 var modelIdKeysByProvider = {
   anthropic: "apiModelId",
-  "claude-code": "apiModelId",
   openrouter: "openRouterModelId",
   bedrock: "apiModelId",
   vertex: "apiModelId",
+  "openai-codex": "apiModelId",
   "openai-native": "openAiModelId",
   ollama: "ollamaModelId",
   lmstudio: "lmStudioModelId",
@@ -5590,7 +5332,7 @@ var modelIdKeysByProvider = {
   roo: "apiModelId",
   "vercel-ai-gateway": "vercelAiGatewayModelId"
 };
-var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "
+var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "bedrock", "minimax"];
 var getApiProtocol = (provider, modelId) => {
   if (provider && ANTHROPIC_STYLE_PROVIDERS.includes(provider)) {
     return "anthropic";
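Usage sketch for the trimmed provider list feeding getApiProtocol (the function itself remains exported; see the export hunks further down):

```ts
// Sketch only: getApiProtocol is exported by the package.
import { getApiProtocol } from "@roo-code/types"

getApiProtocol("bedrock")      // "anthropic" - still in ANTHROPIC_STYLE_PROVIDERS
getApiProtocol("openai-codex") // not Anthropic-style; handled by the logic outside this hunk
```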
@@ -5619,7 +5361,6 @@ var MODELS_BY_PROVIDER = {
     label: "Cerebras",
     models: Object.keys(cerebrasModels)
   },
-  "claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
   deepseek: {
     id: "deepseek",
     label: "DeepSeek",
@@ -5662,6 +5403,11 @@ var MODELS_BY_PROVIDER = {
     label: "MiniMax",
     models: Object.keys(minimaxModels)
   },
+  "openai-codex": {
+    id: "openai-codex",
+    label: "OpenAI - ChatGPT Plus/Pro",
+    models: Object.keys(openAiCodexModels)
+  },
   "openai-native": {
     id: "openai-native",
     label: "OpenAI",
@@ -5718,16 +5464,6 @@ var historyItemSchema = z9.object({
   size: z9.number().optional(),
   workspace: z9.string().optional(),
   mode: z9.string().optional(),
-  /**
-   * The tool protocol used by this task. Once a task uses tools with a specific
-   * protocol (XML or Native), it is permanently locked to that protocol.
-   *
-   * - "xml": Tool calls are parsed from XML text (no tool IDs)
-   * - "native": Tool calls come as tool_call chunks with IDs
-   *
-   * This ensures task resumption works correctly even when NTC settings change.
-   */
-  toolProtocol: z9.enum(["xml", "native"]).optional(),
   apiConfigName: z9.string().optional(),
   // Provider profile name for sticky profile feature
   status: z9.enum(["active", "completed", "delegated"]).optional(),
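Usage sketch for the new provider group, using MODELS_BY_PROVIDER, which remains exported:

```ts
// Sketch only: MODELS_BY_PROVIDER is exported (see the export list further down).
import { MODELS_BY_PROVIDER } from "@roo-code/types"

const codex = MODELS_BY_PROVIDER["openai-codex"]
codex.label  // "OpenAI - ChatGPT Plus/Pro"
codex.models // the keys of openAiCodexModels
```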
@@ -5745,23 +5481,12 @@ var historyItemSchema = z9.object({
 
 // src/experiment.ts
 import { z as z10 } from "zod";
-var experimentIds = [
-  "powerSteering",
-  "multiFileApplyDiff",
-  "preventFocusDisruption",
-  "imageGeneration",
-  "runSlashCommand",
-  "multipleNativeToolCalls",
-  "customTools"
-];
+var experimentIds = ["preventFocusDisruption", "imageGeneration", "runSlashCommand", "customTools"];
 var experimentIdsSchema = z10.enum(experimentIds);
 var experimentsSchema = z10.object({
-  powerSteering: z10.boolean().optional(),
-  multiFileApplyDiff: z10.boolean().optional(),
   preventFocusDisruption: z10.boolean().optional(),
   imageGeneration: z10.boolean().optional(),
   runSlashCommand: z10.boolean().optional(),
-  multipleNativeToolCalls: z10.boolean().optional(),
   customTools: z10.boolean().optional()
 });
 
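Usage sketch for the reduced experiment set, assuming experimentIdsSchema is exported as in earlier releases:

```ts
// Sketch only: assumes experimentIdsSchema remains exported.
import { experimentIdsSchema } from "@roo-code/types"

experimentIdsSchema.safeParse("imageGeneration").success // true
experimentIdsSchema.safeParse("powerSteering").success   // false - no longer an experiment id
```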
@@ -5813,6 +5538,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
   TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
   TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
   TelemetryEventName2["MODEL_CACHE_EMPTY_RESPONSE"] = "Model Cache Empty Response";
+  TelemetryEventName2["READ_FILE_LEGACY_FORMAT_USED"] = "Read File Legacy Format Used";
   return TelemetryEventName2;
 })(TelemetryEventName || {});
 var staticAppPropertiesSchema = z11.object({
@@ -5901,7 +5627,8 @@ var rooCodeTelemetryEventSchema = z11.discriminatedUnion("type", [
       "Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */,
       "Tab Shown" /* TAB_SHOWN */,
       "Mode Setting Changed" /* MODE_SETTINGS_CHANGED */,
-      "Custom Mode Created" /* CUSTOM_MODE_CREATED
+      "Custom Mode Created" /* CUSTOM_MODE_CREATED */,
+      "Read File Legacy Format Used" /* READ_FILE_LEGACY_FORMAT_USED */
     ]),
     properties: telemetryPropertiesSchema
   }),
@@ -6215,7 +5942,15 @@ var isLanguage = (value) => languages.includes(value);
 
 // src/global-settings.ts
 var DEFAULT_WRITE_DELAY_MS = 1e3;
-var
+var TERMINAL_PREVIEW_BYTES = {
+  small: 5 * 1024,
+  // 5KB
+  medium: 10 * 1024,
+  // 10KB
+  large: 20 * 1024
+  // 20KB
+};
+var DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE = "medium";
 var MIN_CHECKPOINT_TIMEOUT_SECONDS = 10;
 var MAX_CHECKPOINT_TIMEOUT_SECONDS = 60;
 var DEFAULT_CHECKPOINT_TIMEOUT_SECONDS = 15;
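Usage sketch for the new terminal-preview constants, both of which are added to the export list below; previewBytes is a hypothetical helper, not part of the package:

```ts
// Both constants are exported by this release (see the export list further down).
import { TERMINAL_PREVIEW_BYTES, DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE } from "@roo-code/types"

type PreviewSize = keyof typeof TERMINAL_PREVIEW_BYTES // "small" | "medium" | "large"

// Hypothetical helper: resolve a preview-size setting to a byte budget.
function previewBytes(size?: PreviewSize): number {
  return TERMINAL_PREVIEW_BYTES[size ?? DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE]
}

previewBytes()        // 10240 (medium)
previewBytes("large") // 20480
```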
@@ -6231,7 +5966,6 @@ var globalSettingsSchema = z14.object({
   imageGenerationProvider: z14.enum(["openrouter", "roo"]).optional(),
   openRouterImageApiKey: z14.string().optional(),
   openRouterImageGenerationSelectedModel: z14.string().optional(),
-  condensingApiConfigId: z14.string().optional(),
   customCondensingPrompt: z14.string().optional(),
   autoApprovalEnabled: z14.boolean().optional(),
   alwaysAllowReadOnly: z14.boolean().optional(),
@@ -6257,7 +5991,6 @@ var globalSettingsSchema = z14.object({
   allowedMaxCost: z14.number().nullish(),
   autoCondenseContext: z14.boolean().optional(),
   autoCondenseContextPercent: z14.number().optional(),
-  maxConcurrentFileReads: z14.number().optional(),
   /**
    * Whether to include current time in the environment details
    * @default true
@@ -6300,11 +6033,9 @@ var globalSettingsSchema = z14.object({
   maxWorkspaceFiles: z14.number().optional(),
   showRooIgnoredFiles: z14.boolean().optional(),
   enableSubfolderRules: z14.boolean().optional(),
-  maxReadFileLine: z14.number().optional(),
   maxImageFileSize: z14.number().optional(),
   maxTotalImageSize: z14.number().optional(),
-
-  terminalOutputCharacterLimit: z14.number().optional(),
+  terminalOutputPreviewSize: z14.enum(["small", "medium", "large"]).optional(),
   terminalShellIntegrationTimeout: z14.number().optional(),
   terminalShellIntegrationDisabled: z14.boolean().optional(),
   terminalCommandDelay: z14.number().optional(),
@@ -6313,18 +6044,14 @@ var globalSettingsSchema = z14.object({
   terminalZshOhMy: z14.boolean().optional(),
   terminalZshP10k: z14.boolean().optional(),
   terminalZdotdir: z14.boolean().optional(),
-  terminalCompressProgressBar: z14.boolean().optional(),
   diagnosticsEnabled: z14.boolean().optional(),
   rateLimitSeconds: z14.number().optional(),
-  diffEnabled: z14.boolean().optional(),
-  fuzzyMatchThreshold: z14.number().optional(),
   experiments: experimentsSchema.optional(),
   codebaseIndexModels: codebaseIndexModelsSchema.optional(),
   codebaseIndexConfig: codebaseIndexConfigSchema.optional(),
   language: languagesSchema.optional(),
   telemetrySetting: telemetrySettingsSchema.optional(),
   mcpEnabled: z14.boolean().optional(),
-  enableMcpServerCreation: z14.boolean().optional(),
   mode: z14.string().optional(),
   modeApiConfigs: z14.record(z14.string(), z14.string()).optional(),
   customModes: z14.array(modeConfigSchema).optional(),
@@ -6344,7 +6071,20 @@ var globalSettingsSchema = z14.object({
   profileThresholds: z14.record(z14.string(), z14.number()).optional(),
   hasOpenedModeSelector: z14.boolean().optional(),
   lastModeExportPath: z14.string().optional(),
-  lastModeImportPath: z14.string().optional()
+  lastModeImportPath: z14.string().optional(),
+  lastSettingsExportPath: z14.string().optional(),
+  lastTaskExportPath: z14.string().optional(),
+  lastImageSavePath: z14.string().optional(),
+  /**
+   * Path to worktree to auto-open after switching workspaces.
+   * Used by the worktree feature to open the Roo Code sidebar in a new window.
+   */
+  worktreeAutoOpenPath: z14.string().optional(),
+  /**
+   * Whether to show the worktree selector in the home screen.
+   * @default true
+   */
+  showWorktreesInHomeScreen: z14.boolean().optional()
 });
 var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
 var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
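Usage sketch for the reshaped settings schema, assuming globalSettingsSchema itself remains exported as in earlier releases; zod's default object behavior strips unknown keys such as the removed terminalOutputCharacterLimit rather than rejecting them:

```ts
// Sketch only: assumes globalSettingsSchema remains exported.
import { globalSettingsSchema } from "@roo-code/types"

const settings = globalSettingsSchema.parse({
  terminalOutputPreviewSize: "large",   // new enum setting
  showWorktreesInHomeScreen: true,      // new worktree flag
  lastTaskExportPath: "/tmp/task.md",
})
```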
@@ -6428,8 +6168,6 @@ var EVALS_SETTINGS = {
   ttsSpeed: 1,
   soundEnabled: false,
   soundVolume: 0.5,
-  terminalOutputLineLimit: 500,
-  terminalOutputCharacterLimit: DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT,
   terminalShellIntegrationTimeout: 3e4,
   terminalCommandDelay: 0,
   terminalPowershellCounter: false,
@@ -6437,19 +6175,14 @@ var EVALS_SETTINGS = {
   terminalZshClearEolMark: true,
   terminalZshP10k: false,
   terminalZdotdir: true,
-  terminalCompressProgressBar: true,
   terminalShellIntegrationDisabled: true,
   diagnosticsEnabled: true,
-  diffEnabled: true,
-  fuzzyMatchThreshold: 1,
   enableCheckpoints: false,
   rateLimitSeconds: 0,
   maxOpenTabsContext: 20,
   maxWorkspaceFiles: 200,
   maxGitStatusFiles: 20,
   showRooIgnoredFiles: true,
-  maxReadFileLine: -1,
-  // -1 to enable full file reading.
   includeDiagnosticMessages: true,
   maxDiagnosticMessages: 50,
   language: "en",
@@ -6523,24 +6256,18 @@ var organizationAllowListSchema = z16.object({
 });
 var organizationDefaultSettingsSchema = globalSettingsSchema.pick({
   enableCheckpoints: true,
-  fuzzyMatchThreshold: true,
   maxOpenTabsContext: true,
-  maxReadFileLine: true,
   maxWorkspaceFiles: true,
   showRooIgnoredFiles: true,
   terminalCommandDelay: true,
-  terminalCompressProgressBar: true,
-  terminalOutputLineLimit: true,
   terminalShellIntegrationDisabled: true,
   terminalShellIntegrationTimeout: true,
   terminalZshClearEolMark: true
 }).merge(
   z16.object({
     maxOpenTabsContext: z16.number().int().nonnegative().optional(),
-    maxReadFileLine: z16.number().int().gte(-1).optional(),
     maxWorkspaceFiles: z16.number().int().nonnegative().optional(),
     terminalCommandDelay: z16.number().int().nonnegative().optional(),
-    terminalOutputLineLimit: z16.number().int().nonnegative().optional(),
     terminalShellIntegrationTimeout: z16.number().int().nonnegative().optional()
   })
 );
@@ -6573,7 +6300,8 @@ var userFeaturesSchema = z16.object({
 });
 var userSettingsConfigSchema = z16.object({
   extensionBridgeEnabled: z16.boolean().optional(),
-  taskSyncEnabled: z16.boolean().optional()
+  taskSyncEnabled: z16.boolean().optional(),
+  llmEnhancedFeaturesEnabled: z16.boolean().optional()
 });
 var userSettingsDataSchema = z16.object({
   features: userFeaturesSchema,
@@ -7055,6 +6783,7 @@ var ipcMessageSchema = z19.discriminatedUnion("type", [
 
 // src/mcp.ts
 import { z as z20 } from "zod";
+var MAX_MCP_TOOLS_THRESHOLD = 60;
 var mcpExecutionStatusSchema = z20.discriminatedUnion("status", [
   z20.object({
     executionId: z20.string(),
@@ -7078,6 +6807,46 @@ var mcpExecutionStatusSchema = z20.discriminatedUnion("status", [
     error: z20.string().optional()
   })
 ]);
+function countEnabledMcpTools(servers) {
+  let serverCount = 0;
+  let toolCount = 0;
+  for (const server of servers) {
+    if (server.disabled) continue;
+    if (server.status !== "connected") continue;
+    serverCount++;
+    if (server.tools) {
+      for (const tool of server.tools) {
+        if (tool.enabledForPrompt !== false) {
+          toolCount++;
+        }
+      }
+    }
+  }
+  return { enabledToolCount: toolCount, enabledServerCount: serverCount };
+}
+
+// src/skills.ts
+var SKILL_NAME_MIN_LENGTH = 1;
+var SKILL_NAME_MAX_LENGTH = 64;
+var SKILL_NAME_REGEX = /^[a-z0-9]+(?:-[a-z0-9]+)*$/;
+var SkillNameValidationError = /* @__PURE__ */ ((SkillNameValidationError2) => {
+  SkillNameValidationError2["Empty"] = "empty";
+  SkillNameValidationError2["TooLong"] = "too_long";
+  SkillNameValidationError2["InvalidFormat"] = "invalid_format";
+  return SkillNameValidationError2;
+})(SkillNameValidationError || {});
+function validateSkillName(name) {
+  if (!name || name.length < SKILL_NAME_MIN_LENGTH) {
+    return { valid: false, error: "empty" /* Empty */ };
+  }
+  if (name.length > SKILL_NAME_MAX_LENGTH) {
+    return { valid: false, error: "too_long" /* TooLong */ };
+  }
+  if (!SKILL_NAME_REGEX.test(name)) {
+    return { valid: false, error: "invalid_format" /* InvalidFormat */ };
+  }
+  return { valid: true };
+}
 
 // src/todo.ts
 import { z as z21 } from "zod";
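Usage sketch for the new MCP and skill helpers, all of which are exported below; the server literals are illustrative and only carry the fields the helper actually reads:

```ts
// Sketch only: the real parameter is the package's MCP server array type;
// these literals only include status, disabled, tools and enabledForPrompt.
import { countEnabledMcpTools, MAX_MCP_TOOLS_THRESHOLD, validateSkillName } from "@roo-code/types"

const servers = [
  { name: "github", status: "connected", disabled: false, tools: [{ name: "create_issue", enabledForPrompt: true }] },
  { name: "local", status: "disconnected", tools: [{ name: "ping" }] },
]

const { enabledToolCount, enabledServerCount } = countEnabledMcpTools(servers)
if (enabledToolCount > MAX_MCP_TOOLS_THRESHOLD) {
  // surface a warning to the user (the threshold is 60 in this release)
}

validateSkillName("my-skill") // { valid: true }
validateSkillName("My Skill") // { valid: false, error: "invalid_format" }
validateSkillName("")         // { valid: false, error: "empty" }
```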
@@ -7117,6 +6886,11 @@ var commandExecutionStatusSchema = z22.discriminatedUnion("status", [
   })
 ]);
 
+// src/tool-params.ts
+function isLegacyReadFileParams(params) {
+  return "_legacyFormat" in params && params._legacyFormat === true;
+}
+
 // src/vscode-extension-host.ts
 import { z as z23 } from "zod";
 var checkoutDiffPayloadSchema = z23.object({
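Usage sketch for the new legacy-format guard, which is exported below; the parameter shapes are illustrative rather than the package's exact types:

```ts
// Sketch only: isLegacyReadFileParams is exported (see the export list below).
import { isLegacyReadFileParams } from "@roo-code/types"

isLegacyReadFileParams({ path: "src/index.ts", _legacyFormat: true }) // true
isLegacyReadFileParams({ files: [{ path: "src/index.ts" }] })         // false
```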
@@ -7169,7 +6943,7 @@ export {
   DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
   DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
   DEFAULT_MODES,
-
+  DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE,
   DEFAULT_WRITE_DELAY_MS,
   DOUBAO_API_BASE_URL,
   DOUBAO_API_CHAT_PATH,
@@ -7199,13 +6973,13 @@ export {
   IpcOrigin,
   LMSTUDIO_DEFAULT_TEMPERATURE,
   MAX_CHECKPOINT_TIMEOUT_SECONDS,
+  MAX_MCP_TOOLS_THRESHOLD,
   MINIMAX_DEFAULT_MAX_TOKENS,
   MINIMAX_DEFAULT_TEMPERATURE,
   MIN_CHECKPOINT_TIMEOUT_SECONDS,
   MISTRAL_DEFAULT_TEMPERATURE,
   MODELS_BY_PROVIDER,
   MOONSHOT_DEFAULT_TEMPERATURE,
-  NATIVE_TOOL_DEFAULTS,
   OPENAI_AZURE_AI_INFERENCE_PATH,
   OPENAI_NATIVE_DEFAULT_TEMPERATURE,
   OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -7220,7 +6994,11 @@ export {
   RooModelsResponseSchema,
   RooPricingSchema,
   SECRET_STATE_KEYS,
-
+  SKILL_NAME_MAX_LENGTH,
+  SKILL_NAME_MIN_LENGTH,
+  SKILL_NAME_REGEX,
+  SkillNameValidationError,
+  TERMINAL_PREVIEW_BYTES,
   TaskBridgeCommandName,
   TaskBridgeEventName,
   TaskCommandName,
@@ -7252,9 +7030,6 @@ export {
   chutesDefaultModelId,
   chutesDefaultModelInfo,
   chutesModels,
-  claudeCodeDefaultModelId,
-  claudeCodeModels,
-  claudeCodeReasoningConfig,
   clineAskSchema,
   clineAsks,
   clineMessageSchema,
@@ -7269,6 +7044,7 @@ export {
   commandIds,
   contextCondenseSchema,
   contextTruncationSchema,
+  countEnabledMcpTools,
   customModePromptsSchema,
   customModesSettingsSchema,
   customProviders,
@@ -7302,7 +7078,6 @@ export {
   geminiDefaultModelId,
   geminiModels,
   getApiProtocol,
-  getEffectiveProtocol,
   getErrorMessage,
   getErrorStatusCode,
   getImageGenerationProvider,
@@ -7337,9 +7112,9 @@ export {
   isInteractiveAsk,
   isInternalProvider,
   isLanguage,
+  isLegacyReadFileParams,
   isLocalProvider,
   isModelParameter,
-  isNativeProtocol,
   isNonBlockingAsk,
   isProviderName,
   isResumableAsk,
@@ -7375,9 +7150,10 @@ export {
   moonshotDefaultModelId,
   moonshotModels,
   nonBlockingAsks,
-  normalizeClaudeCodeModelId,
   ollamaDefaultModelId,
   ollamaDefaultModelInfo,
+  openAiCodexDefaultModelId,
+  openAiCodexModels,
   openAiModelInfoSaneDefaults,
   openAiNativeDefaultModelId,
   openAiNativeModels,
@@ -7447,6 +7223,7 @@ export {
   userFeaturesSchema,
   userSettingsConfigSchema,
   userSettingsDataSchema,
+  validateSkillName,
   verbosityLevels,
   verbosityLevelsSchema,
   vercelAiGatewayDefaultModelId,