@roo-code/types 1.106.0 → 1.108.0
This diff reflects the contents of publicly available package versions released to a supported registry, as they appear in that public registry; it is provided for informational purposes only.
- package/dist/index.cjs +386 -714
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1430 -2530
- package/dist/index.d.ts +1430 -2530
- package/dist/index.js +376 -705
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
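The index.cjs diff below carries the headline API changes in 1.108.0: the XML/native tool-protocol helpers (TOOL_PROTOCOL, NATIVE_TOOL_DEFAULTS, isNativeProtocol, getEffectiveProtocol) and the bundled claude-code provider model table are removed, while skill-name validation exports (SKILL_NAME_MIN_LENGTH, SKILL_NAME_MAX_LENGTH, SKILL_NAME_REGEX, validateSkillName, SkillNameValidationError) and MCP tool-count helpers (MAX_MCP_TOOLS_THRESHOLD, countEnabledMcpTools) are added. As a rough illustration only — the shapes of these exports are assumed from their names, since the diff does not show their signatures — a consumer of 1.108.0 might use the new constants like this:

```ts
// Sketch only: not part of the package diff. Assumes SKILL_NAME_MIN_LENGTH and
// SKILL_NAME_MAX_LENGTH are numbers and SKILL_NAME_REGEX is a RegExp; the actual
// types and signatures are not visible in this diff.
import {
  MAX_MCP_TOOLS_THRESHOLD,
  SKILL_NAME_MAX_LENGTH,
  SKILL_NAME_MIN_LENGTH,
  SKILL_NAME_REGEX,
} from "@roo-code/types"

// A hand-rolled skill-name check built from the new constants.
function looksLikeValidSkillName(name: string): boolean {
  return (
    name.length >= SKILL_NAME_MIN_LENGTH &&
    name.length <= SKILL_NAME_MAX_LENGTH &&
    SKILL_NAME_REGEX.test(name)
  )
}

// Warn when the number of enabled MCP tools crosses the new threshold
// (the diff also adds a "too_many_tools_warning" cline say for this case).
function shouldWarnAboutToolCount(enabledToolCount: number): boolean {
  return enabledToolCount > MAX_MCP_TOOLS_THRESHOLD
}
```

The exported validateSkillName / SkillNameValidationError pair presumably performs the same check with richer error reporting, so it would be the preferred entry point once its signature is confirmed.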
package/dist/index.cjs
CHANGED
@@ -42,7 +42,7 @@ __export(index_exports, {
 DEFAULT_CHECKPOINT_TIMEOUT_SECONDS: () => DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
 DEFAULT_CONSECUTIVE_MISTAKE_LIMIT: () => DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
 DEFAULT_MODES: () => DEFAULT_MODES,
-
+DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE: () => DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE,
 DEFAULT_WRITE_DELAY_MS: () => DEFAULT_WRITE_DELAY_MS,
 DOUBAO_API_BASE_URL: () => DOUBAO_API_BASE_URL,
 DOUBAO_API_CHAT_PATH: () => DOUBAO_API_CHAT_PATH,
@@ -72,13 +72,13 @@ __export(index_exports, {
 IpcOrigin: () => IpcOrigin,
 LMSTUDIO_DEFAULT_TEMPERATURE: () => LMSTUDIO_DEFAULT_TEMPERATURE,
 MAX_CHECKPOINT_TIMEOUT_SECONDS: () => MAX_CHECKPOINT_TIMEOUT_SECONDS,
+MAX_MCP_TOOLS_THRESHOLD: () => MAX_MCP_TOOLS_THRESHOLD,
 MINIMAX_DEFAULT_MAX_TOKENS: () => MINIMAX_DEFAULT_MAX_TOKENS,
 MINIMAX_DEFAULT_TEMPERATURE: () => MINIMAX_DEFAULT_TEMPERATURE,
 MIN_CHECKPOINT_TIMEOUT_SECONDS: () => MIN_CHECKPOINT_TIMEOUT_SECONDS,
 MISTRAL_DEFAULT_TEMPERATURE: () => MISTRAL_DEFAULT_TEMPERATURE,
 MODELS_BY_PROVIDER: () => MODELS_BY_PROVIDER,
 MOONSHOT_DEFAULT_TEMPERATURE: () => MOONSHOT_DEFAULT_TEMPERATURE,
-NATIVE_TOOL_DEFAULTS: () => NATIVE_TOOL_DEFAULTS,
 OPENAI_AZURE_AI_INFERENCE_PATH: () => OPENAI_AZURE_AI_INFERENCE_PATH,
 OPENAI_NATIVE_DEFAULT_TEMPERATURE: () => OPENAI_NATIVE_DEFAULT_TEMPERATURE,
 OPENROUTER_DEFAULT_PROVIDER_NAME: () => OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -93,7 +93,11 @@ __export(index_exports, {
 RooModelsResponseSchema: () => RooModelsResponseSchema,
 RooPricingSchema: () => RooPricingSchema,
 SECRET_STATE_KEYS: () => SECRET_STATE_KEYS,
-
+SKILL_NAME_MAX_LENGTH: () => SKILL_NAME_MAX_LENGTH,
+SKILL_NAME_MIN_LENGTH: () => SKILL_NAME_MIN_LENGTH,
+SKILL_NAME_REGEX: () => SKILL_NAME_REGEX,
+SkillNameValidationError: () => SkillNameValidationError,
+TERMINAL_PREVIEW_BYTES: () => TERMINAL_PREVIEW_BYTES,
 TaskBridgeCommandName: () => TaskBridgeCommandName,
 TaskBridgeEventName: () => TaskBridgeEventName,
 TaskCommandName: () => TaskCommandName,
@@ -125,9 +129,6 @@ __export(index_exports, {
 chutesDefaultModelId: () => chutesDefaultModelId,
 chutesDefaultModelInfo: () => chutesDefaultModelInfo,
 chutesModels: () => chutesModels,
-claudeCodeDefaultModelId: () => claudeCodeDefaultModelId,
-claudeCodeModels: () => claudeCodeModels,
-claudeCodeReasoningConfig: () => claudeCodeReasoningConfig,
 clineAskSchema: () => clineAskSchema,
 clineAsks: () => clineAsks,
 clineMessageSchema: () => clineMessageSchema,
@@ -142,6 +143,7 @@ __export(index_exports, {
 commandIds: () => commandIds,
 contextCondenseSchema: () => contextCondenseSchema,
 contextTruncationSchema: () => contextTruncationSchema,
+countEnabledMcpTools: () => countEnabledMcpTools,
 customModePromptsSchema: () => customModePromptsSchema,
 customModesSettingsSchema: () => customModesSettingsSchema,
 customProviders: () => customProviders,
@@ -175,7 +177,6 @@ __export(index_exports, {
 geminiDefaultModelId: () => geminiDefaultModelId,
 geminiModels: () => geminiModels,
 getApiProtocol: () => getApiProtocol,
-getEffectiveProtocol: () => getEffectiveProtocol,
 getErrorMessage: () => getErrorMessage,
 getErrorStatusCode: () => getErrorStatusCode,
 getImageGenerationProvider: () => getImageGenerationProvider,
@@ -210,9 +211,9 @@ __export(index_exports, {
 isInteractiveAsk: () => isInteractiveAsk,
 isInternalProvider: () => isInternalProvider,
 isLanguage: () => isLanguage,
+isLegacyReadFileParams: () => isLegacyReadFileParams,
 isLocalProvider: () => isLocalProvider,
 isModelParameter: () => isModelParameter,
-isNativeProtocol: () => isNativeProtocol,
 isNonBlockingAsk: () => isNonBlockingAsk,
 isProviderName: () => isProviderName,
 isResumableAsk: () => isResumableAsk,
@@ -248,7 +249,6 @@ __export(index_exports, {
 moonshotDefaultModelId: () => moonshotDefaultModelId,
 moonshotModels: () => moonshotModels,
 nonBlockingAsks: () => nonBlockingAsks,
-normalizeClaudeCodeModelId: () => normalizeClaudeCodeModelId,
 ollamaDefaultModelId: () => ollamaDefaultModelId,
 ollamaDefaultModelInfo: () => ollamaDefaultModelInfo,
 openAiCodexDefaultModelId: () => openAiCodexDefaultModelId,
@@ -322,6 +322,7 @@ __export(index_exports, {
 userFeaturesSchema: () => userFeaturesSchema,
 userSettingsConfigSchema: () => userSettingsConfigSchema,
 userSettingsDataSchema: () => userSettingsDataSchema,
+validateSkillName: () => validateSkillName,
 verbosityLevels: () => verbosityLevels,
 verbosityLevelsSchema: () => verbosityLevelsSchema,
 vercelAiGatewayDefaultModelId: () => vercelAiGatewayDefaultModelId,
@@ -417,7 +418,9 @@ var clineSays = [
 "condense_context_error",
 "sliding_window_truncation",
 "codebase_search_result",
-"user_edit_todos"
+"user_edit_todos",
+"too_many_tools_warning",
+"tool"
 ];
 var clineSaySchema = import_zod.z.enum(clineSays);
 var toolProgressStatusSchema = import_zod.z.object({
@@ -485,6 +488,7 @@ var toolGroupsSchema = import_zod2.z.enum(toolGroups);
 var toolNames = [
 "execute_command",
 "read_file",
+"read_command_output",
 "write_to_file",
 "apply_diff",
 "search_and_replace",
@@ -500,10 +504,10 @@ var toolNames = [
 "attempt_completion",
 "switch_mode",
 "new_task",
-"fetch_instructions",
 "codebase_search",
 "update_todo_list",
 "run_slash_command",
+"skill",
 "generate_image",
 "custom_tool"
 ];
@@ -515,20 +519,6 @@ var toolUsageSchema = import_zod2.z.record(
 failures: import_zod2.z.number()
 })
 );
-var TOOL_PROTOCOL = {
-XML: "xml",
-NATIVE: "native"
-};
-var NATIVE_TOOL_DEFAULTS = {
-supportsNativeTools: true,
-defaultToolProtocol: TOOL_PROTOCOL.NATIVE
-};
-function isNativeProtocol(protocol) {
-return protocol === TOOL_PROTOCOL.NATIVE;
-}
-function getEffectiveProtocol(toolProtocol) {
-return toolProtocol || TOOL_PROTOCOL.XML;
-}
 
 // src/events.ts
 var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
@@ -816,10 +806,6 @@ var modelInfoSchema = import_zod5.z.object({
 isStealthModel: import_zod5.z.boolean().optional(),
 // Flag to indicate if the model is free (no cost)
 isFree: import_zod5.z.boolean().optional(),
-// Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
-supportsNativeTools: import_zod5.z.boolean().optional(),
-// Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
-defaultToolProtocol: import_zod5.z.enum(["xml", "native"]).optional(),
 // Exclude specific native tools from being available (only applies to native protocol)
 // These tools will be removed from the set of tools available to the model
 excludedTools: import_zod5.z.array(import_zod5.z.string()).optional(),
@@ -916,8 +902,6 @@ var anthropicModels = {
 // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -950,8 +934,6 @@ var anthropicModels = {
 // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -983,8 +965,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 5,
 // $5 per million input tokens
 outputPrice: 25,
@@ -1001,8 +981,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 // $15 per million input tokens
 outputPrice: 75,
@@ -1019,8 +997,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 // $15 per million input tokens
 outputPrice: 75,
@@ -1037,8 +1013,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1056,8 +1030,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1072,8 +1044,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1088,8 +1058,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1100,8 +1068,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1112,8 +1078,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 1.25,
 cacheWritesPrice: 0.3,
@@ -1124,8 +1088,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1143,7 +1105,6 @@ var basetenModels = {
 contextWindow: 262e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 2.5,
 cacheWritesPrice: 0,
@@ -1155,7 +1116,6 @@ var basetenModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 2.2,
 cacheWritesPrice: 0,
@@ -1167,7 +1127,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 2.55,
 outputPrice: 5.95,
 cacheWritesPrice: 0,
@@ -1179,7 +1138,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 2.55,
 outputPrice: 5.95,
 cacheWritesPrice: 0,
@@ -1191,7 +1149,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.77,
 outputPrice: 0.77,
 cacheWritesPrice: 0,
@@ -1203,7 +1160,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.5,
 outputPrice: 1.5,
 cacheWritesPrice: 0,
@@ -1215,7 +1171,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.3,
 outputPrice: 0.45,
 cacheWritesPrice: 0,
@@ -1227,7 +1182,6 @@ var basetenModels = {
 contextWindow: 128072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.1,
 outputPrice: 0.5,
 cacheWritesPrice: 0,
@@ -1239,7 +1193,6 @@ var basetenModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.22,
 outputPrice: 0.8,
 cacheWritesPrice: 0,
@@ -1251,7 +1204,6 @@ var basetenModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.38,
 outputPrice: 1.53,
 cacheWritesPrice: 0,
@@ -1263,7 +1215,6 @@ var basetenModels = {
 contextWindow: 262e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 2.5,
 cacheWritesPrice: 0,
@@ -1283,8 +1234,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1298,7 +1247,6 @@ var bedrockModels = {
 contextWindow: 3e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
 inputPrice: 0.8,
 outputPrice: 3.2,
 cacheWritesPrice: 0.8,
@@ -1314,7 +1262,6 @@ var bedrockModels = {
 contextWindow: 3e5,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 1,
 outputPrice: 4,
 cacheWritesPrice: 1,
@@ -1328,7 +1275,6 @@ var bedrockModels = {
 contextWindow: 3e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
 inputPrice: 0.06,
 outputPrice: 0.24,
 cacheWritesPrice: 0.06,
@@ -1344,7 +1290,6 @@ var bedrockModels = {
 contextWindow: 1e6,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
 inputPrice: 0.33,
 outputPrice: 2.75,
 cacheWritesPrice: 0,
@@ -1360,7 +1305,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
 inputPrice: 0.035,
 outputPrice: 0.14,
 cacheWritesPrice: 0.035,
@@ -1377,8 +1321,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1393,8 +1335,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1409,8 +1349,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 5,
 outputPrice: 25,
 cacheWritesPrice: 6.25,
@@ -1425,8 +1363,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1441,8 +1377,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1456,8 +1390,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1471,8 +1403,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.8,
 outputPrice: 4,
 cacheWritesPrice: 1,
@@ -1487,8 +1417,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1504,8 +1432,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15
 },
@@ -1514,8 +1440,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75
 },
@@ -1524,8 +1448,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15
 },
@@ -1534,8 +1456,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 1.25
 },
@@ -1544,7 +1464,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 1.35,
 outputPrice: 5.4
 },
@@ -1553,7 +1472,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.5,
 outputPrice: 1.5,
 description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases"
@@ -1563,7 +1481,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 2,
 outputPrice: 6,
 description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model"
@@ -1573,7 +1490,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.72,
 outputPrice: 0.72,
 description: "Llama 3.3 Instruct (70B)"
@@ -1583,7 +1499,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.72,
 outputPrice: 0.72,
 description: "Llama 3.2 Instruct (90B)"
@@ -1593,7 +1508,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.16,
 outputPrice: 0.16,
 description: "Llama 3.2 Instruct (11B)"
@@ -1603,7 +1517,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.15,
 outputPrice: 0.15,
 description: "Llama 3.2 Instruct (3B)"
@@ -1613,7 +1526,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.1,
 outputPrice: 0.1,
 description: "Llama 3.2 Instruct (1B)"
@@ -1623,7 +1535,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 2.4,
 outputPrice: 2.4,
 description: "Llama 3.1 Instruct (405B)"
@@ -1633,7 +1544,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.72,
 outputPrice: 0.72,
 description: "Llama 3.1 Instruct (70B)"
@@ -1643,7 +1553,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.9,
 outputPrice: 0.9,
 description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
@@ -1653,7 +1562,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.22,
 outputPrice: 0.22,
 description: "Llama 3.1 Instruct (8B)"
@@ -1663,7 +1571,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 2.65,
 outputPrice: 3.5
 },
@@ -1672,7 +1579,6 @@ var bedrockModels = {
 contextWindow: 4e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.3,
 outputPrice: 0.6
 },
@@ -1681,7 +1587,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.15,
 outputPrice: 0.2,
 description: "Amazon Titan Text Lite"
@@ -1691,7 +1596,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.2,
 outputPrice: 0.6,
 description: "Amazon Titan Text Express"
@@ -1701,8 +1605,6 @@ var bedrockModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 preserveReasoning: true,
 inputPrice: 0.6,
 outputPrice: 2.5,
@@ -1713,8 +1615,6 @@ var bedrockModels = {
 contextWindow: 196608,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 preserveReasoning: true,
 inputPrice: 0.3,
 outputPrice: 1.2,
@@ -1725,8 +1625,6 @@ var bedrockModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.15,
 outputPrice: 1.2,
 description: "Qwen3 Next 80B (MoE model with 3B active parameters)"
@@ -1736,8 +1634,6 @@ var bedrockModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.45,
 outputPrice: 1.8,
 description: "Qwen3 Coder 480B (MoE model with 35B active parameters)"
@@ -1828,26 +1724,14 @@ var BEDROCK_SERVICE_TIER_PRICING = {
 // src/providers/cerebras.ts
 var cerebrasDefaultModelId = "gpt-oss-120b";
 var cerebrasModels = {
-"zai-glm-4.6": {
-maxTokens: 16384,
-// Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
-contextWindow: 131072,
-supportsImages: false,
-supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
-inputPrice: 0,
-outputPrice: 0,
-description: "Fast general-purpose model on Cerebras (up to 1,000 tokens/s). To be deprecated soon."
-},
 "zai-glm-4.7": {
 maxTokens: 16384,
 // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
 contextWindow: 131072,
 supportsImages: false,
-supportsPromptCache:
-
-
+supportsPromptCache: true,
+supportsTemperature: true,
+defaultTemperature: 1,
 inputPrice: 0,
 outputPrice: 0,
 description: "Highly capable general-purpose model on Cerebras (up to 1,000 tokens/s), competitive with leading proprietary models on coding tasks."
@@ -1858,8 +1742,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Intelligent model with ~1400 tokens/s"
@@ -1870,8 +1752,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Powerful model with ~2600 tokens/s"
@@ -1882,8 +1762,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "SOTA coding performance with ~2500 tokens/s"
@@ -1894,8 +1772,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -1910,8 +1786,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 0528 model."
@@ -1921,8 +1795,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 model."
@@ -1932,8 +1804,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 model."
@@ -1943,8 +1813,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3.1 model."
@@ -1954,8 +1822,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.23,
 outputPrice: 0.9,
 description: "DeepSeek\u2011V3.1\u2011Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix\u2011ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance."
@@ -1965,8 +1831,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 3,
 description: "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2\xD7 quota per request and not intended for bulk workloads."
@@ -1976,8 +1840,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 0.35,
 description: "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long\u2011context training and inference efficiency while maintaining performance comparable to V3.1\u2011Terminus."
@@ -1989,8 +1851,6 @@ var chutesModels = {
 // From Groq
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Llama 3.3 70B Instruct model."
@@ -2000,8 +1860,6 @@ var chutesModels = {
 contextWindow: 512e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context."
@@ -2011,8 +1869,6 @@ var chutesModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Mistral Nemo Instruct model."
@@ -2022,8 +1878,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Gemma 3 12B IT model."
@@ -2033,8 +1887,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nous DeepHermes 3 Llama 3 8B Preview model."
@@ -2044,8 +1896,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Gemma 3 4B IT model."
@@ -2055,8 +1905,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nvidia Llama 3.3 Nemotron Super 49B model."
@@ -2066,8 +1914,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nvidia Llama 3.1 Nemotron Ultra 253B model."
@@ -2077,8 +1923,6 @@ var chutesModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model."
@@ -2088,8 +1932,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 Base model."
@@ -2099,8 +1941,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 Zero model."
@@ -2110,8 +1950,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 (0324) model."
@@ -2121,8 +1959,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 235B A22B Instruct 2507 model with 262K context window."
@@ -2132,8 +1968,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 235B A22B model."
@@ -2143,8 +1977,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 32B model."
@@ -2154,8 +1986,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 30B A3B model."
@@ -2165,8 +1995,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 14B model."
@@ -2176,8 +2004,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 8B model."
@@ -2187,8 +2013,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Microsoft MAI-DS-R1 FP8 model."
@@ -2198,8 +2022,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "TNGTech DeepSeek R1T Chimera model."
@@ -2209,8 +2031,6 @@ var chutesModels = {
 contextWindow: 151329,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.5-Air model with 151,329 token context window and 106B total parameters with 12B activated."
@@ -2220,8 +2040,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture."
@@ -2231,8 +2049,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 3,
 description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference."
@@ -2242,8 +2058,6 @@ var chutesModels = {
 contextWindow: 202752,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios."
@@ -2254,8 +2068,6 @@ var chutesModels = {
 contextWindow: 202752,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1.15,
 outputPrice: 3.25,
 description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference."
@@ -2265,8 +2077,6 @@ var chutesModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "LongCat Flash Thinking FP8 model with 128K context window, optimized for complex reasoning and coding tasks."
@@ -2276,8 +2086,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks."
@@ -2287,8 +2095,6 @@ var chutesModels = {
 contextWindow: 75e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.1481,
 outputPrice: 0.5926,
 description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
@@ -2298,8 +2104,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.1999,
 outputPrice: 0.8001,
 description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
@@ -2309,8 +2113,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.077968332,
 outputPrice: 0.31202496,
 description: "Qwen3 235B A22B Thinking 2507 model with 262K context window."
@@ -2320,8 +2122,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Fast, stable instruction-tuned model optimized for complex tasks, RAG, and tool use without thinking traces."
@@ -2331,8 +2131,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis."
@@ -2342,8 +2140,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: true,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.16,
 outputPrice: 0.65,
 description: "Qwen3\u2011VL\u2011235B\u2011A22B\u2011Thinking is an open\u2011weight MoE vision\u2011language model (235B total, ~22B activated) optimized for deliberate multi\u2011step reasoning with strong text\u2011image\u2011video understanding and long\u2011context capabilities."
@@ -2351,73 +2147,6 @@ var chutesModels = {
 };
 var chutesDefaultModelInfo = chutesModels[chutesDefaultModelId];
 
-// src/providers/claude-code.ts
-var DATE_SUFFIX_PATTERN = /-\d{8}$/;
-var claudeCodeModels = {
-"claude-haiku-4-5": {
-maxTokens: 32768,
-contextWindow: 2e5,
-supportsImages: true,
-supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
-supportsReasoningEffort: ["disable", "low", "medium", "high"],
-reasoningEffort: "medium",
-description: "Claude Haiku 4.5 - Fast and efficient with thinking"
-},
-"claude-sonnet-4-5": {
-maxTokens: 32768,
-contextWindow: 2e5,
-supportsImages: true,
-supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
-supportsReasoningEffort: ["disable", "low", "medium", "high"],
-reasoningEffort: "medium",
-description: "Claude Sonnet 4.5 - Balanced performance with thinking"
-},
-"claude-opus-4-5": {
-maxTokens: 32768,
-contextWindow: 2e5,
-supportsImages: true,
-supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
-supportsReasoningEffort: ["disable", "low", "medium", "high"],
-reasoningEffort: "medium",
-description: "Claude Opus 4.5 - Most capable with thinking"
-}
-};
-var claudeCodeDefaultModelId = "claude-sonnet-4-5";
-var MODEL_FAMILY_PATTERNS = [
-// Opus models (any version) → claude-opus-4-5
-{ pattern: /opus/i, target: "claude-opus-4-5" },
-// Haiku models (any version) → claude-haiku-4-5
-{ pattern: /haiku/i, target: "claude-haiku-4-5" },
-// Sonnet models (any version) → claude-sonnet-4-5
-{ pattern: /sonnet/i, target: "claude-sonnet-4-5" }
-];
-function normalizeClaudeCodeModelId(modelId) {
-if (Object.hasOwn(claudeCodeModels, modelId)) {
-return modelId;
-}
-const withoutDate = modelId.replace(DATE_SUFFIX_PATTERN, "");
-if (Object.hasOwn(claudeCodeModels, withoutDate)) {
-return withoutDate;
-}
-for (const { pattern, target } of MODEL_FAMILY_PATTERNS) {
-if (pattern.test(modelId)) {
-return target;
-}
-}
-return claudeCodeDefaultModelId;
-}
-var claudeCodeReasoningConfig = {
-low: { budgetTokens: 16e3 },
-medium: { budgetTokens: 32e3 },
-high: { budgetTokens: 64e3 }
-};
-
 // src/providers/deepseek.ts
 var deepSeekDefaultModelId = "deepseek-chat";
 var deepSeekModels = {
@@ -2427,8 +2156,6 @@ var deepSeekModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.28,
 // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
 outputPrice: 0.42,
@@ -2445,8 +2172,6 @@ var deepSeekModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 preserveReasoning: true,
 inputPrice: 0.28,
 // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
@@ -2469,8 +2194,6 @@ var doubaoModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1e-4,
 // $0.0001 per million tokens (cache miss)
 outputPrice: 4e-4,
@@ -2486,8 +2209,6 @@ var doubaoModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 2e-4,
 // $0.0002 per million tokens
 outputPrice: 8e-4,
@@ -2503,8 +2224,6 @@ var doubaoModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15e-5,
 // $0.00015 per million tokens
 outputPrice: 6e-4,
@@ -2527,7 +2246,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 0324 model."
@@ -2537,7 +2255,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 0528 model."
@@ -2547,7 +2264,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "Kimi K2 Instruct model."
@@ -2557,7 +2273,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "GPT-OSS 120B model."
@@ -2567,7 +2282,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 Coder 480B A35B Instruct model."
@@ -2583,8 +2297,6 @@ var fireworksModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.5,
 cacheReadsPrice: 0.15,
@@ -2595,8 +2307,6 @@ var fireworksModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.5,
 description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -2606,7 +2316,6 @@ var fireworksModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
 supportsTemperature: true,
 preserveReasoning: true,
 defaultTemperature: 1,
@@ -2620,8 +2329,6 @@ var fireworksModels = {
 contextWindow: 204800,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.3,
 outputPrice: 1.2,
 description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -2631,8 +2338,6 @@ var fireworksModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.22,
 outputPrice: 0.88,
 description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -2642,8 +2347,6 @@ var fireworksModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.45,
 outputPrice: 1.8,
 description: "Qwen3's most agentic code model to date."
@@ -2653,8 +2356,6 @@ var fireworksModels = {
 contextWindow: 16e4,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 8,
 description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
|
|
@@ -2664,8 +2365,6 @@ var fireworksModels = {
|
|
|
2664
2365
|
contextWindow: 128e3,
|
|
2665
2366
|
supportsImages: false,
|
|
2666
2367
|
supportsPromptCache: false,
|
|
2667
|
-
supportsNativeTools: true,
|
|
2668
|
-
defaultToolProtocol: "native",
|
|
2669
2368
|
inputPrice: 0.9,
|
|
2670
2369
|
outputPrice: 0.9,
|
|
2671
2370
|
description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
|
|
@@ -2675,8 +2374,6 @@ var fireworksModels = {
|
|
|
2675
2374
|
contextWindow: 163840,
|
|
2676
2375
|
supportsImages: false,
|
|
2677
2376
|
supportsPromptCache: false,
|
|
2678
|
-
supportsNativeTools: true,
|
|
2679
|
-
defaultToolProtocol: "native",
|
|
2680
2377
|
inputPrice: 0.56,
|
|
2681
2378
|
outputPrice: 1.68,
|
|
2682
2379
|
description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
|
|
@@ -2686,8 +2383,6 @@ var fireworksModels = {
|
|
|
2686
2383
|
contextWindow: 128e3,
|
|
2687
2384
|
supportsImages: false,
|
|
2688
2385
|
supportsPromptCache: false,
|
|
2689
|
-
supportsNativeTools: true,
|
|
2690
|
-
defaultToolProtocol: "native",
|
|
2691
2386
|
inputPrice: 0.55,
|
|
2692
2387
|
outputPrice: 2.19,
|
|
2693
2388
|
description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
|
|
@@ -2697,8 +2392,6 @@ var fireworksModels = {
|
|
|
2697
2392
|
contextWindow: 128e3,
|
|
2698
2393
|
supportsImages: false,
|
|
2699
2394
|
supportsPromptCache: false,
|
|
2700
|
-
supportsNativeTools: true,
|
|
2701
|
-
defaultToolProtocol: "native",
|
|
2702
2395
|
inputPrice: 0.55,
|
|
2703
2396
|
outputPrice: 2.19,
|
|
2704
2397
|
description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
|
|
@@ -2708,8 +2401,6 @@ var fireworksModels = {
|
|
|
2708
2401
|
contextWindow: 198e3,
|
|
2709
2402
|
supportsImages: false,
|
|
2710
2403
|
supportsPromptCache: false,
|
|
2711
|
-
supportsNativeTools: true,
|
|
2712
|
-
defaultToolProtocol: "native",
|
|
2713
2404
|
inputPrice: 0.55,
|
|
2714
2405
|
outputPrice: 2.19,
|
|
2715
2406
|
description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
|
|
@@ -2719,8 +2410,6 @@ var fireworksModels = {
|
|
|
2719
2410
|
contextWindow: 128e3,
|
|
2720
2411
|
supportsImages: false,
|
|
2721
2412
|
supportsPromptCache: false,
|
|
2722
|
-
supportsNativeTools: true,
|
|
2723
|
-
defaultToolProtocol: "native",
|
|
2724
2413
|
inputPrice: 0.07,
|
|
2725
2414
|
outputPrice: 0.3,
|
|
2726
2415
|
description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
|
|
@@ -2730,11 +2419,63 @@ var fireworksModels = {
|
|
|
2730
2419
|
contextWindow: 128e3,
|
|
2731
2420
|
supportsImages: false,
|
|
2732
2421
|
supportsPromptCache: false,
|
|
2733
|
-
supportsNativeTools: true,
|
|
2734
|
-
defaultToolProtocol: "native",
|
|
2735
2422
|
inputPrice: 0.15,
|
|
2736
2423
|
outputPrice: 0.6,
|
|
2737
2424
|
description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
|
|
2425
|
+
},
|
|
2426
|
+
"accounts/fireworks/models/minimax-m2p1": {
|
|
2427
|
+
maxTokens: 4096,
|
|
2428
|
+
contextWindow: 204800,
|
|
2429
|
+
supportsImages: false,
|
|
2430
|
+
supportsPromptCache: false,
|
|
2431
|
+
inputPrice: 0.3,
|
|
2432
|
+
outputPrice: 1.2,
|
|
2433
|
+
description: "MiniMax M2.1 is an upgraded version of M2 with improved performance on complex reasoning, coding, and long-context understanding tasks."
|
|
2434
|
+
},
|
|
2435
|
+
"accounts/fireworks/models/deepseek-v3p2": {
|
|
2436
|
+
maxTokens: 16384,
|
|
2437
|
+
contextWindow: 163840,
|
|
2438
|
+
supportsImages: false,
|
|
2439
|
+
supportsPromptCache: false,
|
|
2440
|
+
inputPrice: 0.56,
|
|
2441
|
+
outputPrice: 1.68,
|
|
2442
|
+
description: "DeepSeek V3.2 is the latest iteration of the V3 model family with enhanced reasoning capabilities, improved code generation, and better instruction following."
|
|
2443
|
+
},
|
|
2444
|
+
"accounts/fireworks/models/glm-4p7": {
|
|
2445
|
+
maxTokens: 25344,
|
|
2446
|
+
contextWindow: 198e3,
|
|
2447
|
+
supportsImages: false,
|
|
2448
|
+
supportsPromptCache: false,
|
|
2449
|
+
inputPrice: 0.55,
|
|
2450
|
+
outputPrice: 2.19,
|
|
2451
|
+
description: "Z.ai GLM-4.7 is the latest coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality."
|
|
2452
|
+
},
|
|
2453
|
+
"accounts/fireworks/models/llama-v3p3-70b-instruct": {
|
|
2454
|
+
maxTokens: 16384,
|
|
2455
|
+
contextWindow: 131072,
|
|
2456
|
+
supportsImages: false,
|
|
2457
|
+
supportsPromptCache: false,
|
|
2458
|
+
inputPrice: 0.9,
|
|
2459
|
+
outputPrice: 0.9,
|
|
2460
|
+
description: "Meta Llama 3.3 70B Instruct is a highly capable instruction-tuned model with strong reasoning, coding, and general task performance."
|
|
2461
|
+
},
|
|
2462
|
+
"accounts/fireworks/models/llama4-maverick-instruct-basic": {
|
|
2463
|
+
maxTokens: 16384,
|
|
2464
|
+
contextWindow: 131072,
|
|
2465
|
+
supportsImages: true,
|
|
2466
|
+
supportsPromptCache: false,
|
|
2467
|
+
inputPrice: 0.22,
|
|
2468
|
+
outputPrice: 0.88,
|
|
2469
|
+
description: "Llama 4 Maverick is Meta's latest multimodal model with vision capabilities, optimized for instruction following and coding tasks."
|
|
2470
|
+
},
|
|
2471
|
+
"accounts/fireworks/models/llama4-scout-instruct-basic": {
|
|
2472
|
+
maxTokens: 16384,
|
|
2473
|
+
contextWindow: 131072,
|
|
2474
|
+
supportsImages: true,
|
|
2475
|
+
supportsPromptCache: false,
|
|
2476
|
+
inputPrice: 0.15,
|
|
2477
|
+
outputPrice: 0.6,
|
|
2478
|
+
description: "Llama 4 Scout is a smaller, faster variant of Llama 4 with multimodal capabilities, ideal for quick iterations and cost-effective deployments."
|
|
2738
2479
|
}
|
|
2739
2480
|
};
|
|
2740
2481
|
|
|
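
The hunk above also adds several new Fireworks entries (minimax-m2p1, deepseek-v3p2, glm-4p7, llama-v3p3-70b-instruct, llama4-maverick/scout). A rough cost check for the new "accounts/fireworks/models/minimax-m2p1" entry, assuming prices are USD per million tokens as the inline comments elsewhere in this file indicate; the helper below is hypothetical and only illustrates the arithmetic.

// Rough per-request cost for minimax-m2p1 (inputPrice 0.3, outputPrice 1.2,
// both $/M tokens per the file's own comments). Hypothetical helper.
function estimateCostUSD(inputTokens: number, outputTokens: number): number {
    const inputPrice = 0.3 // $/M input tokens
    const outputPrice = 1.2 // $/M output tokens
    return (inputTokens / 1e6) * inputPrice + (outputTokens / 1e6) * outputPrice
}

// e.g. a 10,000-token prompt with a 2,000-token completion:
// 0.01 * 0.3 + 0.002 * 1.2 = 0.003 + 0.0024 ≈ $0.0054
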
@@ -2745,8 +2486,6 @@ var geminiModels = {
|
|
|
2745
2486
|
maxTokens: 65536,
|
|
2746
2487
|
contextWindow: 1048576,
|
|
2747
2488
|
supportsImages: true,
|
|
2748
|
-
supportsNativeTools: true,
|
|
2749
|
-
defaultToolProtocol: "native",
|
|
2750
2489
|
supportsPromptCache: true,
|
|
2751
2490
|
supportsReasoningEffort: ["low", "high"],
|
|
2752
2491
|
reasoningEffort: "low",
|
|
@@ -2754,16 +2493,19 @@ var geminiModels = {
|
|
|
2754
2493
|
defaultTemperature: 1,
|
|
2755
2494
|
inputPrice: 4,
|
|
2756
2495
|
outputPrice: 18,
|
|
2496
|
+
cacheReadsPrice: 0.4,
|
|
2757
2497
|
tiers: [
|
|
2758
2498
|
{
|
|
2759
2499
|
contextWindow: 2e5,
|
|
2760
2500
|
inputPrice: 2,
|
|
2761
|
-
outputPrice: 12
|
|
2501
|
+
outputPrice: 12,
|
|
2502
|
+
cacheReadsPrice: 0.2
|
|
2762
2503
|
},
|
|
2763
2504
|
{
|
|
2764
2505
|
contextWindow: Infinity,
|
|
2765
2506
|
inputPrice: 4,
|
|
2766
|
-
outputPrice: 18
|
|
2507
|
+
outputPrice: 18,
|
|
2508
|
+
cacheReadsPrice: 0.4
|
|
2767
2509
|
}
|
|
2768
2510
|
]
|
|
2769
2511
|
},
|
|
@@ -2771,25 +2513,20 @@ var geminiModels = {
|
|
|
2771
2513
|
maxTokens: 65536,
|
|
2772
2514
|
contextWindow: 1048576,
|
|
2773
2515
|
supportsImages: true,
|
|
2774
|
-
supportsNativeTools: true,
|
|
2775
|
-
defaultToolProtocol: "native",
|
|
2776
2516
|
supportsPromptCache: true,
|
|
2777
2517
|
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
|
|
2778
2518
|
reasoningEffort: "medium",
|
|
2779
2519
|
supportsTemperature: true,
|
|
2780
2520
|
defaultTemperature: 1,
|
|
2781
|
-
inputPrice: 0.
|
|
2782
|
-
outputPrice:
|
|
2783
|
-
cacheReadsPrice: 0.
|
|
2784
|
-
cacheWritesPrice: 1
|
|
2521
|
+
inputPrice: 0.5,
|
|
2522
|
+
outputPrice: 3,
|
|
2523
|
+
cacheReadsPrice: 0.05
|
|
2785
2524
|
},
|
|
2786
2525
|
// 2.5 Pro models
|
|
2787
2526
|
"gemini-2.5-pro": {
|
|
2788
2527
|
maxTokens: 64e3,
|
|
2789
2528
|
contextWindow: 1048576,
|
|
2790
2529
|
supportsImages: true,
|
|
2791
|
-
supportsNativeTools: true,
|
|
2792
|
-
defaultToolProtocol: "native",
|
|
2793
2530
|
supportsPromptCache: true,
|
|
2794
2531
|
inputPrice: 2.5,
|
|
2795
2532
|
// This is the pricing for prompts above 200k tokens.
|
|
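
In the hunk above, the updated Gemini entry gains a cacheReadsPrice both at the top level (0.4) and inside each pricing tier (0.2 up to a 200k context, 0.4 beyond). A minimal sketch of tier selection using exactly the tier values shown; how a consumer matches a prompt to a tier is an assumption here (first tier whose contextWindow bound covers the prompt), not something this diff specifies.

// Tier values copied from the hunk above; selection logic is assumed.
type PriceTier = { contextWindow: number; inputPrice: number; outputPrice: number; cacheReadsPrice?: number }

const tiers: PriceTier[] = [
    { contextWindow: 2e5, inputPrice: 2, outputPrice: 12, cacheReadsPrice: 0.2 },
    { contextWindow: Infinity, inputPrice: 4, outputPrice: 18, cacheReadsPrice: 0.4 },
]

function pickTier(promptTokens: number): PriceTier {
    return tiers.find((t) => promptTokens <= t.contextWindow) ?? tiers[tiers.length - 1]
}
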
@@ -2818,8 +2555,6 @@ var geminiModels = {
|
|
|
2818
2555
|
maxTokens: 65535,
|
|
2819
2556
|
contextWindow: 1048576,
|
|
2820
2557
|
supportsImages: true,
|
|
2821
|
-
supportsNativeTools: true,
|
|
2822
|
-
defaultToolProtocol: "native",
|
|
2823
2558
|
supportsPromptCache: true,
|
|
2824
2559
|
inputPrice: 2.5,
|
|
2825
2560
|
// This is the pricing for prompts above 200k tokens.
|
|
@@ -2847,8 +2582,6 @@ var geminiModels = {
|
|
|
2847
2582
|
maxTokens: 65535,
|
|
2848
2583
|
contextWindow: 1048576,
|
|
2849
2584
|
supportsImages: true,
|
|
2850
|
-
supportsNativeTools: true,
|
|
2851
|
-
defaultToolProtocol: "native",
|
|
2852
2585
|
supportsPromptCache: true,
|
|
2853
2586
|
inputPrice: 2.5,
|
|
2854
2587
|
// This is the pricing for prompts above 200k tokens.
|
|
@@ -2874,8 +2607,6 @@ var geminiModels = {
|
|
|
2874
2607
|
maxTokens: 65535,
|
|
2875
2608
|
contextWindow: 1048576,
|
|
2876
2609
|
supportsImages: true,
|
|
2877
|
-
supportsNativeTools: true,
|
|
2878
|
-
defaultToolProtocol: "native",
|
|
2879
2610
|
supportsPromptCache: true,
|
|
2880
2611
|
inputPrice: 2.5,
|
|
2881
2612
|
// This is the pricing for prompts above 200k tokens.
|
|
@@ -2904,8 +2635,6 @@ var geminiModels = {
|
|
|
2904
2635
|
maxTokens: 65536,
|
|
2905
2636
|
contextWindow: 1048576,
|
|
2906
2637
|
supportsImages: true,
|
|
2907
|
-
supportsNativeTools: true,
|
|
2908
|
-
defaultToolProtocol: "native",
|
|
2909
2638
|
supportsPromptCache: true,
|
|
2910
2639
|
inputPrice: 0.3,
|
|
2911
2640
|
outputPrice: 2.5,
|
|
@@ -2918,8 +2647,6 @@ var geminiModels = {
|
|
|
2918
2647
|
maxTokens: 65536,
|
|
2919
2648
|
contextWindow: 1048576,
|
|
2920
2649
|
supportsImages: true,
|
|
2921
|
-
supportsNativeTools: true,
|
|
2922
|
-
defaultToolProtocol: "native",
|
|
2923
2650
|
supportsPromptCache: true,
|
|
2924
2651
|
inputPrice: 0.3,
|
|
2925
2652
|
outputPrice: 2.5,
|
|
@@ -2932,8 +2659,6 @@ var geminiModels = {
|
|
|
2932
2659
|
maxTokens: 64e3,
|
|
2933
2660
|
contextWindow: 1048576,
|
|
2934
2661
|
supportsImages: true,
|
|
2935
|
-
supportsNativeTools: true,
|
|
2936
|
-
defaultToolProtocol: "native",
|
|
2937
2662
|
supportsPromptCache: true,
|
|
2938
2663
|
inputPrice: 0.3,
|
|
2939
2664
|
outputPrice: 2.5,
|
|
@@ -2947,8 +2672,6 @@ var geminiModels = {
|
|
|
2947
2672
|
maxTokens: 65536,
|
|
2948
2673
|
contextWindow: 1048576,
|
|
2949
2674
|
supportsImages: true,
|
|
2950
|
-
supportsNativeTools: true,
|
|
2951
|
-
defaultToolProtocol: "native",
|
|
2952
2675
|
supportsPromptCache: true,
|
|
2953
2676
|
inputPrice: 0.1,
|
|
2954
2677
|
outputPrice: 0.4,
|
|
@@ -2961,8 +2684,6 @@ var geminiModels = {
|
|
|
2961
2684
|
maxTokens: 65536,
|
|
2962
2685
|
contextWindow: 1048576,
|
|
2963
2686
|
supportsImages: true,
|
|
2964
|
-
supportsNativeTools: true,
|
|
2965
|
-
defaultToolProtocol: "native",
|
|
2966
2687
|
supportsPromptCache: true,
|
|
2967
2688
|
inputPrice: 0.1,
|
|
2968
2689
|
outputPrice: 0.4,
|
|
@@ -2982,8 +2703,6 @@ var groqModels = {
|
|
|
2982
2703
|
contextWindow: 131072,
|
|
2983
2704
|
supportsImages: false,
|
|
2984
2705
|
supportsPromptCache: false,
|
|
2985
|
-
supportsNativeTools: true,
|
|
2986
|
-
defaultToolProtocol: "native",
|
|
2987
2706
|
inputPrice: 0.05,
|
|
2988
2707
|
outputPrice: 0.08,
|
|
2989
2708
|
description: "Meta Llama 3.1 8B Instant model, 128K context."
|
|
@@ -2993,8 +2712,6 @@ var groqModels = {
|
|
|
2993
2712
|
contextWindow: 131072,
|
|
2994
2713
|
supportsImages: false,
|
|
2995
2714
|
supportsPromptCache: false,
|
|
2996
|
-
supportsNativeTools: true,
|
|
2997
|
-
defaultToolProtocol: "native",
|
|
2998
2715
|
inputPrice: 0.59,
|
|
2999
2716
|
outputPrice: 0.79,
|
|
3000
2717
|
description: "Meta Llama 3.3 70B Versatile model, 128K context."
|
|
@@ -3004,8 +2721,6 @@ var groqModels = {
|
|
|
3004
2721
|
contextWindow: 131072,
|
|
3005
2722
|
supportsImages: false,
|
|
3006
2723
|
supportsPromptCache: false,
|
|
3007
|
-
supportsNativeTools: true,
|
|
3008
|
-
defaultToolProtocol: "native",
|
|
3009
2724
|
inputPrice: 0.11,
|
|
3010
2725
|
outputPrice: 0.34,
|
|
3011
2726
|
description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
|
|
@@ -3015,8 +2730,6 @@ var groqModels = {
|
|
|
3015
2730
|
contextWindow: 131072,
|
|
3016
2731
|
supportsImages: false,
|
|
3017
2732
|
supportsPromptCache: false,
|
|
3018
|
-
supportsNativeTools: true,
|
|
3019
|
-
defaultToolProtocol: "native",
|
|
3020
2733
|
inputPrice: 0.29,
|
|
3021
2734
|
outputPrice: 0.59,
|
|
3022
2735
|
description: "Alibaba Qwen 3 32B model, 128K context."
|
|
@@ -3026,8 +2739,6 @@ var groqModels = {
|
|
|
3026
2739
|
contextWindow: 262144,
|
|
3027
2740
|
supportsImages: false,
|
|
3028
2741
|
supportsPromptCache: true,
|
|
3029
|
-
supportsNativeTools: true,
|
|
3030
|
-
defaultToolProtocol: "native",
|
|
3031
2742
|
inputPrice: 0.6,
|
|
3032
2743
|
outputPrice: 2.5,
|
|
3033
2744
|
cacheReadsPrice: 0.15,
|
|
@@ -3038,8 +2749,6 @@ var groqModels = {
|
|
|
3038
2749
|
contextWindow: 131072,
|
|
3039
2750
|
supportsImages: false,
|
|
3040
2751
|
supportsPromptCache: false,
|
|
3041
|
-
supportsNativeTools: true,
|
|
3042
|
-
defaultToolProtocol: "native",
|
|
3043
2752
|
inputPrice: 0.15,
|
|
3044
2753
|
outputPrice: 0.75,
|
|
3045
2754
|
description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
|
|
@@ -3049,8 +2758,6 @@ var groqModels = {
|
|
|
3049
2758
|
contextWindow: 131072,
|
|
3050
2759
|
supportsImages: false,
|
|
3051
2760
|
supportsPromptCache: false,
|
|
3052
|
-
supportsNativeTools: true,
|
|
3053
|
-
defaultToolProtocol: "native",
|
|
3054
2761
|
inputPrice: 0.1,
|
|
3055
2762
|
outputPrice: 0.5,
|
|
3056
2763
|
description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
|
|
@@ -3077,7 +2784,6 @@ var ioIntelligenceModels = {
|
|
|
3077
2784
|
contextWindow: 128e3,
|
|
3078
2785
|
supportsImages: false,
|
|
3079
2786
|
supportsPromptCache: false,
|
|
3080
|
-
supportsNativeTools: true,
|
|
3081
2787
|
description: "DeepSeek R1 reasoning model"
|
|
3082
2788
|
},
|
|
3083
2789
|
"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
|
|
@@ -3085,7 +2791,6 @@ var ioIntelligenceModels = {
|
|
|
3085
2791
|
contextWindow: 43e4,
|
|
3086
2792
|
supportsImages: true,
|
|
3087
2793
|
supportsPromptCache: false,
|
|
3088
|
-
supportsNativeTools: true,
|
|
3089
2794
|
description: "Llama 4 Maverick 17B model"
|
|
3090
2795
|
},
|
|
3091
2796
|
"Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
|
|
@@ -3093,7 +2798,6 @@ var ioIntelligenceModels = {
|
|
|
3093
2798
|
contextWindow: 106e3,
|
|
3094
2799
|
supportsImages: false,
|
|
3095
2800
|
supportsPromptCache: false,
|
|
3096
|
-
supportsNativeTools: true,
|
|
3097
2801
|
description: "Qwen3 Coder 480B specialized for coding"
|
|
3098
2802
|
},
|
|
3099
2803
|
"openai/gpt-oss-120b": {
|
|
@@ -3101,7 +2805,6 @@ var ioIntelligenceModels = {
|
|
|
3101
2805
|
contextWindow: 131072,
|
|
3102
2806
|
supportsImages: false,
|
|
3103
2807
|
supportsPromptCache: false,
|
|
3104
|
-
supportsNativeTools: true,
|
|
3105
2808
|
description: "OpenAI GPT-OSS 120B model"
|
|
3106
2809
|
}
|
|
3107
2810
|
};
|
|
@@ -3113,8 +2816,6 @@ var litellmDefaultModelInfo = {
|
|
|
3113
2816
|
contextWindow: 2e5,
|
|
3114
2817
|
supportsImages: true,
|
|
3115
2818
|
supportsPromptCache: true,
|
|
3116
|
-
supportsNativeTools: true,
|
|
3117
|
-
defaultToolProtocol: "native",
|
|
3118
2819
|
inputPrice: 3,
|
|
3119
2820
|
outputPrice: 15,
|
|
3120
2821
|
cacheWritesPrice: 3.75,
|
|
@@ -3129,8 +2830,6 @@ var lMStudioDefaultModelInfo = {
|
|
|
3129
2830
|
contextWindow: 2e5,
|
|
3130
2831
|
supportsImages: true,
|
|
3131
2832
|
supportsPromptCache: true,
|
|
3132
|
-
supportsNativeTools: true,
|
|
3133
|
-
defaultToolProtocol: "native",
|
|
3134
2833
|
inputPrice: 0,
|
|
3135
2834
|
outputPrice: 0,
|
|
3136
2835
|
cacheWritesPrice: 0,
|
|
@@ -3146,8 +2845,6 @@ var mistralModels = {
|
|
|
3146
2845
|
contextWindow: 128e3,
|
|
3147
2846
|
supportsImages: true,
|
|
3148
2847
|
supportsPromptCache: false,
|
|
3149
|
-
supportsNativeTools: true,
|
|
3150
|
-
defaultToolProtocol: "native",
|
|
3151
2848
|
inputPrice: 2,
|
|
3152
2849
|
outputPrice: 5
|
|
3153
2850
|
},
|
|
@@ -3156,8 +2853,6 @@ var mistralModels = {
|
|
|
3156
2853
|
contextWindow: 131e3,
|
|
3157
2854
|
supportsImages: true,
|
|
3158
2855
|
supportsPromptCache: false,
|
|
3159
|
-
supportsNativeTools: true,
|
|
3160
|
-
defaultToolProtocol: "native",
|
|
3161
2856
|
inputPrice: 0.4,
|
|
3162
2857
|
outputPrice: 2
|
|
3163
2858
|
},
|
|
@@ -3166,8 +2861,6 @@ var mistralModels = {
|
|
|
3166
2861
|
contextWindow: 131e3,
|
|
3167
2862
|
supportsImages: true,
|
|
3168
2863
|
supportsPromptCache: false,
|
|
3169
|
-
supportsNativeTools: true,
|
|
3170
|
-
defaultToolProtocol: "native",
|
|
3171
2864
|
inputPrice: 0.4,
|
|
3172
2865
|
outputPrice: 2
|
|
3173
2866
|
},
|
|
@@ -3176,8 +2869,6 @@ var mistralModels = {
|
|
|
3176
2869
|
contextWindow: 256e3,
|
|
3177
2870
|
supportsImages: false,
|
|
3178
2871
|
supportsPromptCache: false,
|
|
3179
|
-
supportsNativeTools: true,
|
|
3180
|
-
defaultToolProtocol: "native",
|
|
3181
2872
|
inputPrice: 0.3,
|
|
3182
2873
|
outputPrice: 0.9
|
|
3183
2874
|
},
|
|
@@ -3186,8 +2877,6 @@ var mistralModels = {
|
|
|
3186
2877
|
contextWindow: 131e3,
|
|
3187
2878
|
supportsImages: false,
|
|
3188
2879
|
supportsPromptCache: false,
|
|
3189
|
-
supportsNativeTools: true,
|
|
3190
|
-
defaultToolProtocol: "native",
|
|
3191
2880
|
inputPrice: 2,
|
|
3192
2881
|
outputPrice: 6
|
|
3193
2882
|
},
|
|
@@ -3196,8 +2885,6 @@ var mistralModels = {
|
|
|
3196
2885
|
contextWindow: 131e3,
|
|
3197
2886
|
supportsImages: false,
|
|
3198
2887
|
supportsPromptCache: false,
|
|
3199
|
-
supportsNativeTools: true,
|
|
3200
|
-
defaultToolProtocol: "native",
|
|
3201
2888
|
inputPrice: 0.1,
|
|
3202
2889
|
outputPrice: 0.1
|
|
3203
2890
|
},
|
|
@@ -3206,8 +2893,6 @@ var mistralModels = {
|
|
|
3206
2893
|
contextWindow: 131e3,
|
|
3207
2894
|
supportsImages: false,
|
|
3208
2895
|
supportsPromptCache: false,
|
|
3209
|
-
supportsNativeTools: true,
|
|
3210
|
-
defaultToolProtocol: "native",
|
|
3211
2896
|
inputPrice: 0.04,
|
|
3212
2897
|
outputPrice: 0.04
|
|
3213
2898
|
},
|
|
@@ -3216,8 +2901,6 @@ var mistralModels = {
|
|
|
3216
2901
|
contextWindow: 32e3,
|
|
3217
2902
|
supportsImages: false,
|
|
3218
2903
|
supportsPromptCache: false,
|
|
3219
|
-
supportsNativeTools: true,
|
|
3220
|
-
defaultToolProtocol: "native",
|
|
3221
2904
|
inputPrice: 0.2,
|
|
3222
2905
|
outputPrice: 0.6
|
|
3223
2906
|
},
|
|
@@ -3226,8 +2909,6 @@ var mistralModels = {
|
|
|
3226
2909
|
contextWindow: 131e3,
|
|
3227
2910
|
supportsImages: true,
|
|
3228
2911
|
supportsPromptCache: false,
|
|
3229
|
-
supportsNativeTools: true,
|
|
3230
|
-
defaultToolProtocol: "native",
|
|
3231
2912
|
inputPrice: 2,
|
|
3232
2913
|
outputPrice: 6
|
|
3233
2914
|
}
|
|
@@ -3242,8 +2923,6 @@ var moonshotModels = {
|
|
|
3242
2923
|
contextWindow: 131072,
|
|
3243
2924
|
supportsImages: false,
|
|
3244
2925
|
supportsPromptCache: true,
|
|
3245
|
-
supportsNativeTools: true,
|
|
3246
|
-
defaultToolProtocol: "native",
|
|
3247
2926
|
inputPrice: 0.6,
|
|
3248
2927
|
// $0.60 per million tokens (cache miss)
|
|
3249
2928
|
outputPrice: 2.5,
|
|
@@ -3259,8 +2938,6 @@ var moonshotModels = {
|
|
|
3259
2938
|
contextWindow: 262144,
|
|
3260
2939
|
supportsImages: false,
|
|
3261
2940
|
supportsPromptCache: true,
|
|
3262
|
-
supportsNativeTools: true,
|
|
3263
|
-
defaultToolProtocol: "native",
|
|
3264
2941
|
inputPrice: 0.6,
|
|
3265
2942
|
outputPrice: 2.5,
|
|
3266
2943
|
cacheReadsPrice: 0.15,
|
|
@@ -3271,8 +2948,6 @@ var moonshotModels = {
|
|
|
3271
2948
|
contextWindow: 262144,
|
|
3272
2949
|
supportsImages: false,
|
|
3273
2950
|
supportsPromptCache: true,
|
|
3274
|
-
supportsNativeTools: true,
|
|
3275
|
-
defaultToolProtocol: "native",
|
|
3276
2951
|
inputPrice: 2.4,
|
|
3277
2952
|
// $2.40 per million tokens (cache miss)
|
|
3278
2953
|
outputPrice: 10,
|
|
@@ -3291,8 +2966,6 @@ var moonshotModels = {
|
|
|
3291
2966
|
supportsImages: false,
|
|
3292
2967
|
// Text-only (no image/vision support)
|
|
3293
2968
|
supportsPromptCache: true,
|
|
3294
|
-
supportsNativeTools: true,
|
|
3295
|
-
defaultToolProtocol: "native",
|
|
3296
2969
|
inputPrice: 0.6,
|
|
3297
2970
|
// $0.60 per million tokens (cache miss)
|
|
3298
2971
|
outputPrice: 2.5,
|
|
@@ -3306,6 +2979,21 @@ var moonshotModels = {
|
|
|
3306
2979
|
preserveReasoning: true,
|
|
3307
2980
|
defaultTemperature: 1,
|
|
3308
2981
|
description: `The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems.`
|
|
2982
|
+
},
|
|
2983
|
+
"kimi-k2.5": {
|
|
2984
|
+
maxTokens: 16384,
|
|
2985
|
+
contextWindow: 262144,
|
|
2986
|
+
supportsImages: false,
|
|
2987
|
+
supportsPromptCache: true,
|
|
2988
|
+
inputPrice: 0.6,
|
|
2989
|
+
// $0.60 per million tokens (cache miss)
|
|
2990
|
+
outputPrice: 3,
|
|
2991
|
+
// $3.00 per million tokens
|
|
2992
|
+
cacheReadsPrice: 0.1,
|
|
2993
|
+
// $0.10 per million tokens (cache hit)
|
|
2994
|
+
supportsTemperature: true,
|
|
2995
|
+
defaultTemperature: 1,
|
|
2996
|
+
description: "Kimi K2.5 is the latest generation of Moonshot AI's Kimi series, featuring improved reasoning capabilities and enhanced performance across diverse tasks."
|
|
3309
2997
|
}
|
|
3310
2998
|
};
|
|
3311
2999
|
var MOONSHOT_DEFAULT_TEMPERATURE = 0.6;
|
|
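
The moonshotModels hunk above adds a "kimi-k2.5" entry with inputPrice 0.6 (cache miss), cacheReadsPrice 0.1 (cache hit), and outputPrice 3. A quick sketch of the effective input cost, assuming, as the inline comments state, that inputPrice applies to cache misses and cacheReadsPrice to cache hits; the helper is hypothetical.

// Effective input cost for kimi-k2.5 under the cache-miss/cache-hit split
// described by the file's comments. Hypothetical helper for illustration.
function kimiK25InputCostUSD(missTokens: number, hitTokens: number): number {
    const inputPrice = 0.6 // $/M tokens, cache miss
    const cacheReadsPrice = 0.1 // $/M tokens, cache hit
    return (missTokens / 1e6) * inputPrice + (hitTokens / 1e6) * cacheReadsPrice
}

// e.g. 20,000 uncached + 80,000 cached prompt tokens:
// 0.02 * 0.6 + 0.08 * 0.1 = 0.012 + 0.008 = $0.02
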
@@ -3317,7 +3005,6 @@ var ollamaDefaultModelInfo = {
|
|
|
3317
3005
|
contextWindow: 2e5,
|
|
3318
3006
|
supportsImages: true,
|
|
3319
3007
|
supportsPromptCache: true,
|
|
3320
|
-
supportsNativeTools: true,
|
|
3321
3008
|
inputPrice: 0,
|
|
3322
3009
|
outputPrice: 0,
|
|
3323
3010
|
cacheWritesPrice: 0,
|
|
@@ -3331,8 +3018,6 @@ var openAiNativeModels = {
|
|
|
3331
3018
|
"gpt-5.1-codex-max": {
|
|
3332
3019
|
maxTokens: 128e3,
|
|
3333
3020
|
contextWindow: 4e5,
|
|
3334
|
-
supportsNativeTools: true,
|
|
3335
|
-
defaultToolProtocol: "native",
|
|
3336
3021
|
includedTools: ["apply_patch"],
|
|
3337
3022
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3338
3023
|
supportsImages: true,
|
|
@@ -3350,8 +3035,6 @@ var openAiNativeModels = {
|
|
|
3350
3035
|
"gpt-5.2": {
|
|
3351
3036
|
maxTokens: 128e3,
|
|
3352
3037
|
contextWindow: 4e5,
|
|
3353
|
-
supportsNativeTools: true,
|
|
3354
|
-
defaultToolProtocol: "native",
|
|
3355
3038
|
includedTools: ["apply_patch"],
|
|
3356
3039
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3357
3040
|
supportsImages: true,
|
|
@@ -3373,8 +3056,6 @@ var openAiNativeModels = {
|
|
|
3373
3056
|
"gpt-5.2-codex": {
|
|
3374
3057
|
maxTokens: 128e3,
|
|
3375
3058
|
contextWindow: 4e5,
|
|
3376
|
-
supportsNativeTools: true,
|
|
3377
|
-
defaultToolProtocol: "native",
|
|
3378
3059
|
includedTools: ["apply_patch"],
|
|
3379
3060
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3380
3061
|
supportsImages: true,
|
|
@@ -3392,8 +3073,6 @@ var openAiNativeModels = {
|
|
|
3392
3073
|
"gpt-5.2-chat-latest": {
|
|
3393
3074
|
maxTokens: 16384,
|
|
3394
3075
|
contextWindow: 128e3,
|
|
3395
|
-
supportsNativeTools: true,
|
|
3396
|
-
defaultToolProtocol: "native",
|
|
3397
3076
|
includedTools: ["apply_patch"],
|
|
3398
3077
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3399
3078
|
supportsImages: true,
|
|
@@ -3406,8 +3085,6 @@ var openAiNativeModels = {
|
|
|
3406
3085
|
"gpt-5.1": {
|
|
3407
3086
|
maxTokens: 128e3,
|
|
3408
3087
|
contextWindow: 4e5,
|
|
3409
|
-
supportsNativeTools: true,
|
|
3410
|
-
defaultToolProtocol: "native",
|
|
3411
3088
|
includedTools: ["apply_patch"],
|
|
3412
3089
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3413
3090
|
supportsImages: true,
|
|
@@ -3429,8 +3106,6 @@ var openAiNativeModels = {
|
|
|
3429
3106
|
"gpt-5.1-codex": {
|
|
3430
3107
|
maxTokens: 128e3,
|
|
3431
3108
|
contextWindow: 4e5,
|
|
3432
|
-
supportsNativeTools: true,
|
|
3433
|
-
defaultToolProtocol: "native",
|
|
3434
3109
|
includedTools: ["apply_patch"],
|
|
3435
3110
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3436
3111
|
supportsImages: true,
|
|
@@ -3448,8 +3123,6 @@ var openAiNativeModels = {
|
|
|
3448
3123
|
"gpt-5.1-codex-mini": {
|
|
3449
3124
|
maxTokens: 128e3,
|
|
3450
3125
|
contextWindow: 4e5,
|
|
3451
|
-
supportsNativeTools: true,
|
|
3452
|
-
defaultToolProtocol: "native",
|
|
3453
3126
|
includedTools: ["apply_patch"],
|
|
3454
3127
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3455
3128
|
supportsImages: true,
|
|
@@ -3466,8 +3139,6 @@ var openAiNativeModels = {
|
|
|
3466
3139
|
"gpt-5": {
|
|
3467
3140
|
maxTokens: 128e3,
|
|
3468
3141
|
contextWindow: 4e5,
|
|
3469
|
-
supportsNativeTools: true,
|
|
3470
|
-
defaultToolProtocol: "native",
|
|
3471
3142
|
includedTools: ["apply_patch"],
|
|
3472
3143
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3473
3144
|
supportsImages: true,
|
|
@@ -3488,8 +3159,6 @@ var openAiNativeModels = {
|
|
|
3488
3159
|
"gpt-5-mini": {
|
|
3489
3160
|
maxTokens: 128e3,
|
|
3490
3161
|
contextWindow: 4e5,
|
|
3491
|
-
supportsNativeTools: true,
|
|
3492
|
-
defaultToolProtocol: "native",
|
|
3493
3162
|
includedTools: ["apply_patch"],
|
|
3494
3163
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3495
3164
|
supportsImages: true,
|
|
@@ -3510,8 +3179,6 @@ var openAiNativeModels = {
|
|
|
3510
3179
|
"gpt-5-codex": {
|
|
3511
3180
|
maxTokens: 128e3,
|
|
3512
3181
|
contextWindow: 4e5,
|
|
3513
|
-
supportsNativeTools: true,
|
|
3514
|
-
defaultToolProtocol: "native",
|
|
3515
3182
|
includedTools: ["apply_patch"],
|
|
3516
3183
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3517
3184
|
supportsImages: true,
|
|
@@ -3528,8 +3195,6 @@ var openAiNativeModels = {
|
|
|
3528
3195
|
"gpt-5-nano": {
|
|
3529
3196
|
maxTokens: 128e3,
|
|
3530
3197
|
contextWindow: 4e5,
|
|
3531
|
-
supportsNativeTools: true,
|
|
3532
|
-
defaultToolProtocol: "native",
|
|
3533
3198
|
includedTools: ["apply_patch"],
|
|
3534
3199
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3535
3200
|
supportsImages: true,
|
|
@@ -3547,8 +3212,6 @@ var openAiNativeModels = {
|
|
|
3547
3212
|
"gpt-5-chat-latest": {
|
|
3548
3213
|
maxTokens: 128e3,
|
|
3549
3214
|
contextWindow: 4e5,
|
|
3550
|
-
supportsNativeTools: true,
|
|
3551
|
-
defaultToolProtocol: "native",
|
|
3552
3215
|
includedTools: ["apply_patch"],
|
|
3553
3216
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3554
3217
|
supportsImages: true,
|
|
@@ -3561,8 +3224,6 @@ var openAiNativeModels = {
|
|
|
3561
3224
|
"gpt-4.1": {
|
|
3562
3225
|
maxTokens: 32768,
|
|
3563
3226
|
contextWindow: 1047576,
|
|
3564
|
-
supportsNativeTools: true,
|
|
3565
|
-
defaultToolProtocol: "native",
|
|
3566
3227
|
includedTools: ["apply_patch"],
|
|
3567
3228
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3568
3229
|
supportsImages: true,
|
|
@@ -3578,8 +3239,6 @@ var openAiNativeModels = {
|
|
|
3578
3239
|
"gpt-4.1-mini": {
|
|
3579
3240
|
maxTokens: 32768,
|
|
3580
3241
|
contextWindow: 1047576,
|
|
3581
|
-
supportsNativeTools: true,
|
|
3582
|
-
defaultToolProtocol: "native",
|
|
3583
3242
|
includedTools: ["apply_patch"],
|
|
3584
3243
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3585
3244
|
supportsImages: true,
|
|
@@ -3595,8 +3254,6 @@ var openAiNativeModels = {
|
|
|
3595
3254
|
"gpt-4.1-nano": {
|
|
3596
3255
|
maxTokens: 32768,
|
|
3597
3256
|
contextWindow: 1047576,
|
|
3598
|
-
supportsNativeTools: true,
|
|
3599
|
-
defaultToolProtocol: "native",
|
|
3600
3257
|
includedTools: ["apply_patch"],
|
|
3601
3258
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3602
3259
|
supportsImages: true,
|
|
@@ -3612,8 +3269,6 @@ var openAiNativeModels = {
|
|
|
3612
3269
|
o3: {
|
|
3613
3270
|
maxTokens: 1e5,
|
|
3614
3271
|
contextWindow: 2e5,
|
|
3615
|
-
supportsNativeTools: true,
|
|
3616
|
-
defaultToolProtocol: "native",
|
|
3617
3272
|
supportsImages: true,
|
|
3618
3273
|
supportsPromptCache: true,
|
|
3619
3274
|
inputPrice: 2,
|
|
@@ -3630,8 +3285,6 @@ var openAiNativeModels = {
|
|
|
3630
3285
|
"o3-high": {
|
|
3631
3286
|
maxTokens: 1e5,
|
|
3632
3287
|
contextWindow: 2e5,
|
|
3633
|
-
supportsNativeTools: true,
|
|
3634
|
-
defaultToolProtocol: "native",
|
|
3635
3288
|
supportsImages: true,
|
|
3636
3289
|
supportsPromptCache: true,
|
|
3637
3290
|
inputPrice: 2,
|
|
@@ -3643,8 +3296,6 @@ var openAiNativeModels = {
|
|
|
3643
3296
|
"o3-low": {
|
|
3644
3297
|
maxTokens: 1e5,
|
|
3645
3298
|
contextWindow: 2e5,
|
|
3646
|
-
supportsNativeTools: true,
|
|
3647
|
-
defaultToolProtocol: "native",
|
|
3648
3299
|
supportsImages: true,
|
|
3649
3300
|
supportsPromptCache: true,
|
|
3650
3301
|
inputPrice: 2,
|
|
@@ -3656,8 +3307,6 @@ var openAiNativeModels = {
|
|
|
3656
3307
|
"o4-mini": {
|
|
3657
3308
|
maxTokens: 1e5,
|
|
3658
3309
|
contextWindow: 2e5,
|
|
3659
|
-
supportsNativeTools: true,
|
|
3660
|
-
defaultToolProtocol: "native",
|
|
3661
3310
|
supportsImages: true,
|
|
3662
3311
|
supportsPromptCache: true,
|
|
3663
3312
|
inputPrice: 1.1,
|
|
@@ -3674,8 +3323,6 @@ var openAiNativeModels = {
|
|
|
3674
3323
|
"o4-mini-high": {
|
|
3675
3324
|
maxTokens: 1e5,
|
|
3676
3325
|
contextWindow: 2e5,
|
|
3677
|
-
supportsNativeTools: true,
|
|
3678
|
-
defaultToolProtocol: "native",
|
|
3679
3326
|
supportsImages: true,
|
|
3680
3327
|
supportsPromptCache: true,
|
|
3681
3328
|
inputPrice: 1.1,
|
|
@@ -3687,8 +3334,6 @@ var openAiNativeModels = {
|
|
|
3687
3334
|
"o4-mini-low": {
|
|
3688
3335
|
maxTokens: 1e5,
|
|
3689
3336
|
contextWindow: 2e5,
|
|
3690
|
-
supportsNativeTools: true,
|
|
3691
|
-
defaultToolProtocol: "native",
|
|
3692
3337
|
supportsImages: true,
|
|
3693
3338
|
supportsPromptCache: true,
|
|
3694
3339
|
inputPrice: 1.1,
|
|
@@ -3700,8 +3345,6 @@ var openAiNativeModels = {
|
|
|
3700
3345
|
"o3-mini": {
|
|
3701
3346
|
maxTokens: 1e5,
|
|
3702
3347
|
contextWindow: 2e5,
|
|
3703
|
-
supportsNativeTools: true,
|
|
3704
|
-
defaultToolProtocol: "native",
|
|
3705
3348
|
supportsImages: false,
|
|
3706
3349
|
supportsPromptCache: true,
|
|
3707
3350
|
inputPrice: 1.1,
|
|
@@ -3714,8 +3357,6 @@ var openAiNativeModels = {
|
|
|
3714
3357
|
"o3-mini-high": {
|
|
3715
3358
|
maxTokens: 1e5,
|
|
3716
3359
|
contextWindow: 2e5,
|
|
3717
|
-
supportsNativeTools: true,
|
|
3718
|
-
defaultToolProtocol: "native",
|
|
3719
3360
|
supportsImages: false,
|
|
3720
3361
|
supportsPromptCache: true,
|
|
3721
3362
|
inputPrice: 1.1,
|
|
@@ -3727,8 +3368,6 @@ var openAiNativeModels = {
|
|
|
3727
3368
|
"o3-mini-low": {
|
|
3728
3369
|
maxTokens: 1e5,
|
|
3729
3370
|
contextWindow: 2e5,
|
|
3730
|
-
supportsNativeTools: true,
|
|
3731
|
-
defaultToolProtocol: "native",
|
|
3732
3371
|
supportsImages: false,
|
|
3733
3372
|
supportsPromptCache: true,
|
|
3734
3373
|
inputPrice: 1.1,
|
|
@@ -3740,8 +3379,6 @@ var openAiNativeModels = {
|
|
|
3740
3379
|
o1: {
|
|
3741
3380
|
maxTokens: 1e5,
|
|
3742
3381
|
contextWindow: 2e5,
|
|
3743
|
-
supportsNativeTools: true,
|
|
3744
|
-
defaultToolProtocol: "native",
|
|
3745
3382
|
supportsImages: true,
|
|
3746
3383
|
supportsPromptCache: true,
|
|
3747
3384
|
inputPrice: 15,
|
|
@@ -3752,8 +3389,6 @@ var openAiNativeModels = {
|
|
|
3752
3389
|
"o1-preview": {
|
|
3753
3390
|
maxTokens: 32768,
|
|
3754
3391
|
contextWindow: 128e3,
|
|
3755
|
-
supportsNativeTools: true,
|
|
3756
|
-
defaultToolProtocol: "native",
|
|
3757
3392
|
supportsImages: true,
|
|
3758
3393
|
supportsPromptCache: true,
|
|
3759
3394
|
inputPrice: 15,
|
|
@@ -3764,8 +3399,6 @@ var openAiNativeModels = {
|
|
|
3764
3399
|
"o1-mini": {
|
|
3765
3400
|
maxTokens: 65536,
|
|
3766
3401
|
contextWindow: 128e3,
|
|
3767
|
-
supportsNativeTools: true,
|
|
3768
|
-
defaultToolProtocol: "native",
|
|
3769
3402
|
supportsImages: true,
|
|
3770
3403
|
supportsPromptCache: true,
|
|
3771
3404
|
inputPrice: 1.1,
|
|
@@ -3776,8 +3409,6 @@ var openAiNativeModels = {
|
|
|
3776
3409
|
"gpt-4o": {
|
|
3777
3410
|
maxTokens: 16384,
|
|
3778
3411
|
contextWindow: 128e3,
|
|
3779
|
-
supportsNativeTools: true,
|
|
3780
|
-
defaultToolProtocol: "native",
|
|
3781
3412
|
supportsImages: true,
|
|
3782
3413
|
supportsPromptCache: true,
|
|
3783
3414
|
inputPrice: 2.5,
|
|
@@ -3791,8 +3422,6 @@ var openAiNativeModels = {
|
|
|
3791
3422
|
"gpt-4o-mini": {
|
|
3792
3423
|
maxTokens: 16384,
|
|
3793
3424
|
contextWindow: 128e3,
|
|
3794
|
-
supportsNativeTools: true,
|
|
3795
|
-
defaultToolProtocol: "native",
|
|
3796
3425
|
supportsImages: true,
|
|
3797
3426
|
supportsPromptCache: true,
|
|
3798
3427
|
inputPrice: 0.15,
|
|
@@ -3806,8 +3435,6 @@ var openAiNativeModels = {
|
|
|
3806
3435
|
"codex-mini-latest": {
|
|
3807
3436
|
maxTokens: 16384,
|
|
3808
3437
|
contextWindow: 2e5,
|
|
3809
|
-
supportsNativeTools: true,
|
|
3810
|
-
defaultToolProtocol: "native",
|
|
3811
3438
|
supportsImages: false,
|
|
3812
3439
|
supportsPromptCache: false,
|
|
3813
3440
|
inputPrice: 1.5,
|
|
@@ -3820,8 +3447,6 @@ var openAiNativeModels = {
|
|
|
3820
3447
|
"gpt-5-2025-08-07": {
|
|
3821
3448
|
maxTokens: 128e3,
|
|
3822
3449
|
contextWindow: 4e5,
|
|
3823
|
-
supportsNativeTools: true,
|
|
3824
|
-
defaultToolProtocol: "native",
|
|
3825
3450
|
includedTools: ["apply_patch"],
|
|
3826
3451
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3827
3452
|
supportsImages: true,
|
|
@@ -3842,8 +3467,6 @@ var openAiNativeModels = {
|
|
|
3842
3467
|
"gpt-5-mini-2025-08-07": {
|
|
3843
3468
|
maxTokens: 128e3,
|
|
3844
3469
|
contextWindow: 4e5,
|
|
3845
|
-
supportsNativeTools: true,
|
|
3846
|
-
defaultToolProtocol: "native",
|
|
3847
3470
|
includedTools: ["apply_patch"],
|
|
3848
3471
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3849
3472
|
supportsImages: true,
|
|
@@ -3864,8 +3487,6 @@ var openAiNativeModels = {
|
|
|
3864
3487
|
"gpt-5-nano-2025-08-07": {
|
|
3865
3488
|
maxTokens: 128e3,
|
|
3866
3489
|
contextWindow: 4e5,
|
|
3867
|
-
supportsNativeTools: true,
|
|
3868
|
-
defaultToolProtocol: "native",
|
|
3869
3490
|
includedTools: ["apply_patch"],
|
|
3870
3491
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3871
3492
|
supportsImages: true,
|
|
@@ -3887,9 +3508,7 @@ var openAiModelInfoSaneDefaults = {
|
|
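
Even with the native-tool flags removed, the GPT-5.x entries above keep includedTools: ["apply_patch"] and excludedTools: ["apply_diff", "write_to_file"]. A sketch of how those two arrays could shape a tool list; the exact semantics are not defined in this diff, so the add/remove behaviour below is an assumption.

// Assumed semantics: excludedTools removes tools, includedTools adds opt-in ones.
function resolveTools(base: string[], includedTools: string[] = [], excludedTools: string[] = []): string[] {
    const withIncluded = [...new Set([...base, ...includedTools])]
    return withIncluded.filter((tool) => !excludedTools.includes(tool))
}

// e.g. resolveTools(["read_file", "write_to_file", "apply_diff"],
//                   ["apply_patch"], ["apply_diff", "write_to_file"])
// -> ["read_file", "apply_patch"]
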
|
3887
3508
|
supportsImages: true,
|
|
3888
3509
|
supportsPromptCache: false,
|
|
3889
3510
|
inputPrice: 0,
|
|
3890
|
-
outputPrice: 0
|
|
3891
|
-
supportsNativeTools: true,
|
|
3892
|
-
defaultToolProtocol: "native"
|
|
3511
|
+
outputPrice: 0
|
|
3893
3512
|
};
|
|
3894
3513
|
var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
|
|
3895
3514
|
var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
|
|
@@ -3901,8 +3520,6 @@ var openAiCodexModels = {
|
|
|
3901
3520
|
"gpt-5.1-codex-max": {
|
|
3902
3521
|
maxTokens: 128e3,
|
|
3903
3522
|
contextWindow: 4e5,
|
|
3904
|
-
supportsNativeTools: true,
|
|
3905
|
-
defaultToolProtocol: "native",
|
|
3906
3523
|
includedTools: ["apply_patch"],
|
|
3907
3524
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3908
3525
|
supportsImages: true,
|
|
@@ -3915,11 +3532,24 @@ var openAiCodexModels = {
|
|
|
3915
3532
|
supportsTemperature: false,
|
|
3916
3533
|
description: "GPT-5.1 Codex Max: Maximum capability coding model via ChatGPT subscription"
|
|
3917
3534
|
},
|
|
3535
|
+
"gpt-5.1-codex": {
|
|
3536
|
+
maxTokens: 128e3,
|
|
3537
|
+
contextWindow: 4e5,
|
|
3538
|
+
includedTools: ["apply_patch"],
|
|
3539
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3540
|
+
supportsImages: true,
|
|
3541
|
+
supportsPromptCache: true,
|
|
3542
|
+
supportsReasoningEffort: ["low", "medium", "high"],
|
|
3543
|
+
reasoningEffort: "medium",
|
|
3544
|
+
// Subscription-based: no per-token costs
|
|
3545
|
+
inputPrice: 0,
|
|
3546
|
+
outputPrice: 0,
|
|
3547
|
+
supportsTemperature: false,
|
|
3548
|
+
description: "GPT-5.1 Codex: GPT-5.1 optimized for agentic coding via ChatGPT subscription"
|
|
3549
|
+
},
|
|
3918
3550
|
"gpt-5.2-codex": {
|
|
3919
3551
|
maxTokens: 128e3,
|
|
3920
3552
|
contextWindow: 4e5,
|
|
3921
|
-
supportsNativeTools: true,
|
|
3922
|
-
defaultToolProtocol: "native",
|
|
3923
3553
|
includedTools: ["apply_patch"],
|
|
3924
3554
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3925
3555
|
supportsImages: true,
|
|
@@ -3931,11 +3561,71 @@ var openAiCodexModels = {
|
|
|
3931
3561
|
supportsTemperature: false,
|
|
3932
3562
|
description: "GPT-5.2 Codex: OpenAI's flagship coding model via ChatGPT subscription"
|
|
3933
3563
|
},
|
|
3564
|
+
"gpt-5.1": {
|
|
3565
|
+
maxTokens: 128e3,
|
|
3566
|
+
contextWindow: 4e5,
|
|
3567
|
+
includedTools: ["apply_patch"],
|
|
3568
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3569
|
+
supportsImages: true,
|
|
3570
|
+
supportsPromptCache: true,
|
|
3571
|
+
supportsReasoningEffort: ["none", "low", "medium", "high"],
|
|
3572
|
+
reasoningEffort: "medium",
|
|
3573
|
+
// Subscription-based: no per-token costs
|
|
3574
|
+
inputPrice: 0,
|
|
3575
|
+
outputPrice: 0,
|
|
3576
|
+
supportsVerbosity: true,
|
|
3577
|
+
supportsTemperature: false,
|
|
3578
|
+
description: "GPT-5.1: General GPT-5.1 model via ChatGPT subscription"
|
|
3579
|
+
},
|
|
3580
|
+
"gpt-5": {
|
|
3581
|
+
maxTokens: 128e3,
|
|
3582
|
+
contextWindow: 4e5,
|
|
3583
|
+
includedTools: ["apply_patch"],
|
|
3584
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3585
|
+
supportsImages: true,
|
|
3586
|
+
supportsPromptCache: true,
|
|
3587
|
+
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
|
|
3588
|
+
reasoningEffort: "medium",
|
|
3589
|
+
// Subscription-based: no per-token costs
|
|
3590
|
+
inputPrice: 0,
|
|
3591
|
+
outputPrice: 0,
|
|
3592
|
+
supportsVerbosity: true,
|
|
3593
|
+
supportsTemperature: false,
|
|
3594
|
+
description: "GPT-5: General GPT-5 model via ChatGPT subscription"
|
|
3595
|
+
},
|
|
3596
|
+
"gpt-5-codex": {
|
|
3597
|
+
maxTokens: 128e3,
|
|
3598
|
+
contextWindow: 4e5,
|
|
3599
|
+
includedTools: ["apply_patch"],
|
|
3600
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3601
|
+
supportsImages: true,
|
|
3602
|
+
supportsPromptCache: true,
|
|
3603
|
+
supportsReasoningEffort: ["low", "medium", "high"],
|
|
3604
|
+
reasoningEffort: "medium",
|
|
3605
|
+
// Subscription-based: no per-token costs
|
|
3606
|
+
inputPrice: 0,
|
|
3607
|
+
outputPrice: 0,
|
|
3608
|
+
supportsTemperature: false,
|
|
3609
|
+
description: "GPT-5 Codex: GPT-5 optimized for agentic coding via ChatGPT subscription"
|
|
3610
|
+
},
|
|
3611
|
+
"gpt-5-codex-mini": {
|
|
3612
|
+
maxTokens: 128e3,
|
|
3613
|
+
contextWindow: 4e5,
|
|
3614
|
+
includedTools: ["apply_patch"],
|
|
3615
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3616
|
+
supportsImages: true,
|
|
3617
|
+
supportsPromptCache: true,
|
|
3618
|
+
supportsReasoningEffort: ["low", "medium", "high"],
|
|
3619
|
+
reasoningEffort: "medium",
|
|
3620
|
+
// Subscription-based: no per-token costs
|
|
3621
|
+
inputPrice: 0,
|
|
3622
|
+
outputPrice: 0,
|
|
3623
|
+
supportsTemperature: false,
|
|
3624
|
+
description: "GPT-5 Codex Mini: Faster coding model via ChatGPT subscription"
|
|
3625
|
+
},
|
|
3934
3626
|
"gpt-5.1-codex-mini": {
|
|
3935
3627
|
maxTokens: 128e3,
|
|
3936
3628
|
contextWindow: 4e5,
|
|
3937
|
-
supportsNativeTools: true,
|
|
3938
|
-
defaultToolProtocol: "native",
|
|
3939
3629
|
includedTools: ["apply_patch"],
|
|
3940
3630
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3941
3631
|
supportsImages: true,
|
|
@@ -3950,8 +3640,6 @@ var openAiCodexModels = {
|
|
|
3950
3640
|
"gpt-5.2": {
|
|
3951
3641
|
maxTokens: 128e3,
|
|
3952
3642
|
contextWindow: 4e5,
|
|
3953
|
-
supportsNativeTools: true,
|
|
3954
|
-
defaultToolProtocol: "native",
|
|
3955
3643
|
includedTools: ["apply_patch"],
|
|
3956
3644
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3957
3645
|
supportsImages: true,
|
|
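
The openAiCodexModels entries added above (gpt-5.1-codex, gpt-5.1, gpt-5, gpt-5-codex, gpt-5-codex-mini) are subscription-based: inputPrice and outputPrice are 0, and each declares supportsReasoningEffort plus a default reasoningEffort. A minimal sketch of reading those defaults; the object below is trimmed to the fields shown in the hunk and is not the full shipped entry.

// Trimmed copy of the gpt-5.1-codex fields shown above, used to pick a
// reasoning effort: fall back to the entry's default when the requested
// value is missing or unsupported.
const gpt51CodexSketch = {
    supportsReasoningEffort: ["low", "medium", "high"] as const,
    reasoningEffort: "medium" as const,
    inputPrice: 0,
    outputPrice: 0,
}

function effortOrDefault(requested: string | undefined): string {
    const { supportsReasoningEffort, reasoningEffort } = gpt51CodexSketch
    return requested && (supportsReasoningEffort as readonly string[]).includes(requested)
        ? requested
        : reasoningEffort
}
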
@@ -3972,7 +3660,6 @@ var openRouterDefaultModelInfo = {
|
|
|
3972
3660
|
contextWindow: 2e5,
|
|
3973
3661
|
supportsImages: true,
|
|
3974
3662
|
supportsPromptCache: true,
|
|
3975
|
-
supportsNativeTools: true,
|
|
3976
3663
|
inputPrice: 3,
|
|
3977
3664
|
outputPrice: 15,
|
|
3978
3665
|
cacheWritesPrice: 3.75,
|
|
@@ -4046,8 +3733,6 @@ var qwenCodeModels = {
|
|
|
4046
3733
|
contextWindow: 1e6,
|
|
4047
3734
|
supportsImages: false,
|
|
4048
3735
|
supportsPromptCache: false,
|
|
4049
|
-
supportsNativeTools: true,
|
|
4050
|
-
defaultToolProtocol: "native",
|
|
4051
3736
|
inputPrice: 0,
|
|
4052
3737
|
outputPrice: 0,
|
|
4053
3738
|
cacheWritesPrice: 0,
|
|
@@ -4059,8 +3744,6 @@ var qwenCodeModels = {
|
|
|
4059
3744
|
contextWindow: 1e6,
|
|
4060
3745
|
supportsImages: false,
|
|
4061
3746
|
supportsPromptCache: false,
|
|
4062
|
-
supportsNativeTools: true,
|
|
4063
|
-
defaultToolProtocol: "native",
|
|
4064
3747
|
inputPrice: 0,
|
|
4065
3748
|
outputPrice: 0,
|
|
4066
3749
|
cacheWritesPrice: 0,
|
|
@@ -4076,8 +3759,6 @@ var requestyDefaultModelInfo = {
|
|
|
4076
3759
|
contextWindow: 2e5,
|
|
4077
3760
|
supportsImages: true,
|
|
4078
3761
|
supportsPromptCache: true,
|
|
4079
|
-
supportsNativeTools: true,
|
|
4080
|
-
defaultToolProtocol: "native",
|
|
4081
3762
|
inputPrice: 3,
|
|
4082
3763
|
outputPrice: 15,
|
|
4083
3764
|
cacheWritesPrice: 3.75,
|
|
@@ -4133,8 +3814,6 @@ var sambaNovaModels = {
|
|
|
4133
3814
|
contextWindow: 16384,
|
|
4134
3815
|
supportsImages: false,
|
|
4135
3816
|
supportsPromptCache: false,
|
|
4136
|
-
supportsNativeTools: true,
|
|
4137
|
-
defaultToolProtocol: "native",
|
|
4138
3817
|
inputPrice: 0.1,
|
|
4139
3818
|
outputPrice: 0.2,
|
|
4140
3819
|
description: "Meta Llama 3.1 8B Instruct model with 16K context window."
|
|
@@ -4144,8 +3823,6 @@ var sambaNovaModels = {
|
|
|
4144
3823
|
contextWindow: 131072,
|
|
4145
3824
|
supportsImages: false,
|
|
4146
3825
|
supportsPromptCache: false,
|
|
4147
|
-
supportsNativeTools: true,
|
|
4148
|
-
defaultToolProtocol: "native",
|
|
4149
3826
|
inputPrice: 0.6,
|
|
4150
3827
|
outputPrice: 1.2,
|
|
4151
3828
|
description: "Meta Llama 3.3 70B Instruct model with 128K context window."
|
|
@@ -4156,8 +3833,6 @@ var sambaNovaModels = {
|
|
|
4156
3833
|
supportsImages: false,
|
|
4157
3834
|
supportsPromptCache: false,
|
|
4158
3835
|
supportsReasoningBudget: true,
|
|
4159
|
-
supportsNativeTools: true,
|
|
4160
|
-
defaultToolProtocol: "native",
|
|
4161
3836
|
inputPrice: 5,
|
|
4162
3837
|
outputPrice: 7,
|
|
4163
3838
|
description: "DeepSeek R1 reasoning model with 32K context window."
|
|
@@ -4167,8 +3842,6 @@ var sambaNovaModels = {
|
|
|
4167
3842
|
contextWindow: 32768,
|
|
4168
3843
|
supportsImages: false,
|
|
4169
3844
|
supportsPromptCache: false,
|
|
4170
|
-
supportsNativeTools: true,
|
|
4171
|
-
defaultToolProtocol: "native",
|
|
4172
3845
|
inputPrice: 3,
|
|
4173
3846
|
outputPrice: 4.5,
|
|
4174
3847
|
description: "DeepSeek V3 model with 32K context window."
|
|
@@ -4178,8 +3851,6 @@ var sambaNovaModels = {
|
|
|
4178
3851
|
contextWindow: 32768,
|
|
4179
3852
|
supportsImages: false,
|
|
4180
3853
|
supportsPromptCache: false,
|
|
4181
|
-
supportsNativeTools: true,
|
|
4182
|
-
defaultToolProtocol: "native",
|
|
4183
3854
|
inputPrice: 3,
|
|
4184
3855
|
outputPrice: 4.5,
|
|
4185
3856
|
description: "DeepSeek V3.1 model with 32K context window."
|
|
@@ -4189,8 +3860,6 @@ var sambaNovaModels = {
|
|
|
4189
3860
|
contextWindow: 131072,
|
|
4190
3861
|
supportsImages: true,
|
|
4191
3862
|
supportsPromptCache: false,
|
|
4192
|
-
supportsNativeTools: true,
|
|
4193
|
-
defaultToolProtocol: "native",
|
|
4194
3863
|
inputPrice: 0.63,
|
|
4195
3864
|
outputPrice: 1.8,
|
|
4196
3865
|
description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
|
|
@@ -4200,8 +3869,6 @@ var sambaNovaModels = {
|
|
|
4200
3869
|
contextWindow: 8192,
|
|
4201
3870
|
supportsImages: false,
|
|
4202
3871
|
supportsPromptCache: false,
|
|
4203
|
-
supportsNativeTools: true,
|
|
4204
|
-
defaultToolProtocol: "native",
|
|
4205
3872
|
inputPrice: 0.4,
|
|
4206
3873
|
outputPrice: 0.8,
|
|
4207
3874
|
description: "Alibaba Qwen 3 32B model with 8K context window."
|
|
@@ -4211,8 +3878,6 @@ var sambaNovaModels = {
|
|
|
4211
3878
|
contextWindow: 131072,
|
|
4212
3879
|
supportsImages: false,
|
|
4213
3880
|
supportsPromptCache: false,
|
|
4214
|
-
supportsNativeTools: true,
|
|
4215
|
-
defaultToolProtocol: "native",
|
|
4216
3881
|
inputPrice: 0.22,
|
|
4217
3882
|
outputPrice: 0.59,
|
|
4218
3883
|
description: "OpenAI gpt oss 120b model with 128k context window."
|
|
@@ -4226,7 +3891,6 @@ var unboundDefaultModelInfo = {
|
|
|
4226
3891
|
contextWindow: 2e5,
|
|
4227
3892
|
supportsImages: true,
|
|
4228
3893
|
supportsPromptCache: true,
|
|
4229
|
-
supportsNativeTools: true,
|
|
4230
3894
|
inputPrice: 3,
|
|
4231
3895
|
outputPrice: 15,
|
|
4232
3896
|
cacheWritesPrice: 3.75,
|
|
@@ -4240,8 +3904,6 @@ var vertexModels = {
|
|
|
4240
3904
|
maxTokens: 65536,
|
|
4241
3905
|
contextWindow: 1048576,
|
|
4242
3906
|
supportsImages: true,
|
|
4243
|
-
supportsNativeTools: true,
|
|
4244
|
-
defaultToolProtocol: "native",
|
|
4245
3907
|
supportsPromptCache: true,
|
|
4246
3908
|
supportsReasoningEffort: ["low", "high"],
|
|
4247
3909
|
reasoningEffort: "low",
|
|
@@ -4249,16 +3911,19 @@ var vertexModels = {
|
|
|
4249
3911
|
defaultTemperature: 1,
|
|
4250
3912
|
inputPrice: 4,
|
|
4251
3913
|
outputPrice: 18,
|
|
3914
|
+
cacheReadsPrice: 0.4,
|
|
4252
3915
|
tiers: [
|
|
4253
3916
|
{
|
|
4254
3917
|
contextWindow: 2e5,
|
|
4255
3918
|
inputPrice: 2,
|
|
4256
|
-
outputPrice: 12
|
|
3919
|
+
outputPrice: 12,
|
|
3920
|
+
cacheReadsPrice: 0.2
|
|
4257
3921
|
},
|
|
4258
3922
|
{
|
|
4259
3923
|
contextWindow: Infinity,
|
|
4260
3924
|
inputPrice: 4,
|
|
4261
|
-
outputPrice: 18
|
|
3925
|
+
outputPrice: 18,
|
|
3926
|
+
cacheReadsPrice: 0.4
|
|
4262
3927
|
}
|
|
4263
3928
|
]
|
|
4264
3929
|
},
|
|
@@ -4266,24 +3931,19 @@ var vertexModels = {
|
|
|
4266
3931
|
maxTokens: 65536,
|
|
4267
3932
|
contextWindow: 1048576,
|
|
4268
3933
|
supportsImages: true,
|
|
4269
|
-
supportsNativeTools: true,
|
|
4270
|
-
defaultToolProtocol: "native",
|
|
4271
3934
|
supportsPromptCache: true,
|
|
4272
3935
|
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
|
|
4273
3936
|
reasoningEffort: "medium",
|
|
4274
3937
|
supportsTemperature: true,
|
|
4275
3938
|
defaultTemperature: 1,
|
|
4276
|
-
inputPrice: 0.
|
|
4277
|
-
outputPrice:
|
|
4278
|
-
cacheReadsPrice: 0.
|
|
4279
|
-
cacheWritesPrice: 1
|
|
3939
|
+
inputPrice: 0.5,
|
|
3940
|
+
outputPrice: 3,
|
|
3941
|
+
cacheReadsPrice: 0.05
|
|
4280
3942
|
},
|
|
4281
3943
|
"gemini-2.5-flash-preview-05-20:thinking": {
|
|
4282
3944
|
maxTokens: 65535,
|
|
4283
3945
|
contextWindow: 1048576,
|
|
4284
3946
|
supportsImages: true,
|
|
4285
|
-
supportsNativeTools: true,
|
|
4286
|
-
defaultToolProtocol: "native",
|
|
4287
3947
|
supportsPromptCache: true,
|
|
4288
3948
|
inputPrice: 0.15,
|
|
4289
3949
|
outputPrice: 3.5,
|
|
@@ -4295,8 +3955,6 @@ var vertexModels = {
|
|
|
4295
3955
|
maxTokens: 65535,
|
|
4296
3956
|
contextWindow: 1048576,
|
|
4297
3957
|
supportsImages: true,
|
|
4298
|
-
supportsNativeTools: true,
|
|
4299
|
-
defaultToolProtocol: "native",
|
|
4300
3958
|
supportsPromptCache: true,
|
|
4301
3959
|
inputPrice: 0.15,
|
|
4302
3960
|
outputPrice: 0.6
|
|
@@ -4305,8 +3963,6 @@ var vertexModels = {
|
|
|
4305
3963
|
maxTokens: 64e3,
|
|
4306
3964
|
contextWindow: 1048576,
|
|
4307
3965
|
supportsImages: true,
|
|
4308
|
-
supportsNativeTools: true,
|
|
4309
|
-
defaultToolProtocol: "native",
|
|
4310
3966
|
supportsPromptCache: true,
|
|
4311
3967
|
inputPrice: 0.3,
|
|
4312
3968
|
outputPrice: 2.5,
|
|
@@ -4319,8 +3975,6 @@ var vertexModels = {
|
|
|
4319
3975
|
maxTokens: 65535,
|
|
4320
3976
|
contextWindow: 1048576,
|
|
4321
3977
|
supportsImages: true,
|
|
4322
|
-
supportsNativeTools: true,
|
|
4323
|
-
defaultToolProtocol: "native",
|
|
4324
3978
|
supportsPromptCache: false,
|
|
4325
3979
|
inputPrice: 0.15,
|
|
4326
3980
|
outputPrice: 3.5,
|
|
@@ -4332,8 +3986,6 @@ var vertexModels = {
|
|
|
4332
3986
|
maxTokens: 65535,
|
|
4333
3987
|
contextWindow: 1048576,
|
|
4334
3988
|
supportsImages: true,
|
|
4335
|
-
supportsNativeTools: true,
|
|
4336
|
-
defaultToolProtocol: "native",
|
|
4337
3989
|
supportsPromptCache: false,
|
|
4338
3990
|
inputPrice: 0.15,
|
|
4339
3991
|
outputPrice: 0.6
|
|
@@ -4342,8 +3994,6 @@ var vertexModels = {
|
|
|
4342
3994
|
maxTokens: 65535,
|
|
4343
3995
|
contextWindow: 1048576,
|
|
4344
3996
|
supportsImages: true,
|
|
4345
|
-
supportsNativeTools: true,
|
|
4346
|
-
defaultToolProtocol: "native",
|
|
4347
3997
|
supportsPromptCache: true,
|
|
4348
3998
|
inputPrice: 2.5,
|
|
4349
3999
|
outputPrice: 15
|
|
@@ -4352,8 +4002,6 @@ var vertexModels = {
|
|
|
4352
4002
|
maxTokens: 65535,
|
|
4353
4003
|
contextWindow: 1048576,
|
|
4354
4004
|
supportsImages: true,
|
|
4355
|
-
supportsNativeTools: true,
|
|
4356
|
-
defaultToolProtocol: "native",
|
|
4357
4005
|
supportsPromptCache: true,
|
|
4358
4006
|
inputPrice: 2.5,
|
|
4359
4007
|
outputPrice: 15
|
|
@@ -4362,8 +4010,6 @@ var vertexModels = {
|
|
|
4362
4010
|
maxTokens: 65535,
|
|
4363
4011
|
contextWindow: 1048576,
|
|
4364
4012
|
supportsImages: true,
|
|
4365
|
-
supportsNativeTools: true,
|
|
4366
|
-
defaultToolProtocol: "native",
|
|
4367
4013
|
supportsPromptCache: true,
|
|
4368
4014
|
inputPrice: 2.5,
|
|
4369
4015
|
outputPrice: 15,
|
|
@@ -4374,8 +4020,6 @@ var vertexModels = {
|
|
|
4374
4020
|
maxTokens: 64e3,
|
|
4375
4021
|
contextWindow: 1048576,
|
|
4376
4022
|
supportsImages: true,
|
|
4377
|
-
supportsNativeTools: true,
|
|
4378
|
-
defaultToolProtocol: "native",
|
|
4379
4023
|
supportsPromptCache: true,
|
|
4380
4024
|
inputPrice: 2.5,
|
|
4381
4025
|
outputPrice: 15,
|
|
@@ -4401,8 +4045,6 @@ var vertexModels = {
|
|
|
4401
4045
|
maxTokens: 65535,
|
|
4402
4046
|
contextWindow: 1048576,
|
|
4403
4047
|
supportsImages: true,
|
|
4404
|
-
supportsNativeTools: true,
|
|
4405
|
-
defaultToolProtocol: "native",
|
|
4406
4048
|
supportsPromptCache: false,
|
|
4407
4049
|
inputPrice: 0,
|
|
4408
4050
|
outputPrice: 0
|
|
@@ -4411,8 +4053,6 @@ var vertexModels = {
|
|
|
4411
4053
|
maxTokens: 8192,
|
|
4412
4054
|
contextWindow: 2097152,
|
|
4413
4055
|
supportsImages: true,
|
|
4414
|
-
supportsNativeTools: true,
|
|
4415
|
-
defaultToolProtocol: "native",
|
|
4416
4056
|
supportsPromptCache: false,
|
|
4417
4057
|
inputPrice: 0,
|
|
4418
4058
|
outputPrice: 0
|
|
@@ -4421,8 +4061,6 @@ var vertexModels = {
|
|
|
4421
4061
|
maxTokens: 8192,
|
|
4422
4062
|
contextWindow: 1048576,
|
|
4423
4063
|
supportsImages: true,
|
|
4424
|
-
supportsNativeTools: true,
|
|
4425
|
-
defaultToolProtocol: "native",
|
|
4426
4064
|
supportsPromptCache: true,
|
|
4427
4065
|
inputPrice: 0.15,
|
|
4428
4066
|
outputPrice: 0.6
|
|
@@ -4431,8 +4069,6 @@ var vertexModels = {
|
|
|
4431
4069
|
maxTokens: 8192,
|
|
4432
4070
|
contextWindow: 1048576,
|
|
4433
4071
|
supportsImages: true,
|
|
4434
|
-
supportsNativeTools: true,
|
|
4435
|
-
defaultToolProtocol: "native",
|
|
4436
4072
|
supportsPromptCache: false,
|
|
4437
4073
|
inputPrice: 0.075,
|
|
4438
4074
|
outputPrice: 0.3
|
|
@@ -4441,8 +4077,6 @@ var vertexModels = {
|
|
|
4441
4077
|
maxTokens: 8192,
|
|
4442
4078
|
contextWindow: 32768,
|
|
4443
4079
|
supportsImages: true,
|
|
4444
|
-
supportsNativeTools: true,
|
|
4445
|
-
defaultToolProtocol: "native",
|
|
4446
4080
|
supportsPromptCache: false,
|
|
4447
4081
|
inputPrice: 0,
|
|
4448
4082
|
outputPrice: 0
|
|
@@ -4451,8 +4085,6 @@ var vertexModels = {
|
|
|
4451
4085
|
maxTokens: 8192,
|
|
4452
4086
|
contextWindow: 1048576,
|
|
4453
4087
|
supportsImages: true,
|
|
4454
|
-
supportsNativeTools: true,
|
|
4455
|
-
defaultToolProtocol: "native",
|
|
4456
4088
|
supportsPromptCache: true,
|
|
4457
4089
|
inputPrice: 0.075,
|
|
4458
4090
|
outputPrice: 0.3
|
|
@@ -4461,8 +4093,6 @@ var vertexModels = {
|
|
|
4461
4093
|
maxTokens: 8192,
|
|
4462
4094
|
contextWindow: 2097152,
|
|
4463
4095
|
supportsImages: true,
|
|
4464
|
-
supportsNativeTools: true,
|
|
4465
|
-
defaultToolProtocol: "native",
|
|
4466
4096
|
supportsPromptCache: false,
|
|
4467
4097
|
inputPrice: 1.25,
|
|
4468
4098
|
outputPrice: 5
|
|
@@ -4473,8 +4103,6 @@ var vertexModels = {
|
|
|
4473
4103
|
// Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
|
|
4474
4104
|
supportsImages: true,
|
|
4475
4105
|
supportsPromptCache: true,
|
|
4476
|
-
supportsNativeTools: true,
|
|
4477
|
-
defaultToolProtocol: "native",
|
|
4478
4106
|
inputPrice: 3,
|
|
4479
4107
|
// $3 per million input tokens (≤200K context)
|
|
4480
4108
|
outputPrice: 15,
|
|
@@ -4506,8 +4134,6 @@ var vertexModels = {
|
|
|
4506
4134
|
// Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
|
|
4507
4135
|
supportsImages: true,
|
|
4508
4136
|
supportsPromptCache: true,
|
|
4509
|
-
supportsNativeTools: true,
|
|
4510
|
-
defaultToolProtocol: "native",
|
|
4511
4137
|
inputPrice: 3,
|
|
4512
4138
|
// $3 per million input tokens (≤200K context)
|
|
4513
4139
|
outputPrice: 15,
|
|
@@ -4538,8 +4164,6 @@ var vertexModels = {
|
|
|
4538
4164
|
contextWindow: 2e5,
|
|
4539
4165
|
supportsImages: true,
|
|
4540
4166
|
supportsPromptCache: true,
|
|
4541
|
-
supportsNativeTools: true,
|
|
4542
|
-
defaultToolProtocol: "native",
|
|
4543
4167
|
inputPrice: 1,
|
|
4544
4168
|
outputPrice: 5,
|
|
4545
4169
|
cacheWritesPrice: 1.25,
|
|
@@ -4551,8 +4175,6 @@ var vertexModels = {
|
|
|
4551
4175
|
contextWindow: 2e5,
|
|
4552
4176
|
supportsImages: true,
|
|
4553
4177
|
supportsPromptCache: true,
|
|
4554
|
-
supportsNativeTools: true,
|
|
4555
|
-
defaultToolProtocol: "native",
|
|
4556
4178
|
inputPrice: 5,
|
|
4557
4179
|
outputPrice: 25,
|
|
4558
4180
|
cacheWritesPrice: 6.25,
|
|
@@ -4564,8 +4186,6 @@ var vertexModels = {
|
|
|
4564
4186
|
contextWindow: 2e5,
|
|
4565
4187
|
supportsImages: true,
|
|
4566
4188
|
supportsPromptCache: true,
|
|
4567
|
-
supportsNativeTools: true,
|
|
4568
|
-
defaultToolProtocol: "native",
|
|
4569
4189
|
inputPrice: 15,
|
|
4570
4190
|
outputPrice: 75,
|
|
4571
4191
|
cacheWritesPrice: 18.75,
|
|
@@ -4577,8 +4197,6 @@ var vertexModels = {
|
|
|
4577
4197
|
contextWindow: 2e5,
|
|
4578
4198
|
supportsImages: true,
|
|
4579
4199
|
supportsPromptCache: true,
|
|
4580
|
-
supportsNativeTools: true,
|
|
4581
|
-
defaultToolProtocol: "native",
|
|
4582
4200
|
inputPrice: 15,
|
|
4583
4201
|
outputPrice: 75,
|
|
4584
4202
|
cacheWritesPrice: 18.75,
|
|
@@ -4589,8 +4207,6 @@ var vertexModels = {
|
|
|
4589
4207
|
contextWindow: 2e5,
|
|
4590
4208
|
supportsImages: true,
|
|
4591
4209
|
supportsPromptCache: true,
|
|
4592
|
-
supportsNativeTools: true,
|
|
4593
|
-
defaultToolProtocol: "native",
|
|
4594
4210
|
inputPrice: 3,
|
|
4595
4211
|
outputPrice: 15,
|
|
4596
4212
|
cacheWritesPrice: 3.75,
|
|
@@ -4603,8 +4219,6 @@ var vertexModels = {
|
|
|
4603
4219
|
contextWindow: 2e5,
|
|
4604
4220
|
supportsImages: true,
|
|
4605
4221
|
supportsPromptCache: true,
|
|
4606
|
-
supportsNativeTools: true,
|
|
4607
|
-
defaultToolProtocol: "native",
|
|
4608
4222
|
inputPrice: 3,
|
|
4609
4223
|
outputPrice: 15,
|
|
4610
4224
|
cacheWritesPrice: 3.75,
|
|
@@ -4615,8 +4229,6 @@ var vertexModels = {
|
|
|
4615
4229
|
contextWindow: 2e5,
|
|
4616
4230
|
supportsImages: true,
|
|
4617
4231
|
supportsPromptCache: true,
|
|
4618
|
-
supportsNativeTools: true,
|
|
4619
|
-
defaultToolProtocol: "native",
|
|
4620
4232
|
inputPrice: 3,
|
|
4621
4233
|
outputPrice: 15,
|
|
4622
4234
|
cacheWritesPrice: 3.75,
|
|
@@ -4627,8 +4239,6 @@ var vertexModels = {
|
|
|
4627
4239
|
contextWindow: 2e5,
|
|
4628
4240
|
supportsImages: true,
|
|
4629
4241
|
supportsPromptCache: true,
|
|
4630
|
-
supportsNativeTools: true,
|
|
4631
|
-
defaultToolProtocol: "native",
|
|
4632
4242
|
inputPrice: 3,
|
|
4633
4243
|
outputPrice: 15,
|
|
4634
4244
|
cacheWritesPrice: 3.75,
|
|
@@ -4639,8 +4249,6 @@ var vertexModels = {
|
|
|
4639
4249
|
contextWindow: 2e5,
|
|
4640
4250
|
supportsImages: false,
|
|
4641
4251
|
supportsPromptCache: true,
|
|
4642
|
-
supportsNativeTools: true,
|
|
4643
|
-
defaultToolProtocol: "native",
|
|
4644
4252
|
inputPrice: 1,
|
|
4645
4253
|
outputPrice: 5,
|
|
4646
4254
|
cacheWritesPrice: 1.25,
|
|
@@ -4651,8 +4259,6 @@ var vertexModels = {
|
|
|
4651
4259
|
contextWindow: 2e5,
|
|
4652
4260
|
supportsImages: true,
|
|
4653
4261
|
supportsPromptCache: true,
|
|
4654
|
-
supportsNativeTools: true,
|
|
4655
|
-
defaultToolProtocol: "native",
|
|
4656
4262
|
inputPrice: 15,
|
|
4657
4263
|
outputPrice: 75,
|
|
4658
4264
|
cacheWritesPrice: 18.75,
|
|
@@ -4663,8 +4269,6 @@ var vertexModels = {
|
|
|
4663
4269
|
contextWindow: 2e5,
|
|
4664
4270
|
supportsImages: true,
|
|
4665
4271
|
supportsPromptCache: true,
|
|
4666
|
-
supportsNativeTools: true,
|
|
4667
|
-
defaultToolProtocol: "native",
|
|
4668
4272
|
inputPrice: 0.25,
|
|
4669
4273
|
outputPrice: 1.25,
|
|
4670
4274
|
cacheWritesPrice: 0.3,
|
|
@@ -4674,8 +4278,6 @@ var vertexModels = {
|
|
|
4674
4278
|
maxTokens: 64e3,
|
|
4675
4279
|
contextWindow: 1048576,
|
|
4676
4280
|
supportsImages: true,
|
|
4677
|
-
supportsNativeTools: true,
|
|
4678
|
-
defaultToolProtocol: "native",
|
|
4679
4281
|
supportsPromptCache: true,
|
|
4680
4282
|
inputPrice: 0.1,
|
|
4681
4283
|
outputPrice: 0.4,
|
|
@@ -4689,7 +4291,6 @@ var vertexModels = {
|
|
|
4689
4291
|
contextWindow: 131072,
|
|
4690
4292
|
supportsImages: false,
|
|
4691
4293
|
supportsPromptCache: false,
|
|
4692
|
-
supportsNativeTools: true,
|
|
4693
4294
|
inputPrice: 0.35,
|
|
4694
4295
|
outputPrice: 1.15,
|
|
4695
4296
|
description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
|
|
@@ -4699,7 +4300,6 @@ var vertexModels = {
|
|
|
4699
4300
|
contextWindow: 163840,
|
|
4700
4301
|
supportsImages: false,
|
|
4701
4302
|
supportsPromptCache: false,
|
|
4702
|
-
supportsNativeTools: true,
|
|
4703
4303
|
inputPrice: 1.35,
|
|
4704
4304
|
outputPrice: 5.4,
|
|
4705
4305
|
description: "DeepSeek R1 (0528). Available in us-central1"
|
|
@@ -4709,7 +4309,6 @@ var vertexModels = {
|
|
|
4709
4309
|
contextWindow: 163840,
|
|
4710
4310
|
supportsImages: false,
|
|
4711
4311
|
supportsPromptCache: false,
|
|
4712
|
-
supportsNativeTools: true,
|
|
4713
4312
|
inputPrice: 0.6,
|
|
4714
4313
|
outputPrice: 1.7,
|
|
4715
4314
|
description: "DeepSeek V3.1. Available in us-west2"
|
|
@@ -4719,7 +4318,6 @@ var vertexModels = {
|
|
|
4719
4318
|
contextWindow: 131072,
|
|
4720
4319
|
supportsImages: false,
|
|
4721
4320
|
supportsPromptCache: false,
|
|
4722
|
-
supportsNativeTools: true,
|
|
4723
4321
|
inputPrice: 0.15,
|
|
4724
4322
|
outputPrice: 0.6,
|
|
4725
4323
|
description: "OpenAI gpt-oss 120B. Available in us-central1"
|
|
@@ -4729,7 +4327,6 @@ var vertexModels = {
|
|
|
4729
4327
|
contextWindow: 131072,
|
|
4730
4328
|
supportsImages: false,
|
|
4731
4329
|
supportsPromptCache: false,
|
|
4732
|
-
supportsNativeTools: true,
|
|
4733
4330
|
inputPrice: 0.075,
|
|
4734
4331
|
outputPrice: 0.3,
|
|
4735
4332
|
description: "OpenAI gpt-oss 20B. Available in us-central1"
|
|
@@ -4739,7 +4336,6 @@ var vertexModels = {
|
|
|
4739
4336
|
contextWindow: 262144,
|
|
4740
4337
|
supportsImages: false,
|
|
4741
4338
|
supportsPromptCache: false,
|
|
4742
|
-
supportsNativeTools: true,
|
|
4743
4339
|
inputPrice: 1,
|
|
4744
4340
|
outputPrice: 4,
|
|
4745
4341
|
description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
|
|
@@ -4749,10 +4345,18 @@ var vertexModels = {
|
|
|
4749
4345
|
contextWindow: 262144,
|
|
4750
4346
|
supportsImages: false,
|
|
4751
4347
|
supportsPromptCache: false,
|
|
4752
|
-
supportsNativeTools: true,
|
|
4753
4348
|
inputPrice: 0.25,
|
|
4754
4349
|
outputPrice: 1,
|
|
4755
4350
|
description: "Qwen3 235B A22B Instruct. Available in us-south1"
|
|
4351
|
+
},
|
|
4352
|
+
"moonshotai/kimi-k2-thinking-maas": {
|
|
4353
|
+
maxTokens: 16384,
|
|
4354
|
+
contextWindow: 262144,
|
|
4355
|
+
supportsPromptCache: false,
|
|
4356
|
+
supportsImages: false,
|
|
4357
|
+
inputPrice: 0.6,
|
|
4358
|
+
outputPrice: 2.5,
|
|
4359
|
+
description: "Kimi K2 Thinking Model with 256K context window."
|
|
4756
4360
|
}
|
|
4757
4361
|
};
|
|
4758
4362
|
var VERTEX_1M_CONTEXT_MODEL_IDS = ["claude-sonnet-4@20250514", "claude-sonnet-4-5@20250929"];
|
|
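The only addition to vertexModels in this range is the Kimi K2 Thinking entry, and like every surviving record it no longer carries the supportsNativeTools / defaultToolProtocol flags. A minimal lookup sketch in TypeScript, assuming vertexModels is still re-exported from the package index as in earlier releases; the printed values are copied from the entry above:

    import { vertexModels } from "@roo-code/types" // assumed re-export

    const kimi = vertexModels["moonshotai/kimi-k2-thinking-maas"]
    console.log(kimi.contextWindow)                              // 262144
    console.log(kimi.maxTokens)                                  // 16384
    console.log(kimi.supportsImages, kimi.supportsPromptCache)   // false false
    // Per-model supportsNativeTools / defaultToolProtocol flags were dropped in 1.108.0,
    // so capability checks should rely on the fields that remain.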
@@ -4985,8 +4589,6 @@ var xaiModels = {
|
|
|
4985
4589
|
contextWindow: 256e3,
|
|
4986
4590
|
supportsImages: true,
|
|
4987
4591
|
supportsPromptCache: true,
|
|
4988
|
-
supportsNativeTools: true,
|
|
4989
|
-
defaultToolProtocol: "native",
|
|
4990
4592
|
inputPrice: 0.2,
|
|
4991
4593
|
outputPrice: 1.5,
|
|
4992
4594
|
cacheWritesPrice: 0.02,
|
|
@@ -5000,8 +4602,6 @@ var xaiModels = {
|
|
|
5000
4602
|
contextWindow: 2e6,
|
|
5001
4603
|
supportsImages: true,
|
|
5002
4604
|
supportsPromptCache: true,
|
|
5003
|
-
supportsNativeTools: true,
|
|
5004
|
-
defaultToolProtocol: "native",
|
|
5005
4605
|
inputPrice: 0.2,
|
|
5006
4606
|
outputPrice: 0.5,
|
|
5007
4607
|
cacheWritesPrice: 0.05,
|
|
@@ -5015,8 +4615,6 @@ var xaiModels = {
|
|
|
5015
4615
|
contextWindow: 2e6,
|
|
5016
4616
|
supportsImages: true,
|
|
5017
4617
|
supportsPromptCache: true,
|
|
5018
|
-
supportsNativeTools: true,
|
|
5019
|
-
defaultToolProtocol: "native",
|
|
5020
4618
|
inputPrice: 0.2,
|
|
5021
4619
|
outputPrice: 0.5,
|
|
5022
4620
|
cacheWritesPrice: 0.05,
|
|
@@ -5030,8 +4628,6 @@ var xaiModels = {
|
|
|
5030
4628
|
contextWindow: 2e6,
|
|
5031
4629
|
supportsImages: true,
|
|
5032
4630
|
supportsPromptCache: true,
|
|
5033
|
-
supportsNativeTools: true,
|
|
5034
|
-
defaultToolProtocol: "native",
|
|
5035
4631
|
inputPrice: 0.2,
|
|
5036
4632
|
outputPrice: 0.5,
|
|
5037
4633
|
cacheWritesPrice: 0.05,
|
|
@@ -5045,8 +4641,6 @@ var xaiModels = {
|
|
|
5045
4641
|
contextWindow: 2e6,
|
|
5046
4642
|
supportsImages: true,
|
|
5047
4643
|
supportsPromptCache: true,
|
|
5048
|
-
supportsNativeTools: true,
|
|
5049
|
-
defaultToolProtocol: "native",
|
|
5050
4644
|
inputPrice: 0.2,
|
|
5051
4645
|
outputPrice: 0.5,
|
|
5052
4646
|
cacheWritesPrice: 0.05,
|
|
@@ -5060,8 +4654,6 @@ var xaiModels = {
|
|
|
5060
4654
|
contextWindow: 256e3,
|
|
5061
4655
|
supportsImages: true,
|
|
5062
4656
|
supportsPromptCache: true,
|
|
5063
|
-
supportsNativeTools: true,
|
|
5064
|
-
defaultToolProtocol: "native",
|
|
5065
4657
|
inputPrice: 3,
|
|
5066
4658
|
outputPrice: 15,
|
|
5067
4659
|
cacheWritesPrice: 0.75,
|
|
@@ -5075,8 +4667,6 @@ var xaiModels = {
|
|
|
5075
4667
|
contextWindow: 131072,
|
|
5076
4668
|
supportsImages: true,
|
|
5077
4669
|
supportsPromptCache: true,
|
|
5078
|
-
supportsNativeTools: true,
|
|
5079
|
-
defaultToolProtocol: "native",
|
|
5080
4670
|
inputPrice: 0.3,
|
|
5081
4671
|
outputPrice: 0.5,
|
|
5082
4672
|
cacheWritesPrice: 0.07,
|
|
@@ -5092,8 +4682,6 @@ var xaiModels = {
|
|
|
5092
4682
|
contextWindow: 131072,
|
|
5093
4683
|
supportsImages: true,
|
|
5094
4684
|
supportsPromptCache: true,
|
|
5095
|
-
supportsNativeTools: true,
|
|
5096
|
-
defaultToolProtocol: "native",
|
|
5097
4685
|
inputPrice: 3,
|
|
5098
4686
|
outputPrice: 15,
|
|
5099
4687
|
cacheWritesPrice: 0.75,
|
|
@@ -5190,7 +4778,6 @@ var vercelAiGatewayDefaultModelInfo = {
|
|
|
5190
4778
|
contextWindow: 2e5,
|
|
5191
4779
|
supportsImages: true,
|
|
5192
4780
|
supportsPromptCache: true,
|
|
5193
|
-
supportsNativeTools: true,
|
|
5194
4781
|
inputPrice: 3,
|
|
5195
4782
|
outputPrice: 15,
|
|
5196
4783
|
cacheWritesPrice: 3.75,
|
|
@@ -5207,8 +4794,6 @@ var internationalZAiModels = {
|
|
|
5207
4794
|
contextWindow: 131072,
|
|
5208
4795
|
supportsImages: false,
|
|
5209
4796
|
supportsPromptCache: true,
|
|
5210
|
-
supportsNativeTools: true,
|
|
5211
|
-
defaultToolProtocol: "native",
|
|
5212
4797
|
inputPrice: 0.6,
|
|
5213
4798
|
outputPrice: 2.2,
|
|
5214
4799
|
cacheWritesPrice: 0,
|
|
@@ -5220,8 +4805,6 @@ var internationalZAiModels = {
|
|
|
5220
4805
|
contextWindow: 131072,
|
|
5221
4806
|
supportsImages: false,
|
|
5222
4807
|
supportsPromptCache: true,
|
|
5223
|
-
supportsNativeTools: true,
|
|
5224
|
-
defaultToolProtocol: "native",
|
|
5225
4808
|
inputPrice: 0.2,
|
|
5226
4809
|
outputPrice: 1.1,
|
|
5227
4810
|
cacheWritesPrice: 0,
|
|
@@ -5233,8 +4816,6 @@ var internationalZAiModels = {
|
|
|
5233
4816
|
contextWindow: 131072,
|
|
5234
4817
|
supportsImages: false,
|
|
5235
4818
|
supportsPromptCache: true,
|
|
5236
|
-
supportsNativeTools: true,
|
|
5237
|
-
defaultToolProtocol: "native",
|
|
5238
4819
|
inputPrice: 2.2,
|
|
5239
4820
|
outputPrice: 8.9,
|
|
5240
4821
|
cacheWritesPrice: 0,
|
|
@@ -5246,8 +4827,6 @@ var internationalZAiModels = {
|
|
|
5246
4827
|
contextWindow: 131072,
|
|
5247
4828
|
supportsImages: false,
|
|
5248
4829
|
supportsPromptCache: true,
|
|
5249
|
-
supportsNativeTools: true,
|
|
5250
|
-
defaultToolProtocol: "native",
|
|
5251
4830
|
inputPrice: 1.1,
|
|
5252
4831
|
outputPrice: 4.5,
|
|
5253
4832
|
cacheWritesPrice: 0,
|
|
@@ -5259,8 +4838,6 @@ var internationalZAiModels = {
|
|
|
5259
4838
|
contextWindow: 131072,
|
|
5260
4839
|
supportsImages: false,
|
|
5261
4840
|
supportsPromptCache: true,
|
|
5262
|
-
supportsNativeTools: true,
|
|
5263
|
-
defaultToolProtocol: "native",
|
|
5264
4841
|
inputPrice: 0,
|
|
5265
4842
|
outputPrice: 0,
|
|
5266
4843
|
cacheWritesPrice: 0,
|
|
@@ -5272,21 +4849,28 @@ var internationalZAiModels = {
|
|
|
5272
4849
|
contextWindow: 131072,
|
|
5273
4850
|
supportsImages: true,
|
|
5274
4851
|
supportsPromptCache: true,
|
|
5275
|
-
supportsNativeTools: true,
|
|
5276
|
-
defaultToolProtocol: "native",
|
|
5277
4852
|
inputPrice: 0.6,
|
|
5278
4853
|
outputPrice: 1.8,
|
|
5279
4854
|
cacheWritesPrice: 0,
|
|
5280
4855
|
cacheReadsPrice: 0.11,
|
|
5281
4856
|
description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
|
|
5282
4857
|
},
|
|
4858
|
+
"glm-4.6v": {
|
|
4859
|
+
maxTokens: 16384,
|
|
4860
|
+
contextWindow: 131072,
|
|
4861
|
+
supportsImages: true,
|
|
4862
|
+
supportsPromptCache: true,
|
|
4863
|
+
inputPrice: 0.3,
|
|
4864
|
+
outputPrice: 0.9,
|
|
4865
|
+
cacheWritesPrice: 0,
|
|
4866
|
+
cacheReadsPrice: 0.05,
|
|
4867
|
+
description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
|
|
4868
|
+
},
|
|
5283
4869
|
"glm-4.6": {
|
|
5284
4870
|
maxTokens: 16384,
|
|
5285
4871
|
contextWindow: 2e5,
|
|
5286
4872
|
supportsImages: false,
|
|
5287
4873
|
supportsPromptCache: true,
|
|
5288
|
-
supportsNativeTools: true,
|
|
5289
|
-
defaultToolProtocol: "native",
|
|
5290
4874
|
inputPrice: 0.6,
|
|
5291
4875
|
outputPrice: 2.2,
|
|
5292
4876
|
cacheWritesPrice: 0,
|
|
@@ -5298,8 +4882,6 @@ var internationalZAiModels = {
|
|
|
5298
4882
|
contextWindow: 2e5,
|
|
5299
4883
|
supportsImages: false,
|
|
5300
4884
|
supportsPromptCache: true,
|
|
5301
|
-
supportsNativeTools: true,
|
|
5302
|
-
defaultToolProtocol: "native",
|
|
5303
4885
|
supportsReasoningEffort: ["disable", "medium"],
|
|
5304
4886
|
reasoningEffort: "medium",
|
|
5305
4887
|
preserveReasoning: true,
|
|
@@ -5309,13 +4891,55 @@ var internationalZAiModels = {
|
|
|
5309
4891
|
cacheReadsPrice: 0.11,
|
|
5310
4892
|
description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
|
|
5311
4893
|
},
|
|
4894
|
+
"glm-4.7-flash": {
|
|
4895
|
+
maxTokens: 16384,
|
|
4896
|
+
contextWindow: 2e5,
|
|
4897
|
+
supportsImages: false,
|
|
4898
|
+
supportsPromptCache: true,
|
|
4899
|
+
inputPrice: 0,
|
|
4900
|
+
outputPrice: 0,
|
|
4901
|
+
cacheWritesPrice: 0,
|
|
4902
|
+
cacheReadsPrice: 0,
|
|
4903
|
+
description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
|
|
4904
|
+
},
|
|
4905
|
+
"glm-4.7-flashx": {
|
|
4906
|
+
maxTokens: 16384,
|
|
4907
|
+
contextWindow: 2e5,
|
|
4908
|
+
supportsImages: false,
|
|
4909
|
+
supportsPromptCache: true,
|
|
4910
|
+
inputPrice: 0.07,
|
|
4911
|
+
outputPrice: 0.4,
|
|
4912
|
+
cacheWritesPrice: 0,
|
|
4913
|
+
cacheReadsPrice: 0.01,
|
|
4914
|
+
description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
|
|
4915
|
+
},
|
|
4916
|
+
"glm-4.6v-flash": {
|
|
4917
|
+
maxTokens: 16384,
|
|
4918
|
+
contextWindow: 131072,
|
|
4919
|
+
supportsImages: true,
|
|
4920
|
+
supportsPromptCache: true,
|
|
4921
|
+
inputPrice: 0,
|
|
4922
|
+
outputPrice: 0,
|
|
4923
|
+
cacheWritesPrice: 0,
|
|
4924
|
+
cacheReadsPrice: 0,
|
|
4925
|
+
description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
|
|
4926
|
+
},
|
|
4927
|
+
"glm-4.6v-flashx": {
|
|
4928
|
+
maxTokens: 16384,
|
|
4929
|
+
contextWindow: 131072,
|
|
4930
|
+
supportsImages: true,
|
|
4931
|
+
supportsPromptCache: true,
|
|
4932
|
+
inputPrice: 0.04,
|
|
4933
|
+
outputPrice: 0.4,
|
|
4934
|
+
cacheWritesPrice: 0,
|
|
4935
|
+
cacheReadsPrice: 4e-3,
|
|
4936
|
+
description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
|
|
4937
|
+
},
|
|
5312
4938
|
"glm-4-32b-0414-128k": {
|
|
5313
4939
|
maxTokens: 16384,
|
|
5314
4940
|
contextWindow: 131072,
|
|
5315
4941
|
supportsImages: false,
|
|
5316
4942
|
supportsPromptCache: false,
|
|
5317
|
-
supportsNativeTools: true,
|
|
5318
|
-
defaultToolProtocol: "native",
|
|
5319
4943
|
inputPrice: 0.1,
|
|
5320
4944
|
outputPrice: 0.1,
|
|
5321
4945
|
cacheWritesPrice: 0,
|
|
@@ -5330,8 +4954,6 @@ var mainlandZAiModels = {
|
|
|
5330
4954
|
contextWindow: 131072,
|
|
5331
4955
|
supportsImages: false,
|
|
5332
4956
|
supportsPromptCache: true,
|
|
5333
|
-
supportsNativeTools: true,
|
|
5334
|
-
defaultToolProtocol: "native",
|
|
5335
4957
|
inputPrice: 0.29,
|
|
5336
4958
|
outputPrice: 1.14,
|
|
5337
4959
|
cacheWritesPrice: 0,
|
|
@@ -5343,8 +4965,6 @@ var mainlandZAiModels = {
|
|
|
5343
4965
|
contextWindow: 131072,
|
|
5344
4966
|
supportsImages: false,
|
|
5345
4967
|
supportsPromptCache: true,
|
|
5346
|
-
supportsNativeTools: true,
|
|
5347
|
-
defaultToolProtocol: "native",
|
|
5348
4968
|
inputPrice: 0.1,
|
|
5349
4969
|
outputPrice: 0.6,
|
|
5350
4970
|
cacheWritesPrice: 0,
|
|
@@ -5356,8 +4976,6 @@ var mainlandZAiModels = {
|
|
|
5356
4976
|
contextWindow: 131072,
|
|
5357
4977
|
supportsImages: false,
|
|
5358
4978
|
supportsPromptCache: true,
|
|
5359
|
-
supportsNativeTools: true,
|
|
5360
|
-
defaultToolProtocol: "native",
|
|
5361
4979
|
inputPrice: 0.29,
|
|
5362
4980
|
outputPrice: 1.14,
|
|
5363
4981
|
cacheWritesPrice: 0,
|
|
@@ -5369,8 +4987,6 @@ var mainlandZAiModels = {
|
|
|
5369
4987
|
contextWindow: 131072,
|
|
5370
4988
|
supportsImages: false,
|
|
5371
4989
|
supportsPromptCache: true,
|
|
5372
|
-
supportsNativeTools: true,
|
|
5373
|
-
defaultToolProtocol: "native",
|
|
5374
4990
|
inputPrice: 0.1,
|
|
5375
4991
|
outputPrice: 0.6,
|
|
5376
4992
|
cacheWritesPrice: 0,
|
|
@@ -5382,8 +4998,6 @@ var mainlandZAiModels = {
|
|
|
5382
4998
|
contextWindow: 131072,
|
|
5383
4999
|
supportsImages: false,
|
|
5384
5000
|
supportsPromptCache: true,
|
|
5385
|
-
supportsNativeTools: true,
|
|
5386
|
-
defaultToolProtocol: "native",
|
|
5387
5001
|
inputPrice: 0,
|
|
5388
5002
|
outputPrice: 0,
|
|
5389
5003
|
cacheWritesPrice: 0,
|
|
@@ -5395,8 +5009,6 @@ var mainlandZAiModels = {
|
|
|
5395
5009
|
contextWindow: 131072,
|
|
5396
5010
|
supportsImages: true,
|
|
5397
5011
|
supportsPromptCache: true,
|
|
5398
|
-
supportsNativeTools: true,
|
|
5399
|
-
defaultToolProtocol: "native",
|
|
5400
5012
|
inputPrice: 0.29,
|
|
5401
5013
|
outputPrice: 0.93,
|
|
5402
5014
|
cacheWritesPrice: 0,
|
|
@@ -5408,8 +5020,6 @@ var mainlandZAiModels = {
|
|
|
5408
5020
|
contextWindow: 204800,
|
|
5409
5021
|
supportsImages: false,
|
|
5410
5022
|
supportsPromptCache: true,
|
|
5411
|
-
supportsNativeTools: true,
|
|
5412
|
-
defaultToolProtocol: "native",
|
|
5413
5023
|
inputPrice: 0.29,
|
|
5414
5024
|
outputPrice: 1.14,
|
|
5415
5025
|
cacheWritesPrice: 0,
|
|
@@ -5421,8 +5031,6 @@ var mainlandZAiModels = {
|
|
|
5421
5031
|
contextWindow: 204800,
|
|
5422
5032
|
supportsImages: false,
|
|
5423
5033
|
supportsPromptCache: true,
|
|
5424
|
-
supportsNativeTools: true,
|
|
5425
|
-
defaultToolProtocol: "native",
|
|
5426
5034
|
supportsReasoningEffort: ["disable", "medium"],
|
|
5427
5035
|
reasoningEffort: "medium",
|
|
5428
5036
|
preserveReasoning: true,
|
|
@@ -5431,6 +5039,61 @@ var mainlandZAiModels = {
|
|
|
5431
5039
|
cacheWritesPrice: 0,
|
|
5432
5040
|
cacheReadsPrice: 0.057,
|
|
5433
5041
|
description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
|
|
5042
|
+
},
|
|
5043
|
+
"glm-4.7-flash": {
|
|
5044
|
+
maxTokens: 16384,
|
|
5045
|
+
contextWindow: 204800,
|
|
5046
|
+
supportsImages: false,
|
|
5047
|
+
supportsPromptCache: true,
|
|
5048
|
+
inputPrice: 0,
|
|
5049
|
+
outputPrice: 0,
|
|
5050
|
+
cacheWritesPrice: 0,
|
|
5051
|
+
cacheReadsPrice: 0,
|
|
5052
|
+
description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
|
|
5053
|
+
},
|
|
5054
|
+
"glm-4.7-flashx": {
|
|
5055
|
+
maxTokens: 16384,
|
|
5056
|
+
contextWindow: 204800,
|
|
5057
|
+
supportsImages: false,
|
|
5058
|
+
supportsPromptCache: true,
|
|
5059
|
+
inputPrice: 0.035,
|
|
5060
|
+
outputPrice: 0.2,
|
|
5061
|
+
cacheWritesPrice: 0,
|
|
5062
|
+
cacheReadsPrice: 5e-3,
|
|
5063
|
+
description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
|
|
5064
|
+
},
|
|
5065
|
+
"glm-4.6v": {
|
|
5066
|
+
maxTokens: 16384,
|
|
5067
|
+
contextWindow: 131072,
|
|
5068
|
+
supportsImages: true,
|
|
5069
|
+
supportsPromptCache: true,
|
|
5070
|
+
inputPrice: 0.15,
|
|
5071
|
+
outputPrice: 0.45,
|
|
5072
|
+
cacheWritesPrice: 0,
|
|
5073
|
+
cacheReadsPrice: 0.025,
|
|
5074
|
+
description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
|
|
5075
|
+
},
|
|
5076
|
+
"glm-4.6v-flash": {
|
|
5077
|
+
maxTokens: 16384,
|
|
5078
|
+
contextWindow: 131072,
|
|
5079
|
+
supportsImages: true,
|
|
5080
|
+
supportsPromptCache: true,
|
|
5081
|
+
inputPrice: 0,
|
|
5082
|
+
outputPrice: 0,
|
|
5083
|
+
cacheWritesPrice: 0,
|
|
5084
|
+
cacheReadsPrice: 0,
|
|
5085
|
+
description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
|
|
5086
|
+
},
|
|
5087
|
+
"glm-4.6v-flashx": {
|
|
5088
|
+
maxTokens: 16384,
|
|
5089
|
+
contextWindow: 131072,
|
|
5090
|
+
supportsImages: true,
|
|
5091
|
+
supportsPromptCache: true,
|
|
5092
|
+
inputPrice: 0.02,
|
|
5093
|
+
outputPrice: 0.2,
|
|
5094
|
+
cacheWritesPrice: 0,
|
|
5095
|
+
cacheReadsPrice: 2e-3,
|
|
5096
|
+
description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
|
|
5434
5097
|
}
|
|
5435
5098
|
};
|
|
5436
5099
|
var ZAI_DEFAULT_TEMPERATURE = 0.6;
|
|
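The GLM 4.6V and 4.7 Flash variants added above are plain pricing data, quoted (per the "$ per million input tokens" comments used elsewhere in this file) in USD per million tokens. A worked cost estimate using the mainland glm-4.7-flashx figures from this diff; the helper name and token counts are illustrative only:

    // Prices copied from the mainlandZAiModels entry above (USD per 1M tokens).
    const inputPricePerMTok = 0.035
    const outputPricePerMTok = 0.2

    function estimateCostUSD(inputTokens: number, outputTokens: number): number {
      return (inputTokens / 1e6) * inputPricePerMTok + (outputTokens / 1e6) * outputPricePerMTok
    }

    console.log(estimateCostUSD(120_000, 8_000).toFixed(6)) // 0.005800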
@@ -5464,7 +5127,6 @@ var deepInfraDefaultModelInfo = {
|
|
|
5464
5127
|
contextWindow: 262144,
|
|
5465
5128
|
supportsImages: false,
|
|
5466
5129
|
supportsPromptCache: false,
|
|
5467
|
-
supportsNativeTools: true,
|
|
5468
5130
|
inputPrice: 0.3,
|
|
5469
5131
|
outputPrice: 1.2,
|
|
5470
5132
|
description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
|
|
@@ -5478,8 +5140,6 @@ var minimaxModels = {
|
|
|
5478
5140
|
contextWindow: 192e3,
|
|
5479
5141
|
supportsImages: false,
|
|
5480
5142
|
supportsPromptCache: true,
|
|
5481
|
-
supportsNativeTools: true,
|
|
5482
|
-
defaultToolProtocol: "native",
|
|
5483
5143
|
includedTools: ["search_and_replace"],
|
|
5484
5144
|
excludedTools: ["apply_diff"],
|
|
5485
5145
|
preserveReasoning: true,
|
|
@@ -5494,8 +5154,6 @@ var minimaxModels = {
|
|
|
5494
5154
|
contextWindow: 192e3,
|
|
5495
5155
|
supportsImages: false,
|
|
5496
5156
|
supportsPromptCache: true,
|
|
5497
|
-
supportsNativeTools: true,
|
|
5498
|
-
defaultToolProtocol: "native",
|
|
5499
5157
|
includedTools: ["search_and_replace"],
|
|
5500
5158
|
excludedTools: ["apply_diff"],
|
|
5501
5159
|
preserveReasoning: true,
|
|
@@ -5510,8 +5168,6 @@ var minimaxModels = {
|
|
|
5510
5168
|
contextWindow: 192e3,
|
|
5511
5169
|
supportsImages: false,
|
|
5512
5170
|
supportsPromptCache: true,
|
|
5513
|
-
supportsNativeTools: true,
|
|
5514
|
-
defaultToolProtocol: "native",
|
|
5515
5171
|
includedTools: ["search_and_replace"],
|
|
5516
5172
|
excludedTools: ["apply_diff"],
|
|
5517
5173
|
preserveReasoning: true,
|
|
@@ -5583,8 +5239,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
|
|
|
5583
5239
|
return deepInfraDefaultModelId;
|
|
5584
5240
|
case "vscode-lm":
|
|
5585
5241
|
return vscodeLlmDefaultModelId;
|
|
5586
|
-
case "claude-code":
|
|
5587
|
-
return claudeCodeDefaultModelId;
|
|
5588
5242
|
case "cerebras":
|
|
5589
5243
|
return cerebrasDefaultModelId;
|
|
5590
5244
|
case "sambanova":
|
|
@@ -5642,7 +5296,6 @@ var providerNames = [
|
|
|
5642
5296
|
"bedrock",
|
|
5643
5297
|
"baseten",
|
|
5644
5298
|
"cerebras",
|
|
5645
|
-
"claude-code",
|
|
5646
5299
|
"doubao",
|
|
5647
5300
|
"deepseek",
|
|
5648
5301
|
"featherless",
|
|
@@ -5672,9 +5325,7 @@ var providerSettingsEntrySchema = import_zod8.z.object({
|
|
|
5672
5325
|
});
|
|
5673
5326
|
var baseProviderSettingsSchema = import_zod8.z.object({
|
|
5674
5327
|
includeMaxTokens: import_zod8.z.boolean().optional(),
|
|
5675
|
-
diffEnabled: import_zod8.z.boolean().optional(),
|
|
5676
5328
|
todoListEnabled: import_zod8.z.boolean().optional(),
|
|
5677
|
-
fuzzyMatchThreshold: import_zod8.z.number().optional(),
|
|
5678
5329
|
modelTemperature: import_zod8.z.number().nullish(),
|
|
5679
5330
|
rateLimitSeconds: import_zod8.z.number().optional(),
|
|
5680
5331
|
consecutiveMistakeLimit: import_zod8.z.number().min(0).optional(),
|
|
@@ -5684,9 +5335,7 @@ var baseProviderSettingsSchema = import_zod8.z.object({
|
|
|
5684
5335
|
modelMaxTokens: import_zod8.z.number().optional(),
|
|
5685
5336
|
modelMaxThinkingTokens: import_zod8.z.number().optional(),
|
|
5686
5337
|
// Model verbosity.
|
|
5687
|
-
verbosity: verbosityLevelsSchema.optional()
|
|
5688
|
-
// Tool protocol override for this profile.
|
|
5689
|
-
toolProtocol: import_zod8.z.enum(["xml", "native"]).optional()
|
|
5338
|
+
verbosity: verbosityLevelsSchema.optional()
|
|
5690
5339
|
});
|
|
5691
5340
|
var apiModelIdProviderModelSchema = baseProviderSettingsSchema.extend({
|
|
5692
5341
|
apiModelId: import_zod8.z.string().optional()
|
|
@@ -5698,7 +5347,6 @@ var anthropicSchema = apiModelIdProviderModelSchema.extend({
|
|
|
5698
5347
|
anthropicBeta1MContext: import_zod8.z.boolean().optional()
|
|
5699
5348
|
// Enable 'context-1m-2025-08-07' beta for 1M context window.
|
|
5700
5349
|
});
|
|
5701
|
-
var claudeCodeSchema = apiModelIdProviderModelSchema.extend({});
|
|
5702
5350
|
var openRouterSchema = baseProviderSettingsSchema.extend({
|
|
5703
5351
|
openRouterApiKey: import_zod8.z.string().optional(),
|
|
5704
5352
|
openRouterModelId: import_zod8.z.string().optional(),
|
|
@@ -5887,7 +5535,6 @@ var defaultSchema = import_zod8.z.object({
|
|
|
5887
5535
|
});
|
|
5888
5536
|
var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiProvider", [
|
|
5889
5537
|
anthropicSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("anthropic") })),
|
|
5890
|
-
claudeCodeSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("claude-code") })),
|
|
5891
5538
|
openRouterSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openrouter") })),
|
|
5892
5539
|
bedrockSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("bedrock") })),
|
|
5893
5540
|
vertexSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("vertex") })),
|
|
@@ -5928,7 +5575,6 @@ var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiP
|
|
|
5928
5575
|
var providerSettingsSchema = import_zod8.z.object({
|
|
5929
5576
|
apiProvider: providerNamesSchema.optional(),
|
|
5930
5577
|
...anthropicSchema.shape,
|
|
5931
|
-
...claudeCodeSchema.shape,
|
|
5932
5578
|
...openRouterSchema.shape,
|
|
5933
5579
|
...bedrockSchema.shape,
|
|
5934
5580
|
...vertexSchema.shape,
|
|
@@ -5993,7 +5639,6 @@ var getModelId = (settings) => {
|
|
|
5993
5639
|
var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key) && !isCustomProvider(key) && !isFauxProvider(key);
|
|
5994
5640
|
var modelIdKeysByProvider = {
|
|
5995
5641
|
anthropic: "apiModelId",
|
|
5996
|
-
"claude-code": "apiModelId",
|
|
5997
5642
|
openrouter: "openRouterModelId",
|
|
5998
5643
|
bedrock: "apiModelId",
|
|
5999
5644
|
vertex: "apiModelId",
|
|
@@ -6027,7 +5672,7 @@ var modelIdKeysByProvider = {
|
|
|
6027
5672
|
roo: "apiModelId",
|
|
6028
5673
|
"vercel-ai-gateway": "vercelAiGatewayModelId"
|
|
6029
5674
|
};
|
|
6030
|
-
var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "
|
|
5675
|
+
var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "bedrock", "minimax"];
|
|
6031
5676
|
var getApiProtocol = (provider, modelId) => {
|
|
6032
5677
|
if (provider && ANTHROPIC_STYLE_PROVIDERS.includes(provider)) {
|
|
6033
5678
|
return "anthropic";
|
|
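With the claude-code provider removed, ANTHROPIC_STYLE_PROVIDERS shrinks to anthropic, bedrock and minimax, and getApiProtocol resolves those to the Anthropic wire protocol. A short sketch (getApiProtocol appears in the export list later in this diff; the non-Anthropic fallback branch lies outside this hunk, so only the branch shown above is exercised):

    import { getApiProtocol } from "@roo-code/types"

    // modelId is not consulted on the branch shown above, so undefined is fine for the sketch.
    console.log(getApiProtocol("minimax", undefined)) // "anthropic"
    console.log(getApiProtocol("bedrock", undefined)) // "anthropic"
    // "claude-code" is no longer a member of providerNames at all in 1.108.0.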
@@ -6056,7 +5701,6 @@ var MODELS_BY_PROVIDER = {
|
|
|
6056
5701
|
label: "Cerebras",
|
|
6057
5702
|
models: Object.keys(cerebrasModels)
|
|
6058
5703
|
},
|
|
6059
|
-
"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
|
|
6060
5704
|
deepseek: {
|
|
6061
5705
|
id: "deepseek",
|
|
6062
5706
|
label: "DeepSeek",
|
|
@@ -6160,16 +5804,6 @@ var historyItemSchema = import_zod9.z.object({
|
|
|
6160
5804
|
size: import_zod9.z.number().optional(),
|
|
6161
5805
|
workspace: import_zod9.z.string().optional(),
|
|
6162
5806
|
mode: import_zod9.z.string().optional(),
|
|
6163
|
-
/**
|
|
6164
|
-
* The tool protocol used by this task. Once a task uses tools with a specific
|
|
6165
|
-
* protocol (XML or Native), it is permanently locked to that protocol.
|
|
6166
|
-
*
|
|
6167
|
-
* - "xml": Tool calls are parsed from XML text (no tool IDs)
|
|
6168
|
-
* - "native": Tool calls come as tool_call chunks with IDs
|
|
6169
|
-
*
|
|
6170
|
-
* This ensures task resumption works correctly even when NTC settings change.
|
|
6171
|
-
*/
|
|
6172
|
-
toolProtocol: import_zod9.z.enum(["xml", "native"]).optional(),
|
|
6173
5807
|
apiConfigName: import_zod9.z.string().optional(),
|
|
6174
5808
|
// Provider profile name for sticky profile feature
|
|
6175
5809
|
status: import_zod9.z.enum(["active", "completed", "delegated"]).optional(),
|
|
@@ -6187,23 +5821,12 @@ var historyItemSchema = import_zod9.z.object({
|
|
|
6187
5821
|
|
|
6188
5822
|
// src/experiment.ts
|
|
6189
5823
|
var import_zod10 = require("zod");
|
|
6190
|
-
var experimentIds = [
|
|
6191
|
-
"powerSteering",
|
|
6192
|
-
"multiFileApplyDiff",
|
|
6193
|
-
"preventFocusDisruption",
|
|
6194
|
-
"imageGeneration",
|
|
6195
|
-
"runSlashCommand",
|
|
6196
|
-
"multipleNativeToolCalls",
|
|
6197
|
-
"customTools"
|
|
6198
|
-
];
|
|
5824
|
+
var experimentIds = ["preventFocusDisruption", "imageGeneration", "runSlashCommand", "customTools"];
|
|
6199
5825
|
var experimentIdsSchema = import_zod10.z.enum(experimentIds);
|
|
6200
5826
|
var experimentsSchema = import_zod10.z.object({
|
|
6201
|
-
powerSteering: import_zod10.z.boolean().optional(),
|
|
6202
|
-
multiFileApplyDiff: import_zod10.z.boolean().optional(),
|
|
6203
5827
|
preventFocusDisruption: import_zod10.z.boolean().optional(),
|
|
6204
5828
|
imageGeneration: import_zod10.z.boolean().optional(),
|
|
6205
5829
|
runSlashCommand: import_zod10.z.boolean().optional(),
|
|
6206
|
-
multipleNativeToolCalls: import_zod10.z.boolean().optional(),
|
|
6207
5830
|
customTools: import_zod10.z.boolean().optional()
|
|
6208
5831
|
});
|
|
6209
5832
|
|
|
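The experiment surface narrows to four flags (preventFocusDisruption, imageGeneration, runSlashCommand, customTools); powerSteering, multiFileApplyDiff and multipleNativeToolCalls are gone from both the id list and the schema. A validation sketch, assuming experimentsSchema is re-exported from the package index:

    import { experimentsSchema } from "@roo-code/types" // assumed re-export

    const result = experimentsSchema.safeParse({
      preventFocusDisruption: true,
      imageGeneration: false,
      runSlashCommand: true,
      customTools: false,
    })
    console.log(result.success) // true

    // Zod objects strip unknown keys by default, so a stale flag such as
    // { powerSteering: true } is silently dropped rather than rejected.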
@@ -6255,6 +5878,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
|
|
|
6255
5878
|
TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
|
|
6256
5879
|
TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
|
|
6257
5880
|
TelemetryEventName2["MODEL_CACHE_EMPTY_RESPONSE"] = "Model Cache Empty Response";
|
|
5881
|
+
TelemetryEventName2["READ_FILE_LEGACY_FORMAT_USED"] = "Read File Legacy Format Used";
|
|
6258
5882
|
return TelemetryEventName2;
|
|
6259
5883
|
})(TelemetryEventName || {});
|
|
6260
5884
|
var staticAppPropertiesSchema = import_zod11.z.object({
|
|
@@ -6343,7 +5967,8 @@ var rooCodeTelemetryEventSchema = import_zod11.z.discriminatedUnion("type", [
|
|
|
6343
5967
|
"Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */,
|
|
6344
5968
|
"Tab Shown" /* TAB_SHOWN */,
|
|
6345
5969
|
"Mode Setting Changed" /* MODE_SETTINGS_CHANGED */,
|
|
6346
|
-
"Custom Mode Created" /* CUSTOM_MODE_CREATED
|
|
5970
|
+
"Custom Mode Created" /* CUSTOM_MODE_CREATED */,
|
|
5971
|
+
"Read File Legacy Format Used" /* READ_FILE_LEGACY_FORMAT_USED */
|
|
6347
5972
|
]),
|
|
6348
5973
|
properties: telemetryPropertiesSchema
|
|
6349
5974
|
}),
|
|
@@ -6657,7 +6282,15 @@ var isLanguage = (value) => languages.includes(value);
|
|
|
6657
6282
|
|
|
6658
6283
|
// src/global-settings.ts
|
|
6659
6284
|
var DEFAULT_WRITE_DELAY_MS = 1e3;
|
|
6660
|
-
var
|
|
6285
|
+
var TERMINAL_PREVIEW_BYTES = {
|
|
6286
|
+
small: 5 * 1024,
|
|
6287
|
+
// 5KB
|
|
6288
|
+
medium: 10 * 1024,
|
|
6289
|
+
// 10KB
|
|
6290
|
+
large: 20 * 1024
|
|
6291
|
+
// 20KB
|
|
6292
|
+
};
|
|
6293
|
+
var DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE = "medium";
|
|
6661
6294
|
var MIN_CHECKPOINT_TIMEOUT_SECONDS = 10;
|
|
6662
6295
|
var MAX_CHECKPOINT_TIMEOUT_SECONDS = 60;
|
|
6663
6296
|
var DEFAULT_CHECKPOINT_TIMEOUT_SECONDS = 15;
|
|
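The single terminal output character limit gives way to a three-step size map plus a default key. Resolving a setting to a byte budget is a lookup; a sketch using the two new exports (the cast is only there in case the published typing widens the default to a plain string):

    import { TERMINAL_PREVIEW_BYTES, DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE } from "@roo-code/types"

    type PreviewSize = keyof typeof TERMINAL_PREVIEW_BYTES // "small" | "medium" | "large"

    function previewByteBudget(size?: PreviewSize): number {
      return TERMINAL_PREVIEW_BYTES[(size ?? DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE) as PreviewSize]
    }

    console.log(previewByteBudget())        // 10240 (medium, the default)
    console.log(previewByteBudget("large")) // 20480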
@@ -6673,7 +6306,6 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6673
6306
|
imageGenerationProvider: import_zod14.z.enum(["openrouter", "roo"]).optional(),
|
|
6674
6307
|
openRouterImageApiKey: import_zod14.z.string().optional(),
|
|
6675
6308
|
openRouterImageGenerationSelectedModel: import_zod14.z.string().optional(),
|
|
6676
|
-
condensingApiConfigId: import_zod14.z.string().optional(),
|
|
6677
6309
|
customCondensingPrompt: import_zod14.z.string().optional(),
|
|
6678
6310
|
autoApprovalEnabled: import_zod14.z.boolean().optional(),
|
|
6679
6311
|
alwaysAllowReadOnly: import_zod14.z.boolean().optional(),
|
|
@@ -6699,7 +6331,6 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6699
6331
|
allowedMaxCost: import_zod14.z.number().nullish(),
|
|
6700
6332
|
autoCondenseContext: import_zod14.z.boolean().optional(),
|
|
6701
6333
|
autoCondenseContextPercent: import_zod14.z.number().optional(),
|
|
6702
|
-
maxConcurrentFileReads: import_zod14.z.number().optional(),
|
|
6703
6334
|
/**
|
|
6704
6335
|
* Whether to include current time in the environment details
|
|
6705
6336
|
* @default true
|
|
@@ -6742,11 +6373,9 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6742
6373
|
maxWorkspaceFiles: import_zod14.z.number().optional(),
|
|
6743
6374
|
showRooIgnoredFiles: import_zod14.z.boolean().optional(),
|
|
6744
6375
|
enableSubfolderRules: import_zod14.z.boolean().optional(),
|
|
6745
|
-
maxReadFileLine: import_zod14.z.number().optional(),
|
|
6746
6376
|
maxImageFileSize: import_zod14.z.number().optional(),
|
|
6747
6377
|
maxTotalImageSize: import_zod14.z.number().optional(),
|
|
6748
|
-
|
|
6749
|
-
terminalOutputCharacterLimit: import_zod14.z.number().optional(),
|
|
6378
|
+
terminalOutputPreviewSize: import_zod14.z.enum(["small", "medium", "large"]).optional(),
|
|
6750
6379
|
terminalShellIntegrationTimeout: import_zod14.z.number().optional(),
|
|
6751
6380
|
terminalShellIntegrationDisabled: import_zod14.z.boolean().optional(),
|
|
6752
6381
|
terminalCommandDelay: import_zod14.z.number().optional(),
|
|
@@ -6755,18 +6384,14 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6755
6384
|
terminalZshOhMy: import_zod14.z.boolean().optional(),
|
|
6756
6385
|
terminalZshP10k: import_zod14.z.boolean().optional(),
|
|
6757
6386
|
terminalZdotdir: import_zod14.z.boolean().optional(),
|
|
6758
|
-
terminalCompressProgressBar: import_zod14.z.boolean().optional(),
|
|
6759
6387
|
diagnosticsEnabled: import_zod14.z.boolean().optional(),
|
|
6760
6388
|
rateLimitSeconds: import_zod14.z.number().optional(),
|
|
6761
|
-
diffEnabled: import_zod14.z.boolean().optional(),
|
|
6762
|
-
fuzzyMatchThreshold: import_zod14.z.number().optional(),
|
|
6763
6389
|
experiments: experimentsSchema.optional(),
|
|
6764
6390
|
codebaseIndexModels: codebaseIndexModelsSchema.optional(),
|
|
6765
6391
|
codebaseIndexConfig: codebaseIndexConfigSchema.optional(),
|
|
6766
6392
|
language: languagesSchema.optional(),
|
|
6767
6393
|
telemetrySetting: telemetrySettingsSchema.optional(),
|
|
6768
6394
|
mcpEnabled: import_zod14.z.boolean().optional(),
|
|
6769
|
-
enableMcpServerCreation: import_zod14.z.boolean().optional(),
|
|
6770
6395
|
mode: import_zod14.z.string().optional(),
|
|
6771
6396
|
modeApiConfigs: import_zod14.z.record(import_zod14.z.string(), import_zod14.z.string()).optional(),
|
|
6772
6397
|
customModes: import_zod14.z.array(modeConfigSchema).optional(),
|
|
@@ -6786,7 +6411,20 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6786
6411
|
profileThresholds: import_zod14.z.record(import_zod14.z.string(), import_zod14.z.number()).optional(),
|
|
6787
6412
|
hasOpenedModeSelector: import_zod14.z.boolean().optional(),
|
|
6788
6413
|
lastModeExportPath: import_zod14.z.string().optional(),
|
|
6789
|
-
lastModeImportPath: import_zod14.z.string().optional()
|
|
6414
|
+
lastModeImportPath: import_zod14.z.string().optional(),
|
|
6415
|
+
lastSettingsExportPath: import_zod14.z.string().optional(),
|
|
6416
|
+
lastTaskExportPath: import_zod14.z.string().optional(),
|
|
6417
|
+
lastImageSavePath: import_zod14.z.string().optional(),
|
|
6418
|
+
/**
|
|
6419
|
+
* Path to worktree to auto-open after switching workspaces.
|
|
6420
|
+
* Used by the worktree feature to open the Roo Code sidebar in a new window.
|
|
6421
|
+
*/
|
|
6422
|
+
worktreeAutoOpenPath: import_zod14.z.string().optional(),
|
|
6423
|
+
/**
|
|
6424
|
+
* Whether to show the worktree selector in the home screen.
|
|
6425
|
+
* @default true
|
|
6426
|
+
*/
|
|
6427
|
+
showWorktreesInHomeScreen: import_zod14.z.boolean().optional()
|
|
6790
6428
|
});
|
|
6791
6429
|
var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
|
|
6792
6430
|
var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
|
|
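Several settings leave globalSettingsSchema in this release (condensingApiConfigId, maxConcurrentFileReads, maxReadFileLine, terminalOutputCharacterLimit, terminalCompressProgressBar, diffEnabled, fuzzyMatchThreshold, enableMcpServerCreation) while new optional keys arrive for terminal preview size, sticky export paths and worktrees. A parse sketch, assuming globalSettingsSchema is re-exported from the package index; the keys shown are optional, so a partial object parses:

    import { globalSettingsSchema } from "@roo-code/types" // assumed re-export

    const parsed = globalSettingsSchema.safeParse({
      terminalOutputPreviewSize: "small",               // new: "small" | "medium" | "large"
      showWorktreesInHomeScreen: true,                  // new worktree flag, documented default true
      lastSettingsExportPath: "/tmp/roo-settings.json", // new sticky export path
    })
    console.log(parsed.success) // true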
@@ -6870,8 +6508,6 @@ var EVALS_SETTINGS = {
|
|
|
6870
6508
|
ttsSpeed: 1,
|
|
6871
6509
|
soundEnabled: false,
|
|
6872
6510
|
soundVolume: 0.5,
|
|
6873
|
-
terminalOutputLineLimit: 500,
|
|
6874
|
-
terminalOutputCharacterLimit: DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT,
|
|
6875
6511
|
terminalShellIntegrationTimeout: 3e4,
|
|
6876
6512
|
terminalCommandDelay: 0,
|
|
6877
6513
|
terminalPowershellCounter: false,
|
|
@@ -6879,19 +6515,14 @@ var EVALS_SETTINGS = {
|
|
|
6879
6515
|
terminalZshClearEolMark: true,
|
|
6880
6516
|
terminalZshP10k: false,
|
|
6881
6517
|
terminalZdotdir: true,
|
|
6882
|
-
terminalCompressProgressBar: true,
|
|
6883
6518
|
terminalShellIntegrationDisabled: true,
|
|
6884
6519
|
diagnosticsEnabled: true,
|
|
6885
|
-
diffEnabled: true,
|
|
6886
|
-
fuzzyMatchThreshold: 1,
|
|
6887
6520
|
enableCheckpoints: false,
|
|
6888
6521
|
rateLimitSeconds: 0,
|
|
6889
6522
|
maxOpenTabsContext: 20,
|
|
6890
6523
|
maxWorkspaceFiles: 200,
|
|
6891
6524
|
maxGitStatusFiles: 20,
|
|
6892
6525
|
showRooIgnoredFiles: true,
|
|
6893
|
-
maxReadFileLine: -1,
|
|
6894
|
-
// -1 to enable full file reading.
|
|
6895
6526
|
includeDiagnosticMessages: true,
|
|
6896
6527
|
maxDiagnosticMessages: 50,
|
|
6897
6528
|
language: "en",
|
|
@@ -6965,24 +6596,18 @@ var organizationAllowListSchema = import_zod16.z.object({
|
|
|
6965
6596
|
});
|
|
6966
6597
|
var organizationDefaultSettingsSchema = globalSettingsSchema.pick({
|
|
6967
6598
|
enableCheckpoints: true,
|
|
6968
|
-
fuzzyMatchThreshold: true,
|
|
6969
6599
|
maxOpenTabsContext: true,
|
|
6970
|
-
maxReadFileLine: true,
|
|
6971
6600
|
maxWorkspaceFiles: true,
|
|
6972
6601
|
showRooIgnoredFiles: true,
|
|
6973
6602
|
terminalCommandDelay: true,
|
|
6974
|
-
terminalCompressProgressBar: true,
|
|
6975
|
-
terminalOutputLineLimit: true,
|
|
6976
6603
|
terminalShellIntegrationDisabled: true,
|
|
6977
6604
|
terminalShellIntegrationTimeout: true,
|
|
6978
6605
|
terminalZshClearEolMark: true
|
|
6979
6606
|
}).merge(
|
|
6980
6607
|
import_zod16.z.object({
|
|
6981
6608
|
maxOpenTabsContext: import_zod16.z.number().int().nonnegative().optional(),
|
|
6982
|
-
maxReadFileLine: import_zod16.z.number().int().gte(-1).optional(),
|
|
6983
6609
|
maxWorkspaceFiles: import_zod16.z.number().int().nonnegative().optional(),
|
|
6984
6610
|
terminalCommandDelay: import_zod16.z.number().int().nonnegative().optional(),
|
|
6985
|
-
terminalOutputLineLimit: import_zod16.z.number().int().nonnegative().optional(),
|
|
6986
6611
|
terminalShellIntegrationTimeout: import_zod16.z.number().int().nonnegative().optional()
|
|
6987
6612
|
})
|
|
6988
6613
|
);
|
|
@@ -7498,6 +7123,7 @@ var ipcMessageSchema = import_zod18.z.discriminatedUnion("type", [
|
|
|
7498
7123
|
|
|
7499
7124
|
// src/mcp.ts
|
|
7500
7125
|
var import_zod19 = require("zod");
|
|
7126
|
+
var MAX_MCP_TOOLS_THRESHOLD = 60;
|
|
7501
7127
|
var mcpExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
|
|
7502
7128
|
import_zod19.z.object({
|
|
7503
7129
|
executionId: import_zod19.z.string(),
|
|
@@ -7521,6 +7147,46 @@ var mcpExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
|
|
|
7521
7147
|
error: import_zod19.z.string().optional()
|
|
7522
7148
|
})
|
|
7523
7149
|
]);
|
|
7150
|
+
function countEnabledMcpTools(servers) {
|
|
7151
|
+
let serverCount = 0;
|
|
7152
|
+
let toolCount = 0;
|
|
7153
|
+
for (const server of servers) {
|
|
7154
|
+
if (server.disabled) continue;
|
|
7155
|
+
if (server.status !== "connected") continue;
|
|
7156
|
+
serverCount++;
|
|
7157
|
+
if (server.tools) {
|
|
7158
|
+
for (const tool of server.tools) {
|
|
7159
|
+
if (tool.enabledForPrompt !== false) {
|
|
7160
|
+
toolCount++;
|
|
7161
|
+
}
|
|
7162
|
+
}
|
|
7163
|
+
}
|
|
7164
|
+
}
|
|
7165
|
+
return { enabledToolCount: toolCount, enabledServerCount: serverCount };
|
|
7166
|
+
}
|
|
7167
|
+
|
|
7168
|
+
// src/skills.ts
|
|
7169
|
+
var SKILL_NAME_MIN_LENGTH = 1;
|
|
7170
|
+
var SKILL_NAME_MAX_LENGTH = 64;
|
|
7171
|
+
var SKILL_NAME_REGEX = /^[a-z0-9]+(?:-[a-z0-9]+)*$/;
|
|
7172
|
+
var SkillNameValidationError = /* @__PURE__ */ ((SkillNameValidationError2) => {
|
|
7173
|
+
SkillNameValidationError2["Empty"] = "empty";
|
|
7174
|
+
SkillNameValidationError2["TooLong"] = "too_long";
|
|
7175
|
+
SkillNameValidationError2["InvalidFormat"] = "invalid_format";
|
|
7176
|
+
return SkillNameValidationError2;
|
|
7177
|
+
})(SkillNameValidationError || {});
|
|
7178
|
+
function validateSkillName(name) {
|
|
7179
|
+
if (!name || name.length < SKILL_NAME_MIN_LENGTH) {
|
|
7180
|
+
return { valid: false, error: "empty" /* Empty */ };
|
|
7181
|
+
}
|
|
7182
|
+
if (name.length > SKILL_NAME_MAX_LENGTH) {
|
|
7183
|
+
return { valid: false, error: "too_long" /* TooLong */ };
|
|
7184
|
+
}
|
|
7185
|
+
if (!SKILL_NAME_REGEX.test(name)) {
|
|
7186
|
+
return { valid: false, error: "invalid_format" /* InvalidFormat */ };
|
|
7187
|
+
}
|
|
7188
|
+
return { valid: true };
|
|
7189
|
+
}
|
|
7524
7190
|
|
|
7525
7191
|
// src/todo.ts
|
|
7526
7192
|
var import_zod20 = require("zod");
|
|
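Two helper groups land in the hunk above: countEnabledMcpTools tallies connected, non-disabled servers and their prompt-enabled tools (meant to be weighed against MAX_MCP_TOOLS_THRESHOLD, now 60), and the new skills block enforces a 1 to 64 character lowercase kebab-case name rule via validateSkillName. A combined usage sketch; the server objects are stand-ins carrying only the fields the counter reads, and the cast papers over the McpServer type that is not reproduced here:

    import {
      countEnabledMcpTools,
      MAX_MCP_TOOLS_THRESHOLD,
      validateSkillName,
    } from "@roo-code/types"

    const servers = [
      { name: "docs", status: "connected", disabled: false,
        tools: [{ name: "search" }, { name: "fetch", enabledForPrompt: false }] },
      { name: "ci", status: "connecting", disabled: false, tools: [{ name: "run" }] }, // skipped: not connected
    ] as any[]

    const { enabledServerCount, enabledToolCount } = countEnabledMcpTools(servers)
    console.log(enabledServerCount, enabledToolCount)       // 1 1
    console.log(enabledToolCount > MAX_MCP_TOOLS_THRESHOLD) // false, well under 60

    console.log(validateSkillName("pdf-tools")) // { valid: true }
    console.log(validateSkillName("PDF Tools")) // { valid: false, error: "invalid_format" }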
@@ -7560,6 +7226,11 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
|
|
|
7560
7226
|
})
|
|
7561
7227
|
]);
|
|
7562
7228
|
|
|
7229
|
+
// src/tool-params.ts
|
|
7230
|
+
function isLegacyReadFileParams(params) {
|
|
7231
|
+
return "_legacyFormat" in params && params._legacyFormat === true;
|
|
7232
|
+
}
|
|
7233
|
+
|
|
7563
7234
|
// src/vscode-extension-host.ts
|
|
7564
7235
|
var import_zod22 = require("zod");
|
|
7565
7236
|
var checkoutDiffPayloadSchema = import_zod22.z.object({
|
|
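isLegacyReadFileParams is a one-line guard keyed on a _legacyFormat: true marker, and it pairs with the "Read File Legacy Format Used" telemetry event added earlier in this diff. A sketch; the param objects are hypothetical and loosely typed, since the real read-file param types are not part of this hunk:

    import { isLegacyReadFileParams } from "@roo-code/types"

    const legacyParams = { _legacyFormat: true, path: "src/index.ts" }
    const currentParams = { files: [{ path: "src/index.ts" }] }

    console.log(isLegacyReadFileParams(legacyParams as any))  // true
    console.log(isLegacyReadFileParams(currentParams as any)) // false
    // The true branch is the natural place to record the legacy-format telemetry event.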
@@ -7613,7 +7284,7 @@ var browserActions = [
|
|
|
7613
7284
|
DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
|
|
7614
7285
|
DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
|
|
7615
7286
|
DEFAULT_MODES,
|
|
7616
|
-
|
|
7287
|
+
DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE,
|
|
7617
7288
|
DEFAULT_WRITE_DELAY_MS,
|
|
7618
7289
|
DOUBAO_API_BASE_URL,
|
|
7619
7290
|
DOUBAO_API_CHAT_PATH,
|
|
@@ -7643,13 +7314,13 @@ var browserActions = [
|
|
|
7643
7314
|
IpcOrigin,
|
|
7644
7315
|
LMSTUDIO_DEFAULT_TEMPERATURE,
|
|
7645
7316
|
MAX_CHECKPOINT_TIMEOUT_SECONDS,
|
|
7317
|
+
MAX_MCP_TOOLS_THRESHOLD,
|
|
7646
7318
|
MINIMAX_DEFAULT_MAX_TOKENS,
|
|
7647
7319
|
MINIMAX_DEFAULT_TEMPERATURE,
|
|
7648
7320
|
MIN_CHECKPOINT_TIMEOUT_SECONDS,
|
|
7649
7321
|
MISTRAL_DEFAULT_TEMPERATURE,
|
|
7650
7322
|
MODELS_BY_PROVIDER,
|
|
7651
7323
|
MOONSHOT_DEFAULT_TEMPERATURE,
|
|
7652
|
-
NATIVE_TOOL_DEFAULTS,
|
|
7653
7324
|
OPENAI_AZURE_AI_INFERENCE_PATH,
|
|
7654
7325
|
OPENAI_NATIVE_DEFAULT_TEMPERATURE,
|
|
7655
7326
|
OPENROUTER_DEFAULT_PROVIDER_NAME,
|
|
@@ -7664,7 +7335,11 @@ var browserActions = [
|
|
|
7664
7335
|
RooModelsResponseSchema,
|
|
7665
7336
|
RooPricingSchema,
|
|
7666
7337
|
SECRET_STATE_KEYS,
|
|
7667
|
-
|
|
7338
|
+
SKILL_NAME_MAX_LENGTH,
|
|
7339
|
+
SKILL_NAME_MIN_LENGTH,
|
|
7340
|
+
SKILL_NAME_REGEX,
|
|
7341
|
+
SkillNameValidationError,
|
|
7342
|
+
TERMINAL_PREVIEW_BYTES,
|
|
7668
7343
|
TaskBridgeCommandName,
|
|
7669
7344
|
TaskBridgeEventName,
|
|
7670
7345
|
TaskCommandName,
|
|
@@ -7696,9 +7371,6 @@ var browserActions = [
|
|
|
7696
7371
|
chutesDefaultModelId,
|
|
7697
7372
|
chutesDefaultModelInfo,
|
|
7698
7373
|
chutesModels,
|
|
7699
|
-
claudeCodeDefaultModelId,
|
|
7700
|
-
claudeCodeModels,
|
|
7701
|
-
claudeCodeReasoningConfig,
|
|
7702
7374
|
clineAskSchema,
|
|
7703
7375
|
clineAsks,
|
|
7704
7376
|
clineMessageSchema,
|
|
@@ -7713,6 +7385,7 @@ var browserActions = [
|
|
|
7713
7385
|
commandIds,
|
|
7714
7386
|
contextCondenseSchema,
|
|
7715
7387
|
contextTruncationSchema,
|
|
7388
|
+
countEnabledMcpTools,
|
|
7716
7389
|
customModePromptsSchema,
|
|
7717
7390
|
customModesSettingsSchema,
|
|
7718
7391
|
customProviders,
|
|
@@ -7746,7 +7419,6 @@ var browserActions = [
|
|
|
7746
7419
|
geminiDefaultModelId,
|
|
7747
7420
|
geminiModels,
|
|
7748
7421
|
getApiProtocol,
|
|
7749
|
-
getEffectiveProtocol,
|
|
7750
7422
|
getErrorMessage,
|
|
7751
7423
|
getErrorStatusCode,
|
|
7752
7424
|
getImageGenerationProvider,
|
|
@@ -7781,9 +7453,9 @@ var browserActions = [
|
|
|
7781
7453
|
isInteractiveAsk,
|
|
7782
7454
|
isInternalProvider,
|
|
7783
7455
|
isLanguage,
|
|
7456
|
+
isLegacyReadFileParams,
|
|
7784
7457
|
isLocalProvider,
|
|
7785
7458
|
isModelParameter,
|
|
7786
|
-
isNativeProtocol,
|
|
7787
7459
|
isNonBlockingAsk,
|
|
7788
7460
|
isProviderName,
|
|
7789
7461
|
isResumableAsk,
|
|
@@ -7819,7 +7491,6 @@ var browserActions = [
|
|
|
7819
7491
|
moonshotDefaultModelId,
|
|
7820
7492
|
moonshotModels,
|
|
7821
7493
|
nonBlockingAsks,
|
|
7822
|
-
normalizeClaudeCodeModelId,
|
|
7823
7494
|
ollamaDefaultModelId,
|
|
7824
7495
|
ollamaDefaultModelInfo,
|
|
7825
7496
|
openAiCodexDefaultModelId,
|
|
@@ -7893,6 +7564,7 @@ var browserActions = [
|
|
|
7893
7564
|
userFeaturesSchema,
|
|
7894
7565
|
userSettingsConfigSchema,
|
|
7895
7566
|
userSettingsDataSchema,
|
|
7567
|
+
validateSkillName,
|
|
7896
7568
|
verbosityLevels,
|
|
7897
7569
|
verbosityLevelsSchema,
|
|
7898
7570
|
vercelAiGatewayDefaultModelId,
|