@roo-code/types 1.105.0 → 1.108.0
This diff shows the contents of publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
- package/dist/index.cjs +485 -705
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1623 -2502
- package/dist/index.d.ts +1623 -2502
- package/dist/index.js +473 -696
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
package/dist/index.cjs
CHANGED
@@ -42,7 +42,7 @@ __export(index_exports, {
 DEFAULT_CHECKPOINT_TIMEOUT_SECONDS: () => DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
 DEFAULT_CONSECUTIVE_MISTAKE_LIMIT: () => DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
 DEFAULT_MODES: () => DEFAULT_MODES,
-
+ DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE: () => DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE,
 DEFAULT_WRITE_DELAY_MS: () => DEFAULT_WRITE_DELAY_MS,
 DOUBAO_API_BASE_URL: () => DOUBAO_API_BASE_URL,
 DOUBAO_API_CHAT_PATH: () => DOUBAO_API_CHAT_PATH,
@@ -72,13 +72,13 @@ __export(index_exports, {
 IpcOrigin: () => IpcOrigin,
 LMSTUDIO_DEFAULT_TEMPERATURE: () => LMSTUDIO_DEFAULT_TEMPERATURE,
 MAX_CHECKPOINT_TIMEOUT_SECONDS: () => MAX_CHECKPOINT_TIMEOUT_SECONDS,
+ MAX_MCP_TOOLS_THRESHOLD: () => MAX_MCP_TOOLS_THRESHOLD,
 MINIMAX_DEFAULT_MAX_TOKENS: () => MINIMAX_DEFAULT_MAX_TOKENS,
 MINIMAX_DEFAULT_TEMPERATURE: () => MINIMAX_DEFAULT_TEMPERATURE,
 MIN_CHECKPOINT_TIMEOUT_SECONDS: () => MIN_CHECKPOINT_TIMEOUT_SECONDS,
 MISTRAL_DEFAULT_TEMPERATURE: () => MISTRAL_DEFAULT_TEMPERATURE,
 MODELS_BY_PROVIDER: () => MODELS_BY_PROVIDER,
 MOONSHOT_DEFAULT_TEMPERATURE: () => MOONSHOT_DEFAULT_TEMPERATURE,
- NATIVE_TOOL_DEFAULTS: () => NATIVE_TOOL_DEFAULTS,
 OPENAI_AZURE_AI_INFERENCE_PATH: () => OPENAI_AZURE_AI_INFERENCE_PATH,
 OPENAI_NATIVE_DEFAULT_TEMPERATURE: () => OPENAI_NATIVE_DEFAULT_TEMPERATURE,
 OPENROUTER_DEFAULT_PROVIDER_NAME: () => OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -93,7 +93,11 @@ __export(index_exports, {
 RooModelsResponseSchema: () => RooModelsResponseSchema,
 RooPricingSchema: () => RooPricingSchema,
 SECRET_STATE_KEYS: () => SECRET_STATE_KEYS,
-
+ SKILL_NAME_MAX_LENGTH: () => SKILL_NAME_MAX_LENGTH,
+ SKILL_NAME_MIN_LENGTH: () => SKILL_NAME_MIN_LENGTH,
+ SKILL_NAME_REGEX: () => SKILL_NAME_REGEX,
+ SkillNameValidationError: () => SkillNameValidationError,
+ TERMINAL_PREVIEW_BYTES: () => TERMINAL_PREVIEW_BYTES,
 TaskBridgeCommandName: () => TaskBridgeCommandName,
 TaskBridgeEventName: () => TaskBridgeEventName,
 TaskCommandName: () => TaskCommandName,
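Aside: the hunk above adds new skill-name exports (SKILL_NAME_MIN_LENGTH, SKILL_NAME_MAX_LENGTH, SKILL_NAME_REGEX, SkillNameValidationError, validateSkillName), but only the export names appear here, not their definitions. A minimal sketch of how a consumer might pre-check a name using only the exported constants; the exact semantics of validateSkillName are an assumption and are not shown in this diff:

    import { SKILL_NAME_MAX_LENGTH, SKILL_NAME_MIN_LENGTH, SKILL_NAME_REGEX } from "@roo-code/types";

    // Rough pre-check built only from the exported constants; validateSkillName
    // presumably wraps equivalent logic, but its signature is not visible in this hunk.
    function looksLikeValidSkillName(name: string): boolean {
      return (
        name.length >= SKILL_NAME_MIN_LENGTH &&
        name.length <= SKILL_NAME_MAX_LENGTH &&
        SKILL_NAME_REGEX.test(name)
      );
    }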
@@ -125,9 +129,6 @@ __export(index_exports, {
 chutesDefaultModelId: () => chutesDefaultModelId,
 chutesDefaultModelInfo: () => chutesDefaultModelInfo,
 chutesModels: () => chutesModels,
- claudeCodeDefaultModelId: () => claudeCodeDefaultModelId,
- claudeCodeModels: () => claudeCodeModels,
- claudeCodeReasoningConfig: () => claudeCodeReasoningConfig,
 clineAskSchema: () => clineAskSchema,
 clineAsks: () => clineAsks,
 clineMessageSchema: () => clineMessageSchema,
@@ -142,6 +143,7 @@ __export(index_exports, {
 commandIds: () => commandIds,
 contextCondenseSchema: () => contextCondenseSchema,
 contextTruncationSchema: () => contextTruncationSchema,
+ countEnabledMcpTools: () => countEnabledMcpTools,
 customModePromptsSchema: () => customModePromptsSchema,
 customModesSettingsSchema: () => customModesSettingsSchema,
 customProviders: () => customProviders,
@@ -175,7 +177,6 @@ __export(index_exports, {
 geminiDefaultModelId: () => geminiDefaultModelId,
 geminiModels: () => geminiModels,
 getApiProtocol: () => getApiProtocol,
- getEffectiveProtocol: () => getEffectiveProtocol,
 getErrorMessage: () => getErrorMessage,
 getErrorStatusCode: () => getErrorStatusCode,
 getImageGenerationProvider: () => getImageGenerationProvider,
@@ -210,9 +211,9 @@ __export(index_exports, {
 isInteractiveAsk: () => isInteractiveAsk,
 isInternalProvider: () => isInternalProvider,
 isLanguage: () => isLanguage,
+ isLegacyReadFileParams: () => isLegacyReadFileParams,
 isLocalProvider: () => isLocalProvider,
 isModelParameter: () => isModelParameter,
- isNativeProtocol: () => isNativeProtocol,
 isNonBlockingAsk: () => isNonBlockingAsk,
 isProviderName: () => isProviderName,
 isResumableAsk: () => isResumableAsk,
@@ -248,9 +249,10 @@ __export(index_exports, {
 moonshotDefaultModelId: () => moonshotDefaultModelId,
 moonshotModels: () => moonshotModels,
 nonBlockingAsks: () => nonBlockingAsks,
- normalizeClaudeCodeModelId: () => normalizeClaudeCodeModelId,
 ollamaDefaultModelId: () => ollamaDefaultModelId,
 ollamaDefaultModelInfo: () => ollamaDefaultModelInfo,
+ openAiCodexDefaultModelId: () => openAiCodexDefaultModelId,
+ openAiCodexModels: () => openAiCodexModels,
 openAiModelInfoSaneDefaults: () => openAiModelInfoSaneDefaults,
 openAiNativeDefaultModelId: () => openAiNativeDefaultModelId,
 openAiNativeModels: () => openAiNativeModels,
@@ -320,6 +322,7 @@ __export(index_exports, {
 userFeaturesSchema: () => userFeaturesSchema,
 userSettingsConfigSchema: () => userSettingsConfigSchema,
 userSettingsDataSchema: () => userSettingsDataSchema,
+ validateSkillName: () => validateSkillName,
 verbosityLevels: () => verbosityLevels,
 verbosityLevelsSchema: () => verbosityLevelsSchema,
 vercelAiGatewayDefaultModelId: () => vercelAiGatewayDefaultModelId,
@@ -415,7 +418,9 @@ var clineSays = [
 "condense_context_error",
 "sliding_window_truncation",
 "codebase_search_result",
- "user_edit_todos"
+ "user_edit_todos",
+ "too_many_tools_warning",
+ "tool"
 ];
 var clineSaySchema = import_zod.z.enum(clineSays);
 var toolProgressStatusSchema = import_zod.z.object({
@@ -483,6 +488,7 @@ var toolGroupsSchema = import_zod2.z.enum(toolGroups);
 var toolNames = [
 "execute_command",
 "read_file",
+ "read_command_output",
 "write_to_file",
 "apply_diff",
 "search_and_replace",
@@ -498,10 +504,10 @@ var toolNames = [
 "attempt_completion",
 "switch_mode",
 "new_task",
- "fetch_instructions",
 "codebase_search",
 "update_todo_list",
 "run_slash_command",
+ "skill",
 "generate_image",
 "custom_tool"
 ];
@@ -513,20 +519,6 @@ var toolUsageSchema = import_zod2.z.record(
 failures: import_zod2.z.number()
 })
 );
- var TOOL_PROTOCOL = {
- XML: "xml",
- NATIVE: "native"
- };
- var NATIVE_TOOL_DEFAULTS = {
- supportsNativeTools: true,
- defaultToolProtocol: TOOL_PROTOCOL.NATIVE
- };
- function isNativeProtocol(protocol) {
- return protocol === TOOL_PROTOCOL.NATIVE;
- }
- function getEffectiveProtocol(toolProtocol) {
- return toolProtocol || TOOL_PROTOCOL.XML;
- }

 // src/events.ts
 var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
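Aside: the hunk above deletes the tool-protocol helpers (TOOL_PROTOCOL, NATIVE_TOOL_DEFAULTS, isNativeProtocol, getEffectiveProtocol) without a replacement export. Because the removed lines show the full behavior, a consumer that still depends on these helpers can inline a local equivalent; a minimal sketch mirroring the deleted code, not part of 1.108.0:

    // Local stand-in for the helpers removed from @roo-code/types in this release.
    type ToolProtocol = "xml" | "native";

    const TOOL_PROTOCOL = { XML: "xml", NATIVE: "native" } as const;

    function isNativeProtocol(protocol: ToolProtocol): boolean {
      return protocol === TOOL_PROTOCOL.NATIVE;
    }

    function getEffectiveProtocol(toolProtocol?: ToolProtocol): ToolProtocol {
      // The deleted helper fell back to XML when no protocol was specified.
      return toolProtocol ?? TOOL_PROTOCOL.XML;
    }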
@@ -814,10 +806,6 @@ var modelInfoSchema = import_zod5.z.object({
 isStealthModel: import_zod5.z.boolean().optional(),
 // Flag to indicate if the model is free (no cost)
 isFree: import_zod5.z.boolean().optional(),
- // Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
- supportsNativeTools: import_zod5.z.boolean().optional(),
- // Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
- defaultToolProtocol: import_zod5.z.enum(["xml", "native"]).optional(),
 // Exclude specific native tools from being available (only applies to native protocol)
 // These tools will be removed from the set of tools available to the model
 excludedTools: import_zod5.z.array(import_zod5.z.string()).optional(),
@@ -914,8 +902,6 @@ var anthropicModels = {
 // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -948,8 +934,6 @@ var anthropicModels = {
 // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -981,8 +965,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 5,
 // $5 per million input tokens
 outputPrice: 25,
@@ -999,8 +981,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 15,
 // $15 per million input tokens
 outputPrice: 75,
@@ -1017,8 +997,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 15,
 // $15 per million input tokens
 outputPrice: 75,
@@ -1035,8 +1013,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1054,8 +1030,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1070,8 +1044,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1086,8 +1058,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1098,8 +1068,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1110,8 +1078,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 1.25,
 cacheWritesPrice: 0.3,
@@ -1122,8 +1088,6 @@ var anthropicModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1141,7 +1105,6 @@ var basetenModels = {
 contextWindow: 262e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 2.5,
 cacheWritesPrice: 0,
@@ -1153,7 +1116,6 @@ var basetenModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 2.2,
 cacheWritesPrice: 0,
@@ -1165,7 +1127,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 2.55,
 outputPrice: 5.95,
 cacheWritesPrice: 0,
@@ -1177,7 +1138,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 2.55,
 outputPrice: 5.95,
 cacheWritesPrice: 0,
@@ -1189,7 +1149,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.77,
 outputPrice: 0.77,
 cacheWritesPrice: 0,
@@ -1201,7 +1160,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.5,
 outputPrice: 1.5,
 cacheWritesPrice: 0,
@@ -1213,7 +1171,6 @@ var basetenModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.3,
 outputPrice: 0.45,
 cacheWritesPrice: 0,
@@ -1225,7 +1182,6 @@ var basetenModels = {
 contextWindow: 128072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.1,
 outputPrice: 0.5,
 cacheWritesPrice: 0,
@@ -1237,7 +1193,6 @@ var basetenModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.22,
 outputPrice: 0.8,
 cacheWritesPrice: 0,
@@ -1249,7 +1204,6 @@ var basetenModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.38,
 outputPrice: 1.53,
 cacheWritesPrice: 0,
@@ -1261,7 +1215,6 @@ var basetenModels = {
 contextWindow: 262e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 2.5,
 cacheWritesPrice: 0,
@@ -1281,8 +1234,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1296,7 +1247,6 @@ var bedrockModels = {
 contextWindow: 3e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
 inputPrice: 0.8,
 outputPrice: 3.2,
 cacheWritesPrice: 0.8,
@@ -1312,7 +1262,6 @@ var bedrockModels = {
 contextWindow: 3e5,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 1,
 outputPrice: 4,
 cacheWritesPrice: 1,
@@ -1326,7 +1275,6 @@ var bedrockModels = {
 contextWindow: 3e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
 inputPrice: 0.06,
 outputPrice: 0.24,
 cacheWritesPrice: 0.06,
@@ -1342,7 +1290,6 @@ var bedrockModels = {
 contextWindow: 1e6,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
 inputPrice: 0.33,
 outputPrice: 2.75,
 cacheWritesPrice: 0,
@@ -1358,7 +1305,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: true,
- supportsNativeTools: true,
 inputPrice: 0.035,
 outputPrice: 0.14,
 cacheWritesPrice: 0.035,
@@ -1375,8 +1321,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1391,8 +1335,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1407,8 +1349,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 5,
 outputPrice: 25,
 cacheWritesPrice: 6.25,
@@ -1423,8 +1363,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1439,8 +1377,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1454,8 +1390,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1469,8 +1403,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.8,
 outputPrice: 4,
 cacheWritesPrice: 1,
@@ -1485,8 +1417,6 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1502,8 +1432,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15
 },
@@ -1512,8 +1440,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75
 },
@@ -1522,8 +1448,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15
 },
@@ -1532,8 +1456,6 @@ var bedrockModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 1.25
 },
@@ -1542,7 +1464,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 1.35,
 outputPrice: 5.4
 },
@@ -1551,7 +1472,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.5,
 outputPrice: 1.5,
 description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases"
@@ -1561,7 +1481,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 2,
 outputPrice: 6,
 description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model"
@@ -1571,7 +1490,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.72,
 outputPrice: 0.72,
 description: "Llama 3.3 Instruct (70B)"
@@ -1581,7 +1499,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.72,
 outputPrice: 0.72,
 description: "Llama 3.2 Instruct (90B)"
@@ -1591,7 +1508,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.16,
 outputPrice: 0.16,
 description: "Llama 3.2 Instruct (11B)"
@@ -1601,7 +1517,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.15,
 outputPrice: 0.15,
 description: "Llama 3.2 Instruct (3B)"
@@ -1611,7 +1526,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.1,
 outputPrice: 0.1,
 description: "Llama 3.2 Instruct (1B)"
@@ -1621,7 +1535,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 2.4,
 outputPrice: 2.4,
 description: "Llama 3.1 Instruct (405B)"
@@ -1631,7 +1544,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.72,
 outputPrice: 0.72,
 description: "Llama 3.1 Instruct (70B)"
@@ -1641,7 +1553,6 @@ var bedrockModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.9,
 outputPrice: 0.9,
 description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
@@ -1651,7 +1562,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.22,
 outputPrice: 0.22,
 description: "Llama 3.1 Instruct (8B)"
@@ -1661,7 +1571,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 2.65,
 outputPrice: 3.5
 },
@@ -1670,7 +1579,6 @@ var bedrockModels = {
 contextWindow: 4e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.3,
 outputPrice: 0.6
 },
@@ -1679,7 +1587,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.15,
 outputPrice: 0.2,
 description: "Amazon Titan Text Lite"
@@ -1689,7 +1596,6 @@ var bedrockModels = {
 contextWindow: 8e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0.2,
 outputPrice: 0.6,
 description: "Amazon Titan Text Express"
@@ -1699,8 +1605,6 @@ var bedrockModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 preserveReasoning: true,
 inputPrice: 0.6,
 outputPrice: 2.5,
@@ -1711,8 +1615,6 @@ var bedrockModels = {
 contextWindow: 196608,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 preserveReasoning: true,
 inputPrice: 0.3,
 outputPrice: 1.2,
@@ -1723,8 +1625,6 @@ var bedrockModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.15,
 outputPrice: 1.2,
 description: "Qwen3 Next 80B (MoE model with 3B active parameters)"
@@ -1734,8 +1634,6 @@ var bedrockModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.45,
 outputPrice: 1.8,
 description: "Qwen3 Coder 480B (MoE model with 35B active parameters)"
@@ -1826,26 +1724,14 @@ var BEDROCK_SERVICE_TIER_PRICING = {
 // src/providers/cerebras.ts
 var cerebrasDefaultModelId = "gpt-oss-120b";
 var cerebrasModels = {
- "zai-glm-4.6": {
- maxTokens: 16384,
- // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- inputPrice: 0,
- outputPrice: 0,
- description: "Fast general-purpose model on Cerebras (up to 1,000 tokens/s). To be deprecated soon."
- },
 "zai-glm-4.7": {
 maxTokens: 16384,
 // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
 contextWindow: 131072,
 supportsImages: false,
- supportsPromptCache:
-
-
+ supportsPromptCache: true,
+ supportsTemperature: true,
+ defaultTemperature: 1,
 inputPrice: 0,
 outputPrice: 0,
 description: "Highly capable general-purpose model on Cerebras (up to 1,000 tokens/s), competitive with leading proprietary models on coding tasks."
@@ -1856,8 +1742,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Intelligent model with ~1400 tokens/s"
@@ -1868,8 +1752,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Powerful model with ~2600 tokens/s"
@@ -1880,8 +1762,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "SOTA coding performance with ~2500 tokens/s"
@@ -1892,8 +1772,6 @@ var cerebrasModels = {
 contextWindow: 64e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -1908,8 +1786,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 0528 model."
@@ -1919,8 +1795,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 model."
@@ -1930,8 +1804,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 model."
@@ -1941,8 +1813,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3.1 model."
@@ -1952,8 +1822,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.23,
 outputPrice: 0.9,
 description: "DeepSeek\u2011V3.1\u2011Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix\u2011ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance."
@@ -1963,8 +1831,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 3,
 description: "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2\xD7 quota per request and not intended for bulk workloads."
@@ -1974,8 +1840,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 0.35,
 description: "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long\u2011context training and inference efficiency while maintaining performance comparable to V3.1\u2011Terminus."
@@ -1987,8 +1851,6 @@ var chutesModels = {
 // From Groq
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Llama 3.3 70B Instruct model."
@@ -1998,8 +1860,6 @@ var chutesModels = {
 contextWindow: 512e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context."
@@ -2009,8 +1869,6 @@ var chutesModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Mistral Nemo Instruct model."
@@ -2020,8 +1878,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Gemma 3 12B IT model."
@@ -2031,8 +1887,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nous DeepHermes 3 Llama 3 8B Preview model."
@@ -2042,8 +1896,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Gemma 3 4B IT model."
@@ -2053,8 +1905,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nvidia Llama 3.3 Nemotron Super 49B model."
@@ -2064,8 +1914,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nvidia Llama 3.1 Nemotron Ultra 253B model."
@@ -2075,8 +1923,6 @@ var chutesModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model."
@@ -2086,8 +1932,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 Base model."
@@ -2097,8 +1941,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 Zero model."
@@ -2108,8 +1950,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 (0324) model."
@@ -2119,8 +1959,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 235B A22B Instruct 2507 model with 262K context window."
@@ -2130,8 +1968,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 235B A22B model."
@@ -2141,8 +1977,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 32B model."
@@ -2152,8 +1986,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 30B A3B model."
@@ -2163,8 +1995,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 14B model."
@@ -2174,8 +2004,6 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 8B model."
@@ -2185,8 +2013,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Microsoft MAI-DS-R1 FP8 model."
@@ -2196,8 +2022,6 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "TNGTech DeepSeek R1T Chimera model."
@@ -2207,8 +2031,6 @@ var chutesModels = {
 contextWindow: 151329,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.5-Air model with 151,329 token context window and 106B total parameters with 12B activated."
@@ -2218,8 +2040,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture."
@@ -2229,8 +2049,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 3,
 description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference."
@@ -2240,8 +2058,6 @@ var chutesModels = {
 contextWindow: 202752,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios."
@@ -2252,8 +2068,6 @@ var chutesModels = {
 contextWindow: 202752,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 1.15,
 outputPrice: 3.25,
 description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference."
@@ -2263,8 +2077,6 @@ var chutesModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "LongCat Flash Thinking FP8 model with 128K context window, optimized for complex reasoning and coding tasks."
@@ -2274,8 +2086,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks."
@@ -2285,8 +2095,6 @@ var chutesModels = {
 contextWindow: 75e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.1481,
 outputPrice: 0.5926,
 description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
@@ -2296,8 +2104,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.1999,
 outputPrice: 0.8001,
 description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
@@ -2307,8 +2113,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.077968332,
 outputPrice: 0.31202496,
 description: "Qwen3 235B A22B Thinking 2507 model with 262K context window."
@@ -2318,8 +2122,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Fast, stable instruction-tuned model optimized for complex tasks, RAG, and tool use without thinking traces."
@@ -2329,8 +2131,6 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis."
@@ -2340,8 +2140,6 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: true,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.16,
 outputPrice: 0.65,
 description: "Qwen3\u2011VL\u2011235B\u2011A22B\u2011Thinking is an open\u2011weight MoE vision\u2011language model (235B total, ~22B activated) optimized for deliberate multi\u2011step reasoning with strong text\u2011image\u2011video understanding and long\u2011context capabilities."
@@ -2349,73 +2147,6 @@ var chutesModels = {
 };
 var chutesDefaultModelInfo = chutesModels[chutesDefaultModelId];

- // src/providers/claude-code.ts
- var DATE_SUFFIX_PATTERN = /-\d{8}$/;
- var claudeCodeModels = {
- "claude-haiku-4-5": {
- maxTokens: 32768,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- supportsReasoningEffort: ["disable", "low", "medium", "high"],
- reasoningEffort: "medium",
- description: "Claude Haiku 4.5 - Fast and efficient with thinking"
- },
- "claude-sonnet-4-5": {
- maxTokens: 32768,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- supportsReasoningEffort: ["disable", "low", "medium", "high"],
- reasoningEffort: "medium",
- description: "Claude Sonnet 4.5 - Balanced performance with thinking"
- },
- "claude-opus-4-5": {
- maxTokens: 32768,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- supportsReasoningEffort: ["disable", "low", "medium", "high"],
- reasoningEffort: "medium",
- description: "Claude Opus 4.5 - Most capable with thinking"
- }
- };
- var claudeCodeDefaultModelId = "claude-sonnet-4-5";
- var MODEL_FAMILY_PATTERNS = [
- // Opus models (any version) → claude-opus-4-5
- { pattern: /opus/i, target: "claude-opus-4-5" },
- // Haiku models (any version) → claude-haiku-4-5
- { pattern: /haiku/i, target: "claude-haiku-4-5" },
- // Sonnet models (any version) → claude-sonnet-4-5
- { pattern: /sonnet/i, target: "claude-sonnet-4-5" }
- ];
- function normalizeClaudeCodeModelId(modelId) {
- if (Object.hasOwn(claudeCodeModels, modelId)) {
- return modelId;
- }
- const withoutDate = modelId.replace(DATE_SUFFIX_PATTERN, "");
- if (Object.hasOwn(claudeCodeModels, withoutDate)) {
- return withoutDate;
- }
- for (const { pattern, target } of MODEL_FAMILY_PATTERNS) {
- if (pattern.test(modelId)) {
- return target;
- }
- }
- return claudeCodeDefaultModelId;
- }
- var claudeCodeReasoningConfig = {
- low: { budgetTokens: 16e3 },
- medium: { budgetTokens: 32e3 },
- high: { budgetTokens: 64e3 }
- };
-
 // src/providers/deepseek.ts
 var deepSeekDefaultModelId = "deepseek-chat";
 var deepSeekModels = {
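Aside: the hunk above removes src/providers/claude-code.ts in its entirety, including normalizeClaudeCodeModelId. Since the deleted logic is fully visible in the removed lines, a consumer that still needs the normalization can carry a local copy; a sketch mirroring the removed behavior (strip a trailing -YYYYMMDD suffix, then match the opus/haiku/sonnet family, defaulting to claude-sonnet-4-5), not part of 1.108.0:

    // Local copy of the model-id normalization deleted with src/providers/claude-code.ts.
    const CLAUDE_CODE_MODEL_IDS = ["claude-haiku-4-5", "claude-sonnet-4-5", "claude-opus-4-5"];
    const DATE_SUFFIX_PATTERN = /-\d{8}$/;

    function normalizeClaudeCodeModelId(modelId: string): string {
      if (CLAUDE_CODE_MODEL_IDS.includes(modelId)) return modelId;
      const withoutDate = modelId.replace(DATE_SUFFIX_PATTERN, "");
      if (CLAUDE_CODE_MODEL_IDS.includes(withoutDate)) return withoutDate;
      if (/opus/i.test(modelId)) return "claude-opus-4-5";
      if (/haiku/i.test(modelId)) return "claude-haiku-4-5";
      if (/sonnet/i.test(modelId)) return "claude-sonnet-4-5";
      return "claude-sonnet-4-5";
    }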
@@ -2425,8 +2156,6 @@ var deepSeekModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.28,
 // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
 outputPrice: 0.42,
@@ -2443,8 +2172,6 @@ var deepSeekModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 preserveReasoning: true,
 inputPrice: 0.28,
 // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
@@ -2467,8 +2194,6 @@ var doubaoModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 1e-4,
 // $0.0001 per million tokens (cache miss)
 outputPrice: 4e-4,
@@ -2484,8 +2209,6 @@ var doubaoModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 2e-4,
 // $0.0002 per million tokens
 outputPrice: 8e-4,
@@ -2501,8 +2224,6 @@ var doubaoModels = {
 contextWindow: 128e3,
 supportsImages: true,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 15e-5,
 // $0.00015 per million tokens
 outputPrice: 6e-4,
@@ -2525,7 +2246,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 0324 model."
@@ -2535,7 +2255,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 0528 model."
@@ -2545,7 +2264,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "Kimi K2 Instruct model."
@@ -2555,7 +2273,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "GPT-OSS 120B model."
@@ -2565,7 +2282,6 @@ var featherlessModels = {
 contextWindow: 32678,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 Coder 480B A35B Instruct model."
@@ -2581,8 +2297,6 @@ var fireworksModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.5,
 cacheReadsPrice: 0.15,
@@ -2593,8 +2307,6 @@ var fireworksModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.5,
 description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -2604,7 +2316,6 @@ var fireworksModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: true,
- supportsNativeTools: true,
 supportsTemperature: true,
 preserveReasoning: true,
 defaultTemperature: 1,
@@ -2618,8 +2329,6 @@ var fireworksModels = {
 contextWindow: 204800,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.3,
 outputPrice: 1.2,
 description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -2629,8 +2338,6 @@ var fireworksModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.22,
 outputPrice: 0.88,
 description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -2640,8 +2347,6 @@ var fireworksModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 0.45,
 outputPrice: 1.8,
 description: "Qwen3's most agentic code model to date."
@@ -2651,8 +2356,6 @@ var fireworksModels = {
 contextWindow: 16e4,
 supportsImages: false,
 supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 8,
 description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
|
|
@@ -2662,8 +2365,6 @@ var fireworksModels = {
|
|
|
2662
2365
|
contextWindow: 128e3,
|
|
2663
2366
|
supportsImages: false,
|
|
2664
2367
|
supportsPromptCache: false,
|
|
2665
|
-
supportsNativeTools: true,
|
|
2666
|
-
defaultToolProtocol: "native",
|
|
2667
2368
|
inputPrice: 0.9,
|
|
2668
2369
|
outputPrice: 0.9,
|
|
2669
2370
|
description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
|
|
@@ -2673,8 +2374,6 @@ var fireworksModels = {
|
|
|
2673
2374
|
contextWindow: 163840,
|
|
2674
2375
|
supportsImages: false,
|
|
2675
2376
|
supportsPromptCache: false,
|
|
2676
|
-
supportsNativeTools: true,
|
|
2677
|
-
defaultToolProtocol: "native",
|
|
2678
2377
|
inputPrice: 0.56,
|
|
2679
2378
|
outputPrice: 1.68,
|
|
2680
2379
|
description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
|
|
@@ -2684,8 +2383,6 @@ var fireworksModels = {
|
|
|
2684
2383
|
contextWindow: 128e3,
|
|
2685
2384
|
supportsImages: false,
|
|
2686
2385
|
supportsPromptCache: false,
|
|
2687
|
-
supportsNativeTools: true,
|
|
2688
|
-
defaultToolProtocol: "native",
|
|
2689
2386
|
inputPrice: 0.55,
|
|
2690
2387
|
outputPrice: 2.19,
|
|
2691
2388
|
description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
|
|
@@ -2695,8 +2392,6 @@ var fireworksModels = {
|
|
|
2695
2392
|
contextWindow: 128e3,
|
|
2696
2393
|
supportsImages: false,
|
|
2697
2394
|
supportsPromptCache: false,
|
|
2698
|
-
supportsNativeTools: true,
|
|
2699
|
-
defaultToolProtocol: "native",
|
|
2700
2395
|
inputPrice: 0.55,
|
|
2701
2396
|
outputPrice: 2.19,
|
|
2702
2397
|
description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
|
|
@@ -2706,8 +2401,6 @@ var fireworksModels = {
|
|
|
2706
2401
|
contextWindow: 198e3,
|
|
2707
2402
|
supportsImages: false,
|
|
2708
2403
|
supportsPromptCache: false,
|
|
2709
|
-
supportsNativeTools: true,
|
|
2710
|
-
defaultToolProtocol: "native",
|
|
2711
2404
|
inputPrice: 0.55,
|
|
2712
2405
|
outputPrice: 2.19,
|
|
2713
2406
|
description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
|
|
@@ -2717,8 +2410,6 @@ var fireworksModels = {
|
|
|
2717
2410
|
contextWindow: 128e3,
|
|
2718
2411
|
supportsImages: false,
|
|
2719
2412
|
supportsPromptCache: false,
|
|
2720
|
-
supportsNativeTools: true,
|
|
2721
|
-
defaultToolProtocol: "native",
|
|
2722
2413
|
inputPrice: 0.07,
|
|
2723
2414
|
outputPrice: 0.3,
|
|
2724
2415
|
description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
|
|
@@ -2728,11 +2419,63 @@ var fireworksModels = {
|
|
|
2728
2419
|
contextWindow: 128e3,
|
|
2729
2420
|
supportsImages: false,
|
|
2730
2421
|
supportsPromptCache: false,
|
|
2731
|
-
supportsNativeTools: true,
|
|
2732
|
-
defaultToolProtocol: "native",
|
|
2733
2422
|
inputPrice: 0.15,
|
|
2734
2423
|
outputPrice: 0.6,
|
|
2735
2424
|
description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
|
|
2425
|
+
},
|
|
2426
|
+
"accounts/fireworks/models/minimax-m2p1": {
|
|
2427
|
+
maxTokens: 4096,
|
|
2428
|
+
contextWindow: 204800,
|
|
2429
|
+
supportsImages: false,
|
|
2430
|
+
supportsPromptCache: false,
|
|
2431
|
+
inputPrice: 0.3,
|
|
2432
|
+
outputPrice: 1.2,
|
|
2433
|
+
description: "MiniMax M2.1 is an upgraded version of M2 with improved performance on complex reasoning, coding, and long-context understanding tasks."
|
|
2434
|
+
},
|
|
2435
|
+
"accounts/fireworks/models/deepseek-v3p2": {
|
|
2436
|
+
maxTokens: 16384,
|
|
2437
|
+
contextWindow: 163840,
|
|
2438
|
+
supportsImages: false,
|
|
2439
|
+
supportsPromptCache: false,
|
|
2440
|
+
inputPrice: 0.56,
|
|
2441
|
+
outputPrice: 1.68,
|
|
2442
|
+
description: "DeepSeek V3.2 is the latest iteration of the V3 model family with enhanced reasoning capabilities, improved code generation, and better instruction following."
|
|
2443
|
+
},
|
|
2444
|
+
"accounts/fireworks/models/glm-4p7": {
|
|
2445
|
+
maxTokens: 25344,
|
|
2446
|
+
contextWindow: 198e3,
|
|
2447
|
+
supportsImages: false,
|
|
2448
|
+
supportsPromptCache: false,
|
|
2449
|
+
inputPrice: 0.55,
|
|
2450
|
+
outputPrice: 2.19,
|
|
2451
|
+
description: "Z.ai GLM-4.7 is the latest coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality."
|
|
2452
|
+
},
|
|
2453
|
+
"accounts/fireworks/models/llama-v3p3-70b-instruct": {
|
|
2454
|
+
maxTokens: 16384,
|
|
2455
|
+
contextWindow: 131072,
|
|
2456
|
+
supportsImages: false,
|
|
2457
|
+
supportsPromptCache: false,
|
|
2458
|
+
inputPrice: 0.9,
|
|
2459
|
+
outputPrice: 0.9,
|
|
2460
|
+
description: "Meta Llama 3.3 70B Instruct is a highly capable instruction-tuned model with strong reasoning, coding, and general task performance."
|
|
2461
|
+
},
|
|
2462
|
+
"accounts/fireworks/models/llama4-maverick-instruct-basic": {
|
|
2463
|
+
maxTokens: 16384,
|
|
2464
|
+
contextWindow: 131072,
|
|
2465
|
+
supportsImages: true,
|
|
2466
|
+
supportsPromptCache: false,
|
|
2467
|
+
inputPrice: 0.22,
|
|
2468
|
+
outputPrice: 0.88,
|
|
2469
|
+
description: "Llama 4 Maverick is Meta's latest multimodal model with vision capabilities, optimized for instruction following and coding tasks."
|
|
2470
|
+
},
|
|
2471
|
+
"accounts/fireworks/models/llama4-scout-instruct-basic": {
|
|
2472
|
+
maxTokens: 16384,
|
|
2473
|
+
contextWindow: 131072,
|
|
2474
|
+
supportsImages: true,
|
|
2475
|
+
supportsPromptCache: false,
|
|
2476
|
+
inputPrice: 0.15,
|
|
2477
|
+
outputPrice: 0.6,
|
|
2478
|
+
description: "Llama 4 Scout is a smaller, faster variant of Llama 4 with multimodal capabilities, ideal for quick iterations and cost-effective deployments."
|
|
2736
2479
|
}
|
|
2737
2480
|
};
|
|
2738
2481
|
|
|
@@ -2743,8 +2486,6 @@ var geminiModels = {
|
|
|
2743
2486
|
maxTokens: 65536,
|
|
2744
2487
|
contextWindow: 1048576,
|
|
2745
2488
|
supportsImages: true,
|
|
2746
|
-
supportsNativeTools: true,
|
|
2747
|
-
defaultToolProtocol: "native",
|
|
2748
2489
|
supportsPromptCache: true,
|
|
2749
2490
|
supportsReasoningEffort: ["low", "high"],
|
|
2750
2491
|
reasoningEffort: "low",
|
|
@@ -2752,16 +2493,19 @@ var geminiModels = {
|
|
|
2752
2493
|
defaultTemperature: 1,
|
|
2753
2494
|
inputPrice: 4,
|
|
2754
2495
|
outputPrice: 18,
|
|
2496
|
+
cacheReadsPrice: 0.4,
|
|
2755
2497
|
tiers: [
|
|
2756
2498
|
{
|
|
2757
2499
|
contextWindow: 2e5,
|
|
2758
2500
|
inputPrice: 2,
|
|
2759
|
-
outputPrice: 12
|
|
2501
|
+
outputPrice: 12,
|
|
2502
|
+
cacheReadsPrice: 0.2
|
|
2760
2503
|
},
|
|
2761
2504
|
{
|
|
2762
2505
|
contextWindow: Infinity,
|
|
2763
2506
|
inputPrice: 4,
|
|
2764
|
-
outputPrice: 18
|
|
2507
|
+
outputPrice: 18,
|
|
2508
|
+
cacheReadsPrice: 0.4
|
|
2765
2509
|
}
|
|
2766
2510
|
]
|
|
2767
2511
|
},
|
|
@@ -2769,25 +2513,20 @@ var geminiModels = {
|
|
|
2769
2513
|
maxTokens: 65536,
|
|
2770
2514
|
contextWindow: 1048576,
|
|
2771
2515
|
supportsImages: true,
|
|
2772
|
-
supportsNativeTools: true,
|
|
2773
|
-
defaultToolProtocol: "native",
|
|
2774
2516
|
supportsPromptCache: true,
|
|
2775
2517
|
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
|
|
2776
2518
|
reasoningEffort: "medium",
|
|
2777
2519
|
supportsTemperature: true,
|
|
2778
2520
|
defaultTemperature: 1,
|
|
2779
|
-
inputPrice: 0.
|
|
2780
|
-
outputPrice:
|
|
2781
|
-
cacheReadsPrice: 0.
|
|
2782
|
-
cacheWritesPrice: 1
|
|
2521
|
+
inputPrice: 0.5,
|
|
2522
|
+
outputPrice: 3,
|
|
2523
|
+
cacheReadsPrice: 0.05
|
|
2783
2524
|
},
|
|
2784
2525
|
// 2.5 Pro models
|
|
2785
2526
|
"gemini-2.5-pro": {
|
|
2786
2527
|
maxTokens: 64e3,
|
|
2787
2528
|
contextWindow: 1048576,
|
|
2788
2529
|
supportsImages: true,
|
|
2789
|
-
supportsNativeTools: true,
|
|
2790
|
-
defaultToolProtocol: "native",
|
|
2791
2530
|
supportsPromptCache: true,
|
|
2792
2531
|
inputPrice: 2.5,
|
|
2793
2532
|
// This is the pricing for prompts above 200k tokens.
|
|
@@ -2816,8 +2555,6 @@ var geminiModels = {
|
|
|
2816
2555
|
maxTokens: 65535,
|
|
2817
2556
|
contextWindow: 1048576,
|
|
2818
2557
|
supportsImages: true,
|
|
2819
|
-
supportsNativeTools: true,
|
|
2820
|
-
defaultToolProtocol: "native",
|
|
2821
2558
|
supportsPromptCache: true,
|
|
2822
2559
|
inputPrice: 2.5,
|
|
2823
2560
|
// This is the pricing for prompts above 200k tokens.
|
|
@@ -2845,8 +2582,6 @@ var geminiModels = {
|
|
|
2845
2582
|
maxTokens: 65535,
|
|
2846
2583
|
contextWindow: 1048576,
|
|
2847
2584
|
supportsImages: true,
|
|
2848
|
-
supportsNativeTools: true,
|
|
2849
|
-
defaultToolProtocol: "native",
|
|
2850
2585
|
supportsPromptCache: true,
|
|
2851
2586
|
inputPrice: 2.5,
|
|
2852
2587
|
// This is the pricing for prompts above 200k tokens.
|
|
@@ -2872,8 +2607,6 @@ var geminiModels = {
|
|
|
2872
2607
|
maxTokens: 65535,
|
|
2873
2608
|
contextWindow: 1048576,
|
|
2874
2609
|
supportsImages: true,
|
|
2875
|
-
supportsNativeTools: true,
|
|
2876
|
-
defaultToolProtocol: "native",
|
|
2877
2610
|
supportsPromptCache: true,
|
|
2878
2611
|
inputPrice: 2.5,
|
|
2879
2612
|
// This is the pricing for prompts above 200k tokens.
|
|
@@ -2902,8 +2635,6 @@ var geminiModels = {
|
|
|
2902
2635
|
maxTokens: 65536,
|
|
2903
2636
|
contextWindow: 1048576,
|
|
2904
2637
|
supportsImages: true,
|
|
2905
|
-
supportsNativeTools: true,
|
|
2906
|
-
defaultToolProtocol: "native",
|
|
2907
2638
|
supportsPromptCache: true,
|
|
2908
2639
|
inputPrice: 0.3,
|
|
2909
2640
|
outputPrice: 2.5,
|
|
@@ -2916,8 +2647,6 @@ var geminiModels = {
|
|
|
2916
2647
|
maxTokens: 65536,
|
|
2917
2648
|
contextWindow: 1048576,
|
|
2918
2649
|
supportsImages: true,
|
|
2919
|
-
supportsNativeTools: true,
|
|
2920
|
-
defaultToolProtocol: "native",
|
|
2921
2650
|
supportsPromptCache: true,
|
|
2922
2651
|
inputPrice: 0.3,
|
|
2923
2652
|
outputPrice: 2.5,
|
|
@@ -2930,8 +2659,6 @@ var geminiModels = {
|
|
|
2930
2659
|
maxTokens: 64e3,
|
|
2931
2660
|
contextWindow: 1048576,
|
|
2932
2661
|
supportsImages: true,
|
|
2933
|
-
supportsNativeTools: true,
|
|
2934
|
-
defaultToolProtocol: "native",
|
|
2935
2662
|
supportsPromptCache: true,
|
|
2936
2663
|
inputPrice: 0.3,
|
|
2937
2664
|
outputPrice: 2.5,
|
|
@@ -2945,8 +2672,6 @@ var geminiModels = {
|
|
|
2945
2672
|
maxTokens: 65536,
|
|
2946
2673
|
contextWindow: 1048576,
|
|
2947
2674
|
supportsImages: true,
|
|
2948
|
-
supportsNativeTools: true,
|
|
2949
|
-
defaultToolProtocol: "native",
|
|
2950
2675
|
supportsPromptCache: true,
|
|
2951
2676
|
inputPrice: 0.1,
|
|
2952
2677
|
outputPrice: 0.4,
|
|
@@ -2959,8 +2684,6 @@ var geminiModels = {
|
|
|
2959
2684
|
maxTokens: 65536,
|
|
2960
2685
|
contextWindow: 1048576,
|
|
2961
2686
|
supportsImages: true,
|
|
2962
|
-
supportsNativeTools: true,
|
|
2963
|
-
defaultToolProtocol: "native",
|
|
2964
2687
|
supportsPromptCache: true,
|
|
2965
2688
|
inputPrice: 0.1,
|
|
2966
2689
|
outputPrice: 0.4,
|
|
@@ -2980,8 +2703,6 @@ var groqModels = {
|
|
|
2980
2703
|
contextWindow: 131072,
|
|
2981
2704
|
supportsImages: false,
|
|
2982
2705
|
supportsPromptCache: false,
|
|
2983
|
-
supportsNativeTools: true,
|
|
2984
|
-
defaultToolProtocol: "native",
|
|
2985
2706
|
inputPrice: 0.05,
|
|
2986
2707
|
outputPrice: 0.08,
|
|
2987
2708
|
description: "Meta Llama 3.1 8B Instant model, 128K context."
|
|
@@ -2991,8 +2712,6 @@ var groqModels = {
|
|
|
2991
2712
|
contextWindow: 131072,
|
|
2992
2713
|
supportsImages: false,
|
|
2993
2714
|
supportsPromptCache: false,
|
|
2994
|
-
supportsNativeTools: true,
|
|
2995
|
-
defaultToolProtocol: "native",
|
|
2996
2715
|
inputPrice: 0.59,
|
|
2997
2716
|
outputPrice: 0.79,
|
|
2998
2717
|
description: "Meta Llama 3.3 70B Versatile model, 128K context."
|
|
@@ -3002,8 +2721,6 @@ var groqModels = {
|
|
|
3002
2721
|
contextWindow: 131072,
|
|
3003
2722
|
supportsImages: false,
|
|
3004
2723
|
supportsPromptCache: false,
|
|
3005
|
-
supportsNativeTools: true,
|
|
3006
|
-
defaultToolProtocol: "native",
|
|
3007
2724
|
inputPrice: 0.11,
|
|
3008
2725
|
outputPrice: 0.34,
|
|
3009
2726
|
description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
|
|
@@ -3013,8 +2730,6 @@ var groqModels = {
|
|
|
3013
2730
|
contextWindow: 131072,
|
|
3014
2731
|
supportsImages: false,
|
|
3015
2732
|
supportsPromptCache: false,
|
|
3016
|
-
supportsNativeTools: true,
|
|
3017
|
-
defaultToolProtocol: "native",
|
|
3018
2733
|
inputPrice: 0.29,
|
|
3019
2734
|
outputPrice: 0.59,
|
|
3020
2735
|
description: "Alibaba Qwen 3 32B model, 128K context."
|
|
@@ -3024,8 +2739,6 @@ var groqModels = {
|
|
|
3024
2739
|
contextWindow: 262144,
|
|
3025
2740
|
supportsImages: false,
|
|
3026
2741
|
supportsPromptCache: true,
|
|
3027
|
-
supportsNativeTools: true,
|
|
3028
|
-
defaultToolProtocol: "native",
|
|
3029
2742
|
inputPrice: 0.6,
|
|
3030
2743
|
outputPrice: 2.5,
|
|
3031
2744
|
cacheReadsPrice: 0.15,
|
|
@@ -3036,8 +2749,6 @@ var groqModels = {
|
|
|
3036
2749
|
contextWindow: 131072,
|
|
3037
2750
|
supportsImages: false,
|
|
3038
2751
|
supportsPromptCache: false,
|
|
3039
|
-
supportsNativeTools: true,
|
|
3040
|
-
defaultToolProtocol: "native",
|
|
3041
2752
|
inputPrice: 0.15,
|
|
3042
2753
|
outputPrice: 0.75,
|
|
3043
2754
|
description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
|
|
@@ -3047,8 +2758,6 @@ var groqModels = {
|
|
|
3047
2758
|
contextWindow: 131072,
|
|
3048
2759
|
supportsImages: false,
|
|
3049
2760
|
supportsPromptCache: false,
|
|
3050
|
-
supportsNativeTools: true,
|
|
3051
|
-
defaultToolProtocol: "native",
|
|
3052
2761
|
inputPrice: 0.1,
|
|
3053
2762
|
outputPrice: 0.5,
|
|
3054
2763
|
description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
|
|
@@ -3075,7 +2784,6 @@ var ioIntelligenceModels = {
|
|
|
3075
2784
|
contextWindow: 128e3,
|
|
3076
2785
|
supportsImages: false,
|
|
3077
2786
|
supportsPromptCache: false,
|
|
3078
|
-
supportsNativeTools: true,
|
|
3079
2787
|
description: "DeepSeek R1 reasoning model"
|
|
3080
2788
|
},
|
|
3081
2789
|
"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
|
|
@@ -3083,7 +2791,6 @@ var ioIntelligenceModels = {
|
|
|
3083
2791
|
contextWindow: 43e4,
|
|
3084
2792
|
supportsImages: true,
|
|
3085
2793
|
supportsPromptCache: false,
|
|
3086
|
-
supportsNativeTools: true,
|
|
3087
2794
|
description: "Llama 4 Maverick 17B model"
|
|
3088
2795
|
},
|
|
3089
2796
|
"Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
|
|
@@ -3091,7 +2798,6 @@ var ioIntelligenceModels = {
|
|
|
3091
2798
|
contextWindow: 106e3,
|
|
3092
2799
|
supportsImages: false,
|
|
3093
2800
|
supportsPromptCache: false,
|
|
3094
|
-
supportsNativeTools: true,
|
|
3095
2801
|
description: "Qwen3 Coder 480B specialized for coding"
|
|
3096
2802
|
},
|
|
3097
2803
|
"openai/gpt-oss-120b": {
|
|
@@ -3099,7 +2805,6 @@ var ioIntelligenceModels = {
|
|
|
3099
2805
|
contextWindow: 131072,
|
|
3100
2806
|
supportsImages: false,
|
|
3101
2807
|
supportsPromptCache: false,
|
|
3102
|
-
supportsNativeTools: true,
|
|
3103
2808
|
description: "OpenAI GPT-OSS 120B model"
|
|
3104
2809
|
}
|
|
3105
2810
|
};
|
|
@@ -3111,8 +2816,6 @@ var litellmDefaultModelInfo = {
|
|
|
3111
2816
|
contextWindow: 2e5,
|
|
3112
2817
|
supportsImages: true,
|
|
3113
2818
|
supportsPromptCache: true,
|
|
3114
|
-
supportsNativeTools: true,
|
|
3115
|
-
defaultToolProtocol: "native",
|
|
3116
2819
|
inputPrice: 3,
|
|
3117
2820
|
outputPrice: 15,
|
|
3118
2821
|
cacheWritesPrice: 3.75,
|
|
@@ -3127,8 +2830,6 @@ var lMStudioDefaultModelInfo = {
|
|
|
3127
2830
|
contextWindow: 2e5,
|
|
3128
2831
|
supportsImages: true,
|
|
3129
2832
|
supportsPromptCache: true,
|
|
3130
|
-
supportsNativeTools: true,
|
|
3131
|
-
defaultToolProtocol: "native",
|
|
3132
2833
|
inputPrice: 0,
|
|
3133
2834
|
outputPrice: 0,
|
|
3134
2835
|
cacheWritesPrice: 0,
|
|
@@ -3144,8 +2845,6 @@ var mistralModels = {
|
|
|
3144
2845
|
contextWindow: 128e3,
|
|
3145
2846
|
supportsImages: true,
|
|
3146
2847
|
supportsPromptCache: false,
|
|
3147
|
-
supportsNativeTools: true,
|
|
3148
|
-
defaultToolProtocol: "native",
|
|
3149
2848
|
inputPrice: 2,
|
|
3150
2849
|
outputPrice: 5
|
|
3151
2850
|
},
|
|
@@ -3154,8 +2853,6 @@ var mistralModels = {
|
|
|
3154
2853
|
contextWindow: 131e3,
|
|
3155
2854
|
supportsImages: true,
|
|
3156
2855
|
supportsPromptCache: false,
|
|
3157
|
-
supportsNativeTools: true,
|
|
3158
|
-
defaultToolProtocol: "native",
|
|
3159
2856
|
inputPrice: 0.4,
|
|
3160
2857
|
outputPrice: 2
|
|
3161
2858
|
},
|
|
@@ -3164,8 +2861,6 @@ var mistralModels = {
|
|
|
3164
2861
|
contextWindow: 131e3,
|
|
3165
2862
|
supportsImages: true,
|
|
3166
2863
|
supportsPromptCache: false,
|
|
3167
|
-
supportsNativeTools: true,
|
|
3168
|
-
defaultToolProtocol: "native",
|
|
3169
2864
|
inputPrice: 0.4,
|
|
3170
2865
|
outputPrice: 2
|
|
3171
2866
|
},
|
|
@@ -3174,8 +2869,6 @@ var mistralModels = {
|
|
|
3174
2869
|
contextWindow: 256e3,
|
|
3175
2870
|
supportsImages: false,
|
|
3176
2871
|
supportsPromptCache: false,
|
|
3177
|
-
supportsNativeTools: true,
|
|
3178
|
-
defaultToolProtocol: "native",
|
|
3179
2872
|
inputPrice: 0.3,
|
|
3180
2873
|
outputPrice: 0.9
|
|
3181
2874
|
},
|
|
@@ -3184,8 +2877,6 @@ var mistralModels = {
|
|
|
3184
2877
|
contextWindow: 131e3,
|
|
3185
2878
|
supportsImages: false,
|
|
3186
2879
|
supportsPromptCache: false,
|
|
3187
|
-
supportsNativeTools: true,
|
|
3188
|
-
defaultToolProtocol: "native",
|
|
3189
2880
|
inputPrice: 2,
|
|
3190
2881
|
outputPrice: 6
|
|
3191
2882
|
},
|
|
@@ -3194,8 +2885,6 @@ var mistralModels = {
|
|
|
3194
2885
|
contextWindow: 131e3,
|
|
3195
2886
|
supportsImages: false,
|
|
3196
2887
|
supportsPromptCache: false,
|
|
3197
|
-
supportsNativeTools: true,
|
|
3198
|
-
defaultToolProtocol: "native",
|
|
3199
2888
|
inputPrice: 0.1,
|
|
3200
2889
|
outputPrice: 0.1
|
|
3201
2890
|
},
|
|
@@ -3204,8 +2893,6 @@ var mistralModels = {
|
|
|
3204
2893
|
contextWindow: 131e3,
|
|
3205
2894
|
supportsImages: false,
|
|
3206
2895
|
supportsPromptCache: false,
|
|
3207
|
-
supportsNativeTools: true,
|
|
3208
|
-
defaultToolProtocol: "native",
|
|
3209
2896
|
inputPrice: 0.04,
|
|
3210
2897
|
outputPrice: 0.04
|
|
3211
2898
|
},
|
|
@@ -3214,8 +2901,6 @@ var mistralModels = {
|
|
|
3214
2901
|
contextWindow: 32e3,
|
|
3215
2902
|
supportsImages: false,
|
|
3216
2903
|
supportsPromptCache: false,
|
|
3217
|
-
supportsNativeTools: true,
|
|
3218
|
-
defaultToolProtocol: "native",
|
|
3219
2904
|
inputPrice: 0.2,
|
|
3220
2905
|
outputPrice: 0.6
|
|
3221
2906
|
},
|
|
@@ -3224,8 +2909,6 @@ var mistralModels = {
|
|
|
3224
2909
|
contextWindow: 131e3,
|
|
3225
2910
|
supportsImages: true,
|
|
3226
2911
|
supportsPromptCache: false,
|
|
3227
|
-
supportsNativeTools: true,
|
|
3228
|
-
defaultToolProtocol: "native",
|
|
3229
2912
|
inputPrice: 2,
|
|
3230
2913
|
outputPrice: 6
|
|
3231
2914
|
}
|
|
@@ -3240,8 +2923,6 @@ var moonshotModels = {
|
|
|
3240
2923
|
contextWindow: 131072,
|
|
3241
2924
|
supportsImages: false,
|
|
3242
2925
|
supportsPromptCache: true,
|
|
3243
|
-
supportsNativeTools: true,
|
|
3244
|
-
defaultToolProtocol: "native",
|
|
3245
2926
|
inputPrice: 0.6,
|
|
3246
2927
|
// $0.60 per million tokens (cache miss)
|
|
3247
2928
|
outputPrice: 2.5,
|
|
@@ -3257,8 +2938,6 @@ var moonshotModels = {
|
|
|
3257
2938
|
contextWindow: 262144,
|
|
3258
2939
|
supportsImages: false,
|
|
3259
2940
|
supportsPromptCache: true,
|
|
3260
|
-
supportsNativeTools: true,
|
|
3261
|
-
defaultToolProtocol: "native",
|
|
3262
2941
|
inputPrice: 0.6,
|
|
3263
2942
|
outputPrice: 2.5,
|
|
3264
2943
|
cacheReadsPrice: 0.15,
|
|
@@ -3269,8 +2948,6 @@ var moonshotModels = {
|
|
|
3269
2948
|
contextWindow: 262144,
|
|
3270
2949
|
supportsImages: false,
|
|
3271
2950
|
supportsPromptCache: true,
|
|
3272
|
-
supportsNativeTools: true,
|
|
3273
|
-
defaultToolProtocol: "native",
|
|
3274
2951
|
inputPrice: 2.4,
|
|
3275
2952
|
// $2.40 per million tokens (cache miss)
|
|
3276
2953
|
outputPrice: 10,
|
|
@@ -3289,8 +2966,6 @@ var moonshotModels = {
|
|
|
3289
2966
|
supportsImages: false,
|
|
3290
2967
|
// Text-only (no image/vision support)
|
|
3291
2968
|
supportsPromptCache: true,
|
|
3292
|
-
supportsNativeTools: true,
|
|
3293
|
-
defaultToolProtocol: "native",
|
|
3294
2969
|
inputPrice: 0.6,
|
|
3295
2970
|
// $0.60 per million tokens (cache miss)
|
|
3296
2971
|
outputPrice: 2.5,
|
|
@@ -3304,6 +2979,21 @@ var moonshotModels = {
|
|
|
3304
2979
|
preserveReasoning: true,
|
|
3305
2980
|
defaultTemperature: 1,
|
|
3306
2981
|
description: `The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems.`
|
|
2982
|
+
},
|
|
2983
|
+
"kimi-k2.5": {
|
|
2984
|
+
maxTokens: 16384,
|
|
2985
|
+
contextWindow: 262144,
|
|
2986
|
+
supportsImages: false,
|
|
2987
|
+
supportsPromptCache: true,
|
|
2988
|
+
inputPrice: 0.6,
|
|
2989
|
+
// $0.60 per million tokens (cache miss)
|
|
2990
|
+
outputPrice: 3,
|
|
2991
|
+
// $3.00 per million tokens
|
|
2992
|
+
cacheReadsPrice: 0.1,
|
|
2993
|
+
// $0.10 per million tokens (cache hit)
|
|
2994
|
+
supportsTemperature: true,
|
|
2995
|
+
defaultTemperature: 1,
|
|
2996
|
+
description: "Kimi K2.5 is the latest generation of Moonshot AI's Kimi series, featuring improved reasoning capabilities and enhanced performance across diverse tasks."
|
|
3307
2997
|
}
|
|
3308
2998
|
};
|
|
3309
2999
|
var MOONSHOT_DEFAULT_TEMPERATURE = 0.6;
|
|
@@ -3315,7 +3005,6 @@ var ollamaDefaultModelInfo = {
|
|
|
3315
3005
|
contextWindow: 2e5,
|
|
3316
3006
|
supportsImages: true,
|
|
3317
3007
|
supportsPromptCache: true,
|
|
3318
|
-
supportsNativeTools: true,
|
|
3319
3008
|
inputPrice: 0,
|
|
3320
3009
|
outputPrice: 0,
|
|
3321
3010
|
cacheWritesPrice: 0,
|
|
@@ -3329,8 +3018,6 @@ var openAiNativeModels = {
|
|
|
3329
3018
|
"gpt-5.1-codex-max": {
|
|
3330
3019
|
maxTokens: 128e3,
|
|
3331
3020
|
contextWindow: 4e5,
|
|
3332
|
-
supportsNativeTools: true,
|
|
3333
|
-
defaultToolProtocol: "native",
|
|
3334
3021
|
includedTools: ["apply_patch"],
|
|
3335
3022
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3336
3023
|
supportsImages: true,
|
|
@@ -3348,8 +3035,6 @@ var openAiNativeModels = {
|
|
|
3348
3035
|
"gpt-5.2": {
|
|
3349
3036
|
maxTokens: 128e3,
|
|
3350
3037
|
contextWindow: 4e5,
|
|
3351
|
-
supportsNativeTools: true,
|
|
3352
|
-
defaultToolProtocol: "native",
|
|
3353
3038
|
includedTools: ["apply_patch"],
|
|
3354
3039
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3355
3040
|
supportsImages: true,
|
|
@@ -3368,11 +3053,26 @@ var openAiNativeModels = {
|
|
|
3368
3053
|
],
|
|
3369
3054
|
description: "GPT-5.2: Our flagship model for coding and agentic tasks across industries"
|
|
3370
3055
|
},
|
|
3056
|
+
"gpt-5.2-codex": {
|
|
3057
|
+
maxTokens: 128e3,
|
|
3058
|
+
contextWindow: 4e5,
|
|
3059
|
+
includedTools: ["apply_patch"],
|
|
3060
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3061
|
+
supportsImages: true,
|
|
3062
|
+
supportsPromptCache: true,
|
|
3063
|
+
promptCacheRetention: "24h",
|
|
3064
|
+
supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
|
|
3065
|
+
reasoningEffort: "medium",
|
|
3066
|
+
inputPrice: 1.75,
|
|
3067
|
+
outputPrice: 14,
|
|
3068
|
+
cacheReadsPrice: 0.175,
|
|
3069
|
+
supportsTemperature: false,
|
|
3070
|
+
tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 3.5, outputPrice: 28, cacheReadsPrice: 0.35 }],
|
|
3071
|
+
description: "GPT-5.2 Codex: Our most intelligent coding model optimized for long-horizon, agentic coding tasks"
|
|
3072
|
+
},
|
|
3371
3073
|
"gpt-5.2-chat-latest": {
|
|
3372
3074
|
maxTokens: 16384,
|
|
3373
3075
|
contextWindow: 128e3,
|
|
3374
|
-
supportsNativeTools: true,
|
|
3375
|
-
defaultToolProtocol: "native",
|
|
3376
3076
|
includedTools: ["apply_patch"],
|
|
3377
3077
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3378
3078
|
supportsImages: true,
|
|
@@ -3385,8 +3085,6 @@ var openAiNativeModels = {
|
|
|
3385
3085
|
"gpt-5.1": {
|
|
3386
3086
|
maxTokens: 128e3,
|
|
3387
3087
|
contextWindow: 4e5,
|
|
3388
|
-
supportsNativeTools: true,
|
|
3389
|
-
defaultToolProtocol: "native",
|
|
3390
3088
|
includedTools: ["apply_patch"],
|
|
3391
3089
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3392
3090
|
supportsImages: true,
|
|
@@ -3408,8 +3106,6 @@ var openAiNativeModels = {
|
|
|
3408
3106
|
"gpt-5.1-codex": {
|
|
3409
3107
|
maxTokens: 128e3,
|
|
3410
3108
|
contextWindow: 4e5,
|
|
3411
|
-
supportsNativeTools: true,
|
|
3412
|
-
defaultToolProtocol: "native",
|
|
3413
3109
|
includedTools: ["apply_patch"],
|
|
3414
3110
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3415
3111
|
supportsImages: true,
|
|
@@ -3427,8 +3123,6 @@ var openAiNativeModels = {
|
|
|
3427
3123
|
"gpt-5.1-codex-mini": {
|
|
3428
3124
|
maxTokens: 128e3,
|
|
3429
3125
|
contextWindow: 4e5,
|
|
3430
|
-
supportsNativeTools: true,
|
|
3431
|
-
defaultToolProtocol: "native",
|
|
3432
3126
|
includedTools: ["apply_patch"],
|
|
3433
3127
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3434
3128
|
supportsImages: true,
|
|
@@ -3445,8 +3139,6 @@ var openAiNativeModels = {
|
|
|
3445
3139
|
"gpt-5": {
|
|
3446
3140
|
maxTokens: 128e3,
|
|
3447
3141
|
contextWindow: 4e5,
|
|
3448
|
-
supportsNativeTools: true,
|
|
3449
|
-
defaultToolProtocol: "native",
|
|
3450
3142
|
includedTools: ["apply_patch"],
|
|
3451
3143
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3452
3144
|
supportsImages: true,
|
|
@@ -3467,8 +3159,6 @@ var openAiNativeModels = {
|
|
|
3467
3159
|
"gpt-5-mini": {
|
|
3468
3160
|
maxTokens: 128e3,
|
|
3469
3161
|
contextWindow: 4e5,
|
|
3470
|
-
supportsNativeTools: true,
|
|
3471
|
-
defaultToolProtocol: "native",
|
|
3472
3162
|
includedTools: ["apply_patch"],
|
|
3473
3163
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3474
3164
|
supportsImages: true,
|
|
@@ -3489,8 +3179,6 @@ var openAiNativeModels = {
|
|
|
3489
3179
|
"gpt-5-codex": {
|
|
3490
3180
|
maxTokens: 128e3,
|
|
3491
3181
|
contextWindow: 4e5,
|
|
3492
|
-
supportsNativeTools: true,
|
|
3493
|
-
defaultToolProtocol: "native",
|
|
3494
3182
|
includedTools: ["apply_patch"],
|
|
3495
3183
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3496
3184
|
supportsImages: true,
|
|
@@ -3507,8 +3195,6 @@ var openAiNativeModels = {
|
|
|
3507
3195
|
"gpt-5-nano": {
|
|
3508
3196
|
maxTokens: 128e3,
|
|
3509
3197
|
contextWindow: 4e5,
|
|
3510
|
-
supportsNativeTools: true,
|
|
3511
|
-
defaultToolProtocol: "native",
|
|
3512
3198
|
includedTools: ["apply_patch"],
|
|
3513
3199
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3514
3200
|
supportsImages: true,
|
|
@@ -3526,8 +3212,6 @@ var openAiNativeModels = {
|
|
|
3526
3212
|
"gpt-5-chat-latest": {
|
|
3527
3213
|
maxTokens: 128e3,
|
|
3528
3214
|
contextWindow: 4e5,
|
|
3529
|
-
supportsNativeTools: true,
|
|
3530
|
-
defaultToolProtocol: "native",
|
|
3531
3215
|
includedTools: ["apply_patch"],
|
|
3532
3216
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3533
3217
|
supportsImages: true,
|
|
@@ -3540,8 +3224,6 @@ var openAiNativeModels = {
|
|
|
3540
3224
|
"gpt-4.1": {
|
|
3541
3225
|
maxTokens: 32768,
|
|
3542
3226
|
contextWindow: 1047576,
|
|
3543
|
-
supportsNativeTools: true,
|
|
3544
|
-
defaultToolProtocol: "native",
|
|
3545
3227
|
includedTools: ["apply_patch"],
|
|
3546
3228
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3547
3229
|
supportsImages: true,
|
|
@@ -3557,8 +3239,6 @@ var openAiNativeModels = {
|
|
|
3557
3239
|
"gpt-4.1-mini": {
|
|
3558
3240
|
maxTokens: 32768,
|
|
3559
3241
|
contextWindow: 1047576,
|
|
3560
|
-
supportsNativeTools: true,
|
|
3561
|
-
defaultToolProtocol: "native",
|
|
3562
3242
|
includedTools: ["apply_patch"],
|
|
3563
3243
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3564
3244
|
supportsImages: true,
|
|
@@ -3574,8 +3254,6 @@ var openAiNativeModels = {
|
|
|
3574
3254
|
"gpt-4.1-nano": {
|
|
3575
3255
|
maxTokens: 32768,
|
|
3576
3256
|
contextWindow: 1047576,
|
|
3577
|
-
supportsNativeTools: true,
|
|
3578
|
-
defaultToolProtocol: "native",
|
|
3579
3257
|
includedTools: ["apply_patch"],
|
|
3580
3258
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3581
3259
|
supportsImages: true,
|
|
@@ -3591,8 +3269,6 @@ var openAiNativeModels = {
|
|
|
3591
3269
|
o3: {
|
|
3592
3270
|
maxTokens: 1e5,
|
|
3593
3271
|
contextWindow: 2e5,
|
|
3594
|
-
supportsNativeTools: true,
|
|
3595
|
-
defaultToolProtocol: "native",
|
|
3596
3272
|
supportsImages: true,
|
|
3597
3273
|
supportsPromptCache: true,
|
|
3598
3274
|
inputPrice: 2,
|
|
@@ -3609,8 +3285,6 @@ var openAiNativeModels = {
|
|
|
3609
3285
|
"o3-high": {
|
|
3610
3286
|
maxTokens: 1e5,
|
|
3611
3287
|
contextWindow: 2e5,
|
|
3612
|
-
supportsNativeTools: true,
|
|
3613
|
-
defaultToolProtocol: "native",
|
|
3614
3288
|
supportsImages: true,
|
|
3615
3289
|
supportsPromptCache: true,
|
|
3616
3290
|
inputPrice: 2,
|
|
@@ -3622,8 +3296,6 @@ var openAiNativeModels = {
|
|
|
3622
3296
|
"o3-low": {
|
|
3623
3297
|
maxTokens: 1e5,
|
|
3624
3298
|
contextWindow: 2e5,
|
|
3625
|
-
supportsNativeTools: true,
|
|
3626
|
-
defaultToolProtocol: "native",
|
|
3627
3299
|
supportsImages: true,
|
|
3628
3300
|
supportsPromptCache: true,
|
|
3629
3301
|
inputPrice: 2,
|
|
@@ -3635,8 +3307,6 @@ var openAiNativeModels = {
|
|
|
3635
3307
|
"o4-mini": {
|
|
3636
3308
|
maxTokens: 1e5,
|
|
3637
3309
|
contextWindow: 2e5,
|
|
3638
|
-
supportsNativeTools: true,
|
|
3639
|
-
defaultToolProtocol: "native",
|
|
3640
3310
|
supportsImages: true,
|
|
3641
3311
|
supportsPromptCache: true,
|
|
3642
3312
|
inputPrice: 1.1,
|
|
@@ -3653,8 +3323,6 @@ var openAiNativeModels = {
|
|
|
3653
3323
|
"o4-mini-high": {
|
|
3654
3324
|
maxTokens: 1e5,
|
|
3655
3325
|
contextWindow: 2e5,
|
|
3656
|
-
supportsNativeTools: true,
|
|
3657
|
-
defaultToolProtocol: "native",
|
|
3658
3326
|
supportsImages: true,
|
|
3659
3327
|
supportsPromptCache: true,
|
|
3660
3328
|
inputPrice: 1.1,
|
|
@@ -3666,8 +3334,6 @@ var openAiNativeModels = {
|
|
|
3666
3334
|
"o4-mini-low": {
|
|
3667
3335
|
maxTokens: 1e5,
|
|
3668
3336
|
contextWindow: 2e5,
|
|
3669
|
-
supportsNativeTools: true,
|
|
3670
|
-
defaultToolProtocol: "native",
|
|
3671
3337
|
supportsImages: true,
|
|
3672
3338
|
supportsPromptCache: true,
|
|
3673
3339
|
inputPrice: 1.1,
|
|
@@ -3679,8 +3345,6 @@ var openAiNativeModels = {
|
|
|
3679
3345
|
"o3-mini": {
|
|
3680
3346
|
maxTokens: 1e5,
|
|
3681
3347
|
contextWindow: 2e5,
|
|
3682
|
-
supportsNativeTools: true,
|
|
3683
|
-
defaultToolProtocol: "native",
|
|
3684
3348
|
supportsImages: false,
|
|
3685
3349
|
supportsPromptCache: true,
|
|
3686
3350
|
inputPrice: 1.1,
|
|
@@ -3693,8 +3357,6 @@ var openAiNativeModels = {
|
|
|
3693
3357
|
"o3-mini-high": {
|
|
3694
3358
|
maxTokens: 1e5,
|
|
3695
3359
|
contextWindow: 2e5,
|
|
3696
|
-
supportsNativeTools: true,
|
|
3697
|
-
defaultToolProtocol: "native",
|
|
3698
3360
|
supportsImages: false,
|
|
3699
3361
|
supportsPromptCache: true,
|
|
3700
3362
|
inputPrice: 1.1,
|
|
@@ -3706,8 +3368,6 @@ var openAiNativeModels = {
|
|
|
3706
3368
|
"o3-mini-low": {
|
|
3707
3369
|
maxTokens: 1e5,
|
|
3708
3370
|
contextWindow: 2e5,
|
|
3709
|
-
supportsNativeTools: true,
|
|
3710
|
-
defaultToolProtocol: "native",
|
|
3711
3371
|
supportsImages: false,
|
|
3712
3372
|
supportsPromptCache: true,
|
|
3713
3373
|
inputPrice: 1.1,
|
|
@@ -3719,8 +3379,6 @@ var openAiNativeModels = {
|
|
|
3719
3379
|
o1: {
|
|
3720
3380
|
maxTokens: 1e5,
|
|
3721
3381
|
contextWindow: 2e5,
|
|
3722
|
-
supportsNativeTools: true,
|
|
3723
|
-
defaultToolProtocol: "native",
|
|
3724
3382
|
supportsImages: true,
|
|
3725
3383
|
supportsPromptCache: true,
|
|
3726
3384
|
inputPrice: 15,
|
|
@@ -3731,8 +3389,6 @@ var openAiNativeModels = {
|
|
|
3731
3389
|
"o1-preview": {
|
|
3732
3390
|
maxTokens: 32768,
|
|
3733
3391
|
contextWindow: 128e3,
|
|
3734
|
-
supportsNativeTools: true,
|
|
3735
|
-
defaultToolProtocol: "native",
|
|
3736
3392
|
supportsImages: true,
|
|
3737
3393
|
supportsPromptCache: true,
|
|
3738
3394
|
inputPrice: 15,
|
|
@@ -3743,8 +3399,6 @@ var openAiNativeModels = {
|
|
|
3743
3399
|
"o1-mini": {
|
|
3744
3400
|
maxTokens: 65536,
|
|
3745
3401
|
contextWindow: 128e3,
|
|
3746
|
-
supportsNativeTools: true,
|
|
3747
|
-
defaultToolProtocol: "native",
|
|
3748
3402
|
supportsImages: true,
|
|
3749
3403
|
supportsPromptCache: true,
|
|
3750
3404
|
inputPrice: 1.1,
|
|
@@ -3755,8 +3409,6 @@ var openAiNativeModels = {
|
|
|
3755
3409
|
"gpt-4o": {
|
|
3756
3410
|
maxTokens: 16384,
|
|
3757
3411
|
contextWindow: 128e3,
|
|
3758
|
-
supportsNativeTools: true,
|
|
3759
|
-
defaultToolProtocol: "native",
|
|
3760
3412
|
supportsImages: true,
|
|
3761
3413
|
supportsPromptCache: true,
|
|
3762
3414
|
inputPrice: 2.5,
|
|
@@ -3770,8 +3422,6 @@ var openAiNativeModels = {
|
|
|
3770
3422
|
"gpt-4o-mini": {
|
|
3771
3423
|
maxTokens: 16384,
|
|
3772
3424
|
contextWindow: 128e3,
|
|
3773
|
-
supportsNativeTools: true,
|
|
3774
|
-
defaultToolProtocol: "native",
|
|
3775
3425
|
supportsImages: true,
|
|
3776
3426
|
supportsPromptCache: true,
|
|
3777
3427
|
inputPrice: 0.15,
|
|
@@ -3785,8 +3435,6 @@ var openAiNativeModels = {
|
|
|
3785
3435
|
"codex-mini-latest": {
|
|
3786
3436
|
maxTokens: 16384,
|
|
3787
3437
|
contextWindow: 2e5,
|
|
3788
|
-
supportsNativeTools: true,
|
|
3789
|
-
defaultToolProtocol: "native",
|
|
3790
3438
|
supportsImages: false,
|
|
3791
3439
|
supportsPromptCache: false,
|
|
3792
3440
|
inputPrice: 1.5,
|
|
@@ -3799,8 +3447,6 @@ var openAiNativeModels = {
|
|
|
3799
3447
|
"gpt-5-2025-08-07": {
|
|
3800
3448
|
maxTokens: 128e3,
|
|
3801
3449
|
contextWindow: 4e5,
|
|
3802
|
-
supportsNativeTools: true,
|
|
3803
|
-
defaultToolProtocol: "native",
|
|
3804
3450
|
includedTools: ["apply_patch"],
|
|
3805
3451
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3806
3452
|
supportsImages: true,
|
|
@@ -3821,8 +3467,6 @@ var openAiNativeModels = {
|
|
|
3821
3467
|
"gpt-5-mini-2025-08-07": {
|
|
3822
3468
|
maxTokens: 128e3,
|
|
3823
3469
|
contextWindow: 4e5,
|
|
3824
|
-
supportsNativeTools: true,
|
|
3825
|
-
defaultToolProtocol: "native",
|
|
3826
3470
|
includedTools: ["apply_patch"],
|
|
3827
3471
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3828
3472
|
supportsImages: true,
|
|
@@ -3843,8 +3487,6 @@ var openAiNativeModels = {
|
|
|
3843
3487
|
"gpt-5-nano-2025-08-07": {
|
|
3844
3488
|
maxTokens: 128e3,
|
|
3845
3489
|
contextWindow: 4e5,
|
|
3846
|
-
supportsNativeTools: true,
|
|
3847
|
-
defaultToolProtocol: "native",
|
|
3848
3490
|
includedTools: ["apply_patch"],
|
|
3849
3491
|
excludedTools: ["apply_diff", "write_to_file"],
|
|
3850
3492
|
supportsImages: true,
|
|
@@ -3866,14 +3508,151 @@ var openAiModelInfoSaneDefaults = {
|
|
|
3866
3508
|
supportsImages: true,
|
|
3867
3509
|
supportsPromptCache: false,
|
|
3868
3510
|
inputPrice: 0,
|
|
3869
|
-
outputPrice: 0
|
|
3870
|
-
supportsNativeTools: true,
|
|
3871
|
-
defaultToolProtocol: "native"
|
|
3511
|
+
outputPrice: 0
|
|
3872
3512
|
};
|
|
3873
3513
|
var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
|
|
3874
3514
|
var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
|
|
3875
3515
|
var OPENAI_AZURE_AI_INFERENCE_PATH = "/models/chat/completions";
|
|
3876
3516
|
|
|
3517
|
+
// src/providers/openai-codex.ts
|
|
3518
|
+
var openAiCodexDefaultModelId = "gpt-5.2-codex";
|
|
3519
|
+
var openAiCodexModels = {
|
|
3520
|
+
"gpt-5.1-codex-max": {
|
|
3521
|
+
maxTokens: 128e3,
|
|
3522
|
+
contextWindow: 4e5,
|
|
3523
|
+
includedTools: ["apply_patch"],
|
|
3524
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3525
|
+
supportsImages: true,
|
|
3526
|
+
supportsPromptCache: true,
|
|
3527
|
+
supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
|
|
3528
|
+
reasoningEffort: "xhigh",
|
|
3529
|
+
// Subscription-based: no per-token costs
|
|
3530
|
+
inputPrice: 0,
|
|
3531
|
+
outputPrice: 0,
|
|
3532
|
+
supportsTemperature: false,
|
|
3533
|
+
description: "GPT-5.1 Codex Max: Maximum capability coding model via ChatGPT subscription"
|
|
3534
|
+
},
|
|
3535
|
+
"gpt-5.1-codex": {
|
|
3536
|
+
maxTokens: 128e3,
|
|
3537
|
+
contextWindow: 4e5,
|
|
3538
|
+
includedTools: ["apply_patch"],
|
|
3539
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3540
|
+
supportsImages: true,
|
|
3541
|
+
supportsPromptCache: true,
|
|
3542
|
+
supportsReasoningEffort: ["low", "medium", "high"],
|
|
3543
|
+
reasoningEffort: "medium",
|
|
3544
|
+
// Subscription-based: no per-token costs
|
|
3545
|
+
inputPrice: 0,
|
|
3546
|
+
outputPrice: 0,
|
|
3547
|
+
supportsTemperature: false,
|
|
3548
|
+
description: "GPT-5.1 Codex: GPT-5.1 optimized for agentic coding via ChatGPT subscription"
|
|
3549
|
+
},
|
|
3550
|
+
"gpt-5.2-codex": {
|
|
3551
|
+
maxTokens: 128e3,
|
|
3552
|
+
contextWindow: 4e5,
|
|
3553
|
+
includedTools: ["apply_patch"],
|
|
3554
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3555
|
+
supportsImages: true,
|
|
3556
|
+
supportsPromptCache: true,
|
|
3557
|
+
supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
|
|
3558
|
+
reasoningEffort: "medium",
|
|
3559
|
+
inputPrice: 0,
|
|
3560
|
+
outputPrice: 0,
|
|
3561
|
+
supportsTemperature: false,
|
|
3562
|
+
description: "GPT-5.2 Codex: OpenAI's flagship coding model via ChatGPT subscription"
|
|
3563
|
+
},
|
|
3564
|
+
"gpt-5.1": {
|
|
3565
|
+
maxTokens: 128e3,
|
|
3566
|
+
contextWindow: 4e5,
|
|
3567
|
+
includedTools: ["apply_patch"],
|
|
3568
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3569
|
+
supportsImages: true,
|
|
3570
|
+
supportsPromptCache: true,
|
|
3571
|
+
supportsReasoningEffort: ["none", "low", "medium", "high"],
|
|
3572
|
+
reasoningEffort: "medium",
|
|
3573
|
+
// Subscription-based: no per-token costs
|
|
3574
|
+
inputPrice: 0,
|
|
3575
|
+
outputPrice: 0,
|
|
3576
|
+
supportsVerbosity: true,
|
|
3577
|
+
supportsTemperature: false,
|
|
3578
|
+
description: "GPT-5.1: General GPT-5.1 model via ChatGPT subscription"
|
|
3579
|
+
},
|
|
3580
|
+
"gpt-5": {
|
|
3581
|
+
maxTokens: 128e3,
|
|
3582
|
+
contextWindow: 4e5,
|
|
3583
|
+
includedTools: ["apply_patch"],
|
|
3584
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3585
|
+
supportsImages: true,
|
|
3586
|
+
supportsPromptCache: true,
|
|
3587
|
+
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
|
|
3588
|
+
reasoningEffort: "medium",
|
|
3589
|
+
// Subscription-based: no per-token costs
|
|
3590
|
+
inputPrice: 0,
|
|
3591
|
+
outputPrice: 0,
|
|
3592
|
+
supportsVerbosity: true,
|
|
3593
|
+
supportsTemperature: false,
|
|
3594
|
+
description: "GPT-5: General GPT-5 model via ChatGPT subscription"
|
|
3595
|
+
},
|
|
3596
|
+
"gpt-5-codex": {
|
|
3597
|
+
maxTokens: 128e3,
|
|
3598
|
+
contextWindow: 4e5,
|
|
3599
|
+
includedTools: ["apply_patch"],
|
|
3600
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3601
|
+
supportsImages: true,
|
|
3602
|
+
supportsPromptCache: true,
|
|
3603
|
+
supportsReasoningEffort: ["low", "medium", "high"],
|
|
3604
|
+
reasoningEffort: "medium",
|
|
3605
|
+
// Subscription-based: no per-token costs
|
|
3606
|
+
inputPrice: 0,
|
|
3607
|
+
outputPrice: 0,
|
|
3608
|
+
supportsTemperature: false,
|
|
3609
|
+
description: "GPT-5 Codex: GPT-5 optimized for agentic coding via ChatGPT subscription"
|
|
3610
|
+
},
|
|
3611
|
+
"gpt-5-codex-mini": {
|
|
3612
|
+
maxTokens: 128e3,
|
|
3613
|
+
contextWindow: 4e5,
|
|
3614
|
+
includedTools: ["apply_patch"],
|
|
3615
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3616
|
+
supportsImages: true,
|
|
3617
|
+
supportsPromptCache: true,
|
|
3618
|
+
supportsReasoningEffort: ["low", "medium", "high"],
|
|
3619
|
+
reasoningEffort: "medium",
|
|
3620
|
+
// Subscription-based: no per-token costs
|
|
3621
|
+
inputPrice: 0,
|
|
3622
|
+
outputPrice: 0,
|
|
3623
|
+
supportsTemperature: false,
|
|
3624
|
+
description: "GPT-5 Codex Mini: Faster coding model via ChatGPT subscription"
|
|
3625
|
+
},
|
|
3626
|
+
"gpt-5.1-codex-mini": {
|
|
3627
|
+
maxTokens: 128e3,
|
|
3628
|
+
contextWindow: 4e5,
|
|
3629
|
+
includedTools: ["apply_patch"],
|
|
3630
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3631
|
+
supportsImages: true,
|
|
3632
|
+
supportsPromptCache: true,
|
|
3633
|
+
supportsReasoningEffort: ["low", "medium", "high"],
|
|
3634
|
+
reasoningEffort: "medium",
|
|
3635
|
+
inputPrice: 0,
|
|
3636
|
+
outputPrice: 0,
|
|
3637
|
+
supportsTemperature: false,
|
|
3638
|
+
description: "GPT-5.1 Codex Mini: Faster version for coding tasks via ChatGPT subscription"
|
|
3639
|
+
},
|
|
3640
|
+
"gpt-5.2": {
|
|
3641
|
+
maxTokens: 128e3,
|
|
3642
|
+
contextWindow: 4e5,
|
|
3643
|
+
includedTools: ["apply_patch"],
|
|
3644
|
+
excludedTools: ["apply_diff", "write_to_file"],
|
|
3645
|
+
supportsImages: true,
|
|
3646
|
+
supportsPromptCache: true,
|
|
3647
|
+
supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
|
|
3648
|
+
reasoningEffort: "medium",
|
|
3649
|
+
inputPrice: 0,
|
|
3650
|
+
outputPrice: 0,
|
|
3651
|
+
supportsTemperature: false,
|
|
3652
|
+
description: "GPT-5.2: Latest GPT model via ChatGPT subscription"
|
|
3653
|
+
}
|
|
3654
|
+
};
|
|
3655
|
+
|
|
3877
3656
|
// src/providers/openrouter.ts
|
|
3878
3657
|
var openRouterDefaultModelId = "anthropic/claude-sonnet-4.5";
|
|
3879
3658
|
var openRouterDefaultModelInfo = {
|
|
@@ -3881,7 +3660,6 @@ var openRouterDefaultModelInfo = {
|
|
|
3881
3660
|
contextWindow: 2e5,
|
|
3882
3661
|
supportsImages: true,
|
|
3883
3662
|
supportsPromptCache: true,
|
|
3884
|
-
supportsNativeTools: true,
|
|
3885
3663
|
inputPrice: 3,
|
|
3886
3664
|
outputPrice: 15,
|
|
3887
3665
|
cacheWritesPrice: 3.75,
|
|
@@ -3955,8 +3733,6 @@ var qwenCodeModels = {
|
|
|
3955
3733
|
contextWindow: 1e6,
|
|
3956
3734
|
supportsImages: false,
|
|
3957
3735
|
supportsPromptCache: false,
|
|
3958
|
-
supportsNativeTools: true,
|
|
3959
|
-
defaultToolProtocol: "native",
|
|
3960
3736
|
inputPrice: 0,
|
|
3961
3737
|
outputPrice: 0,
|
|
3962
3738
|
cacheWritesPrice: 0,
|
|
@@ -3968,8 +3744,6 @@ var qwenCodeModels = {
|
|
|
3968
3744
|
contextWindow: 1e6,
|
|
3969
3745
|
supportsImages: false,
|
|
3970
3746
|
supportsPromptCache: false,
|
|
3971
|
-
supportsNativeTools: true,
|
|
3972
|
-
defaultToolProtocol: "native",
|
|
3973
3747
|
inputPrice: 0,
|
|
3974
3748
|
outputPrice: 0,
|
|
3975
3749
|
cacheWritesPrice: 0,
|
|
@@ -3985,8 +3759,6 @@ var requestyDefaultModelInfo = {
|
|
|
3985
3759
|
contextWindow: 2e5,
|
|
3986
3760
|
supportsImages: true,
|
|
3987
3761
|
supportsPromptCache: true,
|
|
3988
|
-
supportsNativeTools: true,
|
|
3989
|
-
defaultToolProtocol: "native",
|
|
3990
3762
|
inputPrice: 3,
|
|
3991
3763
|
outputPrice: 15,
|
|
3992
3764
|
cacheWritesPrice: 3.75,
|
|
@@ -4042,8 +3814,6 @@ var sambaNovaModels = {
|
|
|
4042
3814
|
contextWindow: 16384,
|
|
4043
3815
|
supportsImages: false,
|
|
4044
3816
|
supportsPromptCache: false,
|
|
4045
|
-
supportsNativeTools: true,
|
|
4046
|
-
defaultToolProtocol: "native",
|
|
4047
3817
|
inputPrice: 0.1,
|
|
4048
3818
|
outputPrice: 0.2,
|
|
4049
3819
|
description: "Meta Llama 3.1 8B Instruct model with 16K context window."
|
|
@@ -4053,8 +3823,6 @@ var sambaNovaModels = {
|
|
|
4053
3823
|
contextWindow: 131072,
|
|
4054
3824
|
supportsImages: false,
|
|
4055
3825
|
supportsPromptCache: false,
|
|
4056
|
-
supportsNativeTools: true,
|
|
4057
|
-
defaultToolProtocol: "native",
|
|
4058
3826
|
inputPrice: 0.6,
|
|
4059
3827
|
outputPrice: 1.2,
|
|
4060
3828
|
description: "Meta Llama 3.3 70B Instruct model with 128K context window."
|
|
@@ -4065,8 +3833,6 @@ var sambaNovaModels = {
|
|
|
4065
3833
|
supportsImages: false,
|
|
4066
3834
|
supportsPromptCache: false,
|
|
4067
3835
|
supportsReasoningBudget: true,
|
|
4068
|
-
supportsNativeTools: true,
|
|
4069
|
-
defaultToolProtocol: "native",
|
|
4070
3836
|
inputPrice: 5,
|
|
4071
3837
|
outputPrice: 7,
|
|
4072
3838
|
description: "DeepSeek R1 reasoning model with 32K context window."
|
|
@@ -4076,8 +3842,6 @@ var sambaNovaModels = {
|
|
|
4076
3842
|
contextWindow: 32768,
|
|
4077
3843
|
supportsImages: false,
|
|
4078
3844
|
supportsPromptCache: false,
|
|
4079
|
-
supportsNativeTools: true,
|
|
4080
|
-
defaultToolProtocol: "native",
|
|
4081
3845
|
inputPrice: 3,
|
|
4082
3846
|
outputPrice: 4.5,
|
|
4083
3847
|
description: "DeepSeek V3 model with 32K context window."
|
|
@@ -4087,8 +3851,6 @@ var sambaNovaModels = {
|
|
|
4087
3851
|
contextWindow: 32768,
|
|
4088
3852
|
supportsImages: false,
|
|
4089
3853
|
supportsPromptCache: false,
|
|
4090
|
-
supportsNativeTools: true,
|
|
4091
|
-
defaultToolProtocol: "native",
|
|
4092
3854
|
inputPrice: 3,
|
|
4093
3855
|
outputPrice: 4.5,
|
|
4094
3856
|
description: "DeepSeek V3.1 model with 32K context window."
|
|
@@ -4098,8 +3860,6 @@ var sambaNovaModels = {
|
|
|
4098
3860
|
contextWindow: 131072,
|
|
4099
3861
|
supportsImages: true,
|
|
4100
3862
|
supportsPromptCache: false,
|
|
4101
|
-
supportsNativeTools: true,
|
|
4102
|
-
defaultToolProtocol: "native",
|
|
4103
3863
|
inputPrice: 0.63,
|
|
4104
3864
|
outputPrice: 1.8,
|
|
4105
3865
|
description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
|
|
@@ -4109,8 +3869,6 @@ var sambaNovaModels = {
|
|
|
4109
3869
|
contextWindow: 8192,
|
|
4110
3870
|
supportsImages: false,
|
|
4111
3871
|
supportsPromptCache: false,
|
|
4112
|
-
supportsNativeTools: true,
|
|
4113
|
-
defaultToolProtocol: "native",
|
|
4114
3872
|
inputPrice: 0.4,
|
|
4115
3873
|
outputPrice: 0.8,
|
|
4116
3874
|
description: "Alibaba Qwen 3 32B model with 8K context window."
|
|
@@ -4120,8 +3878,6 @@ var sambaNovaModels = {
|
|
|
4120
3878
|
contextWindow: 131072,
|
|
4121
3879
|
supportsImages: false,
|
|
4122
3880
|
supportsPromptCache: false,
|
|
4123
|
-
supportsNativeTools: true,
|
|
4124
|
-
defaultToolProtocol: "native",
|
|
4125
3881
|
inputPrice: 0.22,
|
|
4126
3882
|
outputPrice: 0.59,
|
|
4127
3883
|
description: "OpenAI gpt oss 120b model with 128k context window."
|
|
@@ -4135,7 +3891,6 @@ var unboundDefaultModelInfo = {
|
|
|
4135
3891
|
contextWindow: 2e5,
|
|
4136
3892
|
supportsImages: true,
|
|
4137
3893
|
supportsPromptCache: true,
|
|
4138
|
-
supportsNativeTools: true,
|
|
4139
3894
|
inputPrice: 3,
|
|
4140
3895
|
outputPrice: 15,
|
|
4141
3896
|
cacheWritesPrice: 3.75,
|
|
@@ -4149,8 +3904,6 @@ var vertexModels = {
|
|
|
4149
3904
|
maxTokens: 65536,
|
|
4150
3905
|
contextWindow: 1048576,
|
|
4151
3906
|
supportsImages: true,
|
|
4152
|
-
supportsNativeTools: true,
|
|
4153
|
-
defaultToolProtocol: "native",
|
|
4154
3907
|
supportsPromptCache: true,
|
|
4155
3908
|
supportsReasoningEffort: ["low", "high"],
|
|
4156
3909
|
reasoningEffort: "low",
|
|
@@ -4158,16 +3911,19 @@ var vertexModels = {
|
|
|
4158
3911
|
defaultTemperature: 1,
|
|
4159
3912
|
inputPrice: 4,
|
|
4160
3913
|
outputPrice: 18,
|
|
3914
|
+
cacheReadsPrice: 0.4,
|
|
4161
3915
|
tiers: [
|
|
4162
3916
|
{
|
|
4163
3917
|
contextWindow: 2e5,
|
|
4164
3918
|
inputPrice: 2,
|
|
4165
|
-
outputPrice: 12
|
|
3919
|
+
outputPrice: 12,
|
|
3920
|
+
cacheReadsPrice: 0.2
|
|
4166
3921
|
},
|
|
4167
3922
|
{
|
|
4168
3923
|
contextWindow: Infinity,
|
|
4169
3924
|
inputPrice: 4,
|
|
4170
|
-
outputPrice: 18
|
|
3925
|
+
outputPrice: 18,
|
|
3926
|
+
cacheReadsPrice: 0.4
|
|
4171
3927
|
}
|
|
4172
3928
|
]
|
|
4173
3929
|
},
|
|
@@ -4175,24 +3931,19 @@ var vertexModels = {
|
|
|
4175
3931
|
maxTokens: 65536,
|
|
4176
3932
|
contextWindow: 1048576,
|
|
4177
3933
|
supportsImages: true,
|
|
4178
|
-
supportsNativeTools: true,
|
|
4179
|
-
defaultToolProtocol: "native",
|
|
4180
3934
|
supportsPromptCache: true,
|
|
4181
3935
|
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
|
|
4182
3936
|
reasoningEffort: "medium",
|
|
4183
3937
|
supportsTemperature: true,
|
|
4184
3938
|
defaultTemperature: 1,
|
|
4185
|
-
inputPrice: 0.
|
|
4186
|
-
outputPrice:
|
|
4187
|
-
cacheReadsPrice: 0.
|
|
4188
|
-
cacheWritesPrice: 1
|
|
3939
|
+
inputPrice: 0.5,
|
|
3940
|
+
outputPrice: 3,
|
|
3941
|
+
cacheReadsPrice: 0.05
|
|
4189
3942
|
 },
 "gemini-2.5-flash-preview-05-20:thinking": {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 0.15,
 outputPrice: 3.5,
@@ -4204,8 +3955,6 @@ var vertexModels = {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 0.15,
 outputPrice: 0.6
@@ -4214,8 +3963,6 @@ var vertexModels = {
 maxTokens: 64e3,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 0.3,
 outputPrice: 2.5,
@@ -4228,8 +3975,6 @@ var vertexModels = {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: false,
 inputPrice: 0.15,
 outputPrice: 3.5,
@@ -4241,8 +3986,6 @@ var vertexModels = {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: false,
 inputPrice: 0.15,
 outputPrice: 0.6
@@ -4251,8 +3994,6 @@ var vertexModels = {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 2.5,
 outputPrice: 15
@@ -4261,8 +4002,6 @@ var vertexModels = {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 2.5,
 outputPrice: 15
@@ -4271,8 +4010,6 @@ var vertexModels = {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 2.5,
 outputPrice: 15,
@@ -4283,8 +4020,6 @@ var vertexModels = {
 maxTokens: 64e3,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 2.5,
 outputPrice: 15,
@@ -4310,8 +4045,6 @@ var vertexModels = {
 maxTokens: 65535,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: false,
 inputPrice: 0,
 outputPrice: 0
@@ -4320,8 +4053,6 @@ var vertexModels = {
 maxTokens: 8192,
 contextWindow: 2097152,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: false,
 inputPrice: 0,
 outputPrice: 0
@@ -4330,8 +4061,6 @@ var vertexModels = {
 maxTokens: 8192,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 0.15,
 outputPrice: 0.6
@@ -4340,8 +4069,6 @@ var vertexModels = {
 maxTokens: 8192,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: false,
 inputPrice: 0.075,
 outputPrice: 0.3
@@ -4350,8 +4077,6 @@ var vertexModels = {
 maxTokens: 8192,
 contextWindow: 32768,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: false,
 inputPrice: 0,
 outputPrice: 0
@@ -4360,8 +4085,6 @@ var vertexModels = {
 maxTokens: 8192,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 0.075,
 outputPrice: 0.3
@@ -4370,8 +4093,6 @@ var vertexModels = {
 maxTokens: 8192,
 contextWindow: 2097152,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: false,
 inputPrice: 1.25,
 outputPrice: 5
@@ -4382,8 +4103,6 @@ var vertexModels = {
 // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -4415,8 +4134,6 @@ var vertexModels = {
 // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -4447,8 +4164,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -4460,8 +4175,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 5,
 outputPrice: 25,
 cacheWritesPrice: 6.25,
@@ -4473,8 +4186,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -4486,8 +4197,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -4498,8 +4207,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -4512,8 +4219,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -4524,8 +4229,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -4536,8 +4239,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -4548,8 +4249,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -4560,8 +4259,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -4572,8 +4269,6 @@ var vertexModels = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 1.25,
 cacheWritesPrice: 0.3,
@@ -4583,8 +4278,6 @@ var vertexModels = {
 maxTokens: 64e3,
 contextWindow: 1048576,
 supportsImages: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsPromptCache: true,
 inputPrice: 0.1,
 outputPrice: 0.4,
@@ -4598,7 +4291,6 @@ var vertexModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.35,
 outputPrice: 1.15,
 description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
@@ -4608,7 +4300,6 @@ var vertexModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 1.35,
 outputPrice: 5.4,
 description: "DeepSeek R1 (0528). Available in us-central1"
@@ -4618,7 +4309,6 @@ var vertexModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 1.7,
 description: "DeepSeek V3.1. Available in us-west2"
@@ -4628,7 +4318,6 @@ var vertexModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.15,
 outputPrice: 0.6,
 description: "OpenAI gpt-oss 120B. Available in us-central1"
@@ -4638,7 +4327,6 @@ var vertexModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.075,
 outputPrice: 0.3,
 description: "OpenAI gpt-oss 20B. Available in us-central1"
@@ -4648,7 +4336,6 @@ var vertexModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 1,
 outputPrice: 4,
 description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
@@ -4658,10 +4345,18 @@ var vertexModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.25,
 outputPrice: 1,
 description: "Qwen3 235B A22B Instruct. Available in us-south1"
+},
+"moonshotai/kimi-k2-thinking-maas": {
+maxTokens: 16384,
+contextWindow: 262144,
+supportsPromptCache: false,
+supportsImages: false,
+inputPrice: 0.6,
+outputPrice: 2.5,
+description: "Kimi K2 Thinking Model with 256K context window."
 }
 };
 var VERTEX_1M_CONTEXT_MODEL_IDS = ["claude-sonnet-4@20250514", "claude-sonnet-4-5@20250929"];
@@ -4894,8 +4589,6 @@ var xaiModels = {
 contextWindow: 256e3,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.2,
 outputPrice: 1.5,
 cacheWritesPrice: 0.02,
@@ -4909,8 +4602,6 @@ var xaiModels = {
 contextWindow: 2e6,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.2,
 outputPrice: 0.5,
 cacheWritesPrice: 0.05,
@@ -4924,8 +4615,6 @@ var xaiModels = {
 contextWindow: 2e6,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.2,
 outputPrice: 0.5,
 cacheWritesPrice: 0.05,
@@ -4939,8 +4628,6 @@ var xaiModels = {
 contextWindow: 2e6,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.2,
 outputPrice: 0.5,
 cacheWritesPrice: 0.05,
@@ -4954,8 +4641,6 @@ var xaiModels = {
 contextWindow: 2e6,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.2,
 outputPrice: 0.5,
 cacheWritesPrice: 0.05,
@@ -4969,8 +4654,6 @@ var xaiModels = {
 contextWindow: 256e3,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 0.75,
@@ -4984,8 +4667,6 @@ var xaiModels = {
 contextWindow: 131072,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.3,
 outputPrice: 0.5,
 cacheWritesPrice: 0.07,
@@ -5001,8 +4682,6 @@ var xaiModels = {
 contextWindow: 131072,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 0.75,
@@ -5099,7 +4778,6 @@ var vercelAiGatewayDefaultModelInfo = {
 contextWindow: 2e5,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -5116,8 +4794,6 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.2,
 cacheWritesPrice: 0,
@@ -5129,8 +4805,6 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.2,
 outputPrice: 1.1,
 cacheWritesPrice: 0,
@@ -5142,8 +4816,6 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 2.2,
 outputPrice: 8.9,
 cacheWritesPrice: 0,
@@ -5155,8 +4827,6 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 1.1,
 outputPrice: 4.5,
 cacheWritesPrice: 0,
@@ -5168,8 +4838,6 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 cacheWritesPrice: 0,
@@ -5181,21 +4849,28 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 1.8,
 cacheWritesPrice: 0,
 cacheReadsPrice: 0.11,
 description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
 },
+"glm-4.6v": {
+maxTokens: 16384,
+contextWindow: 131072,
+supportsImages: true,
+supportsPromptCache: true,
+inputPrice: 0.3,
+outputPrice: 0.9,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0.05,
+description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
+},
 "glm-4.6": {
 maxTokens: 16384,
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.2,
 cacheWritesPrice: 0,
@@ -5207,8 +4882,6 @@ var internationalZAiModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsReasoningEffort: ["disable", "medium"],
 reasoningEffort: "medium",
 preserveReasoning: true,
@@ -5218,13 +4891,55 @@ var internationalZAiModels = {
 cacheReadsPrice: 0.11,
 description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
 },
+"glm-4.7-flash": {
+maxTokens: 16384,
+contextWindow: 2e5,
+supportsImages: false,
+supportsPromptCache: true,
+inputPrice: 0,
+outputPrice: 0,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0,
+description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
+},
+"glm-4.7-flashx": {
+maxTokens: 16384,
+contextWindow: 2e5,
+supportsImages: false,
+supportsPromptCache: true,
+inputPrice: 0.07,
+outputPrice: 0.4,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0.01,
+description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
+},
+"glm-4.6v-flash": {
+maxTokens: 16384,
+contextWindow: 131072,
+supportsImages: true,
+supportsPromptCache: true,
+inputPrice: 0,
+outputPrice: 0,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0,
+description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
+},
+"glm-4.6v-flashx": {
+maxTokens: 16384,
+contextWindow: 131072,
+supportsImages: true,
+supportsPromptCache: true,
+inputPrice: 0.04,
+outputPrice: 0.4,
+cacheWritesPrice: 0,
+cacheReadsPrice: 4e-3,
+description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
+},
 "glm-4-32b-0414-128k": {
 maxTokens: 16384,
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.1,
 outputPrice: 0.1,
 cacheWritesPrice: 0,
@@ -5239,8 +4954,6 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.29,
 outputPrice: 1.14,
 cacheWritesPrice: 0,
@@ -5252,8 +4965,6 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.1,
 outputPrice: 0.6,
 cacheWritesPrice: 0,
@@ -5265,8 +4976,6 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.29,
 outputPrice: 1.14,
 cacheWritesPrice: 0,
@@ -5278,8 +4987,6 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.1,
 outputPrice: 0.6,
 cacheWritesPrice: 0,
@@ -5291,8 +4998,6 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 cacheWritesPrice: 0,
@@ -5304,8 +5009,6 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: true,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.29,
 outputPrice: 0.93,
 cacheWritesPrice: 0,
@@ -5317,8 +5020,6 @@ var mainlandZAiModels = {
 contextWindow: 204800,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 inputPrice: 0.29,
 outputPrice: 1.14,
 cacheWritesPrice: 0,
@@ -5330,8 +5031,6 @@ var mainlandZAiModels = {
 contextWindow: 204800,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 supportsReasoningEffort: ["disable", "medium"],
 reasoningEffort: "medium",
 preserveReasoning: true,
@@ -5340,6 +5039,61 @@ var mainlandZAiModels = {
 cacheWritesPrice: 0,
 cacheReadsPrice: 0.057,
 description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
+},
+"glm-4.7-flash": {
+maxTokens: 16384,
+contextWindow: 204800,
+supportsImages: false,
+supportsPromptCache: true,
+inputPrice: 0,
+outputPrice: 0,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0,
+description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
+},
+"glm-4.7-flashx": {
+maxTokens: 16384,
+contextWindow: 204800,
+supportsImages: false,
+supportsPromptCache: true,
+inputPrice: 0.035,
+outputPrice: 0.2,
+cacheWritesPrice: 0,
+cacheReadsPrice: 5e-3,
+description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
+},
+"glm-4.6v": {
+maxTokens: 16384,
+contextWindow: 131072,
+supportsImages: true,
+supportsPromptCache: true,
+inputPrice: 0.15,
+outputPrice: 0.45,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0.025,
+description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
+},
+"glm-4.6v-flash": {
+maxTokens: 16384,
+contextWindow: 131072,
+supportsImages: true,
+supportsPromptCache: true,
+inputPrice: 0,
+outputPrice: 0,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0,
+description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
+},
+"glm-4.6v-flashx": {
+maxTokens: 16384,
+contextWindow: 131072,
+supportsImages: true,
+supportsPromptCache: true,
+inputPrice: 0.02,
+outputPrice: 0.2,
+cacheWritesPrice: 0,
+cacheReadsPrice: 2e-3,
+description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
 }
 };
 var ZAI_DEFAULT_TEMPERATURE = 0.6;
@@ -5373,7 +5127,6 @@ var deepInfraDefaultModelInfo = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
-supportsNativeTools: true,
 inputPrice: 0.3,
 outputPrice: 1.2,
 description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
@@ -5387,8 +5140,6 @@ var minimaxModels = {
 contextWindow: 192e3,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 includedTools: ["search_and_replace"],
 excludedTools: ["apply_diff"],
 preserveReasoning: true,
@@ -5403,8 +5154,6 @@ var minimaxModels = {
 contextWindow: 192e3,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 includedTools: ["search_and_replace"],
 excludedTools: ["apply_diff"],
 preserveReasoning: true,
@@ -5419,8 +5168,6 @@ var minimaxModels = {
 contextWindow: 192e3,
 supportsImages: false,
 supportsPromptCache: true,
-supportsNativeTools: true,
-defaultToolProtocol: "native",
 includedTools: ["search_and_replace"],
 excludedTools: ["apply_diff"],
 preserveReasoning: true,
@@ -5475,6 +5222,8 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
 case "openai-native":
 return "gpt-4o";
 // Based on openai-native patterns
+case "openai-codex":
+return openAiCodexDefaultModelId;
 case "mistral":
 return mistralDefaultModelId;
 case "openai":
@@ -5490,8 +5239,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
 return deepInfraDefaultModelId;
 case "vscode-lm":
 return vscodeLlmDefaultModelId;
-case "claude-code":
-return claudeCodeDefaultModelId;
 case "cerebras":
 return cerebrasDefaultModelId;
 case "sambanova":
@@ -5549,7 +5296,6 @@ var providerNames = [
 "bedrock",
 "baseten",
 "cerebras",
-"claude-code",
 "doubao",
 "deepseek",
 "featherless",
@@ -5560,6 +5306,7 @@ var providerNames = [
 "mistral",
 "moonshot",
 "minimax",
+"openai-codex",
 "openai-native",
 "qwen-code",
 "roo",
@@ -5578,9 +5325,7 @@ var providerSettingsEntrySchema = import_zod8.z.object({
 });
 var baseProviderSettingsSchema = import_zod8.z.object({
 includeMaxTokens: import_zod8.z.boolean().optional(),
-diffEnabled: import_zod8.z.boolean().optional(),
 todoListEnabled: import_zod8.z.boolean().optional(),
-fuzzyMatchThreshold: import_zod8.z.number().optional(),
 modelTemperature: import_zod8.z.number().nullish(),
 rateLimitSeconds: import_zod8.z.number().optional(),
 consecutiveMistakeLimit: import_zod8.z.number().min(0).optional(),
@@ -5590,9 +5335,7 @@ var baseProviderSettingsSchema = import_zod8.z.object({
 modelMaxTokens: import_zod8.z.number().optional(),
 modelMaxThinkingTokens: import_zod8.z.number().optional(),
 // Model verbosity.
-verbosity: verbosityLevelsSchema.optional()
-// Tool protocol override for this profile.
-toolProtocol: import_zod8.z.enum(["xml", "native"]).optional()
+verbosity: verbosityLevelsSchema.optional()
 });
 var apiModelIdProviderModelSchema = baseProviderSettingsSchema.extend({
 apiModelId: import_zod8.z.string().optional()
@@ -5604,7 +5347,6 @@ var anthropicSchema = apiModelIdProviderModelSchema.extend({
 anthropicBeta1MContext: import_zod8.z.boolean().optional()
 // Enable 'context-1m-2025-08-07' beta for 1M context window.
 });
-var claudeCodeSchema = apiModelIdProviderModelSchema.extend({});
 var openRouterSchema = baseProviderSettingsSchema.extend({
 openRouterApiKey: import_zod8.z.string().optional(),
 openRouterModelId: import_zod8.z.string().optional(),
@@ -5686,6 +5428,9 @@ var geminiCliSchema = apiModelIdProviderModelSchema.extend({
 geminiCliOAuthPath: import_zod8.z.string().optional(),
 geminiCliProjectId: import_zod8.z.string().optional()
 });
+var openAiCodexSchema = apiModelIdProviderModelSchema.extend({
+// No additional settings needed - uses OAuth authentication
+});
 var openAiNativeSchema = apiModelIdProviderModelSchema.extend({
 openAiNativeApiKey: import_zod8.z.string().optional(),
 openAiNativeBaseUrl: import_zod8.z.string().optional(),
@@ -5790,7 +5535,6 @@ var defaultSchema = import_zod8.z.object({
 });
 var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiProvider", [
 anthropicSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("anthropic") })),
-claudeCodeSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("claude-code") })),
 openRouterSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openrouter") })),
 bedrockSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("bedrock") })),
 vertexSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("vertex") })),
@@ -5800,6 +5544,7 @@ var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiP
 lmStudioSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("lmstudio") })),
 geminiSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("gemini") })),
 geminiCliSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("gemini-cli") })),
+openAiCodexSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openai-codex") })),
 openAiNativeSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openai-native") })),
 mistralSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("mistral") })),
 deepSeekSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("deepseek") })),
@@ -5830,7 +5575,6 @@ var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiP
 var providerSettingsSchema = import_zod8.z.object({
 apiProvider: providerNamesSchema.optional(),
 ...anthropicSchema.shape,
-...claudeCodeSchema.shape,
 ...openRouterSchema.shape,
 ...bedrockSchema.shape,
 ...vertexSchema.shape,
@@ -5840,6 +5584,7 @@ var providerSettingsSchema = import_zod8.z.object({
 ...lmStudioSchema.shape,
 ...geminiSchema.shape,
 ...geminiCliSchema.shape,
+...openAiCodexSchema.shape,
 ...openAiNativeSchema.shape,
 ...mistralSchema.shape,
 ...deepSeekSchema.shape,
@@ -5894,10 +5639,10 @@ var getModelId = (settings) => {
 var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key) && !isCustomProvider(key) && !isFauxProvider(key);
 var modelIdKeysByProvider = {
 anthropic: "apiModelId",
-"claude-code": "apiModelId",
 openrouter: "openRouterModelId",
 bedrock: "apiModelId",
 vertex: "apiModelId",
+"openai-codex": "apiModelId",
 "openai-native": "openAiModelId",
 ollama: "ollamaModelId",
 lmstudio: "lmStudioModelId",
@@ -5927,7 +5672,7 @@ var modelIdKeysByProvider = {
 roo: "apiModelId",
 "vercel-ai-gateway": "vercelAiGatewayModelId"
 };
-var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "
+var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "bedrock", "minimax"];
 var getApiProtocol = (provider, modelId) => {
 if (provider && ANTHROPIC_STYLE_PROVIDERS.includes(provider)) {
 return "anthropic";
@@ -5956,7 +5701,6 @@ var MODELS_BY_PROVIDER = {
 label: "Cerebras",
 models: Object.keys(cerebrasModels)
 },
-"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
 deepseek: {
 id: "deepseek",
 label: "DeepSeek",
@@ -5999,6 +5743,11 @@ var MODELS_BY_PROVIDER = {
 label: "MiniMax",
 models: Object.keys(minimaxModels)
 },
+"openai-codex": {
+id: "openai-codex",
+label: "OpenAI - ChatGPT Plus/Pro",
+models: Object.keys(openAiCodexModels)
+},
 "openai-native": {
 id: "openai-native",
 label: "OpenAI",
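The hunks above replace the removed claude-code provider with the new "openai-codex" provider across the provider name list, the zod profile schemas, the default-model lookup, and MODELS_BY_PROVIDER. A minimal consumer-side sketch, assuming getProviderDefaultModelId, providerSettingsSchema, and MODELS_BY_PROVIDER are exported from the package as shown in the diff (the surrounding usage is an assumption, not documented API):

// Sketch only: resolve and validate an "openai-codex" profile.
import { MODELS_BY_PROVIDER, getProviderDefaultModelId, providerSettingsSchema } from "@roo-code/types"

// Default model id for the new provider (the openAiCodexDefaultModelId case added above).
const defaultModelId = getProviderDefaultModelId("openai-codex", { isChina: false })

// Per modelIdKeysByProvider, openai-codex profiles are keyed by apiModelId.
const profile = providerSettingsSchema.parse({
  apiProvider: "openai-codex",
  apiModelId: defaultModelId,
})

// Entries registered for the provider picker.
console.log(MODELS_BY_PROVIDER["openai-codex"].label, MODELS_BY_PROVIDER["openai-codex"].models, profile)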
@@ -6055,16 +5804,6 @@ var historyItemSchema = import_zod9.z.object({
 size: import_zod9.z.number().optional(),
 workspace: import_zod9.z.string().optional(),
 mode: import_zod9.z.string().optional(),
-/**
-* The tool protocol used by this task. Once a task uses tools with a specific
-* protocol (XML or Native), it is permanently locked to that protocol.
-*
-* - "xml": Tool calls are parsed from XML text (no tool IDs)
-* - "native": Tool calls come as tool_call chunks with IDs
-*
-* This ensures task resumption works correctly even when NTC settings change.
-*/
-toolProtocol: import_zod9.z.enum(["xml", "native"]).optional(),
 apiConfigName: import_zod9.z.string().optional(),
 // Provider profile name for sticky profile feature
 status: import_zod9.z.enum(["active", "completed", "delegated"]).optional(),
@@ -6082,23 +5821,12 @@ var historyItemSchema = import_zod9.z.object({
 
 // src/experiment.ts
 var import_zod10 = require("zod");
-var experimentIds = [
-"powerSteering",
-"multiFileApplyDiff",
-"preventFocusDisruption",
-"imageGeneration",
-"runSlashCommand",
-"multipleNativeToolCalls",
-"customTools"
-];
+var experimentIds = ["preventFocusDisruption", "imageGeneration", "runSlashCommand", "customTools"];
 var experimentIdsSchema = import_zod10.z.enum(experimentIds);
 var experimentsSchema = import_zod10.z.object({
-powerSteering: import_zod10.z.boolean().optional(),
-multiFileApplyDiff: import_zod10.z.boolean().optional(),
 preventFocusDisruption: import_zod10.z.boolean().optional(),
 imageGeneration: import_zod10.z.boolean().optional(),
 runSlashCommand: import_zod10.z.boolean().optional(),
-multipleNativeToolCalls: import_zod10.z.boolean().optional(),
 customTools: import_zod10.z.boolean().optional()
 });
 
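The experiment surface shrinks here: powerSteering, multiFileApplyDiff, and multipleNativeToolCalls are gone from both experimentIds and experimentsSchema. A hedged sketch, assuming experimentIdsSchema and experimentsSchema remain exported as in earlier versions (the usage itself is illustrative only):

// Sketch only: still-supported flags parse; removed ids are no longer valid enum members.
import { experimentIdsSchema, experimentsSchema } from "@roo-code/types"

const experiments = experimentsSchema.parse({ imageGeneration: true, runSlashCommand: false })
const legacy = experimentIdsSchema.safeParse("powerSteering")
console.log(experiments, legacy.success) // legacy.success === false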
@@ -6150,6 +5878,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
 TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
 TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
 TelemetryEventName2["MODEL_CACHE_EMPTY_RESPONSE"] = "Model Cache Empty Response";
+TelemetryEventName2["READ_FILE_LEGACY_FORMAT_USED"] = "Read File Legacy Format Used";
 return TelemetryEventName2;
 })(TelemetryEventName || {});
 var staticAppPropertiesSchema = import_zod11.z.object({
@@ -6238,7 +5967,8 @@ var rooCodeTelemetryEventSchema = import_zod11.z.discriminatedUnion("type", [
 "Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */,
 "Tab Shown" /* TAB_SHOWN */,
 "Mode Setting Changed" /* MODE_SETTINGS_CHANGED */,
-"Custom Mode Created" /* CUSTOM_MODE_CREATED
+"Custom Mode Created" /* CUSTOM_MODE_CREATED */,
+"Read File Legacy Format Used" /* READ_FILE_LEGACY_FORMAT_USED */
 ]),
 properties: telemetryPropertiesSchema
 }),
@@ -6552,7 +6282,15 @@ var isLanguage = (value) => languages.includes(value);
 
 // src/global-settings.ts
 var DEFAULT_WRITE_DELAY_MS = 1e3;
-var
+var TERMINAL_PREVIEW_BYTES = {
+small: 5 * 1024,
+// 5KB
+medium: 10 * 1024,
+// 10KB
+large: 20 * 1024
+// 20KB
+};
+var DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE = "medium";
 var MIN_CHECKPOINT_TIMEOUT_SECONDS = 10;
 var MAX_CHECKPOINT_TIMEOUT_SECONDS = 60;
 var DEFAULT_CHECKPOINT_TIMEOUT_SECONDS = 15;
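The hunk above replaces the old character-limit constant with a small/medium/large preview-size table. A minimal sketch of mapping the new setting to a byte budget; TERMINAL_PREVIEW_BYTES and DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE come straight from the diff, while the resolve helper is an assumption:

// Sketch only: resolve a terminalOutputPreviewSize setting to bytes.
import { DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE, TERMINAL_PREVIEW_BYTES } from "@roo-code/types"

type PreviewSize = keyof typeof TERMINAL_PREVIEW_BYTES // "small" | "medium" | "large"

function resolvePreviewBytes(size?: PreviewSize): number {
  // Fall back to the package default ("medium", i.e. 10 KB) when the setting is unset.
  return TERMINAL_PREVIEW_BYTES[size ?? (DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE as PreviewSize)]
}

console.log(resolvePreviewBytes())        // 10240
console.log(resolvePreviewBytes("large")) // 20480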
@@ -6568,7 +6306,6 @@ var globalSettingsSchema = import_zod14.z.object({
 imageGenerationProvider: import_zod14.z.enum(["openrouter", "roo"]).optional(),
 openRouterImageApiKey: import_zod14.z.string().optional(),
 openRouterImageGenerationSelectedModel: import_zod14.z.string().optional(),
-condensingApiConfigId: import_zod14.z.string().optional(),
 customCondensingPrompt: import_zod14.z.string().optional(),
 autoApprovalEnabled: import_zod14.z.boolean().optional(),
 alwaysAllowReadOnly: import_zod14.z.boolean().optional(),
@@ -6594,7 +6331,6 @@ var globalSettingsSchema = import_zod14.z.object({
 allowedMaxCost: import_zod14.z.number().nullish(),
 autoCondenseContext: import_zod14.z.boolean().optional(),
 autoCondenseContextPercent: import_zod14.z.number().optional(),
-maxConcurrentFileReads: import_zod14.z.number().optional(),
 /**
 * Whether to include current time in the environment details
 * @default true
@@ -6637,11 +6373,9 @@ var globalSettingsSchema = import_zod14.z.object({
 maxWorkspaceFiles: import_zod14.z.number().optional(),
 showRooIgnoredFiles: import_zod14.z.boolean().optional(),
 enableSubfolderRules: import_zod14.z.boolean().optional(),
-maxReadFileLine: import_zod14.z.number().optional(),
 maxImageFileSize: import_zod14.z.number().optional(),
 maxTotalImageSize: import_zod14.z.number().optional(),
-
-terminalOutputCharacterLimit: import_zod14.z.number().optional(),
+terminalOutputPreviewSize: import_zod14.z.enum(["small", "medium", "large"]).optional(),
 terminalShellIntegrationTimeout: import_zod14.z.number().optional(),
 terminalShellIntegrationDisabled: import_zod14.z.boolean().optional(),
 terminalCommandDelay: import_zod14.z.number().optional(),
@@ -6650,18 +6384,14 @@ var globalSettingsSchema = import_zod14.z.object({
 terminalZshOhMy: import_zod14.z.boolean().optional(),
 terminalZshP10k: import_zod14.z.boolean().optional(),
 terminalZdotdir: import_zod14.z.boolean().optional(),
-terminalCompressProgressBar: import_zod14.z.boolean().optional(),
 diagnosticsEnabled: import_zod14.z.boolean().optional(),
 rateLimitSeconds: import_zod14.z.number().optional(),
-diffEnabled: import_zod14.z.boolean().optional(),
-fuzzyMatchThreshold: import_zod14.z.number().optional(),
 experiments: experimentsSchema.optional(),
 codebaseIndexModels: codebaseIndexModelsSchema.optional(),
 codebaseIndexConfig: codebaseIndexConfigSchema.optional(),
 language: languagesSchema.optional(),
 telemetrySetting: telemetrySettingsSchema.optional(),
 mcpEnabled: import_zod14.z.boolean().optional(),
-enableMcpServerCreation: import_zod14.z.boolean().optional(),
 mode: import_zod14.z.string().optional(),
 modeApiConfigs: import_zod14.z.record(import_zod14.z.string(), import_zod14.z.string()).optional(),
 customModes: import_zod14.z.array(modeConfigSchema).optional(),
@@ -6681,7 +6411,20 @@ var globalSettingsSchema = import_zod14.z.object({
 profileThresholds: import_zod14.z.record(import_zod14.z.string(), import_zod14.z.number()).optional(),
 hasOpenedModeSelector: import_zod14.z.boolean().optional(),
 lastModeExportPath: import_zod14.z.string().optional(),
-lastModeImportPath: import_zod14.z.string().optional()
+lastModeImportPath: import_zod14.z.string().optional(),
+lastSettingsExportPath: import_zod14.z.string().optional(),
+lastTaskExportPath: import_zod14.z.string().optional(),
+lastImageSavePath: import_zod14.z.string().optional(),
+/**
+* Path to worktree to auto-open after switching workspaces.
+* Used by the worktree feature to open the Roo Code sidebar in a new window.
+*/
+worktreeAutoOpenPath: import_zod14.z.string().optional(),
+/**
+* Whether to show the worktree selector in the home screen.
+* @default true
+*/
+showWorktreesInHomeScreen: import_zod14.z.boolean().optional()
 });
 var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
 var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
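The global-settings hunks swap terminalOutputCharacterLimit for the enum-valued terminalOutputPreviewSize and add export-path and worktree keys. A hedged sketch, assuming globalSettingsSchema stays exported as before; the payload values and paths below are hypothetical:

// Sketch only: a settings payload using keys introduced or renamed in this release.
import { globalSettingsSchema } from "@roo-code/types"

const settings = globalSettingsSchema.parse({
  terminalOutputPreviewSize: "large",            // replaces terminalOutputCharacterLimit
  showWorktreesInHomeScreen: true,               // new worktree selector toggle (default true)
  worktreeAutoOpenPath: "/tmp/example-worktree", // hypothetical path
  lastTaskExportPath: "/tmp/task-export.json",   // hypothetical path
})
console.log(settings.terminalOutputPreviewSize)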
@@ -6765,8 +6508,6 @@ var EVALS_SETTINGS = {
 ttsSpeed: 1,
 soundEnabled: false,
 soundVolume: 0.5,
-terminalOutputLineLimit: 500,
-terminalOutputCharacterLimit: DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT,
 terminalShellIntegrationTimeout: 3e4,
 terminalCommandDelay: 0,
 terminalPowershellCounter: false,
@@ -6774,19 +6515,14 @@ var EVALS_SETTINGS = {
 terminalZshClearEolMark: true,
 terminalZshP10k: false,
 terminalZdotdir: true,
-terminalCompressProgressBar: true,
 terminalShellIntegrationDisabled: true,
 diagnosticsEnabled: true,
-diffEnabled: true,
-fuzzyMatchThreshold: 1,
 enableCheckpoints: false,
 rateLimitSeconds: 0,
 maxOpenTabsContext: 20,
 maxWorkspaceFiles: 200,
 maxGitStatusFiles: 20,
 showRooIgnoredFiles: true,
-maxReadFileLine: -1,
-// -1 to enable full file reading.
 includeDiagnosticMessages: true,
 maxDiagnosticMessages: 50,
 language: "en",
@@ -6860,24 +6596,18 @@ var organizationAllowListSchema = import_zod16.z.object({
 });
 var organizationDefaultSettingsSchema = globalSettingsSchema.pick({
 enableCheckpoints: true,
-fuzzyMatchThreshold: true,
 maxOpenTabsContext: true,
-maxReadFileLine: true,
 maxWorkspaceFiles: true,
 showRooIgnoredFiles: true,
 terminalCommandDelay: true,
-terminalCompressProgressBar: true,
-terminalOutputLineLimit: true,
 terminalShellIntegrationDisabled: true,
 terminalShellIntegrationTimeout: true,
 terminalZshClearEolMark: true
 }).merge(
 import_zod16.z.object({
 maxOpenTabsContext: import_zod16.z.number().int().nonnegative().optional(),
-maxReadFileLine: import_zod16.z.number().int().gte(-1).optional(),
 maxWorkspaceFiles: import_zod16.z.number().int().nonnegative().optional(),
 terminalCommandDelay: import_zod16.z.number().int().nonnegative().optional(),
-terminalOutputLineLimit: import_zod16.z.number().int().nonnegative().optional(),
 terminalShellIntegrationTimeout: import_zod16.z.number().int().nonnegative().optional()
 })
 );
@@ -6910,7 +6640,8 @@ var userFeaturesSchema = import_zod16.z.object({
 });
 var userSettingsConfigSchema = import_zod16.z.object({
 extensionBridgeEnabled: import_zod16.z.boolean().optional(),
-taskSyncEnabled: import_zod16.z.boolean().optional()
+taskSyncEnabled: import_zod16.z.boolean().optional(),
+llmEnhancedFeaturesEnabled: import_zod16.z.boolean().optional()
 });
 var userSettingsDataSchema = import_zod16.z.object({
 features: userFeaturesSchema,
@@ -7392,6 +7123,7 @@ var ipcMessageSchema = import_zod18.z.discriminatedUnion("type", [
 
 // src/mcp.ts
 var import_zod19 = require("zod");
+var MAX_MCP_TOOLS_THRESHOLD = 60;
 var mcpExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
 import_zod19.z.object({
 executionId: import_zod19.z.string(),
@@ -7415,6 +7147,46 @@ var mcpExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
 error: import_zod19.z.string().optional()
 })
 ]);
+function countEnabledMcpTools(servers) {
+let serverCount = 0;
+let toolCount = 0;
+for (const server of servers) {
+if (server.disabled) continue;
+if (server.status !== "connected") continue;
+serverCount++;
+if (server.tools) {
+for (const tool of server.tools) {
+if (tool.enabledForPrompt !== false) {
+toolCount++;
+}
+}
+}
+}
+return { enabledToolCount: toolCount, enabledServerCount: serverCount };
+}
+
+// src/skills.ts
+var SKILL_NAME_MIN_LENGTH = 1;
+var SKILL_NAME_MAX_LENGTH = 64;
+var SKILL_NAME_REGEX = /^[a-z0-9]+(?:-[a-z0-9]+)*$/;
+var SkillNameValidationError = /* @__PURE__ */ ((SkillNameValidationError2) => {
+SkillNameValidationError2["Empty"] = "empty";
+SkillNameValidationError2["TooLong"] = "too_long";
+SkillNameValidationError2["InvalidFormat"] = "invalid_format";
+return SkillNameValidationError2;
+})(SkillNameValidationError || {});
+function validateSkillName(name) {
+if (!name || name.length < SKILL_NAME_MIN_LENGTH) {
+return { valid: false, error: "empty" /* Empty */ };
+}
+if (name.length > SKILL_NAME_MAX_LENGTH) {
+return { valid: false, error: "too_long" /* TooLong */ };
+}
+if (!SKILL_NAME_REGEX.test(name)) {
+return { valid: false, error: "invalid_format" /* InvalidFormat */ };
+}
+return { valid: true };
+}
 
 // src/todo.ts
 var import_zod20 = require("zod");
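The hunk above adds two helpers: countEnabledMcpTools, which counts connected servers and prompt-enabled tools against the new MAX_MCP_TOOLS_THRESHOLD, and validateSkillName, which enforces a 1-64 character lowercase-hyphenated name format. A hedged usage sketch; the exported names are from the diff, while the server literals and warning logic are assumptions (simplified stand-ins for real McpServer objects):

// Sketch only: skill-name validation and MCP tool counting.
import { MAX_MCP_TOOLS_THRESHOLD, SkillNameValidationError, countEnabledMcpTools, validateSkillName } from "@roo-code/types"

console.log(validateSkillName("generate-changelog")) // { valid: true }
console.log(validateSkillName("Bad_Name").error === SkillNameValidationError.InvalidFormat) // true

// Only connected, non-disabled servers and prompt-enabled tools are counted.
const servers = [
  { name: "docs", status: "connected", tools: [{ name: "search" }, { name: "fetch", enabledForPrompt: false }] },
  { name: "offline", status: "disconnected", tools: [{ name: "noop" }] },
]
const { enabledToolCount, enabledServerCount } = countEnabledMcpTools(servers)
if (enabledToolCount > MAX_MCP_TOOLS_THRESHOLD) {
  console.warn(`MCP tool count ${enabledToolCount} exceeds threshold ${MAX_MCP_TOOLS_THRESHOLD}`)
}
console.log(enabledToolCount, enabledServerCount) // 1 1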
@@ -7454,6 +7226,11 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
 })
 ]);
 
+// src/tool-params.ts
+function isLegacyReadFileParams(params) {
+return "_legacyFormat" in params && params._legacyFormat === true;
+}
+
 // src/vscode-extension-host.ts
 var import_zod22 = require("zod");
 var checkoutDiffPayloadSchema = import_zod22.z.object({
@@ -7507,7 +7284,7 @@ var browserActions = [
 DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
 DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
 DEFAULT_MODES,
-
+DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE,
 DEFAULT_WRITE_DELAY_MS,
 DOUBAO_API_BASE_URL,
 DOUBAO_API_CHAT_PATH,
@@ -7537,13 +7314,13 @@ var browserActions = [
 IpcOrigin,
 LMSTUDIO_DEFAULT_TEMPERATURE,
 MAX_CHECKPOINT_TIMEOUT_SECONDS,
+MAX_MCP_TOOLS_THRESHOLD,
 MINIMAX_DEFAULT_MAX_TOKENS,
 MINIMAX_DEFAULT_TEMPERATURE,
 MIN_CHECKPOINT_TIMEOUT_SECONDS,
 MISTRAL_DEFAULT_TEMPERATURE,
 MODELS_BY_PROVIDER,
 MOONSHOT_DEFAULT_TEMPERATURE,
-NATIVE_TOOL_DEFAULTS,
 OPENAI_AZURE_AI_INFERENCE_PATH,
 OPENAI_NATIVE_DEFAULT_TEMPERATURE,
 OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -7558,7 +7335,11 @@ var browserActions = [
 RooModelsResponseSchema,
 RooPricingSchema,
 SECRET_STATE_KEYS,
-
+SKILL_NAME_MAX_LENGTH,
+SKILL_NAME_MIN_LENGTH,
+SKILL_NAME_REGEX,
+SkillNameValidationError,
+TERMINAL_PREVIEW_BYTES,
 TaskBridgeCommandName,
 TaskBridgeEventName,
 TaskCommandName,
@@ -7590,9 +7371,6 @@ var browserActions = [
 chutesDefaultModelId,
 chutesDefaultModelInfo,
 chutesModels,
-claudeCodeDefaultModelId,
-claudeCodeModels,
-claudeCodeReasoningConfig,
 clineAskSchema,
 clineAsks,
 clineMessageSchema,
@@ -7607,6 +7385,7 @@ var browserActions = [
 commandIds,
 contextCondenseSchema,
 contextTruncationSchema,
+countEnabledMcpTools,
 customModePromptsSchema,
 customModesSettingsSchema,
 customProviders,
@@ -7640,7 +7419,6 @@ var browserActions = [
 geminiDefaultModelId,
 geminiModels,
 getApiProtocol,
-getEffectiveProtocol,
 getErrorMessage,
 getErrorStatusCode,
 getImageGenerationProvider,
@@ -7675,9 +7453,9 @@ var browserActions = [
 isInteractiveAsk,
 isInternalProvider,
 isLanguage,
+isLegacyReadFileParams,
 isLocalProvider,
 isModelParameter,
-isNativeProtocol,
 isNonBlockingAsk,
 isProviderName,
 isResumableAsk,
@@ -7713,9 +7491,10 @@ var browserActions = [
 moonshotDefaultModelId,
 moonshotModels,
 nonBlockingAsks,
-normalizeClaudeCodeModelId,
 ollamaDefaultModelId,
 ollamaDefaultModelInfo,
+openAiCodexDefaultModelId,
+openAiCodexModels,
 openAiModelInfoSaneDefaults,
 openAiNativeDefaultModelId,
 openAiNativeModels,
@@ -7785,6 +7564,7 @@ var browserActions = [
 userFeaturesSchema,
 userSettingsConfigSchema,
 userSettingsDataSchema,
+validateSkillName,
 verbosityLevels,
 verbosityLevelsSchema,
 vercelAiGatewayDefaultModelId,