@roo-code/types 1.106.0 → 1.107.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +395 -718
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1553 -2546
- package/dist/index.d.ts +1553 -2546
- package/dist/index.js +385 -709
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
package/dist/index.cjs
CHANGED
@@ -42,7 +42,7 @@ __export(index_exports, {
  DEFAULT_CHECKPOINT_TIMEOUT_SECONDS: () => DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT: () => DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
  DEFAULT_MODES: () => DEFAULT_MODES,
-
+ DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE: () => DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE,
  DEFAULT_WRITE_DELAY_MS: () => DEFAULT_WRITE_DELAY_MS,
  DOUBAO_API_BASE_URL: () => DOUBAO_API_BASE_URL,
  DOUBAO_API_CHAT_PATH: () => DOUBAO_API_CHAT_PATH,
@@ -72,13 +72,13 @@ __export(index_exports, {
  IpcOrigin: () => IpcOrigin,
  LMSTUDIO_DEFAULT_TEMPERATURE: () => LMSTUDIO_DEFAULT_TEMPERATURE,
  MAX_CHECKPOINT_TIMEOUT_SECONDS: () => MAX_CHECKPOINT_TIMEOUT_SECONDS,
+ MAX_MCP_TOOLS_THRESHOLD: () => MAX_MCP_TOOLS_THRESHOLD,
  MINIMAX_DEFAULT_MAX_TOKENS: () => MINIMAX_DEFAULT_MAX_TOKENS,
  MINIMAX_DEFAULT_TEMPERATURE: () => MINIMAX_DEFAULT_TEMPERATURE,
  MIN_CHECKPOINT_TIMEOUT_SECONDS: () => MIN_CHECKPOINT_TIMEOUT_SECONDS,
  MISTRAL_DEFAULT_TEMPERATURE: () => MISTRAL_DEFAULT_TEMPERATURE,
  MODELS_BY_PROVIDER: () => MODELS_BY_PROVIDER,
  MOONSHOT_DEFAULT_TEMPERATURE: () => MOONSHOT_DEFAULT_TEMPERATURE,
- NATIVE_TOOL_DEFAULTS: () => NATIVE_TOOL_DEFAULTS,
  OPENAI_AZURE_AI_INFERENCE_PATH: () => OPENAI_AZURE_AI_INFERENCE_PATH,
  OPENAI_NATIVE_DEFAULT_TEMPERATURE: () => OPENAI_NATIVE_DEFAULT_TEMPERATURE,
  OPENROUTER_DEFAULT_PROVIDER_NAME: () => OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -93,7 +93,11 @@ __export(index_exports, {
  RooModelsResponseSchema: () => RooModelsResponseSchema,
  RooPricingSchema: () => RooPricingSchema,
  SECRET_STATE_KEYS: () => SECRET_STATE_KEYS,
-
+ SKILL_NAME_MAX_LENGTH: () => SKILL_NAME_MAX_LENGTH,
+ SKILL_NAME_MIN_LENGTH: () => SKILL_NAME_MIN_LENGTH,
+ SKILL_NAME_REGEX: () => SKILL_NAME_REGEX,
+ SkillNameValidationError: () => SkillNameValidationError,
+ TERMINAL_PREVIEW_BYTES: () => TERMINAL_PREVIEW_BYTES,
  TaskBridgeCommandName: () => TaskBridgeCommandName,
  TaskBridgeEventName: () => TaskBridgeEventName,
  TaskCommandName: () => TaskCommandName,
@@ -125,9 +129,6 @@ __export(index_exports, {
  chutesDefaultModelId: () => chutesDefaultModelId,
  chutesDefaultModelInfo: () => chutesDefaultModelInfo,
  chutesModels: () => chutesModels,
- claudeCodeDefaultModelId: () => claudeCodeDefaultModelId,
- claudeCodeModels: () => claudeCodeModels,
- claudeCodeReasoningConfig: () => claudeCodeReasoningConfig,
  clineAskSchema: () => clineAskSchema,
  clineAsks: () => clineAsks,
  clineMessageSchema: () => clineMessageSchema,
@@ -142,6 +143,7 @@ __export(index_exports, {
  commandIds: () => commandIds,
  contextCondenseSchema: () => contextCondenseSchema,
  contextTruncationSchema: () => contextTruncationSchema,
+ countEnabledMcpTools: () => countEnabledMcpTools,
  customModePromptsSchema: () => customModePromptsSchema,
  customModesSettingsSchema: () => customModesSettingsSchema,
  customProviders: () => customProviders,
@@ -175,7 +177,6 @@ __export(index_exports, {
  geminiDefaultModelId: () => geminiDefaultModelId,
  geminiModels: () => geminiModels,
  getApiProtocol: () => getApiProtocol,
- getEffectiveProtocol: () => getEffectiveProtocol,
  getErrorMessage: () => getErrorMessage,
  getErrorStatusCode: () => getErrorStatusCode,
  getImageGenerationProvider: () => getImageGenerationProvider,
@@ -210,9 +211,9 @@ __export(index_exports, {
  isInteractiveAsk: () => isInteractiveAsk,
  isInternalProvider: () => isInternalProvider,
  isLanguage: () => isLanguage,
+ isLegacyReadFileParams: () => isLegacyReadFileParams,
  isLocalProvider: () => isLocalProvider,
  isModelParameter: () => isModelParameter,
- isNativeProtocol: () => isNativeProtocol,
  isNonBlockingAsk: () => isNonBlockingAsk,
  isProviderName: () => isProviderName,
  isResumableAsk: () => isResumableAsk,
@@ -248,7 +249,6 @@ __export(index_exports, {
  moonshotDefaultModelId: () => moonshotDefaultModelId,
  moonshotModels: () => moonshotModels,
  nonBlockingAsks: () => nonBlockingAsks,
- normalizeClaudeCodeModelId: () => normalizeClaudeCodeModelId,
  ollamaDefaultModelId: () => ollamaDefaultModelId,
  ollamaDefaultModelInfo: () => ollamaDefaultModelInfo,
  openAiCodexDefaultModelId: () => openAiCodexDefaultModelId,
@@ -322,6 +322,7 @@ __export(index_exports, {
  userFeaturesSchema: () => userFeaturesSchema,
  userSettingsConfigSchema: () => userSettingsConfigSchema,
  userSettingsDataSchema: () => userSettingsDataSchema,
+ validateSkillName: () => validateSkillName,
  verbosityLevels: () => verbosityLevels,
  verbosityLevelsSchema: () => verbosityLevelsSchema,
  vercelAiGatewayDefaultModelId: () => vercelAiGatewayDefaultModelId,
@@ -417,7 +418,9 @@ var clineSays = [
  "condense_context_error",
  "sliding_window_truncation",
  "codebase_search_result",
- "user_edit_todos"
+ "user_edit_todos",
+ "too_many_tools_warning",
+ "tool"
  ];
  var clineSaySchema = import_zod.z.enum(clineSays);
  var toolProgressStatusSchema = import_zod.z.object({
@@ -485,6 +488,7 @@ var toolGroupsSchema = import_zod2.z.enum(toolGroups);
  var toolNames = [
  "execute_command",
  "read_file",
+ "read_command_output",
  "write_to_file",
  "apply_diff",
  "search_and_replace",
@@ -500,10 +504,10 @@ var toolNames = [
  "attempt_completion",
  "switch_mode",
  "new_task",
- "fetch_instructions",
  "codebase_search",
  "update_todo_list",
  "run_slash_command",
+ "skill",
  "generate_image",
  "custom_tool"
  ];
@@ -515,20 +519,6 @@ var toolUsageSchema = import_zod2.z.record(
  failures: import_zod2.z.number()
  })
  );
- var TOOL_PROTOCOL = {
- XML: "xml",
- NATIVE: "native"
- };
- var NATIVE_TOOL_DEFAULTS = {
- supportsNativeTools: true,
- defaultToolProtocol: TOOL_PROTOCOL.NATIVE
- };
- function isNativeProtocol(protocol) {
- return protocol === TOOL_PROTOCOL.NATIVE;
- }
- function getEffectiveProtocol(toolProtocol) {
- return toolProtocol || TOOL_PROTOCOL.XML;
- }

  // src/events.ts
  var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
@@ -552,6 +542,7 @@ var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
  RooCodeEventName2["TaskModeSwitched"] = "taskModeSwitched";
  RooCodeEventName2["TaskAskResponded"] = "taskAskResponded";
  RooCodeEventName2["TaskUserMessage"] = "taskUserMessage";
+ RooCodeEventName2["QueuedMessagesUpdated"] = "queuedMessagesUpdated";
  RooCodeEventName2["TaskTokenUsageUpdated"] = "taskTokenUsageUpdated";
  RooCodeEventName2["TaskToolFailed"] = "taskToolFailed";
  RooCodeEventName2["ModeChanged"] = "modeChanged";
@@ -611,6 +602,7 @@ var rooCodeEventsSchema = import_zod3.z.object({
  ["taskModeSwitched" /* TaskModeSwitched */]: import_zod3.z.tuple([import_zod3.z.string(), import_zod3.z.string()]),
  ["taskAskResponded" /* TaskAskResponded */]: import_zod3.z.tuple([import_zod3.z.string()]),
  ["taskUserMessage" /* TaskUserMessage */]: import_zod3.z.tuple([import_zod3.z.string()]),
+ ["queuedMessagesUpdated" /* QueuedMessagesUpdated */]: import_zod3.z.tuple([import_zod3.z.string(), import_zod3.z.array(queuedMessageSchema)]),
  ["taskToolFailed" /* TaskToolFailed */]: import_zod3.z.tuple([import_zod3.z.string(), toolNamesSchema, import_zod3.z.string()]),
  ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: import_zod3.z.tuple([import_zod3.z.string(), tokenUsageSchema, toolUsageSchema]),
  ["modeChanged" /* ModeChanged */]: import_zod3.z.tuple([import_zod3.z.string()]),
@@ -716,6 +708,11 @@ var taskEventSchema = import_zod3.z.discriminatedUnion("eventName", [
  payload: rooCodeEventsSchema.shape["taskAskResponded" /* TaskAskResponded */],
  taskId: import_zod3.z.number().optional()
  }),
+ import_zod3.z.object({
+ eventName: import_zod3.z.literal("queuedMessagesUpdated" /* QueuedMessagesUpdated */),
+ payload: rooCodeEventsSchema.shape["queuedMessagesUpdated" /* QueuedMessagesUpdated */],
+ taskId: import_zod3.z.number().optional()
+ }),
  // Task Analytics
  import_zod3.z.object({
  eventName: import_zod3.z.literal("taskToolFailed" /* TaskToolFailed */),
@@ -816,10 +813,6 @@ var modelInfoSchema = import_zod5.z.object({
  isStealthModel: import_zod5.z.boolean().optional(),
  // Flag to indicate if the model is free (no cost)
  isFree: import_zod5.z.boolean().optional(),
- // Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
- supportsNativeTools: import_zod5.z.boolean().optional(),
- // Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
- defaultToolProtocol: import_zod5.z.enum(["xml", "native"]).optional(),
  // Exclude specific native tools from being available (only applies to native protocol)
  // These tools will be removed from the set of tools available to the model
  excludedTools: import_zod5.z.array(import_zod5.z.string()).optional(),
@@ -916,8 +909,6 @@ var anthropicModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -950,8 +941,6 @@ var anthropicModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -983,8 +972,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 5,
  // $5 per million input tokens
  outputPrice: 25,
@@ -1001,8 +988,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  // $15 per million input tokens
  outputPrice: 75,
@@ -1019,8 +1004,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  // $15 per million input tokens
  outputPrice: 75,
@@ -1037,8 +1020,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens
  outputPrice: 15,
@@ -1056,8 +1037,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens
  outputPrice: 15,
@@ -1072,8 +1051,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens
  outputPrice: 15,
@@ -1088,8 +1065,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -1100,8 +1075,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -1112,8 +1085,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.25,
  outputPrice: 1.25,
  cacheWritesPrice: 0.3,
@@ -1124,8 +1095,6 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -1143,7 +1112,6 @@ var basetenModels = {
  contextWindow: 262e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheWritesPrice: 0,
@@ -1155,7 +1123,6 @@ var basetenModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 2.2,
  cacheWritesPrice: 0,
@@ -1167,7 +1134,6 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 2.55,
  outputPrice: 5.95,
  cacheWritesPrice: 0,
@@ -1179,7 +1145,6 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 2.55,
  outputPrice: 5.95,
  cacheWritesPrice: 0,
@@ -1191,7 +1156,6 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.77,
  outputPrice: 0.77,
  cacheWritesPrice: 0,
@@ -1203,7 +1167,6 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.5,
  outputPrice: 1.5,
  cacheWritesPrice: 0,
@@ -1215,7 +1178,6 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.3,
  outputPrice: 0.45,
  cacheWritesPrice: 0,
@@ -1227,7 +1189,6 @@ var basetenModels = {
  contextWindow: 128072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.1,
  outputPrice: 0.5,
  cacheWritesPrice: 0,
@@ -1239,7 +1200,6 @@ var basetenModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.22,
  outputPrice: 0.8,
  cacheWritesPrice: 0,
@@ -1251,7 +1211,6 @@ var basetenModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.38,
  outputPrice: 1.53,
  cacheWritesPrice: 0,
@@ -1263,7 +1222,6 @@ var basetenModels = {
  contextWindow: 262e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheWritesPrice: 0,
@@ -1283,8 +1241,6 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1298,7 +1254,6 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 0.8,
  outputPrice: 3.2,
  cacheWritesPrice: 0.8,
@@ -1314,7 +1269,6 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 4,
  cacheWritesPrice: 1,
@@ -1328,7 +1282,6 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 0.06,
  outputPrice: 0.24,
  cacheWritesPrice: 0.06,
@@ -1344,7 +1297,6 @@ var bedrockModels = {
  contextWindow: 1e6,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 0.33,
  outputPrice: 2.75,
  cacheWritesPrice: 0,
@@ -1360,7 +1312,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 0.035,
  outputPrice: 0.14,
  cacheWritesPrice: 0.035,
@@ -1377,8 +1328,6 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1393,8 +1342,6 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -1409,8 +1356,6 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 5,
  outputPrice: 25,
  cacheWritesPrice: 6.25,
@@ -1425,8 +1370,6 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -1441,8 +1384,6 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1456,8 +1397,6 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1471,8 +1410,6 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.8,
  outputPrice: 4,
  cacheWritesPrice: 1,
@@ -1487,8 +1424,6 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -1504,8 +1439,6 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15
  },
@@ -1514,8 +1447,6 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75
  },
@@ -1524,8 +1455,6 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15
  },
@@ -1534,8 +1463,6 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.25,
  outputPrice: 1.25
  },
@@ -1544,7 +1471,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 1.35,
  outputPrice: 5.4
  },
@@ -1553,7 +1479,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.5,
  outputPrice: 1.5,
  description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases"
@@ -1563,7 +1488,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 2,
  outputPrice: 6,
  description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model"
@@ -1573,7 +1497,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.3 Instruct (70B)"
@@ -1583,7 +1506,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.2 Instruct (90B)"
@@ -1593,7 +1515,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.16,
  outputPrice: 0.16,
  description: "Llama 3.2 Instruct (11B)"
@@ -1603,7 +1524,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.15,
  description: "Llama 3.2 Instruct (3B)"
@@ -1613,7 +1533,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.1,
  outputPrice: 0.1,
  description: "Llama 3.2 Instruct (1B)"
@@ -1623,7 +1542,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 2.4,
  outputPrice: 2.4,
  description: "Llama 3.1 Instruct (405B)"
@@ -1633,7 +1551,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.1 Instruct (70B)"
@@ -1643,7 +1560,6 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.9,
  outputPrice: 0.9,
  description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
@@ -1653,7 +1569,6 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.22,
  outputPrice: 0.22,
  description: "Llama 3.1 Instruct (8B)"
@@ -1663,7 +1578,6 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 2.65,
  outputPrice: 3.5
  },
@@ -1672,7 +1586,6 @@ var bedrockModels = {
  contextWindow: 4e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.3,
  outputPrice: 0.6
  },
@@ -1681,7 +1594,6 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.2,
  description: "Amazon Titan Text Lite"
@@ -1691,7 +1603,6 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  inputPrice: 0.2,
  outputPrice: 0.6,
  description: "Amazon Titan Text Express"
@@ -1701,8 +1612,6 @@ var bedrockModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  preserveReasoning: true,
  inputPrice: 0.6,
  outputPrice: 2.5,
@@ -1713,8 +1622,6 @@ var bedrockModels = {
  contextWindow: 196608,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  preserveReasoning: true,
  inputPrice: 0.3,
  outputPrice: 1.2,
@@ -1725,8 +1632,6 @@ var bedrockModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.15,
  outputPrice: 1.2,
  description: "Qwen3 Next 80B (MoE model with 3B active parameters)"
@@ -1736,8 +1641,6 @@ var bedrockModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.45,
  outputPrice: 1.8,
  description: "Qwen3 Coder 480B (MoE model with 35B active parameters)"
@@ -1828,26 +1731,14 @@ var BEDROCK_SERVICE_TIER_PRICING = {
  // src/providers/cerebras.ts
  var cerebrasDefaultModelId = "gpt-oss-120b";
  var cerebrasModels = {
- "zai-glm-4.6": {
- maxTokens: 16384,
- // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- inputPrice: 0,
- outputPrice: 0,
- description: "Fast general-purpose model on Cerebras (up to 1,000 tokens/s). To be deprecated soon."
- },
  "zai-glm-4.7": {
  maxTokens: 16384,
  // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
  contextWindow: 131072,
  supportsImages: false,
- supportsPromptCache:
-
-
+ supportsPromptCache: true,
+ supportsTemperature: true,
+ defaultTemperature: 1,
  inputPrice: 0,
  outputPrice: 0,
  description: "Highly capable general-purpose model on Cerebras (up to 1,000 tokens/s), competitive with leading proprietary models on coding tasks."
@@ -1858,8 +1749,6 @@ var cerebrasModels = {
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Intelligent model with ~1400 tokens/s"
@@ -1870,8 +1759,6 @@ var cerebrasModels = {
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Powerful model with ~2600 tokens/s"
@@ -1882,8 +1769,6 @@ var cerebrasModels = {
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "SOTA coding performance with ~2500 tokens/s"
@@ -1894,8 +1779,6 @@ var cerebrasModels = {
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -1910,8 +1793,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek R1 0528 model."
@@ -1921,8 +1802,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek R1 model."
@@ -1932,8 +1811,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek V3 model."
@@ -1943,8 +1820,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek V3.1 model."
@@ -1954,8 +1829,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.23,
  outputPrice: 0.9,
  description: "DeepSeek\u2011V3.1\u2011Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix\u2011ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance."
@@ -1965,8 +1838,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 3,
  description: "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2\xD7 quota per request and not intended for bulk workloads."
@@ -1976,8 +1847,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.25,
  outputPrice: 0.35,
  description: "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long\u2011context training and inference efficiency while maintaining performance comparable to V3.1\u2011Terminus."
@@ -1989,8 +1858,6 @@ var chutesModels = {
  // From Groq
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Unsloth Llama 3.3 70B Instruct model."
@@ -2000,8 +1867,6 @@ var chutesModels = {
  contextWindow: 512e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context."
@@ -2011,8 +1876,6 @@ var chutesModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Unsloth Mistral Nemo Instruct model."
@@ -2022,8 +1885,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Unsloth Gemma 3 12B IT model."
@@ -2033,8 +1894,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Nous DeepHermes 3 Llama 3 8B Preview model."
@@ -2044,8 +1903,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Unsloth Gemma 3 4B IT model."
@@ -2055,8 +1912,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Nvidia Llama 3.3 Nemotron Super 49B model."
@@ -2066,8 +1921,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Nvidia Llama 3.1 Nemotron Ultra 253B model."
@@ -2077,8 +1930,6 @@ var chutesModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model."
@@ -2088,8 +1939,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek V3 Base model."
@@ -2099,8 +1948,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek R1 Zero model."
@@ -2110,8 +1957,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek V3 (0324) model."
@@ -2121,8 +1966,6 @@ var chutesModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 235B A22B Instruct 2507 model with 262K context window."
@@ -2132,8 +1975,6 @@ var chutesModels = {
  contextWindow: 40960,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 235B A22B model."
@@ -2143,8 +1984,6 @@ var chutesModels = {
  contextWindow: 40960,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 32B model."
@@ -2154,8 +1993,6 @@ var chutesModels = {
  contextWindow: 40960,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 30B A3B model."
@@ -2165,8 +2002,6 @@ var chutesModels = {
  contextWindow: 40960,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 14B model."
@@ -2176,8 +2011,6 @@ var chutesModels = {
  contextWindow: 40960,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 8B model."
@@ -2187,8 +2020,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Microsoft MAI-DS-R1 FP8 model."
@@ -2198,8 +2029,6 @@ var chutesModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "TNGTech DeepSeek R1T Chimera model."
@@ -2209,8 +2038,6 @@ var chutesModels = {
  contextWindow: 151329,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "GLM-4.5-Air model with 151,329 token context window and 106B total parameters with 12B activated."
@@ -2220,8 +2047,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture."
@@ -2231,8 +2056,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 3,
  description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference."
@@ -2242,8 +2065,6 @@ var chutesModels = {
  contextWindow: 202752,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios."
@@ -2254,8 +2075,6 @@ var chutesModels = {
  contextWindow: 202752,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1.15,
  outputPrice: 3.25,
  description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference."
@@ -2265,8 +2084,6 @@ var chutesModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "LongCat Flash Thinking FP8 model with 128K context window, optimized for complex reasoning and coding tasks."
@@ -2276,8 +2093,6 @@ var chutesModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks."
@@ -2287,8 +2102,6 @@ var chutesModels = {
  contextWindow: 75e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1481,
  outputPrice: 0.5926,
  description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
@@ -2298,8 +2111,6 @@ var chutesModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1999,
  outputPrice: 0.8001,
  description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
@@ -2309,8 +2120,6 @@ var chutesModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.077968332,
  outputPrice: 0.31202496,
  description: "Qwen3 235B A22B Thinking 2507 model with 262K context window."
@@ -2320,8 +2129,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Fast, stable instruction-tuned model optimized for complex tasks, RAG, and tool use without thinking traces."
@@ -2331,8 +2138,6 @@ var chutesModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  description: "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis."
@@ -2342,8 +2147,6 @@ var chutesModels = {
  contextWindow: 262144,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.16,
  outputPrice: 0.65,
  description: "Qwen3\u2011VL\u2011235B\u2011A22B\u2011Thinking is an open\u2011weight MoE vision\u2011language model (235B total, ~22B activated) optimized for deliberate multi\u2011step reasoning with strong text\u2011image\u2011video understanding and long\u2011context capabilities."
@@ -2351,73 +2154,6 @@ var chutesModels = {
  };
  var chutesDefaultModelInfo = chutesModels[chutesDefaultModelId];

- // src/providers/claude-code.ts
- var DATE_SUFFIX_PATTERN = /-\d{8}$/;
- var claudeCodeModels = {
- "claude-haiku-4-5": {
- maxTokens: 32768,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- supportsReasoningEffort: ["disable", "low", "medium", "high"],
- reasoningEffort: "medium",
- description: "Claude Haiku 4.5 - Fast and efficient with thinking"
- },
- "claude-sonnet-4-5": {
- maxTokens: 32768,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- supportsReasoningEffort: ["disable", "low", "medium", "high"],
- reasoningEffort: "medium",
- description: "Claude Sonnet 4.5 - Balanced performance with thinking"
- },
- "claude-opus-4-5": {
- maxTokens: 32768,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
- supportsReasoningEffort: ["disable", "low", "medium", "high"],
- reasoningEffort: "medium",
- description: "Claude Opus 4.5 - Most capable with thinking"
- }
- };
- var claudeCodeDefaultModelId = "claude-sonnet-4-5";
- var MODEL_FAMILY_PATTERNS = [
- // Opus models (any version) → claude-opus-4-5
- { pattern: /opus/i, target: "claude-opus-4-5" },
- // Haiku models (any version) → claude-haiku-4-5
- { pattern: /haiku/i, target: "claude-haiku-4-5" },
- // Sonnet models (any version) → claude-sonnet-4-5
- { pattern: /sonnet/i, target: "claude-sonnet-4-5" }
- ];
- function normalizeClaudeCodeModelId(modelId) {
- if (Object.hasOwn(claudeCodeModels, modelId)) {
- return modelId;
- }
- const withoutDate = modelId.replace(DATE_SUFFIX_PATTERN, "");
- if (Object.hasOwn(claudeCodeModels, withoutDate)) {
- return withoutDate;
- }
- for (const { pattern, target } of MODEL_FAMILY_PATTERNS) {
- if (pattern.test(modelId)) {
- return target;
- }
- }
- return claudeCodeDefaultModelId;
- }
- var claudeCodeReasoningConfig = {
- low: { budgetTokens: 16e3 },
- medium: { budgetTokens: 32e3 },
- high: { budgetTokens: 64e3 }
- };
-
  // src/providers/deepseek.ts
  var deepSeekDefaultModelId = "deepseek-chat";
  var deepSeekModels = {
@@ -2427,8 +2163,6 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.28,
  // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
  outputPrice: 0.42,
@@ -2445,8 +2179,6 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  preserveReasoning: true,
  inputPrice: 0.28,
  // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
@@ -2469,8 +2201,6 @@ var doubaoModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 1e-4,
  // $0.0001 per million tokens (cache miss)
  outputPrice: 4e-4,
@@ -2486,8 +2216,6 @@ var doubaoModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2e-4,
  // $0.0002 per million tokens
  outputPrice: 8e-4,
@@ -2503,8 +2231,6 @@ var doubaoModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 15e-5,
  // $0.00015 per million tokens
  outputPrice: 6e-4,
@@ -2527,7 +2253,6 @@ var featherlessModels = {
|
|
|
2527
2253
|
contextWindow: 32678,
|
|
2528
2254
|
supportsImages: false,
|
|
2529
2255
|
supportsPromptCache: false,
|
|
2530
|
-
supportsNativeTools: true,
|
|
2531
2256
|
inputPrice: 0,
|
|
2532
2257
|
outputPrice: 0,
|
|
2533
2258
|
description: "DeepSeek V3 0324 model."
|
|
@@ -2537,7 +2262,6 @@ var featherlessModels = {
|
|
|
2537
2262
|
contextWindow: 32678,
|
|
2538
2263
|
supportsImages: false,
|
|
2539
2264
|
supportsPromptCache: false,
|
|
2540
|
-
supportsNativeTools: true,
|
|
2541
2265
|
inputPrice: 0,
|
|
2542
2266
|
outputPrice: 0,
|
|
2543
2267
|
description: "DeepSeek R1 0528 model."
|
|
@@ -2547,7 +2271,6 @@ var featherlessModels = {
|
|
|
2547
2271
|
contextWindow: 32678,
|
|
2548
2272
|
supportsImages: false,
|
|
2549
2273
|
supportsPromptCache: false,
|
|
2550
|
-
supportsNativeTools: true,
|
|
2551
2274
|
inputPrice: 0,
|
|
2552
2275
|
outputPrice: 0,
|
|
2553
2276
|
description: "Kimi K2 Instruct model."
|
|
@@ -2557,7 +2280,6 @@ var featherlessModels = {
|
|
|
2557
2280
|
contextWindow: 32678,
|
|
2558
2281
|
supportsImages: false,
|
|
2559
2282
|
supportsPromptCache: false,
|
|
2560
|
-
supportsNativeTools: true,
|
|
2561
2283
|
inputPrice: 0,
|
|
2562
2284
|
outputPrice: 0,
|
|
2563
2285
|
description: "GPT-OSS 120B model."
|
|
@@ -2567,7 +2289,6 @@ var featherlessModels = {
|
|
|
2567
2289
|
contextWindow: 32678,
|
|
2568
2290
|
supportsImages: false,
|
|
2569
2291
|
supportsPromptCache: false,
|
|
2570
|
-
supportsNativeTools: true,
|
|
2571
2292
|
inputPrice: 0,
|
|
2572
2293
|
outputPrice: 0,
|
|
2573
2294
|
description: "Qwen3 Coder 480B A35B Instruct model."
|
|
@@ -2583,8 +2304,6 @@ var fireworksModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -2595,8 +2314,6 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 2.5,
  description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -2606,7 +2323,6 @@ var fireworksModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
  supportsTemperature: true,
  preserveReasoning: true,
  defaultTemperature: 1,
@@ -2620,8 +2336,6 @@ var fireworksModels = {
  contextWindow: 204800,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.3,
  outputPrice: 1.2,
  description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -2631,8 +2345,6 @@ var fireworksModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.22,
  outputPrice: 0.88,
  description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -2642,8 +2354,6 @@ var fireworksModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.45,
  outputPrice: 1.8,
  description: "Qwen3's most agentic code model to date."
@@ -2653,8 +2363,6 @@ var fireworksModels = {
  contextWindow: 16e4,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 8,
  description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2664,8 +2372,6 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.9,
  outputPrice: 0.9,
  description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2675,8 +2381,6 @@ var fireworksModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.56,
  outputPrice: 1.68,
  description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
@@ -2686,8 +2390,6 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.55,
  outputPrice: 2.19,
  description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2697,8 +2399,6 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.55,
  outputPrice: 2.19,
  description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2708,8 +2408,6 @@ var fireworksModels = {
  contextWindow: 198e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.55,
  outputPrice: 2.19,
  description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
@@ -2719,8 +2417,6 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.07,
  outputPrice: 0.3,
  description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
@@ -2730,11 +2426,63 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.15,
  outputPrice: 0.6,
  description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
+ },
+ "accounts/fireworks/models/minimax-m2p1": {
+ maxTokens: 4096,
+ contextWindow: 204800,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.3,
+ outputPrice: 1.2,
+ description: "MiniMax M2.1 is an upgraded version of M2 with improved performance on complex reasoning, coding, and long-context understanding tasks."
+ },
+ "accounts/fireworks/models/deepseek-v3p2": {
+ maxTokens: 16384,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.56,
+ outputPrice: 1.68,
+ description: "DeepSeek V3.2 is the latest iteration of the V3 model family with enhanced reasoning capabilities, improved code generation, and better instruction following."
+ },
+ "accounts/fireworks/models/glm-4p7": {
+ maxTokens: 25344,
+ contextWindow: 198e3,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.55,
+ outputPrice: 2.19,
+ description: "Z.ai GLM-4.7 is the latest coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality."
+ },
+ "accounts/fireworks/models/llama-v3p3-70b-instruct": {
+ maxTokens: 16384,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.9,
+ outputPrice: 0.9,
+ description: "Meta Llama 3.3 70B Instruct is a highly capable instruction-tuned model with strong reasoning, coding, and general task performance."
+ },
+ "accounts/fireworks/models/llama4-maverick-instruct-basic": {
+ maxTokens: 16384,
+ contextWindow: 131072,
+ supportsImages: true,
+ supportsPromptCache: false,
+ inputPrice: 0.22,
+ outputPrice: 0.88,
+ description: "Llama 4 Maverick is Meta's latest multimodal model with vision capabilities, optimized for instruction following and coding tasks."
+ },
+ "accounts/fireworks/models/llama4-scout-instruct-basic": {
+ maxTokens: 16384,
+ contextWindow: 131072,
+ supportsImages: true,
+ supportsPromptCache: false,
+ inputPrice: 0.15,
+ outputPrice: 0.6,
+ description: "Llama 4 Scout is a smaller, faster variant of Llama 4 with multimodal capabilities, ideal for quick iterations and cost-effective deployments."
  }
  };

@@ -2745,8 +2493,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
@@ -2754,16 +2500,19 @@ var geminiModels = {
  defaultTemperature: 1,
  inputPrice: 4,
  outputPrice: 18,
+ cacheReadsPrice: 0.4,
  tiers: [
  {
  contextWindow: 2e5,
  inputPrice: 2,
- outputPrice: 12
+ outputPrice: 12,
+ cacheReadsPrice: 0.2
  },
  {
  contextWindow: Infinity,
  inputPrice: 4,
- outputPrice: 18
+ outputPrice: 18,
+ cacheReadsPrice: 0.4
  }
  ]
  },
@@ -2771,25 +2520,20 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  supportsTemperature: true,
  defaultTemperature: 1,
- inputPrice: 0.
- outputPrice:
- cacheReadsPrice: 0.
- cacheWritesPrice: 1
+ inputPrice: 0.5,
+ outputPrice: 3,
+ cacheReadsPrice: 0.05
  },
  // 2.5 Pro models
  "gemini-2.5-pro": {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2818,8 +2562,6 @@ var geminiModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2847,8 +2589,6 @@ var geminiModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2874,8 +2614,6 @@ var geminiModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
@@ -2904,8 +2642,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -2918,8 +2654,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -2932,8 +2666,6 @@ var geminiModels = {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -2947,8 +2679,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.1,
  outputPrice: 0.4,
@@ -2961,8 +2691,6 @@ var geminiModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.1,
  outputPrice: 0.4,
@@ -2982,8 +2710,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.05,
  outputPrice: 0.08,
  description: "Meta Llama 3.1 8B Instant model, 128K context."
@@ -2993,8 +2719,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.59,
  outputPrice: 0.79,
  description: "Meta Llama 3.3 70B Versatile model, 128K context."
@@ -3004,8 +2728,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.11,
  outputPrice: 0.34,
  description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
@@ -3015,8 +2737,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.29,
  outputPrice: 0.59,
  description: "Alibaba Qwen 3 32B model, 128K context."
@@ -3026,8 +2746,6 @@ var groqModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -3038,8 +2756,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.15,
  outputPrice: 0.75,
  description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
@@ -3049,8 +2765,6 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1,
  outputPrice: 0.5,
  description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
@@ -3077,7 +2791,6 @@ var ioIntelligenceModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "DeepSeek R1 reasoning model"
  },
  "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
@@ -3085,7 +2798,6 @@ var ioIntelligenceModels = {
  contextWindow: 43e4,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "Llama 4 Maverick 17B model"
  },
  "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
@@ -3093,7 +2805,6 @@ var ioIntelligenceModels = {
  contextWindow: 106e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "Qwen3 Coder 480B specialized for coding"
  },
  "openai/gpt-oss-120b": {
@@ -3101,7 +2812,6 @@ var ioIntelligenceModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
  description: "OpenAI GPT-OSS 120B model"
  }
  };
@@ -3113,8 +2823,6 @@ var litellmDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -3129,8 +2837,6 @@ var lMStudioDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3146,8 +2852,6 @@ var mistralModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2,
  outputPrice: 5
  },
@@ -3156,8 +2860,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.4,
  outputPrice: 2
  },
@@ -3166,8 +2868,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.4,
  outputPrice: 2
  },
@@ -3176,8 +2876,6 @@ var mistralModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.3,
  outputPrice: 0.9
  },
@@ -3186,8 +2884,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2,
  outputPrice: 6
  },
@@ -3196,8 +2892,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1,
  outputPrice: 0.1
  },
@@ -3206,8 +2900,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.04,
  outputPrice: 0.04
  },
@@ -3216,8 +2908,6 @@ var mistralModels = {
  contextWindow: 32e3,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.2,
  outputPrice: 0.6
  },
@@ -3226,8 +2916,6 @@ var mistralModels = {
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2,
  outputPrice: 6
  }
@@ -3242,8 +2930,6 @@ var moonshotModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  // $0.60 per million tokens (cache miss)
  outputPrice: 2.5,
@@ -3259,8 +2945,6 @@ var moonshotModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -3271,8 +2955,6 @@ var moonshotModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 2.4,
  // $2.40 per million tokens (cache miss)
  outputPrice: 10,
@@ -3291,8 +2973,6 @@ var moonshotModels = {
  supportsImages: false,
  // Text-only (no image/vision support)
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  // $0.60 per million tokens (cache miss)
  outputPrice: 2.5,
@@ -3306,6 +2986,21 @@ var moonshotModels = {
  preserveReasoning: true,
  defaultTemperature: 1,
  description: `The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems.`
+ },
+ "kimi-k2.5": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ // $0.60 per million tokens (cache miss)
+ outputPrice: 3,
+ // $3.00 per million tokens
+ cacheReadsPrice: 0.1,
+ // $0.10 per million tokens (cache hit)
+ supportsTemperature: true,
+ defaultTemperature: 1,
+ description: "Kimi K2.5 is the latest generation of Moonshot AI's Kimi series, featuring improved reasoning capabilities and enhanced performance across diverse tasks."
  }
  };
  var MOONSHOT_DEFAULT_TEMPERATURE = 0.6;
@@ -3317,7 +3012,6 @@ var ollamaDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3331,8 +3025,6 @@ var openAiNativeModels = {
  "gpt-5.1-codex-max": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3350,8 +3042,6 @@ var openAiNativeModels = {
  "gpt-5.2": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3373,8 +3063,6 @@ var openAiNativeModels = {
  "gpt-5.2-codex": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3392,8 +3080,6 @@ var openAiNativeModels = {
  "gpt-5.2-chat-latest": {
  maxTokens: 16384,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3406,8 +3092,6 @@ var openAiNativeModels = {
  "gpt-5.1": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3429,8 +3113,6 @@ var openAiNativeModels = {
  "gpt-5.1-codex": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3448,8 +3130,6 @@ var openAiNativeModels = {
  "gpt-5.1-codex-mini": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3466,8 +3146,6 @@ var openAiNativeModels = {
  "gpt-5": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3488,8 +3166,6 @@ var openAiNativeModels = {
  "gpt-5-mini": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3510,8 +3186,6 @@ var openAiNativeModels = {
  "gpt-5-codex": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3528,8 +3202,6 @@ var openAiNativeModels = {
  "gpt-5-nano": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3547,8 +3219,6 @@ var openAiNativeModels = {
  "gpt-5-chat-latest": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3561,8 +3231,6 @@ var openAiNativeModels = {
  "gpt-4.1": {
  maxTokens: 32768,
  contextWindow: 1047576,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3578,8 +3246,6 @@ var openAiNativeModels = {
  "gpt-4.1-mini": {
  maxTokens: 32768,
  contextWindow: 1047576,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3595,8 +3261,6 @@ var openAiNativeModels = {
  "gpt-4.1-nano": {
  maxTokens: 32768,
  contextWindow: 1047576,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3612,8 +3276,6 @@ var openAiNativeModels = {
  o3: {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -3630,8 +3292,6 @@ var openAiNativeModels = {
  "o3-high": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -3643,8 +3303,6 @@ var openAiNativeModels = {
  "o3-low": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -3656,8 +3314,6 @@ var openAiNativeModels = {
  "o4-mini": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3674,8 +3330,6 @@ var openAiNativeModels = {
  "o4-mini-high": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3687,8 +3341,6 @@ var openAiNativeModels = {
  "o4-mini-low": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3700,8 +3352,6 @@ var openAiNativeModels = {
  "o3-mini": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3714,8 +3364,6 @@ var openAiNativeModels = {
  "o3-mini-high": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3727,8 +3375,6 @@ var openAiNativeModels = {
  "o3-mini-low": {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3740,8 +3386,6 @@ var openAiNativeModels = {
  o1: {
  maxTokens: 1e5,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 15,
@@ -3752,8 +3396,6 @@ var openAiNativeModels = {
  "o1-preview": {
  maxTokens: 32768,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 15,
@@ -3764,8 +3406,6 @@ var openAiNativeModels = {
  "o1-mini": {
  maxTokens: 65536,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.1,
@@ -3776,8 +3416,6 @@ var openAiNativeModels = {
  "gpt-4o": {
  maxTokens: 16384,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2.5,
@@ -3791,8 +3429,6 @@ var openAiNativeModels = {
  "gpt-4o-mini": {
  maxTokens: 16384,
  contextWindow: 128e3,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 0.15,
@@ -3806,8 +3442,6 @@ var openAiNativeModels = {
  "codex-mini-latest": {
  maxTokens: 16384,
  contextWindow: 2e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsImages: false,
  supportsPromptCache: false,
  inputPrice: 1.5,
@@ -3820,8 +3454,6 @@ var openAiNativeModels = {
  "gpt-5-2025-08-07": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3842,8 +3474,6 @@ var openAiNativeModels = {
  "gpt-5-mini-2025-08-07": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3864,8 +3494,6 @@ var openAiNativeModels = {
  "gpt-5-nano-2025-08-07": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3887,9 +3515,7 @@ var openAiModelInfoSaneDefaults = {
  supportsImages: true,
  supportsPromptCache: false,
  inputPrice: 0,
- outputPrice: 0
- supportsNativeTools: true,
- defaultToolProtocol: "native"
+ outputPrice: 0
  };
  var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
  var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
@@ -3901,8 +3527,6 @@ var openAiCodexModels = {
  "gpt-5.1-codex-max": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3915,11 +3539,24 @@ var openAiCodexModels = {
  supportsTemperature: false,
  description: "GPT-5.1 Codex Max: Maximum capability coding model via ChatGPT subscription"
  },
+ "gpt-5.1-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.1 Codex: GPT-5.1 optimized for agentic coding via ChatGPT subscription"
+ },
  "gpt-5.2-codex": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3931,11 +3568,71 @@ var openAiCodexModels = {
  supportsTemperature: false,
  description: "GPT-5.2 Codex: OpenAI's flagship coding model via ChatGPT subscription"
  },
+ "gpt-5.1": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["none", "low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsVerbosity: true,
+ supportsTemperature: false,
+ description: "GPT-5.1: General GPT-5.1 model via ChatGPT subscription"
+ },
+ "gpt-5": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsVerbosity: true,
+ supportsTemperature: false,
+ description: "GPT-5: General GPT-5 model via ChatGPT subscription"
+ },
+ "gpt-5-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5 Codex: GPT-5 optimized for agentic coding via ChatGPT subscription"
+ },
+ "gpt-5-codex-mini": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5 Codex Mini: Faster coding model via ChatGPT subscription"
+ },
  "gpt-5.1-codex-mini": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3950,8 +3647,6 @@ var openAiCodexModels = {
  "gpt-5.2": {
  maxTokens: 128e3,
  contextWindow: 4e5,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  includedTools: ["apply_patch"],
  excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
@@ -3972,7 +3667,6 @@ var openRouterDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4046,8 +3740,6 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -4059,8 +3751,6 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -4076,8 +3766,6 @@ var requestyDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4133,8 +3821,6 @@ var sambaNovaModels = {
  contextWindow: 16384,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.1,
  outputPrice: 0.2,
  description: "Meta Llama 3.1 8B Instruct model with 16K context window."
@@ -4144,8 +3830,6 @@ var sambaNovaModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.6,
  outputPrice: 1.2,
  description: "Meta Llama 3.3 70B Instruct model with 128K context window."
@@ -4156,8 +3840,6 @@ var sambaNovaModels = {
  supportsImages: false,
  supportsPromptCache: false,
  supportsReasoningBudget: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 5,
  outputPrice: 7,
  description: "DeepSeek R1 reasoning model with 32K context window."
@@ -4167,8 +3849,6 @@ var sambaNovaModels = {
  contextWindow: 32768,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 4.5,
  description: "DeepSeek V3 model with 32K context window."
@@ -4178,8 +3858,6 @@ var sambaNovaModels = {
  contextWindow: 32768,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 4.5,
  description: "DeepSeek V3.1 model with 32K context window."
@@ -4189,8 +3867,6 @@ var sambaNovaModels = {
  contextWindow: 131072,
  supportsImages: true,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.63,
  outputPrice: 1.8,
  description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
@@ -4200,8 +3876,6 @@ var sambaNovaModels = {
  contextWindow: 8192,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.4,
  outputPrice: 0.8,
  description: "Alibaba Qwen 3 32B model with 8K context window."
@@ -4211,8 +3885,6 @@ var sambaNovaModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 0.22,
  outputPrice: 0.59,
  description: "OpenAI gpt oss 120b model with 128k context window."
@@ -4226,7 +3898,6 @@ var unboundDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4240,8 +3911,6 @@ var vertexModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
@@ -4249,16 +3918,19 @@ var vertexModels = {
  defaultTemperature: 1,
  inputPrice: 4,
  outputPrice: 18,
+ cacheReadsPrice: 0.4,
  tiers: [
  {
  contextWindow: 2e5,
  inputPrice: 2,
- outputPrice: 12
+ outputPrice: 12,
+ cacheReadsPrice: 0.2
  },
  {
  contextWindow: Infinity,
  inputPrice: 4,
- outputPrice: 18
+ outputPrice: 18,
+ cacheReadsPrice: 0.4
  }
  ]
  },
@@ -4266,24 +3938,19 @@ var vertexModels = {
  maxTokens: 65536,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  supportsTemperature: true,
  defaultTemperature: 1,
- inputPrice: 0.
- outputPrice:
- cacheReadsPrice: 0.
- cacheWritesPrice: 1
+ inputPrice: 0.5,
+ outputPrice: 3,
+ cacheReadsPrice: 0.05
  },
  "gemini-2.5-flash-preview-05-20:thinking": {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.15,
  outputPrice: 3.5,
@@ -4295,8 +3962,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.15,
  outputPrice: 0.6
@@ -4305,8 +3970,6 @@ var vertexModels = {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.3,
  outputPrice: 2.5,
@@ -4319,8 +3982,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0.15,
  outputPrice: 3.5,
@@ -4332,8 +3993,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0.15,
  outputPrice: 0.6
@@ -4342,8 +4001,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15
@@ -4352,8 +4009,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15
@@ -4362,8 +4017,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15,
@@ -4374,8 +4027,6 @@ var vertexModels = {
  maxTokens: 64e3,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 15,
@@ -4401,8 +4052,6 @@ var vertexModels = {
  maxTokens: 65535,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0,
  outputPrice: 0
@@ -4411,8 +4060,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 2097152,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0,
  outputPrice: 0
@@ -4421,8 +4068,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.15,
  outputPrice: 0.6
@@ -4431,8 +4076,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0.075,
  outputPrice: 0.3
@@ -4441,8 +4084,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 32768,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 0,
  outputPrice: 0
@@ -4451,8 +4092,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 1048576,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: true,
  inputPrice: 0.075,
  outputPrice: 0.3
@@ -4461,8 +4100,6 @@ var vertexModels = {
  maxTokens: 8192,
  contextWindow: 2097152,
  supportsImages: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  supportsPromptCache: false,
  inputPrice: 1.25,
  outputPrice: 5
@@ -4473,8 +4110,6 @@ var vertexModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -4506,8 +4141,6 @@ var vertexModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
- supportsNativeTools: true,
- defaultToolProtocol: "native",
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -4538,8 +4171,6 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
|
|
4541
|
-
supportsNativeTools: true,
|
|
4542
|
-
defaultToolProtocol: "native",
|
|
4543
4174
|
inputPrice: 1,
|
|
4544
4175
|
outputPrice: 5,
|
|
4545
4176
|
cacheWritesPrice: 1.25,
|
|
@@ -4551,8 +4182,6 @@ var vertexModels = {
|
|
|
4551
4182
|
contextWindow: 2e5,
|
|
4552
4183
|
supportsImages: true,
|
|
4553
4184
|
supportsPromptCache: true,
|
|
4554
|
-
supportsNativeTools: true,
|
|
4555
|
-
defaultToolProtocol: "native",
|
|
4556
4185
|
inputPrice: 5,
|
|
4557
4186
|
outputPrice: 25,
|
|
4558
4187
|
cacheWritesPrice: 6.25,
|
|
@@ -4564,8 +4193,6 @@ var vertexModels = {
|
|
|
4564
4193
|
contextWindow: 2e5,
|
|
4565
4194
|
supportsImages: true,
|
|
4566
4195
|
supportsPromptCache: true,
|
|
4567
|
-
supportsNativeTools: true,
|
|
4568
|
-
defaultToolProtocol: "native",
|
|
4569
4196
|
inputPrice: 15,
|
|
4570
4197
|
outputPrice: 75,
|
|
4571
4198
|
cacheWritesPrice: 18.75,
|
|
@@ -4577,8 +4204,6 @@ var vertexModels = {
|
|
|
4577
4204
|
contextWindow: 2e5,
|
|
4578
4205
|
supportsImages: true,
|
|
4579
4206
|
supportsPromptCache: true,
|
|
4580
|
-
supportsNativeTools: true,
|
|
4581
|
-
defaultToolProtocol: "native",
|
|
4582
4207
|
inputPrice: 15,
|
|
4583
4208
|
outputPrice: 75,
|
|
4584
4209
|
cacheWritesPrice: 18.75,
|
|
@@ -4589,8 +4214,6 @@ var vertexModels = {
|
|
|
4589
4214
|
contextWindow: 2e5,
|
|
4590
4215
|
supportsImages: true,
|
|
4591
4216
|
supportsPromptCache: true,
|
|
4592
|
-
supportsNativeTools: true,
|
|
4593
|
-
defaultToolProtocol: "native",
|
|
4594
4217
|
inputPrice: 3,
|
|
4595
4218
|
outputPrice: 15,
|
|
4596
4219
|
cacheWritesPrice: 3.75,
|
|
@@ -4603,8 +4226,6 @@ var vertexModels = {
|
|
|
4603
4226
|
contextWindow: 2e5,
|
|
4604
4227
|
supportsImages: true,
|
|
4605
4228
|
supportsPromptCache: true,
|
|
4606
|
-
supportsNativeTools: true,
|
|
4607
|
-
defaultToolProtocol: "native",
|
|
4608
4229
|
inputPrice: 3,
|
|
4609
4230
|
outputPrice: 15,
|
|
4610
4231
|
cacheWritesPrice: 3.75,
|
|
@@ -4615,8 +4236,6 @@ var vertexModels = {
|
|
|
4615
4236
|
contextWindow: 2e5,
|
|
4616
4237
|
supportsImages: true,
|
|
4617
4238
|
supportsPromptCache: true,
|
|
4618
|
-
supportsNativeTools: true,
|
|
4619
|
-
defaultToolProtocol: "native",
|
|
4620
4239
|
inputPrice: 3,
|
|
4621
4240
|
outputPrice: 15,
|
|
4622
4241
|
cacheWritesPrice: 3.75,
|
|
@@ -4627,8 +4246,6 @@ var vertexModels = {
|
|
|
4627
4246
|
contextWindow: 2e5,
|
|
4628
4247
|
supportsImages: true,
|
|
4629
4248
|
supportsPromptCache: true,
|
|
4630
|
-
supportsNativeTools: true,
|
|
4631
|
-
defaultToolProtocol: "native",
|
|
4632
4249
|
inputPrice: 3,
|
|
4633
4250
|
outputPrice: 15,
|
|
4634
4251
|
cacheWritesPrice: 3.75,
|
|
@@ -4639,8 +4256,6 @@ var vertexModels = {
|
|
|
4639
4256
|
contextWindow: 2e5,
|
|
4640
4257
|
supportsImages: false,
|
|
4641
4258
|
supportsPromptCache: true,
|
|
4642
|
-
supportsNativeTools: true,
|
|
4643
|
-
defaultToolProtocol: "native",
|
|
4644
4259
|
inputPrice: 1,
|
|
4645
4260
|
outputPrice: 5,
|
|
4646
4261
|
cacheWritesPrice: 1.25,
|
|
@@ -4651,8 +4266,6 @@ var vertexModels = {
|
|
|
4651
4266
|
contextWindow: 2e5,
|
|
4652
4267
|
supportsImages: true,
|
|
4653
4268
|
supportsPromptCache: true,
|
|
4654
|
-
supportsNativeTools: true,
|
|
4655
|
-
defaultToolProtocol: "native",
|
|
4656
4269
|
inputPrice: 15,
|
|
4657
4270
|
outputPrice: 75,
|
|
4658
4271
|
cacheWritesPrice: 18.75,
|
|
@@ -4663,8 +4276,6 @@ var vertexModels = {
|
|
|
4663
4276
|
contextWindow: 2e5,
|
|
4664
4277
|
supportsImages: true,
|
|
4665
4278
|
supportsPromptCache: true,
|
|
4666
|
-
supportsNativeTools: true,
|
|
4667
|
-
defaultToolProtocol: "native",
|
|
4668
4279
|
inputPrice: 0.25,
|
|
4669
4280
|
outputPrice: 1.25,
|
|
4670
4281
|
cacheWritesPrice: 0.3,
|
|
@@ -4674,8 +4285,6 @@ var vertexModels = {
|
|
|
4674
4285
|
maxTokens: 64e3,
|
|
4675
4286
|
contextWindow: 1048576,
|
|
4676
4287
|
supportsImages: true,
|
|
4677
|
-
supportsNativeTools: true,
|
|
4678
|
-
defaultToolProtocol: "native",
|
|
4679
4288
|
supportsPromptCache: true,
|
|
4680
4289
|
inputPrice: 0.1,
|
|
4681
4290
|
outputPrice: 0.4,
|
|
@@ -4689,7 +4298,6 @@ var vertexModels = {
|
|
|
4689
4298
|
contextWindow: 131072,
|
|
4690
4299
|
supportsImages: false,
|
|
4691
4300
|
supportsPromptCache: false,
|
|
4692
|
-
supportsNativeTools: true,
|
|
4693
4301
|
inputPrice: 0.35,
|
|
4694
4302
|
outputPrice: 1.15,
|
|
4695
4303
|
description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
|
|
@@ -4699,7 +4307,6 @@ var vertexModels = {
|
|
|
4699
4307
|
contextWindow: 163840,
|
|
4700
4308
|
supportsImages: false,
|
|
4701
4309
|
supportsPromptCache: false,
|
|
4702
|
-
supportsNativeTools: true,
|
|
4703
4310
|
inputPrice: 1.35,
|
|
4704
4311
|
outputPrice: 5.4,
|
|
4705
4312
|
description: "DeepSeek R1 (0528). Available in us-central1"
|
|
@@ -4709,7 +4316,6 @@ var vertexModels = {
|
|
|
4709
4316
|
contextWindow: 163840,
|
|
4710
4317
|
supportsImages: false,
|
|
4711
4318
|
supportsPromptCache: false,
|
|
4712
|
-
supportsNativeTools: true,
|
|
4713
4319
|
inputPrice: 0.6,
|
|
4714
4320
|
outputPrice: 1.7,
|
|
4715
4321
|
description: "DeepSeek V3.1. Available in us-west2"
|
|
@@ -4719,7 +4325,6 @@ var vertexModels = {
|
|
|
4719
4325
|
contextWindow: 131072,
|
|
4720
4326
|
supportsImages: false,
|
|
4721
4327
|
supportsPromptCache: false,
|
|
4722
|
-
supportsNativeTools: true,
|
|
4723
4328
|
inputPrice: 0.15,
|
|
4724
4329
|
outputPrice: 0.6,
|
|
4725
4330
|
description: "OpenAI gpt-oss 120B. Available in us-central1"
|
|
@@ -4729,7 +4334,6 @@ var vertexModels = {
|
|
|
4729
4334
|
contextWindow: 131072,
|
|
4730
4335
|
supportsImages: false,
|
|
4731
4336
|
supportsPromptCache: false,
|
|
4732
|
-
supportsNativeTools: true,
|
|
4733
4337
|
inputPrice: 0.075,
|
|
4734
4338
|
outputPrice: 0.3,
|
|
4735
4339
|
description: "OpenAI gpt-oss 20B. Available in us-central1"
|
|
@@ -4739,7 +4343,6 @@ var vertexModels = {
|
|
|
4739
4343
|
contextWindow: 262144,
|
|
4740
4344
|
supportsImages: false,
|
|
4741
4345
|
supportsPromptCache: false,
|
|
4742
|
-
supportsNativeTools: true,
|
|
4743
4346
|
inputPrice: 1,
|
|
4744
4347
|
outputPrice: 4,
|
|
4745
4348
|
description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
|
|
@@ -4749,10 +4352,18 @@ var vertexModels = {
|
|
|
4749
4352
|
contextWindow: 262144,
|
|
4750
4353
|
supportsImages: false,
|
|
4751
4354
|
supportsPromptCache: false,
|
|
4752
|
-
supportsNativeTools: true,
|
|
4753
4355
|
inputPrice: 0.25,
|
|
4754
4356
|
outputPrice: 1,
|
|
4755
4357
|
description: "Qwen3 235B A22B Instruct. Available in us-south1"
|
|
4358
|
+
},
|
|
4359
|
+
"moonshotai/kimi-k2-thinking-maas": {
|
|
4360
|
+
maxTokens: 16384,
|
|
4361
|
+
contextWindow: 262144,
|
|
4362
|
+
supportsPromptCache: false,
|
|
4363
|
+
supportsImages: false,
|
|
4364
|
+
inputPrice: 0.6,
|
|
4365
|
+
outputPrice: 2.5,
|
|
4366
|
+
description: "Kimi K2 Thinking Model with 256K context window."
|
|
4756
4367
|
}
|
|
4757
4368
|
};
|
|
4758
4369
|
var VERTEX_1M_CONTEXT_MODEL_IDS = ["claude-sonnet-4@20250514", "claude-sonnet-4-5@20250929"];
|
|
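Note: the hunk above adds a "moonshotai/kimi-k2-thinking-maas" entry to the Vertex catalog. A minimal sketch of reading that entry to estimate per-request cost, assuming vertexModels is still exported from @roo-code/types and that prices are USD per million tokens (as the "$3 per million input tokens" comments elsewhere in this file suggest):

import { vertexModels } from "@roo-code/types";

// Rough per-request cost from the catalog entry added above.
function estimateVertexCost(modelId: keyof typeof vertexModels, inputTokens: number, outputTokens: number): number {
	const info = vertexModels[modelId];
	return ((info.inputPrice ?? 0) * inputTokens + (info.outputPrice ?? 0) * outputTokens) / 1_000_000;
}

// New entry: inputPrice 0.6, outputPrice 2.5, 262144-token context window.
console.log(estimateVertexCost("moonshotai/kimi-k2-thinking-maas", 10_000, 2_000)); // ≈ 0.011
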
@@ -4985,8 +4596,6 @@ var xaiModels = {
|
|
|
4985
4596
|
contextWindow: 256e3,
|
|
4986
4597
|
supportsImages: true,
|
|
4987
4598
|
supportsPromptCache: true,
|
|
4988
|
-
supportsNativeTools: true,
|
|
4989
|
-
defaultToolProtocol: "native",
|
|
4990
4599
|
inputPrice: 0.2,
|
|
4991
4600
|
outputPrice: 1.5,
|
|
4992
4601
|
cacheWritesPrice: 0.02,
|
|
@@ -5000,8 +4609,6 @@ var xaiModels = {
|
|
|
5000
4609
|
contextWindow: 2e6,
|
|
5001
4610
|
supportsImages: true,
|
|
5002
4611
|
supportsPromptCache: true,
|
|
5003
|
-
supportsNativeTools: true,
|
|
5004
|
-
defaultToolProtocol: "native",
|
|
5005
4612
|
inputPrice: 0.2,
|
|
5006
4613
|
outputPrice: 0.5,
|
|
5007
4614
|
cacheWritesPrice: 0.05,
|
|
@@ -5015,8 +4622,6 @@ var xaiModels = {
|
|
|
5015
4622
|
contextWindow: 2e6,
|
|
5016
4623
|
supportsImages: true,
|
|
5017
4624
|
supportsPromptCache: true,
|
|
5018
|
-
supportsNativeTools: true,
|
|
5019
|
-
defaultToolProtocol: "native",
|
|
5020
4625
|
inputPrice: 0.2,
|
|
5021
4626
|
outputPrice: 0.5,
|
|
5022
4627
|
cacheWritesPrice: 0.05,
|
|
@@ -5030,8 +4635,6 @@ var xaiModels = {
|
|
|
5030
4635
|
contextWindow: 2e6,
|
|
5031
4636
|
supportsImages: true,
|
|
5032
4637
|
supportsPromptCache: true,
|
|
5033
|
-
supportsNativeTools: true,
|
|
5034
|
-
defaultToolProtocol: "native",
|
|
5035
4638
|
inputPrice: 0.2,
|
|
5036
4639
|
outputPrice: 0.5,
|
|
5037
4640
|
cacheWritesPrice: 0.05,
|
|
@@ -5045,8 +4648,6 @@ var xaiModels = {
|
|
|
5045
4648
|
contextWindow: 2e6,
|
|
5046
4649
|
supportsImages: true,
|
|
5047
4650
|
supportsPromptCache: true,
|
|
5048
|
-
supportsNativeTools: true,
|
|
5049
|
-
defaultToolProtocol: "native",
|
|
5050
4651
|
inputPrice: 0.2,
|
|
5051
4652
|
outputPrice: 0.5,
|
|
5052
4653
|
cacheWritesPrice: 0.05,
|
|
@@ -5060,8 +4661,6 @@ var xaiModels = {
|
|
|
5060
4661
|
contextWindow: 256e3,
|
|
5061
4662
|
supportsImages: true,
|
|
5062
4663
|
supportsPromptCache: true,
|
|
5063
|
-
supportsNativeTools: true,
|
|
5064
|
-
defaultToolProtocol: "native",
|
|
5065
4664
|
inputPrice: 3,
|
|
5066
4665
|
outputPrice: 15,
|
|
5067
4666
|
cacheWritesPrice: 0.75,
|
|
@@ -5075,8 +4674,6 @@ var xaiModels = {
|
|
|
5075
4674
|
contextWindow: 131072,
|
|
5076
4675
|
supportsImages: true,
|
|
5077
4676
|
supportsPromptCache: true,
|
|
5078
|
-
supportsNativeTools: true,
|
|
5079
|
-
defaultToolProtocol: "native",
|
|
5080
4677
|
inputPrice: 0.3,
|
|
5081
4678
|
outputPrice: 0.5,
|
|
5082
4679
|
cacheWritesPrice: 0.07,
|
|
@@ -5092,8 +4689,6 @@ var xaiModels = {
|
|
|
5092
4689
|
contextWindow: 131072,
|
|
5093
4690
|
supportsImages: true,
|
|
5094
4691
|
supportsPromptCache: true,
|
|
5095
|
-
supportsNativeTools: true,
|
|
5096
|
-
defaultToolProtocol: "native",
|
|
5097
4692
|
inputPrice: 3,
|
|
5098
4693
|
outputPrice: 15,
|
|
5099
4694
|
cacheWritesPrice: 0.75,
|
|
@@ -5190,7 +4785,6 @@ var vercelAiGatewayDefaultModelInfo = {
|
|
|
5190
4785
|
contextWindow: 2e5,
|
|
5191
4786
|
supportsImages: true,
|
|
5192
4787
|
supportsPromptCache: true,
|
|
5193
|
-
supportsNativeTools: true,
|
|
5194
4788
|
inputPrice: 3,
|
|
5195
4789
|
outputPrice: 15,
|
|
5196
4790
|
cacheWritesPrice: 3.75,
|
|
@@ -5207,8 +4801,6 @@ var internationalZAiModels = {
|
|
|
5207
4801
|
contextWindow: 131072,
|
|
5208
4802
|
supportsImages: false,
|
|
5209
4803
|
supportsPromptCache: true,
|
|
5210
|
-
supportsNativeTools: true,
|
|
5211
|
-
defaultToolProtocol: "native",
|
|
5212
4804
|
inputPrice: 0.6,
|
|
5213
4805
|
outputPrice: 2.2,
|
|
5214
4806
|
cacheWritesPrice: 0,
|
|
@@ -5220,8 +4812,6 @@ var internationalZAiModels = {
|
|
|
5220
4812
|
contextWindow: 131072,
|
|
5221
4813
|
supportsImages: false,
|
|
5222
4814
|
supportsPromptCache: true,
|
|
5223
|
-
supportsNativeTools: true,
|
|
5224
|
-
defaultToolProtocol: "native",
|
|
5225
4815
|
inputPrice: 0.2,
|
|
5226
4816
|
outputPrice: 1.1,
|
|
5227
4817
|
cacheWritesPrice: 0,
|
|
@@ -5233,8 +4823,6 @@ var internationalZAiModels = {
|
|
|
5233
4823
|
contextWindow: 131072,
|
|
5234
4824
|
supportsImages: false,
|
|
5235
4825
|
supportsPromptCache: true,
|
|
5236
|
-
supportsNativeTools: true,
|
|
5237
|
-
defaultToolProtocol: "native",
|
|
5238
4826
|
inputPrice: 2.2,
|
|
5239
4827
|
outputPrice: 8.9,
|
|
5240
4828
|
cacheWritesPrice: 0,
|
|
@@ -5246,8 +4834,6 @@ var internationalZAiModels = {
|
|
|
5246
4834
|
contextWindow: 131072,
|
|
5247
4835
|
supportsImages: false,
|
|
5248
4836
|
supportsPromptCache: true,
|
|
5249
|
-
supportsNativeTools: true,
|
|
5250
|
-
defaultToolProtocol: "native",
|
|
5251
4837
|
inputPrice: 1.1,
|
|
5252
4838
|
outputPrice: 4.5,
|
|
5253
4839
|
cacheWritesPrice: 0,
|
|
@@ -5259,8 +4845,6 @@ var internationalZAiModels = {
|
|
|
5259
4845
|
contextWindow: 131072,
|
|
5260
4846
|
supportsImages: false,
|
|
5261
4847
|
supportsPromptCache: true,
|
|
5262
|
-
supportsNativeTools: true,
|
|
5263
|
-
defaultToolProtocol: "native",
|
|
5264
4848
|
inputPrice: 0,
|
|
5265
4849
|
outputPrice: 0,
|
|
5266
4850
|
cacheWritesPrice: 0,
|
|
@@ -5272,21 +4856,28 @@ var internationalZAiModels = {
|
|
|
5272
4856
|
contextWindow: 131072,
|
|
5273
4857
|
supportsImages: true,
|
|
5274
4858
|
supportsPromptCache: true,
|
|
5275
|
-
supportsNativeTools: true,
|
|
5276
|
-
defaultToolProtocol: "native",
|
|
5277
4859
|
inputPrice: 0.6,
|
|
5278
4860
|
outputPrice: 1.8,
|
|
5279
4861
|
cacheWritesPrice: 0,
|
|
5280
4862
|
cacheReadsPrice: 0.11,
|
|
5281
4863
|
description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
|
|
5282
4864
|
},
|
|
4865
|
+
"glm-4.6v": {
|
|
4866
|
+
maxTokens: 16384,
|
|
4867
|
+
contextWindow: 131072,
|
|
4868
|
+
supportsImages: true,
|
|
4869
|
+
supportsPromptCache: true,
|
|
4870
|
+
inputPrice: 0.3,
|
|
4871
|
+
outputPrice: 0.9,
|
|
4872
|
+
cacheWritesPrice: 0,
|
|
4873
|
+
cacheReadsPrice: 0.05,
|
|
4874
|
+
description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
|
|
4875
|
+
},
|
|
5283
4876
|
"glm-4.6": {
|
|
5284
4877
|
maxTokens: 16384,
|
|
5285
4878
|
contextWindow: 2e5,
|
|
5286
4879
|
supportsImages: false,
|
|
5287
4880
|
supportsPromptCache: true,
|
|
5288
|
-
supportsNativeTools: true,
|
|
5289
|
-
defaultToolProtocol: "native",
|
|
5290
4881
|
inputPrice: 0.6,
|
|
5291
4882
|
outputPrice: 2.2,
|
|
5292
4883
|
cacheWritesPrice: 0,
|
|
@@ -5298,8 +4889,6 @@ var internationalZAiModels = {
|
|
|
5298
4889
|
contextWindow: 2e5,
|
|
5299
4890
|
supportsImages: false,
|
|
5300
4891
|
supportsPromptCache: true,
|
|
5301
|
-
supportsNativeTools: true,
|
|
5302
|
-
defaultToolProtocol: "native",
|
|
5303
4892
|
supportsReasoningEffort: ["disable", "medium"],
|
|
5304
4893
|
reasoningEffort: "medium",
|
|
5305
4894
|
preserveReasoning: true,
|
|
@@ -5309,13 +4898,55 @@ var internationalZAiModels = {
|
|
|
5309
4898
|
cacheReadsPrice: 0.11,
|
|
5310
4899
|
description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
|
|
5311
4900
|
},
|
|
4901
|
+
"glm-4.7-flash": {
|
|
4902
|
+
maxTokens: 16384,
|
|
4903
|
+
contextWindow: 2e5,
|
|
4904
|
+
supportsImages: false,
|
|
4905
|
+
supportsPromptCache: true,
|
|
4906
|
+
inputPrice: 0,
|
|
4907
|
+
outputPrice: 0,
|
|
4908
|
+
cacheWritesPrice: 0,
|
|
4909
|
+
cacheReadsPrice: 0,
|
|
4910
|
+
description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
|
|
4911
|
+
},
|
|
4912
|
+
"glm-4.7-flashx": {
|
|
4913
|
+
maxTokens: 16384,
|
|
4914
|
+
contextWindow: 2e5,
|
|
4915
|
+
supportsImages: false,
|
|
4916
|
+
supportsPromptCache: true,
|
|
4917
|
+
inputPrice: 0.07,
|
|
4918
|
+
outputPrice: 0.4,
|
|
4919
|
+
cacheWritesPrice: 0,
|
|
4920
|
+
cacheReadsPrice: 0.01,
|
|
4921
|
+
description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
|
|
4922
|
+
},
|
|
4923
|
+
"glm-4.6v-flash": {
|
|
4924
|
+
maxTokens: 16384,
|
|
4925
|
+
contextWindow: 131072,
|
|
4926
|
+
supportsImages: true,
|
|
4927
|
+
supportsPromptCache: true,
|
|
4928
|
+
inputPrice: 0,
|
|
4929
|
+
outputPrice: 0,
|
|
4930
|
+
cacheWritesPrice: 0,
|
|
4931
|
+
cacheReadsPrice: 0,
|
|
4932
|
+
description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
|
|
4933
|
+
},
|
|
4934
|
+
"glm-4.6v-flashx": {
|
|
4935
|
+
maxTokens: 16384,
|
|
4936
|
+
contextWindow: 131072,
|
|
4937
|
+
supportsImages: true,
|
|
4938
|
+
supportsPromptCache: true,
|
|
4939
|
+
inputPrice: 0.04,
|
|
4940
|
+
outputPrice: 0.4,
|
|
4941
|
+
cacheWritesPrice: 0,
|
|
4942
|
+
cacheReadsPrice: 4e-3,
|
|
4943
|
+
description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
|
|
4944
|
+
},
|
|
5312
4945
|
"glm-4-32b-0414-128k": {
|
|
5313
4946
|
maxTokens: 16384,
|
|
5314
4947
|
contextWindow: 131072,
|
|
5315
4948
|
supportsImages: false,
|
|
5316
4949
|
supportsPromptCache: false,
|
|
5317
|
-
supportsNativeTools: true,
|
|
5318
|
-
defaultToolProtocol: "native",
|
|
5319
4950
|
inputPrice: 0.1,
|
|
5320
4951
|
outputPrice: 0.1,
|
|
5321
4952
|
cacheWritesPrice: 0,
|
|
@@ -5330,8 +4961,6 @@ var mainlandZAiModels = {
|
|
|
5330
4961
|
contextWindow: 131072,
|
|
5331
4962
|
supportsImages: false,
|
|
5332
4963
|
supportsPromptCache: true,
|
|
5333
|
-
supportsNativeTools: true,
|
|
5334
|
-
defaultToolProtocol: "native",
|
|
5335
4964
|
inputPrice: 0.29,
|
|
5336
4965
|
outputPrice: 1.14,
|
|
5337
4966
|
cacheWritesPrice: 0,
|
|
@@ -5343,8 +4972,6 @@ var mainlandZAiModels = {
|
|
|
5343
4972
|
contextWindow: 131072,
|
|
5344
4973
|
supportsImages: false,
|
|
5345
4974
|
supportsPromptCache: true,
|
|
5346
|
-
supportsNativeTools: true,
|
|
5347
|
-
defaultToolProtocol: "native",
|
|
5348
4975
|
inputPrice: 0.1,
|
|
5349
4976
|
outputPrice: 0.6,
|
|
5350
4977
|
cacheWritesPrice: 0,
|
|
@@ -5356,8 +4983,6 @@ var mainlandZAiModels = {
|
|
|
5356
4983
|
contextWindow: 131072,
|
|
5357
4984
|
supportsImages: false,
|
|
5358
4985
|
supportsPromptCache: true,
|
|
5359
|
-
supportsNativeTools: true,
|
|
5360
|
-
defaultToolProtocol: "native",
|
|
5361
4986
|
inputPrice: 0.29,
|
|
5362
4987
|
outputPrice: 1.14,
|
|
5363
4988
|
cacheWritesPrice: 0,
|
|
@@ -5369,8 +4994,6 @@ var mainlandZAiModels = {
|
|
|
5369
4994
|
contextWindow: 131072,
|
|
5370
4995
|
supportsImages: false,
|
|
5371
4996
|
supportsPromptCache: true,
|
|
5372
|
-
supportsNativeTools: true,
|
|
5373
|
-
defaultToolProtocol: "native",
|
|
5374
4997
|
inputPrice: 0.1,
|
|
5375
4998
|
outputPrice: 0.6,
|
|
5376
4999
|
cacheWritesPrice: 0,
|
|
@@ -5382,8 +5005,6 @@ var mainlandZAiModels = {
|
|
|
5382
5005
|
contextWindow: 131072,
|
|
5383
5006
|
supportsImages: false,
|
|
5384
5007
|
supportsPromptCache: true,
|
|
5385
|
-
supportsNativeTools: true,
|
|
5386
|
-
defaultToolProtocol: "native",
|
|
5387
5008
|
inputPrice: 0,
|
|
5388
5009
|
outputPrice: 0,
|
|
5389
5010
|
cacheWritesPrice: 0,
|
|
@@ -5395,8 +5016,6 @@ var mainlandZAiModels = {
|
|
|
5395
5016
|
contextWindow: 131072,
|
|
5396
5017
|
supportsImages: true,
|
|
5397
5018
|
supportsPromptCache: true,
|
|
5398
|
-
supportsNativeTools: true,
|
|
5399
|
-
defaultToolProtocol: "native",
|
|
5400
5019
|
inputPrice: 0.29,
|
|
5401
5020
|
outputPrice: 0.93,
|
|
5402
5021
|
cacheWritesPrice: 0,
|
|
@@ -5408,8 +5027,6 @@ var mainlandZAiModels = {
|
|
|
5408
5027
|
contextWindow: 204800,
|
|
5409
5028
|
supportsImages: false,
|
|
5410
5029
|
supportsPromptCache: true,
|
|
5411
|
-
supportsNativeTools: true,
|
|
5412
|
-
defaultToolProtocol: "native",
|
|
5413
5030
|
inputPrice: 0.29,
|
|
5414
5031
|
outputPrice: 1.14,
|
|
5415
5032
|
cacheWritesPrice: 0,
|
|
@@ -5421,8 +5038,6 @@ var mainlandZAiModels = {
|
|
|
5421
5038
|
contextWindow: 204800,
|
|
5422
5039
|
supportsImages: false,
|
|
5423
5040
|
supportsPromptCache: true,
|
|
5424
|
-
supportsNativeTools: true,
|
|
5425
|
-
defaultToolProtocol: "native",
|
|
5426
5041
|
supportsReasoningEffort: ["disable", "medium"],
|
|
5427
5042
|
reasoningEffort: "medium",
|
|
5428
5043
|
preserveReasoning: true,
|
|
@@ -5431,6 +5046,61 @@ var mainlandZAiModels = {
|
|
|
5431
5046
|
cacheWritesPrice: 0,
|
|
5432
5047
|
cacheReadsPrice: 0.057,
|
|
5433
5048
|
description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
|
|
5049
|
+
},
|
|
5050
|
+
"glm-4.7-flash": {
|
|
5051
|
+
maxTokens: 16384,
|
|
5052
|
+
contextWindow: 204800,
|
|
5053
|
+
supportsImages: false,
|
|
5054
|
+
supportsPromptCache: true,
|
|
5055
|
+
inputPrice: 0,
|
|
5056
|
+
outputPrice: 0,
|
|
5057
|
+
cacheWritesPrice: 0,
|
|
5058
|
+
cacheReadsPrice: 0,
|
|
5059
|
+
description: "GLM-4.7-Flash is a free, high-speed variant of GLM-4.7 offering fast responses for reasoning and coding tasks."
|
|
5060
|
+
},
|
|
5061
|
+
"glm-4.7-flashx": {
|
|
5062
|
+
maxTokens: 16384,
|
|
5063
|
+
contextWindow: 204800,
|
|
5064
|
+
supportsImages: false,
|
|
5065
|
+
supportsPromptCache: true,
|
|
5066
|
+
inputPrice: 0.035,
|
|
5067
|
+
outputPrice: 0.2,
|
|
5068
|
+
cacheWritesPrice: 0,
|
|
5069
|
+
cacheReadsPrice: 5e-3,
|
|
5070
|
+
description: "GLM-4.7-FlashX is an ultra-fast variant of GLM-4.7 with exceptional speed and cost-effectiveness for high-throughput applications."
|
|
5071
|
+
},
|
|
5072
|
+
"glm-4.6v": {
|
|
5073
|
+
maxTokens: 16384,
|
|
5074
|
+
contextWindow: 131072,
|
|
5075
|
+
supportsImages: true,
|
|
5076
|
+
supportsPromptCache: true,
|
|
5077
|
+
inputPrice: 0.15,
|
|
5078
|
+
outputPrice: 0.45,
|
|
5079
|
+
cacheWritesPrice: 0,
|
|
5080
|
+
cacheReadsPrice: 0.025,
|
|
5081
|
+
description: "GLM-4.6V is an advanced multimodal vision model with improved performance and cost-efficiency for visual understanding tasks."
|
|
5082
|
+
},
|
|
5083
|
+
"glm-4.6v-flash": {
|
|
5084
|
+
maxTokens: 16384,
|
|
5085
|
+
contextWindow: 131072,
|
|
5086
|
+
supportsImages: true,
|
|
5087
|
+
supportsPromptCache: true,
|
|
5088
|
+
inputPrice: 0,
|
|
5089
|
+
outputPrice: 0,
|
|
5090
|
+
cacheWritesPrice: 0,
|
|
5091
|
+
cacheReadsPrice: 0,
|
|
5092
|
+
description: "GLM-4.6V-Flash is a free, high-speed multimodal vision model for rapid image understanding and visual reasoning tasks."
|
|
5093
|
+
},
|
|
5094
|
+
"glm-4.6v-flashx": {
|
|
5095
|
+
maxTokens: 16384,
|
|
5096
|
+
contextWindow: 131072,
|
|
5097
|
+
supportsImages: true,
|
|
5098
|
+
supportsPromptCache: true,
|
|
5099
|
+
inputPrice: 0.02,
|
|
5100
|
+
outputPrice: 0.2,
|
|
5101
|
+
cacheWritesPrice: 0,
|
|
5102
|
+
cacheReadsPrice: 2e-3,
|
|
5103
|
+
description: "GLM-4.6V-FlashX is an ultra-fast multimodal vision model optimized for high-speed visual processing at low cost."
|
|
5434
5104
|
}
|
|
5435
5105
|
};
|
|
5436
5106
|
var ZAI_DEFAULT_TEMPERATURE = 0.6;
|
|
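Note: both Z.AI catalogs gain the GLM-4.6V and GLM-4.7 Flash/FlashX variants above, with region-specific pricing. A small sketch comparing the two tables and showing how the discounted cacheReadsPrice on the new glm-4.6v entry changes input-side cost, assuming both objects are exported and prices are USD per million tokens (inferred from this compiled output, not confirmed by docs):

import { internationalZAiModels, mainlandZAiModels } from "@roo-code/types";

// Same new model id, different regional price points (per million tokens).
console.log(internationalZAiModels["glm-4.7-flashx"].inputPrice); // 0.07
console.log(mainlandZAiModels["glm-4.7-flashx"].inputPrice);      // 0.035

// Prompt-cache effect for the international glm-4.6v entry: a 100k-token prompt
// with 80k tokens served from cache at cacheReadsPrice instead of inputPrice.
const glm46v = internationalZAiModels["glm-4.6v"];
const uncached = ((glm46v.inputPrice ?? 0) * 100_000) / 1_000_000;                          // 0.03
const cached = ((glm46v.inputPrice ?? 0) * 20_000 + (glm46v.cacheReadsPrice ?? 0) * 80_000) / 1_000_000; // 0.01
console.log({ uncached, cached });
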
@@ -5464,7 +5134,6 @@ var deepInfraDefaultModelInfo = {
|
|
|
5464
5134
|
contextWindow: 262144,
|
|
5465
5135
|
supportsImages: false,
|
|
5466
5136
|
supportsPromptCache: false,
|
|
5467
|
-
supportsNativeTools: true,
|
|
5468
5137
|
inputPrice: 0.3,
|
|
5469
5138
|
outputPrice: 1.2,
|
|
5470
5139
|
description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
|
|
@@ -5478,8 +5147,6 @@ var minimaxModels = {
|
|
|
5478
5147
|
contextWindow: 192e3,
|
|
5479
5148
|
supportsImages: false,
|
|
5480
5149
|
supportsPromptCache: true,
|
|
5481
|
-
supportsNativeTools: true,
|
|
5482
|
-
defaultToolProtocol: "native",
|
|
5483
5150
|
includedTools: ["search_and_replace"],
|
|
5484
5151
|
excludedTools: ["apply_diff"],
|
|
5485
5152
|
preserveReasoning: true,
|
|
@@ -5494,8 +5161,6 @@ var minimaxModels = {
|
|
|
5494
5161
|
contextWindow: 192e3,
|
|
5495
5162
|
supportsImages: false,
|
|
5496
5163
|
supportsPromptCache: true,
|
|
5497
|
-
supportsNativeTools: true,
|
|
5498
|
-
defaultToolProtocol: "native",
|
|
5499
5164
|
includedTools: ["search_and_replace"],
|
|
5500
5165
|
excludedTools: ["apply_diff"],
|
|
5501
5166
|
preserveReasoning: true,
|
|
@@ -5510,8 +5175,6 @@ var minimaxModels = {
|
|
|
5510
5175
|
contextWindow: 192e3,
|
|
5511
5176
|
supportsImages: false,
|
|
5512
5177
|
supportsPromptCache: true,
|
|
5513
|
-
supportsNativeTools: true,
|
|
5514
|
-
defaultToolProtocol: "native",
|
|
5515
5178
|
includedTools: ["search_and_replace"],
|
|
5516
5179
|
excludedTools: ["apply_diff"],
|
|
5517
5180
|
preserveReasoning: true,
|
|
@@ -5583,8 +5246,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
|
|
|
5583
5246
|
return deepInfraDefaultModelId;
|
|
5584
5247
|
case "vscode-lm":
|
|
5585
5248
|
return vscodeLlmDefaultModelId;
|
|
5586
|
-
case "claude-code":
|
|
5587
|
-
return claudeCodeDefaultModelId;
|
|
5588
5249
|
case "cerebras":
|
|
5589
5250
|
return cerebrasDefaultModelId;
|
|
5590
5251
|
case "sambanova":
|
|
@@ -5642,7 +5303,6 @@ var providerNames = [
|
|
|
5642
5303
|
"bedrock",
|
|
5643
5304
|
"baseten",
|
|
5644
5305
|
"cerebras",
|
|
5645
|
-
"claude-code",
|
|
5646
5306
|
"doubao",
|
|
5647
5307
|
"deepseek",
|
|
5648
5308
|
"featherless",
|
|
@@ -5672,9 +5332,7 @@ var providerSettingsEntrySchema = import_zod8.z.object({
|
|
|
5672
5332
|
});
|
|
5673
5333
|
var baseProviderSettingsSchema = import_zod8.z.object({
|
|
5674
5334
|
includeMaxTokens: import_zod8.z.boolean().optional(),
|
|
5675
|
-
diffEnabled: import_zod8.z.boolean().optional(),
|
|
5676
5335
|
todoListEnabled: import_zod8.z.boolean().optional(),
|
|
5677
|
-
fuzzyMatchThreshold: import_zod8.z.number().optional(),
|
|
5678
5336
|
modelTemperature: import_zod8.z.number().nullish(),
|
|
5679
5337
|
rateLimitSeconds: import_zod8.z.number().optional(),
|
|
5680
5338
|
consecutiveMistakeLimit: import_zod8.z.number().min(0).optional(),
|
|
@@ -5684,9 +5342,7 @@ var baseProviderSettingsSchema = import_zod8.z.object({
|
|
|
5684
5342
|
modelMaxTokens: import_zod8.z.number().optional(),
|
|
5685
5343
|
modelMaxThinkingTokens: import_zod8.z.number().optional(),
|
|
5686
5344
|
// Model verbosity.
|
|
5687
|
-
verbosity: verbosityLevelsSchema.optional()
|
|
5688
|
-
// Tool protocol override for this profile.
|
|
5689
|
-
toolProtocol: import_zod8.z.enum(["xml", "native"]).optional()
|
|
5345
|
+
verbosity: verbosityLevelsSchema.optional()
|
|
5690
5346
|
});
|
|
5691
5347
|
var apiModelIdProviderModelSchema = baseProviderSettingsSchema.extend({
|
|
5692
5348
|
apiModelId: import_zod8.z.string().optional()
|
|
@@ -5698,7 +5354,6 @@ var anthropicSchema = apiModelIdProviderModelSchema.extend({
|
|
|
5698
5354
|
anthropicBeta1MContext: import_zod8.z.boolean().optional()
|
|
5699
5355
|
// Enable 'context-1m-2025-08-07' beta for 1M context window.
|
|
5700
5356
|
});
|
|
5701
|
-
var claudeCodeSchema = apiModelIdProviderModelSchema.extend({});
|
|
5702
5357
|
var openRouterSchema = baseProviderSettingsSchema.extend({
|
|
5703
5358
|
openRouterApiKey: import_zod8.z.string().optional(),
|
|
5704
5359
|
openRouterModelId: import_zod8.z.string().optional(),
|
|
@@ -5887,7 +5542,6 @@ var defaultSchema = import_zod8.z.object({
|
|
|
5887
5542
|
});
|
|
5888
5543
|
var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiProvider", [
|
|
5889
5544
|
anthropicSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("anthropic") })),
|
|
5890
|
-
claudeCodeSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("claude-code") })),
|
|
5891
5545
|
openRouterSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openrouter") })),
|
|
5892
5546
|
bedrockSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("bedrock") })),
|
|
5893
5547
|
vertexSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("vertex") })),
|
|
@@ -5928,7 +5582,6 @@ var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiP
|
|
|
5928
5582
|
var providerSettingsSchema = import_zod8.z.object({
|
|
5929
5583
|
apiProvider: providerNamesSchema.optional(),
|
|
5930
5584
|
...anthropicSchema.shape,
|
|
5931
|
-
...claudeCodeSchema.shape,
|
|
5932
5585
|
...openRouterSchema.shape,
|
|
5933
5586
|
...bedrockSchema.shape,
|
|
5934
5587
|
...vertexSchema.shape,
|
|
@@ -5993,7 +5646,6 @@ var getModelId = (settings) => {
|
|
|
5993
5646
|
var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key) && !isCustomProvider(key) && !isFauxProvider(key);
|
|
5994
5647
|
var modelIdKeysByProvider = {
|
|
5995
5648
|
anthropic: "apiModelId",
|
|
5996
|
-
"claude-code": "apiModelId",
|
|
5997
5649
|
openrouter: "openRouterModelId",
|
|
5998
5650
|
bedrock: "apiModelId",
|
|
5999
5651
|
vertex: "apiModelId",
|
|
@@ -6027,7 +5679,7 @@ var modelIdKeysByProvider = {
|
|
|
6027
5679
|
roo: "apiModelId",
|
|
6028
5680
|
"vercel-ai-gateway": "vercelAiGatewayModelId"
|
|
6029
5681
|
};
|
|
6030
|
-
var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "
|
|
5682
|
+
var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "bedrock", "minimax"];
|
|
6031
5683
|
var getApiProtocol = (provider, modelId) => {
|
|
6032
5684
|
if (provider && ANTHROPIC_STYLE_PROVIDERS.includes(provider)) {
|
|
6033
5685
|
return "anthropic";
|
|
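Note: claude-code is dropped from ANTHROPIC_STYLE_PROVIDERS above, leaving anthropic, bedrock and minimax. A quick sketch of what that branch means for getApiProtocol, based only on the code visible in this hunk (behavior for other providers is not shown here, and the model ids below are illustrative):

import { getApiProtocol } from "@roo-code/types";

// Providers in ANTHROPIC_STYLE_PROVIDERS resolve to the "anthropic" protocol
// regardless of the model id passed in.
console.log(getApiProtocol("anthropic", "claude-sonnet-4-5")); // "anthropic"
console.log(getApiProtocol("bedrock", "some-model-id"));       // "anthropic"
console.log(getApiProtocol("minimax", "minimax-m2"));          // "anthropic"
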
@@ -6056,7 +5708,6 @@ var MODELS_BY_PROVIDER = {
|
|
|
6056
5708
|
label: "Cerebras",
|
|
6057
5709
|
models: Object.keys(cerebrasModels)
|
|
6058
5710
|
},
|
|
6059
|
-
"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
|
|
6060
5711
|
deepseek: {
|
|
6061
5712
|
id: "deepseek",
|
|
6062
5713
|
label: "DeepSeek",
|
|
@@ -6160,16 +5811,6 @@ var historyItemSchema = import_zod9.z.object({
|
|
|
6160
5811
|
size: import_zod9.z.number().optional(),
|
|
6161
5812
|
workspace: import_zod9.z.string().optional(),
|
|
6162
5813
|
mode: import_zod9.z.string().optional(),
|
|
6163
|
-
/**
|
|
6164
|
-
* The tool protocol used by this task. Once a task uses tools with a specific
|
|
6165
|
-
* protocol (XML or Native), it is permanently locked to that protocol.
|
|
6166
|
-
*
|
|
6167
|
-
* - "xml": Tool calls are parsed from XML text (no tool IDs)
|
|
6168
|
-
* - "native": Tool calls come as tool_call chunks with IDs
|
|
6169
|
-
*
|
|
6170
|
-
* This ensures task resumption works correctly even when NTC settings change.
|
|
6171
|
-
*/
|
|
6172
|
-
toolProtocol: import_zod9.z.enum(["xml", "native"]).optional(),
|
|
6173
5814
|
apiConfigName: import_zod9.z.string().optional(),
|
|
6174
5815
|
// Provider profile name for sticky profile feature
|
|
6175
5816
|
status: import_zod9.z.enum(["active", "completed", "delegated"]).optional(),
|
|
@@ -6187,23 +5828,12 @@ var historyItemSchema = import_zod9.z.object({
|
|
|
6187
5828
|
|
|
6188
5829
|
// src/experiment.ts
|
|
6189
5830
|
var import_zod10 = require("zod");
|
|
6190
|
-
var experimentIds = [
|
|
6191
|
-
"powerSteering",
|
|
6192
|
-
"multiFileApplyDiff",
|
|
6193
|
-
"preventFocusDisruption",
|
|
6194
|
-
"imageGeneration",
|
|
6195
|
-
"runSlashCommand",
|
|
6196
|
-
"multipleNativeToolCalls",
|
|
6197
|
-
"customTools"
|
|
6198
|
-
];
|
|
5831
|
+
var experimentIds = ["preventFocusDisruption", "imageGeneration", "runSlashCommand", "customTools"];
|
|
6199
5832
|
var experimentIdsSchema = import_zod10.z.enum(experimentIds);
|
|
6200
5833
|
var experimentsSchema = import_zod10.z.object({
|
|
6201
|
-
powerSteering: import_zod10.z.boolean().optional(),
|
|
6202
|
-
multiFileApplyDiff: import_zod10.z.boolean().optional(),
|
|
6203
5834
|
preventFocusDisruption: import_zod10.z.boolean().optional(),
|
|
6204
5835
|
imageGeneration: import_zod10.z.boolean().optional(),
|
|
6205
5836
|
runSlashCommand: import_zod10.z.boolean().optional(),
|
|
6206
|
-
multipleNativeToolCalls: import_zod10.z.boolean().optional(),
|
|
6207
5837
|
customTools: import_zod10.z.boolean().optional()
|
|
6208
5838
|
});
|
|
6209
5839
|
|
|
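Note: the experiment surface shrinks above; powerSteering, multiFileApplyDiff and multipleNativeToolCalls are gone, leaving four ids. A minimal sketch of validating against the narrowed schema, assuming experimentIds and experimentsSchema are exported as before:

import { experimentIds, experimentsSchema } from "@roo-code/types";

console.log(experimentIds);
// ["preventFocusDisruption", "imageGeneration", "runSlashCommand", "customTools"]

// The remaining flags still validate.
experimentsSchema.parse({ imageGeneration: true, customTools: false });

// Removed flags such as powerSteering are no longer schema keys; with a plain
// z.object() they are stripped on parse rather than rejected.
console.log(experimentsSchema.parse({ powerSteering: true })); // {}
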
@@ -6255,6 +5885,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
|
|
|
6255
5885
|
TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
|
|
6256
5886
|
TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
|
|
6257
5887
|
TelemetryEventName2["MODEL_CACHE_EMPTY_RESPONSE"] = "Model Cache Empty Response";
|
|
5888
|
+
TelemetryEventName2["READ_FILE_LEGACY_FORMAT_USED"] = "Read File Legacy Format Used";
|
|
6258
5889
|
return TelemetryEventName2;
|
|
6259
5890
|
})(TelemetryEventName || {});
|
|
6260
5891
|
var staticAppPropertiesSchema = import_zod11.z.object({
|
|
@@ -6343,7 +5974,8 @@ var rooCodeTelemetryEventSchema = import_zod11.z.discriminatedUnion("type", [
|
|
|
6343
5974
|
"Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */,
|
|
6344
5975
|
"Tab Shown" /* TAB_SHOWN */,
|
|
6345
5976
|
"Mode Setting Changed" /* MODE_SETTINGS_CHANGED */,
|
|
6346
|
-
"Custom Mode Created" /* CUSTOM_MODE_CREATED
|
|
5977
|
+
"Custom Mode Created" /* CUSTOM_MODE_CREATED */,
|
|
5978
|
+
"Read File Legacy Format Used" /* READ_FILE_LEGACY_FORMAT_USED */
|
|
6347
5979
|
]),
|
|
6348
5980
|
properties: telemetryPropertiesSchema
|
|
6349
5981
|
}),
|
|
@@ -6657,7 +6289,15 @@ var isLanguage = (value) => languages.includes(value);
|
|
|
6657
6289
|
|
|
6658
6290
|
// src/global-settings.ts
|
|
6659
6291
|
var DEFAULT_WRITE_DELAY_MS = 1e3;
|
|
6660
|
-
var
|
|
6292
|
+
var TERMINAL_PREVIEW_BYTES = {
|
|
6293
|
+
small: 5 * 1024,
|
|
6294
|
+
// 5KB
|
|
6295
|
+
medium: 10 * 1024,
|
|
6296
|
+
// 10KB
|
|
6297
|
+
large: 20 * 1024
|
|
6298
|
+
// 20KB
|
|
6299
|
+
};
|
|
6300
|
+
var DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE = "medium";
|
|
6661
6301
|
var MIN_CHECKPOINT_TIMEOUT_SECONDS = 10;
|
|
6662
6302
|
var MAX_CHECKPOINT_TIMEOUT_SECONDS = 60;
|
|
6663
6303
|
var DEFAULT_CHECKPOINT_TIMEOUT_SECONDS = 15;
|
|
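Note: the old character-limit constant is replaced above by a three-tier byte budget plus a default tier name. A short sketch of resolving the new terminalOutputPreviewSize setting (see the schema change further down) to a byte count, assuming the published typings keep the literal types of these two exports:

import { DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE, TERMINAL_PREVIEW_BYTES } from "@roo-code/types";

type TerminalPreviewSize = keyof typeof TERMINAL_PREVIEW_BYTES; // "small" | "medium" | "large"

// Resolve a possibly unset user setting to a byte budget for terminal output previews.
function previewByteLimit(size?: TerminalPreviewSize): number {
	return TERMINAL_PREVIEW_BYTES[size ?? DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE];
}

console.log(previewByteLimit());        // 10240 (the "medium" default)
console.log(previewByteLimit("large")); // 20480
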
@@ -6673,7 +6313,6 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6673
6313
|
imageGenerationProvider: import_zod14.z.enum(["openrouter", "roo"]).optional(),
|
|
6674
6314
|
openRouterImageApiKey: import_zod14.z.string().optional(),
|
|
6675
6315
|
openRouterImageGenerationSelectedModel: import_zod14.z.string().optional(),
|
|
6676
|
-
condensingApiConfigId: import_zod14.z.string().optional(),
|
|
6677
6316
|
customCondensingPrompt: import_zod14.z.string().optional(),
|
|
6678
6317
|
autoApprovalEnabled: import_zod14.z.boolean().optional(),
|
|
6679
6318
|
alwaysAllowReadOnly: import_zod14.z.boolean().optional(),
|
|
@@ -6699,7 +6338,6 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6699
6338
|
allowedMaxCost: import_zod14.z.number().nullish(),
|
|
6700
6339
|
autoCondenseContext: import_zod14.z.boolean().optional(),
|
|
6701
6340
|
autoCondenseContextPercent: import_zod14.z.number().optional(),
|
|
6702
|
-
maxConcurrentFileReads: import_zod14.z.number().optional(),
|
|
6703
6341
|
/**
|
|
6704
6342
|
* Whether to include current time in the environment details
|
|
6705
6343
|
* @default true
|
|
@@ -6742,11 +6380,9 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6742
6380
|
maxWorkspaceFiles: import_zod14.z.number().optional(),
|
|
6743
6381
|
showRooIgnoredFiles: import_zod14.z.boolean().optional(),
|
|
6744
6382
|
enableSubfolderRules: import_zod14.z.boolean().optional(),
|
|
6745
|
-
maxReadFileLine: import_zod14.z.number().optional(),
|
|
6746
6383
|
maxImageFileSize: import_zod14.z.number().optional(),
|
|
6747
6384
|
maxTotalImageSize: import_zod14.z.number().optional(),
|
|
6748
|
-
|
|
6749
|
-
terminalOutputCharacterLimit: import_zod14.z.number().optional(),
|
|
6385
|
+
terminalOutputPreviewSize: import_zod14.z.enum(["small", "medium", "large"]).optional(),
|
|
6750
6386
|
terminalShellIntegrationTimeout: import_zod14.z.number().optional(),
|
|
6751
6387
|
terminalShellIntegrationDisabled: import_zod14.z.boolean().optional(),
|
|
6752
6388
|
terminalCommandDelay: import_zod14.z.number().optional(),
|
|
@@ -6755,18 +6391,14 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6755
6391
|
terminalZshOhMy: import_zod14.z.boolean().optional(),
|
|
6756
6392
|
terminalZshP10k: import_zod14.z.boolean().optional(),
|
|
6757
6393
|
terminalZdotdir: import_zod14.z.boolean().optional(),
|
|
6758
|
-
terminalCompressProgressBar: import_zod14.z.boolean().optional(),
|
|
6759
6394
|
diagnosticsEnabled: import_zod14.z.boolean().optional(),
|
|
6760
6395
|
rateLimitSeconds: import_zod14.z.number().optional(),
|
|
6761
|
-
diffEnabled: import_zod14.z.boolean().optional(),
|
|
6762
|
-
fuzzyMatchThreshold: import_zod14.z.number().optional(),
|
|
6763
6396
|
experiments: experimentsSchema.optional(),
|
|
6764
6397
|
codebaseIndexModels: codebaseIndexModelsSchema.optional(),
|
|
6765
6398
|
codebaseIndexConfig: codebaseIndexConfigSchema.optional(),
|
|
6766
6399
|
language: languagesSchema.optional(),
|
|
6767
6400
|
telemetrySetting: telemetrySettingsSchema.optional(),
|
|
6768
6401
|
mcpEnabled: import_zod14.z.boolean().optional(),
|
|
6769
|
-
enableMcpServerCreation: import_zod14.z.boolean().optional(),
|
|
6770
6402
|
mode: import_zod14.z.string().optional(),
|
|
6771
6403
|
modeApiConfigs: import_zod14.z.record(import_zod14.z.string(), import_zod14.z.string()).optional(),
|
|
6772
6404
|
customModes: import_zod14.z.array(modeConfigSchema).optional(),
|
|
@@ -6786,7 +6418,20 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
6786
6418
|
profileThresholds: import_zod14.z.record(import_zod14.z.string(), import_zod14.z.number()).optional(),
|
|
6787
6419
|
hasOpenedModeSelector: import_zod14.z.boolean().optional(),
|
|
6788
6420
|
lastModeExportPath: import_zod14.z.string().optional(),
|
|
6789
|
-
lastModeImportPath: import_zod14.z.string().optional()
|
|
6421
|
+
lastModeImportPath: import_zod14.z.string().optional(),
|
|
6422
|
+
lastSettingsExportPath: import_zod14.z.string().optional(),
|
|
6423
|
+
lastTaskExportPath: import_zod14.z.string().optional(),
|
|
6424
|
+
lastImageSavePath: import_zod14.z.string().optional(),
|
|
6425
|
+
/**
|
|
6426
|
+
* Path to worktree to auto-open after switching workspaces.
|
|
6427
|
+
* Used by the worktree feature to open the Roo Code sidebar in a new window.
|
|
6428
|
+
*/
|
|
6429
|
+
worktreeAutoOpenPath: import_zod14.z.string().optional(),
|
|
6430
|
+
/**
|
|
6431
|
+
* Whether to show the worktree selector in the home screen.
|
|
6432
|
+
* @default true
|
|
6433
|
+
*/
|
|
6434
|
+
showWorktreesInHomeScreen: import_zod14.z.boolean().optional()
|
|
6790
6435
|
});
|
|
6791
6436
|
var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
|
|
6792
6437
|
var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
|
|
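Note: globalSettingsSchema gains export-path bookkeeping and two worktree-related keys above. A small sketch of parsing a settings fragment with the new fields, assuming globalSettingsSchema is exported as in earlier versions; all values are illustrative:

import { globalSettingsSchema } from "@roo-code/types";

// All keys are optional, so a partial settings object parses.
const settings = globalSettingsSchema.parse({
	terminalOutputPreviewSize: "large",        // replaces terminalOutputCharacterLimit
	showWorktreesInHomeScreen: true,           // new; documented default is true
	worktreeAutoOpenPath: "/path/to/worktree", // new; opened after a workspace switch
});

console.log(settings.worktreeAutoOpenPath);
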
@@ -6870,8 +6515,6 @@ var EVALS_SETTINGS = {
|
|
|
6870
6515
|
ttsSpeed: 1,
|
|
6871
6516
|
soundEnabled: false,
|
|
6872
6517
|
soundVolume: 0.5,
|
|
6873
|
-
terminalOutputLineLimit: 500,
|
|
6874
|
-
terminalOutputCharacterLimit: DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT,
|
|
6875
6518
|
terminalShellIntegrationTimeout: 3e4,
|
|
6876
6519
|
terminalCommandDelay: 0,
|
|
6877
6520
|
terminalPowershellCounter: false,
|
|
@@ -6879,19 +6522,14 @@ var EVALS_SETTINGS = {
|
|
|
6879
6522
|
terminalZshClearEolMark: true,
|
|
6880
6523
|
terminalZshP10k: false,
|
|
6881
6524
|
terminalZdotdir: true,
|
|
6882
|
-
terminalCompressProgressBar: true,
|
|
6883
6525
|
terminalShellIntegrationDisabled: true,
|
|
6884
6526
|
diagnosticsEnabled: true,
|
|
6885
|
-
diffEnabled: true,
|
|
6886
|
-
fuzzyMatchThreshold: 1,
|
|
6887
6527
|
enableCheckpoints: false,
|
|
6888
6528
|
rateLimitSeconds: 0,
|
|
6889
6529
|
maxOpenTabsContext: 20,
|
|
6890
6530
|
maxWorkspaceFiles: 200,
|
|
6891
6531
|
maxGitStatusFiles: 20,
|
|
6892
6532
|
showRooIgnoredFiles: true,
|
|
6893
|
-
maxReadFileLine: -1,
|
|
6894
|
-
// -1 to enable full file reading.
|
|
6895
6533
|
includeDiagnosticMessages: true,
|
|
6896
6534
|
maxDiagnosticMessages: 50,
|
|
6897
6535
|
language: "en",
|
|
@@ -6965,24 +6603,18 @@ var organizationAllowListSchema = import_zod16.z.object({
|
|
|
6965
6603
|
});
|
|
6966
6604
|
var organizationDefaultSettingsSchema = globalSettingsSchema.pick({
|
|
6967
6605
|
enableCheckpoints: true,
|
|
6968
|
-
fuzzyMatchThreshold: true,
|
|
6969
6606
|
maxOpenTabsContext: true,
|
|
6970
|
-
maxReadFileLine: true,
|
|
6971
6607
|
maxWorkspaceFiles: true,
|
|
6972
6608
|
showRooIgnoredFiles: true,
|
|
6973
6609
|
terminalCommandDelay: true,
|
|
6974
|
-
terminalCompressProgressBar: true,
|
|
6975
|
-
terminalOutputLineLimit: true,
|
|
6976
6610
|
terminalShellIntegrationDisabled: true,
|
|
6977
6611
|
terminalShellIntegrationTimeout: true,
|
|
6978
6612
|
terminalZshClearEolMark: true
|
|
6979
6613
|
}).merge(
|
|
6980
6614
|
import_zod16.z.object({
|
|
6981
6615
|
maxOpenTabsContext: import_zod16.z.number().int().nonnegative().optional(),
|
|
6982
|
-
maxReadFileLine: import_zod16.z.number().int().gte(-1).optional(),
|
|
6983
6616
|
maxWorkspaceFiles: import_zod16.z.number().int().nonnegative().optional(),
|
|
6984
6617
|
terminalCommandDelay: import_zod16.z.number().int().nonnegative().optional(),
|
|
6985
|
-
terminalOutputLineLimit: import_zod16.z.number().int().nonnegative().optional(),
|
|
6986
6618
|
terminalShellIntegrationTimeout: import_zod16.z.number().int().nonnegative().optional()
|
|
6987
6619
|
})
|
|
6988
6620
|
);
|
|
@@ -7457,12 +7089,10 @@ var taskCommandSchema = import_zod18.z.discriminatedUnion("commandName", [
|
|
|
7457
7089
|
})
|
|
7458
7090
|
}),
|
|
7459
7091
|
import_zod18.z.object({
|
|
7460
|
-
commandName: import_zod18.z.literal("CancelTask" /* CancelTask */)
|
|
7461
|
-
data: import_zod18.z.string()
|
|
7092
|
+
commandName: import_zod18.z.literal("CancelTask" /* CancelTask */)
|
|
7462
7093
|
}),
|
|
7463
7094
|
import_zod18.z.object({
|
|
7464
|
-
commandName: import_zod18.z.literal("CloseTask" /* CloseTask */)
|
|
7465
|
-
data: import_zod18.z.string()
|
|
7095
|
+
commandName: import_zod18.z.literal("CloseTask" /* CloseTask */)
|
|
7466
7096
|
}),
|
|
7467
7097
|
import_zod18.z.object({
|
|
7468
7098
|
commandName: import_zod18.z.literal("ResumeTask" /* ResumeTask */),
|
|
@@ -7498,6 +7128,7 @@ var ipcMessageSchema = import_zod18.z.discriminatedUnion("type", [
|
|
|
7498
7128
|
|
|
7499
7129
|
// src/mcp.ts
|
|
7500
7130
|
var import_zod19 = require("zod");
|
|
7131
|
+
var MAX_MCP_TOOLS_THRESHOLD = 60;
|
|
7501
7132
|
var mcpExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
|
|
7502
7133
|
import_zod19.z.object({
|
|
7503
7134
|
executionId: import_zod19.z.string(),
|
|
@@ -7521,6 +7152,46 @@ var mcpExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
|
|
|
7521
7152
|
error: import_zod19.z.string().optional()
|
|
7522
7153
|
})
|
|
7523
7154
|
]);
|
|
7155
|
+
function countEnabledMcpTools(servers) {
|
|
7156
|
+
let serverCount = 0;
|
|
7157
|
+
let toolCount = 0;
|
|
7158
|
+
for (const server of servers) {
|
|
7159
|
+
if (server.disabled) continue;
|
|
7160
|
+
if (server.status !== "connected") continue;
|
|
7161
|
+
serverCount++;
|
|
7162
|
+
if (server.tools) {
|
|
7163
|
+
for (const tool of server.tools) {
|
|
7164
|
+
if (tool.enabledForPrompt !== false) {
|
|
7165
|
+
toolCount++;
|
|
7166
|
+
}
|
|
7167
|
+
}
|
|
7168
|
+
}
|
|
7169
|
+
}
|
|
7170
|
+
return { enabledToolCount: toolCount, enabledServerCount: serverCount };
|
|
7171
|
+
}
|
|
7172
|
+
|
|
7173
|
+
// src/skills.ts
|
|
7174
|
+
var SKILL_NAME_MIN_LENGTH = 1;
|
|
7175
|
+
var SKILL_NAME_MAX_LENGTH = 64;
|
|
7176
|
+
var SKILL_NAME_REGEX = /^[a-z0-9]+(?:-[a-z0-9]+)*$/;
|
|
7177
|
+
var SkillNameValidationError = /* @__PURE__ */ ((SkillNameValidationError2) => {
|
|
7178
|
+
SkillNameValidationError2["Empty"] = "empty";
|
|
7179
|
+
SkillNameValidationError2["TooLong"] = "too_long";
|
|
7180
|
+
SkillNameValidationError2["InvalidFormat"] = "invalid_format";
|
|
7181
|
+
return SkillNameValidationError2;
|
|
7182
|
+
})(SkillNameValidationError || {});
|
|
7183
|
+
function validateSkillName(name) {
|
|
7184
|
+
if (!name || name.length < SKILL_NAME_MIN_LENGTH) {
|
|
7185
|
+
return { valid: false, error: "empty" /* Empty */ };
|
|
7186
|
+
}
|
|
7187
|
+
if (name.length > SKILL_NAME_MAX_LENGTH) {
|
|
7188
|
+
return { valid: false, error: "too_long" /* TooLong */ };
|
|
7189
|
+
}
|
|
7190
|
+
if (!SKILL_NAME_REGEX.test(name)) {
|
|
7191
|
+
return { valid: false, error: "invalid_format" /* InvalidFormat */ };
|
|
7192
|
+
}
|
|
7193
|
+
return { valid: true };
|
|
7194
|
+
}
|
|
7524
7195
|
|
|
7525
7196
|
// src/todo.ts
|
|
7526
7197
|
var import_zod20 = require("zod");
|
|
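Note: countEnabledMcpTools above walks connected, non-disabled servers and counts tools that are not explicitly disabled for the prompt, and MAX_MCP_TOOLS_THRESHOLD (60) reads as the ceiling it is meant to be compared against. A minimal sketch with hand-built server snapshots; the real McpServer type in this package has more fields than shown here:

import { countEnabledMcpTools, MAX_MCP_TOOLS_THRESHOLD } from "@roo-code/types";

// Only the fields the helper reads are filled in.
const servers: any[] = [
	{ status: "connected", disabled: false, tools: [{ enabledForPrompt: true }, {}] },
	{ status: "connected", disabled: true, tools: [{ enabledForPrompt: true }] },   // skipped: disabled
	{ status: "connecting", disabled: false, tools: [{ enabledForPrompt: true }] }, // skipped: not connected
];

const { enabledServerCount, enabledToolCount } = countEnabledMcpTools(servers);
console.log(enabledServerCount, enabledToolCount); // 1 2 (a tool without enabledForPrompt counts as enabled)

if (enabledToolCount > MAX_MCP_TOOLS_THRESHOLD) {
	console.warn(`MCP exposes ${enabledToolCount} tools, above the ${MAX_MCP_TOOLS_THRESHOLD}-tool threshold`);
}

The new skills module in the same hunk constrains skill names to 1-64 characters of kebab-case, and validateSkillName returns a result object rather than throwing:

import { SKILL_NAME_REGEX, SkillNameValidationError, validateSkillName } from "@roo-code/types";

console.log(validateSkillName("update-changelog")); // { valid: true }
console.log(validateSkillName(""));                 // { valid: false, error: "empty" }
console.log(validateSkillName("Not Kebab Case"));   // { valid: false, error: "invalid_format" }
console.log(validateSkillName("x".repeat(65)));     // { valid: false, error: "too_long" }

// The rule and error codes are exposed directly for UI-side checks.
console.log(SKILL_NAME_REGEX.test("my-skill-2"));   // true
console.log(SkillNameValidationError.TooLong);      // "too_long"
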
@@ -7560,6 +7231,11 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
|
|
|
7560
7231
|
})
|
|
7561
7232
|
]);
|
|
7562
7233
|
|
|
7234
|
+
// src/tool-params.ts
|
|
7235
|
+
function isLegacyReadFileParams(params) {
|
|
7236
|
+
return "_legacyFormat" in params && params._legacyFormat === true;
|
|
7237
|
+
}
|
|
7238
|
+
|
|
7563
7239
|
// src/vscode-extension-host.ts
|
|
7564
7240
|
var import_zod22 = require("zod");
|
|
7565
7241
|
var checkoutDiffPayloadSchema = import_zod22.z.object({
|
|
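Note: isLegacyReadFileParams above is a plain guard on a _legacyFormat marker, and the telemetry enum gains READ_FILE_LEGACY_FORMAT_USED in the same release. A sketch of pairing the two; the argument shapes are guesses from the compiled check and the capture function is hypothetical, since no telemetry client appears in this diff:

import { isLegacyReadFileParams, TelemetryEventName } from "@roo-code/types";

console.log(isLegacyReadFileParams({ path: "src/index.ts", _legacyFormat: true } as any)); // true
console.log(isLegacyReadFileParams({ files: [{ path: "src/index.ts" }] } as any));         // false

// Hypothetical wiring: report legacy-format usage through the new telemetry event.
declare function capture(event: string, properties?: Record<string, unknown>): void;

function onReadFileArgs(params: any) {
	if (isLegacyReadFileParams(params)) {
		capture(TelemetryEventName.READ_FILE_LEGACY_FORMAT_USED, { tool: "read_file" });
	}
}
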
@@ -7613,7 +7289,7 @@ var browserActions = [
|
|
|
7613
7289
|
DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
|
|
7614
7290
|
DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
|
|
7615
7291
|
DEFAULT_MODES,
|
|
7616
|
-
|
|
7292
|
+
DEFAULT_TERMINAL_OUTPUT_PREVIEW_SIZE,
|
|
7617
7293
|
DEFAULT_WRITE_DELAY_MS,
|
|
7618
7294
|
DOUBAO_API_BASE_URL,
|
|
7619
7295
|
DOUBAO_API_CHAT_PATH,
|
|
@@ -7643,13 +7319,13 @@ var browserActions = [
|
|
|
7643
7319
|
IpcOrigin,
|
|
7644
7320
|
LMSTUDIO_DEFAULT_TEMPERATURE,
|
|
7645
7321
|
MAX_CHECKPOINT_TIMEOUT_SECONDS,
|
|
7322
|
+
MAX_MCP_TOOLS_THRESHOLD,
|
|
7646
7323
|
MINIMAX_DEFAULT_MAX_TOKENS,
|
|
7647
7324
|
MINIMAX_DEFAULT_TEMPERATURE,
|
|
7648
7325
|
MIN_CHECKPOINT_TIMEOUT_SECONDS,
|
|
7649
7326
|
MISTRAL_DEFAULT_TEMPERATURE,
|
|
7650
7327
|
MODELS_BY_PROVIDER,
|
|
7651
7328
|
MOONSHOT_DEFAULT_TEMPERATURE,
|
|
7652
|
-
NATIVE_TOOL_DEFAULTS,
|
|
7653
7329
|
OPENAI_AZURE_AI_INFERENCE_PATH,
|
|
7654
7330
|
OPENAI_NATIVE_DEFAULT_TEMPERATURE,
|
|
7655
7331
|
OPENROUTER_DEFAULT_PROVIDER_NAME,
|
|
@@ -7664,7 +7340,11 @@ var browserActions = [
|
|
|
7664
7340
|
RooModelsResponseSchema,
|
|
7665
7341
|
RooPricingSchema,
|
|
7666
7342
|
SECRET_STATE_KEYS,
|
|
7667
|
-
|
|
7343
|
+
SKILL_NAME_MAX_LENGTH,
|
|
7344
|
+
SKILL_NAME_MIN_LENGTH,
|
|
7345
|
+
SKILL_NAME_REGEX,
|
|
7346
|
+
SkillNameValidationError,
|
|
7347
|
+
TERMINAL_PREVIEW_BYTES,
|
|
7668
7348
|
TaskBridgeCommandName,
|
|
7669
7349
|
TaskBridgeEventName,
|
|
7670
7350
|
TaskCommandName,
|
|
@@ -7696,9 +7376,6 @@ var browserActions = [
|
|
|
7696
7376
|
chutesDefaultModelId,
|
|
7697
7377
|
chutesDefaultModelInfo,
|
|
7698
7378
|
chutesModels,
|
|
7699
|
-
claudeCodeDefaultModelId,
|
|
7700
|
-
claudeCodeModels,
|
|
7701
|
-
claudeCodeReasoningConfig,
|
|
7702
7379
|
clineAskSchema,
|
|
7703
7380
|
clineAsks,
|
|
7704
7381
|
clineMessageSchema,
|
|
@@ -7713,6 +7390,7 @@ var browserActions = [
|
|
|
7713
7390
|
commandIds,
|
|
7714
7391
|
contextCondenseSchema,
|
|
7715
7392
|
contextTruncationSchema,
|
|
7393
|
+
countEnabledMcpTools,
|
|
7716
7394
|
customModePromptsSchema,
|
|
7717
7395
|
customModesSettingsSchema,
|
|
7718
7396
|
customProviders,
|
|
@@ -7746,7 +7424,6 @@ var browserActions = [
|
|
|
7746
7424
|
geminiDefaultModelId,
|
|
7747
7425
|
geminiModels,
|
|
7748
7426
|
getApiProtocol,
|
|
7749
|
-
getEffectiveProtocol,
|
|
7750
7427
|
getErrorMessage,
|
|
7751
7428
|
getErrorStatusCode,
|
|
7752
7429
|
getImageGenerationProvider,
|
|
@@ -7781,9 +7458,9 @@ var browserActions = [
|
|
|
7781
7458
|
isInteractiveAsk,
|
|
7782
7459
|
isInternalProvider,
|
|
7783
7460
|
isLanguage,
|
|
7461
|
+
isLegacyReadFileParams,
|
|
7784
7462
|
isLocalProvider,
|
|
7785
7463
|
isModelParameter,
|
|
7786
|
-
isNativeProtocol,
|
|
7787
7464
|
isNonBlockingAsk,
|
|
7788
7465
|
isProviderName,
|
|
7789
7466
|
isResumableAsk,
|
|
@@ -7819,7 +7496,6 @@ var browserActions = [
|
|
|
7819
7496
|
moonshotDefaultModelId,
|
|
7820
7497
|
moonshotModels,
|
|
7821
7498
|
nonBlockingAsks,
|
|
7822
|
-
normalizeClaudeCodeModelId,
|
|
7823
7499
|
ollamaDefaultModelId,
|
|
7824
7500
|
ollamaDefaultModelInfo,
|
|
7825
7501
|
openAiCodexDefaultModelId,
|
|
@@ -7893,6 +7569,7 @@ var browserActions = [
|
|
|
7893
7569
|
userFeaturesSchema,
|
|
7894
7570
|
userSettingsConfigSchema,
|
|
7895
7571
|
userSettingsDataSchema,
|
|
7572
|
+
validateSkillName,
|
|
7896
7573
|
verbosityLevels,
|
|
7897
7574
|
verbosityLevelsSchema,
|
|
7898
7575
|
vercelAiGatewayDefaultModelId,
|