@roo-code/types 1.87.0 → 1.89.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +557 -61
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +4314 -740
- package/dist/index.d.ts +4314 -740
- package/dist/index.js +554 -61
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
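
The most visible additions in the diff below are three task-delegation lifecycle events (`taskDelegated`, `taskDelegationCompleted`, `taskDelegationResumed`) added to `RooCodeEventName`, `rooCodeEventsSchema`, and `taskEventSchema`, plus a new Baseten provider model list and Claude Opus 4.5 entries. As a rough illustration only (not part of the published package), a consumer might narrow the new events as sketched here; this assumes `RooCodeEventName` and `taskEventSchema` are exported from the package root and that the payloads match the `[parentTaskId, childTaskId]` tuple shapes shown in the diff:

```ts
import { RooCodeEventName, taskEventSchema } from "@roo-code/types"

// Sketch: validate an incoming event and handle the new delegation cases.
export function describeTaskEvent(raw: unknown): string | undefined {
  // taskEventSchema is a zod discriminated union keyed on `eventName`.
  const event = taskEventSchema.parse(raw)

  switch (event.eventName) {
    case RooCodeEventName.TaskDelegated: {
      const [parentTaskId, childTaskId] = event.payload
      return `task ${parentTaskId} delegated to ${childTaskId}`
    }
    case RooCodeEventName.TaskDelegationCompleted: {
      const [parentTaskId, childTaskId, completionResultSummary] = event.payload
      return `delegation ${parentTaskId} -> ${childTaskId} completed: ${completionResultSummary}`
    }
    case RooCodeEventName.TaskDelegationResumed: {
      const [parentTaskId, childTaskId] = event.payload
      return `task ${parentTaskId} resumed after delegation to ${childTaskId}`
    }
    default:
      return undefined
  }
}
```
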
package/dist/index.cjs
CHANGED
@@ -106,6 +106,8 @@ __export(index_exports, {
   anthropicModels: () => anthropicModels,
   appPropertiesSchema: () => appPropertiesSchema,
   azureOpenAiDefaultApiVersion: () => azureOpenAiDefaultApiVersion,
+  basetenDefaultModelId: () => basetenDefaultModelId,
+  basetenModels: () => basetenModels,
   bedrockDefaultModelId: () => bedrockDefaultModelId,
   bedrockDefaultPromptRouterModelId: () => bedrockDefaultPromptRouterModelId,
   bedrockModels: () => bedrockModels,
@@ -161,6 +163,7 @@ __export(index_exports, {
   getApiProtocol: () => getApiProtocol,
   getClaudeCodeModelId: () => getClaudeCodeModelId,
   getEffectiveProtocol: () => getEffectiveProtocol,
+  getImageGenerationProvider: () => getImageGenerationProvider,
   getModelId: () => getModelId,
   getProviderDefaultModelId: () => getProviderDefaultModelId,
   gitPropertiesSchema: () => gitPropertiesSchema,
@@ -381,6 +384,7 @@ var clineSays = [
   "shell_integration_warning",
   "browser_action",
   "browser_action_result",
+  "browser_session_status",
   "mcp_server_request_started",
   "mcp_server_response",
   "subtask_result",
@@ -445,6 +449,8 @@ var toolNames = [
   "write_to_file",
   "apply_diff",
   "insert_content",
+  "search_and_replace",
+  "apply_patch",
   "search_files",
   "list_files",
   "list_code_definition_names",
@@ -495,6 +501,9 @@ var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
   RooCodeEventName2["TaskPaused"] = "taskPaused";
   RooCodeEventName2["TaskUnpaused"] = "taskUnpaused";
   RooCodeEventName2["TaskSpawned"] = "taskSpawned";
+  RooCodeEventName2["TaskDelegated"] = "taskDelegated";
+  RooCodeEventName2["TaskDelegationCompleted"] = "taskDelegationCompleted";
+  RooCodeEventName2["TaskDelegationResumed"] = "taskDelegationResumed";
   RooCodeEventName2["Message"] = "message";
   RooCodeEventName2["TaskModeSwitched"] = "taskModeSwitched";
   RooCodeEventName2["TaskAskResponded"] = "taskAskResponded";
@@ -528,6 +537,26 @@ var rooCodeEventsSchema = import_zod3.z.object({
   ["taskPaused" /* TaskPaused */]: import_zod3.z.tuple([import_zod3.z.string()]),
   ["taskUnpaused" /* TaskUnpaused */]: import_zod3.z.tuple([import_zod3.z.string()]),
   ["taskSpawned" /* TaskSpawned */]: import_zod3.z.tuple([import_zod3.z.string(), import_zod3.z.string()]),
+  ["taskDelegated" /* TaskDelegated */]: import_zod3.z.tuple([
+    import_zod3.z.string(),
+    // parentTaskId
+    import_zod3.z.string()
+    // childTaskId
+  ]),
+  ["taskDelegationCompleted" /* TaskDelegationCompleted */]: import_zod3.z.tuple([
+    import_zod3.z.string(),
+    // parentTaskId
+    import_zod3.z.string(),
+    // childTaskId
+    import_zod3.z.string()
+    // completionResultSummary
+  ]),
+  ["taskDelegationResumed" /* TaskDelegationResumed */]: import_zod3.z.tuple([
+    import_zod3.z.string(),
+    // parentTaskId
+    import_zod3.z.string()
+    // childTaskId
+  ]),
   ["message" /* Message */]: import_zod3.z.tuple([
     import_zod3.z.object({
       taskId: import_zod3.z.string(),
@@ -612,6 +641,21 @@ var taskEventSchema = import_zod3.z.discriminatedUnion("eventName", [
     payload: rooCodeEventsSchema.shape["taskSpawned" /* TaskSpawned */],
     taskId: import_zod3.z.number().optional()
   }),
+  import_zod3.z.object({
+    eventName: import_zod3.z.literal("taskDelegated" /* TaskDelegated */),
+    payload: rooCodeEventsSchema.shape["taskDelegated" /* TaskDelegated */],
+    taskId: import_zod3.z.number().optional()
+  }),
+  import_zod3.z.object({
+    eventName: import_zod3.z.literal("taskDelegationCompleted" /* TaskDelegationCompleted */),
+    payload: rooCodeEventsSchema.shape["taskDelegationCompleted" /* TaskDelegationCompleted */],
+    taskId: import_zod3.z.number().optional()
+  }),
+  import_zod3.z.object({
+    eventName: import_zod3.z.literal("taskDelegationResumed" /* TaskDelegationResumed */),
+    payload: rooCodeEventsSchema.shape["taskDelegationResumed" /* TaskDelegationResumed */],
+    taskId: import_zod3.z.number().optional()
+  }),
   // Task Execution
   import_zod3.z.object({
     eventName: import_zod3.z.literal("message" /* Message */),
@@ -730,6 +774,13 @@ var modelInfoSchema = import_zod5.z.object({
   supportsNativeTools: import_zod5.z.boolean().optional(),
   // Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
   defaultToolProtocol: import_zod5.z.enum(["xml", "native"]).optional(),
+  // Exclude specific native tools from being available (only applies to native protocol)
+  // These tools will be removed from the set of tools available to the model
+  excludedTools: import_zod5.z.array(import_zod5.z.string()).optional(),
+  // Include specific native tools (only applies to native protocol)
+  // These tools will be added if they belong to an allowed group in the current mode
+  // Cannot force-add tools from groups the mode doesn't allow
+  includedTools: import_zod5.z.array(import_zod5.z.string()).optional(),
   /**
    * Service tiers with pricing information.
    * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
@@ -763,7 +814,16 @@ var CODEBASE_INDEX_DEFAULTS = {
 var codebaseIndexConfigSchema = import_zod6.z.object({
   codebaseIndexEnabled: import_zod6.z.boolean().optional(),
   codebaseIndexQdrantUrl: import_zod6.z.string().optional(),
-  codebaseIndexEmbedderProvider: import_zod6.z.enum([
+  codebaseIndexEmbedderProvider: import_zod6.z.enum([
+    "openai",
+    "ollama",
+    "openai-compatible",
+    "gemini",
+    "mistral",
+    "vercel-ai-gateway",
+    "bedrock",
+    "openrouter"
+  ]).optional(),
   codebaseIndexEmbedderBaseUrl: import_zod6.z.string().optional(),
   codebaseIndexEmbedderModelId: import_zod6.z.string().optional(),
   codebaseIndexEmbedderModelDimension: import_zod6.z.number().optional(),
@@ -771,7 +831,10 @@ var codebaseIndexConfigSchema = import_zod6.z.object({
   codebaseIndexSearchMaxResults: import_zod6.z.number().min(CODEBASE_INDEX_DEFAULTS.MIN_SEARCH_RESULTS).max(CODEBASE_INDEX_DEFAULTS.MAX_SEARCH_RESULTS).optional(),
   // OpenAI Compatible specific fields
   codebaseIndexOpenAiCompatibleBaseUrl: import_zod6.z.string().optional(),
-  codebaseIndexOpenAiCompatibleModelDimension: import_zod6.z.number().optional()
+  codebaseIndexOpenAiCompatibleModelDimension: import_zod6.z.number().optional(),
+  // Bedrock specific fields
+  codebaseIndexBedrockRegion: import_zod6.z.string().optional(),
+  codebaseIndexBedrockProfile: import_zod6.z.string().optional()
 });
 var codebaseIndexModelsSchema = import_zod6.z.object({
   openai: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional(),
@@ -780,7 +843,8 @@ var codebaseIndexModelsSchema = import_zod6.z.object({
   gemini: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional(),
   mistral: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional(),
   "vercel-ai-gateway": import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional(),
-  openrouter: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional()
+  openrouter: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional(),
+  bedrock: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional()
 });
 var codebaseIndexProviderSchema = import_zod6.z.object({
   codeIndexOpenAiKey: import_zod6.z.string().optional(),
@@ -804,6 +868,7 @@ var anthropicModels = {
     // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens (≤200K context)
     outputPrice: 15,
@@ -836,6 +901,7 @@ var anthropicModels = {
     // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens (≤200K context)
     outputPrice: 15,
@@ -861,12 +927,30 @@ var anthropicModels = {
       }
     ]
   },
+  "claude-opus-4-5-20251101": {
+    maxTokens: 32e3,
+    // Overridden to 8k if `enableReasoningEffort` is false.
+    contextWindow: 2e5,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsNativeTools: true,
+    inputPrice: 5,
+    // $5 per million input tokens
+    outputPrice: 25,
+    // $25 per million output tokens
+    cacheWritesPrice: 6.25,
+    // $6.25 per million tokens
+    cacheReadsPrice: 0.5,
+    // $0.50 per million tokens
+    supportsReasoningBudget: true
+  },
   "claude-opus-4-1-20250805": {
     maxTokens: 32e3,
     // Overridden to 8k if `enableReasoningEffort` is false.
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     // $15 per million input tokens
     outputPrice: 75,
@@ -883,6 +967,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     // $15 per million input tokens
     outputPrice: 75,
@@ -899,6 +984,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
    inputPrice: 3,
     // $3 per million input tokens
     outputPrice: 15,
@@ -916,6 +1002,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens
     outputPrice: 15,
@@ -930,6 +1017,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens
     outputPrice: 15,
@@ -944,6 +1032,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 1,
     outputPrice: 5,
     cacheWritesPrice: 1.25,
@@ -954,6 +1043,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     outputPrice: 75,
     cacheWritesPrice: 18.75,
@@ -964,6 +1054,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.25,
     outputPrice: 1.25,
     cacheWritesPrice: 0.3,
@@ -974,6 +1065,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 1,
     outputPrice: 5,
     cacheWritesPrice: 1.25,
@@ -984,6 +1076,125 @@ var anthropicModels = {
 };
 var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;
 
+// src/providers/baseten.ts
+var basetenModels = {
+  "moonshotai/Kimi-K2-Thinking": {
+    maxTokens: 163800,
+    contextWindow: 262e3,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Kimi K2 Thinking - A model with enhanced reasoning capabilities from Kimi K2"
+  },
+  "zai-org/GLM-4.6": {
+    maxTokens: 2e5,
+    contextWindow: 2e5,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.6,
+    outputPrice: 2.2,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Frontier open model with advanced agentic, reasoning and coding capabilities"
+  },
+  "deepseek-ai/DeepSeek-R1": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 2.55,
+    outputPrice: 5.95,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "DeepSeek's first-generation reasoning model"
+  },
+  "deepseek-ai/DeepSeek-R1-0528": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 2.55,
+    outputPrice: 5.95,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "The latest revision of DeepSeek's first-generation reasoning model"
+  },
+  "deepseek-ai/DeepSeek-V3-0324": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.77,
+    outputPrice: 0.77,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Fast general-purpose LLM with enhanced reasoning capabilities"
+  },
+  "deepseek-ai/DeepSeek-V3.1": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.5,
+    outputPrice: 1.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Extremely capable general-purpose LLM with hybrid reasoning capabilities and advanced tool calling"
+  },
+  "Qwen/Qwen3-235B-A22B-Instruct-2507": {
+    maxTokens: 262144,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.22,
+    outputPrice: 0.8,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Mixture-of-experts LLM with math and reasoning capabilities"
+  },
+  "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
+    maxTokens: 262144,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.38,
+    outputPrice: 1.53,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
+  },
+  "openai/gpt-oss-120b": {
+    maxTokens: 128072,
+    contextWindow: 128072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.1,
+    outputPrice: 0.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
+  },
+  "moonshotai/Kimi-K2-Instruct-0905": {
+    maxTokens: 168e3,
+    contextWindow: 262e3,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "State of the art language model for agentic and coding tasks. September Update."
+  }
+};
+var basetenDefaultModelId = "zai-org/GLM-4.6";
+
 // src/providers/bedrock.ts
 var bedrockDefaultModelId = "anthropic.claude-sonnet-4-5-20250929-v1:0";
 var bedrockDefaultPromptRouterModelId = "anthropic.claude-3-sonnet-20240229-v1:0";
@@ -994,6 +1205,7 @@ var bedrockModels = {
     supportsImages: true,
     supportsPromptCache: true,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 3.75,
@@ -1007,6 +1219,7 @@ var bedrockModels = {
     contextWindow: 3e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.8,
     outputPrice: 3.2,
     cacheWritesPrice: 0.8,
@@ -1022,6 +1235,7 @@ var bedrockModels = {
     contextWindow: 3e5,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 1,
     outputPrice: 4,
     cacheWritesPrice: 1,
@@ -1035,6 +1249,7 @@ var bedrockModels = {
     contextWindow: 3e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.06,
     outputPrice: 0.24,
     cacheWritesPrice: 0.06,
@@ -1050,6 +1265,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.035,
     outputPrice: 0.14,
     cacheWritesPrice: 0.035,
@@ -1066,6 +1282,7 @@ var bedrockModels = {
     supportsImages: true,
     supportsPromptCache: true,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 3.75,
@@ -1080,6 +1297,7 @@ var bedrockModels = {
     supportsImages: true,
     supportsPromptCache: true,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     outputPrice: 75,
     cacheWritesPrice: 18.75,
@@ -1088,12 +1306,28 @@ var bedrockModels = {
     maxCachePoints: 4,
     cachableFields: ["system", "messages", "tools"]
   },
+  "anthropic.claude-opus-4-5-20251101-v1:0": {
+    maxTokens: 8192,
+    contextWindow: 2e5,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningBudget: true,
+    supportsNativeTools: true,
+    inputPrice: 5,
+    outputPrice: 25,
+    cacheWritesPrice: 6.25,
+    cacheReadsPrice: 0.5,
+    minTokensPerCachePoint: 1024,
+    maxCachePoints: 4,
+    cachableFields: ["system", "messages", "tools"]
+  },
   "anthropic.claude-opus-4-20250514-v1:0": {
     maxTokens: 8192,
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     outputPrice: 75,
     cacheWritesPrice: 18.75,
@@ -1108,6 +1342,7 @@ var bedrockModels = {
     supportsImages: true,
     supportsPromptCache: true,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 3.75,
@@ -1121,6 +1356,7 @@ var bedrockModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 3.75,
@@ -1134,6 +1370,7 @@ var bedrockModels = {
     contextWindow: 2e5,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.8,
     outputPrice: 4,
     cacheWritesPrice: 1,
@@ -1148,6 +1385,7 @@ var bedrockModels = {
     supportsImages: true,
     supportsPromptCache: true,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 1,
     outputPrice: 5,
     cacheWritesPrice: 1.25,
@@ -1163,6 +1401,7 @@ var bedrockModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15
   },
@@ -1171,6 +1410,7 @@ var bedrockModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 15,
     outputPrice: 75
   },
@@ -1179,6 +1419,7 @@ var bedrockModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15
   },
@@ -1187,6 +1428,7 @@ var bedrockModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.25,
     outputPrice: 1.25
   },
@@ -1195,6 +1437,7 @@ var bedrockModels = {
     contextWindow: 1e5,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 8,
     outputPrice: 24,
     description: "Claude 2.1"
@@ -1204,6 +1447,7 @@ var bedrockModels = {
     contextWindow: 1e5,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 8,
     outputPrice: 24,
     description: "Claude 2.0"
@@ -1213,6 +1457,7 @@ var bedrockModels = {
     contextWindow: 1e5,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.8,
     outputPrice: 2.4,
     description: "Claude Instant"
@@ -1222,6 +1467,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 1.35,
     outputPrice: 5.4
   },
@@ -1230,6 +1476,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.5,
     outputPrice: 1.5,
     description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases"
@@ -1239,6 +1486,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 6,
     description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model"
@@ -1248,6 +1496,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.72,
     outputPrice: 0.72,
     description: "Llama 3.3 Instruct (70B)"
@@ -1257,6 +1506,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.72,
     outputPrice: 0.72,
     description: "Llama 3.2 Instruct (90B)"
@@ -1266,6 +1516,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.16,
     outputPrice: 0.16,
     description: "Llama 3.2 Instruct (11B)"
@@ -1275,6 +1526,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.15,
     outputPrice: 0.15,
     description: "Llama 3.2 Instruct (3B)"
@@ -1284,6 +1536,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.1,
     description: "Llama 3.2 Instruct (1B)"
@@ -1293,6 +1546,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2.4,
     outputPrice: 2.4,
     description: "Llama 3.1 Instruct (405B)"
@@ -1302,6 +1556,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.72,
     outputPrice: 0.72,
     description: "Llama 3.1 Instruct (70B)"
@@ -1311,6 +1566,7 @@ var bedrockModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.9,
     outputPrice: 0.9,
     description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
@@ -1320,6 +1576,7 @@ var bedrockModels = {
     contextWindow: 8e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.22,
     outputPrice: 0.22,
     description: "Llama 3.1 Instruct (8B)"
@@ -1329,6 +1586,7 @@ var bedrockModels = {
     contextWindow: 8e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2.65,
     outputPrice: 3.5
   },
@@ -1337,6 +1595,7 @@ var bedrockModels = {
     contextWindow: 4e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.3,
     outputPrice: 0.6
   },
@@ -1345,6 +1604,7 @@ var bedrockModels = {
     contextWindow: 8e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.15,
     outputPrice: 0.2,
     description: "Amazon Titan Text Lite"
@@ -1354,6 +1614,7 @@ var bedrockModels = {
     contextWindow: 8e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.2,
     outputPrice: 0.6,
     description: "Amazon Titan Text Express"
@@ -1430,7 +1691,8 @@ var BEDROCK_1M_CONTEXT_MODEL_IDS = [
 var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
   "anthropic.claude-sonnet-4-20250514-v1:0",
   "anthropic.claude-sonnet-4-5-20250929-v1:0",
-  "anthropic.claude-haiku-4-5-20251001-v1:0"
+  "anthropic.claude-haiku-4-5-20251001-v1:0",
+  "anthropic.claude-opus-4-5-20251101-v1:0"
 ];
 
 // src/providers/cerebras.ts
@@ -1442,33 +1704,17 @@ var cerebrasModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
   },
-  "qwen-3-coder-480b-free": {
-    maxTokens: 4e4,
-    contextWindow: 64e3,
-    supportsImages: false,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0,
-    description: "[SOON TO BE DEPRECATED] SOTA coding model with ~2000 tokens/s ($0 free tier)\n\n\u2022 Use this if you don't have a Cerebras subscription\n\u2022 64K context window\n\u2022 Rate limits: 150K TPM, 1M TPH/TPD, 10 RPM, 100 RPH/RPD\n\nUpgrade for higher limits: [https://cloud.cerebras.ai/?utm=roocode](https://cloud.cerebras.ai/?utm=roocode)"
-  },
-  "qwen-3-coder-480b": {
-    maxTokens: 4e4,
-    contextWindow: 128e3,
-    supportsImages: false,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0,
-    description: "[SOON TO BE DEPRECATED] SOTA coding model with ~2000 tokens/s ($50/$250 paid tiers)\n\n\u2022 Use this if you have a Cerebras subscription\n\u2022 131K context window with higher rate limits"
-  },
   "qwen-3-235b-a22b-instruct-2507": {
     maxTokens: 64e3,
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "Intelligent model with ~1400 tokens/s"
@@ -1478,6 +1724,7 @@ var cerebrasModels = {
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "Powerful model with ~2600 tokens/s"
@@ -1487,25 +1734,17 @@ var cerebrasModels = {
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "SOTA coding performance with ~2500 tokens/s"
   },
-  "qwen-3-235b-a22b-thinking-2507": {
-    maxTokens: 4e4,
-    contextWindow: 65e3,
-    supportsImages: false,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0,
-    description: "SOTA performance with ~1500 tokens/s",
-    supportsReasoningEffort: true
-  },
   "gpt-oss-120b": {
     maxTokens: 8e3,
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -1899,7 +2138,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-sonnet-4-5-20250929[1m]": {
     ...anthropicModels["claude-sonnet-4-5"],
@@ -1910,7 +2152,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-sonnet-4-20250514": {
     ...anthropicModels["claude-sonnet-4-20250514"],
@@ -1919,7 +2164,22 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
+  },
+  "claude-opus-4-5-20251101": {
+    ...anthropicModels["claude-opus-4-5-20251101"],
+    supportsImages: false,
+    supportsPromptCache: true,
+    // Claude Code does report cache tokens
+    supportsReasoningEffort: false,
+    supportsReasoningBudget: false,
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-opus-4-1-20250805": {
     ...anthropicModels["claude-opus-4-1-20250805"],
@@ -1928,7 +2188,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-opus-4-20250514": {
     ...anthropicModels["claude-opus-4-20250514"],
@@ -1937,7 +2200,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
  },
   "claude-3-7-sonnet-20250219": {
     ...anthropicModels["claude-3-7-sonnet-20250219"],
@@ -1946,7 +2212,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-3-5-sonnet-20241022": {
     ...anthropicModels["claude-3-5-sonnet-20241022"],
@@ -1955,7 +2224,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-3-5-haiku-20241022": {
     ...anthropicModels["claude-3-5-haiku-20241022"],
@@ -1964,7 +2236,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-haiku-4-5-20251001": {
     ...anthropicModels["claude-haiku-4-5-20251001"],
@@ -1973,7 +2248,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   }
 };
 
@@ -1986,6 +2264,7 @@ var deepSeekModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.56,
     // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
     outputPrice: 1.68,
@@ -2002,6 +2281,7 @@ var deepSeekModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.56,
     // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
     outputPrice: 1.68,
@@ -2023,6 +2303,7 @@ var doubaoModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 1e-4,
     // $0.0001 per million tokens (cache miss)
     outputPrice: 4e-4,
@@ -2038,6 +2319,7 @@ var doubaoModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 2e-4,
     // $0.0002 per million tokens
     outputPrice: 8e-4,
@@ -2053,6 +2335,7 @@ var doubaoModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15e-5,
     // $0.00015 per million tokens
     outputPrice: 6e-4,
@@ -2093,6 +2376,7 @@ var featherlessModels = {
     contextWindow: 32678,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "Kimi K2 Instruct model."
@@ -2111,6 +2395,7 @@ var featherlessModels = {
     contextWindow: 32678,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "Qwen3 Coder 480B A35B Instruct model."
@@ -2126,6 +2411,7 @@ var fireworksModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     cacheReadsPrice: 0.15,
@@ -2136,6 +2422,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -2145,6 +2432,7 @@ var fireworksModels = {
     contextWindow: 204800,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.3,
     outputPrice: 1.2,
     description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -2154,6 +2442,7 @@ var fireworksModels = {
     contextWindow: 256e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.22,
     outputPrice: 0.88,
     description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -2163,6 +2452,7 @@ var fireworksModels = {
     contextWindow: 256e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.45,
     outputPrice: 1.8,
     description: "Qwen3's most agentic code model to date."
@@ -2172,6 +2462,7 @@ var fireworksModels = {
     contextWindow: 16e4,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 8,
     description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2181,6 +2472,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.9,
     outputPrice: 0.9,
     description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2190,6 +2482,7 @@ var fireworksModels = {
     contextWindow: 163840,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.56,
     outputPrice: 1.68,
     description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
@@ -2199,6 +2492,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.55,
     outputPrice: 2.19,
     description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2208,6 +2502,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.55,
     outputPrice: 2.19,
     description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2217,6 +2512,7 @@ var fireworksModels = {
     contextWindow: 198e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.55,
     outputPrice: 2.19,
     description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
@@ -2226,6 +2522,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.07,
     outputPrice: 0.3,
     description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
@@ -2235,6 +2532,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.15,
     outputPrice: 0.6,
     description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
@@ -2474,6 +2772,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.05,
     outputPrice: 0.08,
     description: "Meta Llama 3.1 8B Instant model, 128K context."
@@ -2483,6 +2782,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.59,
     outputPrice: 0.79,
     description: "Meta Llama 3.3 70B Versatile model, 128K context."
@@ -2492,6 +2792,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.11,
     outputPrice: 0.34,
     description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
@@ -2528,6 +2829,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.29,
     outputPrice: 0.59,
     description: "Alibaba Qwen 3 32B model, 128K context."
@@ -2557,6 +2859,7 @@ var groqModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     cacheReadsPrice: 0.15,
@@ -2567,6 +2870,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.15,
     outputPrice: 0.75,
     description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
@@ -2576,6 +2880,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.5,
     description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
@@ -2602,6 +2907,7 @@ var ioIntelligenceModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "DeepSeek R1 reasoning model"
   },
   "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
@@ -2609,6 +2915,7 @@ var ioIntelligenceModels = {
     contextWindow: 43e4,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "Llama 4 Maverick 17B model"
   },
   "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
@@ -2616,6 +2923,7 @@ var ioIntelligenceModels = {
     contextWindow: 106e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "Qwen3 Coder 480B specialized for coding"
   },
   "openai/gpt-oss-120b": {
@@ -2623,6 +2931,7 @@ var ioIntelligenceModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "OpenAI GPT-OSS 120B model"
   }
 };
@@ -2663,75 +2972,84 @@ var mistralModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 5
   },
   "devstral-medium-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.4,
     outputPrice: 2
   },
   "mistral-medium-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.4,
     outputPrice: 2
   },
   "codestral-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 256e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.3,
     outputPrice: 0.9
   },
   "mistral-large-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 6
   },
   "ministral-8b-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.1
   },
   "ministral-3b-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.04,
     outputPrice: 0.04
   },
   "mistral-small-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 32e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.2,
     outputPrice: 0.6
   },
   "pixtral-large-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 6
   }
 };
-var MISTRAL_DEFAULT_TEMPERATURE =
+var MISTRAL_DEFAULT_TEMPERATURE = 1;
 
 // src/providers/moonshot.ts
 var moonshotDefaultModelId = "kimi-k2-0905-preview";
@@ -2741,6 +3059,7 @@ var moonshotModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     // $0.60 per million tokens (cache miss)
     outputPrice: 2.5,
@@ -2756,6 +3075,7 @@ var moonshotModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     cacheReadsPrice: 0.15,
@@ -2766,6 +3086,7 @@ var moonshotModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 2.4,
     // $2.40 per million tokens (cache miss)
     outputPrice: 10,
@@ -2784,6 +3105,7 @@ var moonshotModels = {
     supportsImages: false,
     // Text-only (no image/vision support)
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     // $0.60 per million tokens (cache miss)
     outputPrice: 2.5,
@@ -2808,6 +3130,7 @@ var ollamaDefaultModelInfo = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
+  supportsNativeTools: true,
   inputPrice: 0,
   outputPrice: 0,
   cacheWritesPrice: 0,
@@ -3290,6 +3613,7 @@ var OPEN_ROUTER_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
   "anthropic/claude-opus-4",
   "anthropic/claude-opus-4.1",
   "anthropic/claude-haiku-4.5",
+  "anthropic/claude-opus-4.5",
   "google/gemini-2.5-flash-preview",
   "google/gemini-2.5-flash-preview:thinking",
   "google/gemini-2.5-flash-preview-05-20",
@@ -3311,6 +3635,7 @@ var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
   "anthropic/claude-opus-4.1",
   "anthropic/claude-sonnet-4",
   "anthropic/claude-sonnet-4.5",
+  "anthropic/claude-opus-4.5",
   "anthropic/claude-haiku-4.5",
   "google/gemini-2.5-pro-preview",
   "google/gemini-2.5-pro",
@@ -3401,6 +3726,7 @@ var sambaNovaModels = {
     contextWindow: 16384,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.2,
     description: "Meta Llama 3.1 8B Instruct model with 16K context window."
@@ -3410,6 +3736,7 @@ var sambaNovaModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 1.2,
     description: "Meta Llama 3.3 70B Instruct model with 128K context window."
@@ -3420,6 +3747,7 @@ var sambaNovaModels = {
     supportsImages: false,
     supportsPromptCache: false,
|
|
3422
3749
|
supportsReasoningBudget: true,
|
|
3750
|
+
supportsNativeTools: true,
|
|
3423
3751
|
inputPrice: 5,
|
|
3424
3752
|
outputPrice: 7,
|
|
3425
3753
|
description: "DeepSeek R1 reasoning model with 32K context window."
|
|
@@ -3429,6 +3757,7 @@ var sambaNovaModels = {
|
|
|
3429
3757
|
contextWindow: 32768,
|
|
3430
3758
|
supportsImages: false,
|
|
3431
3759
|
supportsPromptCache: false,
|
|
3760
|
+
supportsNativeTools: true,
|
|
3432
3761
|
inputPrice: 3,
|
|
3433
3762
|
outputPrice: 4.5,
|
|
3434
3763
|
description: "DeepSeek V3 model with 32K context window."
|
|
@@ -3438,6 +3767,7 @@ var sambaNovaModels = {
|
|
|
3438
3767
|
contextWindow: 32768,
|
|
3439
3768
|
supportsImages: false,
|
|
3440
3769
|
supportsPromptCache: false,
|
|
3770
|
+
supportsNativeTools: true,
|
|
3441
3771
|
inputPrice: 3,
|
|
3442
3772
|
outputPrice: 4.5,
|
|
3443
3773
|
description: "DeepSeek V3.1 model with 32K context window."
|
|
@@ -3456,6 +3786,7 @@ var sambaNovaModels = {
|
|
|
3456
3786
|
contextWindow: 131072,
|
|
3457
3787
|
supportsImages: true,
|
|
3458
3788
|
supportsPromptCache: false,
|
|
3789
|
+
supportsNativeTools: true,
|
|
3459
3790
|
inputPrice: 0.63,
|
|
3460
3791
|
outputPrice: 1.8,
|
|
3461
3792
|
description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
|
|
@@ -3474,6 +3805,7 @@ var sambaNovaModels = {
|
|
|
3474
3805
|
contextWindow: 8192,
|
|
3475
3806
|
supportsImages: false,
|
|
3476
3807
|
supportsPromptCache: false,
|
|
3808
|
+
supportsNativeTools: true,
|
|
3477
3809
|
inputPrice: 0.4,
|
|
3478
3810
|
outputPrice: 0.8,
|
|
3479
3811
|
description: "Alibaba Qwen 3 32B model with 8K context window."
|
|
@@ -3483,6 +3815,7 @@ var sambaNovaModels = {
|
|
|
3483
3815
|
contextWindow: 131072,
|
|
3484
3816
|
supportsImages: false,
|
|
3485
3817
|
supportsPromptCache: false,
|
|
3818
|
+
supportsNativeTools: true,
|
|
3486
3819
|
inputPrice: 0.22,
|
|
3487
3820
|
outputPrice: 0.59,
|
|
3488
3821
|
description: "OpenAI gpt oss 120b model with 128k context window."
|
|
@@ -3496,6 +3829,7 @@ var unboundDefaultModelInfo = {
|
|
|
3496
3829
|
contextWindow: 2e5,
|
|
3497
3830
|
supportsImages: true,
|
|
3498
3831
|
supportsPromptCache: true,
|
|
3832
|
+
supportsNativeTools: true,
|
|
3499
3833
|
inputPrice: 3,
|
|
3500
3834
|
outputPrice: 15,
|
|
3501
3835
|
cacheWritesPrice: 3.75,
|
|
@@ -3509,6 +3843,7 @@ var vertexModels = {
|
|
|
3509
3843
|
maxTokens: 65536,
|
|
3510
3844
|
contextWindow: 1048576,
|
|
3511
3845
|
supportsImages: true,
|
|
3846
|
+
supportsNativeTools: true,
|
|
3512
3847
|
supportsPromptCache: true,
|
|
3513
3848
|
supportsReasoningEffort: ["low", "high"],
|
|
3514
3849
|
reasoningEffort: "low",
|
|
@@ -3533,6 +3868,7 @@ var vertexModels = {
|
|
|
3533
3868
|
maxTokens: 65535,
|
|
3534
3869
|
contextWindow: 1048576,
|
|
3535
3870
|
supportsImages: true,
|
|
3871
|
+
supportsNativeTools: true,
|
|
3536
3872
|
supportsPromptCache: true,
|
|
3537
3873
|
inputPrice: 0.15,
|
|
3538
3874
|
outputPrice: 3.5,
|
|
@@ -3544,6 +3880,7 @@ var vertexModels = {
|
|
|
3544
3880
|
maxTokens: 65535,
|
|
3545
3881
|
contextWindow: 1048576,
|
|
3546
3882
|
supportsImages: true,
|
|
3883
|
+
supportsNativeTools: true,
|
|
3547
3884
|
supportsPromptCache: true,
|
|
3548
3885
|
inputPrice: 0.15,
|
|
3549
3886
|
outputPrice: 0.6
|
|
@@ -3552,6 +3889,7 @@ var vertexModels = {
|
|
|
3552
3889
|
maxTokens: 64e3,
|
|
3553
3890
|
contextWindow: 1048576,
|
|
3554
3891
|
supportsImages: true,
|
|
3892
|
+
supportsNativeTools: true,
|
|
3555
3893
|
supportsPromptCache: true,
|
|
3556
3894
|
inputPrice: 0.3,
|
|
3557
3895
|
outputPrice: 2.5,
|
|
@@ -3564,6 +3902,7 @@ var vertexModels = {
|
|
|
3564
3902
|
maxTokens: 65535,
|
|
3565
3903
|
contextWindow: 1048576,
|
|
3566
3904
|
supportsImages: true,
|
|
3905
|
+
supportsNativeTools: true,
|
|
3567
3906
|
supportsPromptCache: false,
|
|
3568
3907
|
inputPrice: 0.15,
|
|
3569
3908
|
outputPrice: 3.5,
|
|
@@ -3575,6 +3914,7 @@ var vertexModels = {
|
|
|
3575
3914
|
maxTokens: 65535,
|
|
3576
3915
|
contextWindow: 1048576,
|
|
3577
3916
|
supportsImages: true,
|
|
3917
|
+
supportsNativeTools: true,
|
|
3578
3918
|
supportsPromptCache: false,
|
|
3579
3919
|
inputPrice: 0.15,
|
|
3580
3920
|
outputPrice: 0.6
|
|
@@ -3583,6 +3923,7 @@ var vertexModels = {
|
|
|
3583
3923
|
maxTokens: 65535,
|
|
3584
3924
|
contextWindow: 1048576,
|
|
3585
3925
|
supportsImages: true,
|
|
3926
|
+
supportsNativeTools: true,
|
|
3586
3927
|
supportsPromptCache: true,
|
|
3587
3928
|
inputPrice: 2.5,
|
|
3588
3929
|
outputPrice: 15
|
|
@@ -3591,6 +3932,7 @@ var vertexModels = {
|
|
|
3591
3932
|
maxTokens: 65535,
|
|
3592
3933
|
contextWindow: 1048576,
|
|
3593
3934
|
supportsImages: true,
|
|
3935
|
+
supportsNativeTools: true,
|
|
3594
3936
|
supportsPromptCache: true,
|
|
3595
3937
|
inputPrice: 2.5,
|
|
3596
3938
|
outputPrice: 15
|
|
@@ -3599,6 +3941,7 @@ var vertexModels = {
|
|
|
3599
3941
|
maxTokens: 65535,
|
|
3600
3942
|
contextWindow: 1048576,
|
|
3601
3943
|
supportsImages: true,
|
|
3944
|
+
supportsNativeTools: true,
|
|
3602
3945
|
supportsPromptCache: true,
|
|
3603
3946
|
inputPrice: 2.5,
|
|
3604
3947
|
outputPrice: 15,
|
|
@@ -3609,6 +3952,7 @@ var vertexModels = {
|
|
|
3609
3952
|
maxTokens: 64e3,
|
|
3610
3953
|
contextWindow: 1048576,
|
|
3611
3954
|
supportsImages: true,
|
|
3955
|
+
supportsNativeTools: true,
|
|
3612
3956
|
supportsPromptCache: true,
|
|
3613
3957
|
inputPrice: 2.5,
|
|
3614
3958
|
outputPrice: 15,
|
|
@@ -3634,6 +3978,7 @@ var vertexModels = {
|
|
|
3634
3978
|
maxTokens: 65535,
|
|
3635
3979
|
contextWindow: 1048576,
|
|
3636
3980
|
supportsImages: true,
|
|
3981
|
+
supportsNativeTools: true,
|
|
3637
3982
|
supportsPromptCache: false,
|
|
3638
3983
|
inputPrice: 0,
|
|
3639
3984
|
outputPrice: 0
|
|
@@ -3642,6 +3987,7 @@ var vertexModels = {
|
|
|
3642
3987
|
maxTokens: 8192,
|
|
3643
3988
|
contextWindow: 2097152,
|
|
3644
3989
|
supportsImages: true,
|
|
3990
|
+
supportsNativeTools: true,
|
|
3645
3991
|
supportsPromptCache: false,
|
|
3646
3992
|
inputPrice: 0,
|
|
3647
3993
|
outputPrice: 0
|
|
@@ -3650,6 +3996,7 @@ var vertexModels = {
|
|
|
3650
3996
|
maxTokens: 8192,
|
|
3651
3997
|
contextWindow: 1048576,
|
|
3652
3998
|
supportsImages: true,
|
|
3999
|
+
supportsNativeTools: true,
|
|
3653
4000
|
supportsPromptCache: true,
|
|
3654
4001
|
inputPrice: 0.15,
|
|
3655
4002
|
outputPrice: 0.6
|
|
@@ -3658,6 +4005,7 @@ var vertexModels = {
|
|
|
3658
4005
|
maxTokens: 8192,
|
|
3659
4006
|
contextWindow: 1048576,
|
|
3660
4007
|
supportsImages: true,
|
|
4008
|
+
supportsNativeTools: true,
|
|
3661
4009
|
supportsPromptCache: false,
|
|
3662
4010
|
inputPrice: 0.075,
|
|
3663
4011
|
outputPrice: 0.3
|
|
@@ -3666,6 +4014,7 @@ var vertexModels = {
|
|
|
3666
4014
|
maxTokens: 8192,
|
|
3667
4015
|
contextWindow: 32768,
|
|
3668
4016
|
supportsImages: true,
|
|
4017
|
+
supportsNativeTools: true,
|
|
3669
4018
|
supportsPromptCache: false,
|
|
3670
4019
|
inputPrice: 0,
|
|
3671
4020
|
outputPrice: 0
|
|
@@ -3674,6 +4023,7 @@ var vertexModels = {
|
|
|
3674
4023
|
maxTokens: 8192,
|
|
3675
4024
|
contextWindow: 1048576,
|
|
3676
4025
|
supportsImages: true,
|
|
4026
|
+
supportsNativeTools: true,
|
|
3677
4027
|
supportsPromptCache: true,
|
|
3678
4028
|
inputPrice: 0.075,
|
|
3679
4029
|
outputPrice: 0.3
|
|
@@ -3682,6 +4032,7 @@ var vertexModels = {
|
|
|
3682
4032
|
maxTokens: 8192,
|
|
3683
4033
|
contextWindow: 2097152,
|
|
3684
4034
|
supportsImages: true,
|
|
4035
|
+
supportsNativeTools: true,
|
|
3685
4036
|
supportsPromptCache: false,
|
|
3686
4037
|
inputPrice: 1.25,
|
|
3687
4038
|
outputPrice: 5
|
|
@@ -3719,6 +4070,17 @@ var vertexModels = {
|
|
|
3719
4070
|
cacheReadsPrice: 0.1,
|
|
3720
4071
|
supportsReasoningBudget: true
|
|
3721
4072
|
},
|
|
4073
|
+
"claude-opus-4-5@20251101": {
|
|
4074
|
+
maxTokens: 8192,
|
|
4075
|
+
contextWindow: 2e5,
|
|
4076
|
+
supportsImages: true,
|
|
4077
|
+
supportsPromptCache: true,
|
|
4078
|
+
inputPrice: 5,
|
|
4079
|
+
outputPrice: 25,
|
|
4080
|
+
cacheWritesPrice: 6.25,
|
|
4081
|
+
cacheReadsPrice: 0.5,
|
|
4082
|
+
supportsReasoningBudget: true
|
|
4083
|
+
},
|
|
3722
4084
|
"claude-opus-4-1@20250805": {
|
|
3723
4085
|
maxTokens: 8192,
|
|
3724
4086
|
contextWindow: 2e5,
|
|
@@ -3816,6 +4178,7 @@ var vertexModels = {
|
|
|
3816
4178
|
maxTokens: 64e3,
|
|
3817
4179
|
contextWindow: 1048576,
|
|
3818
4180
|
supportsImages: true,
|
|
4181
|
+
supportsNativeTools: true,
|
|
3819
4182
|
supportsPromptCache: true,
|
|
3820
4183
|
inputPrice: 0.1,
|
|
3821
4184
|
outputPrice: 0.4,
|
|
@@ -4117,17 +4480,67 @@ var xaiModels = {
|
|
|
4117
4480
|
contextWindow: 262144,
|
|
4118
4481
|
supportsImages: false,
|
|
4119
4482
|
supportsPromptCache: true,
|
|
4483
|
+
supportsNativeTools: true,
|
|
4120
4484
|
inputPrice: 0.2,
|
|
4121
4485
|
outputPrice: 1.5,
|
|
4122
4486
|
cacheWritesPrice: 0.02,
|
|
4123
4487
|
cacheReadsPrice: 0.02,
|
|
4124
4488
|
description: "xAI's Grok Code Fast model with 256K context window"
|
|
4125
4489
|
},
|
|
4490
|
+
"grok-4-1-fast-reasoning": {
|
|
4491
|
+
maxTokens: 65536,
|
|
4492
|
+
contextWindow: 2e6,
|
|
4493
|
+
supportsImages: true,
|
|
4494
|
+
supportsPromptCache: true,
|
|
4495
|
+
supportsNativeTools: true,
|
|
4496
|
+
inputPrice: 0.2,
|
|
4497
|
+
outputPrice: 0.5,
|
|
4498
|
+
cacheWritesPrice: 0.05,
|
|
4499
|
+
cacheReadsPrice: 0.05,
|
|
4500
|
+
description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
|
|
4501
|
+
},
|
|
4502
|
+
"grok-4-1-fast-non-reasoning": {
|
|
4503
|
+
maxTokens: 65536,
|
|
4504
|
+
contextWindow: 2e6,
|
|
4505
|
+
supportsImages: true,
|
|
4506
|
+
supportsPromptCache: true,
|
|
4507
|
+
supportsNativeTools: true,
|
|
4508
|
+
inputPrice: 0.2,
|
|
4509
|
+
outputPrice: 0.5,
|
|
4510
|
+
cacheWritesPrice: 0.05,
|
|
4511
|
+
cacheReadsPrice: 0.05,
|
|
4512
|
+
description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling"
|
|
4513
|
+
},
|
|
4514
|
+
"grok-4-fast-reasoning": {
|
|
4515
|
+
maxTokens: 65536,
|
|
4516
|
+
contextWindow: 2e6,
|
|
4517
|
+
supportsImages: true,
|
|
4518
|
+
supportsPromptCache: true,
|
|
4519
|
+
supportsNativeTools: true,
|
|
4520
|
+
inputPrice: 0.2,
|
|
4521
|
+
outputPrice: 0.5,
|
|
4522
|
+
cacheWritesPrice: 0.05,
|
|
4523
|
+
cacheReadsPrice: 0.05,
|
|
4524
|
+
description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
|
|
4525
|
+
},
|
|
4526
|
+
"grok-4-fast-non-reasoning": {
|
|
4527
|
+
maxTokens: 65536,
|
|
4528
|
+
contextWindow: 2e6,
|
|
4529
|
+
supportsImages: true,
|
|
4530
|
+
supportsPromptCache: true,
|
|
4531
|
+
supportsNativeTools: true,
|
|
4532
|
+
inputPrice: 0.2,
|
|
4533
|
+
outputPrice: 0.5,
|
|
4534
|
+
cacheWritesPrice: 0.05,
|
|
4535
|
+
cacheReadsPrice: 0.05,
|
|
4536
|
+
description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling"
|
|
4537
|
+
},
|
|
4126
4538
|
"grok-4": {
|
|
4127
4539
|
maxTokens: 8192,
|
|
4128
4540
|
contextWindow: 256e3,
|
|
4129
4541
|
supportsImages: true,
|
|
4130
4542
|
supportsPromptCache: true,
|
|
4543
|
+
supportsNativeTools: true,
|
|
4131
4544
|
inputPrice: 3,
|
|
4132
4545
|
outputPrice: 15,
|
|
4133
4546
|
cacheWritesPrice: 0.75,
|
|
@@ -4139,6 +4552,7 @@ var xaiModels = {
|
|
|
4139
4552
|
contextWindow: 131072,
|
|
4140
4553
|
supportsImages: false,
|
|
4141
4554
|
supportsPromptCache: true,
|
|
4555
|
+
supportsNativeTools: true,
|
|
4142
4556
|
inputPrice: 3,
|
|
4143
4557
|
outputPrice: 15,
|
|
4144
4558
|
cacheWritesPrice: 0.75,
|
|
@@ -4150,6 +4564,7 @@ var xaiModels = {
|
|
|
4150
4564
|
contextWindow: 131072,
|
|
4151
4565
|
supportsImages: false,
|
|
4152
4566
|
supportsPromptCache: true,
|
|
4567
|
+
supportsNativeTools: true,
|
|
4153
4568
|
inputPrice: 5,
|
|
4154
4569
|
outputPrice: 25,
|
|
4155
4570
|
cacheWritesPrice: 1.25,
|
|
@@ -4161,6 +4576,7 @@ var xaiModels = {
|
|
|
4161
4576
|
contextWindow: 131072,
|
|
4162
4577
|
supportsImages: false,
|
|
4163
4578
|
supportsPromptCache: true,
|
|
4579
|
+
supportsNativeTools: true,
|
|
4164
4580
|
inputPrice: 0.3,
|
|
4165
4581
|
outputPrice: 0.5,
|
|
4166
4582
|
cacheWritesPrice: 0.07,
|
|
@@ -4173,6 +4589,7 @@ var xaiModels = {
|
|
|
4173
4589
|
contextWindow: 131072,
|
|
4174
4590
|
supportsImages: false,
|
|
4175
4591
|
supportsPromptCache: true,
|
|
4592
|
+
supportsNativeTools: true,
|
|
4176
4593
|
inputPrice: 0.6,
|
|
4177
4594
|
outputPrice: 4,
|
|
4178
4595
|
cacheWritesPrice: 0.15,
|
|
@@ -4185,6 +4602,7 @@ var xaiModels = {
|
|
|
4185
4602
|
contextWindow: 131072,
|
|
4186
4603
|
supportsImages: false,
|
|
4187
4604
|
supportsPromptCache: false,
|
|
4605
|
+
supportsNativeTools: true,
|
|
4188
4606
|
inputPrice: 2,
|
|
4189
4607
|
outputPrice: 10,
|
|
4190
4608
|
description: "xAI's Grok-2 model (version 1212) with 128K context window"
|
|
@@ -4194,6 +4612,7 @@ var xaiModels = {
|
|
|
4194
4612
|
contextWindow: 32768,
|
|
4195
4613
|
supportsImages: true,
|
|
4196
4614
|
supportsPromptCache: false,
|
|
4615
|
+
supportsNativeTools: true,
|
|
4197
4616
|
inputPrice: 2,
|
|
4198
4617
|
outputPrice: 10,
|
|
4199
4618
|
description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window"
|
|
@@ -4302,6 +4721,7 @@ var internationalZAiModels = {
|
|
|
4302
4721
|
contextWindow: 131072,
|
|
4303
4722
|
supportsImages: false,
|
|
4304
4723
|
supportsPromptCache: true,
|
|
4724
|
+
supportsNativeTools: true,
|
|
4305
4725
|
supportsReasoningBinary: true,
|
|
4306
4726
|
inputPrice: 0.6,
|
|
4307
4727
|
outputPrice: 2.2,
|
|
@@ -4314,6 +4734,7 @@ var internationalZAiModels = {
|
|
|
4314
4734
|
contextWindow: 131072,
|
|
4315
4735
|
supportsImages: false,
|
|
4316
4736
|
supportsPromptCache: true,
|
|
4737
|
+
supportsNativeTools: true,
|
|
4317
4738
|
inputPrice: 0.2,
|
|
4318
4739
|
outputPrice: 1.1,
|
|
4319
4740
|
cacheWritesPrice: 0,
|
|
@@ -4325,6 +4746,7 @@ var internationalZAiModels = {
|
|
|
4325
4746
|
contextWindow: 131072,
|
|
4326
4747
|
supportsImages: false,
|
|
4327
4748
|
supportsPromptCache: true,
|
|
4749
|
+
supportsNativeTools: true,
|
|
4328
4750
|
inputPrice: 2.2,
|
|
4329
4751
|
outputPrice: 8.9,
|
|
4330
4752
|
cacheWritesPrice: 0,
|
|
@@ -4336,6 +4758,7 @@ var internationalZAiModels = {
|
|
|
4336
4758
|
contextWindow: 131072,
|
|
4337
4759
|
supportsImages: false,
|
|
4338
4760
|
supportsPromptCache: true,
|
|
4761
|
+
supportsNativeTools: true,
|
|
4339
4762
|
inputPrice: 1.1,
|
|
4340
4763
|
outputPrice: 4.5,
|
|
4341
4764
|
cacheWritesPrice: 0,
|
|
@@ -4347,6 +4770,7 @@ var internationalZAiModels = {
|
|
|
4347
4770
|
contextWindow: 131072,
|
|
4348
4771
|
supportsImages: false,
|
|
4349
4772
|
supportsPromptCache: true,
|
|
4773
|
+
supportsNativeTools: true,
|
|
4350
4774
|
inputPrice: 0,
|
|
4351
4775
|
outputPrice: 0,
|
|
4352
4776
|
cacheWritesPrice: 0,
|
|
@@ -4358,6 +4782,7 @@ var internationalZAiModels = {
|
|
|
4358
4782
|
contextWindow: 131072,
|
|
4359
4783
|
supportsImages: true,
|
|
4360
4784
|
supportsPromptCache: true,
|
|
4785
|
+
supportsNativeTools: true,
|
|
4361
4786
|
inputPrice: 0.6,
|
|
4362
4787
|
outputPrice: 1.8,
|
|
4363
4788
|
cacheWritesPrice: 0,
|
|
@@ -4369,6 +4794,7 @@ var internationalZAiModels = {
|
|
|
4369
4794
|
contextWindow: 2e5,
|
|
4370
4795
|
supportsImages: false,
|
|
4371
4796
|
supportsPromptCache: true,
|
|
4797
|
+
supportsNativeTools: true,
|
|
4372
4798
|
supportsReasoningBinary: true,
|
|
4373
4799
|
inputPrice: 0.6,
|
|
4374
4800
|
outputPrice: 2.2,
|
|
@@ -4381,6 +4807,7 @@ var internationalZAiModels = {
|
|
|
4381
4807
|
contextWindow: 131072,
|
|
4382
4808
|
supportsImages: false,
|
|
4383
4809
|
supportsPromptCache: false,
|
|
4810
|
+
supportsNativeTools: true,
|
|
4384
4811
|
inputPrice: 0.1,
|
|
4385
4812
|
outputPrice: 0.1,
|
|
4386
4813
|
cacheWritesPrice: 0,
|
|
@@ -4395,6 +4822,7 @@ var mainlandZAiModels = {
|
|
|
4395
4822
|
contextWindow: 131072,
|
|
4396
4823
|
supportsImages: false,
|
|
4397
4824
|
supportsPromptCache: true,
|
|
4825
|
+
supportsNativeTools: true,
|
|
4398
4826
|
supportsReasoningBinary: true,
|
|
4399
4827
|
inputPrice: 0.29,
|
|
4400
4828
|
outputPrice: 1.14,
|
|
@@ -4407,6 +4835,7 @@ var mainlandZAiModels = {
|
|
|
4407
4835
|
contextWindow: 131072,
|
|
4408
4836
|
supportsImages: false,
|
|
4409
4837
|
supportsPromptCache: true,
|
|
4838
|
+
supportsNativeTools: true,
|
|
4410
4839
|
inputPrice: 0.1,
|
|
4411
4840
|
outputPrice: 0.6,
|
|
4412
4841
|
cacheWritesPrice: 0,
|
|
@@ -4418,6 +4847,7 @@ var mainlandZAiModels = {
|
|
|
4418
4847
|
contextWindow: 131072,
|
|
4419
4848
|
supportsImages: false,
|
|
4420
4849
|
supportsPromptCache: true,
|
|
4850
|
+
supportsNativeTools: true,
|
|
4421
4851
|
inputPrice: 0.29,
|
|
4422
4852
|
outputPrice: 1.14,
|
|
4423
4853
|
cacheWritesPrice: 0,
|
|
@@ -4429,6 +4859,7 @@ var mainlandZAiModels = {
|
|
|
4429
4859
|
contextWindow: 131072,
|
|
4430
4860
|
supportsImages: false,
|
|
4431
4861
|
supportsPromptCache: true,
|
|
4862
|
+
supportsNativeTools: true,
|
|
4432
4863
|
inputPrice: 0.1,
|
|
4433
4864
|
outputPrice: 0.6,
|
|
4434
4865
|
cacheWritesPrice: 0,
|
|
@@ -4440,6 +4871,7 @@ var mainlandZAiModels = {
|
|
|
4440
4871
|
contextWindow: 131072,
|
|
4441
4872
|
supportsImages: false,
|
|
4442
4873
|
supportsPromptCache: true,
|
|
4874
|
+
supportsNativeTools: true,
|
|
4443
4875
|
inputPrice: 0,
|
|
4444
4876
|
outputPrice: 0,
|
|
4445
4877
|
cacheWritesPrice: 0,
|
|
@@ -4451,6 +4883,7 @@ var mainlandZAiModels = {
|
|
|
4451
4883
|
contextWindow: 131072,
|
|
4452
4884
|
supportsImages: true,
|
|
4453
4885
|
supportsPromptCache: true,
|
|
4886
|
+
supportsNativeTools: true,
|
|
4454
4887
|
inputPrice: 0.29,
|
|
4455
4888
|
outputPrice: 0.93,
|
|
4456
4889
|
cacheWritesPrice: 0,
|
|
@@ -4462,6 +4895,7 @@ var mainlandZAiModels = {
|
|
|
4462
4895
|
contextWindow: 204800,
|
|
4463
4896
|
supportsImages: false,
|
|
4464
4897
|
supportsPromptCache: true,
|
|
4898
|
+
supportsNativeTools: true,
|
|
4465
4899
|
supportsReasoningBinary: true,
|
|
4466
4900
|
inputPrice: 0.29,
|
|
4467
4901
|
outputPrice: 1.14,
|
|
@@ -4491,6 +4925,7 @@ var deepInfraDefaultModelInfo = {
|
|
|
4491
4925
|
contextWindow: 262144,
|
|
4492
4926
|
supportsImages: false,
|
|
4493
4927
|
supportsPromptCache: false,
|
|
4928
|
+
supportsNativeTools: true,
|
|
4494
4929
|
inputPrice: 0.3,
|
|
4495
4930
|
outputPrice: 1.2,
|
|
4496
4931
|
description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
|
|
@@ -4551,6 +4986,8 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
|
|
|
4551
4986
|
return "meta-llama/Llama-3.3-70B-Instruct";
|
|
4552
4987
|
case "chutes":
|
|
4553
4988
|
return chutesDefaultModelId;
|
|
4989
|
+
case "baseten":
|
|
4990
|
+
return basetenDefaultModelId;
|
|
4554
4991
|
case "bedrock":
|
|
4555
4992
|
return bedrockDefaultModelId;
|
|
4556
4993
|
case "vertex":
|
|
@@ -4644,6 +5081,7 @@ var providerNames = [
|
|
|
4644
5081
|
...fauxProviders,
|
|
4645
5082
|
"anthropic",
|
|
4646
5083
|
"bedrock",
|
|
5084
|
+
"baseten",
|
|
4647
5085
|
"cerebras",
|
|
4648
5086
|
"claude-code",
|
|
4649
5087
|
"doubao",
|
|
@@ -4883,6 +5321,9 @@ var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
|
|
|
4883
5321
|
vercelAiGatewayApiKey: import_zod8.z.string().optional(),
|
|
4884
5322
|
vercelAiGatewayModelId: import_zod8.z.string().optional()
|
|
4885
5323
|
});
|
|
5324
|
+
var basetenSchema = apiModelIdProviderModelSchema.extend({
|
|
5325
|
+
basetenApiKey: import_zod8.z.string().optional()
|
|
5326
|
+
});
|
|
4886
5327
|
var defaultSchema = import_zod8.z.object({
|
|
4887
5328
|
apiProvider: import_zod8.z.undefined()
|
|
4888
5329
|
});
|
|
@@ -4912,6 +5353,7 @@ var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiP
|
|
|
4912
5353
|
fakeAiSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("fake-ai") })),
|
|
4913
5354
|
xaiSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("xai") })),
|
|
4914
5355
|
groqSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("groq") })),
|
|
5356
|
+
basetenSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("baseten") })),
|
|
4915
5357
|
huggingFaceSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("huggingface") })),
|
|
4916
5358
|
chutesSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("chutes") })),
|
|
4917
5359
|
litellmSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("litellm") })),
|
|
@@ -4953,6 +5395,7 @@ var providerSettingsSchema = import_zod8.z.object({
|
|
|
4953
5395
|
...fakeAiSchema.shape,
|
|
4954
5396
|
...xaiSchema.shape,
|
|
4955
5397
|
...groqSchema.shape,
|
|
5398
|
+
...basetenSchema.shape,
|
|
4956
5399
|
...huggingFaceSchema.shape,
|
|
4957
5400
|
...chutesSchema.shape,
|
|
4958
5401
|
...litellmSchema.shape,
|
|
@@ -5016,6 +5459,7 @@ var modelIdKeysByProvider = {
|
|
|
5016
5459
|
requesty: "requestyModelId",
|
|
5017
5460
|
xai: "apiModelId",
|
|
5018
5461
|
groq: "apiModelId",
|
|
5462
|
+
baseten: "apiModelId",
|
|
5019
5463
|
chutes: "apiModelId",
|
|
5020
5464
|
litellm: "litellmModelId",
|
|
5021
5465
|
huggingface: "huggingFaceModelId",
|
|
@@ -5123,7 +5567,8 @@ var MODELS_BY_PROVIDER = {
|
|
|
5123
5567
|
models: Object.keys(vscodeLlmModels)
|
|
5124
5568
|
},
|
|
5125
5569
|
xai: { id: "xai", label: "xAI (Grok)", models: Object.keys(xaiModels) },
|
|
5126
|
-
zai: { id: "zai", label: "
|
|
5570
|
+
zai: { id: "zai", label: "Z.ai", models: Object.keys(internationalZAiModels) },
|
|
5571
|
+
baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
|
|
5127
5572
|
// Dynamic providers; models pulled from remote APIs.
|
|
5128
5573
|
glama: { id: "glama", label: "Glama", models: [] },
|
|
5129
5574
|
huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
|
|
@@ -5155,7 +5600,18 @@ var historyItemSchema = import_zod9.z.object({
|
|
|
5155
5600
|
totalCost: import_zod9.z.number(),
|
|
5156
5601
|
size: import_zod9.z.number().optional(),
|
|
5157
5602
|
workspace: import_zod9.z.string().optional(),
|
|
5158
|
-
mode: import_zod9.z.string().optional()
|
|
5603
|
+
mode: import_zod9.z.string().optional(),
|
|
5604
|
+
status: import_zod9.z.enum(["active", "completed", "delegated"]).optional(),
|
|
5605
|
+
delegatedToId: import_zod9.z.string().optional(),
|
|
5606
|
+
// Last child this parent delegated to
|
|
5607
|
+
childIds: import_zod9.z.array(import_zod9.z.string()).optional(),
|
|
5608
|
+
// All children spawned by this task
|
|
5609
|
+
awaitingChildId: import_zod9.z.string().optional(),
|
|
5610
|
+
// Child currently awaited (set when delegated)
|
|
5611
|
+
completedByChildId: import_zod9.z.string().optional(),
|
|
5612
|
+
// Child that completed and resumed this parent
|
|
5613
|
+
completionResultSummary: import_zod9.z.string().optional()
|
|
5614
|
+
// Summary from completed child
|
|
5159
5615
|
});
|
|
5160
5616
|
|
|
5161
5617
|
// src/experiment.ts
|
|
@@ -5165,7 +5621,8 @@ var experimentIds = [
|
|
|
5165
5621
|
"multiFileApplyDiff",
|
|
5166
5622
|
"preventFocusDisruption",
|
|
5167
5623
|
"imageGeneration",
|
|
5168
|
-
"runSlashCommand"
|
|
5624
|
+
"runSlashCommand",
|
|
5625
|
+
"multipleNativeToolCalls"
|
|
5169
5626
|
];
|
|
5170
5627
|
var experimentIdsSchema = import_zod10.z.enum(experimentIds);
|
|
5171
5628
|
var experimentsSchema = import_zod10.z.object({
|
|
@@ -5173,7 +5630,8 @@ var experimentsSchema = import_zod10.z.object({
|
|
|
5173
5630
|
multiFileApplyDiff: import_zod10.z.boolean().optional(),
|
|
5174
5631
|
preventFocusDisruption: import_zod10.z.boolean().optional(),
|
|
5175
5632
|
imageGeneration: import_zod10.z.boolean().optional(),
|
|
5176
|
-
runSlashCommand: import_zod10.z.boolean().optional()
|
|
5633
|
+
runSlashCommand: import_zod10.z.boolean().optional(),
|
|
5634
|
+
multipleNativeToolCalls: import_zod10.z.boolean().optional()
|
|
5177
5635
|
});
|
|
5178
5636
|
|
|
5179
5637
|
// src/telemetry.ts
|
|
@@ -5223,6 +5681,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
|
|
|
5223
5681
|
TelemetryEventName2["CONSECUTIVE_MISTAKE_ERROR"] = "Consecutive Mistake Error";
|
|
5224
5682
|
TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
|
|
5225
5683
|
TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
|
|
5684
|
+
TelemetryEventName2["MODEL_CACHE_EMPTY_RESPONSE"] = "Model Cache Empty Response";
|
|
5226
5685
|
return TelemetryEventName2;
|
|
5227
5686
|
})(TelemetryEventName || {});
|
|
5228
5687
|
var staticAppPropertiesSchema = import_zod11.z.object({
|
|
@@ -5306,6 +5765,7 @@ var rooCodeTelemetryEventSchema = import_zod11.z.discriminatedUnion("type", [
|
|
|
5306
5765
|
"Shell Integration Error" /* SHELL_INTEGRATION_ERROR */,
|
|
5307
5766
|
"Consecutive Mistake Error" /* CONSECUTIVE_MISTAKE_ERROR */,
|
|
5308
5767
|
"Code Index Error" /* CODE_INDEX_ERROR */,
|
|
5768
|
+
"Model Cache Empty Response" /* MODEL_CACHE_EMPTY_RESPONSE */,
|
|
5309
5769
|
"Context Condensed" /* CONTEXT_CONDENSED */,
|
|
5310
5770
|
"Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */,
|
|
5311
5771
|
"Tab Shown" /* TAB_SHOWN */,
|
|
@@ -5466,8 +5926,6 @@ var terminalActionIds = ["terminalAddToContext", "terminalFixCommand", "terminal
|
|
|
5466
5926
|
var commandIds = [
|
|
5467
5927
|
"activationCompleted",
|
|
5468
5928
|
"plusButtonClicked",
|
|
5469
|
-
"promptsButtonClicked",
|
|
5470
|
-
"mcpButtonClicked",
|
|
5471
5929
|
"historyButtonClicked",
|
|
5472
5930
|
"marketplaceButtonClicked",
|
|
5473
5931
|
"popoutButtonClicked",
|
|
@@ -5524,6 +5982,7 @@ var globalSettingsSchema = import_zod14.z.object({
|
|
|
5524
5982
|
taskHistory: import_zod14.z.array(historyItemSchema).optional(),
|
|
5525
5983
|
dismissedUpsells: import_zod14.z.array(import_zod14.z.string()).optional(),
|
|
5526
5984
|
// Image generation settings (experimental) - flattened for simplicity
|
|
5985
|
+
imageGenerationProvider: import_zod14.z.enum(["openrouter", "roo"]).optional(),
|
|
5527
5986
|
openRouterImageApiKey: import_zod14.z.string().optional(),
|
|
5528
5987
|
openRouterImageGenerationSelectedModel: import_zod14.z.string().optional(),
|
|
5529
5988
|
condensingApiConfigId: import_zod14.z.string().optional(),
|
|
@@ -5675,7 +6134,8 @@ var SECRET_STATE_KEYS = [
|
|
|
5675
6134
|
"fireworksApiKey",
|
|
5676
6135
|
"featherlessApiKey",
|
|
5677
6136
|
"ioIntelligenceApiKey",
|
|
5678
|
-
"vercelAiGatewayApiKey"
|
|
6137
|
+
"vercelAiGatewayApiKey",
|
|
6138
|
+
"basetenApiKey"
|
|
5679
6139
|
];
|
|
5680
6140
|
var GLOBAL_SECRET_KEYS = [
|
|
5681
6141
|
"openRouterImageApiKey"
|
|
@@ -5940,6 +6400,9 @@ var ExtensionBridgeEventName = ((ExtensionBridgeEventName2) => {
|
|
|
5940
6400
|
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskPaused"] = "taskPaused" /* TaskPaused */] = "TaskPaused";
|
|
5941
6401
|
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskUnpaused"] = "taskUnpaused" /* TaskUnpaused */] = "TaskUnpaused";
|
|
5942
6402
|
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskSpawned"] = "taskSpawned" /* TaskSpawned */] = "TaskSpawned";
|
|
6403
|
+
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegated"] = "taskDelegated" /* TaskDelegated */] = "TaskDelegated";
|
|
6404
|
+
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegationCompleted"] = "taskDelegationCompleted" /* TaskDelegationCompleted */] = "TaskDelegationCompleted";
|
|
6405
|
+
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegationResumed"] = "taskDelegationResumed" /* TaskDelegationResumed */] = "TaskDelegationResumed";
|
|
5943
6406
|
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskUserMessage"] = "taskUserMessage" /* TaskUserMessage */] = "TaskUserMessage";
|
|
5944
6407
|
ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskTokenUsageUpdated"] = "taskTokenUsageUpdated" /* TaskTokenUsageUpdated */] = "TaskTokenUsageUpdated";
|
|
5945
6408
|
ExtensionBridgeEventName2[ExtensionBridgeEventName2["ModeChanged"] = "modeChanged" /* ModeChanged */] = "ModeChanged";
|
|
@@ -6015,6 +6478,21 @@ var extensionBridgeEventSchema = import_zod16.z.discriminatedUnion("type", [
|
|
|
6015
6478
|
instance: extensionInstanceSchema,
|
|
6016
6479
|
timestamp: import_zod16.z.number()
|
|
6017
6480
|
}),
|
|
6481
|
+
import_zod16.z.object({
|
|
6482
|
+
type: import_zod16.z.literal(ExtensionBridgeEventName.TaskDelegated),
|
|
6483
|
+
instance: extensionInstanceSchema,
|
|
6484
|
+
timestamp: import_zod16.z.number()
|
|
6485
|
+
}),
|
|
6486
|
+
import_zod16.z.object({
|
|
6487
|
+
type: import_zod16.z.literal(ExtensionBridgeEventName.TaskDelegationCompleted),
|
|
6488
|
+
instance: extensionInstanceSchema,
|
|
6489
|
+
timestamp: import_zod16.z.number()
|
|
6490
|
+
}),
|
|
6491
|
+
import_zod16.z.object({
|
|
6492
|
+
type: import_zod16.z.literal(ExtensionBridgeEventName.TaskDelegationResumed),
|
|
6493
|
+
instance: extensionInstanceSchema,
|
|
6494
|
+
timestamp: import_zod16.z.number()
|
|
6495
|
+
}),
|
|
6018
6496
|
import_zod16.z.object({
|
|
6019
6497
|
type: import_zod16.z.literal(ExtensionBridgeEventName.TaskUserMessage),
|
|
6020
6498
|
instance: extensionInstanceSchema,
|
|
@@ -6205,12 +6683,27 @@ var followUpDataSchema = import_zod17.z.object({
|
|
|
6205
6683
|
|
|
6206
6684
|
// src/image-generation.ts
|
|
6207
6685
|
var IMAGE_GENERATION_MODELS = [
|
|
6208
|
-
|
|
6209
|
-
{ value: "google/gemini-
|
|
6210
|
-
{ value: "
|
|
6211
|
-
{ value: "openai/gpt-5-image
|
|
6686
|
+
// OpenRouter models
|
|
6687
|
+
{ value: "google/gemini-2.5-flash-image", label: "Gemini 2.5 Flash Image", provider: "openrouter" },
|
|
6688
|
+
{ value: "google/gemini-3-pro-image-preview", label: "Gemini 3 Pro Image Preview", provider: "openrouter" },
|
|
6689
|
+
{ value: "openai/gpt-5-image", label: "GPT-5 Image", provider: "openrouter" },
|
|
6690
|
+
{ value: "openai/gpt-5-image-mini", label: "GPT-5 Image Mini", provider: "openrouter" },
|
|
6691
|
+
{ value: "black-forest-labs/flux.2-flex", label: "Black Forest Labs FLUX.2 Flex", provider: "openrouter" },
|
|
6692
|
+
{ value: "black-forest-labs/flux.2-pro", label: "Black Forest Labs FLUX.2 Pro", provider: "openrouter" },
|
|
6693
|
+
// Roo Code Cloud models
|
|
6694
|
+
{ value: "google/gemini-2.5-flash-image", label: "Gemini 2.5 Flash Image", provider: "roo" },
|
|
6695
|
+
{ value: "google/gemini-3-pro-image", label: "Gemini 3 Pro Image", provider: "roo" },
|
|
6696
|
+
{
|
|
6697
|
+
value: "bfl/flux-2-pro:free",
|
|
6698
|
+
label: "Black Forest Labs FLUX.2 Pro (Free)",
|
|
6699
|
+
provider: "roo",
|
|
6700
|
+
apiMethod: "images_api"
|
|
6701
|
+
}
|
|
6212
6702
|
];
|
|
6213
6703
|
var IMAGE_GENERATION_MODEL_IDS = IMAGE_GENERATION_MODELS.map((m) => m.value);
|
|
6704
|
+
function getImageGenerationProvider(explicitProvider, hasExistingModel) {
|
|
6705
|
+
return explicitProvider !== void 0 ? explicitProvider : hasExistingModel ? "openrouter" : "roo";
|
|
6706
|
+
}
|
|
6214
6707
|
|
|
6215
6708
|
// src/ipc.ts
|
|
6216
6709
|
var import_zod18 = require("zod");
|
|
@@ -6446,6 +6939,8 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
|
|
|
6446
6939
|
anthropicModels,
|
|
6447
6940
|
appPropertiesSchema,
|
|
6448
6941
|
azureOpenAiDefaultApiVersion,
|
|
6942
|
+
basetenDefaultModelId,
|
|
6943
|
+
basetenModels,
|
|
6449
6944
|
bedrockDefaultModelId,
|
|
6450
6945
|
bedrockDefaultPromptRouterModelId,
|
|
6451
6946
|
bedrockModels,
|
|
@@ -6501,6 +6996,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
|
|
|
6501
6996
|
getApiProtocol,
|
|
6502
6997
|
getClaudeCodeModelId,
|
|
6503
6998
|
getEffectiveProtocol,
|
|
6999
|
+
getImageGenerationProvider,
|
|
6504
7000
|
getModelId,
|
|
6505
7001
|
getProviderDefaultModelId,
|
|
6506
7002
|
gitPropertiesSchema,
|