@roo-code/types 1.87.0 → 1.88.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +514 -61
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +4273 -740
- package/dist/index.d.ts +4273 -740
- package/dist/index.js +511 -61
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -66,6 +66,7 @@ var clineSays = [
   "shell_integration_warning",
   "browser_action",
   "browser_action_result",
+  "browser_session_status",
   "mcp_server_request_started",
   "mcp_server_response",
   "subtask_result",
@@ -130,6 +131,8 @@ var toolNames = [
   "write_to_file",
   "apply_diff",
   "insert_content",
+  "search_and_replace",
+  "apply_patch",
   "search_files",
   "list_files",
   "list_code_definition_names",
@@ -180,6 +183,9 @@ var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
   RooCodeEventName2["TaskPaused"] = "taskPaused";
   RooCodeEventName2["TaskUnpaused"] = "taskUnpaused";
   RooCodeEventName2["TaskSpawned"] = "taskSpawned";
+  RooCodeEventName2["TaskDelegated"] = "taskDelegated";
+  RooCodeEventName2["TaskDelegationCompleted"] = "taskDelegationCompleted";
+  RooCodeEventName2["TaskDelegationResumed"] = "taskDelegationResumed";
   RooCodeEventName2["Message"] = "message";
   RooCodeEventName2["TaskModeSwitched"] = "taskModeSwitched";
   RooCodeEventName2["TaskAskResponded"] = "taskAskResponded";
@@ -213,6 +219,26 @@ var rooCodeEventsSchema = z3.object({
   ["taskPaused" /* TaskPaused */]: z3.tuple([z3.string()]),
   ["taskUnpaused" /* TaskUnpaused */]: z3.tuple([z3.string()]),
   ["taskSpawned" /* TaskSpawned */]: z3.tuple([z3.string(), z3.string()]),
+  ["taskDelegated" /* TaskDelegated */]: z3.tuple([
+    z3.string(),
+    // parentTaskId
+    z3.string()
+    // childTaskId
+  ]),
+  ["taskDelegationCompleted" /* TaskDelegationCompleted */]: z3.tuple([
+    z3.string(),
+    // parentTaskId
+    z3.string(),
+    // childTaskId
+    z3.string()
+    // completionResultSummary
+  ]),
+  ["taskDelegationResumed" /* TaskDelegationResumed */]: z3.tuple([
+    z3.string(),
+    // parentTaskId
+    z3.string()
+    // childTaskId
+  ]),
   ["message" /* Message */]: z3.tuple([
     z3.object({
       taskId: z3.string(),
@@ -297,6 +323,21 @@ var taskEventSchema = z3.discriminatedUnion("eventName", [
     payload: rooCodeEventsSchema.shape["taskSpawned" /* TaskSpawned */],
     taskId: z3.number().optional()
   }),
+  z3.object({
+    eventName: z3.literal("taskDelegated" /* TaskDelegated */),
+    payload: rooCodeEventsSchema.shape["taskDelegated" /* TaskDelegated */],
+    taskId: z3.number().optional()
+  }),
+  z3.object({
+    eventName: z3.literal("taskDelegationCompleted" /* TaskDelegationCompleted */),
+    payload: rooCodeEventsSchema.shape["taskDelegationCompleted" /* TaskDelegationCompleted */],
+    taskId: z3.number().optional()
+  }),
+  z3.object({
+    eventName: z3.literal("taskDelegationResumed" /* TaskDelegationResumed */),
+    payload: rooCodeEventsSchema.shape["taskDelegationResumed" /* TaskDelegationResumed */],
+    taskId: z3.number().optional()
+  }),
   // Task Execution
   z3.object({
     eventName: z3.literal("message" /* Message */),
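The three delegation events added above carry plain string tuples (parent task ID, child task ID, and, for completion, a result summary). A minimal TypeScript sketch of validating and reading one such event — it assumes `RooCodeEventName` and `taskEventSchema` are exported from the package root, and the payload values are made up:

```ts
import { RooCodeEventName, taskEventSchema } from "@roo-code/types"

// Validate an event received over the wire against the schema shown above.
const event = taskEventSchema.parse({
  eventName: RooCodeEventName.TaskDelegationCompleted,
  payload: ["parent-task-id", "child-task-id", "Child task finished; summary text here."],
})

if (event.eventName === RooCodeEventName.TaskDelegationCompleted) {
  // Tuple order per the schema comments: parentTaskId, childTaskId, completionResultSummary.
  const [parentTaskId, childTaskId, completionResultSummary] = event.payload
  console.log(`${childTaskId} -> ${parentTaskId}: ${completionResultSummary}`)
}
```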
@@ -415,6 +456,13 @@ var modelInfoSchema = z5.object({
   supportsNativeTools: z5.boolean().optional(),
   // Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
   defaultToolProtocol: z5.enum(["xml", "native"]).optional(),
+  // Exclude specific native tools from being available (only applies to native protocol)
+  // These tools will be removed from the set of tools available to the model
+  excludedTools: z5.array(z5.string()).optional(),
+  // Include specific native tools (only applies to native protocol)
+  // These tools will be added if they belong to an allowed group in the current mode
+  // Cannot force-add tools from groups the mode doesn't allow
+  includedTools: z5.array(z5.string()).optional(),
   /**
    * Service tiers with pricing information.
    * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
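A sketch of how the new `excludedTools`/`includedTools` fields sit alongside the existing model-info fields. This assumes `modelInfoSchema` is exported from the package root; the tool names and numbers are illustrative, and the exact set of required fields is defined by the schema itself:

```ts
import { modelInfoSchema } from "@roo-code/types"

// Illustrative only: field names come from the schema above, values are placeholders.
const customModelInfo = modelInfoSchema.parse({
  contextWindow: 200000,
  supportsPromptCache: false,
  supportsNativeTools: true,
  defaultToolProtocol: "native",
  excludedTools: ["browser_action"],     // removed from the native tool set
  includedTools: ["search_and_replace"], // added only if its group is allowed in the current mode
})
```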
@@ -448,7 +496,16 @@ var CODEBASE_INDEX_DEFAULTS = {
 var codebaseIndexConfigSchema = z6.object({
   codebaseIndexEnabled: z6.boolean().optional(),
   codebaseIndexQdrantUrl: z6.string().optional(),
-  codebaseIndexEmbedderProvider: z6.enum([
+  codebaseIndexEmbedderProvider: z6.enum([
+    "openai",
+    "ollama",
+    "openai-compatible",
+    "gemini",
+    "mistral",
+    "vercel-ai-gateway",
+    "bedrock",
+    "openrouter"
+  ]).optional(),
   codebaseIndexEmbedderBaseUrl: z6.string().optional(),
   codebaseIndexEmbedderModelId: z6.string().optional(),
   codebaseIndexEmbedderModelDimension: z6.number().optional(),
@@ -456,7 +513,10 @@ var codebaseIndexConfigSchema = z6.object({
   codebaseIndexSearchMaxResults: z6.number().min(CODEBASE_INDEX_DEFAULTS.MIN_SEARCH_RESULTS).max(CODEBASE_INDEX_DEFAULTS.MAX_SEARCH_RESULTS).optional(),
   // OpenAI Compatible specific fields
   codebaseIndexOpenAiCompatibleBaseUrl: z6.string().optional(),
-  codebaseIndexOpenAiCompatibleModelDimension: z6.number().optional()
+  codebaseIndexOpenAiCompatibleModelDimension: z6.number().optional(),
+  // Bedrock specific fields
+  codebaseIndexBedrockRegion: z6.string().optional(),
+  codebaseIndexBedrockProfile: z6.string().optional()
 });
 var codebaseIndexModelsSchema = z6.object({
   openai: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
@@ -465,7 +525,8 @@ var codebaseIndexModelsSchema = z6.object({
   gemini: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
   mistral: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
   "vercel-ai-gateway": z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
-  openrouter: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional()
+  openrouter: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
+  bedrock: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional()
 });
 var codebaseIndexProviderSchema = z6.object({
   codeIndexOpenAiKey: z6.string().optional(),
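The codebase-index config now accepts "bedrock" as an embedder provider plus two Bedrock-specific fields. A hedged sketch of a config object that passes the schema above — all field names come from the diff, while the URL, region, profile, and model ID values are placeholders (this also assumes `codebaseIndexConfigSchema` is exported from the package root):

```ts
import { codebaseIndexConfigSchema } from "@roo-code/types"

// Sketch of a Bedrock-backed indexing config; every field in this schema is optional.
const indexConfig = codebaseIndexConfigSchema.parse({
  codebaseIndexEnabled: true,
  codebaseIndexQdrantUrl: "http://localhost:6333",
  codebaseIndexEmbedderProvider: "bedrock",
  codebaseIndexEmbedderModelId: "amazon.titan-embed-text-v2:0",
  // Bedrock specific fields added in 1.88.0
  codebaseIndexBedrockRegion: "us-east-1",
  codebaseIndexBedrockProfile: "default",
})
```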
@@ -489,6 +550,7 @@ var anthropicModels = {
     // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens (≤200K context)
     outputPrice: 15,
@@ -521,6 +583,7 @@ var anthropicModels = {
     // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens (≤200K context)
     outputPrice: 15,
@@ -546,12 +609,30 @@ var anthropicModels = {
       }
     ]
   },
+  "claude-opus-4-5-20251101": {
+    maxTokens: 32e3,
+    // Overridden to 8k if `enableReasoningEffort` is false.
+    contextWindow: 2e5,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsNativeTools: true,
+    inputPrice: 5,
+    // $5 per million input tokens
+    outputPrice: 25,
+    // $25 per million output tokens
+    cacheWritesPrice: 6.25,
+    // $6.25 per million tokens
+    cacheReadsPrice: 0.5,
+    // $0.50 per million tokens
+    supportsReasoningBudget: true
+  },
   "claude-opus-4-1-20250805": {
     maxTokens: 32e3,
     // Overridden to 8k if `enableReasoningEffort` is false.
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     // $15 per million input tokens
     outputPrice: 75,
@@ -568,6 +649,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     // $15 per million input tokens
     outputPrice: 75,
@@ -584,6 +666,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens
     outputPrice: 15,
@@ -601,6 +684,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens
     outputPrice: 15,
@@ -615,6 +699,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     // $3 per million input tokens
     outputPrice: 15,
@@ -629,6 +714,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 1,
     outputPrice: 5,
     cacheWritesPrice: 1.25,
@@ -639,6 +725,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15,
     outputPrice: 75,
     cacheWritesPrice: 18.75,
@@ -649,6 +736,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.25,
     outputPrice: 1.25,
     cacheWritesPrice: 0.3,
@@ -659,6 +747,7 @@ var anthropicModels = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 1,
     outputPrice: 5,
     cacheWritesPrice: 1.25,
@@ -669,6 +758,125 @@ var anthropicModels = {
 };
 var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;
 
+// src/providers/baseten.ts
+var basetenModels = {
+  "moonshotai/Kimi-K2-Thinking": {
+    maxTokens: 163800,
+    contextWindow: 262e3,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Kimi K2 Thinking - A model with enhanced reasoning capabilities from Kimi K2"
+  },
+  "zai-org/GLM-4.6": {
+    maxTokens: 2e5,
+    contextWindow: 2e5,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.6,
+    outputPrice: 2.2,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Frontier open model with advanced agentic, reasoning and coding capabilities"
+  },
+  "deepseek-ai/DeepSeek-R1": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 2.55,
+    outputPrice: 5.95,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "DeepSeek's first-generation reasoning model"
+  },
+  "deepseek-ai/DeepSeek-R1-0528": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 2.55,
+    outputPrice: 5.95,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "The latest revision of DeepSeek's first-generation reasoning model"
+  },
+  "deepseek-ai/DeepSeek-V3-0324": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.77,
+    outputPrice: 0.77,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Fast general-purpose LLM with enhanced reasoning capabilities"
+  },
+  "deepseek-ai/DeepSeek-V3.1": {
+    maxTokens: 131072,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.5,
+    outputPrice: 1.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Extremely capable general-purpose LLM with hybrid reasoning capabilities and advanced tool calling"
+  },
+  "Qwen/Qwen3-235B-A22B-Instruct-2507": {
+    maxTokens: 262144,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.22,
+    outputPrice: 0.8,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Mixture-of-experts LLM with math and reasoning capabilities"
+  },
+  "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
+    maxTokens: 262144,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.38,
+    outputPrice: 1.53,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
+  },
+  "openai/gpt-oss-120b": {
+    maxTokens: 128072,
+    contextWindow: 128072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.1,
+    outputPrice: 0.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
+  },
+  "moonshotai/Kimi-K2-Instruct-0905": {
+    maxTokens: 168e3,
+    contextWindow: 262e3,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "State of the art language model for agentic and coding tasks. September Update."
+  }
+};
+var basetenDefaultModelId = "zai-org/GLM-4.6";
+
 // src/providers/bedrock.ts
 var bedrockDefaultModelId = "anthropic.claude-sonnet-4-5-20250929-v1:0";
 var bedrockDefaultPromptRouterModelId = "anthropic.claude-3-sonnet-20240229-v1:0";
@@ -773,6 +981,20 @@ var bedrockModels = {
     maxCachePoints: 4,
     cachableFields: ["system", "messages", "tools"]
   },
+  "anthropic.claude-opus-4-5-20251101-v1:0": {
+    maxTokens: 8192,
+    contextWindow: 2e5,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningBudget: true,
+    inputPrice: 5,
+    outputPrice: 25,
+    cacheWritesPrice: 6.25,
+    cacheReadsPrice: 0.5,
+    minTokensPerCachePoint: 1024,
+    maxCachePoints: 4,
+    cachableFields: ["system", "messages", "tools"]
+  },
   "anthropic.claude-opus-4-20250514-v1:0": {
     maxTokens: 8192,
     contextWindow: 2e5,
@@ -1115,7 +1337,8 @@ var BEDROCK_1M_CONTEXT_MODEL_IDS = [
 var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
   "anthropic.claude-sonnet-4-20250514-v1:0",
   "anthropic.claude-sonnet-4-5-20250929-v1:0",
-  "anthropic.claude-haiku-4-5-20251001-v1:0"
+  "anthropic.claude-haiku-4-5-20251001-v1:0",
+  "anthropic.claude-opus-4-5-20251101-v1:0"
 ];
 
 // src/providers/cerebras.ts
@@ -1131,24 +1354,6 @@ var cerebrasModels = {
     outputPrice: 0,
     description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
   },
-  "qwen-3-coder-480b-free": {
-    maxTokens: 4e4,
-    contextWindow: 64e3,
-    supportsImages: false,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0,
-    description: "[SOON TO BE DEPRECATED] SOTA coding model with ~2000 tokens/s ($0 free tier)\n\n\u2022 Use this if you don't have a Cerebras subscription\n\u2022 64K context window\n\u2022 Rate limits: 150K TPM, 1M TPH/TPD, 10 RPM, 100 RPH/RPD\n\nUpgrade for higher limits: [https://cloud.cerebras.ai/?utm=roocode](https://cloud.cerebras.ai/?utm=roocode)"
-  },
-  "qwen-3-coder-480b": {
-    maxTokens: 4e4,
-    contextWindow: 128e3,
-    supportsImages: false,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0,
-    description: "[SOON TO BE DEPRECATED] SOTA coding model with ~2000 tokens/s ($50/$250 paid tiers)\n\n\u2022 Use this if you have a Cerebras subscription\n\u2022 131K context window with higher rate limits"
-  },
   "qwen-3-235b-a22b-instruct-2507": {
     maxTokens: 64e3,
     contextWindow: 64e3,
@@ -1176,16 +1381,6 @@ var cerebrasModels = {
     outputPrice: 0,
     description: "SOTA coding performance with ~2500 tokens/s"
   },
-  "qwen-3-235b-a22b-thinking-2507": {
-    maxTokens: 4e4,
-    contextWindow: 65e3,
-    supportsImages: false,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0,
-    description: "SOTA performance with ~1500 tokens/s",
-    supportsReasoningEffort: true
-  },
   "gpt-oss-120b": {
     maxTokens: 8e3,
     contextWindow: 64e3,
@@ -1584,7 +1779,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-sonnet-4-5-20250929[1m]": {
     ...anthropicModels["claude-sonnet-4-5"],
@@ -1595,7 +1793,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-sonnet-4-20250514": {
     ...anthropicModels["claude-sonnet-4-20250514"],
@@ -1604,7 +1805,22 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
+  },
+  "claude-opus-4-5-20251101": {
+    ...anthropicModels["claude-opus-4-5-20251101"],
+    supportsImages: false,
+    supportsPromptCache: true,
+    // Claude Code does report cache tokens
+    supportsReasoningEffort: false,
+    supportsReasoningBudget: false,
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-opus-4-1-20250805": {
     ...anthropicModels["claude-opus-4-1-20250805"],
@@ -1613,7 +1829,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-opus-4-20250514": {
     ...anthropicModels["claude-opus-4-20250514"],
@@ -1622,7 +1841,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-3-7-sonnet-20250219": {
     ...anthropicModels["claude-3-7-sonnet-20250219"],
@@ -1631,7 +1853,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-3-5-sonnet-20241022": {
     ...anthropicModels["claude-3-5-sonnet-20241022"],
@@ -1640,7 +1865,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-3-5-haiku-20241022": {
     ...anthropicModels["claude-3-5-haiku-20241022"],
@@ -1649,7 +1877,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   },
   "claude-haiku-4-5-20251001": {
     ...anthropicModels["claude-haiku-4-5-20251001"],
@@ -1658,7 +1889,10 @@ var claudeCodeModels = {
     // Claude Code does report cache tokens
     supportsReasoningEffort: false,
     supportsReasoningBudget: false,
-    requiredReasoningBudget: false
+    requiredReasoningBudget: false,
+    // Claude Code manages its own tools and temperature via the CLI
+    supportsNativeTools: false,
+    supportsTemperature: false
   }
 };
 
@@ -1671,6 +1905,7 @@ var deepSeekModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.56,
     // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
     outputPrice: 1.68,
@@ -1687,6 +1922,7 @@ var deepSeekModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.56,
     // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
     outputPrice: 1.68,
@@ -1708,6 +1944,7 @@ var doubaoModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 1e-4,
     // $0.0001 per million tokens (cache miss)
     outputPrice: 4e-4,
@@ -1723,6 +1960,7 @@ var doubaoModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 2e-4,
     // $0.0002 per million tokens
     outputPrice: 8e-4,
@@ -1738,6 +1976,7 @@ var doubaoModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 15e-5,
     // $0.00015 per million tokens
     outputPrice: 6e-4,
@@ -1778,6 +2017,7 @@ var featherlessModels = {
     contextWindow: 32678,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "Kimi K2 Instruct model."
@@ -1796,6 +2036,7 @@ var featherlessModels = {
     contextWindow: 32678,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     description: "Qwen3 Coder 480B A35B Instruct model."
@@ -1811,6 +2052,7 @@ var fireworksModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     cacheReadsPrice: 0.15,
@@ -1821,6 +2063,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -1830,6 +2073,7 @@ var fireworksModels = {
     contextWindow: 204800,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.3,
     outputPrice: 1.2,
     description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -1839,6 +2083,7 @@ var fireworksModels = {
     contextWindow: 256e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.22,
     outputPrice: 0.88,
     description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -1848,6 +2093,7 @@ var fireworksModels = {
     contextWindow: 256e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.45,
     outputPrice: 1.8,
     description: "Qwen3's most agentic code model to date."
@@ -1857,6 +2103,7 @@ var fireworksModels = {
     contextWindow: 16e4,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 8,
     description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -1866,6 +2113,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.9,
     outputPrice: 0.9,
     description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -1875,6 +2123,7 @@ var fireworksModels = {
     contextWindow: 163840,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.56,
     outputPrice: 1.68,
     description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
@@ -1884,6 +2133,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.55,
     outputPrice: 2.19,
     description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -1893,6 +2143,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.55,
     outputPrice: 2.19,
     description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -1902,6 +2153,7 @@ var fireworksModels = {
     contextWindow: 198e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.55,
     outputPrice: 2.19,
     description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
@@ -1911,6 +2163,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.07,
     outputPrice: 0.3,
     description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
@@ -1920,6 +2173,7 @@ var fireworksModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.15,
     outputPrice: 0.6,
     description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
@@ -2159,6 +2413,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.05,
     outputPrice: 0.08,
     description: "Meta Llama 3.1 8B Instant model, 128K context."
@@ -2168,6 +2423,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.59,
     outputPrice: 0.79,
     description: "Meta Llama 3.3 70B Versatile model, 128K context."
@@ -2177,6 +2433,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.11,
     outputPrice: 0.34,
     description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
@@ -2213,6 +2470,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.29,
     outputPrice: 0.59,
     description: "Alibaba Qwen 3 32B model, 128K context."
@@ -2242,6 +2500,7 @@ var groqModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     cacheReadsPrice: 0.15,
@@ -2252,6 +2511,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.15,
     outputPrice: 0.75,
     description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
@@ -2261,6 +2521,7 @@ var groqModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.5,
     description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
@@ -2287,6 +2548,7 @@ var ioIntelligenceModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "DeepSeek R1 reasoning model"
   },
   "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
@@ -2294,6 +2556,7 @@ var ioIntelligenceModels = {
     contextWindow: 43e4,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "Llama 4 Maverick 17B model"
   },
   "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
@@ -2301,6 +2564,7 @@ var ioIntelligenceModels = {
     contextWindow: 106e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "Qwen3 Coder 480B specialized for coding"
   },
   "openai/gpt-oss-120b": {
@@ -2308,6 +2572,7 @@ var ioIntelligenceModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     description: "OpenAI GPT-OSS 120B model"
   }
 };
@@ -2348,75 +2613,84 @@ var mistralModels = {
     contextWindow: 128e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 5
   },
   "devstral-medium-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.4,
     outputPrice: 2
   },
   "mistral-medium-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.4,
     outputPrice: 2
   },
   "codestral-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 256e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.3,
     outputPrice: 0.9
   },
   "mistral-large-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 6
   },
   "ministral-8b-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.1
   },
   "ministral-3b-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.04,
     outputPrice: 0.04
   },
   "mistral-small-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 32e3,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.2,
     outputPrice: 0.6
   },
   "pixtral-large-latest": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131e3,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 6
   }
 };
-var MISTRAL_DEFAULT_TEMPERATURE =
+var MISTRAL_DEFAULT_TEMPERATURE = 1;
 
 // src/providers/moonshot.ts
 var moonshotDefaultModelId = "kimi-k2-0905-preview";
@@ -2426,6 +2700,7 @@ var moonshotModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     // $0.60 per million tokens (cache miss)
     outputPrice: 2.5,
@@ -2441,6 +2716,7 @@ var moonshotModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 2.5,
     cacheReadsPrice: 0.15,
@@ -2451,6 +2727,7 @@ var moonshotModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 2.4,
     // $2.40 per million tokens (cache miss)
     outputPrice: 10,
@@ -2469,6 +2746,7 @@ var moonshotModels = {
     supportsImages: false,
     // Text-only (no image/vision support)
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     // $0.60 per million tokens (cache miss)
     outputPrice: 2.5,
@@ -2975,6 +3253,7 @@ var OPEN_ROUTER_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
   "anthropic/claude-opus-4",
   "anthropic/claude-opus-4.1",
   "anthropic/claude-haiku-4.5",
+  "anthropic/claude-opus-4.5",
   "google/gemini-2.5-flash-preview",
   "google/gemini-2.5-flash-preview:thinking",
   "google/gemini-2.5-flash-preview-05-20",
@@ -2996,6 +3275,7 @@ var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
   "anthropic/claude-opus-4.1",
   "anthropic/claude-sonnet-4",
   "anthropic/claude-sonnet-4.5",
+  "anthropic/claude-opus-4.5",
   "anthropic/claude-haiku-4.5",
   "google/gemini-2.5-pro-preview",
   "google/gemini-2.5-pro",
@@ -3086,6 +3366,7 @@ var sambaNovaModels = {
     contextWindow: 16384,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.2,
     description: "Meta Llama 3.1 8B Instruct model with 16K context window."
@@ -3095,6 +3376,7 @@ var sambaNovaModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 1.2,
     description: "Meta Llama 3.3 70B Instruct model with 128K context window."
@@ -3105,6 +3387,7 @@ var sambaNovaModels = {
     supportsImages: false,
     supportsPromptCache: false,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 5,
     outputPrice: 7,
     description: "DeepSeek R1 reasoning model with 32K context window."
@@ -3114,6 +3397,7 @@ var sambaNovaModels = {
     contextWindow: 32768,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 4.5,
     description: "DeepSeek V3 model with 32K context window."
@@ -3123,6 +3407,7 @@ var sambaNovaModels = {
     contextWindow: 32768,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 4.5,
     description: "DeepSeek V3.1 model with 32K context window."
@@ -3141,6 +3426,7 @@ var sambaNovaModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.63,
     outputPrice: 1.8,
     description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
@@ -3159,6 +3445,7 @@ var sambaNovaModels = {
     contextWindow: 8192,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.4,
     outputPrice: 0.8,
     description: "Alibaba Qwen 3 32B model with 8K context window."
@@ -3168,6 +3455,7 @@ var sambaNovaModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.22,
     outputPrice: 0.59,
     description: "OpenAI gpt oss 120b model with 128k context window."
@@ -3194,6 +3482,7 @@ var vertexModels = {
     maxTokens: 65536,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     supportsReasoningEffort: ["low", "high"],
     reasoningEffort: "low",
@@ -3218,6 +3507,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.15,
     outputPrice: 3.5,
@@ -3229,6 +3519,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.15,
     outputPrice: 0.6
@@ -3237,6 +3528,7 @@ var vertexModels = {
     maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.3,
     outputPrice: 2.5,
@@ -3249,6 +3541,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0.15,
     outputPrice: 3.5,
@@ -3260,6 +3553,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0.15,
     outputPrice: 0.6
@@ -3268,6 +3562,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15
@@ -3276,6 +3571,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15
@@ -3284,6 +3580,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15,
@@ -3294,6 +3591,7 @@ var vertexModels = {
     maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15,
@@ -3319,6 +3617,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0,
     outputPrice: 0
@@ -3327,6 +3626,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 2097152,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0,
     outputPrice: 0
@@ -3335,6 +3635,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.15,
     outputPrice: 0.6
@@ -3343,6 +3644,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0.075,
     outputPrice: 0.3
@@ -3351,6 +3653,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 32768,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0,
     outputPrice: 0
@@ -3359,6 +3662,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.075,
     outputPrice: 0.3
@@ -3367,6 +3671,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 2097152,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 1.25,
     outputPrice: 5
@@ -3404,6 +3709,17 @@ var vertexModels = {
     cacheReadsPrice: 0.1,
     supportsReasoningBudget: true
   },
+  "claude-opus-4-5@20251101": {
+    maxTokens: 8192,
+    contextWindow: 2e5,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 5,
+    outputPrice: 25,
+    cacheWritesPrice: 6.25,
+    cacheReadsPrice: 0.5,
+    supportsReasoningBudget: true
+  },
   "claude-opus-4-1@20250805": {
     maxTokens: 8192,
     contextWindow: 2e5,
@@ -3501,6 +3817,7 @@ var vertexModels = {
     maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.1,
     outputPrice: 0.4,
@@ -3802,17 +4119,67 @@ var xaiModels = {
|
|
|
3802
4119
|
contextWindow: 262144,
|
|
3803
4120
|
supportsImages: false,
|
|
3804
4121
|
supportsPromptCache: true,
|
|
4122
|
+
supportsNativeTools: true,
|
|
3805
4123
|
inputPrice: 0.2,
|
|
3806
4124
|
outputPrice: 1.5,
|
|
3807
4125
|
cacheWritesPrice: 0.02,
|
|
3808
4126
|
cacheReadsPrice: 0.02,
|
|
3809
4127
|
description: "xAI's Grok Code Fast model with 256K context window"
|
|
3810
4128
|
},
|
|
4129
|
+
"grok-4-1-fast-reasoning": {
|
|
4130
|
+
maxTokens: 65536,
|
|
4131
|
+
contextWindow: 2e6,
|
|
4132
|
+
supportsImages: true,
|
|
4133
|
+
supportsPromptCache: true,
|
|
4134
|
+
supportsNativeTools: true,
|
|
4135
|
+
inputPrice: 0.2,
|
|
4136
|
+
outputPrice: 0.5,
|
|
4137
|
+
cacheWritesPrice: 0.05,
|
|
4138
|
+
cacheReadsPrice: 0.05,
|
|
4139
|
+
description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
|
|
4140
|
+
},
|
|
4141
|
+
"grok-4-1-fast-non-reasoning": {
|
|
4142
|
+
maxTokens: 65536,
|
|
4143
|
+
contextWindow: 2e6,
|
|
4144
|
+
supportsImages: true,
|
|
4145
|
+
supportsPromptCache: true,
|
|
4146
|
+
supportsNativeTools: true,
|
|
4147
|
+
inputPrice: 0.2,
|
|
4148
|
+
outputPrice: 0.5,
|
|
4149
|
+
cacheWritesPrice: 0.05,
|
|
4150
|
+
cacheReadsPrice: 0.05,
|
|
4151
|
+
description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling"
|
|
4152
|
+
},
|
|
4153
|
+
"grok-4-fast-reasoning": {
|
|
4154
|
+
maxTokens: 65536,
|
|
4155
|
+
contextWindow: 2e6,
|
|
4156
|
+
supportsImages: true,
|
|
4157
|
+
supportsPromptCache: true,
|
|
4158
|
+
supportsNativeTools: true,
|
|
4159
|
+
inputPrice: 0.2,
|
|
4160
|
+
outputPrice: 0.5,
|
|
4161
|
+
cacheWritesPrice: 0.05,
|
|
4162
|
+
cacheReadsPrice: 0.05,
|
|
4163
|
+
description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
|
|
4164
|
+
},
|
|
4165
|
+
"grok-4-fast-non-reasoning": {
|
|
4166
|
+
maxTokens: 65536,
|
|
4167
|
+
contextWindow: 2e6,
|
|
4168
|
+
supportsImages: true,
|
|
4169
|
+
supportsPromptCache: true,
|
|
4170
|
+
supportsNativeTools: true,
|
|
4171
|
+
inputPrice: 0.2,
|
|
4172
|
+
outputPrice: 0.5,
|
|
4173
|
+
cacheWritesPrice: 0.05,
|
|
4174
|
+
cacheReadsPrice: 0.05,
|
|
4175
|
+
description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling"
|
|
4176
|
+
},
|
|
3811
4177
|
"grok-4": {
|
|
3812
4178
|
maxTokens: 8192,
|
|
3813
4179
|
contextWindow: 256e3,
|
|
3814
4180
|
supportsImages: true,
|
|
3815
4181
|
supportsPromptCache: true,
|
|
4182
|
+
supportsNativeTools: true,
|
|
3816
4183
|
inputPrice: 3,
|
|
3817
4184
|
outputPrice: 15,
|
|
3818
4185
|
cacheWritesPrice: 0.75,
|
|
@@ -3824,6 +4191,7 @@ var xaiModels = {
|
|
|
3824
4191
|
contextWindow: 131072,
|
|
3825
4192
|
supportsImages: false,
|
|
3826
4193
|
supportsPromptCache: true,
|
|
4194
|
+
supportsNativeTools: true,
|
|
3827
4195
|
inputPrice: 3,
|
|
3828
4196
|
outputPrice: 15,
|
|
3829
4197
|
cacheWritesPrice: 0.75,
|
|
@@ -3835,6 +4203,7 @@ var xaiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 5,
 outputPrice: 25,
 cacheWritesPrice: 1.25,
@@ -3846,6 +4215,7 @@ var xaiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.3,
 outputPrice: 0.5,
 cacheWritesPrice: 0.07,
@@ -3858,6 +4228,7 @@ var xaiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 4,
 cacheWritesPrice: 0.15,
@@ -3870,6 +4241,7 @@ var xaiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
 inputPrice: 2,
 outputPrice: 10,
 description: "xAI's Grok-2 model (version 1212) with 128K context window"
@@ -3879,6 +4251,7 @@ var xaiModels = {
 contextWindow: 32768,
 supportsImages: true,
 supportsPromptCache: false,
+supportsNativeTools: true,
 inputPrice: 2,
 outputPrice: 10,
 description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window"
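The xAI hunks above add four Grok 4 / Grok 4.1 Fast entries and set supportsNativeTools: true on the existing models. A minimal consumer-side sketch, assuming xaiModels is still exported from @roo-code/types as in earlier releases (this diff only shows the Baseten and image-generation exports explicitly):

```ts
import { xaiModels } from "@roo-code/types"

// List the xAI model ids that advertise native tool calling after this release.
const nativeToolModelIds = Object.entries(xaiModels)
  .filter(([, info]) => (info as { supportsNativeTools?: boolean }).supportsNativeTools === true)
  .map(([id]) => id)

console.log(nativeToolModelIds.includes("grok-4-1-fast-reasoning")) // expected: true
console.log(xaiModels["grok-4-1-fast-reasoning"].contextWindow)     // 2e6, per the entry above
```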
@@ -3987,6 +4360,7 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 supportsReasoningBinary: true,
 inputPrice: 0.6,
 outputPrice: 2.2,
@@ -3999,6 +4373,7 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.2,
 outputPrice: 1.1,
 cacheWritesPrice: 0,
@@ -4010,6 +4385,7 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 2.2,
 outputPrice: 8.9,
 cacheWritesPrice: 0,
@@ -4021,6 +4397,7 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 1.1,
 outputPrice: 4.5,
 cacheWritesPrice: 0,
@@ -4032,6 +4409,7 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 cacheWritesPrice: 0,
@@ -4043,6 +4421,7 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: true,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.6,
 outputPrice: 1.8,
 cacheWritesPrice: 0,
@@ -4054,6 +4433,7 @@ var internationalZAiModels = {
 contextWindow: 2e5,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 supportsReasoningBinary: true,
 inputPrice: 0.6,
 outputPrice: 2.2,
@@ -4066,6 +4446,7 @@ var internationalZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
 inputPrice: 0.1,
 outputPrice: 0.1,
 cacheWritesPrice: 0,
@@ -4080,6 +4461,7 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 supportsReasoningBinary: true,
 inputPrice: 0.29,
 outputPrice: 1.14,
@@ -4092,6 +4474,7 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.1,
 outputPrice: 0.6,
 cacheWritesPrice: 0,
@@ -4103,6 +4486,7 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.29,
 outputPrice: 1.14,
 cacheWritesPrice: 0,
@@ -4114,6 +4498,7 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.1,
 outputPrice: 0.6,
 cacheWritesPrice: 0,
@@ -4125,6 +4510,7 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0,
 outputPrice: 0,
 cacheWritesPrice: 0,
@@ -4136,6 +4522,7 @@ var mainlandZAiModels = {
 contextWindow: 131072,
 supportsImages: true,
 supportsPromptCache: true,
+supportsNativeTools: true,
 inputPrice: 0.29,
 outputPrice: 0.93,
 cacheWritesPrice: 0,
@@ -4147,6 +4534,7 @@ var mainlandZAiModels = {
 contextWindow: 204800,
 supportsImages: false,
 supportsPromptCache: true,
+supportsNativeTools: true,
 supportsReasoningBinary: true,
 inputPrice: 0.29,
 outputPrice: 1.14,
@@ -4176,6 +4564,7 @@ var deepInfraDefaultModelInfo = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
 inputPrice: 0.3,
 outputPrice: 1.2,
 description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
@@ -4236,6 +4625,8 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
 return "meta-llama/Llama-3.3-70B-Instruct";
 case "chutes":
 return chutesDefaultModelId;
+case "baseten":
+return basetenDefaultModelId;
 case "bedrock":
 return bedrockDefaultModelId;
 case "vertex":
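With the new case "baseten" branch, the default-model helper resolves Baseten directly. Both identifiers used in this sketch appear in the export hunks at the end of this diff, and "baseten" is one of the providerNames values added below:

```ts
import { getProviderDefaultModelId, basetenDefaultModelId } from "@roo-code/types"

// The new branch returns the package-level Baseten default model id.
const modelId = getProviderDefaultModelId("baseten")
console.log(modelId === basetenDefaultModelId) // expected: true
```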
@@ -4329,6 +4720,7 @@ var providerNames = [
 ...fauxProviders,
 "anthropic",
 "bedrock",
+"baseten",
 "cerebras",
 "claude-code",
 "doubao",
@@ -4568,6 +4960,9 @@ var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
 vercelAiGatewayApiKey: z8.string().optional(),
 vercelAiGatewayModelId: z8.string().optional()
 });
+var basetenSchema = apiModelIdProviderModelSchema.extend({
+basetenApiKey: z8.string().optional()
+});
 var defaultSchema = z8.object({
 apiProvider: z8.undefined()
 });
@@ -4597,6 +4992,7 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
 fakeAiSchema.merge(z8.object({ apiProvider: z8.literal("fake-ai") })),
 xaiSchema.merge(z8.object({ apiProvider: z8.literal("xai") })),
 groqSchema.merge(z8.object({ apiProvider: z8.literal("groq") })),
+basetenSchema.merge(z8.object({ apiProvider: z8.literal("baseten") })),
 huggingFaceSchema.merge(z8.object({ apiProvider: z8.literal("huggingface") })),
 chutesSchema.merge(z8.object({ apiProvider: z8.literal("chutes") })),
 litellmSchema.merge(z8.object({ apiProvider: z8.literal("litellm") })),
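A sketch of how the new basetenSchema branch behaves inside the discriminated union, assuming providerSettingsSchemaDiscriminated is exported as in previous versions; the basetenApiKey value is a placeholder and apiModelId comes from the apiModelIdProviderModelSchema base that basetenSchema extends:

```ts
import { providerSettingsSchemaDiscriminated } from "@roo-code/types"

const result = providerSettingsSchemaDiscriminated.safeParse({
  apiProvider: "baseten",
  basetenApiKey: "placeholder-key", // optional; persisted as a secret ("basetenApiKey" in SECRET_STATE_KEYS)
  apiModelId: "example-model-id",   // optional; hypothetical id used only for illustration
})

console.log(result.success) // expected: true — the "baseten" literal selects the basetenSchema branch
```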
@@ -4638,6 +5034,7 @@ var providerSettingsSchema = z8.object({
 ...fakeAiSchema.shape,
 ...xaiSchema.shape,
 ...groqSchema.shape,
+...basetenSchema.shape,
 ...huggingFaceSchema.shape,
 ...chutesSchema.shape,
 ...litellmSchema.shape,
@@ -4701,6 +5098,7 @@ var modelIdKeysByProvider = {
 requesty: "requestyModelId",
 xai: "apiModelId",
 groq: "apiModelId",
+baseten: "apiModelId",
 chutes: "apiModelId",
 litellm: "litellmModelId",
 huggingface: "huggingFaceModelId",
@@ -4808,7 +5206,8 @@ var MODELS_BY_PROVIDER = {
 models: Object.keys(vscodeLlmModels)
 },
 xai: { id: "xai", label: "xAI (Grok)", models: Object.keys(xaiModels) },
-zai: { id: "zai", label: "
+zai: { id: "zai", label: "Z.ai", models: Object.keys(internationalZAiModels) },
+baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
 // Dynamic providers; models pulled from remote APIs.
 glama: { id: "glama", label: "Glama", models: [] },
 huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
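The static catalog gains a Baseten entry next to the relabelled Z.ai provider. A small sketch, assuming MODELS_BY_PROVIDER is exported by the package:

```ts
import { MODELS_BY_PROVIDER } from "@roo-code/types"

console.log(MODELS_BY_PROVIDER.zai.label)     // "Z.ai"
console.log(MODELS_BY_PROVIDER.baseten.label) // "Baseten"
// Static providers list their model ids up front; dynamic providers stay empty.
console.log(MODELS_BY_PROVIDER.baseten.models) // Object.keys(basetenModels)
console.log(MODELS_BY_PROVIDER.glama.models)   // []
```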
@@ -4840,7 +5239,18 @@ var historyItemSchema = z9.object({
 totalCost: z9.number(),
 size: z9.number().optional(),
 workspace: z9.string().optional(),
-mode: z9.string().optional()
+mode: z9.string().optional(),
+status: z9.enum(["active", "completed", "delegated"]).optional(),
+delegatedToId: z9.string().optional(),
+// Last child this parent delegated to
+childIds: z9.array(z9.string()).optional(),
+// All children spawned by this task
+awaitingChildId: z9.string().optional(),
+// Child currently awaited (set when delegated)
+completedByChildId: z9.string().optional(),
+// Child that completed and resumed this parent
+completionResultSummary: z9.string().optional()
+// Summary from completed child
 });

 // src/experiment.ts
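The history item now tracks subtask delegation. A sketch of the new fields, assuming historyItemSchema is exported as before; only the delegation-related keys are exercised (the ids are hypothetical), using zod's pick so the required base fields can be left out:

```ts
import { historyItemSchema } from "@roo-code/types"

const delegationShape = historyItemSchema.pick({
  status: true,
  delegatedToId: true,
  childIds: true,
  awaitingChildId: true,
  completionResultSummary: true,
})

// A parent task that is currently waiting on a delegated child.
const parentSnapshot = delegationShape.parse({
  status: "delegated",
  delegatedToId: "child-task-id",
  childIds: ["child-task-id"],
  awaitingChildId: "child-task-id",
})

console.log(parentSnapshot.status) // "delegated"
```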
@@ -4850,7 +5260,8 @@ var experimentIds = [
 "multiFileApplyDiff",
 "preventFocusDisruption",
 "imageGeneration",
-"runSlashCommand"
+"runSlashCommand",
+"multipleNativeToolCalls"
 ];
 var experimentIdsSchema = z10.enum(experimentIds);
 var experimentsSchema = z10.object({
@@ -4858,7 +5269,8 @@ var experimentsSchema = z10.object({
 multiFileApplyDiff: z10.boolean().optional(),
 preventFocusDisruption: z10.boolean().optional(),
 imageGeneration: z10.boolean().optional(),
-runSlashCommand: z10.boolean().optional()
+runSlashCommand: z10.boolean().optional(),
+multipleNativeToolCalls: z10.boolean().optional()
 });

 // src/telemetry.ts
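The experiments schema picks up the multipleNativeToolCalls flag alongside the existing runSlashCommand entry. A minimal sketch, assuming experimentsSchema is exported; every flag shown in this hunk is an optional boolean:

```ts
import { experimentsSchema } from "@roo-code/types"

const experiments = experimentsSchema.parse({
  runSlashCommand: true,
  multipleNativeToolCalls: true, // new experiment id in this release
})

console.log(experiments.multipleNativeToolCalls) // true
```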
@@ -4908,6 +5320,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
 TelemetryEventName2["CONSECUTIVE_MISTAKE_ERROR"] = "Consecutive Mistake Error";
 TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
 TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
+TelemetryEventName2["MODEL_CACHE_EMPTY_RESPONSE"] = "Model Cache Empty Response";
 return TelemetryEventName2;
 })(TelemetryEventName || {});
 var staticAppPropertiesSchema = z11.object({
@@ -4991,6 +5404,7 @@ var rooCodeTelemetryEventSchema = z11.discriminatedUnion("type", [
 "Shell Integration Error" /* SHELL_INTEGRATION_ERROR */,
 "Consecutive Mistake Error" /* CONSECUTIVE_MISTAKE_ERROR */,
 "Code Index Error" /* CODE_INDEX_ERROR */,
+"Model Cache Empty Response" /* MODEL_CACHE_EMPTY_RESPONSE */,
 "Context Condensed" /* CONTEXT_CONDENSED */,
 "Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */,
 "Tab Shown" /* TAB_SHOWN */,
@@ -5151,8 +5565,6 @@ var terminalActionIds = ["terminalAddToContext", "terminalFixCommand", "terminal
 var commandIds = [
 "activationCompleted",
 "plusButtonClicked",
-"promptsButtonClicked",
-"mcpButtonClicked",
 "historyButtonClicked",
 "marketplaceButtonClicked",
 "popoutButtonClicked",
@@ -5209,6 +5621,7 @@ var globalSettingsSchema = z14.object({
 taskHistory: z14.array(historyItemSchema).optional(),
 dismissedUpsells: z14.array(z14.string()).optional(),
 // Image generation settings (experimental) - flattened for simplicity
+imageGenerationProvider: z14.enum(["openrouter", "roo"]).optional(),
 openRouterImageApiKey: z14.string().optional(),
 openRouterImageGenerationSelectedModel: z14.string().optional(),
 condensingApiConfigId: z14.string().optional(),
@@ -5360,7 +5773,8 @@ var SECRET_STATE_KEYS = [
 "fireworksApiKey",
 "featherlessApiKey",
 "ioIntelligenceApiKey",
-"vercelAiGatewayApiKey"
+"vercelAiGatewayApiKey",
+"basetenApiKey"
 ];
 var GLOBAL_SECRET_KEYS = [
 "openRouterImageApiKey"
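basetenApiKey joins the secret-state keys, which is what keeps the new provider's credential in secret storage rather than plain settings. A one-line sketch, assuming SECRET_STATE_KEYS is exported:

```ts
import { SECRET_STATE_KEYS } from "@roo-code/types"

console.log(SECRET_STATE_KEYS.includes("basetenApiKey")) // expected: true
```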
@@ -5625,6 +6039,9 @@ var ExtensionBridgeEventName = ((ExtensionBridgeEventName2) => {
 ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskPaused"] = "taskPaused" /* TaskPaused */] = "TaskPaused";
 ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskUnpaused"] = "taskUnpaused" /* TaskUnpaused */] = "TaskUnpaused";
 ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskSpawned"] = "taskSpawned" /* TaskSpawned */] = "TaskSpawned";
+ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegated"] = "taskDelegated" /* TaskDelegated */] = "TaskDelegated";
+ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegationCompleted"] = "taskDelegationCompleted" /* TaskDelegationCompleted */] = "TaskDelegationCompleted";
+ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegationResumed"] = "taskDelegationResumed" /* TaskDelegationResumed */] = "TaskDelegationResumed";
 ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskUserMessage"] = "taskUserMessage" /* TaskUserMessage */] = "TaskUserMessage";
 ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskTokenUsageUpdated"] = "taskTokenUsageUpdated" /* TaskTokenUsageUpdated */] = "TaskTokenUsageUpdated";
 ExtensionBridgeEventName2[ExtensionBridgeEventName2["ModeChanged"] = "modeChanged" /* ModeChanged */] = "ModeChanged";
@@ -5700,6 +6117,21 @@ var extensionBridgeEventSchema = z16.discriminatedUnion("type", [
 instance: extensionInstanceSchema,
 timestamp: z16.number()
 }),
+z16.object({
+type: z16.literal(ExtensionBridgeEventName.TaskDelegated),
+instance: extensionInstanceSchema,
+timestamp: z16.number()
+}),
+z16.object({
+type: z16.literal(ExtensionBridgeEventName.TaskDelegationCompleted),
+instance: extensionInstanceSchema,
+timestamp: z16.number()
+}),
+z16.object({
+type: z16.literal(ExtensionBridgeEventName.TaskDelegationResumed),
+instance: extensionInstanceSchema,
+timestamp: z16.number()
+}),
 z16.object({
 type: z16.literal(ExtensionBridgeEventName.TaskUserMessage),
 instance: extensionInstanceSchema,
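The bridge event union gains three delegation events, each carrying the same instance plus timestamp payload as the surrounding task lifecycle events. A handling sketch, assuming ExtensionBridgeEventName and extensionBridgeEventSchema are both exported and that raw is an untrusted message off the bridge:

```ts
import { ExtensionBridgeEventName, extensionBridgeEventSchema } from "@roo-code/types"

function handleBridgeMessage(raw: unknown) {
  const event = extensionBridgeEventSchema.parse(raw)

  if (
    event.type === ExtensionBridgeEventName.TaskDelegated ||
    event.type === ExtensionBridgeEventName.TaskDelegationCompleted ||
    event.type === ExtensionBridgeEventName.TaskDelegationResumed
  ) {
    // All three delegation events expose the emitting instance and a timestamp.
    console.log(event.type, event.timestamp, event.instance)
  }
}
```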
@@ -5890,12 +6322,27 @@ var followUpDataSchema = z17.object({

 // src/image-generation.ts
 var IMAGE_GENERATION_MODELS = [
-
-{ value: "google/gemini-
-{ value: "
-{ value: "openai/gpt-5-image
+// OpenRouter models
+{ value: "google/gemini-2.5-flash-image", label: "Gemini 2.5 Flash Image", provider: "openrouter" },
+{ value: "google/gemini-3-pro-image-preview", label: "Gemini 3 Pro Image Preview", provider: "openrouter" },
+{ value: "openai/gpt-5-image", label: "GPT-5 Image", provider: "openrouter" },
+{ value: "openai/gpt-5-image-mini", label: "GPT-5 Image Mini", provider: "openrouter" },
+{ value: "black-forest-labs/flux.2-flex", label: "Black Forest Labs FLUX.2 Flex", provider: "openrouter" },
+{ value: "black-forest-labs/flux.2-pro", label: "Black Forest Labs FLUX.2 Pro", provider: "openrouter" },
+// Roo Code Cloud models
+{ value: "google/gemini-2.5-flash-image", label: "Gemini 2.5 Flash Image", provider: "roo" },
+{ value: "google/gemini-3-pro-image", label: "Gemini 3 Pro Image", provider: "roo" },
+{
+value: "bfl/flux-2-pro:free",
+label: "Black Forest Labs FLUX.2 Pro (Free)",
+provider: "roo",
+apiMethod: "images_api"
+}
 ];
 var IMAGE_GENERATION_MODEL_IDS = IMAGE_GENERATION_MODELS.map((m) => m.value);
+function getImageGenerationProvider(explicitProvider, hasExistingModel) {
+return explicitProvider !== void 0 ? explicitProvider : hasExistingModel ? "openrouter" : "roo";
+}

 // src/ipc.ts
 import { z as z18 } from "zod";
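The image model list is now provider-aware, and getImageGenerationProvider encodes the migration rule: an explicit choice wins, an existing OpenRouter model selection keeps "openrouter", and a fresh setup defaults to "roo". A short sketch, assuming IMAGE_GENERATION_MODELS remains exported alongside the newly exported getImageGenerationProvider:

```ts
import { IMAGE_GENERATION_MODELS, getImageGenerationProvider } from "@roo-code/types"

// No explicit setting and no previously selected model -> Roo Code Cloud.
const provider = getImageGenerationProvider(undefined, false)
console.log(provider) // "roo"

// Filter the catalog down to the models served by that provider.
const available = IMAGE_GENERATION_MODELS.filter((model) => model.provider === provider)
console.log(available.map((model) => model.value))
// ["google/gemini-2.5-flash-image", "google/gemini-3-pro-image", "bfl/flux-2-pro:free"]
```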
@@ -6130,6 +6577,8 @@ export {
 anthropicModels,
 appPropertiesSchema,
 azureOpenAiDefaultApiVersion,
+basetenDefaultModelId,
+basetenModels,
 bedrockDefaultModelId,
 bedrockDefaultPromptRouterModelId,
 bedrockModels,
@@ -6185,6 +6634,7 @@ export {
 getApiProtocol,
 getClaudeCodeModelId,
 getEffectiveProtocol,
+getImageGenerationProvider,
 getModelId,
 getProviderDefaultModelId,
 gitPropertiesSchema,