@roo-code/types 1.87.0 → 1.89.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +557 -61
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +4314 -740
- package/dist/index.d.ts +4314 -740
- package/dist/index.js +554 -61
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -66,6 +66,7 @@ var clineSays = [
  "shell_integration_warning",
  "browser_action",
  "browser_action_result",
+ "browser_session_status",
  "mcp_server_request_started",
  "mcp_server_response",
  "subtask_result",
@@ -130,6 +131,8 @@ var toolNames = [
  "write_to_file",
  "apply_diff",
  "insert_content",
+ "search_and_replace",
+ "apply_patch",
  "search_files",
  "list_files",
  "list_code_definition_names",
@@ -180,6 +183,9 @@ var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
  RooCodeEventName2["TaskPaused"] = "taskPaused";
  RooCodeEventName2["TaskUnpaused"] = "taskUnpaused";
  RooCodeEventName2["TaskSpawned"] = "taskSpawned";
+ RooCodeEventName2["TaskDelegated"] = "taskDelegated";
+ RooCodeEventName2["TaskDelegationCompleted"] = "taskDelegationCompleted";
+ RooCodeEventName2["TaskDelegationResumed"] = "taskDelegationResumed";
  RooCodeEventName2["Message"] = "message";
  RooCodeEventName2["TaskModeSwitched"] = "taskModeSwitched";
  RooCodeEventName2["TaskAskResponded"] = "taskAskResponded";
@@ -213,6 +219,26 @@ var rooCodeEventsSchema = z3.object({
  ["taskPaused" /* TaskPaused */]: z3.tuple([z3.string()]),
  ["taskUnpaused" /* TaskUnpaused */]: z3.tuple([z3.string()]),
  ["taskSpawned" /* TaskSpawned */]: z3.tuple([z3.string(), z3.string()]),
+ ["taskDelegated" /* TaskDelegated */]: z3.tuple([
+ z3.string(),
+ // parentTaskId
+ z3.string()
+ // childTaskId
+ ]),
+ ["taskDelegationCompleted" /* TaskDelegationCompleted */]: z3.tuple([
+ z3.string(),
+ // parentTaskId
+ z3.string(),
+ // childTaskId
+ z3.string()
+ // completionResultSummary
+ ]),
+ ["taskDelegationResumed" /* TaskDelegationResumed */]: z3.tuple([
+ z3.string(),
+ // parentTaskId
+ z3.string()
+ // childTaskId
+ ]),
  ["message" /* Message */]: z3.tuple([
  z3.object({
  taskId: z3.string(),
@@ -297,6 +323,21 @@ var taskEventSchema = z3.discriminatedUnion("eventName", [
  payload: rooCodeEventsSchema.shape["taskSpawned" /* TaskSpawned */],
  taskId: z3.number().optional()
  }),
+ z3.object({
+ eventName: z3.literal("taskDelegated" /* TaskDelegated */),
+ payload: rooCodeEventsSchema.shape["taskDelegated" /* TaskDelegated */],
+ taskId: z3.number().optional()
+ }),
+ z3.object({
+ eventName: z3.literal("taskDelegationCompleted" /* TaskDelegationCompleted */),
+ payload: rooCodeEventsSchema.shape["taskDelegationCompleted" /* TaskDelegationCompleted */],
+ taskId: z3.number().optional()
+ }),
+ z3.object({
+ eventName: z3.literal("taskDelegationResumed" /* TaskDelegationResumed */),
+ payload: rooCodeEventsSchema.shape["taskDelegationResumed" /* TaskDelegationResumed */],
+ taskId: z3.number().optional()
+ }),
  // Task Execution
  z3.object({
  eventName: z3.literal("message" /* Message */),
@@ -415,6 +456,13 @@ var modelInfoSchema = z5.object({
  supportsNativeTools: z5.boolean().optional(),
  // Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
  defaultToolProtocol: z5.enum(["xml", "native"]).optional(),
+ // Exclude specific native tools from being available (only applies to native protocol)
+ // These tools will be removed from the set of tools available to the model
+ excludedTools: z5.array(z5.string()).optional(),
+ // Include specific native tools (only applies to native protocol)
+ // These tools will be added if they belong to an allowed group in the current mode
+ // Cannot force-add tools from groups the mode doesn't allow
+ includedTools: z5.array(z5.string()).optional(),
  /**
  * Service tiers with pricing information.
  * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
@@ -448,7 +496,16 @@ var CODEBASE_INDEX_DEFAULTS = {
  var codebaseIndexConfigSchema = z6.object({
  codebaseIndexEnabled: z6.boolean().optional(),
  codebaseIndexQdrantUrl: z6.string().optional(),
- codebaseIndexEmbedderProvider: z6.enum([
+ codebaseIndexEmbedderProvider: z6.enum([
+ "openai",
+ "ollama",
+ "openai-compatible",
+ "gemini",
+ "mistral",
+ "vercel-ai-gateway",
+ "bedrock",
+ "openrouter"
+ ]).optional(),
  codebaseIndexEmbedderBaseUrl: z6.string().optional(),
  codebaseIndexEmbedderModelId: z6.string().optional(),
  codebaseIndexEmbedderModelDimension: z6.number().optional(),
@@ -456,7 +513,10 @@ var codebaseIndexConfigSchema = z6.object({
  codebaseIndexSearchMaxResults: z6.number().min(CODEBASE_INDEX_DEFAULTS.MIN_SEARCH_RESULTS).max(CODEBASE_INDEX_DEFAULTS.MAX_SEARCH_RESULTS).optional(),
  // OpenAI Compatible specific fields
  codebaseIndexOpenAiCompatibleBaseUrl: z6.string().optional(),
- codebaseIndexOpenAiCompatibleModelDimension: z6.number().optional()
+ codebaseIndexOpenAiCompatibleModelDimension: z6.number().optional(),
+ // Bedrock specific fields
+ codebaseIndexBedrockRegion: z6.string().optional(),
+ codebaseIndexBedrockProfile: z6.string().optional()
  });
  var codebaseIndexModelsSchema = z6.object({
  openai: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
@@ -465,7 +525,8 @@ var codebaseIndexModelsSchema = z6.object({
  gemini: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
  mistral: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
  "vercel-ai-gateway": z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
- openrouter: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional()
+ openrouter: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
+ bedrock: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional()
  });
  var codebaseIndexProviderSchema = z6.object({
  codeIndexOpenAiKey: z6.string().optional(),
@@ -489,6 +550,7 @@ var anthropicModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -521,6 +583,7 @@ var anthropicModels = {
  // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  // $3 per million input tokens (≤200K context)
  outputPrice: 15,
@@ -546,12 +609,30 @@ var anthropicModels = {
  }
  ]
  },
+ "claude-opus-4-5-20251101": {
+ maxTokens: 32e3,
+ // Overridden to 8k if `enableReasoningEffort` is false.
+ contextWindow: 2e5,
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsNativeTools: true,
+ inputPrice: 5,
+ // $5 per million input tokens
+ outputPrice: 25,
+ // $25 per million output tokens
+ cacheWritesPrice: 6.25,
+ // $6.25 per million tokens
+ cacheReadsPrice: 0.5,
+ // $0.50 per million tokens
+ supportsReasoningBudget: true
+ },
  "claude-opus-4-1-20250805": {
  maxTokens: 32e3,
  // Overridden to 8k if `enableReasoningEffort` is false.
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 15,
  // $15 per million input tokens
  outputPrice: 75,
@@ -568,6 +649,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 15,
  // $15 per million input tokens
  outputPrice: 75,
@@ -584,6 +666,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  // $3 per million input tokens
  outputPrice: 15,
@@ -601,6 +684,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  // $3 per million input tokens
  outputPrice: 15,
@@ -615,6 +699,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  // $3 per million input tokens
  outputPrice: 15,
@@ -629,6 +714,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -639,6 +725,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -649,6 +736,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.25,
  outputPrice: 1.25,
  cacheWritesPrice: 0.3,
@@ -659,6 +747,7 @@ var anthropicModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -669,6 +758,125 @@ var anthropicModels = {
  };
  var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;

+ // src/providers/baseten.ts
+ var basetenModels = {
+ "moonshotai/Kimi-K2-Thinking": {
+ maxTokens: 163800,
+ contextWindow: 262e3,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Kimi K2 Thinking - A model with enhanced reasoning capabilities from Kimi K2"
+ },
+ "zai-org/GLM-4.6": {
+ maxTokens: 2e5,
+ contextWindow: 2e5,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.6,
+ outputPrice: 2.2,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Frontier open model with advanced agentic, reasoning and coding capabilities"
+ },
+ "deepseek-ai/DeepSeek-R1": {
+ maxTokens: 131072,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 2.55,
+ outputPrice: 5.95,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "DeepSeek's first-generation reasoning model"
+ },
+ "deepseek-ai/DeepSeek-R1-0528": {
+ maxTokens: 131072,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 2.55,
+ outputPrice: 5.95,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "The latest revision of DeepSeek's first-generation reasoning model"
+ },
+ "deepseek-ai/DeepSeek-V3-0324": {
+ maxTokens: 131072,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.77,
+ outputPrice: 0.77,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Fast general-purpose LLM with enhanced reasoning capabilities"
+ },
+ "deepseek-ai/DeepSeek-V3.1": {
+ maxTokens: 131072,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.5,
+ outputPrice: 1.5,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Extremely capable general-purpose LLM with hybrid reasoning capabilities and advanced tool calling"
+ },
+ "Qwen/Qwen3-235B-A22B-Instruct-2507": {
+ maxTokens: 262144,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.22,
+ outputPrice: 0.8,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Mixture-of-experts LLM with math and reasoning capabilities"
+ },
+ "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
+ maxTokens: 262144,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.38,
+ outputPrice: 1.53,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
+ },
+ "openai/gpt-oss-120b": {
+ maxTokens: 128072,
+ contextWindow: 128072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.1,
+ outputPrice: 0.5,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
+ },
+ "moonshotai/Kimi-K2-Instruct-0905": {
+ maxTokens: 168e3,
+ contextWindow: 262e3,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "State of the art language model for agentic and coding tasks. September Update."
+ }
+ };
+ var basetenDefaultModelId = "zai-org/GLM-4.6";
+
  // src/providers/bedrock.ts
  var bedrockDefaultModelId = "anthropic.claude-sonnet-4-5-20250929-v1:0";
  var bedrockDefaultPromptRouterModelId = "anthropic.claude-3-sonnet-20240229-v1:0";
@@ -679,6 +887,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -692,6 +901,7 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.8,
  outputPrice: 3.2,
  cacheWritesPrice: 0.8,
@@ -707,6 +917,7 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 4,
  cacheWritesPrice: 1,
@@ -720,6 +931,7 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.06,
  outputPrice: 0.24,
  cacheWritesPrice: 0.06,
@@ -735,6 +947,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.035,
  outputPrice: 0.14,
  cacheWritesPrice: 0.035,
@@ -751,6 +964,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -765,6 +979,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -773,12 +988,28 @@ var bedrockModels = {
  maxCachePoints: 4,
  cachableFields: ["system", "messages", "tools"]
  },
+ "anthropic.claude-opus-4-5-20251101-v1:0": {
+ maxTokens: 8192,
+ contextWindow: 2e5,
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningBudget: true,
+ supportsNativeTools: true,
+ inputPrice: 5,
+ outputPrice: 25,
+ cacheWritesPrice: 6.25,
+ cacheReadsPrice: 0.5,
+ minTokensPerCachePoint: 1024,
+ maxCachePoints: 4,
+ cachableFields: ["system", "messages", "tools"]
+ },
  "anthropic.claude-opus-4-20250514-v1:0": {
  maxTokens: 8192,
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -793,6 +1024,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -806,6 +1038,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -819,6 +1052,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.8,
  outputPrice: 4,
  cacheWritesPrice: 1,
@@ -833,6 +1067,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -848,6 +1083,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15
  },
@@ -856,6 +1092,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 15,
  outputPrice: 75
  },
@@ -864,6 +1101,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15
  },
@@ -872,6 +1110,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.25,
  outputPrice: 1.25
  },
@@ -880,6 +1119,7 @@ var bedrockModels = {
  contextWindow: 1e5,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 8,
  outputPrice: 24,
  description: "Claude 2.1"
@@ -889,6 +1129,7 @@ var bedrockModels = {
  contextWindow: 1e5,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 8,
  outputPrice: 24,
  description: "Claude 2.0"
@@ -898,6 +1139,7 @@ var bedrockModels = {
  contextWindow: 1e5,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.8,
  outputPrice: 2.4,
  description: "Claude Instant"
@@ -907,6 +1149,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1.35,
  outputPrice: 5.4
  },
@@ -915,6 +1158,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.5,
  outputPrice: 1.5,
  description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases"
@@ -924,6 +1168,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2,
  outputPrice: 6,
  description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model"
@@ -933,6 +1178,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.3 Instruct (70B)"
@@ -942,6 +1188,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.2 Instruct (90B)"
@@ -951,6 +1198,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.16,
  outputPrice: 0.16,
  description: "Llama 3.2 Instruct (11B)"
@@ -960,6 +1208,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.15,
  description: "Llama 3.2 Instruct (3B)"
@@ -969,6 +1218,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.1,
  outputPrice: 0.1,
  description: "Llama 3.2 Instruct (1B)"
@@ -978,6 +1228,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.4,
  outputPrice: 2.4,
  description: "Llama 3.1 Instruct (405B)"
@@ -987,6 +1238,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.1 Instruct (70B)"
@@ -996,6 +1248,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.9,
  outputPrice: 0.9,
  description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
@@ -1005,6 +1258,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.22,
  outputPrice: 0.22,
  description: "Llama 3.1 Instruct (8B)"
@@ -1014,6 +1268,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.65,
  outputPrice: 3.5
  },
@@ -1022,6 +1277,7 @@ var bedrockModels = {
  contextWindow: 4e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.3,
  outputPrice: 0.6
  },
@@ -1030,6 +1286,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.2,
  description: "Amazon Titan Text Lite"
@@ -1039,6 +1296,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.2,
  outputPrice: 0.6,
  description: "Amazon Titan Text Express"
@@ -1115,7 +1373,8 @@ var BEDROCK_1M_CONTEXT_MODEL_IDS = [
  var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
  "anthropic.claude-sonnet-4-20250514-v1:0",
  "anthropic.claude-sonnet-4-5-20250929-v1:0",
- "anthropic.claude-haiku-4-5-20251001-v1:0"
+ "anthropic.claude-haiku-4-5-20251001-v1:0",
+ "anthropic.claude-opus-4-5-20251101-v1:0"
  ];

  // src/providers/cerebras.ts
@@ -1127,33 +1386,17 @@ var cerebrasModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
  },
- "qwen-3-coder-480b-free": {
- maxTokens: 4e4,
- contextWindow: 64e3,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0,
- outputPrice: 0,
- description: "[SOON TO BE DEPRECATED] SOTA coding model with ~2000 tokens/s ($0 free tier)\n\n\u2022 Use this if you don't have a Cerebras subscription\n\u2022 64K context window\n\u2022 Rate limits: 150K TPM, 1M TPH/TPD, 10 RPM, 100 RPH/RPD\n\nUpgrade for higher limits: [https://cloud.cerebras.ai/?utm=roocode](https://cloud.cerebras.ai/?utm=roocode)"
- },
- "qwen-3-coder-480b": {
- maxTokens: 4e4,
- contextWindow: 128e3,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0,
- outputPrice: 0,
- description: "[SOON TO BE DEPRECATED] SOTA coding model with ~2000 tokens/s ($50/$250 paid tiers)\n\n\u2022 Use this if you have a Cerebras subscription\n\u2022 131K context window with higher rate limits"
- },
  "qwen-3-235b-a22b-instruct-2507": {
  maxTokens: 64e3,
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Intelligent model with ~1400 tokens/s"
@@ -1163,6 +1406,7 @@ var cerebrasModels = {
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Powerful model with ~2600 tokens/s"
@@ -1172,25 +1416,17 @@ var cerebrasModels = {
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "SOTA coding performance with ~2500 tokens/s"
  },
- "qwen-3-235b-a22b-thinking-2507": {
- maxTokens: 4e4,
- contextWindow: 65e3,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0,
- outputPrice: 0,
- description: "SOTA performance with ~1500 tokens/s",
- supportsReasoningEffort: true
- },
  "gpt-oss-120b": {
  maxTokens: 8e3,
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -1584,7 +1820,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-sonnet-4-5-20250929[1m]": {
  ...anthropicModels["claude-sonnet-4-5"],
@@ -1595,7 +1834,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-sonnet-4-20250514": {
  ...anthropicModels["claude-sonnet-4-20250514"],
@@ -1604,7 +1846,22 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
+ },
+ "claude-opus-4-5-20251101": {
+ ...anthropicModels["claude-opus-4-5-20251101"],
+ supportsImages: false,
+ supportsPromptCache: true,
+ // Claude Code does report cache tokens
+ supportsReasoningEffort: false,
+ supportsReasoningBudget: false,
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-opus-4-1-20250805": {
  ...anthropicModels["claude-opus-4-1-20250805"],
@@ -1613,7 +1870,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-opus-4-20250514": {
  ...anthropicModels["claude-opus-4-20250514"],
@@ -1622,7 +1882,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-3-7-sonnet-20250219": {
  ...anthropicModels["claude-3-7-sonnet-20250219"],
@@ -1631,7 +1894,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-3-5-sonnet-20241022": {
  ...anthropicModels["claude-3-5-sonnet-20241022"],
@@ -1640,7 +1906,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-3-5-haiku-20241022": {
  ...anthropicModels["claude-3-5-haiku-20241022"],
@@ -1649,7 +1918,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  },
  "claude-haiku-4-5-20251001": {
  ...anthropicModels["claude-haiku-4-5-20251001"],
@@ -1658,7 +1930,10 @@ var claudeCodeModels = {
  // Claude Code does report cache tokens
  supportsReasoningEffort: false,
  supportsReasoningBudget: false,
- requiredReasoningBudget: false
+ requiredReasoningBudget: false,
+ // Claude Code manages its own tools and temperature via the CLI
+ supportsNativeTools: false,
+ supportsTemperature: false
  }
  };
@@ -1671,6 +1946,7 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.56,
  // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
  outputPrice: 1.68,
@@ -1687,6 +1963,7 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.56,
  // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
  outputPrice: 1.68,
@@ -1708,6 +1985,7 @@ var doubaoModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 1e-4,
  // $0.0001 per million tokens (cache miss)
  outputPrice: 4e-4,
@@ -1723,6 +2001,7 @@ var doubaoModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 2e-4,
  // $0.0002 per million tokens
  outputPrice: 8e-4,
@@ -1738,6 +2017,7 @@ var doubaoModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 15e-5,
  // $0.00015 per million tokens
  outputPrice: 6e-4,
@@ -1778,6 +2058,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Kimi K2 Instruct model."
@@ -1796,6 +2077,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Qwen3 Coder 480B A35B Instruct model."
@@ -1811,6 +2093,7 @@ var fireworksModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -1821,6 +2104,7 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 2.5,
  description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -1830,6 +2114,7 @@ var fireworksModels = {
  contextWindow: 204800,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.3,
  outputPrice: 1.2,
  description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -1839,6 +2124,7 @@ var fireworksModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.22,
  outputPrice: 0.88,
  description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -1848,6 +2134,7 @@ var fireworksModels = {
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.45,
  outputPrice: 1.8,
  description: "Qwen3's most agentic code model to date."
@@ -1857,6 +2144,7 @@ var fireworksModels = {
  contextWindow: 16e4,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 8,
  description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -1866,6 +2154,7 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.9,
  outputPrice: 0.9,
  description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -1875,6 +2164,7 @@ var fireworksModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.56,
  outputPrice: 1.68,
  description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
@@ -1884,6 +2174,7 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.55,
  outputPrice: 2.19,
  description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -1893,6 +2184,7 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.55,
  outputPrice: 2.19,
  description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -1902,6 +2194,7 @@ var fireworksModels = {
  contextWindow: 198e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.55,
  outputPrice: 2.19,
  description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
@@ -1911,6 +2204,7 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.07,
  outputPrice: 0.3,
  description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
@@ -1920,6 +2214,7 @@ var fireworksModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.6,
  description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
@@ -2159,6 +2454,7 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.05,
  outputPrice: 0.08,
  description: "Meta Llama 3.1 8B Instant model, 128K context."
@@ -2168,6 +2464,7 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.59,
  outputPrice: 0.79,
  description: "Meta Llama 3.3 70B Versatile model, 128K context."
@@ -2177,6 +2474,7 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.11,
  outputPrice: 0.34,
  description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
@@ -2213,6 +2511,7 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.29,
  outputPrice: 0.59,
  description: "Alibaba Qwen 3 32B model, 128K context."
@@ -2242,6 +2541,7 @@ var groqModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -2252,6 +2552,7 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.75,
  description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
@@ -2261,6 +2562,7 @@ var groqModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.1,
  outputPrice: 0.5,
  description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
@@ -2287,6 +2589,7 @@ var ioIntelligenceModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  description: "DeepSeek R1 reasoning model"
  },
  "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
@@ -2294,6 +2597,7 @@ var ioIntelligenceModels = {
  contextWindow: 43e4,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  description: "Llama 4 Maverick 17B model"
  },
  "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
@@ -2301,6 +2605,7 @@ var ioIntelligenceModels = {
  contextWindow: 106e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  description: "Qwen3 Coder 480B specialized for coding"
  },
  "openai/gpt-oss-120b": {
@@ -2308,6 +2613,7 @@ var ioIntelligenceModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  description: "OpenAI GPT-OSS 120B model"
  }
  };
@@ -2348,75 +2654,84 @@ var mistralModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2,
  outputPrice: 5
  },
  "devstral-medium-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.4,
  outputPrice: 2
  },
  "mistral-medium-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.4,
  outputPrice: 2
  },
  "codestral-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 256e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.3,
  outputPrice: 0.9
  },
  "mistral-large-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2,
  outputPrice: 6
  },
  "ministral-8b-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.1,
  outputPrice: 0.1
  },
  "ministral-3b-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 131e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.04,
  outputPrice: 0.04
  },
  "mistral-small-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 32e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.2,
  outputPrice: 0.6
  },
  "pixtral-large-latest": {
- maxTokens:
+ maxTokens: 8192,
  contextWindow: 131e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2,
  outputPrice: 6
  }
  };
- var MISTRAL_DEFAULT_TEMPERATURE =
+ var MISTRAL_DEFAULT_TEMPERATURE = 1;

  // src/providers/moonshot.ts
  var moonshotDefaultModelId = "kimi-k2-0905-preview";
@@ -2426,6 +2741,7 @@ var moonshotModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  // $0.60 per million tokens (cache miss)
  outputPrice: 2.5,
@@ -2441,6 +2757,7 @@ var moonshotModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 2.5,
  cacheReadsPrice: 0.15,
@@ -2451,6 +2768,7 @@ var moonshotModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 2.4,
  // $2.40 per million tokens (cache miss)
  outputPrice: 10,
@@ -2469,6 +2787,7 @@ var moonshotModels = {
  supportsImages: false,
  // Text-only (no image/vision support)
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  // $0.60 per million tokens (cache miss)
  outputPrice: 2.5,
@@ -2493,6 +2812,7 @@ var ollamaDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -2975,6 +3295,7 @@ var OPEN_ROUTER_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
   "anthropic/claude-opus-4",
   "anthropic/claude-opus-4.1",
   "anthropic/claude-haiku-4.5",
+  "anthropic/claude-opus-4.5",
   "google/gemini-2.5-flash-preview",
   "google/gemini-2.5-flash-preview:thinking",
   "google/gemini-2.5-flash-preview-05-20",
@@ -2996,6 +3317,7 @@ var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
   "anthropic/claude-opus-4.1",
   "anthropic/claude-sonnet-4",
   "anthropic/claude-sonnet-4.5",
+  "anthropic/claude-opus-4.5",
   "anthropic/claude-haiku-4.5",
   "google/gemini-2.5-pro-preview",
   "google/gemini-2.5-pro",
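Both OpenRouter capability sets gain "anthropic/claude-opus-4.5" here; a quick membership check, assuming the two Set constants are exported under the names shown, follows.

// Sketch only: assumes both sets are exported; the export is not visible in this hunk.
import { OPEN_ROUTER_PROMPT_CACHING_MODELS, OPEN_ROUTER_REASONING_BUDGET_MODELS } from "@roo-code/types"

console.log(OPEN_ROUTER_PROMPT_CACHING_MODELS.has("anthropic/claude-opus-4.5"))   // true in 1.89.0
console.log(OPEN_ROUTER_REASONING_BUDGET_MODELS.has("anthropic/claude-opus-4.5")) // true in 1.89.0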
@@ -3086,6 +3408,7 @@ var sambaNovaModels = {
     contextWindow: 16384,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.2,
     description: "Meta Llama 3.1 8B Instruct model with 16K context window."
@@ -3095,6 +3418,7 @@ var sambaNovaModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 1.2,
     description: "Meta Llama 3.3 70B Instruct model with 128K context window."
@@ -3105,6 +3429,7 @@ var sambaNovaModels = {
     supportsImages: false,
     supportsPromptCache: false,
     supportsReasoningBudget: true,
+    supportsNativeTools: true,
     inputPrice: 5,
     outputPrice: 7,
     description: "DeepSeek R1 reasoning model with 32K context window."
@@ -3114,6 +3439,7 @@ var sambaNovaModels = {
     contextWindow: 32768,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 4.5,
     description: "DeepSeek V3 model with 32K context window."
@@ -3123,6 +3449,7 @@ var sambaNovaModels = {
     contextWindow: 32768,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 4.5,
     description: "DeepSeek V3.1 model with 32K context window."
@@ -3141,6 +3468,7 @@ var sambaNovaModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.63,
     outputPrice: 1.8,
     description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
@@ -3159,6 +3487,7 @@ var sambaNovaModels = {
     contextWindow: 8192,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.4,
     outputPrice: 0.8,
     description: "Alibaba Qwen 3 32B model with 8K context window."
@@ -3168,6 +3497,7 @@ var sambaNovaModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.22,
     outputPrice: 0.59,
     description: "OpenAI gpt oss 120b model with 128k context window."
@@ -3181,6 +3511,7 @@ var unboundDefaultModelInfo = {
     contextWindow: 2e5,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 3.75,
@@ -3194,6 +3525,7 @@ var vertexModels = {
     maxTokens: 65536,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     supportsReasoningEffort: ["low", "high"],
     reasoningEffort: "low",
@@ -3218,6 +3550,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.15,
     outputPrice: 3.5,
@@ -3229,6 +3562,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.15,
     outputPrice: 0.6
@@ -3237,6 +3571,7 @@ var vertexModels = {
     maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.3,
     outputPrice: 2.5,
@@ -3249,6 +3584,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0.15,
     outputPrice: 3.5,
@@ -3260,6 +3596,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0.15,
     outputPrice: 0.6
@@ -3268,6 +3605,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15
@@ -3276,6 +3614,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15
@@ -3284,6 +3623,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15,
@@ -3294,6 +3634,7 @@ var vertexModels = {
     maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     outputPrice: 15,
@@ -3319,6 +3660,7 @@ var vertexModels = {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0,
     outputPrice: 0
@@ -3327,6 +3669,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 2097152,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0,
     outputPrice: 0
@@ -3335,6 +3678,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.15,
     outputPrice: 0.6
@@ -3343,6 +3687,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0.075,
     outputPrice: 0.3
@@ -3351,6 +3696,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 32768,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 0,
     outputPrice: 0
@@ -3359,6 +3705,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.075,
     outputPrice: 0.3
@@ -3367,6 +3714,7 @@ var vertexModels = {
     maxTokens: 8192,
     contextWindow: 2097152,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: false,
     inputPrice: 1.25,
     outputPrice: 5
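The Vertex entries above each gain a supportsNativeTools flag; a sketch that lists which Vertex model ids advertise it, assuming vertexModels is exported and its values carry the optional flag, follows.

// Sketch only: assumes `vertexModels` is exported; the flag is optional on each entry.
import { vertexModels } from "@roo-code/types"

const nativeToolIds = Object.entries(vertexModels)
  .filter(([, info]) => (info as { supportsNativeTools?: boolean }).supportsNativeTools === true)
  .map(([id]) => id)
console.log(nativeToolIds)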
@@ -3404,6 +3752,17 @@ var vertexModels = {
     cacheReadsPrice: 0.1,
     supportsReasoningBudget: true
   },
+  "claude-opus-4-5@20251101": {
+    maxTokens: 8192,
+    contextWindow: 2e5,
+    supportsImages: true,
+    supportsPromptCache: true,
+    inputPrice: 5,
+    outputPrice: 25,
+    cacheWritesPrice: 6.25,
+    cacheReadsPrice: 0.5,
+    supportsReasoningBudget: true
+  },
   "claude-opus-4-1@20250805": {
     maxTokens: 8192,
     contextWindow: 2e5,
@@ -3501,6 +3860,7 @@ var vertexModels = {
     maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.1,
     outputPrice: 0.4,
@@ -3802,17 +4162,67 @@ var xaiModels = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.2,
     outputPrice: 1.5,
     cacheWritesPrice: 0.02,
     cacheReadsPrice: 0.02,
     description: "xAI's Grok Code Fast model with 256K context window"
   },
+  "grok-4-1-fast-reasoning": {
+    maxTokens: 65536,
+    contextWindow: 2e6,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsNativeTools: true,
+    inputPrice: 0.2,
+    outputPrice: 0.5,
+    cacheWritesPrice: 0.05,
+    cacheReadsPrice: 0.05,
+    description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+  },
+  "grok-4-1-fast-non-reasoning": {
+    maxTokens: 65536,
+    contextWindow: 2e6,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsNativeTools: true,
+    inputPrice: 0.2,
+    outputPrice: 0.5,
+    cacheWritesPrice: 0.05,
+    cacheReadsPrice: 0.05,
+    description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+  },
+  "grok-4-fast-reasoning": {
+    maxTokens: 65536,
+    contextWindow: 2e6,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsNativeTools: true,
+    inputPrice: 0.2,
+    outputPrice: 0.5,
+    cacheWritesPrice: 0.05,
+    cacheReadsPrice: 0.05,
+    description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+  },
+  "grok-4-fast-non-reasoning": {
+    maxTokens: 65536,
+    contextWindow: 2e6,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsNativeTools: true,
+    inputPrice: 0.2,
+    outputPrice: 0.5,
+    cacheWritesPrice: 0.05,
+    cacheReadsPrice: 0.05,
+    description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+  },
   "grok-4": {
     maxTokens: 8192,
     contextWindow: 256e3,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 0.75,
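The new Grok 4.1 Fast entries carry a 2M-token context window and per-million-token prices; a rough cost estimate for one call, assuming xaiModels is exported and treating missing prices as zero, follows.

// Sketch only: assumes `xaiModels` is exported; prices are USD per 1M tokens as in the diff.
import { xaiModels } from "@roo-code/types"

const m = xaiModels["grok-4-1-fast-reasoning"]
const costUSD = (150_000 * (m.inputPrice ?? 0) + 4_000 * (m.outputPrice ?? 0)) / 1e6
console.log(costUSD.toFixed(4)) // 150k in at $0.2/M + 4k out at $0.5/M ≈ 0.0320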
@@ -3824,6 +4234,7 @@ var xaiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 3,
     outputPrice: 15,
     cacheWritesPrice: 0.75,
@@ -3835,6 +4246,7 @@ var xaiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 5,
     outputPrice: 25,
     cacheWritesPrice: 1.25,
@@ -3846,6 +4258,7 @@ var xaiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.3,
     outputPrice: 0.5,
     cacheWritesPrice: 0.07,
@@ -3858,6 +4271,7 @@ var xaiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 4,
     cacheWritesPrice: 0.15,
@@ -3870,6 +4284,7 @@ var xaiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 10,
     description: "xAI's Grok-2 model (version 1212) with 128K context window"
@@ -3879,6 +4294,7 @@ var xaiModels = {
     contextWindow: 32768,
     supportsImages: true,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 2,
     outputPrice: 10,
     description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window"
@@ -3987,6 +4403,7 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     supportsReasoningBinary: true,
     inputPrice: 0.6,
     outputPrice: 2.2,
@@ -3999,6 +4416,7 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.2,
     outputPrice: 1.1,
     cacheWritesPrice: 0,
@@ -4010,6 +4428,7 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 2.2,
     outputPrice: 8.9,
     cacheWritesPrice: 0,
@@ -4021,6 +4440,7 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 1.1,
     outputPrice: 4.5,
     cacheWritesPrice: 0,
@@ -4032,6 +4452,7 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     cacheWritesPrice: 0,
@@ -4043,6 +4464,7 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.6,
     outputPrice: 1.8,
     cacheWritesPrice: 0,
@@ -4054,6 +4476,7 @@ var internationalZAiModels = {
     contextWindow: 2e5,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     supportsReasoningBinary: true,
     inputPrice: 0.6,
     outputPrice: 2.2,
@@ -4066,6 +4489,7 @@ var internationalZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.1,
     cacheWritesPrice: 0,
@@ -4080,6 +4504,7 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     supportsReasoningBinary: true,
     inputPrice: 0.29,
     outputPrice: 1.14,
@@ -4092,6 +4517,7 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.6,
     cacheWritesPrice: 0,
@@ -4103,6 +4529,7 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.29,
     outputPrice: 1.14,
     cacheWritesPrice: 0,
@@ -4114,6 +4541,7 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.1,
     outputPrice: 0.6,
     cacheWritesPrice: 0,
@@ -4125,6 +4553,7 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0,
     outputPrice: 0,
     cacheWritesPrice: 0,
@@ -4136,6 +4565,7 @@ var mainlandZAiModels = {
     contextWindow: 131072,
     supportsImages: true,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     inputPrice: 0.29,
     outputPrice: 0.93,
     cacheWritesPrice: 0,
@@ -4147,6 +4577,7 @@ var mainlandZAiModels = {
     contextWindow: 204800,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
     supportsReasoningBinary: true,
     inputPrice: 0.29,
     outputPrice: 1.14,
@@ -4176,6 +4607,7 @@ var deepInfraDefaultModelInfo = {
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: false,
+    supportsNativeTools: true,
     inputPrice: 0.3,
     outputPrice: 1.2,
     description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
@@ -4236,6 +4668,8 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
       return "meta-llama/Llama-3.3-70B-Instruct";
     case "chutes":
       return chutesDefaultModelId;
+    case "baseten":
+      return basetenDefaultModelId;
     case "bedrock":
       return bedrockDefaultModelId;
     case "vertex":
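getProviderDefaultModelId now resolves the "baseten" provider; both names appear in this version's export list further down, so the call below only assumes the function accepts the new provider id.

import { getProviderDefaultModelId, basetenDefaultModelId } from "@roo-code/types"

// The new "baseten" branch returns the Baseten default model id.
console.log(getProviderDefaultModelId("baseten") === basetenDefaultModelId) // true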
@@ -4329,6 +4763,7 @@ var providerNames = [
   ...fauxProviders,
   "anthropic",
   "bedrock",
+  "baseten",
   "cerebras",
   "claude-code",
   "doubao",
@@ -4568,6 +5003,9 @@ var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
   vercelAiGatewayApiKey: z8.string().optional(),
   vercelAiGatewayModelId: z8.string().optional()
 });
+var basetenSchema = apiModelIdProviderModelSchema.extend({
+  basetenApiKey: z8.string().optional()
+});
 var defaultSchema = z8.object({
   apiProvider: z8.undefined()
 });
@@ -4597,6 +5035,7 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
   fakeAiSchema.merge(z8.object({ apiProvider: z8.literal("fake-ai") })),
   xaiSchema.merge(z8.object({ apiProvider: z8.literal("xai") })),
   groqSchema.merge(z8.object({ apiProvider: z8.literal("groq") })),
+  basetenSchema.merge(z8.object({ apiProvider: z8.literal("baseten") })),
   huggingFaceSchema.merge(z8.object({ apiProvider: z8.literal("huggingface") })),
   chutesSchema.merge(z8.object({ apiProvider: z8.literal("chutes") })),
   litellmSchema.merge(z8.object({ apiProvider: z8.literal("litellm") })),
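With basetenSchema merged into the discriminated union, a settings object can carry apiProvider "baseten" plus the optional basetenApiKey; a validation sketch, assuming providerSettingsSchemaDiscriminated is exported under the name used in the hunk header, follows.

// Sketch only: assumes `providerSettingsSchemaDiscriminated` is exported; field values are placeholders.
import { providerSettingsSchemaDiscriminated } from "@roo-code/types"

const result = providerSettingsSchemaDiscriminated.safeParse({
  apiProvider: "baseten",
  apiModelId: "placeholder-model-id", // inherited from apiModelIdProviderModelSchema
  basetenApiKey: "placeholder-key"    // optional secret added by basetenSchema
})
console.log(result.success)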
@@ -4638,6 +5077,7 @@ var providerSettingsSchema = z8.object({
   ...fakeAiSchema.shape,
   ...xaiSchema.shape,
   ...groqSchema.shape,
+  ...basetenSchema.shape,
   ...huggingFaceSchema.shape,
   ...chutesSchema.shape,
   ...litellmSchema.shape,
@@ -4701,6 +5141,7 @@ var modelIdKeysByProvider = {
   requesty: "requestyModelId",
   xai: "apiModelId",
   groq: "apiModelId",
+  baseten: "apiModelId",
   chutes: "apiModelId",
   litellm: "litellmModelId",
   huggingface: "huggingFaceModelId",
@@ -4808,7 +5249,8 @@ var MODELS_BY_PROVIDER = {
     models: Object.keys(vscodeLlmModels)
   },
   xai: { id: "xai", label: "xAI (Grok)", models: Object.keys(xaiModels) },
-  zai: { id: "zai", label: "
+  zai: { id: "zai", label: "Z.ai", models: Object.keys(internationalZAiModels) },
+  baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
   // Dynamic providers; models pulled from remote APIs.
   glama: { id: "glama", label: "Glama", models: [] },
   huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
@@ -4840,7 +5282,18 @@ var historyItemSchema = z9.object({
   totalCost: z9.number(),
   size: z9.number().optional(),
   workspace: z9.string().optional(),
-  mode: z9.string().optional()
+  mode: z9.string().optional(),
+  status: z9.enum(["active", "completed", "delegated"]).optional(),
+  delegatedToId: z9.string().optional(),
+  // Last child this parent delegated to
+  childIds: z9.array(z9.string()).optional(),
+  // All children spawned by this task
+  awaitingChildId: z9.string().optional(),
+  // Child currently awaited (set when delegated)
+  completedByChildId: z9.string().optional(),
+  // Child that completed and resumed this parent
+  completionResultSummary: z9.string().optional()
+  // Summary from completed child
 });

 // src/experiment.ts
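historyItemSchema gains optional delegation bookkeeping (status, delegatedToId, childIds, awaitingChildId, completedByChildId, completionResultSummary); a typing sketch of those fields on a delegated parent task, assuming the schema is exported, follows.

// Sketch only: assumes `historyItemSchema` is exported; only the new optional fields are shown.
import { z } from "zod"
import { historyItemSchema } from "@roo-code/types"

type HistoryItem = z.infer<typeof historyItemSchema>
type Delegation = Pick<
  HistoryItem,
  "status" | "delegatedToId" | "childIds" | "awaitingChildId" | "completedByChildId" | "completionResultSummary"
>

const delegated: Delegation = {
  status: "delegated",        // parent is waiting on a child
  delegatedToId: "child-123", // last child this parent delegated to
  childIds: ["child-123"],    // all children spawned by this task
  awaitingChildId: "child-123"
}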
@@ -4850,7 +5303,8 @@ var experimentIds = [
   "multiFileApplyDiff",
   "preventFocusDisruption",
   "imageGeneration",
-  "runSlashCommand"
+  "runSlashCommand",
+  "multipleNativeToolCalls"
 ];
 var experimentIdsSchema = z10.enum(experimentIds);
 var experimentsSchema = z10.object({
@@ -4858,7 +5312,8 @@ var experimentsSchema = z10.object({
   multiFileApplyDiff: z10.boolean().optional(),
   preventFocusDisruption: z10.boolean().optional(),
   imageGeneration: z10.boolean().optional(),
-  runSlashCommand: z10.boolean().optional()
+  runSlashCommand: z10.boolean().optional(),
+  multipleNativeToolCalls: z10.boolean().optional()
 });

 // src/telemetry.ts
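The experiment flags gain multipleNativeToolCalls alongside runSlashCommand; a parse sketch, assuming experimentsSchema is exported and the remaining flags are optional like the ones shown, follows.

// Sketch only: assumes `experimentsSchema` is exported and all flags are optional booleans.
import { experimentsSchema } from "@roo-code/types"

const parsed = experimentsSchema.safeParse({
  runSlashCommand: true,
  multipleNativeToolCalls: true
})
if (parsed.success) console.log(parsed.data.multipleNativeToolCalls) // true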
@@ -4908,6 +5363,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
   TelemetryEventName2["CONSECUTIVE_MISTAKE_ERROR"] = "Consecutive Mistake Error";
   TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
   TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
+  TelemetryEventName2["MODEL_CACHE_EMPTY_RESPONSE"] = "Model Cache Empty Response";
   return TelemetryEventName2;
 })(TelemetryEventName || {});
 var staticAppPropertiesSchema = z11.object({
@@ -4991,6 +5447,7 @@ var rooCodeTelemetryEventSchema = z11.discriminatedUnion("type", [
   "Shell Integration Error" /* SHELL_INTEGRATION_ERROR */,
   "Consecutive Mistake Error" /* CONSECUTIVE_MISTAKE_ERROR */,
   "Code Index Error" /* CODE_INDEX_ERROR */,
+  "Model Cache Empty Response" /* MODEL_CACHE_EMPTY_RESPONSE */,
   "Context Condensed" /* CONTEXT_CONDENSED */,
   "Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */,
   "Tab Shown" /* TAB_SHOWN */,
@@ -5151,8 +5608,6 @@ var terminalActionIds = ["terminalAddToContext", "terminalFixCommand", "terminal
 var commandIds = [
   "activationCompleted",
   "plusButtonClicked",
-  "promptsButtonClicked",
-  "mcpButtonClicked",
   "historyButtonClicked",
   "marketplaceButtonClicked",
   "popoutButtonClicked",
@@ -5209,6 +5664,7 @@ var globalSettingsSchema = z14.object({
   taskHistory: z14.array(historyItemSchema).optional(),
   dismissedUpsells: z14.array(z14.string()).optional(),
   // Image generation settings (experimental) - flattened for simplicity
+  imageGenerationProvider: z14.enum(["openrouter", "roo"]).optional(),
   openRouterImageApiKey: z14.string().optional(),
   openRouterImageGenerationSelectedModel: z14.string().optional(),
   condensingApiConfigId: z14.string().optional(),
@@ -5360,7 +5816,8 @@ var SECRET_STATE_KEYS = [
   "fireworksApiKey",
   "featherlessApiKey",
   "ioIntelligenceApiKey",
-  "vercelAiGatewayApiKey"
+  "vercelAiGatewayApiKey",
+  "basetenApiKey"
 ];
 var GLOBAL_SECRET_KEYS = [
   "openRouterImageApiKey"
@@ -5625,6 +6082,9 @@ var ExtensionBridgeEventName = ((ExtensionBridgeEventName2) => {
   ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskPaused"] = "taskPaused" /* TaskPaused */] = "TaskPaused";
   ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskUnpaused"] = "taskUnpaused" /* TaskUnpaused */] = "TaskUnpaused";
   ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskSpawned"] = "taskSpawned" /* TaskSpawned */] = "TaskSpawned";
+  ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegated"] = "taskDelegated" /* TaskDelegated */] = "TaskDelegated";
+  ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegationCompleted"] = "taskDelegationCompleted" /* TaskDelegationCompleted */] = "TaskDelegationCompleted";
+  ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskDelegationResumed"] = "taskDelegationResumed" /* TaskDelegationResumed */] = "TaskDelegationResumed";
   ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskUserMessage"] = "taskUserMessage" /* TaskUserMessage */] = "TaskUserMessage";
   ExtensionBridgeEventName2[ExtensionBridgeEventName2["TaskTokenUsageUpdated"] = "taskTokenUsageUpdated" /* TaskTokenUsageUpdated */] = "TaskTokenUsageUpdated";
   ExtensionBridgeEventName2[ExtensionBridgeEventName2["ModeChanged"] = "modeChanged" /* ModeChanged */] = "ModeChanged";
@@ -5700,6 +6160,21 @@ var extensionBridgeEventSchema = z16.discriminatedUnion("type", [
     instance: extensionInstanceSchema,
     timestamp: z16.number()
   }),
+  z16.object({
+    type: z16.literal(ExtensionBridgeEventName.TaskDelegated),
+    instance: extensionInstanceSchema,
+    timestamp: z16.number()
+  }),
+  z16.object({
+    type: z16.literal(ExtensionBridgeEventName.TaskDelegationCompleted),
+    instance: extensionInstanceSchema,
+    timestamp: z16.number()
+  }),
+  z16.object({
+    type: z16.literal(ExtensionBridgeEventName.TaskDelegationResumed),
+    instance: extensionInstanceSchema,
+    timestamp: z16.number()
+  }),
   z16.object({
     type: z16.literal(ExtensionBridgeEventName.TaskUserMessage),
     instance: extensionInstanceSchema,
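The bridge event union gains TaskDelegated, TaskDelegationCompleted and TaskDelegationResumed variants, each carrying an instance and a timestamp; a narrowing sketch, assuming the schema and the event-name enum are exported, follows.

// Sketch only: assumes `extensionBridgeEventSchema` and `ExtensionBridgeEventName` are exported.
import { extensionBridgeEventSchema, ExtensionBridgeEventName } from "@roo-code/types"

function describeBridgeEvent(raw: unknown): string {
  const event = extensionBridgeEventSchema.parse(raw)
  switch (event.type) {
    case ExtensionBridgeEventName.TaskDelegated:
    case ExtensionBridgeEventName.TaskDelegationCompleted:
    case ExtensionBridgeEventName.TaskDelegationResumed:
      return `${event.type} at ${event.timestamp}` // delegation events carry a timestamp
    default:
      return event.type
  }
}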
@@ -5890,12 +6365,27 @@ var followUpDataSchema = z17.object({

 // src/image-generation.ts
 var IMAGE_GENERATION_MODELS = [
-
-  { value: "google/gemini-
-  { value: "
-  { value: "openai/gpt-5-image
+  // OpenRouter models
+  { value: "google/gemini-2.5-flash-image", label: "Gemini 2.5 Flash Image", provider: "openrouter" },
+  { value: "google/gemini-3-pro-image-preview", label: "Gemini 3 Pro Image Preview", provider: "openrouter" },
+  { value: "openai/gpt-5-image", label: "GPT-5 Image", provider: "openrouter" },
+  { value: "openai/gpt-5-image-mini", label: "GPT-5 Image Mini", provider: "openrouter" },
+  { value: "black-forest-labs/flux.2-flex", label: "Black Forest Labs FLUX.2 Flex", provider: "openrouter" },
+  { value: "black-forest-labs/flux.2-pro", label: "Black Forest Labs FLUX.2 Pro", provider: "openrouter" },
+  // Roo Code Cloud models
+  { value: "google/gemini-2.5-flash-image", label: "Gemini 2.5 Flash Image", provider: "roo" },
+  { value: "google/gemini-3-pro-image", label: "Gemini 3 Pro Image", provider: "roo" },
+  {
+    value: "bfl/flux-2-pro:free",
+    label: "Black Forest Labs FLUX.2 Pro (Free)",
+    provider: "roo",
+    apiMethod: "images_api"
+  }
 ];
 var IMAGE_GENERATION_MODEL_IDS = IMAGE_GENERATION_MODELS.map((m) => m.value);
+function getImageGenerationProvider(explicitProvider, hasExistingModel) {
+  return explicitProvider !== void 0 ? explicitProvider : hasExistingModel ? "openrouter" : "roo";
+}

 // src/ipc.ts
 import { z as z18 } from "zod";
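getImageGenerationProvider, exported later in this diff, encodes the fallback order behind the new imageGenerationProvider setting: an explicit choice wins, an existing OpenRouter model selection keeps "openrouter", and fresh setups default to "roo".

import { getImageGenerationProvider } from "@roo-code/types"

console.log(getImageGenerationProvider("openrouter", false)) // "openrouter" (explicit choice wins)
console.log(getImageGenerationProvider(undefined, true))     // "openrouter" (a model was already configured)
console.log(getImageGenerationProvider(undefined, false))    // "roo" (default for new setups)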
@@ -6130,6 +6620,8 @@ export {
   anthropicModels,
   appPropertiesSchema,
   azureOpenAiDefaultApiVersion,
+  basetenDefaultModelId,
+  basetenModels,
   bedrockDefaultModelId,
   bedrockDefaultPromptRouterModelId,
   bedrockModels,
@@ -6185,6 +6677,7 @@ export {
   getApiProtocol,
   getClaudeCodeModelId,
   getEffectiveProtocol,
+  getImageGenerationProvider,
   getModelId,
   getProviderDefaultModelId,
   gitPropertiesSchema,