@roo-code/types 1.93.0 → 1.95.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -149,6 +149,7 @@ var toolNames = [
  "apply_diff",
  "search_and_replace",
  "search_replace",
+ "edit_file",
  "apply_patch",
  "search_files",
  "list_files",
@@ -177,6 +178,10 @@ var TOOL_PROTOCOL = {
  XML: "xml",
  NATIVE: "native"
  };
+ var NATIVE_TOOL_DEFAULTS = {
+ supportsNativeTools: true,
+ defaultToolProtocol: TOOL_PROTOCOL.NATIVE
+ };
  function isNativeProtocol(protocol) {
  return protocol === TOOL_PROTOCOL.NATIVE;
  }
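Note: the new NATIVE_TOOL_DEFAULTS constant bundles the two fields a model entry needs to opt into native tool calling. A minimal consumer-side sketch — the myModelInfo object is illustrative, not part of the package:

```ts
import { NATIVE_TOOL_DEFAULTS, isNativeProtocol } from "@roo-code/types";

// Illustrative model entry: spreading NATIVE_TOOL_DEFAULTS sets
// supportsNativeTools: true and defaultToolProtocol: "native".
const myModelInfo = {
  maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  ...NATIVE_TOOL_DEFAULTS,
};

if (isNativeProtocol(myModelInfo.defaultToolProtocol)) {
  // Tool calls arrive as native tool_call chunks rather than XML text.
  console.log("native protocol enabled");
}
```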
@@ -821,6 +826,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.55,
  outputPrice: 5.95,
  cacheWritesPrice: 0,
@@ -832,6 +838,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.55,
  outputPrice: 5.95,
  cacheWritesPrice: 0,
@@ -843,6 +850,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.77,
  outputPrice: 0.77,
  cacheWritesPrice: 0,
@@ -854,6 +862,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.5,
  outputPrice: 1.5,
  cacheWritesPrice: 0,
@@ -872,11 +881,24 @@ var basetenModels = {
  cacheReadsPrice: 0,
  description: "DeepSeek's hybrid reasoning model with efficient long context scaling with GPT-5 level performance"
  },
+ "openai/gpt-oss-120b": {
+ maxTokens: 16384,
+ contextWindow: 128072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.1,
+ outputPrice: 0.5,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
+ },
  "Qwen/Qwen3-235B-A22B-Instruct-2507": {
  maxTokens: 16384,
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.22,
  outputPrice: 0.8,
  cacheWritesPrice: 0,
@@ -888,24 +910,13 @@ var basetenModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.38,
  outputPrice: 1.53,
  cacheWritesPrice: 0,
  cacheReadsPrice: 0,
  description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
  },
- "openai/gpt-oss-120b": {
- maxTokens: 16384,
- contextWindow: 128072,
- supportsImages: false,
- supportsPromptCache: false,
- supportsNativeTools: true,
- inputPrice: 0.1,
- outputPrice: 0.5,
- cacheWritesPrice: 0,
- cacheReadsPrice: 0,
- description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
- },
  "moonshotai/Kimi-K2-Instruct-0905": {
  maxTokens: 16384,
  contextWindow: 262e3,
@@ -1377,22 +1388,6 @@ var bedrockModels = {
  outputPrice: 0.6,
  description: "Amazon Titan Text Express"
  },
- "amazon.titan-text-embeddings-v1:0": {
- maxTokens: 8192,
- contextWindow: 8e3,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.1,
- description: "Amazon Titan Text Embeddings"
- },
- "amazon.titan-text-embeddings-v2:0": {
- maxTokens: 8192,
- contextWindow: 8e3,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.02,
- description: "Amazon Titan Text Embeddings V2"
- },
  "moonshot.kimi-k2-thinking": {
  maxTokens: 32e3,
  contextWindow: 262144,
@@ -2212,6 +2207,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek V3 0324 model."
@@ -2221,6 +2217,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek R1 0528 model."
@@ -2240,6 +2237,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "GPT-OSS 120B model."
@@ -2255,7 +2253,7 @@ var featherlessModels = {
  description: "Qwen3 Coder 480B A35B Instruct model."
  }
  };
- var featherlessDefaultModelId = "deepseek-ai/DeepSeek-R1-0528";
+ var featherlessDefaultModelId = "moonshotai/Kimi-K2-Instruct";

  // src/providers/fireworks.ts
  var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct-0905";
@@ -2419,6 +2417,7 @@ var geminiModels = {
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 4,
@@ -2446,6 +2445,7 @@ var geminiModels = {
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 0.3,
@@ -2462,6 +2462,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2493,6 +2494,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2523,6 +2525,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2551,6 +2554,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2582,6 +2586,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -2597,6 +2602,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -2612,6 +2618,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -2628,6 +2635,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
@@ -2643,6 +2651,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
@@ -2689,33 +2698,6 @@ var groqModels = {
  outputPrice: 0.34,
  description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
  },
- "meta-llama/llama-4-maverick-17b-128e-instruct": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.2,
- outputPrice: 0.6,
- description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
- },
- "mistral-saba-24b": {
- maxTokens: 8192,
- contextWindow: 32768,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.79,
- outputPrice: 0.79,
- description: "Mistral Saba 24B model, 32K context."
- },
- "qwen-qwq-32b": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.29,
- outputPrice: 0.39,
- description: "Alibaba Qwen QwQ 32B model, 128K context."
- },
  "qwen/qwen3-32b": {
  maxTokens: 8192,
  contextWindow: 131072,
@@ -2727,26 +2709,6 @@ var groqModels = {
  outputPrice: 0.59,
  description: "Alibaba Qwen 3 32B model, 128K context."
  },
- "deepseek-r1-distill-llama-70b": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.75,
- outputPrice: 0.99,
- description: "DeepSeek R1 Distill Llama 70B model, 128K context."
- },
- "moonshotai/kimi-k2-instruct": {
- maxTokens: 16384,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- inputPrice: 1,
- outputPrice: 3,
- cacheReadsPrice: 0.5,
- // 50% discount for cached input tokens
- description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
- },
  "moonshotai/kimi-k2-instruct-0905": {
  maxTokens: 16384,
  contextWindow: 262144,
@@ -2855,6 +2817,8 @@ var lMStudioDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3681,6 +3645,8 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3692,6 +3658,8 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3815,15 +3783,6 @@ var sambaNovaModels = {
  outputPrice: 4.5,
  description: "DeepSeek V3.1 model with 32K context window."
  },
- "DeepSeek-R1-Distill-Llama-70B": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.7,
- outputPrice: 1.4,
- description: "DeepSeek R1 distilled Llama 70B model with 128K context window."
- },
  "Llama-4-Maverick-17B-128E-Instruct": {
  maxTokens: 8192,
  contextWindow: 131072,
@@ -3835,15 +3794,6 @@ var sambaNovaModels = {
  outputPrice: 1.8,
  description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
  },
- "Llama-3.3-Swallow-70B-Instruct-v0.4": {
- maxTokens: 8192,
- contextWindow: 16384,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.6,
- outputPrice: 1.2,
- description: "Tokyotech Llama 3.3 Swallow 70B Instruct v0.4 model with 16K context window."
- },
  "Qwen3-32B": {
  maxTokens: 8192,
  contextWindow: 8192,
@@ -3895,6 +3845,7 @@ var vertexModels = {
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 4,
@@ -3922,6 +3873,7 @@ var vertexModels = {
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 0.3,
@@ -3937,6 +3889,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 3.5,
  maxThinkingTokens: 24576,
@@ -3951,6 +3904,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 0.6
  },
@@ -3962,6 +3916,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -3977,6 +3932,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 3.5,
  maxThinkingTokens: 24576,
@@ -3991,6 +3947,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 0.6
  },
@@ -4002,6 +3959,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15
  },
@@ -4013,6 +3971,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15
  },
@@ -4024,6 +3983,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15,
  maxThinkingTokens: 32768,
@@ -4037,6 +3997,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15,
  maxThinkingTokens: 32768,
@@ -4065,6 +4026,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0,
  outputPrice: 0
  },
@@ -4076,6 +4038,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0,
  outputPrice: 0
  },
@@ -4087,6 +4050,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 0.6
  },
@@ -4098,6 +4062,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.075,
  outputPrice: 0.3
  },
@@ -4109,6 +4074,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0,
  outputPrice: 0
  },
@@ -4120,6 +4086,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.075,
  outputPrice: 0.3
  },
@@ -4131,36 +4098,83 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 1.25,
  outputPrice: 5
  },
  "claude-sonnet-4@20250514": {
  maxTokens: 8192,
  contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
+ // $3 per million input tokens (≤200K context)
  outputPrice: 15,
+ // $15 per million output tokens (≤200K context)
  cacheWritesPrice: 3.75,
+ // $3.75 per million tokens
  cacheReadsPrice: 0.3,
- supportsReasoningBudget: true
+ // $0.30 per million tokens
+ supportsReasoningBudget: true,
+ // Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 6,
+ // $6 per million input tokens (>200K context)
+ outputPrice: 22.5,
+ // $22.50 per million output tokens (>200K context)
+ cacheWritesPrice: 7.5,
+ // $7.50 per million tokens (>200K context)
+ cacheReadsPrice: 0.6
+ // $0.60 per million tokens (>200K context)
+ }
+ ]
  },
  "claude-sonnet-4-5@20250929": {
  maxTokens: 8192,
  contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
+ // $3 per million input tokens (≤200K context)
  outputPrice: 15,
+ // $15 per million output tokens (≤200K context)
  cacheWritesPrice: 3.75,
+ // $3.75 per million tokens
  cacheReadsPrice: 0.3,
- supportsReasoningBudget: true
+ // $0.30 per million tokens
+ supportsReasoningBudget: true,
+ // Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 6,
+ // $6 per million input tokens (>200K context)
+ outputPrice: 22.5,
+ // $22.50 per million output tokens (>200K context)
+ cacheWritesPrice: 7.5,
+ // $7.50 per million tokens (>200K context)
+ cacheReadsPrice: 0.6
+ // $0.60 per million tokens (>200K context)
+ }
+ ]
  },
  "claude-haiku-4-5@20251001": {
  maxTokens: 8192,
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
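Note: the tiers array added to the two Claude Sonnet 4 entries encodes the >200K-token pricing that applies under the 'context-1m-2025-08-07' beta. A hedged sketch of how a cost estimator might select a tier — the pickPricing helper is illustrative, not part of this package:

```ts
import { vertexModels } from "@roo-code/types";

const model = vertexModels["claude-sonnet-4@20250514"];

// Illustrative: base pricing applies up to the default 200K window;
// above it, the first tier large enough for the prompt applies.
function pickPricing(promptTokens: number) {
  if (promptTokens <= model.contextWindow) return model;
  return model.tiers?.find((t) => promptTokens <= t.contextWindow) ?? model;
}

// 300K prompt tokens fall into the 1M tier: $6/M input instead of $3/M.
const pricing = pickPricing(300_000);
console.log((300_000 / 1e6) * (pricing.inputPrice ?? 0)); // 1.8
```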
@@ -4172,6 +4186,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 5,
  outputPrice: 25,
  cacheWritesPrice: 6.25,
@@ -4183,6 +4199,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4194,6 +4212,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4204,6 +4224,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4216,6 +4238,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4226,6 +4250,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4236,6 +4262,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4246,6 +4274,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -4256,6 +4286,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4266,6 +4298,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0.25,
  outputPrice: 1.25,
  cacheWritesPrice: 0.3,
@@ -4279,6 +4313,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
@@ -4291,6 +4326,7 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.35,
  outputPrice: 1.15,
  description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
@@ -4300,6 +4336,7 @@ var vertexModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1.35,
  outputPrice: 5.4,
  description: "DeepSeek R1 (0528). Available in us-central1"
@@ -4309,6 +4346,7 @@ var vertexModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 1.7,
  description: "DeepSeek V3.1. Available in us-west2"
@@ -4318,6 +4356,7 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.6,
  description: "OpenAI gpt-oss 120B. Available in us-central1"
@@ -4327,6 +4366,7 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.075,
  outputPrice: 0.3,
  description: "OpenAI gpt-oss 20B. Available in us-central1"
@@ -4336,6 +4376,7 @@ var vertexModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 4,
  description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
@@ -4345,11 +4386,13 @@ var vertexModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.25,
  outputPrice: 1,
  description: "Qwen3 235B A22B Instruct. Available in us-south1"
  }
  };
+ var VERTEX_1M_CONTEXT_MODEL_IDS = ["claude-sonnet-4@20250514", "claude-sonnet-4-5@20250929"];
  var VERTEX_REGIONS = [
  { value: "global", label: "global" },
  { value: "us-central1", label: "us-central1" },
@@ -4784,6 +4827,7 @@ var vercelAiGatewayDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4796,7 +4840,7 @@ var VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE = 0.7;
  var internationalZAiDefaultModelId = "glm-4.6";
  var internationalZAiModels = {
  "glm-4.5": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4809,7 +4853,7 @@ var internationalZAiModels = {
  description: "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k."
  },
  "glm-4.5-air": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4822,7 +4866,7 @@ var internationalZAiModels = {
  description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models."
  },
  "glm-4.5-x": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4835,7 +4879,7 @@ var internationalZAiModels = {
  description: "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses."
  },
  "glm-4.5-airx": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4848,7 +4892,7 @@ var internationalZAiModels = {
  description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost."
  },
  "glm-4.5-flash": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4874,7 +4918,7 @@ var internationalZAiModels = {
  description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
  },
  "glm-4.6": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4886,8 +4930,24 @@ var internationalZAiModels = {
  cacheReadsPrice: 0.11,
  description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations."
  },
+ "glm-4.7": {
+ maxTokens: 16384,
+ contextWindow: 2e5,
+ supportsImages: false,
+ supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ supportsReasoningEffort: ["disable", "medium"],
+ reasoningEffort: "medium",
+ preserveReasoning: true,
+ inputPrice: 0.6,
+ outputPrice: 2.2,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0.11,
+ description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
+ },
  "glm-4-32b-0414-128k": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
@@ -4903,7 +4963,7 @@ var internationalZAiModels = {
  var mainlandZAiDefaultModelId = "glm-4.6";
  var mainlandZAiModels = {
  "glm-4.5": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4916,7 +4976,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k."
  },
  "glm-4.5-air": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4929,7 +4989,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models."
  },
  "glm-4.5-x": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4942,7 +5002,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses."
  },
  "glm-4.5-airx": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4955,7 +5015,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost."
  },
  "glm-4.5-flash": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4981,7 +5041,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
  },
  "glm-4.6": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 204800,
  supportsImages: false,
  supportsPromptCache: true,
@@ -4992,6 +5052,22 @@ var mainlandZAiModels = {
  cacheWritesPrice: 0,
  cacheReadsPrice: 0.057,
  description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations."
+ },
+ "glm-4.7": {
+ maxTokens: 16384,
+ contextWindow: 204800,
+ supportsImages: false,
+ supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ supportsReasoningEffort: ["disable", "medium"],
+ reasoningEffort: "medium",
+ preserveReasoning: true,
+ inputPrice: 0.29,
+ outputPrice: 1.14,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0.057,
+ description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
  }
  };
  var ZAI_DEFAULT_TEMPERATURE = 0.6;
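Note: both Z.AI registries gain a glm-4.7 entry whose supportsReasoningEffort is the two-value list ["disable", "medium"]. A sketch of picking a valid effort, assuming internationalZAiModels is exported as in earlier versions:

```ts
import { internationalZAiModels } from "@roo-code/types";

const glm47 = internationalZAiModels["glm-4.7"];

// Fall back to the model's default effort ("medium") when the requested
// value isn't in the supported list.
function chooseEffort(requested: string) {
  const supported = glm47.supportsReasoningEffort;
  return Array.isArray(supported) && supported.some((e) => e === requested)
    ? requested
    : glm47.reasoningEffort;
}

chooseEffort("disable"); // "disable"
chooseEffort("high"); // "medium" — not offered by glm-4.7
```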
@@ -5041,6 +5117,8 @@ var minimaxModels = {
  supportsPromptCache: true,
  supportsNativeTools: true,
  defaultToolProtocol: "native",
+ includedTools: ["search_and_replace"],
+ excludedTools: ["apply_diff"],
  preserveReasoning: true,
  inputPrice: 0.3,
  outputPrice: 1.2,
@@ -5055,12 +5133,30 @@ var minimaxModels = {
  supportsPromptCache: true,
  supportsNativeTools: true,
  defaultToolProtocol: "native",
+ includedTools: ["search_and_replace"],
+ excludedTools: ["apply_diff"],
  preserveReasoning: true,
  inputPrice: 0.3,
  outputPrice: 1.2,
  cacheWritesPrice: 0.375,
  cacheReadsPrice: 0.03,
  description: "MiniMax M2 Stable (High Concurrency, Commercial Use), a model born for Agents and code, featuring Top-tier Coding Capabilities, Powerful Agentic Performance, and Ultimate Cost-Effectiveness & Speed."
+ },
+ "MiniMax-M2.1": {
+ maxTokens: 16384,
+ contextWindow: 192e3,
+ supportsImages: false,
+ supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ includedTools: ["search_and_replace"],
+ excludedTools: ["apply_diff"],
+ preserveReasoning: true,
+ inputPrice: 0.3,
+ outputPrice: 1.2,
+ cacheWritesPrice: 0.375,
+ cacheReadsPrice: 0.03,
+ description: "MiniMax M2.1 builds on M2 with improved overall performance for agentic coding tasks and significantly faster response times."
  }
  };
  var minimaxDefaultModelInfo = minimaxModels[minimaxDefaultModelId];
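Note: the MiniMax entries now carry both includedTools and excludedTools. A hedged sketch of how a host might shape its tool list from these fields — the baseline list and the merge logic are illustrative, not how the extension necessarily applies them:

```ts
import { minimaxModels } from "@roo-code/types";

const m21 = minimaxModels["MiniMax-M2.1"];

// Illustrative: drop excluded tools from a base list, then append any
// included tools that weren't already present.
const baseTools = ["read_file", "write_file", "apply_diff"];
const tools = [
  ...baseTools.filter((t) => !m21.excludedTools?.includes(t)),
  ...(m21.includedTools ?? []).filter((t) => !baseTools.includes(t)),
];
// -> ["read_file", "write_file", "search_and_replace"]
```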
@@ -5273,7 +5369,9 @@ var vertexSchema = apiModelIdProviderModelSchema.extend({
  vertexProjectId: z8.string().optional(),
  vertexRegion: z8.string().optional(),
  enableUrlContext: z8.boolean().optional(),
- enableGrounding: z8.boolean().optional()
+ enableGrounding: z8.boolean().optional(),
+ vertex1MContext: z8.boolean().optional()
+ // Enable 'context-1m-2025-08-07' beta for 1M context window.
  });
  var openAiSchema = baseProviderSettingsSchema.extend({
  openAiBaseUrl: z8.string().optional(),
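Note: the new vertex1MContext flag pairs with the VERTEX_1M_CONTEXT_MODEL_IDS list added earlier in this file; only the two Sonnet 4 entries carry extended-context tiers. A sketch of the gating a provider might do — the settings object is an assumed example matching the vertexSchema fields above:

```ts
import { VERTEX_1M_CONTEXT_MODEL_IDS } from "@roo-code/types";

// Assumed settings shape, per the vertexSchema fields in this diff.
const settings = {
  apiModelId: "claude-sonnet-4-5@20250929",
  vertexRegion: "global",
  vertex1MContext: true,
};

// Send the 'context-1m-2025-08-07' beta only for eligible models.
const use1MContext =
  settings.vertex1MContext === true &&
  VERTEX_1M_CONTEXT_MODEL_IDS.some((id) => id === settings.apiModelId);
```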
@@ -5690,6 +5788,16 @@ var historyItemSchema = z9.object({
  size: z9.number().optional(),
  workspace: z9.string().optional(),
  mode: z9.string().optional(),
+ /**
+ * The tool protocol used by this task. Once a task uses tools with a specific
+ * protocol (XML or Native), it is permanently locked to that protocol.
+ *
+ * - "xml": Tool calls are parsed from XML text (no tool IDs)
+ * - "native": Tool calls come as tool_call chunks with IDs
+ *
+ * This ensures task resumption works correctly even when NTC settings change.
+ */
+ toolProtocol: z9.enum(["xml", "native"]).optional(),
  status: z9.enum(["active", "completed", "delegated"]).optional(),
  delegatedToId: z9.string().optional(),
  // Last child this parent delegated to
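Note: the toolProtocol field locks a history item to the protocol the task first used, so resumption ignores later setting changes. A small sketch of resumption-side handling; the XML fallback for items that predate the field is an assumption:

```ts
import { z } from "zod";
import { historyItemSchema } from "@roo-code/types";

type HistoryItem = z.infer<typeof historyItemSchema>;

function resolveToolProtocol(item: HistoryItem): "xml" | "native" {
  // The persisted protocol wins over current settings; assume "xml"
  // for history items written before this field existed.
  return item.toolProtocol ?? "xml";
}
```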
@@ -5711,7 +5819,8 @@ var experimentIds = [
  "preventFocusDisruption",
  "imageGeneration",
  "runSlashCommand",
- "multipleNativeToolCalls"
+ "multipleNativeToolCalls",
+ "customTools"
  ];
  var experimentIdsSchema = z10.enum(experimentIds);
  var experimentsSchema = z10.object({
@@ -5720,7 +5829,8 @@ var experimentsSchema = z10.object({
  preventFocusDisruption: z10.boolean().optional(),
  imageGeneration: z10.boolean().optional(),
  runSlashCommand: z10.boolean().optional(),
- multipleNativeToolCalls: z10.boolean().optional()
+ multipleNativeToolCalls: z10.boolean().optional(),
+ customTools: z10.boolean().optional()
  });

  // src/telemetry.ts
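Note: a customTools experiment ID joins the list, with a matching optional boolean in experimentsSchema. Since every flag is optional, a partial object validates — a quick parse sketch:

```ts
import { experimentsSchema } from "@roo-code/types";

// Opt into the new custom-tools experiment; omitted flags stay undefined.
const experiments = experimentsSchema.parse({
  customTools: true,
  multipleNativeToolCalls: false,
});
```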
@@ -5985,6 +6095,31 @@ function extractApiProviderErrorProperties(error) {
  ...error.errorCode !== void 0 && { errorCode: error.errorCode }
  };
  }
+ var ConsecutiveMistakeError = class extends Error {
+ constructor(message, taskId, consecutiveMistakeCount, consecutiveMistakeLimit, reason = "unknown", provider, modelId) {
+ super(message);
+ this.taskId = taskId;
+ this.consecutiveMistakeCount = consecutiveMistakeCount;
+ this.consecutiveMistakeLimit = consecutiveMistakeLimit;
+ this.reason = reason;
+ this.provider = provider;
+ this.modelId = modelId;
+ this.name = "ConsecutiveMistakeError";
+ }
+ };
+ function isConsecutiveMistakeError(error) {
+ return error instanceof Error && error.name === "ConsecutiveMistakeError" && "taskId" in error && "consecutiveMistakeCount" in error && "consecutiveMistakeLimit" in error;
+ }
+ function extractConsecutiveMistakeErrorProperties(error) {
+ return {
+ taskId: error.taskId,
+ consecutiveMistakeCount: error.consecutiveMistakeCount,
+ consecutiveMistakeLimit: error.consecutiveMistakeLimit,
+ reason: error.reason,
+ ...error.provider !== void 0 && { provider: error.provider },
+ ...error.modelId !== void 0 && { modelId: error.modelId }
+ };
+ }

  // src/mode.ts
  import { z as z12 } from "zod";
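Note: ConsecutiveMistakeError carries the task context that the new extract helper flattens for telemetry, and isConsecutiveMistakeError() guards by name and fields on top of instanceof Error, so it also matches instances crossing bundle boundaries. A usage sketch — the reason string and IDs are illustrative:

```ts
import {
  ConsecutiveMistakeError,
  isConsecutiveMistakeError,
  extractConsecutiveMistakeErrorProperties,
} from "@roo-code/types";

function report(error: unknown) {
  if (isConsecutiveMistakeError(error)) {
    // provider/modelId are included only when set on the error.
    const props = extractConsecutiveMistakeErrorProperties(error);
    console.log(`${props.consecutiveMistakeCount}/${props.consecutiveMistakeLimit}`, props.reason);
  }
}

report(new ConsecutiveMistakeError("Too many consecutive mistakes", "task-123", 3, 3, "tool_repetition"));
```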
@@ -6869,15 +7004,21 @@ var COOKIE_CONSENT_EVENTS = {
  CHANGED: "cookieConsentChanged"
  };

+ // src/custom-tool.ts
+ import { z as z17 } from "zod/v4";
+ function defineCustomTool(definition) {
+ return definition;
+ }
+
  // src/followup.ts
- import { z as z17 } from "zod";
- var suggestionItemSchema = z17.object({
- answer: z17.string(),
- mode: z17.string().optional()
+ import { z as z18 } from "zod";
+ var suggestionItemSchema = z18.object({
+ answer: z18.string(),
+ mode: z18.string().optional()
  });
- var followUpDataSchema = z17.object({
- question: z17.string().optional(),
- suggest: z17.array(suggestionItemSchema).optional()
+ var followUpDataSchema = z18.object({
+ question: z18.string().optional(),
+ suggest: z18.array(suggestionItemSchema).optional()
  });

  // src/image-generation.ts
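Note: src/custom-tool.ts introduces defineCustomTool(), an identity helper whose value is type inference on the definition object; the zod/v4 namespace it relies on is re-exported as parametersSchema (see the export list at the end of this diff). The definition's field names are not visible here, so the shape below is an assumption:

```ts
import { defineCustomTool, parametersSchema as z } from "@roo-code/types";

// Assumed definition shape: name/description/parameters/execute are
// guesses for illustration; only defineCustomTool itself is shown above.
export const getWeather = defineCustomTool({
  name: "get_weather",
  description: "Look up current weather for a city",
  parameters: z.object({ city: z.string() }),
  execute: async ({ city }: { city: string }) => `Sunny in ${city}`,
});
```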
@@ -6905,7 +7046,7 @@ function getImageGenerationProvider(explicitProvider, hasExistingModel) {
  }

  // src/ipc.ts
- import { z as z18 } from "zod";
+ import { z as z19 } from "zod";
  var IpcMessageType = /* @__PURE__ */ ((IpcMessageType2) => {
  IpcMessageType2["Connect"] = "Connect";
  IpcMessageType2["Disconnect"] = "Disconnect";
@@ -6919,10 +7060,10 @@ var IpcOrigin = /* @__PURE__ */ ((IpcOrigin2) => {
  IpcOrigin2["Server"] = "server";
  return IpcOrigin2;
  })(IpcOrigin || {});
- var ackSchema = z18.object({
- clientId: z18.string(),
- pid: z18.number(),
- ppid: z18.number()
+ var ackSchema = z19.object({
+ clientId: z19.string(),
+ pid: z19.number(),
+ ppid: z19.number()
  });
  var TaskCommandName = /* @__PURE__ */ ((TaskCommandName2) => {
  TaskCommandName2["StartNewTask"] = "StartNewTask";
@@ -6932,79 +7073,79 @@ var TaskCommandName = /* @__PURE__ */ ((TaskCommandName2) => {
  TaskCommandName2["SendMessage"] = "SendMessage";
  return TaskCommandName2;
  })(TaskCommandName || {});
- var taskCommandSchema = z18.discriminatedUnion("commandName", [
- z18.object({
- commandName: z18.literal("StartNewTask" /* StartNewTask */),
- data: z18.object({
+ var taskCommandSchema = z19.discriminatedUnion("commandName", [
+ z19.object({
+ commandName: z19.literal("StartNewTask" /* StartNewTask */),
+ data: z19.object({
  configuration: rooCodeSettingsSchema,
- text: z18.string(),
- images: z18.array(z18.string()).optional(),
- newTab: z18.boolean().optional()
+ text: z19.string(),
+ images: z19.array(z19.string()).optional(),
+ newTab: z19.boolean().optional()
  })
  }),
- z18.object({
- commandName: z18.literal("CancelTask" /* CancelTask */),
- data: z18.string()
+ z19.object({
+ commandName: z19.literal("CancelTask" /* CancelTask */),
+ data: z19.string()
  }),
- z18.object({
- commandName: z18.literal("CloseTask" /* CloseTask */),
- data: z18.string()
+ z19.object({
+ commandName: z19.literal("CloseTask" /* CloseTask */),
+ data: z19.string()
  }),
- z18.object({
- commandName: z18.literal("ResumeTask" /* ResumeTask */),
- data: z18.string()
+ z19.object({
+ commandName: z19.literal("ResumeTask" /* ResumeTask */),
+ data: z19.string()
  }),
- z18.object({
- commandName: z18.literal("SendMessage" /* SendMessage */),
- data: z18.object({
- text: z18.string().optional(),
- images: z18.array(z18.string()).optional()
+ z19.object({
+ commandName: z19.literal("SendMessage" /* SendMessage */),
+ data: z19.object({
+ text: z19.string().optional(),
+ images: z19.array(z19.string()).optional()
  })
  })
  ]);
- var ipcMessageSchema = z18.discriminatedUnion("type", [
- z18.object({
- type: z18.literal("Ack" /* Ack */),
- origin: z18.literal("server" /* Server */),
+ var ipcMessageSchema = z19.discriminatedUnion("type", [
+ z19.object({
+ type: z19.literal("Ack" /* Ack */),
+ origin: z19.literal("server" /* Server */),
  data: ackSchema
  }),
- z18.object({
- type: z18.literal("TaskCommand" /* TaskCommand */),
- origin: z18.literal("client" /* Client */),
- clientId: z18.string(),
+ z19.object({
+ type: z19.literal("TaskCommand" /* TaskCommand */),
+ origin: z19.literal("client" /* Client */),
+ clientId: z19.string(),
  data: taskCommandSchema
  }),
- z18.object({
- type: z18.literal("TaskEvent" /* TaskEvent */),
- origin: z18.literal("server" /* Server */),
- relayClientId: z18.string().optional(),
+ z19.object({
+ type: z19.literal("TaskEvent" /* TaskEvent */),
+ origin: z19.literal("server" /* Server */),
+ relayClientId: z19.string().optional(),
  data: taskEventSchema
  })
  ]);

  // src/mcp.ts
- import { z as z19 } from "zod";
- var mcpExecutionStatusSchema = z19.discriminatedUnion("status", [
- z19.object({
- executionId: z19.string(),
- status: z19.literal("started"),
- serverName: z19.string(),
- toolName: z19.string()
+ import { z as z20 } from "zod";
+ var mcpExecutionStatusSchema = z20.discriminatedUnion("status", [
+ z20.object({
+ executionId: z20.string(),
+ status: z20.literal("started"),
+ serverName: z20.string(),
+ toolName: z20.string()
  }),
- z19.object({
- executionId: z19.string(),
- status: z19.literal("output"),
- response: z19.string()
+ z20.object({
+ executionId: z20.string(),
+ status: z20.literal("output"),
+ response: z20.string()
  }),
- z19.object({
- executionId: z19.string(),
- status: z19.literal("completed"),
- response: z19.string().optional()
+ z20.object({
+ executionId: z20.string(),
+ status: z20.literal("completed"),
+ response: z20.string().optional()
  }),
- z19.object({
- executionId: z19.string(),
- status: z19.literal("error"),
- error: z19.string().optional()
+ z20.object({
+ executionId: z20.string(),
+ status: z20.literal("error"),
+ error: z20.string().optional()
  })
  ]);

@@ -7014,40 +7155,40 @@ function shouldUseSingleFileRead(modelId) {
  }

  // src/todo.ts
- import { z as z20 } from "zod";
- var todoStatusSchema = z20.enum(["pending", "in_progress", "completed"]);
- var todoItemSchema = z20.object({
- id: z20.string(),
- content: z20.string(),
+ import { z as z21 } from "zod";
+ var todoStatusSchema = z21.enum(["pending", "in_progress", "completed"]);
+ var todoItemSchema = z21.object({
+ id: z21.string(),
+ content: z21.string(),
  status: todoStatusSchema
  });

  // src/terminal.ts
- import { z as z21 } from "zod";
- var commandExecutionStatusSchema = z21.discriminatedUnion("status", [
- z21.object({
- executionId: z21.string(),
- status: z21.literal("started"),
- pid: z21.number().optional(),
- command: z21.string()
+ import { z as z22 } from "zod";
+ var commandExecutionStatusSchema = z22.discriminatedUnion("status", [
+ z22.object({
+ executionId: z22.string(),
+ status: z22.literal("started"),
+ pid: z22.number().optional(),
+ command: z22.string()
  }),
- z21.object({
- executionId: z21.string(),
- status: z21.literal("output"),
- output: z21.string()
+ z22.object({
+ executionId: z22.string(),
+ status: z22.literal("output"),
+ output: z22.string()
  }),
- z21.object({
- executionId: z21.string(),
- status: z21.literal("exited"),
- exitCode: z21.number().optional()
+ z22.object({
+ executionId: z22.string(),
+ status: z22.literal("exited"),
+ exitCode: z22.number().optional()
  }),
- z21.object({
- executionId: z21.string(),
- status: z21.literal("fallback")
+ z22.object({
+ executionId: z22.string(),
+ status: z22.literal("fallback")
  }),
- z21.object({
- executionId: z21.string(),
- status: z21.literal("timeout")
+ z22.object({
+ executionId: z22.string(),
+ status: z22.literal("timeout")
  })
  ]);
  export {
@@ -7068,6 +7209,7 @@ export {
  CONTEXT_MANAGEMENT_EVENTS,
  COOKIE_CONSENT_EVENTS,
  ConnectionState,
+ ConsecutiveMistakeError,
  DEEP_SEEK_DEFAULT_TEMPERATURE,
  DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
@@ -7108,6 +7250,7 @@ export {
  MISTRAL_DEFAULT_TEMPERATURE,
  MODELS_BY_PROVIDER,
  MOONSHOT_DEFAULT_TEMPERATURE,
+ NATIVE_TOOL_DEFAULTS,
  OPENAI_AZURE_AI_INFERENCE_PATH,
  OPENAI_NATIVE_DEFAULT_TEMPERATURE,
  OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -7133,6 +7276,7 @@ export {
  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
  VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
+ VERTEX_1M_CONTEXT_MODEL_IDS,
  VERTEX_REGIONS,
  ZAI_DEFAULT_TEMPERATURE,
  ackSchema,
@@ -7175,6 +7319,7 @@ export {
  deepInfraDefaultModelInfo,
  deepSeekDefaultModelId,
  deepSeekModels,
+ defineCustomTool,
  discriminatedProviderSettingsWithIdSchema,
  doubaoDefaultModelId,
  doubaoDefaultModelInfo,
@@ -7188,6 +7333,7 @@ export {
  extensionBridgeEventSchema,
  extensionInstanceSchema,
  extractApiProviderErrorProperties,
+ extractConsecutiveMistakeErrorProperties,
  extractMessageFromJsonPayload,
  fauxProviders,
  featherlessDefaultModelId,
@@ -7222,6 +7368,7 @@ export {
  ioIntelligenceModels,
  ipcMessageSchema,
  isApiProviderError,
+ isConsecutiveMistakeError,
  isContextManagementEvent,
  isCustomProvider,
  isDynamicProvider,
@@ -7282,6 +7429,7 @@ export {
  organizationDefaultSettingsSchema,
  organizationFeaturesSchema,
  organizationSettingsSchema,
+ z17 as parametersSchema,
  promptComponentSchema,
  providerNames,
  providerNamesSchema,