@roo-code/types 1.94.0 → 1.95.0

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
package/dist/index.js CHANGED
@@ -826,6 +826,7 @@ var basetenModels = {
826
826
  contextWindow: 163840,
827
827
  supportsImages: false,
828
828
  supportsPromptCache: false,
829
+ supportsNativeTools: true,
829
830
  inputPrice: 2.55,
830
831
  outputPrice: 5.95,
831
832
  cacheWritesPrice: 0,
@@ -837,6 +838,7 @@ var basetenModels = {
837
838
  contextWindow: 163840,
838
839
  supportsImages: false,
839
840
  supportsPromptCache: false,
841
+ supportsNativeTools: true,
840
842
  inputPrice: 2.55,
841
843
  outputPrice: 5.95,
842
844
  cacheWritesPrice: 0,
@@ -848,6 +850,7 @@ var basetenModels = {
848
850
  contextWindow: 163840,
849
851
  supportsImages: false,
850
852
  supportsPromptCache: false,
853
+ supportsNativeTools: true,
851
854
  inputPrice: 0.77,
852
855
  outputPrice: 0.77,
853
856
  cacheWritesPrice: 0,
@@ -859,6 +862,7 @@ var basetenModels = {
859
862
  contextWindow: 163840,
860
863
  supportsImages: false,
861
864
  supportsPromptCache: false,
865
+ supportsNativeTools: true,
862
866
  inputPrice: 0.5,
863
867
  outputPrice: 1.5,
864
868
  cacheWritesPrice: 0,
@@ -877,11 +881,24 @@ var basetenModels = {
877
881
  cacheReadsPrice: 0,
878
882
  description: "DeepSeek's hybrid reasoning model with efficient long context scaling with GPT-5 level performance"
879
883
  },
884
+ "openai/gpt-oss-120b": {
885
+ maxTokens: 16384,
886
+ contextWindow: 128072,
887
+ supportsImages: false,
888
+ supportsPromptCache: false,
889
+ supportsNativeTools: true,
890
+ inputPrice: 0.1,
891
+ outputPrice: 0.5,
892
+ cacheWritesPrice: 0,
893
+ cacheReadsPrice: 0,
894
+ description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
895
+ },
880
896
  "Qwen/Qwen3-235B-A22B-Instruct-2507": {
881
897
  maxTokens: 16384,
882
898
  contextWindow: 262144,
883
899
  supportsImages: false,
884
900
  supportsPromptCache: false,
901
+ supportsNativeTools: true,
885
902
  inputPrice: 0.22,
886
903
  outputPrice: 0.8,
887
904
  cacheWritesPrice: 0,
@@ -893,24 +910,13 @@ var basetenModels = {
893
910
  contextWindow: 262144,
894
911
  supportsImages: false,
895
912
  supportsPromptCache: false,
913
+ supportsNativeTools: true,
896
914
  inputPrice: 0.38,
897
915
  outputPrice: 1.53,
898
916
  cacheWritesPrice: 0,
899
917
  cacheReadsPrice: 0,
900
918
  description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
901
919
  },
902
- "openai/gpt-oss-120b": {
903
- maxTokens: 16384,
904
- contextWindow: 128072,
905
- supportsImages: false,
906
- supportsPromptCache: false,
907
- supportsNativeTools: true,
908
- inputPrice: 0.1,
909
- outputPrice: 0.5,
910
- cacheWritesPrice: 0,
911
- cacheReadsPrice: 0,
912
- description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
913
- },
914
920
  "moonshotai/Kimi-K2-Instruct-0905": {
915
921
  maxTokens: 16384,
916
922
  contextWindow: 262e3,
@@ -1382,22 +1388,6 @@ var bedrockModels = {
1382
1388
  outputPrice: 0.6,
1383
1389
  description: "Amazon Titan Text Express"
1384
1390
  },
1385
- "amazon.titan-text-embeddings-v1:0": {
1386
- maxTokens: 8192,
1387
- contextWindow: 8e3,
1388
- supportsImages: false,
1389
- supportsPromptCache: false,
1390
- inputPrice: 0.1,
1391
- description: "Amazon Titan Text Embeddings"
1392
- },
1393
- "amazon.titan-text-embeddings-v2:0": {
1394
- maxTokens: 8192,
1395
- contextWindow: 8e3,
1396
- supportsImages: false,
1397
- supportsPromptCache: false,
1398
- inputPrice: 0.02,
1399
- description: "Amazon Titan Text Embeddings V2"
1400
- },
1401
1391
  "moonshot.kimi-k2-thinking": {
1402
1392
  maxTokens: 32e3,
1403
1393
  contextWindow: 262144,
@@ -2217,6 +2207,7 @@ var featherlessModels = {
2217
2207
  contextWindow: 32678,
2218
2208
  supportsImages: false,
2219
2209
  supportsPromptCache: false,
2210
+ supportsNativeTools: true,
2220
2211
  inputPrice: 0,
2221
2212
  outputPrice: 0,
2222
2213
  description: "DeepSeek V3 0324 model."
@@ -2226,6 +2217,7 @@ var featherlessModels = {
2226
2217
  contextWindow: 32678,
2227
2218
  supportsImages: false,
2228
2219
  supportsPromptCache: false,
2220
+ supportsNativeTools: true,
2229
2221
  inputPrice: 0,
2230
2222
  outputPrice: 0,
2231
2223
  description: "DeepSeek R1 0528 model."
@@ -2245,6 +2237,7 @@ var featherlessModels = {
2245
2237
  contextWindow: 32678,
2246
2238
  supportsImages: false,
2247
2239
  supportsPromptCache: false,
2240
+ supportsNativeTools: true,
2248
2241
  inputPrice: 0,
2249
2242
  outputPrice: 0,
2250
2243
  description: "GPT-OSS 120B model."
@@ -2260,7 +2253,7 @@ var featherlessModels = {
2260
2253
  description: "Qwen3 Coder 480B A35B Instruct model."
2261
2254
  }
2262
2255
  };
2263
- var featherlessDefaultModelId = "deepseek-ai/DeepSeek-R1-0528";
2256
+ var featherlessDefaultModelId = "moonshotai/Kimi-K2-Instruct";
2264
2257
 
2265
2258
  // src/providers/fireworks.ts
2266
2259
  var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct-0905";
@@ -2705,33 +2698,6 @@ var groqModels = {
2705
2698
  outputPrice: 0.34,
2706
2699
  description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
2707
2700
  },
2708
- "meta-llama/llama-4-maverick-17b-128e-instruct": {
2709
- maxTokens: 8192,
2710
- contextWindow: 131072,
2711
- supportsImages: false,
2712
- supportsPromptCache: false,
2713
- inputPrice: 0.2,
2714
- outputPrice: 0.6,
2715
- description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
2716
- },
2717
- "mistral-saba-24b": {
2718
- maxTokens: 8192,
2719
- contextWindow: 32768,
2720
- supportsImages: false,
2721
- supportsPromptCache: false,
2722
- inputPrice: 0.79,
2723
- outputPrice: 0.79,
2724
- description: "Mistral Saba 24B model, 32K context."
2725
- },
2726
- "qwen-qwq-32b": {
2727
- maxTokens: 8192,
2728
- contextWindow: 131072,
2729
- supportsImages: false,
2730
- supportsPromptCache: false,
2731
- inputPrice: 0.29,
2732
- outputPrice: 0.39,
2733
- description: "Alibaba Qwen QwQ 32B model, 128K context."
2734
- },
2735
2701
  "qwen/qwen3-32b": {
2736
2702
  maxTokens: 8192,
2737
2703
  contextWindow: 131072,
@@ -2743,26 +2709,6 @@ var groqModels = {
2743
2709
  outputPrice: 0.59,
2744
2710
  description: "Alibaba Qwen 3 32B model, 128K context."
2745
2711
  },
2746
- "deepseek-r1-distill-llama-70b": {
2747
- maxTokens: 8192,
2748
- contextWindow: 131072,
2749
- supportsImages: false,
2750
- supportsPromptCache: false,
2751
- inputPrice: 0.75,
2752
- outputPrice: 0.99,
2753
- description: "DeepSeek R1 Distill Llama 70B model, 128K context."
2754
- },
2755
- "moonshotai/kimi-k2-instruct": {
2756
- maxTokens: 16384,
2757
- contextWindow: 131072,
2758
- supportsImages: false,
2759
- supportsPromptCache: true,
2760
- inputPrice: 1,
2761
- outputPrice: 3,
2762
- cacheReadsPrice: 0.5,
2763
- // 50% discount for cached input tokens
2764
- description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
2765
- },
2766
2712
  "moonshotai/kimi-k2-instruct-0905": {
2767
2713
  maxTokens: 16384,
2768
2714
  contextWindow: 262144,
@@ -3837,15 +3783,6 @@ var sambaNovaModels = {
3837
3783
  outputPrice: 4.5,
3838
3784
  description: "DeepSeek V3.1 model with 32K context window."
3839
3785
  },
3840
- "DeepSeek-R1-Distill-Llama-70B": {
3841
- maxTokens: 8192,
3842
- contextWindow: 131072,
3843
- supportsImages: false,
3844
- supportsPromptCache: false,
3845
- inputPrice: 0.7,
3846
- outputPrice: 1.4,
3847
- description: "DeepSeek R1 distilled Llama 70B model with 128K context window."
3848
- },
3849
3786
  "Llama-4-Maverick-17B-128E-Instruct": {
3850
3787
  maxTokens: 8192,
3851
3788
  contextWindow: 131072,
@@ -3857,15 +3794,6 @@ var sambaNovaModels = {
3857
3794
  outputPrice: 1.8,
3858
3795
  description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
3859
3796
  },
3860
- "Llama-3.3-Swallow-70B-Instruct-v0.4": {
3861
- maxTokens: 8192,
3862
- contextWindow: 16384,
3863
- supportsImages: false,
3864
- supportsPromptCache: false,
3865
- inputPrice: 0.6,
3866
- outputPrice: 1.2,
3867
- description: "Tokyotech Llama 3.3 Swallow 70B Instruct v0.4 model with 16K context window."
3868
- },
3869
3797
  "Qwen3-32B": {
3870
3798
  maxTokens: 8192,
3871
3799
  contextWindow: 8192,
@@ -4398,6 +4326,7 @@ var vertexModels = {
4398
4326
  contextWindow: 131072,
4399
4327
  supportsImages: false,
4400
4328
  supportsPromptCache: false,
4329
+ supportsNativeTools: true,
4401
4330
  inputPrice: 0.35,
4402
4331
  outputPrice: 1.15,
4403
4332
  description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
@@ -4407,6 +4336,7 @@ var vertexModels = {
4407
4336
  contextWindow: 163840,
4408
4337
  supportsImages: false,
4409
4338
  supportsPromptCache: false,
4339
+ supportsNativeTools: true,
4410
4340
  inputPrice: 1.35,
4411
4341
  outputPrice: 5.4,
4412
4342
  description: "DeepSeek R1 (0528). Available in us-central1"
@@ -4416,6 +4346,7 @@ var vertexModels = {
4416
4346
  contextWindow: 163840,
4417
4347
  supportsImages: false,
4418
4348
  supportsPromptCache: false,
4349
+ supportsNativeTools: true,
4419
4350
  inputPrice: 0.6,
4420
4351
  outputPrice: 1.7,
4421
4352
  description: "DeepSeek V3.1. Available in us-west2"
@@ -4425,6 +4356,7 @@ var vertexModels = {
4425
4356
  contextWindow: 131072,
4426
4357
  supportsImages: false,
4427
4358
  supportsPromptCache: false,
4359
+ supportsNativeTools: true,
4428
4360
  inputPrice: 0.15,
4429
4361
  outputPrice: 0.6,
4430
4362
  description: "OpenAI gpt-oss 120B. Available in us-central1"
@@ -4434,6 +4366,7 @@ var vertexModels = {
4434
4366
  contextWindow: 131072,
4435
4367
  supportsImages: false,
4436
4368
  supportsPromptCache: false,
4369
+ supportsNativeTools: true,
4437
4370
  inputPrice: 0.075,
4438
4371
  outputPrice: 0.3,
4439
4372
  description: "OpenAI gpt-oss 20B. Available in us-central1"
@@ -4443,6 +4376,7 @@ var vertexModels = {
4443
4376
  contextWindow: 262144,
4444
4377
  supportsImages: false,
4445
4378
  supportsPromptCache: false,
4379
+ supportsNativeTools: true,
4446
4380
  inputPrice: 1,
4447
4381
  outputPrice: 4,
4448
4382
  description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
@@ -4452,6 +4386,7 @@ var vertexModels = {
4452
4386
  contextWindow: 262144,
4453
4387
  supportsImages: false,
4454
4388
  supportsPromptCache: false,
4389
+ supportsNativeTools: true,
4455
4390
  inputPrice: 0.25,
4456
4391
  outputPrice: 1,
4457
4392
  description: "Qwen3 235B A22B Instruct. Available in us-south1"
@@ -4892,6 +4827,7 @@ var vercelAiGatewayDefaultModelInfo = {
4892
4827
  contextWindow: 2e5,
4893
4828
  supportsImages: true,
4894
4829
  supportsPromptCache: true,
4830
+ supportsNativeTools: true,
4895
4831
  inputPrice: 3,
4896
4832
  outputPrice: 15,
4897
4833
  cacheWritesPrice: 3.75,
@@ -4904,7 +4840,7 @@ var VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE = 0.7;
4904
4840
  var internationalZAiDefaultModelId = "glm-4.6";
4905
4841
  var internationalZAiModels = {
4906
4842
  "glm-4.5": {
4907
- maxTokens: 98304,
4843
+ maxTokens: 16384,
4908
4844
  contextWindow: 131072,
4909
4845
  supportsImages: false,
4910
4846
  supportsPromptCache: true,
@@ -4917,7 +4853,7 @@ var internationalZAiModels = {
4917
4853
  description: "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k."
4918
4854
  },
4919
4855
  "glm-4.5-air": {
4920
- maxTokens: 98304,
4856
+ maxTokens: 16384,
4921
4857
  contextWindow: 131072,
4922
4858
  supportsImages: false,
4923
4859
  supportsPromptCache: true,
@@ -4930,7 +4866,7 @@ var internationalZAiModels = {
4930
4866
  description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models."
4931
4867
  },
4932
4868
  "glm-4.5-x": {
4933
- maxTokens: 98304,
4869
+ maxTokens: 16384,
4934
4870
  contextWindow: 131072,
4935
4871
  supportsImages: false,
4936
4872
  supportsPromptCache: true,
@@ -4943,7 +4879,7 @@ var internationalZAiModels = {
4943
4879
  description: "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses."
4944
4880
  },
4945
4881
  "glm-4.5-airx": {
4946
- maxTokens: 98304,
4882
+ maxTokens: 16384,
4947
4883
  contextWindow: 131072,
4948
4884
  supportsImages: false,
4949
4885
  supportsPromptCache: true,
@@ -4956,7 +4892,7 @@ var internationalZAiModels = {
4956
4892
  description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost."
4957
4893
  },
4958
4894
  "glm-4.5-flash": {
4959
- maxTokens: 98304,
4895
+ maxTokens: 16384,
4960
4896
  contextWindow: 131072,
4961
4897
  supportsImages: false,
4962
4898
  supportsPromptCache: true,
@@ -4982,7 +4918,7 @@ var internationalZAiModels = {
4982
4918
  description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
4983
4919
  },
4984
4920
  "glm-4.6": {
4985
- maxTokens: 98304,
4921
+ maxTokens: 16384,
4986
4922
  contextWindow: 2e5,
4987
4923
  supportsImages: false,
4988
4924
  supportsPromptCache: true,
@@ -4994,8 +4930,24 @@ var internationalZAiModels = {
4994
4930
  cacheReadsPrice: 0.11,
4995
4931
  description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations."
4996
4932
  },
4933
+ "glm-4.7": {
4934
+ maxTokens: 16384,
4935
+ contextWindow: 2e5,
4936
+ supportsImages: false,
4937
+ supportsPromptCache: true,
4938
+ supportsNativeTools: true,
4939
+ defaultToolProtocol: "native",
4940
+ supportsReasoningEffort: ["disable", "medium"],
4941
+ reasoningEffort: "medium",
4942
+ preserveReasoning: true,
4943
+ inputPrice: 0.6,
4944
+ outputPrice: 2.2,
4945
+ cacheWritesPrice: 0,
4946
+ cacheReadsPrice: 0.11,
4947
+ description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
4948
+ },
4997
4949
  "glm-4-32b-0414-128k": {
4998
- maxTokens: 98304,
4950
+ maxTokens: 16384,
4999
4951
  contextWindow: 131072,
5000
4952
  supportsImages: false,
5001
4953
  supportsPromptCache: false,
@@ -5011,7 +4963,7 @@ var internationalZAiModels = {
5011
4963
  var mainlandZAiDefaultModelId = "glm-4.6";
5012
4964
  var mainlandZAiModels = {
5013
4965
  "glm-4.5": {
5014
- maxTokens: 98304,
4966
+ maxTokens: 16384,
5015
4967
  contextWindow: 131072,
5016
4968
  supportsImages: false,
5017
4969
  supportsPromptCache: true,
@@ -5024,7 +4976,7 @@ var mainlandZAiModels = {
5024
4976
  description: "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k."
5025
4977
  },
5026
4978
  "glm-4.5-air": {
5027
- maxTokens: 98304,
4979
+ maxTokens: 16384,
5028
4980
  contextWindow: 131072,
5029
4981
  supportsImages: false,
5030
4982
  supportsPromptCache: true,
@@ -5037,7 +4989,7 @@ var mainlandZAiModels = {
5037
4989
  description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models."
5038
4990
  },
5039
4991
  "glm-4.5-x": {
5040
- maxTokens: 98304,
4992
+ maxTokens: 16384,
5041
4993
  contextWindow: 131072,
5042
4994
  supportsImages: false,
5043
4995
  supportsPromptCache: true,
@@ -5050,7 +5002,7 @@ var mainlandZAiModels = {
5050
5002
  description: "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses."
5051
5003
  },
5052
5004
  "glm-4.5-airx": {
5053
- maxTokens: 98304,
5005
+ maxTokens: 16384,
5054
5006
  contextWindow: 131072,
5055
5007
  supportsImages: false,
5056
5008
  supportsPromptCache: true,
@@ -5063,7 +5015,7 @@ var mainlandZAiModels = {
5063
5015
  description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost."
5064
5016
  },
5065
5017
  "glm-4.5-flash": {
5066
- maxTokens: 98304,
5018
+ maxTokens: 16384,
5067
5019
  contextWindow: 131072,
5068
5020
  supportsImages: false,
5069
5021
  supportsPromptCache: true,
@@ -5089,7 +5041,7 @@ var mainlandZAiModels = {
5089
5041
  description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
5090
5042
  },
5091
5043
  "glm-4.6": {
5092
- maxTokens: 98304,
5044
+ maxTokens: 16384,
5093
5045
  contextWindow: 204800,
5094
5046
  supportsImages: false,
5095
5047
  supportsPromptCache: true,
@@ -5100,6 +5052,22 @@ var mainlandZAiModels = {
5100
5052
  cacheWritesPrice: 0,
5101
5053
  cacheReadsPrice: 0.057,
5102
5054
  description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations."
5055
+ },
5056
+ "glm-4.7": {
5057
+ maxTokens: 16384,
5058
+ contextWindow: 204800,
5059
+ supportsImages: false,
5060
+ supportsPromptCache: true,
5061
+ supportsNativeTools: true,
5062
+ defaultToolProtocol: "native",
5063
+ supportsReasoningEffort: ["disable", "medium"],
5064
+ reasoningEffort: "medium",
5065
+ preserveReasoning: true,
5066
+ inputPrice: 0.29,
5067
+ outputPrice: 1.14,
5068
+ cacheWritesPrice: 0,
5069
+ cacheReadsPrice: 0.057,
5070
+ description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
5103
5071
  }
5104
5072
  };
5105
5073
  var ZAI_DEFAULT_TEMPERATURE = 0.6;
@@ -5149,6 +5117,8 @@ var minimaxModels = {
5149
5117
  supportsPromptCache: true,
5150
5118
  supportsNativeTools: true,
5151
5119
  defaultToolProtocol: "native",
5120
+ includedTools: ["search_and_replace"],
5121
+ excludedTools: ["apply_diff"],
5152
5122
  preserveReasoning: true,
5153
5123
  inputPrice: 0.3,
5154
5124
  outputPrice: 1.2,
@@ -5163,12 +5133,30 @@ var minimaxModels = {
5163
5133
  supportsPromptCache: true,
5164
5134
  supportsNativeTools: true,
5165
5135
  defaultToolProtocol: "native",
5136
+ includedTools: ["search_and_replace"],
5137
+ excludedTools: ["apply_diff"],
5166
5138
  preserveReasoning: true,
5167
5139
  inputPrice: 0.3,
5168
5140
  outputPrice: 1.2,
5169
5141
  cacheWritesPrice: 0.375,
5170
5142
  cacheReadsPrice: 0.03,
5171
5143
  description: "MiniMax M2 Stable (High Concurrency, Commercial Use), a model born for Agents and code, featuring Top-tier Coding Capabilities, Powerful Agentic Performance, and Ultimate Cost-Effectiveness & Speed."
5144
+ },
5145
+ "MiniMax-M2.1": {
5146
+ maxTokens: 16384,
5147
+ contextWindow: 192e3,
5148
+ supportsImages: false,
5149
+ supportsPromptCache: true,
5150
+ supportsNativeTools: true,
5151
+ defaultToolProtocol: "native",
5152
+ includedTools: ["search_and_replace"],
5153
+ excludedTools: ["apply_diff"],
5154
+ preserveReasoning: true,
5155
+ inputPrice: 0.3,
5156
+ outputPrice: 1.2,
5157
+ cacheWritesPrice: 0.375,
5158
+ cacheReadsPrice: 0.03,
5159
+ description: "MiniMax M2.1 builds on M2 with improved overall performance for agentic coding tasks and significantly faster response times."
5172
5160
  }
5173
5161
  };
5174
5162
  var minimaxDefaultModelInfo = minimaxModels[minimaxDefaultModelId];
@@ -5831,7 +5819,8 @@ var experimentIds = [
5831
5819
  "preventFocusDisruption",
5832
5820
  "imageGeneration",
5833
5821
  "runSlashCommand",
5834
- "multipleNativeToolCalls"
5822
+ "multipleNativeToolCalls",
5823
+ "customTools"
5835
5824
  ];
5836
5825
  var experimentIdsSchema = z10.enum(experimentIds);
5837
5826
  var experimentsSchema = z10.object({
@@ -5840,7 +5829,8 @@ var experimentsSchema = z10.object({
5840
5829
  preventFocusDisruption: z10.boolean().optional(),
5841
5830
  imageGeneration: z10.boolean().optional(),
5842
5831
  runSlashCommand: z10.boolean().optional(),
5843
- multipleNativeToolCalls: z10.boolean().optional()
5832
+ multipleNativeToolCalls: z10.boolean().optional(),
5833
+ customTools: z10.boolean().optional()
5844
5834
  });
5845
5835
 
5846
5836
  // src/telemetry.ts