@roo-code/types 1.93.0 → 1.95.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -37,6 +37,7 @@ __export(index_exports, {
  CONTEXT_MANAGEMENT_EVENTS: () => CONTEXT_MANAGEMENT_EVENTS,
  COOKIE_CONSENT_EVENTS: () => COOKIE_CONSENT_EVENTS,
  ConnectionState: () => ConnectionState,
+ ConsecutiveMistakeError: () => ConsecutiveMistakeError,
  DEEP_SEEK_DEFAULT_TEMPERATURE: () => DEEP_SEEK_DEFAULT_TEMPERATURE,
  DEFAULT_CHECKPOINT_TIMEOUT_SECONDS: () => DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT: () => DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
@@ -77,6 +78,7 @@ __export(index_exports, {
  MISTRAL_DEFAULT_TEMPERATURE: () => MISTRAL_DEFAULT_TEMPERATURE,
  MODELS_BY_PROVIDER: () => MODELS_BY_PROVIDER,
  MOONSHOT_DEFAULT_TEMPERATURE: () => MOONSHOT_DEFAULT_TEMPERATURE,
+ NATIVE_TOOL_DEFAULTS: () => NATIVE_TOOL_DEFAULTS,
  OPENAI_AZURE_AI_INFERENCE_PATH: () => OPENAI_AZURE_AI_INFERENCE_PATH,
  OPENAI_NATIVE_DEFAULT_TEMPERATURE: () => OPENAI_NATIVE_DEFAULT_TEMPERATURE,
  OPENROUTER_DEFAULT_PROVIDER_NAME: () => OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -102,6 +104,7 @@ __export(index_exports, {
  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS: () => VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS: () => VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
  VERCEL_AI_GATEWAY_VISION_ONLY_MODELS: () => VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
+ VERTEX_1M_CONTEXT_MODEL_IDS: () => VERTEX_1M_CONTEXT_MODEL_IDS,
  VERTEX_REGIONS: () => VERTEX_REGIONS,
  ZAI_DEFAULT_TEMPERATURE: () => ZAI_DEFAULT_TEMPERATURE,
  ackSchema: () => ackSchema,
@@ -144,6 +147,7 @@ __export(index_exports, {
  deepInfraDefaultModelInfo: () => deepInfraDefaultModelInfo,
  deepSeekDefaultModelId: () => deepSeekDefaultModelId,
  deepSeekModels: () => deepSeekModels,
+ defineCustomTool: () => defineCustomTool,
  discriminatedProviderSettingsWithIdSchema: () => discriminatedProviderSettingsWithIdSchema,
  doubaoDefaultModelId: () => doubaoDefaultModelId,
  doubaoDefaultModelInfo: () => doubaoDefaultModelInfo,
@@ -157,6 +161,7 @@ __export(index_exports, {
  extensionBridgeEventSchema: () => extensionBridgeEventSchema,
  extensionInstanceSchema: () => extensionInstanceSchema,
  extractApiProviderErrorProperties: () => extractApiProviderErrorProperties,
+ extractConsecutiveMistakeErrorProperties: () => extractConsecutiveMistakeErrorProperties,
  extractMessageFromJsonPayload: () => extractMessageFromJsonPayload,
  fauxProviders: () => fauxProviders,
  featherlessDefaultModelId: () => featherlessDefaultModelId,
@@ -191,6 +196,7 @@ __export(index_exports, {
  ioIntelligenceModels: () => ioIntelligenceModels,
  ipcMessageSchema: () => ipcMessageSchema,
  isApiProviderError: () => isApiProviderError,
+ isConsecutiveMistakeError: () => isConsecutiveMistakeError,
  isContextManagementEvent: () => isContextManagementEvent,
  isCustomProvider: () => isCustomProvider,
  isDynamicProvider: () => isDynamicProvider,
@@ -251,6 +257,7 @@ __export(index_exports, {
  organizationDefaultSettingsSchema: () => organizationDefaultSettingsSchema,
  organizationFeaturesSchema: () => organizationFeaturesSchema,
  organizationSettingsSchema: () => organizationSettingsSchema,
+ parametersSchema: () => import_v4.z,
  promptComponentSchema: () => promptComponentSchema,
  providerNames: () => providerNames,
  providerNamesSchema: () => providerNamesSchema,
@@ -476,6 +483,7 @@ var toolNames = [
  "apply_diff",
  "search_and_replace",
  "search_replace",
+ "edit_file",
  "apply_patch",
  "search_files",
  "list_files",
@@ -504,6 +512,10 @@ var TOOL_PROTOCOL = {
  XML: "xml",
  NATIVE: "native"
  };
+ var NATIVE_TOOL_DEFAULTS = {
+ supportsNativeTools: true,
+ defaultToolProtocol: TOOL_PROTOCOL.NATIVE
+ };
  function isNativeProtocol(protocol) {
  return protocol === TOOL_PROTOCOL.NATIVE;
  }
@@ -1148,6 +1160,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.55,
  outputPrice: 5.95,
  cacheWritesPrice: 0,
@@ -1159,6 +1172,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.55,
  outputPrice: 5.95,
  cacheWritesPrice: 0,
@@ -1170,6 +1184,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.77,
  outputPrice: 0.77,
  cacheWritesPrice: 0,
@@ -1181,6 +1196,7 @@ var basetenModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.5,
  outputPrice: 1.5,
  cacheWritesPrice: 0,
@@ -1199,11 +1215,24 @@ var basetenModels = {
  cacheReadsPrice: 0,
  description: "DeepSeek's hybrid reasoning model with efficient long context scaling with GPT-5 level performance"
  },
+ "openai/gpt-oss-120b": {
+ maxTokens: 16384,
+ contextWindow: 128072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.1,
+ outputPrice: 0.5,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
+ },
  "Qwen/Qwen3-235B-A22B-Instruct-2507": {
  maxTokens: 16384,
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.22,
  outputPrice: 0.8,
  cacheWritesPrice: 0,
@@ -1215,24 +1244,13 @@ var basetenModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.38,
  outputPrice: 1.53,
  cacheWritesPrice: 0,
  cacheReadsPrice: 0,
  description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
  },
- "openai/gpt-oss-120b": {
- maxTokens: 16384,
- contextWindow: 128072,
- supportsImages: false,
- supportsPromptCache: false,
- supportsNativeTools: true,
- inputPrice: 0.1,
- outputPrice: 0.5,
- cacheWritesPrice: 0,
- cacheReadsPrice: 0,
- description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
- },
  "moonshotai/Kimi-K2-Instruct-0905": {
  maxTokens: 16384,
  contextWindow: 262e3,
@@ -1704,22 +1722,6 @@ var bedrockModels = {
  outputPrice: 0.6,
  description: "Amazon Titan Text Express"
  },
- "amazon.titan-text-embeddings-v1:0": {
- maxTokens: 8192,
- contextWindow: 8e3,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.1,
- description: "Amazon Titan Text Embeddings"
- },
- "amazon.titan-text-embeddings-v2:0": {
- maxTokens: 8192,
- contextWindow: 8e3,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.02,
- description: "Amazon Titan Text Embeddings V2"
- },
  "moonshot.kimi-k2-thinking": {
  maxTokens: 32e3,
  contextWindow: 262144,
@@ -2539,6 +2541,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek V3 0324 model."
@@ -2548,6 +2551,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "DeepSeek R1 0528 model."
@@ -2567,6 +2571,7 @@ var featherlessModels = {
  contextWindow: 32678,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "GPT-OSS 120B model."
@@ -2582,7 +2587,7 @@ var featherlessModels = {
  description: "Qwen3 Coder 480B A35B Instruct model."
  }
  };
- var featherlessDefaultModelId = "deepseek-ai/DeepSeek-R1-0528";
+ var featherlessDefaultModelId = "moonshotai/Kimi-K2-Instruct";

  // src/providers/fireworks.ts
  var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct-0905";
@@ -2746,6 +2751,7 @@ var geminiModels = {
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 4,
@@ -2773,6 +2779,7 @@ var geminiModels = {
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 0.3,
@@ -2789,6 +2796,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2820,6 +2828,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2850,6 +2859,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2878,6 +2888,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  // This is the pricing for prompts above 200k tokens.
  outputPrice: 15,
@@ -2909,6 +2920,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -2924,6 +2936,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -2939,6 +2952,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -2955,6 +2969,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
@@ -2970,6 +2985,7 @@ var geminiModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
@@ -3016,33 +3032,6 @@ var groqModels = {
  outputPrice: 0.34,
  description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
  },
- "meta-llama/llama-4-maverick-17b-128e-instruct": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.2,
- outputPrice: 0.6,
- description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
- },
- "mistral-saba-24b": {
- maxTokens: 8192,
- contextWindow: 32768,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.79,
- outputPrice: 0.79,
- description: "Mistral Saba 24B model, 32K context."
- },
- "qwen-qwq-32b": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.29,
- outputPrice: 0.39,
- description: "Alibaba Qwen QwQ 32B model, 128K context."
- },
  "qwen/qwen3-32b": {
  maxTokens: 8192,
  contextWindow: 131072,
@@ -3054,26 +3043,6 @@ var groqModels = {
  outputPrice: 0.59,
  description: "Alibaba Qwen 3 32B model, 128K context."
  },
- "deepseek-r1-distill-llama-70b": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.75,
- outputPrice: 0.99,
- description: "DeepSeek R1 Distill Llama 70B model, 128K context."
- },
- "moonshotai/kimi-k2-instruct": {
- maxTokens: 16384,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- inputPrice: 1,
- outputPrice: 3,
- cacheReadsPrice: 0.5,
- // 50% discount for cached input tokens
- description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
- },
  "moonshotai/kimi-k2-instruct-0905": {
  maxTokens: 16384,
  contextWindow: 262144,
@@ -3182,6 +3151,8 @@ var lMStudioDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -4008,6 +3979,8 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -4019,6 +3992,8 @@ var qwenCodeModels = {
  contextWindow: 1e6,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -4142,15 +4117,6 @@ var sambaNovaModels = {
  outputPrice: 4.5,
  description: "DeepSeek V3.1 model with 32K context window."
  },
- "DeepSeek-R1-Distill-Llama-70B": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.7,
- outputPrice: 1.4,
- description: "DeepSeek R1 distilled Llama 70B model with 128K context window."
- },
  "Llama-4-Maverick-17B-128E-Instruct": {
  maxTokens: 8192,
  contextWindow: 131072,
@@ -4162,15 +4128,6 @@ var sambaNovaModels = {
  outputPrice: 1.8,
  description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
  },
- "Llama-3.3-Swallow-70B-Instruct-v0.4": {
- maxTokens: 8192,
- contextWindow: 16384,
- supportsImages: false,
- supportsPromptCache: false,
- inputPrice: 0.6,
- outputPrice: 1.2,
- description: "Tokyotech Llama 3.3 Swallow 70B Instruct v0.4 model with 16K context window."
- },
  "Qwen3-32B": {
  maxTokens: 8192,
  contextWindow: 8192,
@@ -4222,6 +4179,7 @@ var vertexModels = {
  supportsReasoningEffort: ["low", "high"],
  reasoningEffort: "low",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 4,
@@ -4249,6 +4207,7 @@ var vertexModels = {
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
  reasoningEffort: "medium",
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  supportsTemperature: true,
  defaultTemperature: 1,
  inputPrice: 0.3,
@@ -4264,6 +4223,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 3.5,
  maxThinkingTokens: 24576,
@@ -4278,6 +4238,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 0.6
  },
@@ -4289,6 +4250,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.3,
  outputPrice: 2.5,
  cacheReadsPrice: 0.075,
@@ -4304,6 +4266,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 3.5,
  maxThinkingTokens: 24576,
@@ -4318,6 +4281,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 0.6
  },
@@ -4329,6 +4293,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15
  },
@@ -4340,6 +4305,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15
  },
@@ -4351,6 +4317,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15,
  maxThinkingTokens: 32768,
@@ -4364,6 +4331,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 2.5,
  outputPrice: 15,
  maxThinkingTokens: 32768,
@@ -4392,6 +4360,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0,
  outputPrice: 0
  },
@@ -4403,6 +4372,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0,
  outputPrice: 0
  },
@@ -4414,6 +4384,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.15,
  outputPrice: 0.6
  },
@@ -4425,6 +4396,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.075,
  outputPrice: 0.3
  },
@@ -4436,6 +4408,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0,
  outputPrice: 0
  },
@@ -4447,6 +4420,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.075,
  outputPrice: 0.3
  },
@@ -4458,36 +4432,83 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: false,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 1.25,
  outputPrice: 5
  },
  "claude-sonnet-4@20250514": {
  maxTokens: 8192,
  contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
+ // $3 per million input tokens (≤200K context)
  outputPrice: 15,
+ // $15 per million output tokens (≤200K context)
  cacheWritesPrice: 3.75,
+ // $3.75 per million tokens
  cacheReadsPrice: 0.3,
- supportsReasoningBudget: true
+ // $0.30 per million tokens
+ supportsReasoningBudget: true,
+ // Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 6,
+ // $6 per million input tokens (>200K context)
+ outputPrice: 22.5,
+ // $22.50 per million output tokens (>200K context)
+ cacheWritesPrice: 7.5,
+ // $7.50 per million tokens (>200K context)
+ cacheReadsPrice: 0.6
+ // $0.60 per million tokens (>200K context)
+ }
+ ]
  },
  "claude-sonnet-4-5@20250929": {
  maxTokens: 8192,
  contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
+ // $3 per million input tokens (≤200K context)
  outputPrice: 15,
+ // $15 per million output tokens (≤200K context)
  cacheWritesPrice: 3.75,
+ // $3.75 per million tokens
  cacheReadsPrice: 0.3,
- supportsReasoningBudget: true
+ // $0.30 per million tokens
+ supportsReasoningBudget: true,
+ // Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 6,
+ // $6 per million input tokens (>200K context)
+ outputPrice: 22.5,
+ // $22.50 per million output tokens (>200K context)
+ cacheWritesPrice: 7.5,
+ // $7.50 per million tokens (>200K context)
+ cacheReadsPrice: 0.6
+ // $0.60 per million tokens (>200K context)
+ }
+ ]
  },
  "claude-haiku-4-5@20251001": {
  maxTokens: 8192,
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -4499,6 +4520,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 5,
  outputPrice: 25,
  cacheWritesPrice: 6.25,
@@ -4510,6 +4533,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4521,6 +4546,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4531,6 +4558,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4543,6 +4572,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4553,6 +4584,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4563,6 +4596,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4573,6 +4608,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -4583,6 +4620,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -4593,6 +4632,8 @@ var vertexModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
  inputPrice: 0.25,
  outputPrice: 1.25,
  cacheWritesPrice: 0.3,
@@ -4606,6 +4647,7 @@ var vertexModels = {
  defaultToolProtocol: "native",
  supportsPromptCache: true,
  includedTools: ["write_file", "edit_file"],
+ excludedTools: ["apply_diff"],
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
@@ -4618,6 +4660,7 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.35,
  outputPrice: 1.15,
  description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
@@ -4627,6 +4670,7 @@ var vertexModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1.35,
  outputPrice: 5.4,
  description: "DeepSeek R1 (0528). Available in us-central1"
@@ -4636,6 +4680,7 @@ var vertexModels = {
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.6,
  outputPrice: 1.7,
  description: "DeepSeek V3.1. Available in us-west2"
@@ -4645,6 +4690,7 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.6,
  description: "OpenAI gpt-oss 120B. Available in us-central1"
@@ -4654,6 +4700,7 @@ var vertexModels = {
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.075,
  outputPrice: 0.3,
  description: "OpenAI gpt-oss 20B. Available in us-central1"
@@ -4663,6 +4710,7 @@ var vertexModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 4,
  description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
@@ -4672,11 +4720,13 @@ var vertexModels = {
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.25,
  outputPrice: 1,
  description: "Qwen3 235B A22B Instruct. Available in us-south1"
  }
  };
+ var VERTEX_1M_CONTEXT_MODEL_IDS = ["claude-sonnet-4@20250514", "claude-sonnet-4-5@20250929"];
  var VERTEX_REGIONS = [
  { value: "global", label: "global" },
  { value: "us-central1", label: "us-central1" },
@@ -5111,6 +5161,7 @@ var vercelAiGatewayDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -5123,7 +5174,7 @@ var VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE = 0.7;
  var internationalZAiDefaultModelId = "glm-4.6";
  var internationalZAiModels = {
  "glm-4.5": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5136,7 +5187,7 @@ var internationalZAiModels = {
  description: "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k."
  },
  "glm-4.5-air": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5149,7 +5200,7 @@ var internationalZAiModels = {
  description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models."
  },
  "glm-4.5-x": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5162,7 +5213,7 @@ var internationalZAiModels = {
  description: "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses."
  },
  "glm-4.5-airx": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5175,7 +5226,7 @@ var internationalZAiModels = {
  description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost."
  },
  "glm-4.5-flash": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5201,7 +5252,7 @@ var internationalZAiModels = {
  description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
  },
  "glm-4.6": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5213,8 +5264,24 @@ var internationalZAiModels = {
  cacheReadsPrice: 0.11,
  description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations."
  },
+ "glm-4.7": {
+ maxTokens: 16384,
+ contextWindow: 2e5,
+ supportsImages: false,
+ supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ supportsReasoningEffort: ["disable", "medium"],
+ reasoningEffort: "medium",
+ preserveReasoning: true,
+ inputPrice: 0.6,
+ outputPrice: 2.2,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0.11,
+ description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
+ },
  "glm-4-32b-0414-128k": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
@@ -5230,7 +5297,7 @@ var internationalZAiModels = {
  var mainlandZAiDefaultModelId = "glm-4.6";
  var mainlandZAiModels = {
  "glm-4.5": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5243,7 +5310,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k."
  },
  "glm-4.5-air": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5256,7 +5323,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models."
  },
  "glm-4.5-x": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5269,7 +5336,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses."
  },
  "glm-4.5-airx": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5282,7 +5349,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost."
  },
  "glm-4.5-flash": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5308,7 +5375,7 @@ var mainlandZAiModels = {
  description: "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding."
  },
  "glm-4.6": {
- maxTokens: 98304,
+ maxTokens: 16384,
  contextWindow: 204800,
  supportsImages: false,
  supportsPromptCache: true,
@@ -5319,6 +5386,22 @@ var mainlandZAiModels = {
  cacheWritesPrice: 0,
  cacheReadsPrice: 0.057,
  description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations."
+ },
+ "glm-4.7": {
+ maxTokens: 16384,
+ contextWindow: 204800,
+ supportsImages: false,
+ supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ supportsReasoningEffort: ["disable", "medium"],
+ reasoningEffort: "medium",
+ preserveReasoning: true,
+ inputPrice: 0.29,
+ outputPrice: 1.14,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0.057,
+ description: "GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times."
  }
  };
  var ZAI_DEFAULT_TEMPERATURE = 0.6;
@@ -5368,6 +5451,8 @@ var minimaxModels = {
  supportsPromptCache: true,
  supportsNativeTools: true,
  defaultToolProtocol: "native",
+ includedTools: ["search_and_replace"],
+ excludedTools: ["apply_diff"],
  preserveReasoning: true,
  inputPrice: 0.3,
  outputPrice: 1.2,
@@ -5382,12 +5467,30 @@ var minimaxModels = {
  supportsPromptCache: true,
  supportsNativeTools: true,
  defaultToolProtocol: "native",
+ includedTools: ["search_and_replace"],
+ excludedTools: ["apply_diff"],
  preserveReasoning: true,
  inputPrice: 0.3,
  outputPrice: 1.2,
  cacheWritesPrice: 0.375,
  cacheReadsPrice: 0.03,
  description: "MiniMax M2 Stable (High Concurrency, Commercial Use), a model born for Agents and code, featuring Top-tier Coding Capabilities, Powerful Agentic Performance, and Ultimate Cost-Effectiveness & Speed."
+ },
+ "MiniMax-M2.1": {
+ maxTokens: 16384,
+ contextWindow: 192e3,
+ supportsImages: false,
+ supportsPromptCache: true,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ includedTools: ["search_and_replace"],
+ excludedTools: ["apply_diff"],
+ preserveReasoning: true,
+ inputPrice: 0.3,
+ outputPrice: 1.2,
+ cacheWritesPrice: 0.375,
+ cacheReadsPrice: 0.03,
+ description: "MiniMax M2.1 builds on M2 with improved overall performance for agentic coding tasks and significantly faster response times."
  }
  };
  var minimaxDefaultModelInfo = minimaxModels[minimaxDefaultModelId];
@@ -5600,7 +5703,9 @@ var vertexSchema = apiModelIdProviderModelSchema.extend({
  vertexProjectId: import_zod8.z.string().optional(),
  vertexRegion: import_zod8.z.string().optional(),
  enableUrlContext: import_zod8.z.boolean().optional(),
- enableGrounding: import_zod8.z.boolean().optional()
+ enableGrounding: import_zod8.z.boolean().optional(),
+ vertex1MContext: import_zod8.z.boolean().optional()
+ // Enable 'context-1m-2025-08-07' beta for 1M context window.
  });
  var openAiSchema = baseProviderSettingsSchema.extend({
  openAiBaseUrl: import_zod8.z.string().optional(),
@@ -6017,6 +6122,16 @@ var historyItemSchema = import_zod9.z.object({
  size: import_zod9.z.number().optional(),
  workspace: import_zod9.z.string().optional(),
  mode: import_zod9.z.string().optional(),
+ /**
+ * The tool protocol used by this task. Once a task uses tools with a specific
+ * protocol (XML or Native), it is permanently locked to that protocol.
+ *
+ * - "xml": Tool calls are parsed from XML text (no tool IDs)
+ * - "native": Tool calls come as tool_call chunks with IDs
+ *
+ * This ensures task resumption works correctly even when NTC settings change.
+ */
+ toolProtocol: import_zod9.z.enum(["xml", "native"]).optional(),
  status: import_zod9.z.enum(["active", "completed", "delegated"]).optional(),
  delegatedToId: import_zod9.z.string().optional(),
  // Last child this parent delegated to
@@ -6038,7 +6153,8 @@ var experimentIds = [
  "preventFocusDisruption",
  "imageGeneration",
  "runSlashCommand",
- "multipleNativeToolCalls"
+ "multipleNativeToolCalls",
+ "customTools"
  ];
  var experimentIdsSchema = import_zod10.z.enum(experimentIds);
  var experimentsSchema = import_zod10.z.object({
@@ -6047,7 +6163,8 @@ var experimentsSchema = import_zod10.z.object({
  preventFocusDisruption: import_zod10.z.boolean().optional(),
  imageGeneration: import_zod10.z.boolean().optional(),
  runSlashCommand: import_zod10.z.boolean().optional(),
- multipleNativeToolCalls: import_zod10.z.boolean().optional()
+ multipleNativeToolCalls: import_zod10.z.boolean().optional(),
+ customTools: import_zod10.z.boolean().optional()
  });

  // src/telemetry.ts
@@ -6312,6 +6429,31 @@ function extractApiProviderErrorProperties(error) {
  ...error.errorCode !== void 0 && { errorCode: error.errorCode }
  };
  }
+ var ConsecutiveMistakeError = class extends Error {
+ constructor(message, taskId, consecutiveMistakeCount, consecutiveMistakeLimit, reason = "unknown", provider, modelId) {
+ super(message);
+ this.taskId = taskId;
+ this.consecutiveMistakeCount = consecutiveMistakeCount;
+ this.consecutiveMistakeLimit = consecutiveMistakeLimit;
+ this.reason = reason;
+ this.provider = provider;
+ this.modelId = modelId;
+ this.name = "ConsecutiveMistakeError";
+ }
+ };
+ function isConsecutiveMistakeError(error) {
+ return error instanceof Error && error.name === "ConsecutiveMistakeError" && "taskId" in error && "consecutiveMistakeCount" in error && "consecutiveMistakeLimit" in error;
+ }
+ function extractConsecutiveMistakeErrorProperties(error) {
+ return {
+ taskId: error.taskId,
+ consecutiveMistakeCount: error.consecutiveMistakeCount,
+ consecutiveMistakeLimit: error.consecutiveMistakeLimit,
+ reason: error.reason,
+ ...error.provider !== void 0 && { provider: error.provider },
+ ...error.modelId !== void 0 && { modelId: error.modelId }
+ };
+ }

  // src/mode.ts
  var import_zod12 = require("zod");
@@ -7196,6 +7338,12 @@ var COOKIE_CONSENT_EVENTS = {
  CHANGED: "cookieConsentChanged"
  };

+ // src/custom-tool.ts
+ var import_v4 = require("zod/v4");
+ function defineCustomTool(definition) {
+ return definition;
+ }
+
  // src/followup.ts
  var import_zod17 = require("zod");
  var suggestionItemSchema = import_zod17.z.object({
@@ -7396,6 +7544,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  CONTEXT_MANAGEMENT_EVENTS,
  COOKIE_CONSENT_EVENTS,
  ConnectionState,
+ ConsecutiveMistakeError,
  DEEP_SEEK_DEFAULT_TEMPERATURE,
  DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
@@ -7436,6 +7585,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  MISTRAL_DEFAULT_TEMPERATURE,
  MODELS_BY_PROVIDER,
  MOONSHOT_DEFAULT_TEMPERATURE,
+ NATIVE_TOOL_DEFAULTS,
  OPENAI_AZURE_AI_INFERENCE_PATH,
  OPENAI_NATIVE_DEFAULT_TEMPERATURE,
  OPENROUTER_DEFAULT_PROVIDER_NAME,
@@ -7461,6 +7611,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
  VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
+ VERTEX_1M_CONTEXT_MODEL_IDS,
  VERTEX_REGIONS,
  ZAI_DEFAULT_TEMPERATURE,
  ackSchema,
@@ -7503,6 +7654,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  deepInfraDefaultModelInfo,
  deepSeekDefaultModelId,
  deepSeekModels,
+ defineCustomTool,
  discriminatedProviderSettingsWithIdSchema,
  doubaoDefaultModelId,
  doubaoDefaultModelInfo,
@@ -7516,6 +7668,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  extensionBridgeEventSchema,
  extensionInstanceSchema,
  extractApiProviderErrorProperties,
+ extractConsecutiveMistakeErrorProperties,
  extractMessageFromJsonPayload,
  fauxProviders,
  featherlessDefaultModelId,
@@ -7550,6 +7703,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  ioIntelligenceModels,
  ipcMessageSchema,
  isApiProviderError,
+ isConsecutiveMistakeError,
  isContextManagementEvent,
  isCustomProvider,
  isDynamicProvider,
@@ -7610,6 +7764,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  organizationDefaultSettingsSchema,
  organizationFeaturesSchema,
  organizationSettingsSchema,
+ parametersSchema,
  promptComponentSchema,
  providerNames,
  providerNamesSchema,
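
The recurring supportsNativeTools: true / defaultToolProtocol: "native" additions throughout this diff are exactly the two fields bundled by the new NATIVE_TOOL_DEFAULTS constant. A minimal TypeScript sketch of how a consumer might spread it into a model entry; the model id and prices below are hypothetical, not taken from this package:

import { NATIVE_TOOL_DEFAULTS } from "@roo-code/types"

// Hypothetical model entry (numbers are illustrative only).
// Spreading NATIVE_TOOL_DEFAULTS contributes supportsNativeTools: true
// and defaultToolProtocol: "native", matching the fields added to many
// model definitions in this release.
const exampleModel = {
  ...NATIVE_TOOL_DEFAULTS,
  maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
  inputPrice: 0,
  outputPrice: 0,
}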
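
The new ConsecutiveMistakeError follows the same pattern as the existing ApiProviderError utilities: an Error subclass tagged via its name property, a structural type guard, and an extractor that flattens the error into plain properties. A minimal sketch of the intended flow, assuming the constructor signature shown in the diff; the task id, reason value, and logging call are illustrative:

import {
  ConsecutiveMistakeError,
  isConsecutiveMistakeError,
  extractConsecutiveMistakeErrorProperties,
} from "@roo-code/types"

try {
  // Arguments: message, taskId, count, limit, reason (defaults to "unknown"),
  // then optional provider and modelId.
  throw new ConsecutiveMistakeError("Mistake limit reached", "task-123", 3, 3, "tool_repetition")
} catch (error) {
  if (isConsecutiveMistakeError(error)) {
    // Flatten into plain properties, e.g. for a telemetry event payload.
    const props = extractConsecutiveMistakeErrorProperties(error)
    console.warn("consecutive mistakes:", props.consecutiveMistakeCount, "/", props.consecutiveMistakeLimit)
  }
}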
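
defineCustomTool itself is an identity function (it returns its argument unchanged), so its value lies entirely in type inference from the package's declaration files, and the new parametersSchema export re-exports z from zod/v4 for declaring tool parameters. This pairs with the new "customTools" experiment flag added to experimentIds. A hedged sketch of plausible usage; the dist diff does not show the custom-tool definition type, so every field name below (name, description, parameters, execute) is an assumption to be checked against the package's .d.ts:

import { defineCustomTool, parametersSchema as z } from "@roo-code/types"

// All field names here are assumptions; only defineCustomTool's identity
// behavior and the zod/v4-based parametersSchema are visible in this diff.
const getWeather = defineCustomTool({
  name: "get_weather",
  description: "Look up the current weather for a city",
  parameters: z.object({ city: z.string() }),
  execute: async (args: { city: string }) => `Sunny in ${args.city}`,
})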