@roo-code/types 1.78.0 → 1.80.0

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -36,6 +36,7 @@ function isResumableAsk(ask) {
  return resumableAsks.includes(ask);
  }
  var interactiveAsks = [
+ "followup",
  "command",
  "tool",
  "browser_action_launch",
@@ -452,6 +453,39 @@ var codebaseIndexProviderSchema = z6.object({
  // src/providers/anthropic.ts
  var anthropicDefaultModelId = "claude-sonnet-4-20250514";
  var anthropicModels = {
+ "claude-sonnet-4-5": {
+ maxTokens: 64e3,
+ // Overridden to 8k if `enableReasoningEffort` is false.
+ contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
+ supportsImages: true,
+ supportsComputerUse: true,
+ supportsPromptCache: true,
+ inputPrice: 3,
+ // $3 per million input tokens (≤200K context)
+ outputPrice: 15,
+ // $15 per million output tokens (≤200K context)
+ cacheWritesPrice: 3.75,
+ // $3.75 per million tokens
+ cacheReadsPrice: 0.3,
+ // $0.30 per million tokens
+ supportsReasoningBudget: true,
+ // Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 6,
+ // $6 per million input tokens (>200K context)
+ outputPrice: 22.5,
+ // $22.50 per million output tokens (>200K context)
+ cacheWritesPrice: 7.5,
+ // $7.50 per million tokens (>200K context)
+ cacheReadsPrice: 0.6
+ // $0.60 per million tokens (>200K context)
+ }
+ ]
+ },
  "claude-sonnet-4-20250514": {
  maxTokens: 64e3,
  // Overridden to 8k if `enableReasoningEffort` is false.
@@ -604,6 +638,21 @@ var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;
  var bedrockDefaultModelId = "anthropic.claude-sonnet-4-20250514-v1:0";
  var bedrockDefaultPromptRouterModelId = "anthropic.claude-3-sonnet-20240229-v1:0";
  var bedrockModels = {
+ "anthropic.claude-sonnet-4-5-20250929-v1:0": {
+ maxTokens: 8192,
+ contextWindow: 2e5,
+ supportsImages: true,
+ supportsComputerUse: true,
+ supportsPromptCache: true,
+ supportsReasoningBudget: true,
+ inputPrice: 3,
+ outputPrice: 15,
+ cacheWritesPrice: 3.75,
+ cacheReadsPrice: 0.3,
+ minTokensPerCachePoint: 1024,
+ maxCachePoints: 4,
+ cachableFields: ["system", "messages", "tools"]
+ },
  "amazon.nova-pro-v1:0": {
  maxTokens: 5e3,
  contextWindow: 3e5,
@@ -1030,7 +1079,10 @@ var BEDROCK_REGIONS = [
  { value: "us-gov-east-1", label: "us-gov-east-1" },
  { value: "us-gov-west-1", label: "us-gov-west-1" }
  ].sort((a, b) => a.value.localeCompare(b.value));
- var BEDROCK_CLAUDE_SONNET_4_MODEL_ID = "anthropic.claude-sonnet-4-20250514-v1:0";
+ var BEDROCK_1M_CONTEXT_MODEL_IDS = [
+ "anthropic.claude-sonnet-4-20250514-v1:0",
+ "anthropic.claude-sonnet-4-5-20250929-v1:0"
+ ];

  // src/providers/cerebras.ts
  var cerebrasDefaultModelId = "qwen-3-coder-480b-free";
@@ -1340,6 +1392,15 @@ var chutesModels = {
  outputPrice: 0,
  description: "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture."
  },
+ "zai-org/GLM-4.5-turbo": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 1,
+ outputPrice: 3,
+ description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference."
+ },
  "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": {
  maxTokens: 32768,
  contextWindow: 262144,
@@ -1407,6 +1468,15 @@ function getClaudeCodeModelId(baseModelId, useVertex = false) {
  return useVertex ? convertModelNameForVertex(baseModelId) : baseModelId;
  }
  var claudeCodeModels = {
+ "claude-sonnet-4-5": {
+ ...anthropicModels["claude-sonnet-4-5"],
+ supportsImages: false,
+ supportsPromptCache: true,
+ // Claude Code does report cache tokens
+ supportsReasoningEffort: false,
+ supportsReasoningBudget: false,
+ requiredReasoningBudget: false
+ },
  "claude-sonnet-4-20250514": {
  ...anthropicModels["claude-sonnet-4-20250514"],
  supportsImages: false,
@@ -2214,6 +2284,7 @@ var LITELLM_COMPUTER_USE_MODELS = /* @__PURE__ */ new Set([
  "vertex_ai/claude-opus-4-1@20250805",
  "vertex_ai/claude-opus-4@20250514",
  "vertex_ai/claude-sonnet-4@20250514",
+ "vertex_ai/claude-sonnet-4-5@20250929",
  "openrouter/anthropic/claude-3.5-sonnet",
  "openrouter/anthropic/claude-3.5-sonnet:beta",
  "openrouter/anthropic/claude-3.7-sonnet",
@@ -2458,6 +2529,20 @@ var openAiNativeModels = {
  supportsTemperature: false,
  tiers: [{ name: "flex", contextWindow: 4e5, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 25e-4 }]
  },
+ "gpt-5-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: true,
+ reasoningEffort: "medium",
+ inputPrice: 1.25,
+ outputPrice: 10,
+ cacheReadsPrice: 0.13,
+ description: "GPT-5-Codex: A version of GPT-5 optimized for agentic coding in Codex",
+ supportsVerbosity: true,
+ supportsTemperature: false
+ },
  "gpt-4.1": {
  maxTokens: 32768,
  contextWindow: 1047576,
@@ -2722,6 +2807,7 @@ var OPEN_ROUTER_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
  "anthropic/claude-3.7-sonnet:beta",
  "anthropic/claude-3.7-sonnet:thinking",
  "anthropic/claude-sonnet-4",
+ "anthropic/claude-sonnet-4.5",
  "anthropic/claude-opus-4",
  "anthropic/claude-opus-4.1",
  "google/gemini-2.5-flash-preview",
@@ -2741,6 +2827,7 @@ var OPEN_ROUTER_COMPUTER_USE_MODELS = /* @__PURE__ */ new Set([
  "anthropic/claude-3.7-sonnet:beta",
  "anthropic/claude-3.7-sonnet:thinking",
  "anthropic/claude-sonnet-4",
+ "anthropic/claude-sonnet-4.5",
  "anthropic/claude-opus-4",
  "anthropic/claude-opus-4.1"
  ]);
@@ -2754,6 +2841,7 @@ var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
  "anthropic/claude-opus-4",
  "anthropic/claude-opus-4.1",
  "anthropic/claude-sonnet-4",
+ "anthropic/claude-sonnet-4.5",
  "google/gemini-2.5-pro-preview",
  "google/gemini-2.5-pro",
  "google/gemini-2.5-flash-preview-05-20",
@@ -2818,6 +2906,33 @@ var rooModels = {
  inputPrice: 0,
  outputPrice: 0,
  description: "A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)"
+ },
+ "roo/code-supernova-1-million": {
+ maxTokens: 3e4,
+ contextWindow: 1e6,
+ supportsImages: true,
+ supportsPromptCache: true,
+ inputPrice: 0,
+ outputPrice: 0,
+ description: "A versatile agentic coding stealth model with a 1M token context window that supports image inputs, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by the model provider and used to improve the model.)"
+ },
+ "xai/grok-4-fast": {
+ maxTokens: 3e4,
+ contextWindow: 2e6,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ description: "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. (Note: prompts and completions are logged by xAI and used to improve the model.)"
+ },
+ "deepseek/deepseek-chat-v3.1": {
+ maxTokens: 16384,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ description: "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active). It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference."
  }
  };

@@ -2861,6 +2976,15 @@ var sambaNovaModels = {
  outputPrice: 4.5,
  description: "DeepSeek V3 model with 32K context window."
  },
+ "DeepSeek-V3.1": {
+ maxTokens: 8192,
+ contextWindow: 32768,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 3,
+ outputPrice: 4.5,
+ description: "DeepSeek V3.1 model with 32K context window."
+ },
  "DeepSeek-R1-Distill-Llama-70B": {
  maxTokens: 8192,
  contextWindow: 131072,
@@ -2896,6 +3020,15 @@ var sambaNovaModels = {
  inputPrice: 0.4,
  outputPrice: 0.8,
  description: "Alibaba Qwen 3 32B model with 8K context window."
+ },
+ "gpt-oss-120b": {
+ maxTokens: 8192,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.22,
+ outputPrice: 0.59,
+ description: "OpenAI gpt oss 120b model with 128k context window."
  }
  };

@@ -2913,7 +3046,7 @@ var unboundDefaultModelInfo = {
  };

  // src/providers/vertex.ts
- var vertexDefaultModelId = "claude-sonnet-4@20250514";
+ var vertexDefaultModelId = "claude-sonnet-4-5@20250929";
  var vertexModels = {
  "gemini-2.5-flash-preview-05-20:thinking": {
  maxTokens: 65535,
@@ -3084,6 +3217,18 @@ var vertexModels = {
  cacheReadsPrice: 0.3,
  supportsReasoningBudget: true
  },
+ "claude-sonnet-4-5@20250929": {
+ maxTokens: 8192,
+ contextWindow: 2e5,
+ supportsImages: true,
+ supportsComputerUse: true,
+ supportsPromptCache: true,
+ inputPrice: 3,
+ outputPrice: 15,
+ cacheWritesPrice: 3.75,
+ cacheReadsPrice: 0.3,
+ supportsReasoningBudget: true
+ },
  "claude-opus-4-1@20250805": {
  maxTokens: 8192,
  contextWindow: 2e5,
@@ -3689,6 +3834,17 @@ var internationalZAiModels = {
  cacheWritesPrice: 0,
  cacheReadsPrice: 0.03,
  description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models."
+ },
+ "glm-4.6": {
+ maxTokens: 98304,
+ contextWindow: 204800,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 2.2,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0.11,
+ description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations."
  }
  };
  var mainlandZAiDefaultModelId = "glm-4.5";
@@ -3754,6 +3910,43 @@ var mainlandZAiModels = {
  cacheReadsPrice: 0.02
  }
  ]
+ },
+ "glm-4.6": {
+ maxTokens: 98304,
+ contextWindow: 204800,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.29,
+ outputPrice: 1.14,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0.057,
+ description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations.",
+ tiers: [
+ {
+ contextWindow: 32e3,
+ inputPrice: 0.21,
+ outputPrice: 1,
+ cacheReadsPrice: 0.043
+ },
+ {
+ contextWindow: 128e3,
+ inputPrice: 0.29,
+ outputPrice: 1.14,
+ cacheReadsPrice: 0.057
+ },
+ {
+ contextWindow: 2e5,
+ inputPrice: 0.29,
+ outputPrice: 1.14,
+ cacheReadsPrice: 0.057
+ },
+ {
+ contextWindow: Infinity,
+ inputPrice: 0.29,
+ outputPrice: 1.14,
+ cacheReadsPrice: 0.057
+ }
+ ]
  }
  };
  var ZAI_DEFAULT_TEMPERATURE = 0;
@@ -3781,52 +3974,63 @@ var deepInfraDefaultModelInfo = {
  };

  // src/provider-settings.ts
+ var DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3;
+ var dynamicProviders = [
+ "openrouter",
+ "vercel-ai-gateway",
+ "huggingface",
+ "litellm",
+ "deepinfra",
+ "io-intelligence",
+ "requesty",
+ "unbound",
+ "glama"
+ ];
+ var isDynamicProvider = (key) => dynamicProviders.includes(key);
+ var localProviders = ["ollama", "lmstudio"];
+ var isLocalProvider = (key) => localProviders.includes(key);
+ var internalProviders = ["vscode-lm"];
+ var isInternalProvider = (key) => internalProviders.includes(key);
+ var customProviders = ["openai"];
+ var isCustomProvider = (key) => customProviders.includes(key);
+ var fauxProviders = ["fake-ai", "human-relay"];
+ var isFauxProvider = (key) => fauxProviders.includes(key);
  var providerNames = [
+ ...dynamicProviders,
+ ...localProviders,
+ ...internalProviders,
+ ...customProviders,
+ ...fauxProviders,
  "anthropic",
- "claude-code",
- "glama",
- "openrouter",
  "bedrock",
- "vertex",
- "openai",
- "ollama",
- "vscode-lm",
- "lmstudio",
+ "cerebras",
+ "chutes",
+ "claude-code",
+ "doubao",
+ "deepseek",
+ "featherless",
+ "fireworks",
  "gemini",
  "gemini-cli",
- "openai-native",
+ "groq",
  "mistral",
  "moonshot",
- "deepseek",
- "deepinfra",
- "doubao",
+ "openai-native",
  "qwen-code",
- "unbound",
- "requesty",
- "human-relay",
- "fake-ai",
- "xai",
- "groq",
- "chutes",
- "litellm",
- "huggingface",
- "cerebras",
- "sambanova",
- "zai",
- "fireworks",
- "featherless",
- "io-intelligence",
  "roo",
- "vercel-ai-gateway"
+ "sambanova",
+ "vertex",
+ "xai",
+ "zai"
  ];
  var providerNamesSchema = z7.enum(providerNames);
+ var isProviderName = (key) => typeof key === "string" && providerNames.includes(key);
  var providerSettingsEntrySchema = z7.object({
  id: z7.string(),
  name: z7.string(),
  apiProvider: providerNamesSchema.optional(),
  modelId: z7.string().optional()
  });
- var DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3;
  var baseProviderSettingsSchema = z7.object({
  includeMaxTokens: z7.boolean().optional(),
  diffEnabled: z7.boolean().optional(),
@@ -3851,7 +4055,7 @@ var anthropicSchema = apiModelIdProviderModelSchema.extend({
  anthropicBaseUrl: z7.string().optional(),
  anthropicUseAuthToken: z7.boolean().optional(),
  anthropicBeta1MContext: z7.boolean().optional()
- // Enable 'context-1m-2025-08-07' beta for 1M context window
+ // Enable 'context-1m-2025-08-07' beta for 1M context window.
  });
  var claudeCodeSchema = apiModelIdProviderModelSchema.extend({
  claudeCodePath: z7.string().optional(),
@@ -3884,7 +4088,7 @@ var bedrockSchema = apiModelIdProviderModelSchema.extend({
  awsBedrockEndpointEnabled: z7.boolean().optional(),
  awsBedrockEndpoint: z7.string().optional(),
  awsBedrock1MContext: z7.boolean().optional()
- // Enable 'context-1m-2025-08-07' beta for 1M context window
+ // Enable 'context-1m-2025-08-07' beta for 1M context window.
  });
  var vertexSchema = apiModelIdProviderModelSchema.extend({
  vertexKeyFile: z7.string().optional(),
@@ -3911,7 +4115,8 @@ var openAiSchema = baseProviderSettingsSchema.extend({
  var ollamaSchema = baseProviderSettingsSchema.extend({
  ollamaModelId: z7.string().optional(),
  ollamaBaseUrl: z7.string().optional(),
- ollamaApiKey: z7.string().optional()
+ ollamaApiKey: z7.string().optional(),
+ ollamaNumCtx: z7.number().int().min(128).optional()
  });
  var vsCodeLmSchema = baseProviderSettingsSchema.extend({
  vsCodeLmModelSelector: z7.object({
@@ -4023,7 +4228,7 @@ var qwenCodeSchema = apiModelIdProviderModelSchema.extend({
  qwenCodeOauthPath: z7.string().optional()
  });
  var rooSchema = apiModelIdProviderModelSchema.extend({
- // No additional fields needed - uses cloud authentication
+ // No additional fields needed - uses cloud authentication.
  });
  var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
  vercelAiGatewayApiKey: z7.string().optional(),
@@ -4116,7 +4321,7 @@ var discriminatedProviderSettingsWithIdSchema = providerSettingsSchemaDiscrimina
  z7.object({ id: z7.string().optional() })
  );
  var PROVIDER_SETTINGS_KEYS = providerSettingsSchema.keyof().options;
- var MODEL_ID_KEYS = [
+ var modelIdKeys = [
  "apiModelId",
  "glamaModelId",
  "openRouterModelId",
@@ -4133,9 +4338,44 @@ var MODEL_ID_KEYS = [
  "deepInfraModelId"
  ];
  var getModelId = (settings) => {
- const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key]);
+ const modelIdKey = modelIdKeys.find((key) => settings[key]);
  return modelIdKey ? settings[modelIdKey] : void 0;
  };
+ var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key) && !isCustomProvider(key) && !isFauxProvider(key);
+ var modelIdKeysByProvider = {
+ anthropic: "apiModelId",
+ "claude-code": "apiModelId",
+ glama: "glamaModelId",
+ openrouter: "openRouterModelId",
+ bedrock: "apiModelId",
+ vertex: "apiModelId",
+ "openai-native": "openAiModelId",
+ ollama: "ollamaModelId",
+ lmstudio: "lmStudioModelId",
+ gemini: "apiModelId",
+ "gemini-cli": "apiModelId",
+ mistral: "apiModelId",
+ moonshot: "apiModelId",
+ deepseek: "apiModelId",
+ deepinfra: "deepInfraModelId",
+ doubao: "apiModelId",
+ "qwen-code": "apiModelId",
+ unbound: "unboundModelId",
+ requesty: "requestyModelId",
+ xai: "apiModelId",
+ groq: "apiModelId",
+ chutes: "apiModelId",
+ litellm: "litellmModelId",
+ huggingface: "huggingFaceModelId",
+ cerebras: "apiModelId",
+ sambanova: "apiModelId",
+ zai: "apiModelId",
+ fireworks: "apiModelId",
+ featherless: "apiModelId",
+ "io-intelligence": "ioIntelligenceModelId",
+ roo: "apiModelId",
+ "vercel-ai-gateway": "vercelAiGatewayModelId"
+ };
  var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "claude-code", "bedrock"];
  var getApiProtocol = (provider, modelId) => {
  if (provider && ANTHROPIC_STYLE_PROVIDERS.includes(provider)) {
@@ -4242,17 +4482,6 @@ var MODELS_BY_PROVIDER = {
  deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] },
  "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }
  };
- var dynamicProviders = [
- "glama",
- "huggingface",
- "litellm",
- "openrouter",
- "requesty",
- "unbound",
- "deepinfra",
- "vercel-ai-gateway"
- ];
- var isDynamicProvider = (key) => dynamicProviders.includes(key);

  // src/history.ts
  import { z as z8 } from "zod";
@@ -4329,11 +4558,15 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
  TelemetryEventName2["ACCOUNT_CONNECT_SUCCESS"] = "Account Connect Success";
  TelemetryEventName2["ACCOUNT_LOGOUT_CLICKED"] = "Account Logout Clicked";
  TelemetryEventName2["ACCOUNT_LOGOUT_SUCCESS"] = "Account Logout Success";
+ TelemetryEventName2["FEATURED_PROVIDER_CLICKED"] = "Featured Provider Clicked";
+ TelemetryEventName2["UPSELL_DISMISSED"] = "Upsell Dismissed";
+ TelemetryEventName2["UPSELL_CLICKED"] = "Upsell Clicked";
  TelemetryEventName2["SCHEMA_VALIDATION_ERROR"] = "Schema Validation Error";
  TelemetryEventName2["DIFF_APPLICATION_ERROR"] = "Diff Application Error";
  TelemetryEventName2["SHELL_INTEGRATION_ERROR"] = "Shell Integration Error";
  TelemetryEventName2["CONSECUTIVE_MISTAKE_ERROR"] = "Consecutive Mistake Error";
  TelemetryEventName2["CODE_INDEX_ERROR"] = "Code Index Error";
+ TelemetryEventName2["TELEMETRY_SETTINGS_CHANGED"] = "Telemetry Settings Changed";
  return TelemetryEventName2;
  })(TelemetryEventName || {});
  var staticAppPropertiesSchema = z10.object({
@@ -4408,6 +4641,9 @@ var rooCodeTelemetryEventSchema = z10.discriminatedUnion("type", [
  "Account Connect Success" /* ACCOUNT_CONNECT_SUCCESS */,
  "Account Logout Clicked" /* ACCOUNT_LOGOUT_CLICKED */,
  "Account Logout Success" /* ACCOUNT_LOGOUT_SUCCESS */,
+ "Featured Provider Clicked" /* FEATURED_PROVIDER_CLICKED */,
+ "Upsell Dismissed" /* UPSELL_DISMISSED */,
+ "Upsell Clicked" /* UPSELL_CLICKED */,
  "Schema Validation Error" /* SCHEMA_VALIDATION_ERROR */,
  "Diff Application Error" /* DIFF_APPLICATION_ERROR */,
  "Shell Integration Error" /* SHELL_INTEGRATION_ERROR */,
@@ -4421,6 +4657,14 @@ var rooCodeTelemetryEventSchema = z10.discriminatedUnion("type", [
  ]),
  properties: telemetryPropertiesSchema
  }),
+ z10.object({
+ type: z10.literal("Telemetry Settings Changed" /* TELEMETRY_SETTINGS_CHANGED */),
+ properties: z10.object({
+ ...telemetryPropertiesSchema.shape,
+ previousSetting: telemetrySettingsSchema,
+ newSetting: telemetrySettingsSchema
+ })
+ }),
  z10.object({
  type: z10.literal("Task Message" /* TASK_MESSAGE */),
  properties: z10.object({
@@ -4582,7 +4826,8 @@ var commandIds = [
  "importSettings",
  "focusInput",
  "acceptInput",
- "focusPanel"
+ "focusPanel",
+ "toggleAutoApprove"
  ];
  var languages = [
  "ca",
@@ -4707,6 +4952,7 @@ var globalSettingsSchema = z13.object({
  enhancementApiConfigId: z13.string().optional(),
  includeTaskHistoryInEnhance: z13.boolean().optional(),
  historyPreviewCollapsed: z13.boolean().optional(),
+ reasoningBlockCollapsed: z13.boolean().optional(),
  profileThresholds: z13.record(z13.string(), z13.number()).optional(),
  hasOpenedModeSelector: z13.boolean().optional(),
  lastModeExportPath: z13.string().optional(),
@@ -5237,6 +5483,27 @@ var TaskSocketEvents = /* @__PURE__ */ ((TaskSocketEvents2) => {
  TaskSocketEvents2["RELAYED_COMMAND"] = "task:relayed_command";
  return TaskSocketEvents2;
  })(TaskSocketEvents || {});
+ var usageStatsSchema = z15.object({
+ success: z15.boolean(),
+ data: z15.object({
+ dates: z15.array(z15.string()),
+ // Array of date strings
+ tasks: z15.array(z15.number()),
+ // Array of task counts
+ tokens: z15.array(z15.number()),
+ // Array of token counts
+ costs: z15.array(z15.number()),
+ // Array of costs in USD
+ totals: z15.object({
+ tasks: z15.number(),
+ tokens: z15.number(),
+ cost: z15.number()
+ // Total cost in USD
+ })
+ }),
+ period: z15.number()
+ // Period in days (e.g., 30)
+ });

  // src/cookie-consent.ts
  var CONSENT_COOKIE_NAME = "roo-code-cookie-consent";
@@ -5353,7 +5620,7 @@ var mcpExecutionStatusSchema = z18.discriminatedUnion("status", [

  // src/single-file-read-models.ts
  function shouldUseSingleFileRead(modelId) {
- return modelId.includes("grok-code-fast-1");
+ return modelId.includes("grok-code-fast-1") || modelId.includes("code-supernova");
  }

  // src/todo.ts
@@ -5397,7 +5664,7 @@ export {
  ANTHROPIC_DEFAULT_MAX_TOKENS,
  ANTHROPIC_STYLE_PROVIDERS,
  AWS_INFERENCE_PROFILE_MAPPING,
- BEDROCK_CLAUDE_SONNET_4_MODEL_ID,
+ BEDROCK_1M_CONTEXT_MODEL_IDS,
  BEDROCK_DEFAULT_CONTEXT,
  BEDROCK_DEFAULT_TEMPERATURE,
  BEDROCK_MAX_TOKENS,
@@ -5441,7 +5708,6 @@ export {
  LMSTUDIO_DEFAULT_TEMPERATURE,
  MISTRAL_DEFAULT_TEMPERATURE,
  MODELS_BY_PROVIDER,
- MODEL_ID_KEYS,
  MOONSHOT_DEFAULT_TEMPERATURE,
  OPENAI_AZURE_AI_INFERENCE_PATH,
  OPENAI_NATIVE_DEFAULT_TEMPERATURE,
@@ -5497,6 +5763,7 @@ export {
  convertModelNameForVertex,
  customModePromptsSchema,
  customModesSettingsSchema,
+ customProviders,
  customSupportPromptsSchema,
  deepInfraDefaultModelId,
  deepInfraDefaultModelInfo,
@@ -5514,6 +5781,7 @@ export {
  extensionBridgeCommandSchema,
  extensionBridgeEventSchema,
  extensionInstanceSchema,
+ fauxProviders,
  featherlessDefaultModelId,
  featherlessModels,
  fireworksDefaultModelId,
@@ -5536,26 +5804,34 @@ export {
  idleAsks,
  installMarketplaceItemOptionsSchema,
  interactiveAsks,
+ internalProviders,
  internationalZAiDefaultModelId,
  internationalZAiModels,
  ioIntelligenceDefaultBaseUrl,
  ioIntelligenceDefaultModelId,
  ioIntelligenceModels,
  ipcMessageSchema,
+ isCustomProvider,
  isDynamicProvider,
+ isFauxProvider,
  isGlobalStateKey,
  isIdleAsk,
  isInteractiveAsk,
+ isInternalProvider,
  isLanguage,
+ isLocalProvider,
  isModelParameter,
+ isProviderName,
  isResumableAsk,
  isSecretStateKey,
+ isTypicalProvider,
  lMStudioDefaultModelId,
  lMStudioDefaultModelInfo,
  languages,
  languagesSchema,
  litellmDefaultModelId,
  litellmDefaultModelInfo,
+ localProviders,
  mainlandZAiDefaultModelId,
  mainlandZAiModels,
  marketplaceItemSchema,
@@ -5568,6 +5844,8 @@ export {
  mistralModels,
  modeConfigSchema,
  modeMarketplaceItemSchema,
+ modelIdKeys,
+ modelIdKeysByProvider,
  modelInfoSchema,
  modelParameters,
  modelParametersSchema,
@@ -5635,6 +5913,7 @@ export {
  toolUsageSchema,
  unboundDefaultModelId,
  unboundDefaultModelInfo,
+ usageStatsSchema,
  userFeaturesSchema,
  userSettingsConfigSchema,
  userSettingsDataSchema,
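
For orientation, a minimal TypeScript sketch of how a consumer might use a few of the helpers this release newly exports (isProviderName, isTypicalProvider, isDynamicProvider, modelIdKeysByProvider, usageStatsSchema). This is not part of the published diff; the sample payload is made up, and the exact TypeScript signatures are not shown above, so treat it as illustrative only.

import {
  isProviderName,
  isTypicalProvider,
  isDynamicProvider,
  modelIdKeysByProvider,
  usageStatsSchema,
} from "@roo-code/types"

// Narrow an arbitrary string to a known provider key before branching on it.
const key = "openrouter"
if (isProviderName(key) && isTypicalProvider(key)) {
  // Dynamic providers fetch their model lists at runtime.
  console.log(isDynamicProvider(key)) // true
  // Each typical provider maps to the settings field that stores its model id.
  console.log(modelIdKeysByProvider[key]) // "openRouterModelId"
}

// Validate a usage-stats payload against the new Zod schema (shape assumed from the diff above).
const payload: unknown = {
  success: true,
  data: { dates: [], tasks: [], tokens: [], costs: [], totals: { tasks: 0, tokens: 0, cost: 0 } },
  period: 30,
}
const parsed = usageStatsSchema.safeParse(payload)
if (parsed.success) {
  console.log(parsed.data.data.totals.tokens)
}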