@roo-code/types 1.89.0 → 1.90.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
```diff
@@ -75,6 +75,7 @@ var clineSays = [
   "diff_error",
   "condense_context",
   "condense_context_error",
+  "sliding_window_truncation",
   "codebase_search_result",
   "user_edit_todos"
 ];
@@ -87,7 +88,14 @@ var contextCondenseSchema = z.object({
   cost: z.number(),
   prevContextTokens: z.number(),
   newContextTokens: z.number(),
-  summary: z.string()
+  summary: z.string(),
+  condenseId: z.string().optional()
+});
+var contextTruncationSchema = z.object({
+  truncationId: z.string(),
+  messagesRemoved: z.number(),
+  prevContextTokens: z.number(),
+  newContextTokens: z.number()
 });
 var clineMessageSchema = z.object({
   ts: z.number(),
@@ -101,7 +109,16 @@ var clineMessageSchema = z.object({
   conversationHistoryIndex: z.number().optional(),
   checkpoint: z.record(z.string(), z.unknown()).optional(),
   progressStatus: toolProgressStatusSchema.optional(),
+  /**
+   * Data for successful context condensation.
+   * Present when `say: "condense_context"` and `partial: false`.
+   */
   contextCondense: contextCondenseSchema.optional(),
+  /**
+   * Data for sliding window truncation.
+   * Present when `say: "sliding_window_truncation"`.
+   */
+  contextTruncation: contextTruncationSchema.optional(),
   isProtected: z.boolean().optional(),
   apiProtocol: z.union([z.literal("openai"), z.literal("anthropic")]).optional(),
   isAnswered: z.boolean().optional()
```
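The new `contextTruncation` field mirrors the existing `contextCondense` payload. A minimal consumer-side sketch, assuming the package's exported `ClineMessage` type and that messages arrive as untyped JSON (e.g. over IPC):

```ts
import { clineMessageSchema, type ClineMessage } from "@roo-code/types"

// Sketch only: surface the two context-management payloads from a message.
function describeContextEvent(raw: unknown): string | undefined {
  const message: ClineMessage = clineMessageSchema.parse(raw)
  if (message.say === "sliding_window_truncation" && message.contextTruncation) {
    const { messagesRemoved, prevContextTokens, newContextTokens } = message.contextTruncation
    return `Truncated ${messagesRemoved} messages (${prevContextTokens} -> ${newContextTokens} tokens)`
  }
  if (message.say === "condense_context" && !message.partial && message.contextCondense) {
    return `Condensed context: ${message.contextCondense.summary}`
  }
  return undefined
}
```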
```diff
@@ -130,8 +147,8 @@ var toolNames = [
   "read_file",
   "write_to_file",
   "apply_diff",
-  "insert_content",
   "search_and_replace",
+  "search_replace",
   "apply_patch",
   "search_files",
   "list_files",
@@ -250,7 +267,7 @@ var rooCodeEventsSchema = z3.object({
   ["taskAskResponded" /* TaskAskResponded */]: z3.tuple([z3.string()]),
   ["taskUserMessage" /* TaskUserMessage */]: z3.tuple([z3.string()]),
   ["taskToolFailed" /* TaskToolFailed */]: z3.tuple([z3.string(), toolNamesSchema, z3.string()]),
-  ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: z3.tuple([z3.string(), tokenUsageSchema]),
+  ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: z3.tuple([z3.string(), tokenUsageSchema, toolUsageSchema]),
   ["modeChanged" /* ModeChanged */]: z3.tuple([z3.string()]),
   ["providerProfileChanged" /* ProviderProfileChanged */]: z3.tuple([z3.object({ name: z3.string(), provider: z3.string() })])
 });
```
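`taskTokenUsageUpdated` payloads now carry a third tuple element (tool usage) alongside the task id and token usage. A hedged sketch of validating the widened payload at an untyped boundary; the framing is illustrative, not the extension's actual wiring:

```ts
import { rooCodeEventsSchema } from "@roo-code/types"

// The events schema is a zod object, so the tuple for one event can be pulled
// off its shape and used to validate payloads crossing an untyped boundary.
const payloadSchema = rooCodeEventsSchema.shape.taskTokenUsageUpdated

function onTaskTokenUsageUpdated(raw: unknown) {
  const [taskId, tokenUsage, toolUsage] = payloadSchema.parse(raw)
  console.log(taskId, tokenUsage, toolUsage) // toolUsage is new in 1.90.0
}
```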
```diff
@@ -404,9 +421,9 @@ import { z as z5 } from "zod";
 var reasoningEfforts = ["low", "medium", "high"];
 var reasoningEffortsSchema = z5.enum(reasoningEfforts);
 var reasoningEffortWithMinimalSchema = z5.union([reasoningEffortsSchema, z5.literal("minimal")]);
-var reasoningEffortsExtended = ["none", "minimal", "low", "medium", "high"];
+var reasoningEffortsExtended = ["none", "minimal", "low", "medium", "high", "xhigh"];
 var reasoningEffortExtendedSchema = z5.enum(reasoningEffortsExtended);
-var reasoningEffortSettingValues = ["disable", "none", "minimal", "low", "medium", "high"];
+var reasoningEffortSettingValues = ["disable", "none", "minimal", "low", "medium", "high", "xhigh"];
 var reasoningEffortSettingSchema = z5.enum(reasoningEffortSettingValues);
 var verbosityLevels = ["low", "medium", "high"];
 var verbosityLevelsSchema = z5.enum(verbosityLevels);
```
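`"xhigh"` joins the extended effort lists (it is used by `gpt-5.1-codex-max` later in this diff). A quick validation sketch:

```ts
import { reasoningEffortSettingSchema } from "@roo-code/types"

reasoningEffortSettingSchema.parse("xhigh")             // accepted as of 1.90.0
reasoningEffortSettingSchema.safeParse("ultra").success // false: not a known effort
```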
```diff
@@ -434,7 +451,7 @@ var modelInfoSchema = z5.object({
   supportsTemperature: z5.boolean().optional(),
   defaultTemperature: z5.number().optional(),
   requiredReasoningBudget: z5.boolean().optional(),
-  supportsReasoningEffort: z5.union([z5.boolean(), z5.array(z5.enum(["disable", "none", "minimal", "low", "medium", "high"]))]).optional(),
+  supportsReasoningEffort: z5.union([z5.boolean(), z5.array(z5.enum(["disable", "none", "minimal", "low", "medium", "high", "xhigh"]))]).optional(),
   requiredReasoningEffort: z5.boolean().optional(),
   preserveReasoning: z5.boolean().optional(),
   supportedParameters: z5.array(modelParametersSchema).optional(),
@@ -450,6 +467,8 @@ var modelInfoSchema = z5.object({
   cachableFields: z5.array(z5.string()).optional(),
   // Flag to indicate if the model is deprecated and should not be used
   deprecated: z5.boolean().optional(),
+  // Flag to indicate if the model should hide vendor/company identity in responses
+  isStealthModel: z5.boolean().optional(),
   // Flag to indicate if the model is free (no cost)
   isFree: z5.boolean().optional(),
   // Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
@@ -516,7 +535,9 @@ var codebaseIndexConfigSchema = z6.object({
   codebaseIndexOpenAiCompatibleModelDimension: z6.number().optional(),
   // Bedrock specific fields
   codebaseIndexBedrockRegion: z6.string().optional(),
-  codebaseIndexBedrockProfile: z6.string().optional()
+  codebaseIndexBedrockProfile: z6.string().optional(),
+  // OpenRouter specific fields
+  codebaseIndexOpenRouterSpecificProvider: z6.string().optional()
 });
 var codebaseIndexModelsSchema = z6.object({
   openai: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
```
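The new field pins embedding requests to one upstream provider when indexing is routed through OpenRouter. A hedged sketch; the provider slug is illustrative, and the schema's unshown fields are assumed optional like the visible ones:

```ts
import { codebaseIndexConfigSchema } from "@roo-code/types"

const result = codebaseIndexConfigSchema.safeParse({
  codebaseIndexOpenRouterSpecificProvider: "deepinfra", // illustrative slug
})
console.log(result.success)
```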
```diff
@@ -761,7 +782,7 @@ var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;
 // src/providers/baseten.ts
 var basetenModels = {
   "moonshotai/Kimi-K2-Thinking": {
-    maxTokens: 163800,
+    maxTokens: 16384,
     contextWindow: 262e3,
     supportsImages: false,
     supportsPromptCache: false,
@@ -773,7 +794,7 @@ var basetenModels = {
     description: "Kimi K2 Thinking - A model with enhanced reasoning capabilities from Kimi K2"
   },
   "zai-org/GLM-4.6": {
-    maxTokens: 2e5,
+    maxTokens: 16384,
     contextWindow: 2e5,
     supportsImages: false,
     supportsPromptCache: false,
@@ -785,7 +806,7 @@ var basetenModels = {
     description: "Frontier open model with advanced agentic, reasoning and coding capabilities"
   },
   "deepseek-ai/DeepSeek-R1": {
-    maxTokens: 131072,
+    maxTokens: 16384,
     contextWindow: 163840,
     supportsImages: false,
     supportsPromptCache: false,
@@ -796,7 +817,7 @@ var basetenModels = {
     description: "DeepSeek's first-generation reasoning model"
   },
   "deepseek-ai/DeepSeek-R1-0528": {
-    maxTokens: 131072,
+    maxTokens: 16384,
     contextWindow: 163840,
     supportsImages: false,
     supportsPromptCache: false,
@@ -807,7 +828,7 @@ var basetenModels = {
     description: "The latest revision of DeepSeek's first-generation reasoning model"
   },
   "deepseek-ai/DeepSeek-V3-0324": {
-    maxTokens: 131072,
+    maxTokens: 16384,
     contextWindow: 163840,
     supportsImages: false,
     supportsPromptCache: false,
@@ -818,7 +839,7 @@ var basetenModels = {
     description: "Fast general-purpose LLM with enhanced reasoning capabilities"
   },
   "deepseek-ai/DeepSeek-V3.1": {
-    maxTokens: 131072,
+    maxTokens: 16384,
     contextWindow: 163840,
     supportsImages: false,
     supportsPromptCache: false,
@@ -828,8 +849,20 @@ var basetenModels = {
     cacheReadsPrice: 0,
     description: "Extremely capable general-purpose LLM with hybrid reasoning capabilities and advanced tool calling"
   },
+  "deepseek-ai/DeepSeek-V3.2": {
+    maxTokens: 16384,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    inputPrice: 0.3,
+    outputPrice: 0.45,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "DeepSeek's hybrid reasoning model with efficient long context scaling with GPT-5 level performance"
+  },
   "Qwen/Qwen3-235B-A22B-Instruct-2507": {
-    maxTokens: 262144,
+    maxTokens: 16384,
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: false,
@@ -840,7 +873,7 @@ var basetenModels = {
     description: "Mixture-of-experts LLM with math and reasoning capabilities"
   },
   "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
-    maxTokens: 262144,
+    maxTokens: 16384,
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: false,
@@ -851,7 +884,7 @@ var basetenModels = {
     description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
   },
   "openai/gpt-oss-120b": {
-    maxTokens: 128072,
+    maxTokens: 16384,
     contextWindow: 128072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -863,7 +896,7 @@ var basetenModels = {
     description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
   },
   "moonshotai/Kimi-K2-Instruct-0905": {
-    maxTokens: 168e3,
+    maxTokens: 16384,
     contextWindow: 262e3,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1316,6 +1349,52 @@ var bedrockModels = {
     supportsPromptCache: false,
     inputPrice: 0.02,
     description: "Amazon Titan Text Embeddings V2"
+  },
+  "moonshot.kimi-k2-thinking": {
+    maxTokens: 32e3,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    defaultToolProtocol: "native",
+    preserveReasoning: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    description: "Kimi K2 Thinking (1T parameter MoE model with 32B active parameters)"
+  },
+  "minimax.minimax-m2": {
+    maxTokens: 16384,
+    contextWindow: 196608,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    defaultToolProtocol: "native",
+    preserveReasoning: true,
+    inputPrice: 0.3,
+    outputPrice: 1.2,
+    description: "MiniMax M2 (230B parameter MoE model with 10B active parameters)"
+  },
+  "qwen.qwen3-next-80b-a3b": {
+    maxTokens: 8192,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    defaultToolProtocol: "native",
+    inputPrice: 0.15,
+    outputPrice: 1.2,
+    description: "Qwen3 Next 80B (MoE model with 3B active parameters)"
+  },
+  "qwen.qwen3-coder-480b-a35b-v1:0": {
+    maxTokens: 8192,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    defaultToolProtocol: "native",
+    inputPrice: 0.45,
+    outputPrice: 1.8,
+    description: "Qwen3 Coder 480B (MoE model with 35B active parameters)"
   }
 };
 var BEDROCK_DEFAULT_TEMPERATURE = 0.3;
@@ -1381,8 +1460,8 @@ var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
 var cerebrasDefaultModelId = "gpt-oss-120b";
 var cerebrasModels = {
   "zai-glm-4.6": {
-    maxTokens: 16384,
-    // consistent with their other models
+    maxTokens: 8192,
+    // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1392,7 +1471,8 @@ var cerebrasModels = {
     description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
   },
   "qwen-3-235b-a22b-instruct-2507": {
-    maxTokens: 64e3,
+    maxTokens: 8192,
+    // Conservative default to avoid premature rate limiting
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1402,7 +1482,8 @@ var cerebrasModels = {
     description: "Intelligent model with ~1400 tokens/s"
   },
   "llama-3.3-70b": {
-    maxTokens: 64e3,
+    maxTokens: 8192,
+    // Conservative default to avoid premature rate limiting
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1412,7 +1493,8 @@ var cerebrasModels = {
     description: "Powerful model with ~2600 tokens/s"
   },
   "qwen-3-32b": {
-    maxTokens: 64e3,
+    maxTokens: 8192,
+    // Conservative default to avoid premature rate limiting
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1422,7 +1504,8 @@ var cerebrasModels = {
     description: "SOTA coding performance with ~2500 tokens/s"
   },
   "gpt-oss-120b": {
-    maxTokens: 8e3,
+    maxTokens: 8192,
+    // Conservative default to avoid premature rate limiting
     contextWindow: 64e3,
     supportsImages: false,
     supportsPromptCache: false,
```
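The repeated comment explains the Cerebras change: the provider appears to reserve a request's `maxTokens` against the rate limit up front, so a context-sized default exhausts quota even for short replies. A sketch of the kind of clamp a client might apply; the helper is hypothetical, not package API, and assumes the exported `cerebrasModels` map:

```ts
import { cerebrasModels } from "@roo-code/types"

// Hypothetical clamp: never ask Cerebras for more output tokens than the
// model's (now conservative) default, since the full amount is reserved.
function effectiveMaxTokens(modelId: keyof typeof cerebrasModels, requested?: number): number {
  const cap = cerebrasModels[modelId].maxTokens // 8192 for the entries in this diff
  return Math.min(requested ?? cap, cap)
}
```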
```diff
@@ -1947,35 +2030,37 @@ var deepSeekModels = {
     supportsImages: false,
     supportsPromptCache: true,
     supportsNativeTools: true,
-    inputPrice: 0.56,
-    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
-    outputPrice: 1.68,
-    // $1.68 per million tokens - Updated Sept 5, 2025
-    cacheWritesPrice: 0.56,
-    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
-    cacheReadsPrice: 0.07,
-    // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
-    description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
+    defaultToolProtocol: "native",
+    inputPrice: 0.28,
+    // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+    outputPrice: 0.42,
+    // $0.42 per million tokens - Updated Dec 9, 2025
+    cacheWritesPrice: 0.28,
+    // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+    cacheReadsPrice: 0.028,
+    // $0.028 per million tokens (cache hit) - Updated Dec 9, 2025
+    description: `DeepSeek-V3.2 (Non-thinking Mode) achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally. Supports JSON output, tool calls, chat prefix completion (beta), and FIM completion (beta).`
   },
   "deepseek-reasoner": {
-    maxTokens: 65536,
-    // 64K max output for reasoning mode
+    maxTokens: 8192,
+    // 8K max output
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
     supportsNativeTools: true,
-    inputPrice: 0.56,
-    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
-    outputPrice: 1.68,
-    // $1.68 per million tokens - Updated Sept 5, 2025
-    cacheWritesPrice: 0.56,
-    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
-    cacheReadsPrice: 0.07,
-    // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
-    description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
+    defaultToolProtocol: "native",
+    inputPrice: 0.28,
+    // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+    outputPrice: 0.42,
+    // $0.42 per million tokens - Updated Dec 9, 2025
+    cacheWritesPrice: 0.28,
+    // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+    cacheReadsPrice: 0.028,
+    // $0.028 per million tokens (cache hit) - Updated Dec 9, 2025
+    description: `DeepSeek-V3.2 (Thinking Mode) achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 8K output tokens. Supports JSON output, tool calls, and chat prefix completion (beta).`
   }
 };
-var DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6;
+var DEEP_SEEK_DEFAULT_TEMPERATURE = 0;
 
 // src/providers/doubao.ts
 var doubaoDefaultModelId = "doubao-seed-1-6-250615";
@@ -2430,21 +2515,6 @@ var geminiModels = {
   }
 };
 
-// src/providers/glama.ts
-var glamaDefaultModelId = "anthropic/claude-3-7-sonnet";
-var glamaDefaultModelInfo = {
-  maxTokens: 8192,
-  contextWindow: 2e5,
-  supportsImages: true,
-  supportsPromptCache: true,
-  inputPrice: 3,
-  outputPrice: 15,
-  cacheWritesPrice: 3.75,
-  cacheReadsPrice: 0.3,
-  description: "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)"
-};
-var GLAMA_DEFAULT_TEMPERATURE = 0;
-
 // src/providers/groq.ts
 var groqDefaultModelId = "moonshotai/kimi-k2-instruct-0905";
 var groqModels = {
@@ -2625,6 +2695,7 @@ var litellmDefaultModelInfo = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
+  supportsNativeTools: true,
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -2823,10 +2894,30 @@ var ollamaDefaultModelInfo = {
 // src/providers/openai.ts
 var openAiNativeDefaultModelId = "gpt-5.1";
 var openAiNativeModels = {
+  "gpt-5.1-codex-max": {
+    maxTokens: 128e3,
+    contextWindow: 4e5,
+    supportsNativeTools: true,
+    includedTools: ["apply_patch"],
+    excludedTools: ["apply_diff", "write_to_file"],
+    supportsImages: true,
+    supportsPromptCache: true,
+    promptCacheRetention: "24h",
+    supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+    reasoningEffort: "medium",
+    inputPrice: 1.25,
+    outputPrice: 10,
+    cacheReadsPrice: 0.125,
+    supportsTemperature: false,
+    tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }],
+    description: "GPT-5.1 Codex Max: Our most intelligent coding model optimized for long-horizon, agentic coding tasks"
+  },
   "gpt-5.1": {
     maxTokens: 128e3,
     contextWindow: 4e5,
     supportsNativeTools: true,
+    includedTools: ["apply_patch"],
+    excludedTools: ["apply_diff", "write_to_file"],
     supportsImages: true,
     supportsPromptCache: true,
     promptCacheRetention: "24h",
```
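`includedTools`/`excludedTools` let a model entry steer which editing tools are offered, here preferring `apply_patch` over `apply_diff`/`write_to_file`. A hedged sketch of deriving an effective tool set; this is not the extension's actual resolver, and the `ToolName` type import is an assumption:

```ts
import { openAiNativeModels, toolNames, type ToolName } from "@roo-code/types"

function effectiveTools(modelId: keyof typeof openAiNativeModels): ToolName[] {
  const info = openAiNativeModels[modelId] as {
    includedTools?: ToolName[]
    excludedTools?: ToolName[]
  }
  // Start from the full tool list, make sure included tools are present,
  // then strip the model's exclusions.
  const tools = new Set<ToolName>([...toolNames, ...(info.includedTools ?? [])])
  for (const excluded of info.excludedTools ?? []) tools.delete(excluded)
  return [...tools]
}

// effectiveTools("gpt-5.1-codex-max") keeps "apply_patch" but drops
// "apply_diff" and "write_to_file".
```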
```diff
@@ -2847,6 +2938,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   promptCacheRetention: "24h",
@@ -2863,6 +2956,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   promptCacheRetention: "24h",
@@ -2878,6 +2973,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -2897,6 +2994,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -2916,6 +3015,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningEffort: ["low", "medium", "high"],
@@ -2931,6 +3032,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -2947,6 +3050,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 1.25,
@@ -2958,6 +3063,8 @@ var openAiNativeModels = {
   maxTokens: 32768,
   contextWindow: 1047576,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 2,
@@ -2972,6 +3079,8 @@ var openAiNativeModels = {
   maxTokens: 32768,
   contextWindow: 1047576,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 0.4,
@@ -2986,6 +3095,8 @@ var openAiNativeModels = {
   maxTokens: 32768,
   contextWindow: 1047576,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 0.1,
@@ -3193,6 +3304,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3212,6 +3325,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3231,6 +3346,8 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  includedTools: ["apply_patch"],
+  excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
   supportsPromptCache: true,
   supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3393,7 +3510,18 @@ var RooModelSchema = z7.object({
   type: z7.literal("language"),
   tags: z7.array(z7.string()).optional(),
   pricing: RooPricingSchema,
-  deprecated: z7.boolean().optional()
+  deprecated: z7.boolean().optional(),
+  default_temperature: z7.number().optional(),
+  // Dynamic settings that map directly to ModelInfo properties
+  // Allows the API to configure model-specific defaults like includedTools, excludedTools, reasoningEffort, etc.
+  // These are always direct values (e.g., includedTools: ['search_replace']) for backward compatibility with old clients.
+  settings: z7.record(z7.string(), z7.unknown()).optional(),
+  // Versioned settings keyed by version number (e.g., '3.36.4').
+  // Each version key maps to a settings object that is used when plugin version >= that version.
+  // New clients find the highest version key <= current version and use those settings.
+  // Old clients ignore this field and use plain values from `settings`.
+  // Example: { '3.36.4': { includedTools: ['search_replace'] }, '3.35.0': { ... } }
+  versionedSettings: z7.record(z7.string(), z7.record(z7.string(), z7.unknown())).optional()
 });
 var RooModelsResponseSchema = z7.object({
   object: z7.literal("list"),
```
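The comments spell out the resolution rule for `versionedSettings`. A minimal sketch of that rule, assuming a plain dotted-numeric version compare (a real client would likely use a semver library) and assuming versioned values override plain `settings`:

```ts
type Settings = Record<string, unknown>

// Dotted-numeric compare, illustrative only: "3.36.4" < "3.36.10".
function compareVersions(a: string, b: string): number {
  const pa = a.split(".").map(Number)
  const pb = b.split(".").map(Number)
  for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
    const d = (pa[i] ?? 0) - (pb[i] ?? 0)
    if (d !== 0) return d
  }
  return 0
}

function resolveModelSettings(
  pluginVersion: string,
  settings?: Settings,
  versionedSettings?: Record<string, Settings>,
): Settings {
  // Highest version key that is <= the running plugin version wins.
  const eligible = Object.keys(versionedSettings ?? {})
    .filter((v) => compareVersions(v, pluginVersion) <= 0)
    .sort(compareVersions)
  const best = eligible.at(-1)
  // Merging over plain `settings` is an assumption; the comments only say
  // old clients fall back to `settings`.
  return best ? { ...settings, ...versionedSettings![best] } : { ...settings }
}

// resolveModelSettings("3.36.5", { includedTools: ["apply_diff"] },
//   { "3.36.4": { includedTools: ["search_replace"] } })
// -> { includedTools: ["search_replace"] }
```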
```diff
@@ -4159,15 +4287,17 @@ var xaiDefaultModelId = "grok-code-fast-1";
 var xaiModels = {
   "grok-code-fast-1": {
     maxTokens: 16384,
-    contextWindow: 262144,
-    supportsImages: false,
+    contextWindow: 256e3,
+    supportsImages: true,
     supportsPromptCache: true,
     supportsNativeTools: true,
     inputPrice: 0.2,
     outputPrice: 1.5,
     cacheWritesPrice: 0.02,
     cacheReadsPrice: 0.02,
-    description: "xAI's Grok Code Fast model with 256K context window"
+    description: "xAI's Grok Code Fast model with 256K context window",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   },
   "grok-4-1-fast-reasoning": {
     maxTokens: 65536,
@@ -4179,7 +4309,9 @@ var xaiModels = {
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
     cacheReadsPrice: 0.05,
-    description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+    description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   },
   "grok-4-1-fast-non-reasoning": {
     maxTokens: 65536,
@@ -4191,7 +4323,9 @@ var xaiModels = {
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
     cacheReadsPrice: 0.05,
-    description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+    description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   },
   "grok-4-fast-reasoning": {
     maxTokens: 65536,
@@ -4203,7 +4337,9 @@ var xaiModels = {
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
     cacheReadsPrice: 0.05,
-    description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+    description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   },
   "grok-4-fast-non-reasoning": {
     maxTokens: 65536,
@@ -4215,9 +4351,11 @@ var xaiModels = {
     outputPrice: 0.5,
     cacheWritesPrice: 0.05,
     cacheReadsPrice: 0.05,
-    description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+    description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   },
-  "grok-4": {
+  "grok-4-0709": {
     maxTokens: 8192,
     contextWindow: 256e3,
     supportsImages: true,
@@ -4227,36 +4365,14 @@ var xaiModels = {
     outputPrice: 15,
     cacheWritesPrice: 0.75,
     cacheReadsPrice: 0.75,
-    description: "xAI's Grok-4 model with 256K context window"
-  },
-  "grok-3": {
-    maxTokens: 8192,
-    contextWindow: 131072,
-    supportsImages: false,
-    supportsPromptCache: true,
-    supportsNativeTools: true,
-    inputPrice: 3,
-    outputPrice: 15,
-    cacheWritesPrice: 0.75,
-    cacheReadsPrice: 0.75,
-    description: "xAI's Grok-3 model with 128K context window"
-  },
-  "grok-3-fast": {
-    maxTokens: 8192,
-    contextWindow: 131072,
-    supportsImages: false,
-    supportsPromptCache: true,
-    supportsNativeTools: true,
-    inputPrice: 5,
-    outputPrice: 25,
-    cacheWritesPrice: 1.25,
-    cacheReadsPrice: 1.25,
-    description: "xAI's Grok-3 fast model with 128K context window"
+    description: "xAI's Grok-4 model with 256K context window",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   },
   "grok-3-mini": {
     maxTokens: 8192,
     contextWindow: 131072,
-    supportsImages: false,
+    supportsImages: true,
     supportsPromptCache: true,
     supportsNativeTools: true,
     inputPrice: 0.3,
@@ -4264,40 +4380,24 @@ var xaiModels = {
     cacheWritesPrice: 0.07,
     cacheReadsPrice: 0.07,
     description: "xAI's Grok-3 mini model with 128K context window",
-    supportsReasoningEffort: true
-  },
-  "grok-3-mini-fast": {
-    maxTokens: 8192,
-    contextWindow: 131072,
-    supportsImages: false,
-    supportsPromptCache: true,
-    supportsNativeTools: true,
-    inputPrice: 0.6,
-    outputPrice: 4,
-    cacheWritesPrice: 0.15,
-    cacheReadsPrice: 0.15,
-    description: "xAI's Grok-3 mini fast model with 128K context window",
-    supportsReasoningEffort: true
+    supportsReasoningEffort: ["low", "high"],
+    reasoningEffort: "low",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   },
-  "grok-2-1212": {
+  "grok-3": {
     maxTokens: 8192,
     contextWindow: 131072,
-    supportsImages: false,
-    supportsPromptCache: false,
-    supportsNativeTools: true,
-    inputPrice: 2,
-    outputPrice: 10,
-    description: "xAI's Grok-2 model (version 1212) with 128K context window"
-  },
-  "grok-2-vision-1212": {
-    maxTokens: 8192,
-    contextWindow: 32768,
     supportsImages: true,
-    supportsPromptCache: false,
+    supportsPromptCache: true,
     supportsNativeTools: true,
-    inputPrice: 2,
-    outputPrice: 10,
-    description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window"
+    inputPrice: 3,
+    outputPrice: 15,
+    cacheWritesPrice: 0.75,
+    cacheReadsPrice: 0.75,
+    description: "xAI's Grok-3 model with 128K context window",
+    includedTools: ["search_replace"],
+    excludedTools: ["apply_diff"]
   }
 };
 
@@ -4404,7 +4504,6 @@ var internationalZAiModels = {
     supportsImages: false,
     supportsPromptCache: true,
     supportsNativeTools: true,
-    supportsReasoningBinary: true,
     inputPrice: 0.6,
     outputPrice: 2.2,
     cacheWritesPrice: 0,
@@ -4477,7 +4576,6 @@ var internationalZAiModels = {
     supportsImages: false,
     supportsPromptCache: true,
     supportsNativeTools: true,
-    supportsReasoningBinary: true,
    inputPrice: 0.6,
     outputPrice: 2.2,
     cacheWritesPrice: 0,
@@ -4505,7 +4603,6 @@ var mainlandZAiModels = {
     supportsImages: false,
     supportsPromptCache: true,
     supportsNativeTools: true,
-    supportsReasoningBinary: true,
     inputPrice: 0.29,
     outputPrice: 1.14,
     cacheWritesPrice: 0,
@@ -4578,7 +4675,6 @@ var mainlandZAiModels = {
     supportsImages: false,
     supportsPromptCache: true,
     supportsNativeTools: true,
-    supportsReasoningBinary: true,
     inputPrice: 0.29,
     outputPrice: 1.14,
     cacheWritesPrice: 0,
@@ -4654,8 +4750,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
       return openRouterDefaultModelId;
     case "requesty":
       return requestyDefaultModelId;
-    case "glama":
-      return glamaDefaultModelId;
     case "unbound":
       return unboundDefaultModelId;
     case "litellm":
@@ -4742,7 +4836,6 @@ var dynamicProviders = [
   "io-intelligence",
   "requesty",
   "unbound",
-  "glama",
   "roo",
   "chutes"
 ];
@@ -4824,10 +4917,6 @@ var claudeCodeSchema = apiModelIdProviderModelSchema.extend({
   claudeCodePath: z8.string().optional(),
   claudeCodeMaxOutputTokens: z8.number().int().min(1).max(2e5).optional()
 });
-var glamaSchema = baseProviderSettingsSchema.extend({
-  glamaModelId: z8.string().optional(),
-  glamaApiKey: z8.string().optional()
-});
 var openRouterSchema = baseProviderSettingsSchema.extend({
   openRouterApiKey: z8.string().optional(),
   openRouterModelId: z8.string().optional(),
@@ -5012,7 +5101,6 @@ var defaultSchema = z8.object({
 var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
   anthropicSchema.merge(z8.object({ apiProvider: z8.literal("anthropic") })),
   claudeCodeSchema.merge(z8.object({ apiProvider: z8.literal("claude-code") })),
-  glamaSchema.merge(z8.object({ apiProvider: z8.literal("glama") })),
   openRouterSchema.merge(z8.object({ apiProvider: z8.literal("openrouter") })),
   bedrockSchema.merge(z8.object({ apiProvider: z8.literal("bedrock") })),
   vertexSchema.merge(z8.object({ apiProvider: z8.literal("vertex") })),
@@ -5054,7 +5142,6 @@ var providerSettingsSchema = z8.object({
   apiProvider: providerNamesSchema.optional(),
   ...anthropicSchema.shape,
   ...claudeCodeSchema.shape,
-  ...glamaSchema.shape,
   ...openRouterSchema.shape,
   ...bedrockSchema.shape,
   ...vertexSchema.shape,
@@ -5099,7 +5186,6 @@ var discriminatedProviderSettingsWithIdSchema = providerSettingsSchemaDiscrimina
 var PROVIDER_SETTINGS_KEYS = providerSettingsSchema.keyof().options;
 var modelIdKeys = [
   "apiModelId",
-  "glamaModelId",
   "openRouterModelId",
   "openAiModelId",
   "ollamaModelId",
@@ -5121,7 +5207,6 @@ var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key)
 var modelIdKeysByProvider = {
   anthropic: "apiModelId",
   "claude-code": "apiModelId",
-  glama: "glamaModelId",
   openrouter: "openRouterModelId",
   bedrock: "apiModelId",
   vertex: "apiModelId",
@@ -5252,7 +5337,6 @@ var MODELS_BY_PROVIDER = {
   zai: { id: "zai", label: "Z.ai", models: Object.keys(internationalZAiModels) },
   baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
   // Dynamic providers; models pulled from remote APIs.
-  glama: { id: "glama", label: "Glama", models: [] },
   huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
   litellm: { id: "litellm", label: "LiteLLM", models: [] },
   openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
@@ -5484,6 +5568,67 @@ var rooCodeTelemetryEventSchema = z11.discriminatedUnion("type", [
     })
   })
 ]);
+var EXPECTED_API_ERROR_CODES = /* @__PURE__ */ new Set([
+  402,
+  // Payment required - billing issues
+  429
+  // Rate limit - expected when hitting API limits
+]);
+var EXPECTED_ERROR_MESSAGE_PATTERNS = [
+  /^429\b/,
+  // Message starts with "429"
+  /rate limit/i
+  // Contains "rate limit" (case insensitive)
+];
+function isOpenAISdkError(error) {
+  return typeof error === "object" && error !== null && "status" in error && typeof error.status === "number";
+}
+function getErrorStatusCode(error) {
+  if (isOpenAISdkError(error)) {
+    return error.status;
+  }
+  return void 0;
+}
+function getErrorMessage(error) {
+  if (isOpenAISdkError(error)) {
+    return error.error?.metadata?.raw || error.error?.message || error.message;
+  }
+  return void 0;
+}
+function shouldReportApiErrorToTelemetry(errorCode, errorMessage) {
+  if (errorCode !== void 0 && EXPECTED_API_ERROR_CODES.has(errorCode)) {
+    return false;
+  }
+  if (errorMessage) {
+    for (const pattern of EXPECTED_ERROR_MESSAGE_PATTERNS) {
+      if (pattern.test(errorMessage)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+var ApiProviderError = class extends Error {
+  constructor(message, provider, modelId, operation, errorCode) {
+    super(message);
+    this.provider = provider;
+    this.modelId = modelId;
+    this.operation = operation;
+    this.errorCode = errorCode;
+    this.name = "ApiProviderError";
+  }
+};
+function isApiProviderError(error) {
+  return error instanceof Error && error.name === "ApiProviderError" && "provider" in error && "modelId" in error && "operation" in error;
+}
+function extractApiProviderErrorProperties(error) {
+  return {
+    provider: error.provider,
+    modelId: error.modelId,
+    operation: error.operation,
+    ...error.errorCode !== void 0 && { errorCode: error.errorCode }
+  };
+}
 
 // src/mode.ts
 import { z as z12 } from "zod";
```
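A consumer-side sketch of the new error utilities: wrap a provider failure in `ApiProviderError`, then skip telemetry for the expected 402/429 and rate-limit cases. `callProvider`, the provider/model names, and the reporting sink are illustrative, not part of the package:

```ts
import {
  ApiProviderError,
  getErrorMessage,
  getErrorStatusCode,
  isApiProviderError,
  shouldReportApiErrorToTelemetry,
} from "@roo-code/types"

async function completeWithTelemetry(callProvider: () => Promise<string>) {
  try {
    return await callProvider()
  } catch (err) {
    const wrapped = new ApiProviderError(
      getErrorMessage(err) ?? String(err),
      "openrouter",             // provider (illustrative)
      "anthropic/claude-3.5",   // modelId (illustrative)
      "createMessage",          // operation (illustrative)
      getErrorStatusCode(err),
    )
    // 402/429 codes and "rate limit" messages are expected; don't report them.
    if (isApiProviderError(wrapped) && shouldReportApiErrorToTelemetry(wrapped.errorCode, wrapped.message)) {
      console.error("report to telemetry:", wrapped)
    }
    throw wrapped
  }
}
```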
```diff
@@ -5562,7 +5707,7 @@ var DEFAULT_MODES = [
     whenToUse: "Use this mode when you need to plan, design, or strategize before implementation. Perfect for breaking down complex problems, creating technical specifications, designing system architecture, or brainstorming solutions before coding.",
     description: "Plan and design before implementation",
     groups: ["read", ["edit", { fileRegex: "\\.md$", description: "Markdown files only" }], "browser", "mcp"],
-    customInstructions: "1. Do some information gathering (using provided tools) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, break down the task into clear, actionable steps and create a todo list using the `update_todo_list` tool. Each todo item should be:\n - Specific and actionable\n - Listed in logical execution order\n - Focused on a single, well-defined outcome\n - Clear enough that another mode could execute it independently\n\n **Note:** If the `update_todo_list` tool is not available, write the plan to a markdown file (e.g., `plan.md` or `todo.md`) instead.\n\n4. As you gather more information or discover new requirements, update the todo list to reflect the current understanding of what needs to be accomplished.\n\n5. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and refine the todo list.\n\n6. Include Mermaid diagrams if they help clarify complex workflows or system architecture. Please avoid using double quotes (\"\") and parentheses () inside square brackets ([]) in Mermaid diagrams, as this can cause parsing errors.\n\n7. Use the switch_mode tool to request that the user switch to another mode to implement the solution.\n\n**IMPORTANT: Focus on creating clear, actionable todo lists rather than lengthy markdown documents. Use the todo list as your primary planning tool to track and organize the work that needs to be done.**"
+    customInstructions: "1. Do some information gathering (using provided tools) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, break down the task into clear, actionable steps and create a todo list using the `update_todo_list` tool. Each todo item should be:\n - Specific and actionable\n - Listed in logical execution order\n - Focused on a single, well-defined outcome\n - Clear enough that another mode could execute it independently\n\n **Note:** If the `update_todo_list` tool is not available, write the plan to a markdown file (e.g., `plan.md` or `todo.md`) instead.\n\n4. As you gather more information or discover new requirements, update the todo list to reflect the current understanding of what needs to be accomplished.\n\n5. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and refine the todo list.\n\n6. Include Mermaid diagrams if they help clarify complex workflows or system architecture. Please avoid using double quotes (\"\") and parentheses () inside square brackets ([]) in Mermaid diagrams, as this can cause parsing errors.\n\n7. Use the switch_mode tool to request that the user switch to another mode to implement the solution.\n\n**IMPORTANT: Focus on creating clear, actionable todo lists rather than lengthy markdown documents. Use the todo list as your primary planning tool to track and organize the work that needs to be done.**\n\n**CRITICAL: Never provide level of effort time estimates (e.g., hours, days, weeks) for tasks. Focus solely on breaking down the work into clear, actionable steps without estimating how long they will take.**\n\nUnless told otherwise, if you want to save a plan file, put it in the /plans directory"
   },
   {
     slug: "code",
@@ -5780,7 +5925,6 @@ var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
 var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
 var SECRET_STATE_KEYS = [
   "apiKey",
-  "glamaApiKey",
   "openRouterApiKey",
   "awsAccessKey",
   "awsApiKey",
@@ -6346,6 +6490,16 @@ var usageStatsSchema = z16.object({
   // Period in days (e.g., 30)
 });
 
+// src/context-management.ts
+var CONTEXT_MANAGEMENT_EVENTS = [
+  "condense_context",
+  "condense_context_error",
+  "sliding_window_truncation"
+];
+function isContextManagementEvent(value) {
+  return typeof value === "string" && CONTEXT_MANAGEMENT_EVENTS.includes(value);
+}
+
 // src/cookie-consent.ts
 var CONSENT_COOKIE_NAME = "roo-code-cookie-consent";
 var COOKIE_CONSENT_EVENTS = {
```
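`isContextManagementEvent` is a type guard over the three `say` values above. A small routing sketch; the `ClineMessage` type import is an assumption:

```ts
import { isContextManagementEvent, type ClineMessage } from "@roo-code/types"

function handleSay(message: ClineMessage) {
  if (message.say && isContextManagementEvent(message.say)) {
    console.log("context management event:", message.say) // condense/truncation traffic
  }
}
```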
```diff
@@ -6537,6 +6691,7 @@ export {
   ANTHROPIC_DEFAULT_MAX_TOKENS,
   ANTHROPIC_STYLE_PROVIDERS,
   AWS_INFERENCE_PROFILE_MAPPING,
+  ApiProviderError,
   BEDROCK_1M_CONTEXT_MODEL_IDS,
   BEDROCK_DEFAULT_CONTEXT,
   BEDROCK_DEFAULT_TEMPERATURE,
@@ -6546,6 +6701,7 @@ export {
   CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
   CODEBASE_INDEX_DEFAULTS,
   CONSENT_COOKIE_NAME,
+  CONTEXT_MANAGEMENT_EVENTS,
   COOKIE_CONSENT_EVENTS,
   ConnectionState,
   DEEP_SEEK_DEFAULT_TEMPERATURE,
@@ -6558,10 +6714,10 @@ export {
   DOUBAO_API_CHAT_PATH,
   EVALS_SETTINGS,
   EVALS_TIMEOUT,
+  EXPECTED_API_ERROR_CODES,
   ExtensionBridgeCommandName,
   ExtensionBridgeEventName,
   ExtensionSocketEvents,
-  GLAMA_DEFAULT_TEMPERATURE,
   GLOBAL_SECRET_KEYS,
   GLOBAL_SETTINGS_KEYS,
   GLOBAL_STATE_KEYS,
@@ -6645,6 +6801,7 @@ export {
   commandExecutionStatusSchema,
   commandIds,
   contextCondenseSchema,
+  contextTruncationSchema,
   convertModelNameForVertex,
   customModePromptsSchema,
   customModesSettingsSchema,
@@ -6666,6 +6823,7 @@ export {
   extensionBridgeCommandSchema,
   extensionBridgeEventSchema,
   extensionInstanceSchema,
+  extractApiProviderErrorProperties,
   fauxProviders,
   featherlessDefaultModelId,
   featherlessModels,
@@ -6677,12 +6835,12 @@ export {
   getApiProtocol,
   getClaudeCodeModelId,
   getEffectiveProtocol,
+  getErrorMessage,
+  getErrorStatusCode,
   getImageGenerationProvider,
   getModelId,
   getProviderDefaultModelId,
   gitPropertiesSchema,
-  glamaDefaultModelId,
-  glamaDefaultModelInfo,
   globalSettingsSchema,
   groqDefaultModelId,
   groqModels,
@@ -6699,6 +6857,8 @@ export {
   ioIntelligenceDefaultModelId,
   ioIntelligenceModels,
   ipcMessageSchema,
+  isApiProviderError,
+  isContextManagementEvent,
   isCustomProvider,
   isDynamicProvider,
   isFauxProvider,
@@ -6787,6 +6947,7 @@ export {
   serviceTierSchema,
   serviceTiers,
   shareResponseSchema,
+  shouldReportApiErrorToTelemetry,
   shouldUseSingleFileRead,
   staticAppPropertiesSchema,
   suggestionItemSchema,
```