@roo-code/types 1.89.0 → 1.91.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -75,6 +75,7 @@ var clineSays = [
  "diff_error",
  "condense_context",
  "condense_context_error",
+ "sliding_window_truncation",
  "codebase_search_result",
  "user_edit_todos"
  ];
@@ -87,7 +88,14 @@ var contextCondenseSchema = z.object({
  cost: z.number(),
  prevContextTokens: z.number(),
  newContextTokens: z.number(),
- summary: z.string()
+ summary: z.string(),
+ condenseId: z.string().optional()
+ });
+ var contextTruncationSchema = z.object({
+ truncationId: z.string(),
+ messagesRemoved: z.number(),
+ prevContextTokens: z.number(),
+ newContextTokens: z.number()
  });
  var clineMessageSchema = z.object({
  ts: z.number(),
@@ -101,7 +109,16 @@ var clineMessageSchema = z.object({
  conversationHistoryIndex: z.number().optional(),
  checkpoint: z.record(z.string(), z.unknown()).optional(),
  progressStatus: toolProgressStatusSchema.optional(),
+ /**
+ * Data for successful context condensation.
+ * Present when `say: "condense_context"` and `partial: false`.
+ */
  contextCondense: contextCondenseSchema.optional(),
+ /**
+ * Data for sliding window truncation.
+ * Present when `say: "sliding_window_truncation"`.
+ */
+ contextTruncation: contextTruncationSchema.optional(),
  isProtected: z.boolean().optional(),
  apiProtocol: z.union([z.literal("openai"), z.literal("anthropic")]).optional(),
  isAnswered: z.boolean().optional()
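
For illustration, a message carrying the new truncation payload might look like the sketch below (the ts/type/say fields come from the surrounding clineMessageSchema; every value here is made up):

    import { clineMessageSchema } from "@roo-code/types"

    // Parses because contextTruncation matches the new contextTruncationSchema.
    const truncationMessage = clineMessageSchema.parse({
      ts: Date.now(),
      type: "say",
      say: "sliding_window_truncation",
      contextTruncation: {
        truncationId: "trunc-001", // hypothetical id
        messagesRemoved: 12,
        prevContextTokens: 180000,
        newContextTokens: 95000,
      },
    })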
@@ -130,12 +147,11 @@ var toolNames = [
  "read_file",
  "write_to_file",
  "apply_diff",
- "insert_content",
  "search_and_replace",
+ "search_replace",
  "apply_patch",
  "search_files",
  "list_files",
- "list_code_definition_names",
  "browser_action",
  "use_mcp_tool",
  "access_mcp_resource",
@@ -250,7 +266,7 @@ var rooCodeEventsSchema = z3.object({
  ["taskAskResponded" /* TaskAskResponded */]: z3.tuple([z3.string()]),
  ["taskUserMessage" /* TaskUserMessage */]: z3.tuple([z3.string()]),
  ["taskToolFailed" /* TaskToolFailed */]: z3.tuple([z3.string(), toolNamesSchema, z3.string()]),
- ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: z3.tuple([z3.string(), tokenUsageSchema]),
+ ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: z3.tuple([z3.string(), tokenUsageSchema, toolUsageSchema]),
  ["modeChanged" /* ModeChanged */]: z3.tuple([z3.string()]),
  ["providerProfileChanged" /* ProviderProfileChanged */]: z3.tuple([z3.object({ name: z3.string(), provider: z3.string() })])
  });
@@ -404,9 +420,9 @@ import { z as z5 } from "zod";
  var reasoningEfforts = ["low", "medium", "high"];
  var reasoningEffortsSchema = z5.enum(reasoningEfforts);
  var reasoningEffortWithMinimalSchema = z5.union([reasoningEffortsSchema, z5.literal("minimal")]);
- var reasoningEffortsExtended = ["none", "minimal", "low", "medium", "high"];
+ var reasoningEffortsExtended = ["none", "minimal", "low", "medium", "high", "xhigh"];
  var reasoningEffortExtendedSchema = z5.enum(reasoningEffortsExtended);
- var reasoningEffortSettingValues = ["disable", "none", "minimal", "low", "medium", "high"];
+ var reasoningEffortSettingValues = ["disable", "none", "minimal", "low", "medium", "high", "xhigh"];
  var reasoningEffortSettingSchema = z5.enum(reasoningEffortSettingValues);
  var verbosityLevels = ["low", "medium", "high"];
  var verbosityLevelsSchema = z5.enum(verbosityLevels);
@@ -434,7 +450,7 @@ var modelInfoSchema = z5.object({
  supportsTemperature: z5.boolean().optional(),
  defaultTemperature: z5.number().optional(),
  requiredReasoningBudget: z5.boolean().optional(),
- supportsReasoningEffort: z5.union([z5.boolean(), z5.array(z5.enum(["disable", "none", "minimal", "low", "medium", "high"]))]).optional(),
+ supportsReasoningEffort: z5.union([z5.boolean(), z5.array(z5.enum(["disable", "none", "minimal", "low", "medium", "high", "xhigh"]))]).optional(),
  requiredReasoningEffort: z5.boolean().optional(),
  preserveReasoning: z5.boolean().optional(),
  supportedParameters: z5.array(modelParametersSchema).optional(),
@@ -450,6 +466,8 @@ var modelInfoSchema = z5.object({
  cachableFields: z5.array(z5.string()).optional(),
  // Flag to indicate if the model is deprecated and should not be used
  deprecated: z5.boolean().optional(),
+ // Flag to indicate if the model should hide vendor/company identity in responses
+ isStealthModel: z5.boolean().optional(),
  // Flag to indicate if the model is free (no cost)
  isFree: z5.boolean().optional(),
  // Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
@@ -516,7 +534,9 @@ var codebaseIndexConfigSchema = z6.object({
  codebaseIndexOpenAiCompatibleModelDimension: z6.number().optional(),
  // Bedrock specific fields
  codebaseIndexBedrockRegion: z6.string().optional(),
- codebaseIndexBedrockProfile: z6.string().optional()
+ codebaseIndexBedrockProfile: z6.string().optional(),
+ // OpenRouter specific fields
+ codebaseIndexOpenRouterSpecificProvider: z6.string().optional()
  });
  var codebaseIndexModelsSchema = z6.object({
  openai: z6.record(z6.string(), z6.object({ dimension: z6.number() })).optional(),
@@ -761,7 +781,7 @@ var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;
  // src/providers/baseten.ts
  var basetenModels = {
  "moonshotai/Kimi-K2-Thinking": {
- maxTokens: 163800,
+ maxTokens: 16384,
  contextWindow: 262e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -773,7 +793,7 @@ var basetenModels = {
  description: "Kimi K2 Thinking - A model with enhanced reasoning capabilities from Kimi K2"
  },
  "zai-org/GLM-4.6": {
- maxTokens: 2e5,
+ maxTokens: 16384,
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: false,
@@ -785,7 +805,7 @@ var basetenModels = {
  description: "Frontier open model with advanced agentic, reasoning and coding capabilities"
  },
  "deepseek-ai/DeepSeek-R1": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -796,7 +816,7 @@ var basetenModels = {
  description: "DeepSeek's first-generation reasoning model"
  },
  "deepseek-ai/DeepSeek-R1-0528": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -807,7 +827,7 @@ var basetenModels = {
  description: "The latest revision of DeepSeek's first-generation reasoning model"
  },
  "deepseek-ai/DeepSeek-V3-0324": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -818,7 +838,7 @@ var basetenModels = {
  description: "Fast general-purpose LLM with enhanced reasoning capabilities"
  },
  "deepseek-ai/DeepSeek-V3.1": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -828,8 +848,20 @@ var basetenModels = {
  cacheReadsPrice: 0,
  description: "Extremely capable general-purpose LLM with hybrid reasoning capabilities and advanced tool calling"
  },
+ "deepseek-ai/DeepSeek-V3.2": {
+ maxTokens: 16384,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.3,
+ outputPrice: 0.45,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "DeepSeek's hybrid reasoning model with efficient long context scaling with GPT-5 level performance"
+ },
  "Qwen/Qwen3-235B-A22B-Instruct-2507": {
- maxTokens: 262144,
+ maxTokens: 16384,
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
@@ -840,7 +872,7 @@ var basetenModels = {
  description: "Mixture-of-experts LLM with math and reasoning capabilities"
  },
  "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
- maxTokens: 262144,
+ maxTokens: 16384,
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
@@ -851,7 +883,7 @@ var basetenModels = {
  description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
  },
  "openai/gpt-oss-120b": {
- maxTokens: 128072,
+ maxTokens: 16384,
  contextWindow: 128072,
  supportsImages: false,
  supportsPromptCache: false,
@@ -863,7 +895,7 @@ var basetenModels = {
  description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
  },
  "moonshotai/Kimi-K2-Instruct-0905": {
- maxTokens: 168e3,
+ maxTokens: 16384,
  contextWindow: 262e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1316,6 +1348,52 @@ var bedrockModels = {
  supportsPromptCache: false,
  inputPrice: 0.02,
  description: "Amazon Titan Text Embeddings V2"
+ },
+ "moonshot.kimi-k2-thinking": {
+ maxTokens: 32e3,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ preserveReasoning: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ description: "Kimi K2 Thinking (1T parameter MoE model with 32B active parameters)"
+ },
+ "minimax.minimax-m2": {
+ maxTokens: 16384,
+ contextWindow: 196608,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ preserveReasoning: true,
+ inputPrice: 0.3,
+ outputPrice: 1.2,
+ description: "MiniMax M2 (230B parameter MoE model with 10B active parameters)"
+ },
+ "qwen.qwen3-next-80b-a3b": {
+ maxTokens: 8192,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ inputPrice: 0.15,
+ outputPrice: 1.2,
+ description: "Qwen3 Next 80B (MoE model with 3B active parameters)"
+ },
+ "qwen.qwen3-coder-480b-a35b-v1:0": {
+ maxTokens: 8192,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ inputPrice: 0.45,
+ outputPrice: 1.8,
+ description: "Qwen3 Coder 480B (MoE model with 35B active parameters)"
  }
  };
  var BEDROCK_DEFAULT_TEMPERATURE = 0.3;
@@ -1381,8 +1459,8 @@ var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
  var cerebrasDefaultModelId = "gpt-oss-120b";
  var cerebrasModels = {
  "zai-glm-4.6": {
- maxTokens: 16384,
- // consistent with their other models
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1392,7 +1470,8 @@ var cerebrasModels = {
  description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
  },
  "qwen-3-235b-a22b-instruct-2507": {
- maxTokens: 64e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1402,7 +1481,8 @@ var cerebrasModels = {
  description: "Intelligent model with ~1400 tokens/s"
  },
  "llama-3.3-70b": {
- maxTokens: 64e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1412,7 +1492,8 @@ var cerebrasModels = {
  description: "Powerful model with ~2600 tokens/s"
  },
  "qwen-3-32b": {
- maxTokens: 64e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1422,7 +1503,8 @@ var cerebrasModels = {
  description: "SOTA coding performance with ~2500 tokens/s"
  },
  "gpt-oss-120b": {
- maxTokens: 8e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1947,35 +2029,37 @@ var deepSeekModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- inputPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- outputPrice: 1.68,
- // $1.68 per million tokens - Updated Sept 5, 2025
- cacheWritesPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- cacheReadsPrice: 0.07,
- // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
- description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
+ defaultToolProtocol: "native",
+ inputPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ outputPrice: 0.42,
+ // $0.42 per million tokens - Updated Dec 9, 2025
+ cacheWritesPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ cacheReadsPrice: 0.028,
+ // $0.028 per million tokens (cache hit) - Updated Dec 9, 2025
+ description: `DeepSeek-V3.2 (Non-thinking Mode) achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally. Supports JSON output, tool calls, chat prefix completion (beta), and FIM completion (beta).`
  },
  "deepseek-reasoner": {
- maxTokens: 65536,
- // 64K max output for reasoning mode
+ maxTokens: 8192,
+ // 8K max output
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- inputPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- outputPrice: 1.68,
- // $1.68 per million tokens - Updated Sept 5, 2025
- cacheWritesPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- cacheReadsPrice: 0.07,
- // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
- description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
+ defaultToolProtocol: "native",
+ inputPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ outputPrice: 0.42,
+ // $0.42 per million tokens - Updated Dec 9, 2025
+ cacheWritesPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ cacheReadsPrice: 0.028,
+ // $0.028 per million tokens (cache hit) - Updated Dec 9, 2025
+ description: `DeepSeek-V3.2 (Thinking Mode) achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 8K output tokens. Supports JSON output, tool calls, and chat prefix completion (beta).`
  }
  };
- var DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6;
+ var DEEP_SEEK_DEFAULT_TEMPERATURE = 0;

  // src/providers/doubao.ts
  var doubaoDefaultModelId = "doubao-seed-1-6-250615";
@@ -2430,21 +2514,6 @@ var geminiModels = {
  }
  };

- // src/providers/glama.ts
- var glamaDefaultModelId = "anthropic/claude-3-7-sonnet";
- var glamaDefaultModelInfo = {
- maxTokens: 8192,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- inputPrice: 3,
- outputPrice: 15,
- cacheWritesPrice: 3.75,
- cacheReadsPrice: 0.3,
- description: "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)"
- };
- var GLAMA_DEFAULT_TEMPERATURE = 0;
-
  // src/providers/groq.ts
  var groqDefaultModelId = "moonshotai/kimi-k2-instruct-0905";
  var groqModels = {
@@ -2625,6 +2694,7 @@ var litellmDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -2821,12 +2891,67 @@ var ollamaDefaultModelInfo = {
  };

  // src/providers/openai.ts
- var openAiNativeDefaultModelId = "gpt-5.1";
+ var openAiNativeDefaultModelId = "gpt-5.1-codex-max";
  var openAiNativeModels = {
+ "gpt-5.1-codex-max": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ promptCacheRetention: "24h",
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "xhigh",
+ inputPrice: 1.25,
+ outputPrice: 10,
+ cacheReadsPrice: 0.125,
+ supportsTemperature: false,
+ tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }],
+ description: "GPT-5.1 Codex Max: Our most intelligent coding model optimized for long-horizon, agentic coding tasks"
+ },
+ "gpt-5.2": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ promptCacheRetention: "24h",
+ supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 1.75,
+ outputPrice: 14,
+ cacheReadsPrice: 0.175,
+ supportsVerbosity: true,
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 4e5, inputPrice: 0.875, outputPrice: 7, cacheReadsPrice: 0.0875 },
+ { name: "priority", contextWindow: 4e5, inputPrice: 3.5, outputPrice: 28, cacheReadsPrice: 0.35 }
+ ],
+ description: "GPT-5.2: Our flagship model for coding and agentic tasks across industries"
+ },
+ "gpt-5.2-chat-latest": {
+ maxTokens: 16384,
+ contextWindow: 128e3,
+ supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ inputPrice: 1.75,
+ outputPrice: 14,
+ cacheReadsPrice: 0.175,
+ description: "GPT-5.2 Chat: Optimized for conversational AI and chat use cases"
+ },
  "gpt-5.1": {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  promptCacheRetention: "24h",
@@ -2847,6 +2972,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  promptCacheRetention: "24h",
@@ -2863,6 +2990,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  promptCacheRetention: "24h",
@@ -2878,6 +3007,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -2897,6 +3028,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -2916,6 +3049,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["low", "medium", "high"],
@@ -2931,6 +3066,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -2947,6 +3084,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.25,
@@ -2958,6 +3097,8 @@ var openAiNativeModels = {
  maxTokens: 32768,
  contextWindow: 1047576,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -2972,6 +3113,8 @@ var openAiNativeModels = {
  maxTokens: 32768,
  contextWindow: 1047576,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 0.4,
@@ -2986,6 +3129,8 @@ var openAiNativeModels = {
  maxTokens: 32768,
  contextWindow: 1047576,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 0.1,
@@ -3193,6 +3338,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3212,6 +3359,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3231,6 +3380,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3393,7 +3544,18 @@ var RooModelSchema = z7.object({
  type: z7.literal("language"),
  tags: z7.array(z7.string()).optional(),
  pricing: RooPricingSchema,
- deprecated: z7.boolean().optional()
+ deprecated: z7.boolean().optional(),
+ default_temperature: z7.number().optional(),
+ // Dynamic settings that map directly to ModelInfo properties
+ // Allows the API to configure model-specific defaults like includedTools, excludedTools, reasoningEffort, etc.
+ // These are always direct values (e.g., includedTools: ['search_replace']) for backward compatibility with old clients.
+ settings: z7.record(z7.string(), z7.unknown()).optional(),
+ // Versioned settings keyed by version number (e.g., '3.36.4').
+ // Each version key maps to a settings object that is used when plugin version >= that version.
+ // New clients find the highest version key <= current version and use those settings.
+ // Old clients ignore this field and use plain values from `settings`.
+ // Example: { '3.36.4': { includedTools: ['search_replace'] }, '3.35.0': { ... } }
+ versionedSettings: z7.record(z7.string(), z7.record(z7.string(), z7.unknown())).optional()
  });
  var RooModelsResponseSchema = z7.object({
  object: z7.literal("list"),
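
The comments on versionedSettings above pin down the resolution rule: take the settings object for the highest version key that is <= the current plugin version, falling back to plain settings. A minimal sketch of that lookup (resolveModelSettings and compareVersions are hypothetical helpers, not part of the package):

    // Numeric dotted-version comparison, e.g. "3.36.4" vs "3.35.0".
    function compareVersions(a: string, b: string): number {
      const pa = a.split(".").map(Number)
      const pb = b.split(".").map(Number)
      for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
        const d = (pa[i] ?? 0) - (pb[i] ?? 0)
        if (d !== 0) return d
      }
      return 0
    }

    function resolveModelSettings(
      model: {
        settings?: Record<string, unknown>
        versionedSettings?: Record<string, Record<string, unknown>>
      },
      currentVersion: string,
    ): Record<string, unknown> | undefined {
      // Keep only version keys the current plugin version already satisfies...
      const eligible = Object.keys(model.versionedSettings ?? {})
        .filter((v) => compareVersions(v, currentVersion) <= 0)
        .sort(compareVersions)
      // ...take the highest one, or fall back to the plain settings object.
      const best = eligible[eligible.length - 1]
      return best ? model.versionedSettings![best] : model.settings
    }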
@@ -4159,15 +4321,17 @@ var xaiDefaultModelId = "grok-code-fast-1";
  var xaiModels = {
  "grok-code-fast-1": {
  maxTokens: 16384,
- contextWindow: 262144,
- supportsImages: false,
+ contextWindow: 256e3,
+ supportsImages: true,
  supportsPromptCache: true,
  supportsNativeTools: true,
  inputPrice: 0.2,
  outputPrice: 1.5,
  cacheWritesPrice: 0.02,
  cacheReadsPrice: 0.02,
- description: "xAI's Grok Code Fast model with 256K context window"
+ description: "xAI's Grok Code Fast model with 256K context window",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-1-fast-reasoning": {
  maxTokens: 65536,
@@ -4179,7 +4343,9 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+ description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-1-fast-non-reasoning": {
  maxTokens: 65536,
@@ -4191,7 +4357,9 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+ description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-fast-reasoning": {
  maxTokens: 65536,
@@ -4203,7 +4371,9 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+ description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-fast-non-reasoning": {
  maxTokens: 65536,
@@ -4215,9 +4385,11 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+ description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
- "grok-4": {
+ "grok-4-0709": {
  maxTokens: 8192,
  contextWindow: 256e3,
  supportsImages: true,
@@ -4227,36 +4399,14 @@ var xaiModels = {
  outputPrice: 15,
  cacheWritesPrice: 0.75,
  cacheReadsPrice: 0.75,
- description: "xAI's Grok-4 model with 256K context window"
- },
- "grok-3": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- supportsNativeTools: true,
- inputPrice: 3,
- outputPrice: 15,
- cacheWritesPrice: 0.75,
- cacheReadsPrice: 0.75,
- description: "xAI's Grok-3 model with 128K context window"
- },
- "grok-3-fast": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- supportsNativeTools: true,
- inputPrice: 5,
- outputPrice: 25,
- cacheWritesPrice: 1.25,
- cacheReadsPrice: 1.25,
- description: "xAI's Grok-3 fast model with 128K context window"
+ description: "xAI's Grok-4 model with 256K context window",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-3-mini": {
  maxTokens: 8192,
  contextWindow: 131072,
- supportsImages: false,
+ supportsImages: true,
  supportsPromptCache: true,
  supportsNativeTools: true,
  inputPrice: 0.3,
@@ -4264,40 +4414,24 @@ var xaiModels = {
  cacheWritesPrice: 0.07,
  cacheReadsPrice: 0.07,
  description: "xAI's Grok-3 mini model with 128K context window",
- supportsReasoningEffort: true
- },
- "grok-3-mini-fast": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- supportsNativeTools: true,
- inputPrice: 0.6,
- outputPrice: 4,
- cacheWritesPrice: 0.15,
- cacheReadsPrice: 0.15,
- description: "xAI's Grok-3 mini fast model with 128K context window",
- supportsReasoningEffort: true
+ supportsReasoningEffort: ["low", "high"],
+ reasoningEffort: "low",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
- "grok-2-1212": {
+ "grok-3": {
  maxTokens: 8192,
  contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- supportsNativeTools: true,
- inputPrice: 2,
- outputPrice: 10,
- description: "xAI's Grok-2 model (version 1212) with 128K context window"
- },
- "grok-2-vision-1212": {
- maxTokens: 8192,
- contextWindow: 32768,
  supportsImages: true,
- supportsPromptCache: false,
+ supportsPromptCache: true,
  supportsNativeTools: true,
- inputPrice: 2,
- outputPrice: 10,
- description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window"
+ inputPrice: 3,
+ outputPrice: 15,
+ cacheWritesPrice: 0.75,
+ cacheReadsPrice: 0.75,
+ description: "xAI's Grok-3 model with 128K context window",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  }
  };

@@ -4404,7 +4538,6 @@ var internationalZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.6,
  outputPrice: 2.2,
  cacheWritesPrice: 0,
@@ -4477,7 +4610,6 @@ var internationalZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.6,
  outputPrice: 2.2,
  cacheWritesPrice: 0,
@@ -4505,7 +4637,6 @@ var mainlandZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.29,
  outputPrice: 1.14,
  cacheWritesPrice: 0,
@@ -4578,7 +4709,6 @@ var mainlandZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.29,
  outputPrice: 1.14,
  cacheWritesPrice: 0,
@@ -4589,14 +4719,24 @@ var mainlandZAiModels = {
  var ZAI_DEFAULT_TEMPERATURE = 0.6;
  var zaiApiLineConfigs = {
  international_coding: {
- name: "International",
+ name: "International Coding",
  baseUrl: "https://api.z.ai/api/coding/paas/v4",
  isChina: false
  },
  china_coding: {
- name: "China",
+ name: "China Coding",
  baseUrl: "https://open.bigmodel.cn/api/coding/paas/v4",
  isChina: true
+ },
+ international_api: {
+ name: "International API",
+ baseUrl: "https://api.z.ai/api/paas/v4",
+ isChina: false
+ },
+ china_api: {
+ name: "China API",
+ baseUrl: "https://open.bigmodel.cn/api/paas/v4",
+ isChina: true
  }
  };

@@ -4654,8 +4794,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
  return openRouterDefaultModelId;
  case "requesty":
  return requestyDefaultModelId;
- case "glama":
- return glamaDefaultModelId;
  case "unbound":
  return unboundDefaultModelId;
  case "litellm":
@@ -4742,7 +4880,6 @@ var dynamicProviders = [
  "io-intelligence",
  "requesty",
  "unbound",
- "glama",
  "roo",
  "chutes"
  ];
@@ -4824,10 +4961,6 @@ var claudeCodeSchema = apiModelIdProviderModelSchema.extend({
  claudeCodePath: z8.string().optional(),
  claudeCodeMaxOutputTokens: z8.number().int().min(1).max(2e5).optional()
  });
- var glamaSchema = baseProviderSettingsSchema.extend({
- glamaModelId: z8.string().optional(),
- glamaApiKey: z8.string().optional()
- });
  var openRouterSchema = baseProviderSettingsSchema.extend({
  openRouterApiKey: z8.string().optional(),
  openRouterModelId: z8.string().optional(),
@@ -4978,7 +5111,7 @@ var cerebrasSchema = apiModelIdProviderModelSchema.extend({
  var sambaNovaSchema = apiModelIdProviderModelSchema.extend({
  sambaNovaApiKey: z8.string().optional()
  });
- var zaiApiLineSchema = z8.enum(["international_coding", "china_coding"]);
+ var zaiApiLineSchema = z8.enum(["international_coding", "china_coding", "international_api", "china_api"]);
  var zaiSchema = apiModelIdProviderModelSchema.extend({
  zaiApiKey: z8.string().optional(),
  zaiApiLine: zaiApiLineSchema.optional()
@@ -5012,7 +5145,6 @@ var defaultSchema = z8.object({
  var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
  anthropicSchema.merge(z8.object({ apiProvider: z8.literal("anthropic") })),
  claudeCodeSchema.merge(z8.object({ apiProvider: z8.literal("claude-code") })),
- glamaSchema.merge(z8.object({ apiProvider: z8.literal("glama") })),
  openRouterSchema.merge(z8.object({ apiProvider: z8.literal("openrouter") })),
  bedrockSchema.merge(z8.object({ apiProvider: z8.literal("bedrock") })),
  vertexSchema.merge(z8.object({ apiProvider: z8.literal("vertex") })),
@@ -5054,7 +5186,6 @@ var providerSettingsSchema = z8.object({
  apiProvider: providerNamesSchema.optional(),
  ...anthropicSchema.shape,
  ...claudeCodeSchema.shape,
- ...glamaSchema.shape,
  ...openRouterSchema.shape,
  ...bedrockSchema.shape,
  ...vertexSchema.shape,
@@ -5099,7 +5230,6 @@ var discriminatedProviderSettingsWithIdSchema = providerSettingsSchemaDiscrimina
  var PROVIDER_SETTINGS_KEYS = providerSettingsSchema.keyof().options;
  var modelIdKeys = [
  "apiModelId",
- "glamaModelId",
  "openRouterModelId",
  "openAiModelId",
  "ollamaModelId",
@@ -5121,7 +5251,6 @@ var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key)
  var modelIdKeysByProvider = {
  anthropic: "apiModelId",
  "claude-code": "apiModelId",
- glama: "glamaModelId",
  openrouter: "openRouterModelId",
  bedrock: "apiModelId",
  vertex: "apiModelId",
@@ -5252,7 +5381,6 @@ var MODELS_BY_PROVIDER = {
  zai: { id: "zai", label: "Z.ai", models: Object.keys(internationalZAiModels) },
  baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
  // Dynamic providers; models pulled from remote APIs.
- glama: { id: "glama", label: "Glama", models: [] },
  huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
  litellm: { id: "litellm", label: "LiteLLM", models: [] },
  openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
@@ -5484,6 +5612,67 @@ var rooCodeTelemetryEventSchema = z11.discriminatedUnion("type", [
  })
  })
  ]);
+ var EXPECTED_API_ERROR_CODES = /* @__PURE__ */ new Set([
+ 402,
+ // Payment required - billing issues
+ 429
+ // Rate limit - expected when hitting API limits
+ ]);
+ var EXPECTED_ERROR_MESSAGE_PATTERNS = [
+ /^429\b/,
+ // Message starts with "429"
+ /rate limit/i
+ // Contains "rate limit" (case insensitive)
+ ];
+ function isOpenAISdkError(error) {
+ return typeof error === "object" && error !== null && "status" in error && typeof error.status === "number";
+ }
+ function getErrorStatusCode(error) {
+ if (isOpenAISdkError(error)) {
+ return error.status;
+ }
+ return void 0;
+ }
+ function getErrorMessage(error) {
+ if (isOpenAISdkError(error)) {
+ return error.error?.metadata?.raw || error.error?.message || error.message;
+ }
+ return void 0;
+ }
+ function shouldReportApiErrorToTelemetry(errorCode, errorMessage) {
+ if (errorCode !== void 0 && EXPECTED_API_ERROR_CODES.has(errorCode)) {
+ return false;
+ }
+ if (errorMessage) {
+ for (const pattern of EXPECTED_ERROR_MESSAGE_PATTERNS) {
+ if (pattern.test(errorMessage)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ var ApiProviderError = class extends Error {
+ constructor(message, provider, modelId, operation, errorCode) {
+ super(message);
+ this.provider = provider;
+ this.modelId = modelId;
+ this.operation = operation;
+ this.errorCode = errorCode;
+ this.name = "ApiProviderError";
+ }
+ };
+ function isApiProviderError(error) {
+ return error instanceof Error && error.name === "ApiProviderError" && "provider" in error && "modelId" in error && "operation" in error;
+ }
+ function extractApiProviderErrorProperties(error) {
+ return {
+ provider: error.provider,
+ modelId: error.modelId,
+ operation: error.operation,
+ ...error.errorCode !== void 0 && { errorCode: error.errorCode }
+ };
+ }

  // src/mode.ts
  import { z as z12 } from "zod";
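
Taken together, the new helpers support a report-only-unexpected-errors flow. A sketch of a call site (callWithTelemetry and reportError are hypothetical; only the imported names are part of this package):

    import {
      ApiProviderError,
      getErrorMessage,
      getErrorStatusCode,
      shouldReportApiErrorToTelemetry,
    } from "@roo-code/types"

    // Stand-in telemetry sink for the sketch.
    const reportError = (e: ApiProviderError) => console.error(e)

    async function callWithTelemetry(modelId: string, fn: () => Promise<void>) {
      try {
        await fn()
      } catch (error) {
        const code = getErrorStatusCode(error)
        const message = getErrorMessage(error) ?? String(error)
        // 402/429 responses and "rate limit" messages are treated as expected
        // and filtered out; everything else gets reported.
        if (shouldReportApiErrorToTelemetry(code, message)) {
          reportError(new ApiProviderError(message, "openai-native", modelId, "createMessage", code))
        }
        throw error
      }
    }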
@@ -5562,7 +5751,7 @@ var DEFAULT_MODES = [
  whenToUse: "Use this mode when you need to plan, design, or strategize before implementation. Perfect for breaking down complex problems, creating technical specifications, designing system architecture, or brainstorming solutions before coding.",
  description: "Plan and design before implementation",
  groups: ["read", ["edit", { fileRegex: "\\.md$", description: "Markdown files only" }], "browser", "mcp"],
- customInstructions: "1. Do some information gathering (using provided tools) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, break down the task into clear, actionable steps and create a todo list using the `update_todo_list` tool. Each todo item should be:\n - Specific and actionable\n - Listed in logical execution order\n - Focused on a single, well-defined outcome\n - Clear enough that another mode could execute it independently\n\n **Note:** If the `update_todo_list` tool is not available, write the plan to a markdown file (e.g., `plan.md` or `todo.md`) instead.\n\n4. As you gather more information or discover new requirements, update the todo list to reflect the current understanding of what needs to be accomplished.\n\n5. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and refine the todo list.\n\n6. Include Mermaid diagrams if they help clarify complex workflows or system architecture. Please avoid using double quotes (\"\") and parentheses () inside square brackets ([]) in Mermaid diagrams, as this can cause parsing errors.\n\n7. Use the switch_mode tool to request that the user switch to another mode to implement the solution.\n\n**IMPORTANT: Focus on creating clear, actionable todo lists rather than lengthy markdown documents. Use the todo list as your primary planning tool to track and organize the work that needs to be done.**"
+ customInstructions: "1. Do some information gathering (using provided tools) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, break down the task into clear, actionable steps and create a todo list using the `update_todo_list` tool. Each todo item should be:\n - Specific and actionable\n - Listed in logical execution order\n - Focused on a single, well-defined outcome\n - Clear enough that another mode could execute it independently\n\n **Note:** If the `update_todo_list` tool is not available, write the plan to a markdown file (e.g., `plan.md` or `todo.md`) instead.\n\n4. As you gather more information or discover new requirements, update the todo list to reflect the current understanding of what needs to be accomplished.\n\n5. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and refine the todo list.\n\n6. Include Mermaid diagrams if they help clarify complex workflows or system architecture. Please avoid using double quotes (\"\") and parentheses () inside square brackets ([]) in Mermaid diagrams, as this can cause parsing errors.\n\n7. Use the switch_mode tool to request that the user switch to another mode to implement the solution.\n\n**IMPORTANT: Focus on creating clear, actionable todo lists rather than lengthy markdown documents. Use the todo list as your primary planning tool to track and organize the work that needs to be done.**\n\n**CRITICAL: Never provide level of effort time estimates (e.g., hours, days, weeks) for tasks. Focus solely on breaking down the work into clear, actionable steps without estimating how long they will take.**\n\nUnless told otherwise, if you want to save a plan file, put it in the /plans directory"
  },
  {
  slug: "code",
@@ -5771,6 +5960,13 @@ var globalSettingsSchema = z14.object({
  includeTaskHistoryInEnhance: z14.boolean().optional(),
  historyPreviewCollapsed: z14.boolean().optional(),
  reasoningBlockCollapsed: z14.boolean().optional(),
+ /**
+ * Controls the keyboard behavior for sending messages in the chat input.
+ * - "send": Enter sends message, Shift+Enter creates newline (default)
+ * - "newline": Enter creates newline, Shift+Enter/Ctrl+Enter sends message
+ * @default "send"
+ */
+ enterBehavior: z14.enum(["send", "newline"]).optional(),
  profileThresholds: z14.record(z14.string(), z14.number()).optional(),
  hasOpenedModeSelector: z14.boolean().optional(),
  lastModeExportPath: z14.string().optional(),
@@ -5780,7 +5976,6 @@ var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
  var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
  var SECRET_STATE_KEYS = [
  "apiKey",
- "glamaApiKey",
  "openRouterApiKey",
  "awsAccessKey",
  "awsApiKey",
@@ -5978,11 +6173,13 @@ var organizationDefaultSettingsSchema = globalSettingsSchema.pick({
  terminalShellIntegrationTimeout: z16.number().int().nonnegative().optional()
  })
  );
+ var workspaceTaskVisibilitySchema = z16.enum(["all", "list-only", "full-lockdown"]);
  var organizationCloudSettingsSchema = z16.object({
  recordTaskMessages: z16.boolean().optional(),
  enableTaskSharing: z16.boolean().optional(),
  taskShareExpirationDays: z16.number().int().positive().optional(),
- allowMembersViewAllTasks: z16.boolean().optional()
+ allowMembersViewAllTasks: z16.boolean().optional(),
+ workspaceTaskVisibility: workspaceTaskVisibilitySchema.optional()
  });
  var organizationFeaturesSchema = z16.object({
  roomoteControlEnabled: z16.boolean().optional()
@@ -6346,6 +6543,16 @@ var usageStatsSchema = z16.object({
  // Period in days (e.g., 30)
  });

+ // src/context-management.ts
+ var CONTEXT_MANAGEMENT_EVENTS = [
+ "condense_context",
+ "condense_context_error",
+ "sliding_window_truncation"
+ ];
+ function isContextManagementEvent(value) {
+ return typeof value === "string" && CONTEXT_MANAGEMENT_EVENTS.includes(value);
+ }
+

  // src/cookie-consent.ts
  var CONSENT_COOKIE_NAME = "roo-code-cookie-consent";
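
The new isContextManagementEvent guard makes it straightforward to pull condense/truncation markers out of a transcript. A sketch (assuming the package's usual ClineMessage type export; the messages array is hypothetical):

    import { isContextManagementEvent, type ClineMessage } from "@roo-code/types"

    // Keep only "say" rows that announce a context-management event, i.e.
    // condense_context, condense_context_error, or sliding_window_truncation.
    function contextEvents(messages: ClineMessage[]): ClineMessage[] {
      return messages.filter((m) => m.type === "say" && isContextManagementEvent(m.say))
    }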
@@ -6537,6 +6744,7 @@ export {
  ANTHROPIC_DEFAULT_MAX_TOKENS,
  ANTHROPIC_STYLE_PROVIDERS,
  AWS_INFERENCE_PROFILE_MAPPING,
+ ApiProviderError,
  BEDROCK_1M_CONTEXT_MODEL_IDS,
  BEDROCK_DEFAULT_CONTEXT,
  BEDROCK_DEFAULT_TEMPERATURE,
@@ -6546,6 +6754,7 @@ export {
  CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
  CODEBASE_INDEX_DEFAULTS,
  CONSENT_COOKIE_NAME,
+ CONTEXT_MANAGEMENT_EVENTS,
  COOKIE_CONSENT_EVENTS,
  ConnectionState,
  DEEP_SEEK_DEFAULT_TEMPERATURE,
@@ -6558,10 +6767,10 @@ export {
  DOUBAO_API_CHAT_PATH,
  EVALS_SETTINGS,
  EVALS_TIMEOUT,
+ EXPECTED_API_ERROR_CODES,
  ExtensionBridgeCommandName,
  ExtensionBridgeEventName,
  ExtensionSocketEvents,
- GLAMA_DEFAULT_TEMPERATURE,
  GLOBAL_SECRET_KEYS,
  GLOBAL_SETTINGS_KEYS,
  GLOBAL_STATE_KEYS,
@@ -6645,6 +6854,7 @@ export {
  commandExecutionStatusSchema,
  commandIds,
  contextCondenseSchema,
+ contextTruncationSchema,
  convertModelNameForVertex,
  customModePromptsSchema,
  customModesSettingsSchema,
@@ -6666,6 +6876,7 @@ export {
  extensionBridgeCommandSchema,
  extensionBridgeEventSchema,
  extensionInstanceSchema,
+ extractApiProviderErrorProperties,
  fauxProviders,
  featherlessDefaultModelId,
  featherlessModels,
@@ -6677,12 +6888,12 @@ export {
  getApiProtocol,
  getClaudeCodeModelId,
  getEffectiveProtocol,
+ getErrorMessage,
+ getErrorStatusCode,
  getImageGenerationProvider,
  getModelId,
  getProviderDefaultModelId,
  gitPropertiesSchema,
- glamaDefaultModelId,
- glamaDefaultModelInfo,
  globalSettingsSchema,
  groqDefaultModelId,
  groqModels,
@@ -6699,6 +6910,8 @@ export {
  ioIntelligenceDefaultModelId,
  ioIntelligenceModels,
  ipcMessageSchema,
+ isApiProviderError,
+ isContextManagementEvent,
  isCustomProvider,
  isDynamicProvider,
  isFauxProvider,
@@ -6787,6 +7000,7 @@ export {
  serviceTierSchema,
  serviceTiers,
  shareResponseSchema,
+ shouldReportApiErrorToTelemetry,
  shouldUseSingleFileRead,
  staticAppPropertiesSchema,
  suggestionItemSchema,