@roo-code/types 1.88.0 → 1.90.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -23,6 +23,7 @@ __export(index_exports, {
  ANTHROPIC_DEFAULT_MAX_TOKENS: () => ANTHROPIC_DEFAULT_MAX_TOKENS,
  ANTHROPIC_STYLE_PROVIDERS: () => ANTHROPIC_STYLE_PROVIDERS,
  AWS_INFERENCE_PROFILE_MAPPING: () => AWS_INFERENCE_PROFILE_MAPPING,
+ ApiProviderError: () => ApiProviderError,
  BEDROCK_1M_CONTEXT_MODEL_IDS: () => BEDROCK_1M_CONTEXT_MODEL_IDS,
  BEDROCK_DEFAULT_CONTEXT: () => BEDROCK_DEFAULT_CONTEXT,
  BEDROCK_DEFAULT_TEMPERATURE: () => BEDROCK_DEFAULT_TEMPERATURE,
@@ -32,6 +33,7 @@ __export(index_exports, {
  CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS: () => CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
  CODEBASE_INDEX_DEFAULTS: () => CODEBASE_INDEX_DEFAULTS,
  CONSENT_COOKIE_NAME: () => CONSENT_COOKIE_NAME,
+ CONTEXT_MANAGEMENT_EVENTS: () => CONTEXT_MANAGEMENT_EVENTS,
  COOKIE_CONSENT_EVENTS: () => COOKIE_CONSENT_EVENTS,
  ConnectionState: () => ConnectionState,
  DEEP_SEEK_DEFAULT_TEMPERATURE: () => DEEP_SEEK_DEFAULT_TEMPERATURE,
@@ -44,10 +46,10 @@ __export(index_exports, {
  DOUBAO_API_CHAT_PATH: () => DOUBAO_API_CHAT_PATH,
  EVALS_SETTINGS: () => EVALS_SETTINGS,
  EVALS_TIMEOUT: () => EVALS_TIMEOUT,
+ EXPECTED_API_ERROR_CODES: () => EXPECTED_API_ERROR_CODES,
  ExtensionBridgeCommandName: () => ExtensionBridgeCommandName,
  ExtensionBridgeEventName: () => ExtensionBridgeEventName,
  ExtensionSocketEvents: () => ExtensionSocketEvents,
- GLAMA_DEFAULT_TEMPERATURE: () => GLAMA_DEFAULT_TEMPERATURE,
  GLOBAL_SECRET_KEYS: () => GLOBAL_SECRET_KEYS,
  GLOBAL_SETTINGS_KEYS: () => GLOBAL_SETTINGS_KEYS,
  GLOBAL_STATE_KEYS: () => GLOBAL_STATE_KEYS,
@@ -131,6 +133,7 @@ __export(index_exports, {
  commandExecutionStatusSchema: () => commandExecutionStatusSchema,
  commandIds: () => commandIds,
  contextCondenseSchema: () => contextCondenseSchema,
+ contextTruncationSchema: () => contextTruncationSchema,
  convertModelNameForVertex: () => convertModelNameForVertex,
  customModePromptsSchema: () => customModePromptsSchema,
  customModesSettingsSchema: () => customModesSettingsSchema,
@@ -152,6 +155,7 @@ __export(index_exports, {
  extensionBridgeCommandSchema: () => extensionBridgeCommandSchema,
  extensionBridgeEventSchema: () => extensionBridgeEventSchema,
  extensionInstanceSchema: () => extensionInstanceSchema,
+ extractApiProviderErrorProperties: () => extractApiProviderErrorProperties,
  fauxProviders: () => fauxProviders,
  featherlessDefaultModelId: () => featherlessDefaultModelId,
  featherlessModels: () => featherlessModels,
@@ -163,12 +167,12 @@ __export(index_exports, {
  getApiProtocol: () => getApiProtocol,
  getClaudeCodeModelId: () => getClaudeCodeModelId,
  getEffectiveProtocol: () => getEffectiveProtocol,
+ getErrorMessage: () => getErrorMessage,
+ getErrorStatusCode: () => getErrorStatusCode,
  getImageGenerationProvider: () => getImageGenerationProvider,
  getModelId: () => getModelId,
  getProviderDefaultModelId: () => getProviderDefaultModelId,
  gitPropertiesSchema: () => gitPropertiesSchema,
- glamaDefaultModelId: () => glamaDefaultModelId,
- glamaDefaultModelInfo: () => glamaDefaultModelInfo,
  globalSettingsSchema: () => globalSettingsSchema,
  groqDefaultModelId: () => groqDefaultModelId,
  groqModels: () => groqModels,
@@ -185,6 +189,8 @@ __export(index_exports, {
  ioIntelligenceDefaultModelId: () => ioIntelligenceDefaultModelId,
  ioIntelligenceModels: () => ioIntelligenceModels,
  ipcMessageSchema: () => ipcMessageSchema,
+ isApiProviderError: () => isApiProviderError,
+ isContextManagementEvent: () => isContextManagementEvent,
  isCustomProvider: () => isCustomProvider,
  isDynamicProvider: () => isDynamicProvider,
  isFauxProvider: () => isFauxProvider,
@@ -273,6 +279,7 @@ __export(index_exports, {
  serviceTierSchema: () => serviceTierSchema,
  serviceTiers: () => serviceTiers,
  shareResponseSchema: () => shareResponseSchema,
+ shouldReportApiErrorToTelemetry: () => shouldReportApiErrorToTelemetry,
  shouldUseSingleFileRead: () => shouldUseSingleFileRead,
  staticAppPropertiesSchema: () => staticAppPropertiesSchema,
  suggestionItemSchema: () => suggestionItemSchema,
@@ -393,6 +400,7 @@ var clineSays = [
  "diff_error",
  "condense_context",
  "condense_context_error",
+ "sliding_window_truncation",
  "codebase_search_result",
  "user_edit_todos"
  ];
@@ -405,7 +413,14 @@ var contextCondenseSchema = import_zod.z.object({
  cost: import_zod.z.number(),
  prevContextTokens: import_zod.z.number(),
  newContextTokens: import_zod.z.number(),
- summary: import_zod.z.string()
+ summary: import_zod.z.string(),
+ condenseId: import_zod.z.string().optional()
+ });
+ var contextTruncationSchema = import_zod.z.object({
+ truncationId: import_zod.z.string(),
+ messagesRemoved: import_zod.z.number(),
+ prevContextTokens: import_zod.z.number(),
+ newContextTokens: import_zod.z.number()
  });
  var clineMessageSchema = import_zod.z.object({
  ts: import_zod.z.number(),
@@ -419,7 +434,16 @@ var clineMessageSchema = import_zod.z.object({
  conversationHistoryIndex: import_zod.z.number().optional(),
  checkpoint: import_zod.z.record(import_zod.z.string(), import_zod.z.unknown()).optional(),
  progressStatus: toolProgressStatusSchema.optional(),
+ /**
+ * Data for successful context condensation.
+ * Present when `say: "condense_context"` and `partial: false`.
+ */
  contextCondense: contextCondenseSchema.optional(),
+ /**
+ * Data for sliding window truncation.
+ * Present when `say: "sliding_window_truncation"`.
+ */
+ contextTruncation: contextTruncationSchema.optional(),
  isProtected: import_zod.z.boolean().optional(),
  apiProtocol: import_zod.z.union([import_zod.z.literal("openai"), import_zod.z.literal("anthropic")]).optional(),
  isAnswered: import_zod.z.boolean().optional()
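The new `contextTruncationSchema` mirrors `contextCondenseSchema` and is attached to a message via the optional `contextTruncation` field added above, alongside the new `"sliding_window_truncation"` say type. A minimal sketch of validating such a payload (the schema export is real as of 1.90.0; the payload values are illustrative):

```ts
import { contextTruncationSchema } from "@roo-code/types";

// Illustrative payload as it might appear on a message with
// say: "sliding_window_truncation".
const payload = contextTruncationSchema.parse({
  truncationId: "trunc-01", // illustrative id
  messagesRemoved: 12,
  prevContextTokens: 180000,
  newContextTokens: 96000,
});

console.log(
  `Dropped ${payload.messagesRemoved} messages, ` +
    `${payload.prevContextTokens} -> ${payload.newContextTokens} tokens`,
);
```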
@@ -448,8 +472,8 @@ var toolNames = [
  "read_file",
  "write_to_file",
  "apply_diff",
- "insert_content",
  "search_and_replace",
+ "search_replace",
  "apply_patch",
  "search_files",
  "list_files",
@@ -568,7 +592,7 @@ var rooCodeEventsSchema = import_zod3.z.object({
  ["taskAskResponded" /* TaskAskResponded */]: import_zod3.z.tuple([import_zod3.z.string()]),
  ["taskUserMessage" /* TaskUserMessage */]: import_zod3.z.tuple([import_zod3.z.string()]),
  ["taskToolFailed" /* TaskToolFailed */]: import_zod3.z.tuple([import_zod3.z.string(), toolNamesSchema, import_zod3.z.string()]),
- ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: import_zod3.z.tuple([import_zod3.z.string(), tokenUsageSchema]),
+ ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: import_zod3.z.tuple([import_zod3.z.string(), tokenUsageSchema, toolUsageSchema]),
  ["modeChanged" /* ModeChanged */]: import_zod3.z.tuple([import_zod3.z.string()]),
  ["providerProfileChanged" /* ProviderProfileChanged */]: import_zod3.z.tuple([import_zod3.z.object({ name: import_zod3.z.string(), provider: import_zod3.z.string() })])
  });
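The `taskTokenUsageUpdated` event tuple now carries a third element (tool usage), so listeners written against the old two-element tuple should accept an extra argument. A hedged sketch of the listener side (the emitter wiring is an assumption for illustration; payload shapes follow the schemas above):

```ts
import { EventEmitter } from "node:events";

// Stand-in for any RooCodeEvents-compatible emitter.
const emitter = new EventEmitter();

// As of 1.90.0 the tuple is (taskId, tokenUsage, toolUsage); two-argument
// listeners still run in JS but silently drop the new tool-usage payload.
emitter.on(
  "taskTokenUsageUpdated",
  (taskId: string, tokenUsage: unknown, toolUsage: unknown) => {
    console.log(taskId, tokenUsage, toolUsage);
  },
);
```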
@@ -722,9 +746,9 @@ var import_zod5 = require("zod");
  var reasoningEfforts = ["low", "medium", "high"];
  var reasoningEffortsSchema = import_zod5.z.enum(reasoningEfforts);
  var reasoningEffortWithMinimalSchema = import_zod5.z.union([reasoningEffortsSchema, import_zod5.z.literal("minimal")]);
- var reasoningEffortsExtended = ["none", "minimal", "low", "medium", "high"];
+ var reasoningEffortsExtended = ["none", "minimal", "low", "medium", "high", "xhigh"];
  var reasoningEffortExtendedSchema = import_zod5.z.enum(reasoningEffortsExtended);
- var reasoningEffortSettingValues = ["disable", "none", "minimal", "low", "medium", "high"];
+ var reasoningEffortSettingValues = ["disable", "none", "minimal", "low", "medium", "high", "xhigh"];
  var reasoningEffortSettingSchema = import_zod5.z.enum(reasoningEffortSettingValues);
  var verbosityLevels = ["low", "medium", "high"];
  var verbosityLevelsSchema = import_zod5.z.enum(verbosityLevels);
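The extended reasoning-effort enums gain an `xhigh` level, which `gpt-5.1-codex-max` uses further down. A quick self-contained check that mirrors the updated enum (re-declared locally so it runs without the package):

```ts
import { z } from "zod";

// Mirrors reasoningEffortSettingValues as of 1.90.0.
const reasoningEffortSettingSchema = z.enum([
  "disable", "none", "minimal", "low", "medium", "high", "xhigh",
]);

reasoningEffortSettingSchema.parse("xhigh");     // ok in 1.90.0, rejected in 1.88.0
reasoningEffortSettingSchema.safeParse("ultra"); // { success: false }
```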
@@ -752,7 +776,7 @@ var modelInfoSchema = import_zod5.z.object({
  supportsTemperature: import_zod5.z.boolean().optional(),
  defaultTemperature: import_zod5.z.number().optional(),
  requiredReasoningBudget: import_zod5.z.boolean().optional(),
- supportsReasoningEffort: import_zod5.z.union([import_zod5.z.boolean(), import_zod5.z.array(import_zod5.z.enum(["disable", "none", "minimal", "low", "medium", "high"]))]).optional(),
+ supportsReasoningEffort: import_zod5.z.union([import_zod5.z.boolean(), import_zod5.z.array(import_zod5.z.enum(["disable", "none", "minimal", "low", "medium", "high", "xhigh"]))]).optional(),
  requiredReasoningEffort: import_zod5.z.boolean().optional(),
  preserveReasoning: import_zod5.z.boolean().optional(),
  supportedParameters: import_zod5.z.array(modelParametersSchema).optional(),
@@ -768,6 +792,8 @@ var modelInfoSchema = import_zod5.z.object({
  cachableFields: import_zod5.z.array(import_zod5.z.string()).optional(),
  // Flag to indicate if the model is deprecated and should not be used
  deprecated: import_zod5.z.boolean().optional(),
+ // Flag to indicate if the model should hide vendor/company identity in responses
+ isStealthModel: import_zod5.z.boolean().optional(),
  // Flag to indicate if the model is free (no cost)
  isFree: import_zod5.z.boolean().optional(),
  // Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
@@ -834,7 +860,9 @@ var codebaseIndexConfigSchema = import_zod6.z.object({
  codebaseIndexOpenAiCompatibleModelDimension: import_zod6.z.number().optional(),
  // Bedrock specific fields
  codebaseIndexBedrockRegion: import_zod6.z.string().optional(),
- codebaseIndexBedrockProfile: import_zod6.z.string().optional()
+ codebaseIndexBedrockProfile: import_zod6.z.string().optional(),
+ // OpenRouter specific fields
+ codebaseIndexOpenRouterSpecificProvider: import_zod6.z.string().optional()
  });
  var codebaseIndexModelsSchema = import_zod6.z.object({
  openai: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.object({ dimension: import_zod6.z.number() })).optional(),
@@ -1079,7 +1107,7 @@ var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;
  // src/providers/baseten.ts
  var basetenModels = {
  "moonshotai/Kimi-K2-Thinking": {
- maxTokens: 163800,
+ maxTokens: 16384,
  contextWindow: 262e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1091,7 +1119,7 @@ var basetenModels = {
  description: "Kimi K2 Thinking - A model with enhanced reasoning capabilities from Kimi K2"
  },
  "zai-org/GLM-4.6": {
- maxTokens: 2e5,
+ maxTokens: 16384,
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1103,7 +1131,7 @@ var basetenModels = {
  description: "Frontier open model with advanced agentic, reasoning and coding capabilities"
  },
  "deepseek-ai/DeepSeek-R1": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1114,7 +1142,7 @@ var basetenModels = {
  description: "DeepSeek's first-generation reasoning model"
  },
  "deepseek-ai/DeepSeek-R1-0528": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1125,7 +1153,7 @@ var basetenModels = {
  description: "The latest revision of DeepSeek's first-generation reasoning model"
  },
  "deepseek-ai/DeepSeek-V3-0324": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1136,7 +1164,7 @@ var basetenModels = {
  description: "Fast general-purpose LLM with enhanced reasoning capabilities"
  },
  "deepseek-ai/DeepSeek-V3.1": {
- maxTokens: 131072,
+ maxTokens: 16384,
  contextWindow: 163840,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1146,8 +1174,20 @@ var basetenModels = {
  cacheReadsPrice: 0,
  description: "Extremely capable general-purpose LLM with hybrid reasoning capabilities and advanced tool calling"
  },
+ "deepseek-ai/DeepSeek-V3.2": {
+ maxTokens: 16384,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ inputPrice: 0.3,
+ outputPrice: 0.45,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "DeepSeek's hybrid reasoning model with efficient long context scaling with GPT-5 level performance"
+ },
  "Qwen/Qwen3-235B-A22B-Instruct-2507": {
- maxTokens: 262144,
+ maxTokens: 16384,
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1158,7 +1198,7 @@ var basetenModels = {
  description: "Mixture-of-experts LLM with math and reasoning capabilities"
  },
  "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
- maxTokens: 262144,
+ maxTokens: 16384,
  contextWindow: 262144,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1169,7 +1209,7 @@ var basetenModels = {
  description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities"
  },
  "openai/gpt-oss-120b": {
- maxTokens: 128072,
+ maxTokens: 16384,
  contextWindow: 128072,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1181,7 +1221,7 @@ var basetenModels = {
  description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities"
  },
  "moonshotai/Kimi-K2-Instruct-0905": {
- maxTokens: 168e3,
+ maxTokens: 16384,
  contextWindow: 262e3,
  supportsImages: false,
  supportsPromptCache: false,
@@ -1205,6 +1245,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1218,6 +1259,7 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.8,
  outputPrice: 3.2,
  cacheWritesPrice: 0.8,
@@ -1233,6 +1275,7 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 4,
  cacheWritesPrice: 1,
@@ -1246,6 +1289,7 @@ var bedrockModels = {
  contextWindow: 3e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.06,
  outputPrice: 0.24,
  cacheWritesPrice: 0.06,
@@ -1261,6 +1305,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.035,
  outputPrice: 0.14,
  cacheWritesPrice: 0.035,
@@ -1277,6 +1322,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1291,6 +1337,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -1305,6 +1352,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 5,
  outputPrice: 25,
  cacheWritesPrice: 6.25,
@@ -1319,6 +1367,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 15,
  outputPrice: 75,
  cacheWritesPrice: 18.75,
@@ -1333,6 +1382,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1346,6 +1396,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -1359,6 +1410,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: false,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0.8,
  outputPrice: 4,
  cacheWritesPrice: 1,
@@ -1373,6 +1425,7 @@ var bedrockModels = {
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningBudget: true,
+ supportsNativeTools: true,
  inputPrice: 1,
  outputPrice: 5,
  cacheWritesPrice: 1.25,
@@ -1388,6 +1441,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15
  },
@@ -1396,6 +1450,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 15,
  outputPrice: 75
  },
@@ -1404,6 +1459,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15
  },
@@ -1412,6 +1468,7 @@ var bedrockModels = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.25,
  outputPrice: 1.25
  },
@@ -1420,6 +1477,7 @@ var bedrockModels = {
  contextWindow: 1e5,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 8,
  outputPrice: 24,
  description: "Claude 2.1"
@@ -1429,6 +1487,7 @@ var bedrockModels = {
  contextWindow: 1e5,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 8,
  outputPrice: 24,
  description: "Claude 2.0"
@@ -1438,6 +1497,7 @@ var bedrockModels = {
  contextWindow: 1e5,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.8,
  outputPrice: 2.4,
  description: "Claude Instant"
@@ -1447,6 +1507,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 1.35,
  outputPrice: 5.4
  },
@@ -1455,6 +1516,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.5,
  outputPrice: 1.5,
  description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases"
@@ -1464,6 +1526,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2,
  outputPrice: 6,
  description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model"
@@ -1473,6 +1536,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.3 Instruct (70B)"
@@ -1482,6 +1546,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.2 Instruct (90B)"
@@ -1491,6 +1556,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: true,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.16,
  outputPrice: 0.16,
  description: "Llama 3.2 Instruct (11B)"
@@ -1500,6 +1566,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.15,
  description: "Llama 3.2 Instruct (3B)"
@@ -1509,6 +1576,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.1,
  outputPrice: 0.1,
  description: "Llama 3.2 Instruct (1B)"
@@ -1518,6 +1586,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.4,
  outputPrice: 2.4,
  description: "Llama 3.1 Instruct (405B)"
@@ -1527,6 +1596,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.72,
  outputPrice: 0.72,
  description: "Llama 3.1 Instruct (70B)"
@@ -1536,6 +1606,7 @@ var bedrockModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.9,
  outputPrice: 0.9,
  description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
@@ -1545,6 +1616,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.22,
  outputPrice: 0.22,
  description: "Llama 3.1 Instruct (8B)"
@@ -1554,6 +1626,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 2.65,
  outputPrice: 3.5
  },
@@ -1562,6 +1635,7 @@ var bedrockModels = {
  contextWindow: 4e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.3,
  outputPrice: 0.6
  },
@@ -1570,6 +1644,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.15,
  outputPrice: 0.2,
  description: "Amazon Titan Text Lite"
@@ -1579,6 +1654,7 @@ var bedrockModels = {
  contextWindow: 8e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0.2,
  outputPrice: 0.6,
  description: "Amazon Titan Text Express"
@@ -1598,6 +1674,52 @@ var bedrockModels = {
  supportsPromptCache: false,
  inputPrice: 0.02,
  description: "Amazon Titan Text Embeddings V2"
+ },
+ "moonshot.kimi-k2-thinking": {
+ maxTokens: 32e3,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ preserveReasoning: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ description: "Kimi K2 Thinking (1T parameter MoE model with 32B active parameters)"
+ },
+ "minimax.minimax-m2": {
+ maxTokens: 16384,
+ contextWindow: 196608,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ preserveReasoning: true,
+ inputPrice: 0.3,
+ outputPrice: 1.2,
+ description: "MiniMax M2 (230B parameter MoE model with 10B active parameters)"
+ },
+ "qwen.qwen3-next-80b-a3b": {
+ maxTokens: 8192,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ inputPrice: 0.15,
+ outputPrice: 1.2,
+ description: "Qwen3 Next 80B (MoE model with 3B active parameters)"
+ },
+ "qwen.qwen3-coder-480b-a35b-v1:0": {
+ maxTokens: 8192,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ inputPrice: 0.45,
+ outputPrice: 1.8,
+ description: "Qwen3 Coder 480B (MoE model with 35B active parameters)"
  }
  };
  var BEDROCK_DEFAULT_TEMPERATURE = 0.3;
@@ -1663,47 +1785,56 @@ var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
  var cerebrasDefaultModelId = "gpt-oss-120b";
  var cerebrasModels = {
  "zai-glm-4.6": {
- maxTokens: 16384,
- // consistent with their other models
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
  contextWindow: 131072,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
  },
  "qwen-3-235b-a22b-instruct-2507": {
- maxTokens: 64e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Intelligent model with ~1400 tokens/s"
  },
  "llama-3.3-70b": {
- maxTokens: 64e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "Powerful model with ~2600 tokens/s"
  },
  "qwen-3-32b": {
- maxTokens: 64e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "SOTA coding performance with ~2500 tokens/s"
  },
  "gpt-oss-120b": {
- maxTokens: 8e3,
+ maxTokens: 8192,
+ // Conservative default to avoid premature rate limiting
  contextWindow: 64e3,
  supportsImages: false,
  supportsPromptCache: false,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -2224,35 +2355,37 @@ var deepSeekModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- inputPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- outputPrice: 1.68,
- // $1.68 per million tokens - Updated Sept 5, 2025
- cacheWritesPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- cacheReadsPrice: 0.07,
- // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
- description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
+ defaultToolProtocol: "native",
+ inputPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ outputPrice: 0.42,
+ // $0.42 per million tokens - Updated Dec 9, 2025
+ cacheWritesPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ cacheReadsPrice: 0.028,
+ // $0.028 per million tokens (cache hit) - Updated Dec 9, 2025
+ description: `DeepSeek-V3.2 (Non-thinking Mode) achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally. Supports JSON output, tool calls, chat prefix completion (beta), and FIM completion (beta).`
  },
  "deepseek-reasoner": {
- maxTokens: 65536,
- // 64K max output for reasoning mode
+ maxTokens: 8192,
+ // 8K max output
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- inputPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- outputPrice: 1.68,
- // $1.68 per million tokens - Updated Sept 5, 2025
- cacheWritesPrice: 0.56,
- // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
- cacheReadsPrice: 0.07,
- // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
- description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
+ defaultToolProtocol: "native",
+ inputPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ outputPrice: 0.42,
+ // $0.42 per million tokens - Updated Dec 9, 2025
+ cacheWritesPrice: 0.28,
+ // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
+ cacheReadsPrice: 0.028,
+ // $0.028 per million tokens (cache hit) - Updated Dec 9, 2025
+ description: `DeepSeek-V3.2 (Thinking Mode) achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 8K output tokens. Supports JSON output, tool calls, and chat prefix completion (beta).`
  }
  };
- var DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6;
+ var DEEP_SEEK_DEFAULT_TEMPERATURE = 0;

  // src/providers/doubao.ts
  var doubaoDefaultModelId = "doubao-seed-1-6-250615";
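With the new DeepSeek prices above, per-request cost follows the usual cached/uncached split. A quick sanity check using the values from this hunk (prices are USD per million tokens; the token counts are illustrative):

```ts
// Prices from the 1.90.0 deepSeekModels entry (USD per 1M tokens).
const inputPrice = 0.28;       // cache miss
const cacheReadsPrice = 0.028; // cache hit
const outputPrice = 0.42;

// Illustrative request: 80K cached input, 20K fresh input, 4K output.
const cost =
  (80_000 / 1e6) * cacheReadsPrice +
  (20_000 / 1e6) * inputPrice +
  (4_000 / 1e6) * outputPrice;

console.log(cost.toFixed(4)); // "0.0095" USD
```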
@@ -2707,21 +2840,6 @@ var geminiModels = {
  }
  };

- // src/providers/glama.ts
- var glamaDefaultModelId = "anthropic/claude-3-7-sonnet";
- var glamaDefaultModelInfo = {
- maxTokens: 8192,
- contextWindow: 2e5,
- supportsImages: true,
- supportsPromptCache: true,
- inputPrice: 3,
- outputPrice: 15,
- cacheWritesPrice: 3.75,
- cacheReadsPrice: 0.3,
- description: "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)"
- };
- var GLAMA_DEFAULT_TEMPERATURE = 0;
-
  // src/providers/groq.ts
  var groqDefaultModelId = "moonshotai/kimi-k2-instruct-0905";
  var groqModels = {
@@ -2902,6 +3020,7 @@ var litellmDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -3089,6 +3208,7 @@ var ollamaDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 0,
  outputPrice: 0,
  cacheWritesPrice: 0,
@@ -3099,10 +3219,30 @@ var ollamaDefaultModelInfo = {
  // src/providers/openai.ts
  var openAiNativeDefaultModelId = "gpt-5.1";
  var openAiNativeModels = {
+ "gpt-5.1-codex-max": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ promptCacheRetention: "24h",
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 1.25,
+ outputPrice: 10,
+ cacheReadsPrice: 0.125,
+ supportsTemperature: false,
+ tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }],
+ description: "GPT-5.1 Codex Max: Our most intelligent coding model optimized for long-horizon, agentic coding tasks"
+ },
  "gpt-5.1": {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  promptCacheRetention: "24h",
@@ -3123,6 +3263,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  promptCacheRetention: "24h",
@@ -3139,6 +3281,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  promptCacheRetention: "24h",
@@ -3154,6 +3298,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3173,6 +3319,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3192,6 +3340,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["low", "medium", "high"],
@@ -3207,6 +3357,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3223,6 +3375,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 1.25,
@@ -3234,6 +3388,8 @@ var openAiNativeModels = {
  maxTokens: 32768,
  contextWindow: 1047576,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 2,
@@ -3248,6 +3404,8 @@ var openAiNativeModels = {
  maxTokens: 32768,
  contextWindow: 1047576,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 0.4,
@@ -3262,6 +3420,8 @@ var openAiNativeModels = {
  maxTokens: 32768,
  contextWindow: 1047576,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  inputPrice: 0.1,
@@ -3469,6 +3629,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3488,6 +3650,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3507,6 +3671,8 @@ var openAiNativeModels = {
  maxTokens: 128e3,
  contextWindow: 4e5,
  supportsNativeTools: true,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
  supportsImages: true,
  supportsPromptCache: true,
  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -3669,7 +3835,18 @@ var RooModelSchema = import_zod7.z.object({
  type: import_zod7.z.literal("language"),
  tags: import_zod7.z.array(import_zod7.z.string()).optional(),
  pricing: RooPricingSchema,
- deprecated: import_zod7.z.boolean().optional()
+ deprecated: import_zod7.z.boolean().optional(),
+ default_temperature: import_zod7.z.number().optional(),
+ // Dynamic settings that map directly to ModelInfo properties
+ // Allows the API to configure model-specific defaults like includedTools, excludedTools, reasoningEffort, etc.
+ // These are always direct values (e.g., includedTools: ['search_replace']) for backward compatibility with old clients.
+ settings: import_zod7.z.record(import_zod7.z.string(), import_zod7.z.unknown()).optional(),
+ // Versioned settings keyed by version number (e.g., '3.36.4').
+ // Each version key maps to a settings object that is used when plugin version >= that version.
+ // New clients find the highest version key <= current version and use those settings.
+ // Old clients ignore this field and use plain values from `settings`.
+ // Example: { '3.36.4': { includedTools: ['search_replace'] }, '3.35.0': { ... } }
+ versionedSettings: import_zod7.z.record(import_zod7.z.string(), import_zod7.z.record(import_zod7.z.string(), import_zod7.z.unknown())).optional()
  });
  var RooModelsResponseSchema = import_zod7.z.object({
  object: import_zod7.z.literal("list"),
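The comments in this hunk spell out the resolution rule: new clients pick the highest `versionedSettings` key less than or equal to their own version, and fall back to the plain `settings` object otherwise. A minimal sketch of that lookup (the helper names and the dotted-version comparison are illustrative, not part of the package):

```ts
// Hypothetical helper: compare dotted versions segment by segment.
function cmpVersion(a: string, b: string): number {
  const pa = a.split(".").map(Number);
  const pb = b.split(".").map(Number);
  for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
    const d = (pa[i] ?? 0) - (pb[i] ?? 0);
    if (d !== 0) return d;
  }
  return 0;
}

// Hypothetical resolver implementing the rule described in the comments.
function resolveSettings(
  currentVersion: string,
  settings?: Record<string, unknown>,
  versionedSettings?: Record<string, Record<string, unknown>>,
): Record<string, unknown> | undefined {
  if (versionedSettings) {
    const best = Object.keys(versionedSettings)
      .filter((v) => cmpVersion(v, currentVersion) <= 0) // keys <= current version
      .sort(cmpVersion)
      .pop(); // highest qualifying key
    if (best) return versionedSettings[best];
  }
  return settings; // old-client / no-match fallback
}
```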
@@ -3787,6 +3964,7 @@ var unboundDefaultModelInfo = {
  contextWindow: 2e5,
  supportsImages: true,
  supportsPromptCache: true,
+ supportsNativeTools: true,
  inputPrice: 3,
  outputPrice: 15,
  cacheWritesPrice: 3.75,
@@ -4434,15 +4612,17 @@ var xaiDefaultModelId = "grok-code-fast-1";
  var xaiModels = {
  "grok-code-fast-1": {
  maxTokens: 16384,
- contextWindow: 262144,
- supportsImages: false,
+ contextWindow: 256e3,
+ supportsImages: true,
  supportsPromptCache: true,
  supportsNativeTools: true,
  inputPrice: 0.2,
  outputPrice: 1.5,
  cacheWritesPrice: 0.02,
  cacheReadsPrice: 0.02,
- description: "xAI's Grok Code Fast model with 256K context window"
+ description: "xAI's Grok Code Fast model with 256K context window",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-1-fast-reasoning": {
  maxTokens: 65536,
@@ -4454,7 +4634,9 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+ description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-1-fast-non-reasoning": {
  maxTokens: 65536,
@@ -4466,7 +4648,9 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+ description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-fast-reasoning": {
  maxTokens: 65536,
@@ -4478,7 +4662,9 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning"
+ description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-4-fast-non-reasoning": {
  maxTokens: 65536,
@@ -4490,9 +4676,11 @@ var xaiModels = {
  outputPrice: 0.5,
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
- description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling"
+ description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
- "grok-4": {
+ "grok-4-0709": {
  maxTokens: 8192,
  contextWindow: 256e3,
  supportsImages: true,
@@ -4502,36 +4690,14 @@ var xaiModels = {
  outputPrice: 15,
  cacheWritesPrice: 0.75,
  cacheReadsPrice: 0.75,
- description: "xAI's Grok-4 model with 256K context window"
- },
- "grok-3": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- supportsNativeTools: true,
- inputPrice: 3,
- outputPrice: 15,
- cacheWritesPrice: 0.75,
- cacheReadsPrice: 0.75,
- description: "xAI's Grok-3 model with 128K context window"
- },
- "grok-3-fast": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- supportsNativeTools: true,
- inputPrice: 5,
- outputPrice: 25,
- cacheWritesPrice: 1.25,
- cacheReadsPrice: 1.25,
- description: "xAI's Grok-3 fast model with 128K context window"
+ description: "xAI's Grok-4 model with 256K context window",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
  "grok-3-mini": {
  maxTokens: 8192,
  contextWindow: 131072,
- supportsImages: false,
+ supportsImages: true,
  supportsPromptCache: true,
  supportsNativeTools: true,
  inputPrice: 0.3,
@@ -4539,40 +4705,24 @@ var xaiModels = {
  cacheWritesPrice: 0.07,
  cacheReadsPrice: 0.07,
  description: "xAI's Grok-3 mini model with 128K context window",
- supportsReasoningEffort: true
- },
- "grok-3-mini-fast": {
- maxTokens: 8192,
- contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: true,
- supportsNativeTools: true,
- inputPrice: 0.6,
- outputPrice: 4,
- cacheWritesPrice: 0.15,
- cacheReadsPrice: 0.15,
- description: "xAI's Grok-3 mini fast model with 128K context window",
- supportsReasoningEffort: true
+ supportsReasoningEffort: ["low", "high"],
+ reasoningEffort: "low",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  },
- "grok-2-1212": {
+ "grok-3": {
  maxTokens: 8192,
  contextWindow: 131072,
- supportsImages: false,
- supportsPromptCache: false,
- supportsNativeTools: true,
- inputPrice: 2,
- outputPrice: 10,
- description: "xAI's Grok-2 model (version 1212) with 128K context window"
- },
- "grok-2-vision-1212": {
- maxTokens: 8192,
- contextWindow: 32768,
  supportsImages: true,
- supportsPromptCache: false,
+ supportsPromptCache: true,
  supportsNativeTools: true,
- inputPrice: 2,
- outputPrice: 10,
- description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window"
+ inputPrice: 3,
+ outputPrice: 15,
+ cacheWritesPrice: 0.75,
+ cacheReadsPrice: 0.75,
+ description: "xAI's Grok-3 model with 128K context window",
+ includedTools: ["search_replace"],
+ excludedTools: ["apply_diff"]
  }
  };

@@ -4679,7 +4829,6 @@ var internationalZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.6,
  outputPrice: 2.2,
  cacheWritesPrice: 0,
@@ -4752,7 +4901,6 @@ var internationalZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.6,
  outputPrice: 2.2,
  cacheWritesPrice: 0,
@@ -4780,7 +4928,6 @@ var mainlandZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.29,
  outputPrice: 1.14,
  cacheWritesPrice: 0,
@@ -4853,7 +5000,6 @@ var mainlandZAiModels = {
  supportsImages: false,
  supportsPromptCache: true,
  supportsNativeTools: true,
- supportsReasoningBinary: true,
  inputPrice: 0.29,
  outputPrice: 1.14,
  cacheWritesPrice: 0,
@@ -4929,8 +5075,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
  return openRouterDefaultModelId;
  case "requesty":
  return requestyDefaultModelId;
- case "glama":
- return glamaDefaultModelId;
  case "unbound":
  return unboundDefaultModelId;
  case "litellm":
@@ -5017,7 +5161,6 @@ var dynamicProviders = [
  "io-intelligence",
  "requesty",
  "unbound",
- "glama",
  "roo",
  "chutes"
  ];
@@ -5099,10 +5242,6 @@ var claudeCodeSchema = apiModelIdProviderModelSchema.extend({
  claudeCodePath: import_zod8.z.string().optional(),
  claudeCodeMaxOutputTokens: import_zod8.z.number().int().min(1).max(2e5).optional()
  });
- var glamaSchema = baseProviderSettingsSchema.extend({
- glamaModelId: import_zod8.z.string().optional(),
- glamaApiKey: import_zod8.z.string().optional()
- });
  var openRouterSchema = baseProviderSettingsSchema.extend({
  openRouterApiKey: import_zod8.z.string().optional(),
  openRouterModelId: import_zod8.z.string().optional(),
@@ -5287,7 +5426,6 @@ var defaultSchema = import_zod8.z.object({
  var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiProvider", [
  anthropicSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("anthropic") })),
  claudeCodeSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("claude-code") })),
- glamaSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("glama") })),
  openRouterSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openrouter") })),
  bedrockSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("bedrock") })),
  vertexSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("vertex") })),
@@ -5329,7 +5467,6 @@ var providerSettingsSchema = import_zod8.z.object({
  apiProvider: providerNamesSchema.optional(),
  ...anthropicSchema.shape,
  ...claudeCodeSchema.shape,
- ...glamaSchema.shape,
  ...openRouterSchema.shape,
  ...bedrockSchema.shape,
  ...vertexSchema.shape,
@@ -5374,7 +5511,6 @@ var discriminatedProviderSettingsWithIdSchema = providerSettingsSchemaDiscrimina
  var PROVIDER_SETTINGS_KEYS = providerSettingsSchema.keyof().options;
  var modelIdKeys = [
  "apiModelId",
- "glamaModelId",
  "openRouterModelId",
  "openAiModelId",
  "ollamaModelId",
@@ -5396,7 +5532,6 @@ var isTypicalProvider = (key) => isProviderName(key) && !isInternalProvider(key)
  var modelIdKeysByProvider = {
  anthropic: "apiModelId",
  "claude-code": "apiModelId",
- glama: "glamaModelId",
  openrouter: "openRouterModelId",
  bedrock: "apiModelId",
  vertex: "apiModelId",
@@ -5527,7 +5662,6 @@ var MODELS_BY_PROVIDER = {
  zai: { id: "zai", label: "Z.ai", models: Object.keys(internationalZAiModels) },
  baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
  // Dynamic providers; models pulled from remote APIs.
- glama: { id: "glama", label: "Glama", models: [] },
  huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
  litellm: { id: "litellm", label: "LiteLLM", models: [] },
  openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
@@ -5759,6 +5893,67 @@ var rooCodeTelemetryEventSchema = import_zod11.z.discriminatedUnion("type", [
  })
  })
  ]);
+ var EXPECTED_API_ERROR_CODES = /* @__PURE__ */ new Set([
+ 402,
+ // Payment required - billing issues
+ 429
+ // Rate limit - expected when hitting API limits
+ ]);
+ var EXPECTED_ERROR_MESSAGE_PATTERNS = [
+ /^429\b/,
+ // Message starts with "429"
+ /rate limit/i
+ // Contains "rate limit" (case insensitive)
+ ];
+ function isOpenAISdkError(error) {
+ return typeof error === "object" && error !== null && "status" in error && typeof error.status === "number";
+ }
+ function getErrorStatusCode(error) {
+ if (isOpenAISdkError(error)) {
+ return error.status;
+ }
+ return void 0;
+ }
+ function getErrorMessage(error) {
+ if (isOpenAISdkError(error)) {
+ return error.error?.metadata?.raw || error.error?.message || error.message;
+ }
+ return void 0;
+ }
+ function shouldReportApiErrorToTelemetry(errorCode, errorMessage) {
+ if (errorCode !== void 0 && EXPECTED_API_ERROR_CODES.has(errorCode)) {
+ return false;
+ }
+ if (errorMessage) {
+ for (const pattern of EXPECTED_ERROR_MESSAGE_PATTERNS) {
+ if (pattern.test(errorMessage)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ var ApiProviderError = class extends Error {
+ constructor(message, provider, modelId, operation, errorCode) {
+ super(message);
+ this.provider = provider;
+ this.modelId = modelId;
+ this.operation = operation;
+ this.errorCode = errorCode;
+ this.name = "ApiProviderError";
+ }
+ };
+ function isApiProviderError(error) {
+ return error instanceof Error && error.name === "ApiProviderError" && "provider" in error && "modelId" in error && "operation" in error;
+ }
+ function extractApiProviderErrorProperties(error) {
+ return {
+ provider: error.provider,
+ modelId: error.modelId,
+ operation: error.operation,
+ ...error.errorCode !== void 0 && { errorCode: error.errorCode }
+ };
+ }

  // src/mode.ts
  var import_zod12 = require("zod");
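Together these helpers gate error telemetry: expected failures (HTTP 402/429, or messages matching the rate-limit patterns) are skipped, and everything else can be wrapped in an `ApiProviderError` whose properties are extracted for reporting. A small usage sketch built only from the exports added in this release (the provider, model, and operation strings are illustrative):

```ts
import {
  ApiProviderError,
  extractApiProviderErrorProperties,
  getErrorMessage,
  getErrorStatusCode,
  isApiProviderError,
  shouldReportApiErrorToTelemetry,
} from "@roo-code/types";

function handleProviderFailure(raw: unknown) {
  const code = getErrorStatusCode(raw); // number for OpenAI-SDK-shaped errors, else undefined
  const message = getErrorMessage(raw) ?? String(raw);

  if (!shouldReportApiErrorToTelemetry(code, message)) {
    return; // 402/429 and rate-limit messages are expected; skip telemetry
  }

  const err = new ApiProviderError(message, "openrouter", "some/model-id", "createMessage", code);
  if (isApiProviderError(err)) {
    // { provider, modelId, operation, errorCode? } — ready to attach to a telemetry event
    console.log(extractApiProviderErrorProperties(err));
  }
}
```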
@@ -5837,7 +6032,7 @@ var DEFAULT_MODES = [
  whenToUse: "Use this mode when you need to plan, design, or strategize before implementation. Perfect for breaking down complex problems, creating technical specifications, designing system architecture, or brainstorming solutions before coding.",
  description: "Plan and design before implementation",
  groups: ["read", ["edit", { fileRegex: "\\.md$", description: "Markdown files only" }], "browser", "mcp"],
- customInstructions: "1. Do some information gathering (using provided tools) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, break down the task into clear, actionable steps and create a todo list using the `update_todo_list` tool. Each todo item should be:\n - Specific and actionable\n - Listed in logical execution order\n - Focused on a single, well-defined outcome\n - Clear enough that another mode could execute it independently\n\n **Note:** If the `update_todo_list` tool is not available, write the plan to a markdown file (e.g., `plan.md` or `todo.md`) instead.\n\n4. As you gather more information or discover new requirements, update the todo list to reflect the current understanding of what needs to be accomplished.\n\n5. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and refine the todo list.\n\n6. Include Mermaid diagrams if they help clarify complex workflows or system architecture. Please avoid using double quotes (\"\") and parentheses () inside square brackets ([]) in Mermaid diagrams, as this can cause parsing errors.\n\n7. Use the switch_mode tool to request that the user switch to another mode to implement the solution.\n\n**IMPORTANT: Focus on creating clear, actionable todo lists rather than lengthy markdown documents. Use the todo list as your primary planning tool to track and organize the work that needs to be done.**"
+ customInstructions: "1. Do some information gathering (using provided tools) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, break down the task into clear, actionable steps and create a todo list using the `update_todo_list` tool. Each todo item should be:\n - Specific and actionable\n - Listed in logical execution order\n - Focused on a single, well-defined outcome\n - Clear enough that another mode could execute it independently\n\n **Note:** If the `update_todo_list` tool is not available, write the plan to a markdown file (e.g., `plan.md` or `todo.md`) instead.\n\n4. As you gather more information or discover new requirements, update the todo list to reflect the current understanding of what needs to be accomplished.\n\n5. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and refine the todo list.\n\n6. Include Mermaid diagrams if they help clarify complex workflows or system architecture. Please avoid using double quotes (\"\") and parentheses () inside square brackets ([]) in Mermaid diagrams, as this can cause parsing errors.\n\n7. Use the switch_mode tool to request that the user switch to another mode to implement the solution.\n\n**IMPORTANT: Focus on creating clear, actionable todo lists rather than lengthy markdown documents. Use the todo list as your primary planning tool to track and organize the work that needs to be done.**\n\n**CRITICAL: Never provide level of effort time estimates (e.g., hours, days, weeks) for tasks. Focus solely on breaking down the work into clear, actionable steps without estimating how long they will take.**\n\nUnless told otherwise, if you want to save a plan file, put it in the /plans directory"
  },
  {
  slug: "code",
@@ -6055,7 +6250,6 @@ var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
  var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
  var SECRET_STATE_KEYS = [
  "apiKey",
- "glamaApiKey",
  "openRouterApiKey",
  "awsAccessKey",
  "awsApiKey",
@@ -6621,6 +6815,16 @@ var usageStatsSchema = import_zod16.z.object({
  // Period in days (e.g., 30)
  });

+ // src/context-management.ts
+ var CONTEXT_MANAGEMENT_EVENTS = [
+ "condense_context",
+ "condense_context_error",
+ "sliding_window_truncation"
+ ];
+ function isContextManagementEvent(value) {
+ return typeof value === "string" && CONTEXT_MANAGEMENT_EVENTS.includes(value);
+ }
+
  // src/cookie-consent.ts
  var CONSENT_COOKIE_NAME = "roo-code-cookie-consent";
  var COOKIE_CONSENT_EVENTS = {
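The new `CONTEXT_MANAGEMENT_EVENTS` list and its guard let callers narrow arbitrary event-name strings before routing them. A small sketch under the assumption that the published TypeScript types narrow `value` through the guard; `routeEvent` and the `"message_created"` name are illustrative only.

```ts
import { CONTEXT_MANAGEMENT_EVENTS, isContextManagementEvent } from "@roo-code/types"

// Route only context-management events to the condense/truncation handler.
function routeEvent(name: string): void {
  if (isContextManagementEvent(name)) {
    // Here `name` is one of "condense_context" | "condense_context_error" | "sliding_window_truncation".
    console.log("context-management event:", name)
  }
}

routeEvent("sliding_window_truncation") // handled
routeEvent("message_created") // ignored — hypothetical unrelated event name
console.log(CONTEXT_MANAGEMENT_EVENTS.length) // 3
```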
@@ -6813,6 +7017,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  ANTHROPIC_DEFAULT_MAX_TOKENS,
  ANTHROPIC_STYLE_PROVIDERS,
  AWS_INFERENCE_PROFILE_MAPPING,
+ ApiProviderError,
  BEDROCK_1M_CONTEXT_MODEL_IDS,
  BEDROCK_DEFAULT_CONTEXT,
  BEDROCK_DEFAULT_TEMPERATURE,
@@ -6822,6 +7027,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
  CODEBASE_INDEX_DEFAULTS,
  CONSENT_COOKIE_NAME,
+ CONTEXT_MANAGEMENT_EVENTS,
  COOKIE_CONSENT_EVENTS,
  ConnectionState,
  DEEP_SEEK_DEFAULT_TEMPERATURE,
@@ -6834,10 +7040,10 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  DOUBAO_API_CHAT_PATH,
  EVALS_SETTINGS,
  EVALS_TIMEOUT,
+ EXPECTED_API_ERROR_CODES,
  ExtensionBridgeCommandName,
  ExtensionBridgeEventName,
  ExtensionSocketEvents,
- GLAMA_DEFAULT_TEMPERATURE,
  GLOBAL_SECRET_KEYS,
  GLOBAL_SETTINGS_KEYS,
  GLOBAL_STATE_KEYS,
@@ -6921,6 +7127,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  commandExecutionStatusSchema,
  commandIds,
  contextCondenseSchema,
+ contextTruncationSchema,
  convertModelNameForVertex,
  customModePromptsSchema,
  customModesSettingsSchema,
@@ -6942,6 +7149,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  extensionBridgeCommandSchema,
  extensionBridgeEventSchema,
  extensionInstanceSchema,
+ extractApiProviderErrorProperties,
  fauxProviders,
  featherlessDefaultModelId,
  featherlessModels,
@@ -6953,12 +7161,12 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  getApiProtocol,
  getClaudeCodeModelId,
  getEffectiveProtocol,
+ getErrorMessage,
+ getErrorStatusCode,
  getImageGenerationProvider,
  getModelId,
  getProviderDefaultModelId,
  gitPropertiesSchema,
- glamaDefaultModelId,
- glamaDefaultModelInfo,
  globalSettingsSchema,
  groqDefaultModelId,
  groqModels,
@@ -6975,6 +7183,8 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  ioIntelligenceDefaultModelId,
  ioIntelligenceModels,
  ipcMessageSchema,
+ isApiProviderError,
+ isContextManagementEvent,
  isCustomProvider,
  isDynamicProvider,
  isFauxProvider,
@@ -7063,6 +7273,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
  serviceTierSchema,
  serviceTiers,
  shareResponseSchema,
+ shouldReportApiErrorToTelemetry,
  shouldUseSingleFileRead,
  staticAppPropertiesSchema,
  suggestionItemSchema,