@roo-code/types 1.32.0 → 1.34.0

This diff shows the contents of publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -719,8 +719,15 @@ var chutesModels = {
 };
 
 // src/providers/claude-code.ts
+var VERTEX_DATE_PATTERN = /-(\d{8})$/;
+function convertModelNameForVertex(modelName) {
+  return modelName.replace(VERTEX_DATE_PATTERN, "@$1");
+}
 var claudeCodeDefaultModelId = "claude-sonnet-4-20250514";
 var CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS = 8e3;
+function getClaudeCodeModelId(baseModelId, useVertex = false) {
+  return useVertex ? convertModelNameForVertex(baseModelId) : baseModelId;
+}
 var claudeCodeModels = {
   "claude-sonnet-4-20250514": {
     ...anthropicModels["claude-sonnet-4-20250514"],
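Note on the new helpers above: Google Vertex AI addresses dated Anthropic models with an "@" before the date (e.g. claude-sonnet-4@20250514) rather than the trailing -YYYYMMDD form Anthropic uses, which is what the regex rewrite implements. A minimal usage sketch against the exports added in this release:

    import { convertModelNameForVertex, getClaudeCodeModelId } from "@roo-code/types";

    convertModelNameForVertex("claude-sonnet-4-20250514");  // => "claude-sonnet-4@20250514"
    getClaudeCodeModelId("claude-sonnet-4-20250514");       // => "claude-sonnet-4-20250514" (unchanged)
    getClaudeCodeModelId("claude-sonnet-4-20250514", true); // => "claude-sonnet-4@20250514" (Vertex form)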
@@ -1125,7 +1132,7 @@ var groqDefaultModelId = "llama-3.3-70b-versatile";
 var groqModels = {
   // Models based on API response: https://api.groq.com/openai/v1/models
   "llama-3.1-8b-instant": {
-    maxTokens: 131072,
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1134,7 +1141,7 @@ var groqModels = {
     description: "Meta Llama 3.1 8B Instant model, 128K context."
   },
   "llama-3.3-70b-versatile": {
-    maxTokens: 32768,
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1161,7 +1168,7 @@ var groqModels = {
     description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
   },
   "mistral-saba-24b": {
-    maxTokens: 32768,
+    maxTokens: 8192,
     contextWindow: 32768,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1170,7 +1177,7 @@ var groqModels = {
     description: "Mistral Saba 24B model, 32K context."
   },
   "qwen-qwq-32b": {
-    maxTokens: 131072,
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1179,7 +1186,7 @@ var groqModels = {
     description: "Alibaba Qwen QwQ 32B model, 128K context."
   },
   "qwen/qwen3-32b": {
-    maxTokens: 40960,
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1188,13 +1195,22 @@ var groqModels = {
     description: "Alibaba Qwen 3 32B model, 128K context."
   },
   "deepseek-r1-distill-llama-70b": {
-    maxTokens: 131072,
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
     inputPrice: 0.75,
     outputPrice: 0.99,
     description: "DeepSeek R1 Distill Llama 70B model, 128K context."
+  },
+  "moonshotai/kimi-k2-instruct": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 1,
+    outputPrice: 3,
+    description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
   }
 };
 
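Two things change in the Groq table: maxTokens (the per-request completion cap) drops to 8192 for every existing model while contextWindow is unchanged, and moonshotai/kimi-k2-instruct is added with a 16384-token cap. A reading sketch, assuming groqModels is exported like the other model tables in this file:

    import { groqModels } from "@roo-code/types";

    const kimi = groqModels["moonshotai/kimi-k2-instruct"];
    console.log(kimi.maxTokens, kimi.contextWindow);              // 16384 131072
    console.log(groqModels["llama-3.3-70b-versatile"].maxTokens); // 8192 (was 32768)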
@@ -2298,12 +2314,14 @@ var providerSettingsEntrySchema = z3.object({
   name: z3.string(),
   apiProvider: providerNamesSchema.optional()
 });
+var DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3;
 var baseProviderSettingsSchema = z3.object({
   includeMaxTokens: z3.boolean().optional(),
   diffEnabled: z3.boolean().optional(),
   fuzzyMatchThreshold: z3.number().optional(),
   modelTemperature: z3.number().nullish(),
   rateLimitSeconds: z3.number().optional(),
+  consecutiveMistakeLimit: z3.number().min(0).optional(),
   // Model reasoning.
   enableReasoningEffort: z3.boolean().optional(),
   reasoningEffort: reasoningEffortsSchema.optional(),
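The new consecutiveMistakeLimit provider setting validates as a non-negative number, and DEFAULT_CONSECUTIVE_MISTAKE_LIMIT (3) is exported alongside it (see the export list below). A sketch of the likely fallback pattern; resolveMistakeLimit and its argument shape are hypothetical, not part of the package:

    import { DEFAULT_CONSECUTIVE_MISTAKE_LIMIT } from "@roo-code/types";

    // Hypothetical consumer: an unset limit falls back to the exported default of 3.
    function resolveMistakeLimit(settings: { consecutiveMistakeLimit?: number }): number {
      return settings.consecutiveMistakeLimit ?? DEFAULT_CONSECUTIVE_MISTAKE_LIMIT;
    }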
@@ -2676,7 +2694,13 @@ var taskPropertiesSchema = z7.object({
   apiProvider: z7.enum(providerNames).optional(),
   modelId: z7.string().optional(),
   diffStrategy: z7.string().optional(),
-  isSubtask: z7.boolean().optional()
+  isSubtask: z7.boolean().optional(),
+  todos: z7.object({
+    total: z7.number(),
+    completed: z7.number(),
+    inProgress: z7.number(),
+    pending: z7.number()
+  }).optional()
 });
 var gitPropertiesSchema = z7.object({
   repositoryUrl: z7.string().optional(),
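The optional todos block on task properties is a rollup of todo-list items counted by status. For illustration, a payload the extended schema would now accept (values invented):

    // Accepted by taskPropertiesSchema after this change (values are illustrative).
    const taskProperties = {
      isSubtask: false,
      todos: { total: 4, completed: 1, inProgress: 1, pending: 2 }
    };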
@@ -2935,6 +2959,7 @@ var globalSettingsSchema = z11.object({
   alwaysAllowUpdateTodoList: z11.boolean().optional(),
   allowedCommands: z11.array(z11.string()).optional(),
   deniedCommands: z11.array(z11.string()).optional(),
+  commandExecutionTimeout: z11.number().optional(),
   allowedMaxRequests: z11.number().nullish(),
   autoCondenseContext: z11.boolean().optional(),
   autoCondenseContextPercent: z11.number().optional(),
@@ -3039,6 +3064,7 @@ var EVALS_SETTINGS = {
   alwaysAllowUpdateTodoList: true,
   followupAutoApproveTimeoutMs: 0,
   allowedCommands: ["*"],
+  commandExecutionTimeout: 3e4,
   browserToolEnabled: false,
   browserViewportSize: "900x600",
   screenshotQuality: 75,
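commandExecutionTimeout is added to the global settings schema and pinned to 3e4 in the evals profile; given that value, the unit is presumably milliseconds (30 seconds). A reading sketch using the exported EVALS_SETTINGS:

    import { EVALS_SETTINGS } from "@roo-code/types";

    // 3e4 === 30000; read here as 30 s, assuming the timeout is in milliseconds.
    const timeoutMs = EVALS_SETTINGS.commandExecutionTimeout ?? 30_000;
    console.log(timeoutMs); // 30000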
@@ -3426,6 +3452,10 @@ var commandExecutionStatusSchema = z17.discriminatedUnion("status", [
   z17.object({
     executionId: z17.string(),
     status: z17.literal("fallback")
+  }),
+  z17.object({
+    executionId: z17.string(),
+    status: z17.literal("timeout")
   })
 ]);
 
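With the new variant, commandExecutionStatusSchema (exported below) can report a timed-out execution, and consumers can narrow on the status discriminant. A parsing sketch with an invented payload:

    import { commandExecutionStatusSchema } from "@roo-code/types";

    const event = commandExecutionStatusSchema.parse({
      executionId: "exec-1", // invented for illustration
      status: "timeout"
    });
    if (event.status === "timeout") {
      console.log(`Execution ${event.executionId} timed out`);
    }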
@@ -3448,6 +3478,7 @@ export {
   CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
   CODEBASE_INDEX_DEFAULTS,
   DEEP_SEEK_DEFAULT_TEMPERATURE,
+  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
   EVALS_SETTINGS,
   EVALS_TIMEOUT,
   GLAMA_DEFAULT_TEMPERATURE,
@@ -3498,6 +3529,7 @@ export {
   commandExecutionStatusSchema,
   commandIds,
   contextCondenseSchema,
+  convertModelNameForVertex,
   customModePromptsSchema,
   customModesSettingsSchema,
   customSupportPromptsSchema,
@@ -3510,6 +3542,7 @@ export {
   geminiDefaultModelId,
   geminiModels,
   getApiProtocol,
+  getClaudeCodeModelId,
   getModelId,
   gitPropertiesSchema,
   glamaDefaultModelId,