@roo-code/types 1.32.0 → 1.34.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +43 -7
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +347 -10
- package/dist/index.d.ts +347 -10
- package/dist/index.js +40 -7
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
```diff
@@ -30,6 +30,7 @@ __export(index_exports, {
   CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS: () => CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
   CODEBASE_INDEX_DEFAULTS: () => CODEBASE_INDEX_DEFAULTS,
   DEEP_SEEK_DEFAULT_TEMPERATURE: () => DEEP_SEEK_DEFAULT_TEMPERATURE,
+  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT: () => DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
   EVALS_SETTINGS: () => EVALS_SETTINGS,
   EVALS_TIMEOUT: () => EVALS_TIMEOUT,
   GLAMA_DEFAULT_TEMPERATURE: () => GLAMA_DEFAULT_TEMPERATURE,
@@ -80,6 +81,7 @@ __export(index_exports, {
   commandExecutionStatusSchema: () => commandExecutionStatusSchema,
   commandIds: () => commandIds,
   contextCondenseSchema: () => contextCondenseSchema,
+  convertModelNameForVertex: () => convertModelNameForVertex,
   customModePromptsSchema: () => customModePromptsSchema,
   customModesSettingsSchema: () => customModesSettingsSchema,
   customSupportPromptsSchema: () => customSupportPromptsSchema,
@@ -92,6 +94,7 @@ __export(index_exports, {
   geminiDefaultModelId: () => geminiDefaultModelId,
   geminiModels: () => geminiModels,
   getApiProtocol: () => getApiProtocol,
+  getClaudeCodeModelId: () => getClaudeCodeModelId,
   getModelId: () => getModelId,
   gitPropertiesSchema: () => gitPropertiesSchema,
   glamaDefaultModelId: () => glamaDefaultModelId,
```
```diff
@@ -902,8 +905,15 @@ var chutesModels = {
 };
 
 // src/providers/claude-code.ts
+var VERTEX_DATE_PATTERN = /-(\d{8})$/;
+function convertModelNameForVertex(modelName) {
+  return modelName.replace(VERTEX_DATE_PATTERN, "@$1");
+}
 var claudeCodeDefaultModelId = "claude-sonnet-4-20250514";
 var CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS = 8e3;
+function getClaudeCodeModelId(baseModelId, useVertex = false) {
+  return useVertex ? convertModelNameForVertex(baseModelId) : baseModelId;
+}
 var claudeCodeModels = {
   "claude-sonnet-4-20250514": {
     ...anthropicModels["claude-sonnet-4-20250514"],
```
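Both new Claude Code helpers are re-exported from the package entry point (see the export-list hunks in this diff). A minimal usage sketch of the behavior visible above, assuming the package root is imported directly:

```ts
import { convertModelNameForVertex, getClaudeCodeModelId } from "@roo-code/types"

// Default (useVertex = false): the model id passes through unchanged.
console.log(getClaudeCodeModelId("claude-sonnet-4-20250514"))
// "claude-sonnet-4-20250514"

// useVertex = true: the trailing -YYYYMMDD date is rewritten to Vertex AI's @YYYYMMDD suffix.
console.log(getClaudeCodeModelId("claude-sonnet-4-20250514", true))
// "claude-sonnet-4@20250514"

// convertModelNameForVertex is the underlying transform; ids without an
// 8-digit date suffix fall through untouched because /-(\d{8})$/ does not match.
console.log(convertModelNameForVertex("claude-3-5-haiku"))
// "claude-3-5-haiku"
```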
```diff
@@ -1308,7 +1318,7 @@ var groqDefaultModelId = "llama-3.3-70b-versatile";
 var groqModels = {
   // Models based on API response: https://api.groq.com/openai/v1/models
   "llama-3.1-8b-instant": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1317,7 +1327,7 @@ var groqModels = {
     description: "Meta Llama 3.1 8B Instant model, 128K context."
   },
   "llama-3.3-70b-versatile": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1344,7 +1354,7 @@ var groqModels = {
     description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
   },
   "mistral-saba-24b": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 32768,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1353,7 +1363,7 @@ var groqModels = {
     description: "Mistral Saba 24B model, 32K context."
   },
   "qwen-qwq-32b": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1362,7 +1372,7 @@ var groqModels = {
     description: "Alibaba Qwen QwQ 32B model, 128K context."
   },
   "qwen/qwen3-32b": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
@@ -1371,13 +1381,22 @@ var groqModels = {
     description: "Alibaba Qwen 3 32B model, 128K context."
   },
   "deepseek-r1-distill-llama-70b": {
-    maxTokens:
+    maxTokens: 8192,
     contextWindow: 131072,
     supportsImages: false,
     supportsPromptCache: false,
     inputPrice: 0.75,
     outputPrice: 0.99,
     description: "DeepSeek R1 Distill Llama 70B model, 128K context."
+  },
+  "moonshotai/kimi-k2-instruct": {
+    maxTokens: 16384,
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 1,
+    outputPrice: 3,
+    description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
   }
 };
 
```
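Every Groq entry that previously shipped with a blank maxTokens now carries an explicit 8192 cap, and a new moonshotai/kimi-k2-instruct entry (16384 max output tokens, 128K context) is added. A small sketch of consuming the table, assuming groqModels is re-exported from the package root like the other model maps:

```ts
import { groqModels } from "@roo-code/types"

// Look up the limits of the newly added Kimi K2 entry.
const kimi = groqModels["moonshotai/kimi-k2-instruct"]
console.log(kimi.maxTokens)     // 16384
console.log(kimi.contextWindow) // 131072

// With the blanks filled in, a completion budget can be derived directly from the map.
const budget = Math.min(groqModels["llama-3.3-70b-versatile"].maxTokens ?? 8192, 4096)
console.log(budget) // 4096
```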
```diff
@@ -2481,12 +2500,14 @@ var providerSettingsEntrySchema = import_zod3.z.object({
   name: import_zod3.z.string(),
   apiProvider: providerNamesSchema.optional()
 });
+var DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3;
 var baseProviderSettingsSchema = import_zod3.z.object({
   includeMaxTokens: import_zod3.z.boolean().optional(),
   diffEnabled: import_zod3.z.boolean().optional(),
   fuzzyMatchThreshold: import_zod3.z.number().optional(),
   modelTemperature: import_zod3.z.number().nullish(),
   rateLimitSeconds: import_zod3.z.number().optional(),
+  consecutiveMistakeLimit: import_zod3.z.number().min(0).optional(),
   // Model reasoning.
   enableReasoningEffort: import_zod3.z.boolean().optional(),
   reasoningEffort: reasoningEffortsSchema.optional(),
```
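The new consecutiveMistakeLimit field is optional and validated as a number ≥ 0, and DEFAULT_CONSECUTIVE_MISTAKE_LIMIT (3) is exported alongside it. A hedged sketch of how a consumer might resolve the effective limit; ProviderSettings is assumed to be the inferred settings type the package already exposes:

```ts
import { DEFAULT_CONSECUTIVE_MISTAKE_LIMIT, type ProviderSettings } from "@roo-code/types"

// Falls back to the exported default when the profile does not set a limit.
// A value of 0 is accepted by the schema and is preserved here (not treated as "unset").
function resolveMistakeLimit(settings: ProviderSettings): number {
  return settings.consecutiveMistakeLimit ?? DEFAULT_CONSECUTIVE_MISTAKE_LIMIT
}
```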
```diff
@@ -2859,7 +2880,13 @@ var taskPropertiesSchema = import_zod7.z.object({
   apiProvider: import_zod7.z.enum(providerNames).optional(),
   modelId: import_zod7.z.string().optional(),
   diffStrategy: import_zod7.z.string().optional(),
-  isSubtask: import_zod7.z.boolean().optional()
+  isSubtask: import_zod7.z.boolean().optional(),
+  todos: import_zod7.z.object({
+    total: import_zod7.z.number(),
+    completed: import_zod7.z.number(),
+    inProgress: import_zod7.z.number(),
+    pending: import_zod7.z.number()
+  }).optional()
 });
 var gitPropertiesSchema = import_zod7.z.object({
   repositoryUrl: import_zod7.z.string().optional(),
```
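taskPropertiesSchema now accepts an optional todos aggregate with four numeric counters. A shape-only sketch of a conforming payload; the field names come straight from the hunk above, while the relationship total = completed + inProgress + pending is an assumption, not something the schema enforces:

```ts
// Example object matching the widened taskPropertiesSchema.
const taskProperties = {
  apiProvider: "anthropic",
  modelId: "claude-sonnet-4-20250514",
  isSubtask: false,
  todos: {
    total: 5, // presumably completed + inProgress + pending
    completed: 2,
    inProgress: 1,
    pending: 2,
  },
}
```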
```diff
@@ -3118,6 +3145,7 @@ var globalSettingsSchema = import_zod11.z.object({
   alwaysAllowUpdateTodoList: import_zod11.z.boolean().optional(),
   allowedCommands: import_zod11.z.array(import_zod11.z.string()).optional(),
   deniedCommands: import_zod11.z.array(import_zod11.z.string()).optional(),
+  commandExecutionTimeout: import_zod11.z.number().optional(),
   allowedMaxRequests: import_zod11.z.number().nullish(),
   autoCondenseContext: import_zod11.z.boolean().optional(),
   autoCondenseContextPercent: import_zod11.z.number().optional(),
@@ -3222,6 +3250,7 @@ var EVALS_SETTINGS = {
   alwaysAllowUpdateTodoList: true,
   followupAutoApproveTimeoutMs: 0,
   allowedCommands: ["*"],
+  commandExecutionTimeout: 3e4,
   browserToolEnabled: false,
   browserViewportSize: "900x600",
   screenshotQuality: 75,
```
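Both globalSettingsSchema and the EVALS_SETTINGS preset gain commandExecutionTimeout. The preset value 3e4 (30 000) reads as milliseconds, though the schema itself does not encode a unit. A small validation sketch, assuming globalSettingsSchema is part of the public exports:

```ts
import { globalSettingsSchema } from "@roo-code/types"

// All fields are optional, so a partial object parses; the new field just
// has to be a number if it is present.
const settings = globalSettingsSchema.parse({
  allowedCommands: ["git status"],
  commandExecutionTimeout: 30_000, // same value as the evals preset (3e4)
})

console.log(settings.commandExecutionTimeout) // 30000
```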
```diff
@@ -3609,6 +3638,10 @@ var commandExecutionStatusSchema = import_zod17.z.discriminatedUnion("status", [
   import_zod17.z.object({
     executionId: import_zod17.z.string(),
     status: import_zod17.z.literal("fallback")
+  }),
+  import_zod17.z.object({
+    executionId: import_zod17.z.string(),
+    status: import_zod17.z.literal("timeout")
   })
 ]);
 
```
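commandExecutionStatusSchema (exported, see the export hunks) gains an additional discriminated-union member with status "timeout". A sketch of parsing and narrowing on the new variant:

```ts
import { commandExecutionStatusSchema } from "@roo-code/types"

const event = commandExecutionStatusSchema.parse({
  executionId: "exec-42", // hypothetical id for illustration
  status: "timeout",
})

// The "status" discriminant narrows the union, so the new case can be handled explicitly.
if (event.status === "timeout") {
  console.warn(`Command execution ${event.executionId} timed out`)
}
```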
```diff
@@ -3632,6 +3665,7 @@ var todoItemSchema = import_zod18.z.object({
   CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
   CODEBASE_INDEX_DEFAULTS,
   DEEP_SEEK_DEFAULT_TEMPERATURE,
+  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
   EVALS_SETTINGS,
   EVALS_TIMEOUT,
   GLAMA_DEFAULT_TEMPERATURE,
@@ -3682,6 +3716,7 @@ var todoItemSchema = import_zod18.z.object({
   commandExecutionStatusSchema,
   commandIds,
   contextCondenseSchema,
+  convertModelNameForVertex,
   customModePromptsSchema,
   customModesSettingsSchema,
   customSupportPromptsSchema,
@@ -3694,6 +3729,7 @@ var todoItemSchema = import_zod18.z.object({
   geminiDefaultModelId,
   geminiModels,
   getApiProtocol,
+  getClaudeCodeModelId,
   getModelId,
   gitPropertiesSchema,
   glamaDefaultModelId,
```