@roo-code/types 1.60.0 → 1.62.0
This diff reflects the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/index.cjs +150 -26
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +484 -136
- package/dist/index.d.ts +484 -136
- package/dist/index.js +144 -25
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -13,7 +13,7 @@ var CODEBASE_INDEX_DEFAULTS = {
 var codebaseIndexConfigSchema = z.object({
   codebaseIndexEnabled: z.boolean().optional(),
   codebaseIndexQdrantUrl: z.string().optional(),
-  codebaseIndexEmbedderProvider: z.enum(["openai", "ollama", "openai-compatible", "gemini", "mistral"]).optional(),
+  codebaseIndexEmbedderProvider: z.enum(["openai", "ollama", "openai-compatible", "gemini", "mistral", "vercel-ai-gateway"]).optional(),
   codebaseIndexEmbedderBaseUrl: z.string().optional(),
   codebaseIndexEmbedderModelId: z.string().optional(),
   codebaseIndexEmbedderModelDimension: z.number().optional(),
@@ -28,7 +28,8 @@ var codebaseIndexModelsSchema = z.object({
   ollama: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
   "openai-compatible": z.record(z.string(), z.object({ dimension: z.number() })).optional(),
   gemini: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
-  mistral: z.record(z.string(), z.object({ dimension: z.number() })).optional()
+  mistral: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
+  "vercel-ai-gateway": z.record(z.string(), z.object({ dimension: z.number() })).optional()
 });
 var codebaseIndexProviderSchema = z.object({
   codeIndexOpenAiKey: z.string().optional(),
@@ -37,7 +38,8 @@ var codebaseIndexProviderSchema = z.object({
   codebaseIndexOpenAiCompatibleApiKey: z.string().optional(),
   codebaseIndexOpenAiCompatibleModelDimension: z.number().optional(),
   codebaseIndexGeminiApiKey: z.string().optional(),
-  codebaseIndexMistralApiKey: z.string().optional()
+  codebaseIndexMistralApiKey: z.string().optional(),
+  codebaseIndexVercelAiGatewayApiKey: z.string().optional()
 });
 
 // src/events.ts
@@ -2648,16 +2650,16 @@ var requestyDefaultModelInfo = {
 };
 
 // src/providers/roo.ts
-var rooDefaultModelId = "
+var rooDefaultModelId = "xai/grok-code-fast-1";
 var rooModels = {
-  "
+  "xai/grok-code-fast-1": {
     maxTokens: 16384,
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 0,
     outputPrice: 0,
-    description: "A
+    description: "A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)"
   }
 };
 
@@ -3265,8 +3267,19 @@ var vscodeLlmModels = {
 };
 
 // src/providers/xai.ts
-var xaiDefaultModelId = "grok-
+var xaiDefaultModelId = "grok-code-fast-1";
 var xaiModels = {
+  "grok-code-fast-1": {
+    maxTokens: 16384,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.2,
+    outputPrice: 1.5,
+    cacheWritesPrice: 0.02,
+    cacheReadsPrice: 0.02,
+    description: "xAI's Grok Code Fast model with 256K context window"
+  },
   "grok-4": {
     maxTokens: 8192,
     contextWindow: 256e3,
@@ -3344,6 +3357,101 @@ var xaiModels = {
   }
 };
 
+// src/providers/vercel-ai-gateway.ts
+var vercelAiGatewayDefaultModelId = "anthropic/claude-sonnet-4";
+var VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
+  "anthropic/claude-3-haiku",
+  "anthropic/claude-3-opus",
+  "anthropic/claude-3.5-haiku",
+  "anthropic/claude-3.5-sonnet",
+  "anthropic/claude-3.7-sonnet",
+  "anthropic/claude-opus-4",
+  "anthropic/claude-opus-4.1",
+  "anthropic/claude-sonnet-4",
+  "openai/gpt-4.1",
+  "openai/gpt-4.1-mini",
+  "openai/gpt-4.1-nano",
+  "openai/gpt-4o",
+  "openai/gpt-4o-mini",
+  "openai/gpt-5",
+  "openai/gpt-5-mini",
+  "openai/gpt-5-nano",
+  "openai/o1",
+  "openai/o3",
+  "openai/o3-mini",
+  "openai/o4-mini"
+]);
+var VERCEL_AI_GATEWAY_VISION_ONLY_MODELS = /* @__PURE__ */ new Set([
+  "alibaba/qwen-3-14b",
+  "alibaba/qwen-3-235b",
+  "alibaba/qwen-3-30b",
+  "alibaba/qwen-3-32b",
+  "alibaba/qwen3-coder",
+  "amazon/nova-pro",
+  "anthropic/claude-3.5-haiku",
+  "google/gemini-1.5-flash-8b",
+  "google/gemini-2.0-flash-thinking",
+  "google/gemma-3-27b",
+  "mistral/devstral-small",
+  "xai/grok-vision-beta"
+]);
+var VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS = /* @__PURE__ */ new Set([
+  "amazon/nova-lite",
+  "anthropic/claude-3-haiku",
+  "anthropic/claude-3-opus",
+  "anthropic/claude-3-sonnet",
+  "anthropic/claude-3.5-sonnet",
+  "anthropic/claude-3.7-sonnet",
+  "anthropic/claude-opus-4",
+  "anthropic/claude-opus-4.1",
+  "anthropic/claude-sonnet-4",
+  "google/gemini-1.5-flash",
+  "google/gemini-1.5-pro",
+  "google/gemini-2.0-flash",
+  "google/gemini-2.0-flash-lite",
+  "google/gemini-2.0-pro",
+  "google/gemini-2.5-flash",
+  "google/gemini-2.5-flash-lite",
+  "google/gemini-2.5-pro",
+  "google/gemini-exp",
+  "meta/llama-3.2-11b",
+  "meta/llama-3.2-90b",
+  "meta/llama-3.3",
+  "meta/llama-4-maverick",
+  "meta/llama-4-scout",
+  "mistral/pixtral-12b",
+  "mistral/pixtral-large",
+  "moonshotai/kimi-k2",
+  "openai/gpt-4-turbo",
+  "openai/gpt-4.1",
+  "openai/gpt-4.1-mini",
+  "openai/gpt-4.1-nano",
+  "openai/gpt-4.5-preview",
+  "openai/gpt-4o",
+  "openai/gpt-4o-mini",
+  "openai/gpt-oss-120b",
+  "openai/gpt-oss-20b",
+  "openai/o3",
+  "openai/o3-pro",
+  "openai/o4-mini",
+  "vercel/v0-1.0-md",
+  "xai/grok-2-vision",
+  "zai/glm-4.5v"
+]);
+var vercelAiGatewayDefaultModelInfo = {
+  maxTokens: 64e3,
+  contextWindow: 2e5,
+  supportsImages: true,
+  supportsComputerUse: true,
+  supportsPromptCache: true,
+  inputPrice: 3,
+  outputPrice: 15,
+  cacheWritesPrice: 3.75,
+  cacheReadsPrice: 0.3,
+  description: "Claude Sonnet 4 significantly improves on Sonnet 3.7's industry-leading capabilities, excelling in coding with a state-of-the-art 72.7% on SWE-bench. The model balances performance and efficiency for internal and external use cases, with enhanced steerability for greater control over implementations. While not matching Opus 4 in most domains, it delivers an optimal mix of capability and practicality."
+};
+var VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE = 0.7;
+
 // src/providers/zai.ts
 var internationalZAiDefaultModelId = "glm-4.5";
 var internationalZAiModels = {
@@ -3472,13 +3580,15 @@ var providerNames = [
   "fireworks",
   "featherless",
   "io-intelligence",
-  "roo"
+  "roo",
+  "vercel-ai-gateway"
 ];
 var providerNamesSchema = z8.enum(providerNames);
 var providerSettingsEntrySchema = z8.object({
   id: z8.string(),
   name: z8.string(),
-  apiProvider: providerNamesSchema.optional()
+  apiProvider: providerNamesSchema.optional(),
+  modelId: z8.string().optional()
 });
 var DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3;
 var baseProviderSettingsSchema = z8.object({
@@ -3669,6 +3779,10 @@ var qwenCodeSchema = apiModelIdProviderModelSchema.extend({
 var rooSchema = apiModelIdProviderModelSchema.extend({
   // No additional fields needed - uses cloud authentication
 });
+var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
+  vercelAiGatewayApiKey: z8.string().optional(),
+  vercelAiGatewayModelId: z8.string().optional()
+});
 var defaultSchema = z8.object({
   apiProvider: z8.undefined()
 });
@@ -3707,6 +3821,7 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
   ioIntelligenceSchema.merge(z8.object({ apiProvider: z8.literal("io-intelligence") })),
   qwenCodeSchema.merge(z8.object({ apiProvider: z8.literal("qwen-code") })),
   rooSchema.merge(z8.object({ apiProvider: z8.literal("roo") })),
+  vercelAiGatewaySchema.merge(z8.object({ apiProvider: z8.literal("vercel-ai-gateway") })),
   defaultSchema
 ]);
 var providerSettingsSchema = z8.object({
@@ -3745,6 +3860,7 @@ var providerSettingsSchema = z8.object({
   ...ioIntelligenceSchema.shape,
   ...qwenCodeSchema.shape,
   ...rooSchema.shape,
+  ...vercelAiGatewaySchema.shape,
   ...codebaseIndexProviderSchema.shape
 });
 var providerSettingsWithIdSchema = providerSettingsSchema.extend({ id: z8.string().optional() });
@@ -3764,7 +3880,8 @@ var MODEL_ID_KEYS = [
   "requestyModelId",
   "litellmModelId",
   "huggingFaceModelId",
-  "ioIntelligenceModelId"
+  "ioIntelligenceModelId",
+  "vercelAiGatewayModelId"
 ];
 var getModelId = (settings) => {
   const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key]);
@@ -3778,6 +3895,9 @@ var getApiProtocol = (provider, modelId) => {
   if (provider && provider === "vertex" && modelId && modelId.toLowerCase().includes("claude")) {
     return "anthropic";
   }
+  if (provider && provider === "vercel-ai-gateway" && modelId && modelId.toLowerCase().startsWith("anthropic/")) {
+    return "anthropic";
+  }
   return "openai";
 };
 var MODELS_BY_PROVIDER = {
@@ -3869,7 +3989,8 @@ var MODELS_BY_PROVIDER = {
   litellm: { id: "litellm", label: "LiteLLM", models: [] },
   openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
   requesty: { id: "requesty", label: "Requesty", models: [] },
-  unbound: { id: "unbound", label: "Unbound", models: [] }
+  unbound: { id: "unbound", label: "Unbound", models: [] },
+  "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }
 };
 var dynamicProviders = [
   "glama",
@@ -3877,7 +3998,8 @@ var dynamicProviders = [
   "litellm",
   "openrouter",
   "requesty",
-  "unbound"
+  "unbound",
+  "vercel-ai-gateway"
 ];
 var isDynamicProvider = (key) => dynamicProviders.includes(key);
 
@@ -4344,12 +4466,14 @@ var SECRET_STATE_KEYS = [
   "codebaseIndexOpenAiCompatibleApiKey",
   "codebaseIndexGeminiApiKey",
   "codebaseIndexMistralApiKey",
+  "codebaseIndexVercelAiGatewayApiKey",
   "huggingFaceApiKey",
   "sambaNovaApiKey",
   "zaiApiKey",
   "fireworksApiKey",
   "featherlessApiKey",
-  "ioIntelligenceApiKey"
+  "ioIntelligenceApiKey",
+  "vercelAiGatewayApiKey"
 ];
 var isSecretStateKey = (key) => SECRET_STATE_KEYS.includes(key);
 var GLOBAL_STATE_KEYS = [...GLOBAL_SETTINGS_KEYS, ...PROVIDER_SETTINGS_KEYS].filter(
@@ -4570,18 +4694,8 @@ var mcpExecutionStatusSchema = z16.discriminatedUnion("status", [
 ]);
 
 // src/single-file-read-models.ts
-var SINGLE_FILE_READ_MODELS = /* @__PURE__ */ new Set(["roo/sonic"]);
 function shouldUseSingleFileRead(modelId) {
-
-    return true;
-  }
-  const patterns = Array.from(SINGLE_FILE_READ_MODELS);
-  for (const pattern of patterns) {
-    if (pattern.endsWith("*") && modelId.startsWith(pattern.slice(0, -1))) {
-      return true;
-    }
-  }
-  return false;
+  return modelId.includes("grok-code-fast-1");
 }
 
 // src/task.ts
@@ -4687,10 +4801,13 @@ export {
   PROVIDER_SETTINGS_KEYS,
   RooCodeEventName,
   SECRET_STATE_KEYS,
-  SINGLE_FILE_READ_MODELS,
   TaskCommandName,
   TaskStatus,
   TelemetryEventName,
+  VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE,
+  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
+  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
+  VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
   VERTEX_REGIONS,
   ZAI_DEFAULT_TEMPERATURE,
   ackSchema,
@@ -4847,6 +4964,8 @@ export {
   unboundDefaultModelInfo,
   verbosityLevels,
   verbosityLevelsSchema,
+  vercelAiGatewayDefaultModelId,
+  vercelAiGatewayDefaultModelInfo,
   vertexDefaultModelId,
   vertexModels,
   vscodeLlmDefaultModelId,