@roo-code/types 1.59.0 → 1.61.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +209 -31
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +498 -179
- package/dist/index.d.ts +498 -179
- package/dist/index.js +201 -30
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -70,10 +70,13 @@ __export(index_exports, {
   PROVIDER_SETTINGS_KEYS: () => PROVIDER_SETTINGS_KEYS,
   RooCodeEventName: () => RooCodeEventName,
   SECRET_STATE_KEYS: () => SECRET_STATE_KEYS,
-  SINGLE_FILE_READ_MODELS: () => SINGLE_FILE_READ_MODELS,
   TaskCommandName: () => TaskCommandName,
   TaskStatus: () => TaskStatus,
   TelemetryEventName: () => TelemetryEventName,
+  VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE: () => VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE,
+  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS: () => VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
+  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS: () => VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
+  VERCEL_AI_GATEWAY_VISION_ONLY_MODELS: () => VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
   VERTEX_REGIONS: () => VERTEX_REGIONS,
   ZAI_DEFAULT_TEMPERATURE: () => ZAI_DEFAULT_TEMPERATURE,
   ackSchema: () => ackSchema,
@@ -191,6 +194,8 @@ __export(index_exports, {
   providerSettingsSchema: () => providerSettingsSchema,
   providerSettingsSchemaDiscriminated: () => providerSettingsSchemaDiscriminated,
   providerSettingsWithIdSchema: () => providerSettingsWithIdSchema,
+  qwenCodeDefaultModelId: () => qwenCodeDefaultModelId,
+  qwenCodeModels: () => qwenCodeModels,
   reasoningEffortWithMinimalSchema: () => reasoningEffortWithMinimalSchema,
   reasoningEfforts: () => reasoningEfforts,
   reasoningEffortsSchema: () => reasoningEffortsSchema,
@@ -228,6 +233,8 @@ __export(index_exports, {
   unboundDefaultModelInfo: () => unboundDefaultModelInfo,
   verbosityLevels: () => verbosityLevels,
   verbosityLevelsSchema: () => verbosityLevelsSchema,
+  vercelAiGatewayDefaultModelId: () => vercelAiGatewayDefaultModelId,
+  vercelAiGatewayDefaultModelInfo: () => vercelAiGatewayDefaultModelInfo,
   vertexDefaultModelId: () => vertexDefaultModelId,
   vertexModels: () => vertexModels,
   vscodeLlmDefaultModelId: () => vscodeLlmDefaultModelId,
@@ -594,13 +601,12 @@ var taskEventSchema = import_zod4.z.discriminatedUnion("eventName", [
 
 // src/experiment.ts
 var import_zod5 = require("zod");
-var experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption"
+var experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption"];
 var experimentIdsSchema = import_zod5.z.enum(experimentIds);
 var experimentsSchema = import_zod5.z.object({
   powerSteering: import_zod5.z.boolean().optional(),
   multiFileApplyDiff: import_zod5.z.boolean().optional(),
-  preventFocusDisruption: import_zod5.z.boolean().optional()
-  assistantMessageParser: import_zod5.z.boolean().optional()
+  preventFocusDisruption: import_zod5.z.boolean().optional()
 });
 
 // src/followup.ts
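
The assistantMessageParser experiment was dropped in this range. A minimal sketch of the effect, assuming the schema shape shown in the hunk above; Zod objects strip unknown keys by default, so a stale flag persisted by 1.59.0 is silently dropped rather than rejected:

    import { z } from "zod"

    // Mirrors the updated src/experiment.ts from this diff (sketch only).
    const experimentsSchema = z.object({
      powerSteering: z.boolean().optional(),
      multiFileApplyDiff: z.boolean().optional(),
      preventFocusDisruption: z.boolean().optional(),
    })

    // The old assistantMessageParser flag is an unknown key now and is stripped on parse.
    const parsed = experimentsSchema.parse({ powerSteering: true, assistantMessageParser: true })
    console.log(parsed) // { powerSteering: true }
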
@@ -1345,6 +1351,15 @@ var chutesModels = {
     outputPrice: 0,
     description: "DeepSeek V3 model."
   },
+  "deepseek-ai/DeepSeek-V3.1": {
+    maxTokens: 32768,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0,
+    outputPrice: 0,
+    description: "DeepSeek V3.1 model."
+  },
   "unsloth/Llama-3.3-70B-Instruct": {
     maxTokens: 32768,
     // From Groq
@@ -1637,7 +1652,8 @@ var deepSeekDefaultModelId = "deepseek-chat";
 var deepSeekModels = {
   "deepseek-chat": {
     maxTokens: 8192,
-
+    // 8K max output
+    contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 0.27,
@@ -1651,8 +1667,9 @@ var deepSeekModels = {
     description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
   },
   "deepseek-reasoner": {
-    maxTokens:
-
+    maxTokens: 65536,
+    // 64K max output for reasoning mode
+    contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 0.55,
@@ -1663,7 +1680,7 @@ var deepSeekModels = {
     // $0.55 per million tokens (cache miss)
     cacheReadsPrice: 0.14,
     // $0.14 per million tokens (cache hit)
-    description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to
+    description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
   }
 };
 var DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6;
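
For reference, a rough input-cost calculation using the deepseek-reasoner prices above (USD per million tokens; the token counts are illustrative, not from the source):

    // deepseek-reasoner: $0.55/M input on cache miss, $0.14/M on cache hit.
    const CACHE_MISS_PER_M = 0.55
    const CACHE_HIT_PER_M = 0.14

    // Hypothetical request: 50k prompt tokens served from cache, 10k fresh.
    const inputCostUSD = (50_000 * CACHE_HIT_PER_M + 10_000 * CACHE_MISS_PER_M) / 1_000_000
    console.log(inputCostUSD.toFixed(4)) // "0.0125"
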
@@ -1819,6 +1836,15 @@ var fireworksModels = {
     outputPrice: 0.9,
     description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
   },
+  "accounts/fireworks/models/deepseek-v3p1": {
+    maxTokens: 16384,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.56,
+    outputPrice: 1.68,
+    description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
+  },
   "accounts/fireworks/models/glm-4p5": {
     maxTokens: 16384,
     contextWindow: 128e3,
@@ -2252,9 +2278,11 @@ var groqModels = {
     maxTokens: 16384,
     contextWindow: 131072,
     supportsImages: false,
-    supportsPromptCache:
+    supportsPromptCache: true,
     inputPrice: 1,
     outputPrice: 3,
+    cacheReadsPrice: 0.5,
+    // 50% discount for cached input tokens
     description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
   },
   "openai/gpt-oss-120b": {
@@ -2823,6 +2851,33 @@ var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
   "google/gemini-2.5-flash-preview-05-20:thinking"
 ]);
 
+// src/providers/qwen-code.ts
+var qwenCodeDefaultModelId = "qwen3-coder-plus";
+var qwenCodeModels = {
+  "qwen3-coder-plus": {
+    maxTokens: 65536,
+    contextWindow: 1e6,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0,
+    outputPrice: 0,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Qwen3 Coder Plus - High-performance coding model with 1M context window for large codebases"
+  },
+  "qwen3-coder-flash": {
+    maxTokens: 65536,
+    contextWindow: 1e6,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0,
+    outputPrice: 0,
+    cacheWritesPrice: 0,
+    cacheReadsPrice: 0,
+    description: "Qwen3 Coder Flash - Fast coding model with 1M context window optimized for speed"
+  }
+};
+
 // src/providers/requesty.ts
 var requestyDefaultModelId = "coding/claude-4-sonnet";
 var requestyDefaultModelInfo = {
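
Both qwen-code symbols are re-exported from the package root (see the __export hunks above), so the default model's metadata can be looked up directly; a small sketch:

    import { qwenCodeDefaultModelId, qwenCodeModels } from "@roo-code/types"

    const info = qwenCodeModels[qwenCodeDefaultModelId]
    console.log(qwenCodeDefaultModelId, info.contextWindow, info.maxTokens)
    // "qwen3-coder-plus" 1000000 65536
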
@@ -2839,16 +2894,16 @@ var requestyDefaultModelInfo = {
 };
 
 // src/providers/roo.ts
-var rooDefaultModelId = "
+var rooDefaultModelId = "xai/grok-code-fast-1";
 var rooModels = {
-  "
+  "xai/grok-code-fast-1": {
     maxTokens: 16384,
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 0,
     outputPrice: 0,
-    description: "A
+    description: "A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)"
   }
 };
 
@@ -3456,8 +3511,19 @@ var vscodeLlmModels = {
 };
 
 // src/providers/xai.ts
-var xaiDefaultModelId = "grok-
+var xaiDefaultModelId = "grok-code-fast-1";
 var xaiModels = {
+  "grok-code-fast-1": {
+    maxTokens: 16384,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.2,
+    outputPrice: 1.5,
+    cacheWritesPrice: 0.02,
+    cacheReadsPrice: 0.02,
+    description: "xAI's Grok Code Fast model with 256K context window"
+  },
   "grok-4": {
     maxTokens: 8192,
     contextWindow: 256e3,
@@ -3535,6 +3601,101 @@ var xaiModels = {
   }
 };
 
+// src/providers/vercel-ai-gateway.ts
+var vercelAiGatewayDefaultModelId = "anthropic/claude-sonnet-4";
+var VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
+  "anthropic/claude-3-haiku",
+  "anthropic/claude-3-opus",
+  "anthropic/claude-3.5-haiku",
+  "anthropic/claude-3.5-sonnet",
+  "anthropic/claude-3.7-sonnet",
+  "anthropic/claude-opus-4",
+  "anthropic/claude-opus-4.1",
+  "anthropic/claude-sonnet-4",
+  "openai/gpt-4.1",
+  "openai/gpt-4.1-mini",
+  "openai/gpt-4.1-nano",
+  "openai/gpt-4o",
+  "openai/gpt-4o-mini",
+  "openai/gpt-5",
+  "openai/gpt-5-mini",
+  "openai/gpt-5-nano",
+  "openai/o1",
+  "openai/o3",
+  "openai/o3-mini",
+  "openai/o4-mini"
+]);
+var VERCEL_AI_GATEWAY_VISION_ONLY_MODELS = /* @__PURE__ */ new Set([
+  "alibaba/qwen-3-14b",
+  "alibaba/qwen-3-235b",
+  "alibaba/qwen-3-30b",
+  "alibaba/qwen-3-32b",
+  "alibaba/qwen3-coder",
+  "amazon/nova-pro",
+  "anthropic/claude-3.5-haiku",
+  "google/gemini-1.5-flash-8b",
+  "google/gemini-2.0-flash-thinking",
+  "google/gemma-3-27b",
+  "mistral/devstral-small",
+  "xai/grok-vision-beta"
+]);
+var VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS = /* @__PURE__ */ new Set([
+  "amazon/nova-lite",
+  "anthropic/claude-3-haiku",
+  "anthropic/claude-3-opus",
+  "anthropic/claude-3-sonnet",
+  "anthropic/claude-3.5-sonnet",
+  "anthropic/claude-3.7-sonnet",
+  "anthropic/claude-opus-4",
+  "anthropic/claude-opus-4.1",
+  "anthropic/claude-sonnet-4",
+  "google/gemini-1.5-flash",
+  "google/gemini-1.5-pro",
+  "google/gemini-2.0-flash",
+  "google/gemini-2.0-flash-lite",
+  "google/gemini-2.0-pro",
+  "google/gemini-2.5-flash",
+  "google/gemini-2.5-flash-lite",
+  "google/gemini-2.5-pro",
+  "google/gemini-exp",
+  "meta/llama-3.2-11b",
+  "meta/llama-3.2-90b",
+  "meta/llama-3.3",
+  "meta/llama-4-maverick",
+  "meta/llama-4-scout",
+  "mistral/pixtral-12b",
+  "mistral/pixtral-large",
+  "moonshotai/kimi-k2",
+  "openai/gpt-4-turbo",
+  "openai/gpt-4.1",
+  "openai/gpt-4.1-mini",
+  "openai/gpt-4.1-nano",
+  "openai/gpt-4.5-preview",
+  "openai/gpt-4o",
+  "openai/gpt-4o-mini",
+  "openai/gpt-oss-120b",
+  "openai/gpt-oss-20b",
+  "openai/o3",
+  "openai/o3-pro",
+  "openai/o4-mini",
+  "vercel/v0-1.0-md",
+  "xai/grok-2-vision",
+  "zai/glm-4.5v"
+]);
+var vercelAiGatewayDefaultModelInfo = {
+  maxTokens: 64e3,
+  contextWindow: 2e5,
+  supportsImages: true,
+  supportsComputerUse: true,
+  supportsPromptCache: true,
+  inputPrice: 3,
+  outputPrice: 15,
+  cacheWritesPrice: 3.75,
+  cacheReadsPrice: 0.3,
+  description: "Claude Sonnet 4 significantly improves on Sonnet 3.7's industry-leading capabilities, excelling in coding with a state-of-the-art 72.7% on SWE-bench. The model balances performance and efficiency for internal and external use cases, with enhanced steerability for greater control over implementations. While not matching Opus 4 in most domains, it delivers an optimal mix of capability and practicality."
+};
+var VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE = 0.7;
+
 // src/providers/zai.ts
 var internationalZAiDefaultModelId = "glm-4.5";
 var internationalZAiModels = {
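
The three Sets above are presumably used to flag capabilities for gateway models. A minimal sketch of how they can be combined (the helper name is hypothetical, not part of the package):

    import {
      VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
      VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
      VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
    } from "@roo-code/types"

    // Hypothetical helper: derive capability flags for a gateway model id.
    function gatewayCapabilities(modelId: string) {
      return {
        supportsPromptCache: VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS.has(modelId),
        supportsImages:
          VERCEL_AI_GATEWAY_VISION_ONLY_MODELS.has(modelId) ||
          VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has(modelId),
      }
    }

    console.log(gatewayCapabilities("anthropic/claude-sonnet-4"))
    // { supportsPromptCache: true, supportsImages: true }
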
@@ -3647,6 +3808,7 @@ var providerNames = [
   "moonshot",
   "deepseek",
   "doubao",
+  "qwen-code",
   "unbound",
   "requesty",
   "human-relay",
@@ -3662,7 +3824,8 @@ var providerNames = [
   "fireworks",
   "featherless",
   "io-intelligence",
-  "roo"
+  "roo",
+  "vercel-ai-gateway"
 ];
 var providerNamesSchema = import_zod8.z.enum(providerNames);
 var providerSettingsEntrySchema = import_zod8.z.object({
@@ -3734,7 +3897,9 @@ var vertexSchema = apiModelIdProviderModelSchema.extend({
   vertexKeyFile: import_zod8.z.string().optional(),
   vertexJsonCredentials: import_zod8.z.string().optional(),
   vertexProjectId: import_zod8.z.string().optional(),
-  vertexRegion: import_zod8.z.string().optional()
+  vertexRegion: import_zod8.z.string().optional(),
+  enableUrlContext: import_zod8.z.boolean().optional(),
+  enableGrounding: import_zod8.z.boolean().optional()
 });
 var openAiSchema = baseProviderSettingsSchema.extend({
   openAiBaseUrl: import_zod8.z.string().optional(),
@@ -3851,9 +4016,16 @@ var ioIntelligenceSchema = apiModelIdProviderModelSchema.extend({
   ioIntelligenceModelId: import_zod8.z.string().optional(),
   ioIntelligenceApiKey: import_zod8.z.string().optional()
 });
+var qwenCodeSchema = apiModelIdProviderModelSchema.extend({
+  qwenCodeOauthPath: import_zod8.z.string().optional()
+});
 var rooSchema = apiModelIdProviderModelSchema.extend({
   // No additional fields needed - uses cloud authentication
 });
+var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
+  vercelAiGatewayApiKey: import_zod8.z.string().optional(),
+  vercelAiGatewayModelId: import_zod8.z.string().optional()
+});
 var defaultSchema = import_zod8.z.object({
   apiProvider: import_zod8.z.undefined()
 });
@@ -3890,7 +4062,9 @@ var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiP
   fireworksSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("fireworks") })),
   featherlessSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("featherless") })),
   ioIntelligenceSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("io-intelligence") })),
+  qwenCodeSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("qwen-code") })),
   rooSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("roo") })),
+  vercelAiGatewaySchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("vercel-ai-gateway") })),
   defaultSchema
 ]);
 var providerSettingsSchema = import_zod8.z.object({
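
With the two new branches in providerSettingsSchemaDiscriminated, a vercel-ai-gateway profile validates like any other provider. A sketch, assuming the remaining base fields stay optional (the field values are illustrative):

    import { providerSettingsSchemaDiscriminated } from "@roo-code/types"

    const profile = providerSettingsSchemaDiscriminated.parse({
      apiProvider: "vercel-ai-gateway",
      vercelAiGatewayApiKey: "vag_xxx", // illustrative placeholder
      vercelAiGatewayModelId: "anthropic/claude-sonnet-4",
    })
    console.log(profile.apiProvider) // "vercel-ai-gateway"
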
@@ -3927,7 +4101,9 @@ var providerSettingsSchema = import_zod8.z.object({
   ...fireworksSchema.shape,
   ...featherlessSchema.shape,
   ...ioIntelligenceSchema.shape,
+  ...qwenCodeSchema.shape,
   ...rooSchema.shape,
+  ...vercelAiGatewaySchema.shape,
   ...codebaseIndexProviderSchema.shape
 });
 var providerSettingsWithIdSchema = providerSettingsSchema.extend({ id: import_zod8.z.string().optional() });
@@ -3947,7 +4123,8 @@ var MODEL_ID_KEYS = [
   "requestyModelId",
   "litellmModelId",
   "huggingFaceModelId",
-  "ioIntelligenceModelId"
+  "ioIntelligenceModelId",
+  "vercelAiGatewayModelId"
 ];
 var getModelId = (settings) => {
   const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key]);
@@ -4027,6 +4204,7 @@ var MODELS_BY_PROVIDER = {
     label: "OpenAI",
     models: Object.keys(openAiNativeModels)
   },
+  "qwen-code": { id: "qwen-code", label: "Qwen Code", models: Object.keys(qwenCodeModels) },
   roo: { id: "roo", label: "Roo", models: Object.keys(rooModels) },
   sambanova: {
     id: "sambanova",
@@ -4051,7 +4229,8 @@ var MODELS_BY_PROVIDER = {
   litellm: { id: "litellm", label: "LiteLLM", models: [] },
   openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
   requesty: { id: "requesty", label: "Requesty", models: [] },
-  unbound: { id: "unbound", label: "Unbound", models: [] }
+  unbound: { id: "unbound", label: "Unbound", models: [] },
+  "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }
 };
 var dynamicProviders = [
   "glama",
@@ -4059,7 +4238,8 @@ var dynamicProviders = [
   "litellm",
   "openrouter",
   "requesty",
-  "unbound"
+  "unbound",
+  "vercel-ai-gateway"
 ];
 var isDynamicProvider = (key) => dynamicProviders.includes(key);
 
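
vercel-ai-gateway joins the dynamic providers (empty models array above, so the list is fetched at runtime), while qwen-code ships a static table. A quick check, assuming isDynamicProvider is re-exported from the package root like the other helpers:

    import { isDynamicProvider } from "@roo-code/types"

    isDynamicProvider("vercel-ai-gateway") // true  - models are fetched at runtime
    isDynamicProvider("qwen-code")         // false - uses the static qwenCodeModels table
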
@@ -4531,7 +4711,8 @@ var SECRET_STATE_KEYS = [
   "zaiApiKey",
   "fireworksApiKey",
   "featherlessApiKey",
-  "ioIntelligenceApiKey"
+  "ioIntelligenceApiKey",
+  "vercelAiGatewayApiKey"
 ];
 var isSecretStateKey = (key) => SECRET_STATE_KEYS.includes(key);
 var GLOBAL_STATE_KEYS = [...GLOBAL_SETTINGS_KEYS, ...PROVIDER_SETTINGS_KEYS].filter(
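
The new vercelAiGatewayApiKey joins SECRET_STATE_KEYS and is therefore excluded from GLOBAL_STATE_KEYS by the filter above. A sketch, assuming isSecretStateKey is re-exported:

    import { isSecretStateKey } from "@roo-code/types"

    isSecretStateKey("vercelAiGatewayApiKey")  // true  - added in this range
    isSecretStateKey("vercelAiGatewayModelId") // false - model ids are not secrets
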
@@ -4752,18 +4933,8 @@ var mcpExecutionStatusSchema = import_zod16.z.discriminatedUnion("status", [
 ]);
 
 // src/single-file-read-models.ts
-var SINGLE_FILE_READ_MODELS = /* @__PURE__ */ new Set(["roo/sonic"]);
 function shouldUseSingleFileRead(modelId) {
-
-    return true;
-  }
-  const patterns = Array.from(SINGLE_FILE_READ_MODELS);
-  for (const pattern of patterns) {
-    if (pattern.endsWith("*") && modelId.startsWith(pattern.slice(0, -1))) {
-      return true;
-    }
-  }
-  return false;
+  return modelId.includes("grok-code-fast-1");
 }
 
 // src/task.ts
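
The set-and-wildcard lookup against SINGLE_FILE_READ_MODELS is gone; the check is now a plain substring match. A mirrored sketch of the new behaviour (not the package export itself):

    // Mirrors the simplified src/single-file-read-models.ts from this diff.
    function shouldUseSingleFileRead(modelId: string): boolean {
      return modelId.includes("grok-code-fast-1")
    }

    shouldUseSingleFileRead("xai/grok-code-fast-1") // true
    shouldUseSingleFileRead("roo/sonic")            // false (matched the old "roo/sonic" set entry)
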
@@ -4870,10 +5041,13 @@ var commandExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
   PROVIDER_SETTINGS_KEYS,
   RooCodeEventName,
   SECRET_STATE_KEYS,
-  SINGLE_FILE_READ_MODELS,
   TaskCommandName,
   TaskStatus,
   TelemetryEventName,
+  VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE,
+  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
+  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
+  VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
   VERTEX_REGIONS,
   ZAI_DEFAULT_TEMPERATURE,
   ackSchema,
@@ -4991,6 +5165,8 @@ var commandExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
   providerSettingsSchema,
   providerSettingsSchemaDiscriminated,
   providerSettingsWithIdSchema,
+  qwenCodeDefaultModelId,
+  qwenCodeModels,
   reasoningEffortWithMinimalSchema,
   reasoningEfforts,
   reasoningEffortsSchema,
@@ -5028,6 +5204,8 @@ var commandExecutionStatusSchema = import_zod19.z.discriminatedUnion("status", [
   unboundDefaultModelInfo,
   verbosityLevels,
   verbosityLevelsSchema,
+  vercelAiGatewayDefaultModelId,
+  vercelAiGatewayDefaultModelInfo,
   vertexDefaultModelId,
   vertexModels,
   vscodeLlmDefaultModelId,