@roo-code/types 1.60.0 → 1.61.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -2648,16 +2648,16 @@ var requestyDefaultModelInfo = {
 };
 
 // src/providers/roo.ts
-var rooDefaultModelId = "roo/sonic";
+var rooDefaultModelId = "xai/grok-code-fast-1";
 var rooModels = {
-  "roo/sonic": {
+  "xai/grok-code-fast-1": {
     maxTokens: 16384,
     contextWindow: 262144,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 0,
     outputPrice: 0,
-    description: "A stealth reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: prompts and completions are logged by the model creator and used to improve the model.)"
+    description: "A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)"
   }
 };
 
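The Roo provider's default model switches from the stealth "roo/sonic" id to "xai/grok-code-fast-1", with the model parameters otherwise unchanged. A minimal sketch of reading the renamed entry, assuming rooDefaultModelId and rooModels are exported from @roo-code/types like the other provider tables:

import { rooDefaultModelId, rooModels } from "@roo-code/types";

// rooDefaultModelId is now "xai/grok-code-fast-1" (previously "roo/sonic").
const rooInfo = rooModels[rooDefaultModelId];
console.log(rooInfo.contextWindow);                   // 262144
console.log(rooInfo.supportsPromptCache);             // true
console.log(rooInfo.inputPrice, rooInfo.outputPrice); // 0, 0 (free through Roo Code Cloud)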
@@ -3265,8 +3265,19 @@ var vscodeLlmModels = {
 };
 
 // src/providers/xai.ts
-var xaiDefaultModelId = "grok-4";
+var xaiDefaultModelId = "grok-code-fast-1";
 var xaiModels = {
+  "grok-code-fast-1": {
+    maxTokens: 16384,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.2,
+    outputPrice: 1.5,
+    cacheWritesPrice: 0.02,
+    cacheReadsPrice: 0.02,
+    description: "xAI's Grok Code Fast model with 256K context window"
+  },
   "grok-4": {
     maxTokens: 8192,
     contextWindow: 256e3,
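The xAI provider gains a grok-code-fast-1 entry and promotes it to the default in place of grok-4. A short sketch of reading the new entry, assuming xaiDefaultModelId and xaiModels are exported; the prices are read as USD per million tokens, which is the convention the package's other model tables appear to use:

import { xaiDefaultModelId, xaiModels } from "@roo-code/types";

const grokCodeFast = xaiModels[xaiDefaultModelId]; // xaiDefaultModelId === "grok-code-fast-1"
console.log(grokCodeFast.contextWindow);   // 262144 (the description rounds this to "256K")
console.log(grokCodeFast.inputPrice);      // 0.2
console.log(grokCodeFast.outputPrice);     // 1.5
console.log(grokCodeFast.cacheReadsPrice); // 0.02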
@@ -3344,6 +3355,101 @@ var xaiModels = {
   }
 };
 
+// src/providers/vercel-ai-gateway.ts
+var vercelAiGatewayDefaultModelId = "anthropic/claude-sonnet-4";
+var VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
+  "anthropic/claude-3-haiku",
+  "anthropic/claude-3-opus",
+  "anthropic/claude-3.5-haiku",
+  "anthropic/claude-3.5-sonnet",
+  "anthropic/claude-3.7-sonnet",
+  "anthropic/claude-opus-4",
+  "anthropic/claude-opus-4.1",
+  "anthropic/claude-sonnet-4",
+  "openai/gpt-4.1",
+  "openai/gpt-4.1-mini",
+  "openai/gpt-4.1-nano",
+  "openai/gpt-4o",
+  "openai/gpt-4o-mini",
+  "openai/gpt-5",
+  "openai/gpt-5-mini",
+  "openai/gpt-5-nano",
+  "openai/o1",
+  "openai/o3",
+  "openai/o3-mini",
+  "openai/o4-mini"
+]);
+var VERCEL_AI_GATEWAY_VISION_ONLY_MODELS = /* @__PURE__ */ new Set([
+  "alibaba/qwen-3-14b",
+  "alibaba/qwen-3-235b",
+  "alibaba/qwen-3-30b",
+  "alibaba/qwen-3-32b",
+  "alibaba/qwen3-coder",
+  "amazon/nova-pro",
+  "anthropic/claude-3.5-haiku",
+  "google/gemini-1.5-flash-8b",
+  "google/gemini-2.0-flash-thinking",
+  "google/gemma-3-27b",
+  "mistral/devstral-small",
+  "xai/grok-vision-beta"
+]);
+var VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS = /* @__PURE__ */ new Set([
+  "amazon/nova-lite",
+  "anthropic/claude-3-haiku",
+  "anthropic/claude-3-opus",
+  "anthropic/claude-3-sonnet",
+  "anthropic/claude-3.5-sonnet",
+  "anthropic/claude-3.7-sonnet",
+  "anthropic/claude-opus-4",
+  "anthropic/claude-opus-4.1",
+  "anthropic/claude-sonnet-4",
+  "google/gemini-1.5-flash",
+  "google/gemini-1.5-pro",
+  "google/gemini-2.0-flash",
+  "google/gemini-2.0-flash-lite",
+  "google/gemini-2.0-pro",
+  "google/gemini-2.5-flash",
+  "google/gemini-2.5-flash-lite",
+  "google/gemini-2.5-pro",
+  "google/gemini-exp",
+  "meta/llama-3.2-11b",
+  "meta/llama-3.2-90b",
+  "meta/llama-3.3",
+  "meta/llama-4-maverick",
+  "meta/llama-4-scout",
+  "mistral/pixtral-12b",
+  "mistral/pixtral-large",
+  "moonshotai/kimi-k2",
+  "openai/gpt-4-turbo",
+  "openai/gpt-4.1",
+  "openai/gpt-4.1-mini",
+  "openai/gpt-4.1-nano",
+  "openai/gpt-4.5-preview",
+  "openai/gpt-4o",
+  "openai/gpt-4o-mini",
+  "openai/gpt-oss-120b",
+  "openai/gpt-oss-20b",
+  "openai/o3",
+  "openai/o3-pro",
+  "openai/o4-mini",
+  "vercel/v0-1.0-md",
+  "xai/grok-2-vision",
+  "zai/glm-4.5v"
+]);
+var vercelAiGatewayDefaultModelInfo = {
+  maxTokens: 64e3,
+  contextWindow: 2e5,
+  supportsImages: true,
+  supportsComputerUse: true,
+  supportsPromptCache: true,
+  inputPrice: 3,
+  outputPrice: 15,
+  cacheWritesPrice: 3.75,
+  cacheReadsPrice: 0.3,
+  description: "Claude Sonnet 4 significantly improves on Sonnet 3.7's industry-leading capabilities, excelling in coding with a state-of-the-art 72.7% on SWE-bench. The model balances performance and efficiency for internal and external use cases, with enhanced steerability for greater control over implementations. While not matching Opus 4 in most domains, it delivers an optimal mix of capability and practicality."
+};
+var VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE = 0.7;
+
 // src/providers/zai.ts
 var internationalZAiDefaultModelId = "glm-4.5";
 var internationalZAiModels = {
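The new src/providers/vercel-ai-gateway.ts module contributes a default model plus capability sets keyed by gateway model id. These constants are appended to the package's export block later in this diff, so they should be importable directly; a minimal sketch:

import {
  vercelAiGatewayDefaultModelId,
  vercelAiGatewayDefaultModelInfo,
  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
} from "@roo-code/types";

// The default routes to Claude Sonnet 4 with a 200K context window (2e5).
console.log(vercelAiGatewayDefaultModelId);                 // "anthropic/claude-sonnet-4"
console.log(vercelAiGatewayDefaultModelInfo.contextWindow); // 200000

// Capability checks reduce to set membership on gateway model ids.
VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS.has("anthropic/claude-sonnet-4"); // true
VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has("xai/grok-2-vision");       // true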
@@ -3472,7 +3578,8 @@ var providerNames = [
   "fireworks",
   "featherless",
   "io-intelligence",
-  "roo"
+  "roo",
+  "vercel-ai-gateway"
 ];
 var providerNamesSchema = z8.enum(providerNames);
 var providerSettingsEntrySchema = z8.object({
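Because "vercel-ai-gateway" now appears in providerNames, the derived zod enum accepts it. A small sketch, assuming providerNamesSchema is exported:

import { providerNamesSchema } from "@roo-code/types";

providerNamesSchema.parse("vercel-ai-gateway"); // passes in 1.61.0; 1.60.0 would have thrown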
@@ -3669,6 +3776,10 @@ var qwenCodeSchema = apiModelIdProviderModelSchema.extend({
 var rooSchema = apiModelIdProviderModelSchema.extend({
   // No additional fields needed - uses cloud authentication
 });
+var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
+  vercelAiGatewayApiKey: z8.string().optional(),
+  vercelAiGatewayModelId: z8.string().optional()
+});
 var defaultSchema = z8.object({
   apiProvider: z8.undefined()
 });
@@ -3707,6 +3818,7 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
   ioIntelligenceSchema.merge(z8.object({ apiProvider: z8.literal("io-intelligence") })),
   qwenCodeSchema.merge(z8.object({ apiProvider: z8.literal("qwen-code") })),
   rooSchema.merge(z8.object({ apiProvider: z8.literal("roo") })),
+  vercelAiGatewaySchema.merge(z8.object({ apiProvider: z8.literal("vercel-ai-gateway") })),
   defaultSchema
 ]);
 var providerSettingsSchema = z8.object({
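The discriminated union gains a matching branch, so settings objects with apiProvider set to "vercel-ai-gateway" validate against the new schema, in which both the API key and the model id are optional strings. A sketch, assuming providerSettingsSchemaDiscriminated is exported; the key value below is a placeholder:

import { providerSettingsSchemaDiscriminated } from "@roo-code/types";

const settings = providerSettingsSchemaDiscriminated.parse({
  apiProvider: "vercel-ai-gateway",
  vercelAiGatewayApiKey: "example-key",                // optional; treated as secret state (see below)
  vercelAiGatewayModelId: "anthropic/claude-sonnet-4", // optional model id
});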
@@ -3745,6 +3857,7 @@ var providerSettingsSchema = z8.object({
   ...ioIntelligenceSchema.shape,
   ...qwenCodeSchema.shape,
   ...rooSchema.shape,
+  ...vercelAiGatewaySchema.shape,
   ...codebaseIndexProviderSchema.shape
 });
 var providerSettingsWithIdSchema = providerSettingsSchema.extend({ id: z8.string().optional() });
@@ -3764,7 +3877,8 @@ var MODEL_ID_KEYS = [
   "requestyModelId",
   "litellmModelId",
   "huggingFaceModelId",
-  "ioIntelligenceModelId"
+  "ioIntelligenceModelId",
+  "vercelAiGatewayModelId"
 ];
 var getModelId = (settings) => {
   const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key]);
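getModelId resolves the active model id by scanning MODEL_ID_KEYS for the first populated key, so vercelAiGatewayModelId is now considered. A sketch, assuming getModelId is exported and returns the value stored under the matched key (the rest of its body falls outside this hunk):

import { getModelId } from "@roo-code/types";

getModelId({ vercelAiGatewayModelId: "anthropic/claude-sonnet-4" });
// expected: "anthropic/claude-sonnet-4"; in 1.60.0 this key was not in the lookup list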
@@ -3869,7 +3983,8 @@ var MODELS_BY_PROVIDER = {
   litellm: { id: "litellm", label: "LiteLLM", models: [] },
   openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
   requesty: { id: "requesty", label: "Requesty", models: [] },
-  unbound: { id: "unbound", label: "Unbound", models: [] }
+  unbound: { id: "unbound", label: "Unbound", models: [] },
+  "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }
 };
 var dynamicProviders = [
   "glama",
@@ -3877,7 +3992,8 @@ var dynamicProviders = [
   "litellm",
   "openrouter",
   "requesty",
-  "unbound"
+  "unbound",
+  "vercel-ai-gateway"
 ];
 var isDynamicProvider = (key) => dynamicProviders.includes(key);
 
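Vercel AI Gateway is registered both in MODELS_BY_PROVIDER (with an empty bundled model list, like the other entries in dynamicProviders) and in dynamicProviders itself, the group of providers whose model lists are not bundled with the package. A sketch, assuming MODELS_BY_PROVIDER and isDynamicProvider are exported:

import { MODELS_BY_PROVIDER, isDynamicProvider } from "@roo-code/types";

isDynamicProvider("vercel-ai-gateway");                     // true
console.log(MODELS_BY_PROVIDER["vercel-ai-gateway"].label); // "Vercel AI Gateway"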
@@ -4349,7 +4465,8 @@ var SECRET_STATE_KEYS = [
   "zaiApiKey",
   "fireworksApiKey",
   "featherlessApiKey",
-  "ioIntelligenceApiKey"
+  "ioIntelligenceApiKey",
+  "vercelAiGatewayApiKey"
 ];
 var isSecretStateKey = (key) => SECRET_STATE_KEYS.includes(key);
 var GLOBAL_STATE_KEYS = [...GLOBAL_SETTINGS_KEYS, ...PROVIDER_SETTINGS_KEYS].filter(
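The new vercelAiGatewayApiKey is added to SECRET_STATE_KEYS, so it is classified as secret state like the other provider API keys, while the model id remains ordinary configuration. A sketch, assuming isSecretStateKey is exported:

import { isSecretStateKey } from "@roo-code/types";

isSecretStateKey("vercelAiGatewayApiKey");  // true as of 1.61.0
isSecretStateKey("vercelAiGatewayModelId"); // false; the model id is not a secret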
@@ -4570,18 +4687,8 @@ var mcpExecutionStatusSchema = z16.discriminatedUnion("status", [
 ]);
 
 // src/single-file-read-models.ts
-var SINGLE_FILE_READ_MODELS = /* @__PURE__ */ new Set(["roo/sonic"]);
 function shouldUseSingleFileRead(modelId) {
-  if (SINGLE_FILE_READ_MODELS.has(modelId)) {
-    return true;
-  }
-  const patterns = Array.from(SINGLE_FILE_READ_MODELS);
-  for (const pattern of patterns) {
-    if (pattern.endsWith("*") && modelId.startsWith(pattern.slice(0, -1))) {
-      return true;
-    }
-  }
-  return false;
+  return modelId.includes("grok-code-fast-1");
 }
 
 // src/task.ts
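shouldUseSingleFileRead drops the SINGLE_FILE_READ_MODELS set and its wildcard matching in favor of a plain substring test, so any model id containing "grok-code-fast-1" opts into single-file reads and "roo/sonic" no longer does (the set is also removed from the export list below). A sketch, assuming the function is exported:

import { shouldUseSingleFileRead } from "@roo-code/types";

shouldUseSingleFileRead("xai/grok-code-fast-1"); // true: the id contains "grok-code-fast-1"
shouldUseSingleFileRead("grok-code-fast-1");     // true: the bare xAI id matches as well
shouldUseSingleFileRead("roo/sonic");            // false: the old allow-list entry is gone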
@@ -4687,10 +4794,13 @@ export {
   PROVIDER_SETTINGS_KEYS,
   RooCodeEventName,
   SECRET_STATE_KEYS,
-  SINGLE_FILE_READ_MODELS,
   TaskCommandName,
   TaskStatus,
   TelemetryEventName,
+  VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE,
+  VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
+  VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
+  VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
   VERTEX_REGIONS,
   ZAI_DEFAULT_TEMPERATURE,
   ackSchema,
@@ -4847,6 +4957,8 @@ export {
   unboundDefaultModelInfo,
   verbosityLevels,
   verbosityLevelsSchema,
+  vercelAiGatewayDefaultModelId,
+  vercelAiGatewayDefaultModelInfo,
   vertexDefaultModelId,
   vertexModels,
   vscodeLlmDefaultModelId,