@roo-code/types 1.108.0 → 1.110.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -542,6 +542,7 @@ var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
  RooCodeEventName2["TaskModeSwitched"] = "taskModeSwitched";
  RooCodeEventName2["TaskAskResponded"] = "taskAskResponded";
  RooCodeEventName2["TaskUserMessage"] = "taskUserMessage";
+ RooCodeEventName2["QueuedMessagesUpdated"] = "queuedMessagesUpdated";
  RooCodeEventName2["TaskTokenUsageUpdated"] = "taskTokenUsageUpdated";
  RooCodeEventName2["TaskToolFailed"] = "taskToolFailed";
  RooCodeEventName2["ModeChanged"] = "modeChanged";
@@ -601,6 +602,7 @@ var rooCodeEventsSchema = import_zod3.z.object({
  ["taskModeSwitched" /* TaskModeSwitched */]: import_zod3.z.tuple([import_zod3.z.string(), import_zod3.z.string()]),
  ["taskAskResponded" /* TaskAskResponded */]: import_zod3.z.tuple([import_zod3.z.string()]),
  ["taskUserMessage" /* TaskUserMessage */]: import_zod3.z.tuple([import_zod3.z.string()]),
+ ["queuedMessagesUpdated" /* QueuedMessagesUpdated */]: import_zod3.z.tuple([import_zod3.z.string(), import_zod3.z.array(queuedMessageSchema)]),
  ["taskToolFailed" /* TaskToolFailed */]: import_zod3.z.tuple([import_zod3.z.string(), toolNamesSchema, import_zod3.z.string()]),
  ["taskTokenUsageUpdated" /* TaskTokenUsageUpdated */]: import_zod3.z.tuple([import_zod3.z.string(), tokenUsageSchema, toolUsageSchema]),
  ["modeChanged" /* ModeChanged */]: import_zod3.z.tuple([import_zod3.z.string()]),
@@ -706,6 +708,11 @@ var taskEventSchema = import_zod3.z.discriminatedUnion("eventName", [
  payload: rooCodeEventsSchema.shape["taskAskResponded" /* TaskAskResponded */],
  taskId: import_zod3.z.number().optional()
  }),
+ import_zod3.z.object({
+ eventName: import_zod3.z.literal("queuedMessagesUpdated" /* QueuedMessagesUpdated */),
+ payload: rooCodeEventsSchema.shape["queuedMessagesUpdated" /* QueuedMessagesUpdated */],
+ taskId: import_zod3.z.number().optional()
+ }),
  // Task Analytics
  import_zod3.z.object({
  eventName: import_zod3.z.literal("taskToolFailed" /* TaskToolFailed */),
@@ -959,6 +966,38 @@ var anthropicModels = {
  }
  ]
  },
+ "claude-opus-4-6": {
+ maxTokens: 128e3,
+ // Overridden to 8k if `enableReasoningEffort` is false.
+ contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag
+ supportsImages: true,
+ supportsPromptCache: true,
+ inputPrice: 5,
+ // $5 per million input tokens (≤200K context)
+ outputPrice: 25,
+ // $25 per million output tokens (≤200K context)
+ cacheWritesPrice: 6.25,
+ // $6.25 per million tokens
+ cacheReadsPrice: 0.5,
+ // $0.50 per million tokens
+ supportsReasoningBudget: true,
+ // Tiered pricing for extended context (requires beta flag)
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 10,
+ // $10 per million input tokens (>200K context)
+ outputPrice: 37.5,
+ // $37.50 per million output tokens (>200K context)
+ cacheWritesPrice: 12.5,
+ // $12.50 per million tokens (>200K context)
+ cacheReadsPrice: 1
+ // $1.00 per million tokens (>200K context)
+ }
+ ]
+ },
  "claude-opus-4-5-20251101": {
  maxTokens: 32e3,
  // Overridden to 8k if `enableReasoningEffort` is false.
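The new claude-opus-4-6 entry uses the same tiered-pricing shape as the existing 1M-context Sonnet entries: the base prices apply up to the default 200K window, and the single tier carries the >200K prices. A minimal sketch of estimating input cost from these fields, assuming anthropicModels is exported and that consumers apply the tier price once the prompt exceeds 200K tokens (that selection rule is an assumption, not this package's implementation):

import { anthropicModels } from "@roo-code/types"

// Estimate input cost in dollars for claude-opus-4-6 under tiered pricing.
function estimateInputCost(promptTokens: number): number {
	const info = anthropicModels["claude-opus-4-6"]
	// Assumption: the >200K tier price applies when the prompt exceeds the
	// default 200K context window and the 1M beta flag is enabled.
	const extendedTier = promptTokens > 200_000 ? info.tiers?.[0] : undefined
	const pricePerMillion = extendedTier?.inputPrice ?? info.inputPrice ?? 0
	return (promptTokens / 1_000_000) * pricePerMillion
}

console.log(estimateInputCost(300_000)) // 0.3M tokens * $10/M = $3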
@@ -1343,6 +1382,40 @@ var bedrockModels = {
  maxCachePoints: 4,
  cachableFields: ["system", "messages", "tools"]
  },
+ "anthropic.claude-opus-4-6-v1": {
+ maxTokens: 8192,
+ contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningBudget: true,
+ inputPrice: 5,
+ // $5 per million input tokens (≤200K context)
+ outputPrice: 25,
+ // $25 per million output tokens (≤200K context)
+ cacheWritesPrice: 6.25,
+ // $6.25 per million tokens
+ cacheReadsPrice: 0.5,
+ // $0.50 per million tokens
+ minTokensPerCachePoint: 1024,
+ maxCachePoints: 4,
+ cachableFields: ["system", "messages", "tools"],
+ // Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 10,
+ // $10 per million input tokens (>200K context)
+ outputPrice: 37.5,
+ // $37.50 per million output tokens (>200K context)
+ cacheWritesPrice: 12.5,
+ // $12.50 per million tokens (>200K context)
+ cacheReadsPrice: 1
+ // $1.00 per million tokens (>200K context)
+ }
+ ]
+ },
  "anthropic.claude-opus-4-5-20251101-v1:0": {
  maxTokens: 8192,
  contextWindow: 2e5,
@@ -1689,13 +1762,15 @@ var BEDROCK_REGIONS = [
  ].sort((a, b) => a.value.localeCompare(b.value));
  var BEDROCK_1M_CONTEXT_MODEL_IDS = [
  "anthropic.claude-sonnet-4-20250514-v1:0",
- "anthropic.claude-sonnet-4-5-20250929-v1:0"
+ "anthropic.claude-sonnet-4-5-20250929-v1:0",
+ "anthropic.claude-opus-4-6-v1"
  ];
  var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
  "anthropic.claude-sonnet-4-20250514-v1:0",
  "anthropic.claude-sonnet-4-5-20250929-v1:0",
  "anthropic.claude-haiku-4-5-20251001-v1:0",
- "anthropic.claude-opus-4-5-20251101-v1:0"
+ "anthropic.claude-opus-4-5-20251101-v1:0",
+ "anthropic.claude-opus-4-6-v1"
  ];
  var BEDROCK_SERVICE_TIER_MODEL_IDS = [
  // Amazon Nova models
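anthropic.claude-opus-4-6-v1 now appears in both BEDROCK_1M_CONTEXT_MODEL_IDS and BEDROCK_GLOBAL_INFERENCE_MODEL_IDS. A minimal sketch of gating the 1M-context beta on the first list, assuming the constant is exported (the gating helper itself is hypothetical):

import { BEDROCK_1M_CONTEXT_MODEL_IDS } from "@roo-code/types"

// Returns true when a Bedrock model ID is eligible for the 1M-context beta.
function supports1MContext(bedrockModelId: string): boolean {
	return (BEDROCK_1M_CONTEXT_MODEL_IDS as readonly string[]).includes(bedrockModelId)
}

console.log(supports1MContext("anthropic.claude-opus-4-6-v1")) // true as of 1.110.0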
@@ -2324,6 +2399,16 @@ var fireworksModels = {
  cacheReadsPrice: 0.15,
  description: "The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems."
  },
+ "accounts/fireworks/models/kimi-k2p5": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: true,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 3,
+ cacheReadsPrice: 0.1,
+ description: "Kimi K2.5 is Moonshot AI's flagship agentic model and a new SOTA open model. It unifies vision and text, thinking and non-thinking modes, and single-agent and multi-agent execution into one model. Fireworks enables users to control the reasoning behavior and inspect its reasoning history for greater transparency."
+ },
  "accounts/fireworks/models/minimax-m2": {
  maxTokens: 4096,
  contextWindow: 204800,
@@ -3515,7 +3600,7 @@ var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
  var OPENAI_AZURE_AI_INFERENCE_PATH = "/models/chat/completions";
 
  // src/providers/openai-codex.ts
- var openAiCodexDefaultModelId = "gpt-5.2-codex";
+ var openAiCodexDefaultModelId = "gpt-5.3-codex";
  var openAiCodexModels = {
  "gpt-5.1-codex-max": {
  maxTokens: 128e3,
@@ -3547,6 +3632,20 @@ var openAiCodexModels = {
  supportsTemperature: false,
  description: "GPT-5.1 Codex: GPT-5.1 optimized for agentic coding via ChatGPT subscription"
  },
+ "gpt-5.3-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.3 Codex: OpenAI's flagship coding model via ChatGPT subscription"
+ },
  "gpt-5.2-codex": {
  maxTokens: 128e3,
  contextWindow: 4e5,
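With the default switched to gpt-5.3-codex, its model info now drives reasoning-effort handling. A minimal sketch of resolving the default entry and clamping a requested effort to the supported list, assuming openAiCodexDefaultModelId and openAiCodexModels are exported (the clamping helper is an illustration, not part of this package):

import { openAiCodexDefaultModelId, openAiCodexModels } from "@roo-code/types"

const defaultInfo = openAiCodexModels[openAiCodexDefaultModelId]

// Clamp a requested reasoning effort to what the default model supports.
function clampReasoningEffort(requested: string): string {
	const supported = defaultInfo.supportsReasoningEffort
	if (Array.isArray(supported) && (supported as readonly string[]).includes(requested)) {
		return requested
	}
	// gpt-5.3-codex ships with reasoningEffort: "medium" as its default.
	return String(defaultInfo.reasoningEffort ?? "medium")
}

console.log(clampReasoningEffort("xhigh")) // "xhigh"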
@@ -3689,8 +3788,9 @@ var OPEN_ROUTER_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
  "anthropic/claude-sonnet-4.5",
  "anthropic/claude-opus-4",
  "anthropic/claude-opus-4.1",
- "anthropic/claude-haiku-4.5",
  "anthropic/claude-opus-4.5",
+ "anthropic/claude-opus-4.6",
+ "anthropic/claude-haiku-4.5",
  "google/gemini-2.5-flash-preview",
  "google/gemini-2.5-flash-preview:thinking",
  "google/gemini-2.5-flash-preview-05-20",
@@ -3710,9 +3810,10 @@ var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
  "anthropic/claude-3.7-sonnet:beta",
  "anthropic/claude-opus-4",
  "anthropic/claude-opus-4.1",
+ "anthropic/claude-opus-4.5",
+ "anthropic/claude-opus-4.6",
  "anthropic/claude-sonnet-4",
  "anthropic/claude-sonnet-4.5",
- "anthropic/claude-opus-4.5",
  "anthropic/claude-haiku-4.5",
  "google/gemini-2.5-pro-preview",
  "google/gemini-2.5-pro",
@@ -4170,6 +4271,37 @@ var vertexModels = {
  cacheReadsPrice: 0.1,
  supportsReasoningBudget: true
  },
+ "claude-opus-4-6": {
+ maxTokens: 8192,
+ contextWindow: 2e5,
+ // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
+ supportsImages: true,
+ supportsPromptCache: true,
+ inputPrice: 5,
+ // $5 per million input tokens (≤200K context)
+ outputPrice: 25,
+ // $25 per million output tokens (≤200K context)
+ cacheWritesPrice: 6.25,
+ // $6.25 per million tokens
+ cacheReadsPrice: 0.5,
+ // $0.50 per million tokens
+ supportsReasoningBudget: true,
+ // Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
+ tiers: [
+ {
+ contextWindow: 1e6,
+ // 1M tokens with beta flag
+ inputPrice: 10,
+ // $10 per million input tokens (>200K context)
+ outputPrice: 37.5,
+ // $37.50 per million output tokens (>200K context)
+ cacheWritesPrice: 12.5,
+ // $12.50 per million tokens (>200K context)
+ cacheReadsPrice: 1
+ // $1.00 per million tokens (>200K context)
+ }
+ ]
+ },
  "claude-opus-4-5@20251101": {
  maxTokens: 8192,
  contextWindow: 2e5,
@@ -4359,7 +4491,11 @@ var vertexModels = {
  description: "Kimi K2 Thinking Model with 256K context window."
  }
  };
- var VERTEX_1M_CONTEXT_MODEL_IDS = ["claude-sonnet-4@20250514", "claude-sonnet-4-5@20250929"];
+ var VERTEX_1M_CONTEXT_MODEL_IDS = [
+ "claude-sonnet-4@20250514",
+ "claude-sonnet-4-5@20250929",
+ "claude-opus-4-6"
+ ];
  var VERTEX_REGIONS = [
  { value: "global", label: "global" },
  { value: "us-central1", label: "us-central1" },
@@ -4607,6 +4743,8 @@ var xaiModels = {
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
  description: "xAI's Grok 4.1 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+ supportsReasoningEffort: ["low", "high"],
+ reasoningEffort: "low",
  includedTools: ["search_replace"],
  excludedTools: ["apply_diff"]
  },
@@ -4633,6 +4771,8 @@ var xaiModels = {
  cacheWritesPrice: 0.05,
  cacheReadsPrice: 0.05,
  description: "xAI's Grok 4 Fast model with 2M context window, optimized for high-performance agentic tool calling with reasoning",
+ supportsReasoningEffort: ["low", "high"],
+ reasoningEffort: "low",
  includedTools: ["search_replace"],
  excludedTools: ["apply_diff"]
  },
@@ -4702,6 +4842,8 @@ var VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
  "anthropic/claude-3.7-sonnet",
  "anthropic/claude-opus-4",
  "anthropic/claude-opus-4.1",
+ "anthropic/claude-opus-4.5",
+ "anthropic/claude-opus-4.6",
  "anthropic/claude-sonnet-4",
  "openai/gpt-4.1",
  "openai/gpt-4.1-mini",
@@ -4739,6 +4881,8 @@ var VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS = /* @__PURE__ */ new Set([
  "anthropic/claude-3.7-sonnet",
  "anthropic/claude-opus-4",
  "anthropic/claude-opus-4.1",
+ "anthropic/claude-opus-4.5",
+ "anthropic/claude-opus-4.6",
  "anthropic/claude-sonnet-4",
  "google/gemini-1.5-flash",
  "google/gemini-1.5-pro",
@@ -5380,8 +5524,6 @@ var vertexSchema = apiModelIdProviderModelSchema.extend({
  vertexJsonCredentials: import_zod8.z.string().optional(),
  vertexProjectId: import_zod8.z.string().optional(),
  vertexRegion: import_zod8.z.string().optional(),
- enableUrlContext: import_zod8.z.boolean().optional(),
- enableGrounding: import_zod8.z.boolean().optional(),
  vertex1MContext: import_zod8.z.boolean().optional()
  // Enable 'context-1m-2025-08-07' beta for 1M context window.
  });
@@ -5420,9 +5562,7 @@ var lmStudioSchema = baseProviderSettingsSchema.extend({
  });
  var geminiSchema = apiModelIdProviderModelSchema.extend({
  geminiApiKey: import_zod8.z.string().optional(),
- googleGeminiBaseUrl: import_zod8.z.string().optional(),
- enableUrlContext: import_zod8.z.boolean().optional(),
- enableGrounding: import_zod8.z.boolean().optional()
+ googleGeminiBaseUrl: import_zod8.z.string().optional()
  });
  var geminiCliSchema = apiModelIdProviderModelSchema.extend({
  geminiCliOAuthPath: import_zod8.z.string().optional(),
@@ -6424,7 +6564,12 @@ var globalSettingsSchema = import_zod14.z.object({
  * Whether to show the worktree selector in the home screen.
  * @default true
  */
- showWorktreesInHomeScreen: import_zod14.z.boolean().optional()
+ showWorktreesInHomeScreen: import_zod14.z.boolean().optional(),
+ /**
+ * List of native tool names to globally disable.
+ * Tools in this list will be excluded from prompt generation and rejected at execution time.
+ */
+ disabledTools: import_zod14.z.array(toolNamesSchema).optional()
  });
  var GLOBAL_SETTINGS_KEYS = globalSettingsSchema.keyof().options;
  var rooCodeSettingsSchema = providerSettingsSchema.merge(globalSettingsSchema);
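The new disabledTools setting is an array validated against toolNamesSchema, so unknown tool names are rejected at parse time. A minimal sketch of validating a settings object with it, assuming globalSettingsSchema is exported (the tool names shown are illustrative):

import { globalSettingsSchema } from "@roo-code/types"

// Parse a settings object that globally disables two native tools.
const result = globalSettingsSchema.safeParse({
	showWorktreesInHomeScreen: true,
	disabledTools: ["browser_action", "execute_command"],
})

console.log(result.success ? "valid settings" : result.error.issues)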
@@ -6602,7 +6747,8 @@ var organizationDefaultSettingsSchema = globalSettingsSchema.pick({
  terminalCommandDelay: true,
  terminalShellIntegrationDisabled: true,
  terminalShellIntegrationTimeout: true,
- terminalZshClearEolMark: true
+ terminalZshClearEolMark: true,
+ disabledTools: true
  }).merge(
  import_zod16.z.object({
  maxOpenTabsContext: import_zod16.z.number().int().nonnegative().optional(),
@@ -7082,12 +7228,10 @@ var taskCommandSchema = import_zod18.z.discriminatedUnion("commandName", [
  })
  }),
  import_zod18.z.object({
- commandName: import_zod18.z.literal("CancelTask" /* CancelTask */),
- data: import_zod18.z.string()
+ commandName: import_zod18.z.literal("CancelTask" /* CancelTask */)
  }),
  import_zod18.z.object({
- commandName: import_zod18.z.literal("CloseTask" /* CloseTask */),
- data: import_zod18.z.string()
+ commandName: import_zod18.z.literal("CloseTask" /* CloseTask */)
  }),
  import_zod18.z.object({
  commandName: import_zod18.z.literal("ResumeTask" /* ResumeTask */),