@roo-code/types 1.59.0 → 1.60.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
package/dist/index.js CHANGED
@@ -355,13 +355,12 @@ var taskEventSchema = z4.discriminatedUnion("eventName", [
  
  // src/experiment.ts
  import { z as z5 } from "zod";
- var experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption", "assistantMessageParser"];
+ var experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption"];
  var experimentIdsSchema = z5.enum(experimentIds);
  var experimentsSchema = z5.object({
  powerSteering: z5.boolean().optional(),
  multiFileApplyDiff: z5.boolean().optional(),
- preventFocusDisruption: z5.boolean().optional(),
- assistantMessageParser: z5.boolean().optional()
+ preventFocusDisruption: z5.boolean().optional()
  });
  
  // src/followup.ts
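
The assistantMessageParser flag is dropped from experimentIds and from experimentsSchema. Assuming these schemas are still exported as in earlier releases, the effect can be sketched as follows (illustrative only):

    import { experimentIdsSchema, experimentsSchema } from "@roo-code/types";

    // "assistantMessageParser" is no longer a valid experiment id in 1.60.0.
    console.log(experimentIdsSchema.safeParse("powerSteering").success);          // true
    console.log(experimentIdsSchema.safeParse("assistantMessageParser").success); // false

    // The experiments object now recognizes only the three remaining flags.
    experimentsSchema.parse({ powerSteering: true, multiFileApplyDiff: false });
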
@@ -1106,6 +1105,15 @@ var chutesModels = {
  outputPrice: 0,
  description: "DeepSeek V3 model."
  },
+ "deepseek-ai/DeepSeek-V3.1": {
+ maxTokens: 32768,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ description: "DeepSeek V3.1 model."
+ },
  "unsloth/Llama-3.3-70B-Instruct": {
  maxTokens: 32768,
  // From Groq
@@ -1398,7 +1406,8 @@ var deepSeekDefaultModelId = "deepseek-chat";
  var deepSeekModels = {
  "deepseek-chat": {
  maxTokens: 8192,
- contextWindow: 64e3,
+ // 8K max output
+ contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 0.27,
@@ -1412,8 +1421,9 @@ var deepSeekModels = {
  description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
  },
  "deepseek-reasoner": {
- maxTokens: 8192,
- contextWindow: 64e3,
+ maxTokens: 65536,
+ // 64K max output for reasoning mode
+ contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
  inputPrice: 0.55,
@@ -1424,7 +1434,7 @@ var deepSeekModels = {
  // $0.55 per million tokens (cache miss)
  cacheReadsPrice: 0.14,
  // $0.14 per million tokens (cache hit)
- description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 32K tokens.`
+ description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
  }
  };
  var DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6;
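
Both DeepSeek entries move to a 128K context window, and deepseek-reasoner's maximum output rises from 8K to 64K tokens while keeping the per-million-token cache pricing noted above. A rough sketch of using these fields to estimate request cost; the estimateCost helper is hypothetical, not part of the package, and assumes deepSeekModels is exported as in earlier releases:

    import { deepSeekModels, type ModelInfo } from "@roo-code/types";

    // Hypothetical helper: ModelInfo prices are USD per million tokens, with
    // cacheReadsPrice applied to cache-hit input tokens.
    function estimateCost(model: ModelInfo, missTokens: number, hitTokens: number, outputTokens: number): number {
      return (
        ((model.inputPrice ?? 0) * missTokens +
          (model.cacheReadsPrice ?? 0) * hitTokens +
          (model.outputPrice ?? 0) * outputTokens) /
        1_000_000
      );
    }

    const reasoner = deepSeekModels["deepseek-reasoner"];
    console.log(reasoner.contextWindow); // 128000 in 1.60.0 (was 64000)
    console.log(estimateCost(reasoner, 10_000, 50_000, 2_000));
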
@@ -1580,6 +1590,15 @@ var fireworksModels = {
  outputPrice: 0.9,
  description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
  },
+ "accounts/fireworks/models/deepseek-v3p1": {
+ maxTokens: 16384,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.56,
+ outputPrice: 1.68,
+ description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
+ },
  "accounts/fireworks/models/glm-4p5": {
  maxTokens: 16384,
  contextWindow: 128e3,
@@ -2013,9 +2032,11 @@ var groqModels = {
  maxTokens: 16384,
  contextWindow: 131072,
  supportsImages: false,
- supportsPromptCache: false,
+ supportsPromptCache: true,
  inputPrice: 1,
  outputPrice: 3,
+ cacheReadsPrice: 0.5,
+ // 50% discount for cached input tokens
  description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
  },
  "openai/gpt-oss-120b": {
@@ -2584,6 +2605,33 @@ var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
  "google/gemini-2.5-flash-preview-05-20:thinking"
  ]);
  
+ // src/providers/qwen-code.ts
+ var qwenCodeDefaultModelId = "qwen3-coder-plus";
+ var qwenCodeModels = {
+ "qwen3-coder-plus": {
+ maxTokens: 65536,
+ contextWindow: 1e6,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Qwen3 Coder Plus - High-performance coding model with 1M context window for large codebases"
+ },
+ "qwen3-coder-flash": {
+ maxTokens: 65536,
+ contextWindow: 1e6,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ cacheWritesPrice: 0,
+ cacheReadsPrice: 0,
+ description: "Qwen3 Coder Flash - Fast coding model with 1M context window optimized for speed"
+ }
+ };
+
  // src/providers/requesty.ts
  var requestyDefaultModelId = "coding/claude-4-sonnet";
  var requestyDefaultModelInfo = {
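
The new src/providers/qwen-code.ts module defines two free-tier model entries and a default model id; both qwenCodeModels and qwenCodeDefaultModelId also appear in the export list (see the final hunk). A minimal usage sketch:

    import { qwenCodeDefaultModelId, qwenCodeModels } from "@roo-code/types";

    // Metadata for the default Qwen Code model ("qwen3-coder-plus").
    const info = qwenCodeModels[qwenCodeDefaultModelId];
    console.log(info.contextWindow); // 1000000 (1M-token context)
    console.log(info.maxTokens);     // 65536
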
@@ -3408,6 +3456,7 @@ var providerNames = [
  "moonshot",
  "deepseek",
  "doubao",
+ "qwen-code",
  "unbound",
  "requesty",
  "human-relay",
@@ -3495,7 +3544,9 @@ var vertexSchema = apiModelIdProviderModelSchema.extend({
  vertexKeyFile: z8.string().optional(),
  vertexJsonCredentials: z8.string().optional(),
  vertexProjectId: z8.string().optional(),
- vertexRegion: z8.string().optional()
+ vertexRegion: z8.string().optional(),
+ enableUrlContext: z8.boolean().optional(),
+ enableGrounding: z8.boolean().optional()
  });
  var openAiSchema = baseProviderSettingsSchema.extend({
  openAiBaseUrl: z8.string().optional(),
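
vertexSchema gains two optional booleans, enableUrlContext and enableGrounding. Because the flat providerSettingsSchema spreads every provider schema's shape (see the hunk at 3688 below), a Vertex configuration using the new flags can be validated roughly as follows; the settings values themselves are illustrative:

    import { providerSettingsSchema } from "@roo-code/types";

    // Illustrative Vertex settings exercising the new optional flags.
    const result = providerSettingsSchema.safeParse({
      apiProvider: "vertex",
      vertexProjectId: "my-gcp-project", // placeholder id
      vertexRegion: "us-central1",
      enableUrlContext: true,
      enableGrounding: true,
    });
    console.log(result.success); // true for a well-formed object
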
@@ -3612,6 +3663,9 @@ var ioIntelligenceSchema = apiModelIdProviderModelSchema.extend({
  ioIntelligenceModelId: z8.string().optional(),
  ioIntelligenceApiKey: z8.string().optional()
  });
+ var qwenCodeSchema = apiModelIdProviderModelSchema.extend({
+ qwenCodeOauthPath: z8.string().optional()
+ });
  var rooSchema = apiModelIdProviderModelSchema.extend({
  // No additional fields needed - uses cloud authentication
  });
@@ -3651,6 +3705,7 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
  fireworksSchema.merge(z8.object({ apiProvider: z8.literal("fireworks") })),
  featherlessSchema.merge(z8.object({ apiProvider: z8.literal("featherless") })),
  ioIntelligenceSchema.merge(z8.object({ apiProvider: z8.literal("io-intelligence") })),
+ qwenCodeSchema.merge(z8.object({ apiProvider: z8.literal("qwen-code") })),
  rooSchema.merge(z8.object({ apiProvider: z8.literal("roo") })),
  defaultSchema
  ]);
@@ -3688,6 +3743,7 @@ var providerSettingsSchema = z8.object({
  ...fireworksSchema.shape,
  ...featherlessSchema.shape,
  ...ioIntelligenceSchema.shape,
+ ...qwenCodeSchema.shape,
  ...rooSchema.shape,
  ...codebaseIndexProviderSchema.shape
  });
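
qwenCodeSchema, whose only extra field is an optional qwenCodeOauthPath, is merged into both the discriminated union and the flat settings schema, and "qwen-code" is registered in providerNames. A hedged validation sketch; the OAuth path shown is just an example value:

    import { providerSettingsSchemaDiscriminated } from "@roo-code/types";

    // The union discriminates on apiProvider, so this object is checked against
    // the qwen-code branch.
    const parsed = providerSettingsSchemaDiscriminated.safeParse({
      apiProvider: "qwen-code",
      qwenCodeOauthPath: "/home/user/.qwen/oauth_creds.json", // example path
    });
    console.log(parsed.success);
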
@@ -3788,6 +3844,7 @@ var MODELS_BY_PROVIDER = {
  label: "OpenAI",
  models: Object.keys(openAiNativeModels)
  },
+ "qwen-code": { id: "qwen-code", label: "Qwen Code", models: Object.keys(qwenCodeModels) },
  roo: { id: "roo", label: "Roo", models: Object.keys(rooModels) },
  sambanova: {
  id: "sambanova",
@@ -4751,6 +4808,8 @@ export {
  providerSettingsSchema,
  providerSettingsSchemaDiscriminated,
  providerSettingsWithIdSchema,
+ qwenCodeDefaultModelId,
+ qwenCodeModels,
  reasoningEffortWithMinimalSchema,
  reasoningEfforts,
  reasoningEffortsSchema,