@roo-code/types 1.74.0 → 1.75.0

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -122,6 +122,8 @@ __export(index_exports, {
   customModePromptsSchema: () => customModePromptsSchema,
   customModesSettingsSchema: () => customModesSettingsSchema,
   customSupportPromptsSchema: () => customSupportPromptsSchema,
+  deepInfraDefaultModelId: () => deepInfraDefaultModelId,
+  deepInfraDefaultModelInfo: () => deepInfraDefaultModelInfo,
   deepSeekDefaultModelId: () => deepSeekDefaultModelId,
   deepSeekModels: () => deepSeekModels,
   discriminatedProviderSettingsWithIdSchema: () => discriminatedProviderSettingsWithIdSchema,
@@ -229,6 +231,8 @@ __export(index_exports, {
   rooModels: () => rooModels,
   sambaNovaDefaultModelId: () => sambaNovaDefaultModelId,
   sambaNovaModels: () => sambaNovaModels,
+  serviceTierSchema: () => serviceTierSchema,
+  serviceTiers: () => serviceTiers,
   shareResponseSchema: () => shareResponseSchema,
   shouldUseSingleFileRead: () => shouldUseSingleFileRead,
   staticAppPropertiesSchema: () => staticAppPropertiesSchema,
@@ -370,6 +374,7 @@ var clineMessageSchema = import_zod.z.object({
   contextCondense: contextCondenseSchema.optional(),
   isProtected: import_zod.z.boolean().optional(),
   apiProtocol: import_zod.z.union([import_zod.z.literal("openai"), import_zod.z.literal("anthropic")]).optional(),
+  isAnswered: import_zod.z.boolean().optional(),
   metadata: import_zod.z.object({
     gpt5: import_zod.z.object({
       previous_response_id: import_zod.z.string().optional(),
@@ -629,6 +634,8 @@ var reasoningEffortsSchema = import_zod5.z.enum(reasoningEfforts);
 var reasoningEffortWithMinimalSchema = import_zod5.z.union([reasoningEffortsSchema, import_zod5.z.literal("minimal")]);
 var verbosityLevels = ["low", "medium", "high"];
 var verbosityLevelsSchema = import_zod5.z.enum(verbosityLevels);
+var serviceTiers = ["default", "flex", "priority"];
+var serviceTierSchema = import_zod5.z.enum(serviceTiers);
 var modelParameters = ["max_tokens", "temperature", "reasoning", "include_reasoning"];
 var modelParametersSchema = import_zod5.z.enum(modelParameters);
 var isModelParameter = (value) => modelParameters.includes(value);
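Both `serviceTiers` and `serviceTierSchema` are exported from the package root (see the export-list hunks above and below), so consumers can validate tier strings directly. A minimal usage sketch, not part of the diff itself:

```js
// Validate an OpenAI service tier with the newly exported zod enum.
const { serviceTiers, serviceTierSchema } = require("@roo-code/types");

console.log(serviceTiers); // ["default", "flex", "priority"]
console.log(serviceTierSchema.safeParse("flex").success); // true
console.log(serviceTierSchema.safeParse("batch").success); // false ("batch" is not a tier)
```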
@@ -656,8 +663,15 @@ var modelInfoSchema = import_zod5.z.object({
   minTokensPerCachePoint: import_zod5.z.number().optional(),
   maxCachePoints: import_zod5.z.number().optional(),
   cachableFields: import_zod5.z.array(import_zod5.z.string()).optional(),
+  /**
+   * Service tiers with pricing information.
+   * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
+   * The top-level input/output/cache* fields represent the default/standard tier.
+   */
   tiers: import_zod5.z.array(
     import_zod5.z.object({
+      name: serviceTierSchema.optional(),
+      // Service tier name (flex, priority, etc.)
       contextWindow: import_zod5.z.number(),
       inputPrice: import_zod5.z.number().optional(),
       outputPrice: import_zod5.z.number().optional(),
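Per the doc comment, a tier entry overrides only the prices it specifies, with the model's top-level fields serving as the default tier. A hedged sketch of how a consumer might resolve effective pricing; `resolveTierPricing` is a hypothetical helper, not part of `@roo-code/types`:

```js
// Hypothetical helper: take a named tier's prices from modelInfo.tiers,
// falling back to the top-level (default/standard tier) values.
function resolveTierPricing(modelInfo, tierName) {
  const tier = (modelInfo.tiers ?? []).find((t) => t.name === tierName);
  return {
    contextWindow: tier?.contextWindow ?? modelInfo.contextWindow,
    inputPrice: tier?.inputPrice ?? modelInfo.inputPrice,
    outputPrice: tier?.outputPrice ?? modelInfo.outputPrice,
    cacheReadsPrice: tier?.cacheReadsPrice ?? modelInfo.cacheReadsPrice,
  };
}
```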
@@ -1620,6 +1634,15 @@ var chutesModels = {
     outputPrice: 0.5926,
     description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
   },
+  "moonshotai/Kimi-K2-Instruct-0905": {
+    maxTokens: 32768,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.1999,
+    outputPrice: 0.8001,
+    description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
+  },
   "Qwen/Qwen3-235B-A22B-Thinking-2507": {
     maxTokens: 32768,
     contextWindow: 262144,
@@ -1707,14 +1730,14 @@ var deepSeekModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
-    inputPrice: 0.27,
-    // $0.27 per million tokens (cache miss)
-    outputPrice: 1.1,
-    // $1.10 per million tokens
-    cacheWritesPrice: 0.27,
-    // $0.27 per million tokens (cache miss)
+    inputPrice: 0.56,
+    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+    outputPrice: 1.68,
+    // $1.68 per million tokens - Updated Sept 5, 2025
+    cacheWritesPrice: 0.56,
+    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
     cacheReadsPrice: 0.07,
-    // $0.07 per million tokens (cache hit).
+    // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
     description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
   },
   "deepseek-reasoner": {
@@ -1723,14 +1746,14 @@ var deepSeekModels = {
     contextWindow: 128e3,
     supportsImages: false,
     supportsPromptCache: true,
-    inputPrice: 0.55,
-    // $0.55 per million tokens (cache miss)
-    outputPrice: 2.19,
-    // $2.19 per million tokens
-    cacheWritesPrice: 0.55,
-    // $0.55 per million tokens (cache miss)
-    cacheReadsPrice: 0.14,
-    // $0.14 per million tokens (cache hit)
+    inputPrice: 0.56,
+    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+    outputPrice: 1.68,
+    // $1.68 per million tokens - Updated Sept 5, 2025
+    cacheWritesPrice: 0.56,
+    // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+    cacheReadsPrice: 0.07,
+    // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
     description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
   }
 };
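Both DeepSeek models now share the same rates. For scale, a worked example under the updated per-million-token pricing (illustrative arithmetic, not from the diff):

```js
// Cost sketch under the updated DeepSeek pricing (USD per million tokens).
const inputPrice = 0.56, outputPrice = 1.68, cacheReadsPrice = 0.07;
const cost = (inTokens, outTokens, cachedTokens = 0) =>
  ((inTokens - cachedTokens) * inputPrice +
    cachedTokens * cacheReadsPrice +
    outTokens * outputPrice) / 1e6;

console.log(cost(1_000_000, 100_000)); // 0.728 (all cache misses)
console.log(cost(1_000_000, 100_000, 800_000)); // 0.336 (80% cache hits)
```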
@@ -1840,8 +1863,18 @@ var featherlessModels = {
 var featherlessDefaultModelId = "deepseek-ai/DeepSeek-R1-0528";

 // src/providers/fireworks.ts
-var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct";
+var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct-0905";
 var fireworksModels = {
+  "accounts/fireworks/models/kimi-k2-instruct-0905": {
+    maxTokens: 16384,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheReadsPrice: 0.15,
+    description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+  },
   "accounts/fireworks/models/kimi-k2-instruct": {
     maxTokens: 16384,
     contextWindow: 128e3,
@@ -2250,7 +2283,7 @@ var glamaDefaultModelInfo = {
 var GLAMA_DEFAULT_TEMPERATURE = 0;

 // src/providers/groq.ts
-var groqDefaultModelId = "llama-3.3-70b-versatile";
+var groqDefaultModelId = "moonshotai/kimi-k2-instruct-0905";
 var groqModels = {
   // Models based on API response: https://api.groq.com/openai/v1/models
   "llama-3.1-8b-instant": {
@@ -2336,6 +2369,16 @@ var groqModels = {
     // 50% discount for cached input tokens
     description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
   },
+  "moonshotai/kimi-k2-instruct-0905": {
+    maxTokens: 16384,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheReadsPrice: 0.15,
+    description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+  },
   "openai/gpt-oss-120b": {
     maxTokens: 32766,
     contextWindow: 131072,
@@ -2546,7 +2589,7 @@ var mistralModels = {
 var MISTRAL_DEFAULT_TEMPERATURE = 0;

 // src/providers/moonshot.ts
-var moonshotDefaultModelId = "kimi-k2-0711-preview";
+var moonshotDefaultModelId = "kimi-k2-0905-preview";
 var moonshotModels = {
   "kimi-k2-0711-preview": {
     maxTokens: 32e3,
@@ -2562,6 +2605,31 @@ var moonshotModels = {
     cacheReadsPrice: 0.15,
     // $0.15 per million tokens (cache hit)
     description: `Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters.`
+  },
+  "kimi-k2-0905-preview": {
+    maxTokens: 16384,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheReadsPrice: 0.15,
+    description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+  },
+  "kimi-k2-turbo-preview": {
+    maxTokens: 32e3,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: true,
+    inputPrice: 2.4,
+    // $2.40 per million tokens (cache miss)
+    outputPrice: 10,
+    // $10.00 per million tokens
+    cacheWritesPrice: 0,
+    // $0 per million tokens (cache miss)
+    cacheReadsPrice: 0.6,
+    // $0.60 per million tokens (cache hit)
+    description: `Kimi K2 Turbo is a high-speed version of the state-of-the-art Kimi K2 mixture-of-experts (MoE) language model, with the same 32 billion activated parameters and 1 trillion total parameters, optimized for output speeds of up to 60 tokens per second, peaking at 100 tokens per second.`
   }
 };
 var MOONSHOT_DEFAULT_TEMPERATURE = 0.6;
@@ -2609,7 +2677,11 @@ var openAiNativeModels = {
     description: "GPT-5: The best model for coding and agentic tasks across domains",
     // supportsVerbosity is a new capability; ensure ModelInfo includes it
     supportsVerbosity: true,
-    supportsTemperature: false
+    supportsTemperature: false,
+    tiers: [
+      { name: "flex", contextWindow: 4e5, inputPrice: 0.625, outputPrice: 5, cacheReadsPrice: 0.0625 },
+      { name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }
+    ]
   },
   "gpt-5-mini-2025-08-07": {
     maxTokens: 128e3,
@@ -2623,7 +2695,11 @@ var openAiNativeModels = {
     cacheReadsPrice: 0.03,
     description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
     supportsVerbosity: true,
-    supportsTemperature: false
+    supportsTemperature: false,
+    tiers: [
+      { name: "flex", contextWindow: 4e5, inputPrice: 0.125, outputPrice: 1, cacheReadsPrice: 0.0125 },
+      { name: "priority", contextWindow: 4e5, inputPrice: 0.45, outputPrice: 3.6, cacheReadsPrice: 0.045 }
+    ]
   },
   "gpt-5-nano-2025-08-07": {
     maxTokens: 128e3,
@@ -2637,7 +2713,8 @@ var openAiNativeModels = {
     cacheReadsPrice: 0.01,
     description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
     supportsVerbosity: true,
-    supportsTemperature: false
+    supportsTemperature: false,
+    tiers: [{ name: "flex", contextWindow: 4e5, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 25e-4 }]
   },
   "gpt-4.1": {
     maxTokens: 32768,
@@ -2647,7 +2724,10 @@ var openAiNativeModels = {
     inputPrice: 2,
     outputPrice: 8,
     cacheReadsPrice: 0.5,
-    supportsTemperature: true
+    supportsTemperature: true,
+    tiers: [
+      { name: "priority", contextWindow: 1047576, inputPrice: 3.5, outputPrice: 14, cacheReadsPrice: 0.875 }
+    ]
   },
   "gpt-4.1-mini": {
     maxTokens: 32768,
@@ -2657,7 +2737,10 @@ var openAiNativeModels = {
     inputPrice: 0.4,
     outputPrice: 1.6,
     cacheReadsPrice: 0.1,
-    supportsTemperature: true
+    supportsTemperature: true,
+    tiers: [
+      { name: "priority", contextWindow: 1047576, inputPrice: 0.7, outputPrice: 2.8, cacheReadsPrice: 0.175 }
+    ]
   },
   "gpt-4.1-nano": {
     maxTokens: 32768,
@@ -2667,7 +2750,10 @@ var openAiNativeModels = {
     inputPrice: 0.1,
     outputPrice: 0.4,
     cacheReadsPrice: 0.025,
-    supportsTemperature: true
+    supportsTemperature: true,
+    tiers: [
+      { name: "priority", contextWindow: 1047576, inputPrice: 0.2, outputPrice: 0.8, cacheReadsPrice: 0.05 }
+    ]
   },
   o3: {
     maxTokens: 1e5,
@@ -2679,7 +2765,11 @@ var openAiNativeModels = {
     cacheReadsPrice: 0.5,
     supportsReasoningEffort: true,
     reasoningEffort: "medium",
-    supportsTemperature: false
+    supportsTemperature: false,
+    tiers: [
+      { name: "flex", contextWindow: 2e5, inputPrice: 1, outputPrice: 4, cacheReadsPrice: 0.25 },
+      { name: "priority", contextWindow: 2e5, inputPrice: 3.5, outputPrice: 14, cacheReadsPrice: 0.875 }
+    ]
   },
   "o3-high": {
     maxTokens: 1e5,
@@ -2713,7 +2803,11 @@ var openAiNativeModels = {
     cacheReadsPrice: 0.275,
     supportsReasoningEffort: true,
     reasoningEffort: "medium",
-    supportsTemperature: false
+    supportsTemperature: false,
+    tiers: [
+      { name: "flex", contextWindow: 2e5, inputPrice: 0.55, outputPrice: 2.2, cacheReadsPrice: 0.138 },
+      { name: "priority", contextWindow: 2e5, inputPrice: 2, outputPrice: 8, cacheReadsPrice: 0.5 }
+    ]
   },
   "o4-mini-high": {
     maxTokens: 1e5,
@@ -2809,7 +2903,10 @@ var openAiNativeModels = {
     inputPrice: 2.5,
     outputPrice: 10,
     cacheReadsPrice: 1.25,
-    supportsTemperature: true
+    supportsTemperature: true,
+    tiers: [
+      { name: "priority", contextWindow: 128e3, inputPrice: 4.25, outputPrice: 17, cacheReadsPrice: 2.125 }
+    ]
   },
   "gpt-4o-mini": {
     maxTokens: 16384,
@@ -2819,7 +2916,10 @@ var openAiNativeModels = {
     inputPrice: 0.15,
     outputPrice: 0.6,
     cacheReadsPrice: 0.075,
-    supportsTemperature: true
+    supportsTemperature: true,
+    tiers: [
+      { name: "priority", contextWindow: 128e3, inputPrice: 0.25, outputPrice: 1, cacheReadsPrice: 0.125 }
+    ]
   },
   "codex-mini-latest": {
     maxTokens: 16384,
@@ -3360,6 +3460,60 @@ var vertexModels = {
     inputPrice: 0.35,
     outputPrice: 1.15,
     description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
+  },
+  "deepseek-r1-0528-maas": {
+    maxTokens: 32768,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 1.35,
+    outputPrice: 5.4,
+    description: "DeepSeek R1 (0528). Available in us-central1"
+  },
+  "deepseek-v3.1-maas": {
+    maxTokens: 32768,
+    contextWindow: 163840,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.6,
+    outputPrice: 1.7,
+    description: "DeepSeek V3.1. Available in us-west2"
+  },
+  "gpt-oss-120b-maas": {
+    maxTokens: 32768,
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.15,
+    outputPrice: 0.6,
+    description: "OpenAI gpt-oss 120B. Available in us-central1"
+  },
+  "gpt-oss-20b-maas": {
+    maxTokens: 32768,
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.075,
+    outputPrice: 0.3,
+    description: "OpenAI gpt-oss 20B. Available in us-central1"
+  },
+  "qwen3-coder-480b-a35b-instruct-maas": {
+    maxTokens: 32768,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 1,
+    outputPrice: 4,
+    description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
+  },
+  "qwen3-235b-a22b-instruct-2507-maas": {
+    maxTokens: 16384,
+    contextWindow: 262144,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0.25,
+    outputPrice: 1,
+    description: "Qwen3 235B A22B Instruct. Available in us-south1"
   }
 };
 var VERTEX_REGIONS = [
@@ -3368,6 +3522,7 @@ var VERTEX_REGIONS = [
   { value: "us-east1", label: "us-east1" },
   { value: "us-east4", label: "us-east4" },
   { value: "us-east5", label: "us-east5" },
+  { value: "us-south1", label: "us-south1" },
   { value: "us-west1", label: "us-west1" },
   { value: "us-west2", label: "us-west2" },
   { value: "us-west3", label: "us-west3" },
@@ -3861,6 +4016,18 @@ var mainlandZAiModels = {
 };
 var ZAI_DEFAULT_TEMPERATURE = 0;

+// src/providers/deepinfra.ts
+var deepInfraDefaultModelId = "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo";
+var deepInfraDefaultModelInfo = {
+  maxTokens: 16384,
+  contextWindow: 262144,
+  supportsImages: false,
+  supportsPromptCache: false,
+  inputPrice: 0.3,
+  outputPrice: 1.2,
+  description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
+};
+
 // src/provider-settings.ts
 var providerNames = [
   "anthropic",
@@ -3879,6 +4046,7 @@ var providerNames = [
   "mistral",
   "moonshot",
   "deepseek",
+  "deepinfra",
   "doubao",
   "qwen-code",
   "unbound",
@@ -4019,7 +4187,10 @@ var geminiCliSchema = apiModelIdProviderModelSchema.extend({
 });
 var openAiNativeSchema = apiModelIdProviderModelSchema.extend({
   openAiNativeApiKey: import_zod7.z.string().optional(),
-  openAiNativeBaseUrl: import_zod7.z.string().optional()
+  openAiNativeBaseUrl: import_zod7.z.string().optional(),
+  // OpenAI Responses API service tier for openai-native provider only.
+  // UI should only expose this when the selected model supports flex/priority.
+  openAiNativeServiceTier: serviceTierSchema.optional()
 });
 var mistralSchema = apiModelIdProviderModelSchema.extend({
   mistralApiKey: import_zod7.z.string().optional(),
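The new `openAiNativeServiceTier` field threads the tier selection into provider settings. A sketch of a settings object that should now validate, using the exported `providerSettingsSchema`; the surrounding values are example data, only `openAiNativeServiceTier` is the new field:

```js
const { providerSettingsSchema } = require("@roo-code/types");

const result = providerSettingsSchema.safeParse({
  apiProvider: "openai-native",
  apiModelId: "gpt-5-2025-08-07",
  openAiNativeApiKey: "sk-...",
  openAiNativeServiceTier: "flex", // "default" | "flex" | "priority"
});
console.log(result.success); // expected: true
```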
@@ -4029,6 +4200,11 @@ var deepSeekSchema = apiModelIdProviderModelSchema.extend({
   deepSeekBaseUrl: import_zod7.z.string().optional(),
   deepSeekApiKey: import_zod7.z.string().optional()
 });
+var deepInfraSchema = apiModelIdProviderModelSchema.extend({
+  deepInfraBaseUrl: import_zod7.z.string().optional(),
+  deepInfraApiKey: import_zod7.z.string().optional(),
+  deepInfraModelId: import_zod7.z.string().optional()
+});
 var doubaoSchema = apiModelIdProviderModelSchema.extend({
   doubaoBaseUrl: import_zod7.z.string().optional(),
   doubaoApiKey: import_zod7.z.string().optional()
@@ -4119,6 +4295,7 @@ var providerSettingsSchemaDiscriminated = import_zod7.z.discriminatedUnion("apiP
   openAiNativeSchema.merge(import_zod7.z.object({ apiProvider: import_zod7.z.literal("openai-native") })),
   mistralSchema.merge(import_zod7.z.object({ apiProvider: import_zod7.z.literal("mistral") })),
   deepSeekSchema.merge(import_zod7.z.object({ apiProvider: import_zod7.z.literal("deepseek") })),
+  deepInfraSchema.merge(import_zod7.z.object({ apiProvider: import_zod7.z.literal("deepinfra") })),
   doubaoSchema.merge(import_zod7.z.object({ apiProvider: import_zod7.z.literal("doubao") })),
   moonshotSchema.merge(import_zod7.z.object({ apiProvider: import_zod7.z.literal("moonshot") })),
   unboundSchema.merge(import_zod7.z.object({ apiProvider: import_zod7.z.literal("unbound") })),
@@ -4158,6 +4335,7 @@ var providerSettingsSchema = import_zod7.z.object({
   ...openAiNativeSchema.shape,
   ...mistralSchema.shape,
   ...deepSeekSchema.shape,
+  ...deepInfraSchema.shape,
   ...doubaoSchema.shape,
   ...moonshotSchema.shape,
   ...unboundSchema.shape,
@@ -4198,7 +4376,8 @@ var MODEL_ID_KEYS = [
   "litellmModelId",
   "huggingFaceModelId",
   "ioIntelligenceModelId",
-  "vercelAiGatewayModelId"
+  "vercelAiGatewayModelId",
+  "deepInfraModelId"
 ];
 var getModelId = (settings) => {
   const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key]);
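With `deepInfraModelId` appended to `MODEL_ID_KEYS`, `getModelId` scans it like any other dynamic-provider key. Illustrative usage, assuming `getModelId` is consumed from the package root; the model id shown is just the DeepInfra default from this diff:

```js
const { getModelId } = require("@roo-code/types");

// deepInfraModelId is now one of the keys getModelId searches for.
const modelId = getModelId({
  apiProvider: "deepinfra",
  deepInfraModelId: "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
});
console.log(modelId); // "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo"
```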
@@ -4307,6 +4486,7 @@ var MODELS_BY_PROVIDER = {
   openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
   requesty: { id: "requesty", label: "Requesty", models: [] },
   unbound: { id: "unbound", label: "Unbound", models: [] },
+  deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] },
   "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }
 };
 var dynamicProviders = [
@@ -4316,6 +4496,7 @@ var dynamicProviders = [
   "openrouter",
   "requesty",
   "unbound",
+  "deepinfra",
   "vercel-ai-gateway"
 ];
 var isDynamicProvider = (key) => dynamicProviders.includes(key);
@@ -4803,6 +4984,7 @@ var SECRET_STATE_KEYS = [
   "groqApiKey",
   "chutesApiKey",
   "litellmApiKey",
+  "deepInfraApiKey",
   "codeIndexOpenAiKey",
   "codeIndexQdrantApiKey",
   "codebaseIndexOpenAiCompatibleApiKey",
@@ -4996,7 +5178,8 @@ var userFeaturesSchema = import_zod15.z.object({
   roomoteControlEnabled: import_zod15.z.boolean().optional()
 });
 var userSettingsConfigSchema = import_zod15.z.object({
-  extensionBridgeEnabled: import_zod15.z.boolean().optional()
+  extensionBridgeEnabled: import_zod15.z.boolean().optional(),
+  taskSyncEnabled: import_zod15.z.boolean().optional()
 });
 var userSettingsDataSchema = import_zod15.z.object({
   features: userFeaturesSchema,
@@ -5552,6 +5735,8 @@ var commandExecutionStatusSchema = import_zod20.z.discriminatedUnion("status", [
   customModePromptsSchema,
   customModesSettingsSchema,
   customSupportPromptsSchema,
+  deepInfraDefaultModelId,
+  deepInfraDefaultModelInfo,
   deepSeekDefaultModelId,
   deepSeekModels,
   discriminatedProviderSettingsWithIdSchema,
@@ -5659,6 +5844,8 @@ var commandExecutionStatusSchema = import_zod20.z.discriminatedUnion("status", [
   rooModels,
   sambaNovaDefaultModelId,
   sambaNovaModels,
+  serviceTierSchema,
+  serviceTiers,
   shareResponseSchema,
   shouldUseSingleFileRead,
   staticAppPropertiesSchema,