@roo-code/types 1.74.0 → 1.75.0

This diff compares the published contents of two publicly available versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -98,6 +98,7 @@ var clineMessageSchema = z.object({
  contextCondense: contextCondenseSchema.optional(),
  isProtected: z.boolean().optional(),
  apiProtocol: z.union([z.literal("openai"), z.literal("anthropic")]).optional(),
+ isAnswered: z.boolean().optional(),
  metadata: z.object({
  gpt5: z.object({
  previous_response_id: z.string().optional(),
@@ -357,6 +358,8 @@ var reasoningEffortsSchema = z5.enum(reasoningEfforts);
  var reasoningEffortWithMinimalSchema = z5.union([reasoningEffortsSchema, z5.literal("minimal")]);
  var verbosityLevels = ["low", "medium", "high"];
  var verbosityLevelsSchema = z5.enum(verbosityLevels);
+ var serviceTiers = ["default", "flex", "priority"];
+ var serviceTierSchema = z5.enum(serviceTiers);
  var modelParameters = ["max_tokens", "temperature", "reasoning", "include_reasoning"];
  var modelParametersSchema = z5.enum(modelParameters);
  var isModelParameter = (value) => modelParameters.includes(value);
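
Both serviceTiers and serviceTierSchema are added to the export list later in this diff. A minimal consumer-side sketch of validating a tier value, assuming standard zod enum behavior:

import { serviceTierSchema, serviceTiers } from "@roo-code/types";

console.log(serviceTiers); // ["default", "flex", "priority"]
serviceTierSchema.parse("flex"); // returns "flex"
console.log(serviceTierSchema.safeParse("batch").success); // false: not a recognized tier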
@@ -384,8 +387,15 @@ var modelInfoSchema = z5.object({
  minTokensPerCachePoint: z5.number().optional(),
  maxCachePoints: z5.number().optional(),
  cachableFields: z5.array(z5.string()).optional(),
+ /**
+ * Service tiers with pricing information.
+ * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
+ * The top-level input/output/cache* fields represent the default/standard tier.
+ */
  tiers: z5.array(
  z5.object({
+ name: serviceTierSchema.optional(),
+ // Service tier name (flex, priority, etc.)
  contextWindow: z5.number(),
  inputPrice: z5.number().optional(),
  outputPrice: z5.number().optional(),
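
Per the doc comment above, a tier entry overrides the model's top-level pricing fields, which describe the default/standard tier. A hypothetical helper sketching that fallback (resolveTierPricing is not part of the package):

// Resolve effective pricing for a requested service tier, falling back to the
// model's top-level (default-tier) fields when no matching tier entry exists.
function resolveTierPricing(modelInfo, tierName) {
  const tier = (modelInfo.tiers ?? []).find((t) => t.name === tierName);
  return {
    contextWindow: tier?.contextWindow ?? modelInfo.contextWindow,
    inputPrice: tier?.inputPrice ?? modelInfo.inputPrice,
    outputPrice: tier?.outputPrice ?? modelInfo.outputPrice,
    cacheReadsPrice: tier?.cacheReadsPrice ?? modelInfo.cacheReadsPrice,
  };
}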
@@ -1348,6 +1358,15 @@ var chutesModels = {
  outputPrice: 0.5926,
  description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
  },
+ "moonshotai/Kimi-K2-Instruct-0905": {
+ maxTokens: 32768,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.1999,
+ outputPrice: 0.8001,
+ description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
+ },
  "Qwen/Qwen3-235B-A22B-Thinking-2507": {
  maxTokens: 32768,
  contextWindow: 262144,
@@ -1435,14 +1454,14 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
- inputPrice: 0.27,
- // $0.27 per million tokens (cache miss)
- outputPrice: 1.1,
- // $1.10 per million tokens
- cacheWritesPrice: 0.27,
- // $0.27 per million tokens (cache miss)
+ inputPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+ outputPrice: 1.68,
+ // $1.68 per million tokens - Updated Sept 5, 2025
+ cacheWritesPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
  cacheReadsPrice: 0.07,
- // $0.07 per million tokens (cache hit).
+ // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
  description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
  },
  "deepseek-reasoner": {
@@ -1451,14 +1470,14 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
- inputPrice: 0.55,
- // $0.55 per million tokens (cache miss)
- outputPrice: 2.19,
- // $2.19 per million tokens
- cacheWritesPrice: 0.55,
- // $0.55 per million tokens (cache miss)
- cacheReadsPrice: 0.14,
- // $0.14 per million tokens (cache hit)
+ inputPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+ outputPrice: 1.68,
+ // $1.68 per million tokens - Updated Sept 5, 2025
+ cacheWritesPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+ cacheReadsPrice: 0.07,
+ // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
  description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
  }
  };
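
As the comments note, all of these prices are USD per million tokens, with cache-hit input billed at cacheReadsPrice instead of inputPrice. A worked example at the updated deepseek-chat rates (illustrative arithmetic, not package code):

// 80k cache-miss input tokens, 20k cache-hit input tokens, 4k output tokens:
const cost = (80000 * 0.56 + 20000 * 0.07 + 4000 * 1.68) / 1e6;
console.log(cost); // 0.05292, i.e. about 5.3 cents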
@@ -1568,8 +1587,18 @@ var featherlessModels = {
  var featherlessDefaultModelId = "deepseek-ai/DeepSeek-R1-0528";
 
  // src/providers/fireworks.ts
- var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct";
+ var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct-0905";
  var fireworksModels = {
+ "accounts/fireworks/models/kimi-k2-instruct-0905": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheReadsPrice: 0.15,
+ description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+ },
  "accounts/fireworks/models/kimi-k2-instruct": {
  maxTokens: 16384,
  contextWindow: 128e3,
@@ -1978,7 +2007,7 @@ var glamaDefaultModelInfo = {
  var GLAMA_DEFAULT_TEMPERATURE = 0;
 
  // src/providers/groq.ts
- var groqDefaultModelId = "llama-3.3-70b-versatile";
+ var groqDefaultModelId = "moonshotai/kimi-k2-instruct-0905";
  var groqModels = {
  // Models based on API response: https://api.groq.com/openai/v1/models
  "llama-3.1-8b-instant": {
@@ -2064,6 +2093,16 @@ var groqModels = {
  // 50% discount for cached input tokens
  description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
  },
+ "moonshotai/kimi-k2-instruct-0905": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheReadsPrice: 0.15,
+ description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+ },
  "openai/gpt-oss-120b": {
  maxTokens: 32766,
  contextWindow: 131072,
@@ -2274,7 +2313,7 @@ var mistralModels = {
  var MISTRAL_DEFAULT_TEMPERATURE = 0;
 
  // src/providers/moonshot.ts
- var moonshotDefaultModelId = "kimi-k2-0711-preview";
+ var moonshotDefaultModelId = "kimi-k2-0905-preview";
  var moonshotModels = {
  "kimi-k2-0711-preview": {
  maxTokens: 32e3,
@@ -2290,6 +2329,31 @@ var moonshotModels = {
  cacheReadsPrice: 0.15,
  // $0.15 per million tokens (cache hit)
  description: `Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters.`
+ },
+ "kimi-k2-0905-preview": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheReadsPrice: 0.15,
+ description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+ },
+ "kimi-k2-turbo-preview": {
+ maxTokens: 32e3,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 2.4,
+ // $2.40 per million tokens (cache miss)
+ outputPrice: 10,
+ // $10.00 per million tokens
+ cacheWritesPrice: 0,
+ // $0 per million tokens (cache miss)
+ cacheReadsPrice: 0.6,
+ // $0.60 per million tokens (cache hit)
+ description: `Kimi K2 Turbo is a high-speed version of the state-of-the-art Kimi K2 mixture-of-experts (MoE) language model, with the same 32 billion activated parameters and 1 trillion total parameters, optimized for output speeds of up to 60 tokens per second, peaking at 100 tokens per second.`
  }
  };
  var MOONSHOT_DEFAULT_TEMPERATURE = 0.6;
@@ -2337,7 +2401,11 @@ var openAiNativeModels = {
  description: "GPT-5: The best model for coding and agentic tasks across domains",
  // supportsVerbosity is a new capability; ensure ModelInfo includes it
  supportsVerbosity: true,
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 4e5, inputPrice: 0.625, outputPrice: 5, cacheReadsPrice: 0.0625 },
+ { name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }
+ ]
  },
  "gpt-5-mini-2025-08-07": {
  maxTokens: 128e3,
@@ -2351,7 +2419,11 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.03,
  description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
  supportsVerbosity: true,
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 4e5, inputPrice: 0.125, outputPrice: 1, cacheReadsPrice: 0.0125 },
+ { name: "priority", contextWindow: 4e5, inputPrice: 0.45, outputPrice: 3.6, cacheReadsPrice: 0.045 }
+ ]
  },
  "gpt-5-nano-2025-08-07": {
  maxTokens: 128e3,
@@ -2365,7 +2437,8 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.01,
  description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
  supportsVerbosity: true,
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [{ name: "flex", contextWindow: 4e5, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 25e-4 }]
  },
  "gpt-4.1": {
  maxTokens: 32768,
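
Tier entries use the same per-million-token units as the top-level pricing fields (the dist's 25e-4 is just 0.0025). Pricing a gpt-5-nano request on the flex tier shown above (illustrative arithmetic, not package code):

// 1M input tokens at $0.025/M plus 100k output tokens at $0.20/M:
const flexCost = (1e6 * 0.025 + 1e5 * 0.2) / 1e6;
console.log(flexCost); // 0.045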
@@ -2375,7 +2448,10 @@ var openAiNativeModels = {
  inputPrice: 2,
  outputPrice: 8,
  cacheReadsPrice: 0.5,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 1047576, inputPrice: 3.5, outputPrice: 14, cacheReadsPrice: 0.875 }
+ ]
  },
  "gpt-4.1-mini": {
  maxTokens: 32768,
@@ -2385,7 +2461,10 @@ var openAiNativeModels = {
  inputPrice: 0.4,
  outputPrice: 1.6,
  cacheReadsPrice: 0.1,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 1047576, inputPrice: 0.7, outputPrice: 2.8, cacheReadsPrice: 0.175 }
+ ]
  },
  "gpt-4.1-nano": {
  maxTokens: 32768,
@@ -2395,7 +2474,10 @@ var openAiNativeModels = {
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 1047576, inputPrice: 0.2, outputPrice: 0.8, cacheReadsPrice: 0.05 }
+ ]
  },
  o3: {
  maxTokens: 1e5,
@@ -2407,7 +2489,11 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.5,
  supportsReasoningEffort: true,
  reasoningEffort: "medium",
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 2e5, inputPrice: 1, outputPrice: 4, cacheReadsPrice: 0.25 },
+ { name: "priority", contextWindow: 2e5, inputPrice: 3.5, outputPrice: 14, cacheReadsPrice: 0.875 }
+ ]
  },
  "o3-high": {
  maxTokens: 1e5,
@@ -2441,7 +2527,11 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.275,
  supportsReasoningEffort: true,
  reasoningEffort: "medium",
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 2e5, inputPrice: 0.55, outputPrice: 2.2, cacheReadsPrice: 0.138 },
+ { name: "priority", contextWindow: 2e5, inputPrice: 2, outputPrice: 8, cacheReadsPrice: 0.5 }
+ ]
  },
  "o4-mini-high": {
  maxTokens: 1e5,
@@ -2537,7 +2627,10 @@ var openAiNativeModels = {
  inputPrice: 2.5,
  outputPrice: 10,
  cacheReadsPrice: 1.25,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 128e3, inputPrice: 4.25, outputPrice: 17, cacheReadsPrice: 2.125 }
+ ]
  },
  "gpt-4o-mini": {
  maxTokens: 16384,
@@ -2547,7 +2640,10 @@ var openAiNativeModels = {
  inputPrice: 0.15,
  outputPrice: 0.6,
  cacheReadsPrice: 0.075,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 128e3, inputPrice: 0.25, outputPrice: 1, cacheReadsPrice: 0.125 }
+ ]
  },
  "codex-mini-latest": {
  maxTokens: 16384,
@@ -3088,6 +3184,60 @@ var vertexModels = {
  inputPrice: 0.35,
  outputPrice: 1.15,
  description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
+ },
+ "deepseek-r1-0528-maas": {
+ maxTokens: 32768,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 1.35,
+ outputPrice: 5.4,
+ description: "DeepSeek R1 (0528). Available in us-central1"
+ },
+ "deepseek-v3.1-maas": {
+ maxTokens: 32768,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.6,
+ outputPrice: 1.7,
+ description: "DeepSeek V3.1. Available in us-west2"
+ },
+ "gpt-oss-120b-maas": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.15,
+ outputPrice: 0.6,
+ description: "OpenAI gpt-oss 120B. Available in us-central1"
+ },
+ "gpt-oss-20b-maas": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.075,
+ outputPrice: 0.3,
+ description: "OpenAI gpt-oss 20B. Available in us-central1"
+ },
+ "qwen3-coder-480b-a35b-instruct-maas": {
+ maxTokens: 32768,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 1,
+ outputPrice: 4,
+ description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
+ },
+ "qwen3-235b-a22b-instruct-2507-maas": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.25,
+ outputPrice: 1,
+ description: "Qwen3 235B A22B Instruct. Available in us-south1"
  }
  };
  var VERTEX_REGIONS = [
@@ -3096,6 +3246,7 @@ var VERTEX_REGIONS = [
  { value: "us-east1", label: "us-east1" },
  { value: "us-east4", label: "us-east4" },
  { value: "us-east5", label: "us-east5" },
+ { value: "us-south1", label: "us-south1" },
  { value: "us-west1", label: "us-west1" },
  { value: "us-west2", label: "us-west2" },
  { value: "us-west3", label: "us-west3" },
@@ -3589,6 +3740,18 @@ var mainlandZAiModels = {
  };
  var ZAI_DEFAULT_TEMPERATURE = 0;
 
+ // src/providers/deepinfra.ts
+ var deepInfraDefaultModelId = "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo";
+ var deepInfraDefaultModelInfo = {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.3,
+ outputPrice: 1.2,
+ description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
+ };
+
  // src/provider-settings.ts
  var providerNames = [
  "anthropic",
@@ -3607,6 +3770,7 @@ var providerNames = [
  "mistral",
  "moonshot",
  "deepseek",
+ "deepinfra",
  "doubao",
  "qwen-code",
  "unbound",
@@ -3747,7 +3911,10 @@ var geminiCliSchema = apiModelIdProviderModelSchema.extend({
  });
  var openAiNativeSchema = apiModelIdProviderModelSchema.extend({
  openAiNativeApiKey: z7.string().optional(),
- openAiNativeBaseUrl: z7.string().optional()
+ openAiNativeBaseUrl: z7.string().optional(),
+ // OpenAI Responses API service tier for openai-native provider only.
+ // UI should only expose this when the selected model supports flex/priority.
+ openAiNativeServiceTier: serviceTierSchema.optional()
  });
  var mistralSchema = apiModelIdProviderModelSchema.extend({
  mistralApiKey: z7.string().optional(),
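
A sketch of an openai-native settings object using the new field; apiModelId and openAiNativeApiKey come from the existing schema shapes, and all values are illustrative:

const settings = {
  apiProvider: "openai-native",
  apiModelId: "gpt-5-mini-2025-08-07",
  openAiNativeApiKey: "sk-...",
  // One of "default" | "flex" | "priority"; omit to use the standard tier.
  openAiNativeServiceTier: "flex",
};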
@@ -3757,6 +3924,11 @@ var deepSeekSchema = apiModelIdProviderModelSchema.extend({
  deepSeekBaseUrl: z7.string().optional(),
  deepSeekApiKey: z7.string().optional()
  });
+ var deepInfraSchema = apiModelIdProviderModelSchema.extend({
+ deepInfraBaseUrl: z7.string().optional(),
+ deepInfraApiKey: z7.string().optional(),
+ deepInfraModelId: z7.string().optional()
+ });
  var doubaoSchema = apiModelIdProviderModelSchema.extend({
  doubaoBaseUrl: z7.string().optional(),
  doubaoApiKey: z7.string().optional()
@@ -3847,6 +4019,7 @@ var providerSettingsSchemaDiscriminated = z7.discriminatedUnion("apiProvider", [
  openAiNativeSchema.merge(z7.object({ apiProvider: z7.literal("openai-native") })),
  mistralSchema.merge(z7.object({ apiProvider: z7.literal("mistral") })),
  deepSeekSchema.merge(z7.object({ apiProvider: z7.literal("deepseek") })),
+ deepInfraSchema.merge(z7.object({ apiProvider: z7.literal("deepinfra") })),
  doubaoSchema.merge(z7.object({ apiProvider: z7.literal("doubao") })),
  moonshotSchema.merge(z7.object({ apiProvider: z7.literal("moonshot") })),
  unboundSchema.merge(z7.object({ apiProvider: z7.literal("unbound") })),
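
With the deepinfra branch added to the discriminated union, settings for the new provider validate like any other branch. A minimal sketch, assuming providerSettingsSchemaDiscriminated is importable by consumers (only its discriminatedProviderSettingsWithIdSchema sibling is visible in the export hunks below):

import { providerSettingsSchemaDiscriminated } from "@roo-code/types"; // assumed export

const result = providerSettingsSchemaDiscriminated.safeParse({
  apiProvider: "deepinfra",
  deepInfraApiKey: "...", // optional secret; also added to SECRET_STATE_KEYS below
  deepInfraModelId: "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
});
console.log(result.success); // true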
@@ -3886,6 +4059,7 @@ var providerSettingsSchema = z7.object({
  ...openAiNativeSchema.shape,
  ...mistralSchema.shape,
  ...deepSeekSchema.shape,
+ ...deepInfraSchema.shape,
  ...doubaoSchema.shape,
  ...moonshotSchema.shape,
  ...unboundSchema.shape,
@@ -3926,7 +4100,8 @@ var MODEL_ID_KEYS = [
  "litellmModelId",
  "huggingFaceModelId",
  "ioIntelligenceModelId",
- "vercelAiGatewayModelId"
+ "vercelAiGatewayModelId",
+ "deepInfraModelId"
  ];
  var getModelId = (settings) => {
  const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key]);
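
getModelId scans MODEL_ID_KEYS for the first key set on the settings object, so a deepinfra configuration now resolves its model id like any other provider. A sketch of the expected behavior, assuming getModelId is exported and returns that key's value (the function body is truncated in this hunk):

import { getModelId } from "@roo-code/types"; // assumed export

const id = getModelId({
  apiProvider: "deepinfra",
  deepInfraModelId: "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
});
console.log(id); // "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo"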
@@ -4035,6 +4210,7 @@ var MODELS_BY_PROVIDER = {
  openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
  requesty: { id: "requesty", label: "Requesty", models: [] },
  unbound: { id: "unbound", label: "Unbound", models: [] },
+ deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] },
  "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }
  };
  var dynamicProviders = [
@@ -4044,6 +4220,7 @@ var dynamicProviders = [
  "openrouter",
  "requesty",
  "unbound",
+ "deepinfra",
  "vercel-ai-gateway"
  ];
  var isDynamicProvider = (key) => dynamicProviders.includes(key);
@@ -4531,6 +4708,7 @@ var SECRET_STATE_KEYS = [
  "groqApiKey",
  "chutesApiKey",
  "litellmApiKey",
+ "deepInfraApiKey",
  "codeIndexOpenAiKey",
  "codeIndexQdrantApiKey",
  "codebaseIndexOpenAiCompatibleApiKey",
@@ -4724,7 +4902,8 @@ var userFeaturesSchema = z15.object({
  roomoteControlEnabled: z15.boolean().optional()
  });
  var userSettingsConfigSchema = z15.object({
- extensionBridgeEnabled: z15.boolean().optional()
+ extensionBridgeEnabled: z15.boolean().optional(),
+ taskSyncEnabled: z15.boolean().optional()
  });
  var userSettingsDataSchema = z15.object({
  features: userFeaturesSchema,
@@ -5279,6 +5458,8 @@ export {
  customModePromptsSchema,
  customModesSettingsSchema,
  customSupportPromptsSchema,
+ deepInfraDefaultModelId,
+ deepInfraDefaultModelInfo,
  deepSeekDefaultModelId,
  deepSeekModels,
  discriminatedProviderSettingsWithIdSchema,
@@ -5386,6 +5567,8 @@ export {
  rooModels,
  sambaNovaDefaultModelId,
  sambaNovaModels,
+ serviceTierSchema,
+ serviceTiers,
  shareResponseSchema,
  shouldUseSingleFileRead,
  staticAppPropertiesSchema,