@roo-code/types 1.74.0 → 1.76.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -98,6 +98,7 @@ var clineMessageSchema = z.object({
  contextCondense: contextCondenseSchema.optional(),
  isProtected: z.boolean().optional(),
  apiProtocol: z.union([z.literal("openai"), z.literal("anthropic")]).optional(),
+ isAnswered: z.boolean().optional(),
  metadata: z.object({
  gpt5: z.object({
  previous_response_id: z.string().optional(),
@@ -357,6 +358,8 @@ var reasoningEffortsSchema = z5.enum(reasoningEfforts);
  var reasoningEffortWithMinimalSchema = z5.union([reasoningEffortsSchema, z5.literal("minimal")]);
  var verbosityLevels = ["low", "medium", "high"];
  var verbosityLevelsSchema = z5.enum(verbosityLevels);
+ var serviceTiers = ["default", "flex", "priority"];
+ var serviceTierSchema = z5.enum(serviceTiers);
  var modelParameters = ["max_tokens", "temperature", "reasoning", "include_reasoning"];
  var modelParametersSchema = z5.enum(modelParameters);
  var isModelParameter = (value) => modelParameters.includes(value);
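
Both serviceTiers and serviceTierSchema are exported (see the export hunk near the end of this diff), so consumers can validate tier strings directly. A minimal TypeScript sketch, assuming only those two exports:

    // Validate a user-supplied service tier against the new enum.
    import { serviceTierSchema, serviceTiers } from "@roo-code/types"

    const tier = serviceTierSchema.parse("flex") // "flex"; throws on unknown values
    const safe = serviceTierSchema.safeParse("turbo") // { success: false, ... }
    console.log(serviceTiers) // ["default", "flex", "priority"]
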
@@ -384,8 +387,15 @@ var modelInfoSchema = z5.object({
  minTokensPerCachePoint: z5.number().optional(),
  maxCachePoints: z5.number().optional(),
  cachableFields: z5.array(z5.string()).optional(),
+ /**
+ * Service tiers with pricing information.
+ * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
+ * The top-level input/output/cache* fields represent the default/standard tier.
+ */
  tiers: z5.array(
  z5.object({
+ name: serviceTierSchema.optional(),
+ // Service tier name (flex, priority, etc.)
  contextWindow: z5.number(),
  inputPrice: z5.number().optional(),
  outputPrice: z5.number().optional(),
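
Per the schema comment, a tier entry overrides the model's top-level pricing, and anything the tier omits falls back to the defaults. A sketch of that resolution under those assumptions; resolveTierPricing is a hypothetical helper, and the interfaces are abbreviated to the fields visible here:

    // Hypothetical helper: resolve effective pricing for a named service tier,
    // falling back to the model's top-level (default/standard) fields.
    type TierName = "default" | "flex" | "priority"
    interface Tier { name?: TierName; contextWindow: number; inputPrice?: number; outputPrice?: number; cacheReadsPrice?: number }
    interface ModelPricing { inputPrice?: number; outputPrice?: number; cacheReadsPrice?: number; tiers?: Tier[] }

    function resolveTierPricing(model: ModelPricing, tier: TierName) {
      const t = model.tiers?.find((x) => x.name === tier)
      return {
        inputPrice: t?.inputPrice ?? model.inputPrice,
        outputPrice: t?.outputPrice ?? model.outputPrice,
        cacheReadsPrice: t?.cacheReadsPrice ?? model.cacheReadsPrice,
      }
    }
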
@@ -1348,6 +1358,15 @@ var chutesModels = {
  outputPrice: 0.5926,
  description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
  },
+ "moonshotai/Kimi-K2-Instruct-0905": {
+ maxTokens: 32768,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.1999,
+ outputPrice: 0.8001,
+ description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
+ },
  "Qwen/Qwen3-235B-A22B-Thinking-2507": {
  maxTokens: 32768,
  contextWindow: 262144,
@@ -1356,6 +1375,24 @@ var chutesModels = {
  inputPrice: 0.077968332,
  outputPrice: 0.31202496,
  description: "Qwen3 235B A22B Thinking 2507 model with 262K context window."
+ },
+ "Qwen/Qwen3-Next-80B-A3B-Instruct": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ description: "Fast, stable instruction-tuned model optimized for complex tasks, RAG, and tool use without thinking traces."
+ },
+ "Qwen/Qwen3-Next-80B-A3B-Thinking": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ description: "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis."
  }
  };

@@ -1435,14 +1472,14 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
- inputPrice: 0.27,
- // $0.27 per million tokens (cache miss)
- outputPrice: 1.1,
- // $1.10 per million tokens
- cacheWritesPrice: 0.27,
- // $0.27 per million tokens (cache miss)
+ inputPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+ outputPrice: 1.68,
+ // $1.68 per million tokens - Updated Sept 5, 2025
+ cacheWritesPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
  cacheReadsPrice: 0.07,
- // $0.07 per million tokens (cache hit).
+ // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
  description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
  },
  "deepseek-reasoner": {
@@ -1451,14 +1488,14 @@ var deepSeekModels = {
  contextWindow: 128e3,
  supportsImages: false,
  supportsPromptCache: true,
- inputPrice: 0.55,
- // $0.55 per million tokens (cache miss)
- outputPrice: 2.19,
- // $2.19 per million tokens
- cacheWritesPrice: 0.55,
- // $0.55 per million tokens (cache miss)
- cacheReadsPrice: 0.14,
- // $0.14 per million tokens (cache hit)
+ inputPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+ outputPrice: 1.68,
+ // $1.68 per million tokens - Updated Sept 5, 2025
+ cacheWritesPrice: 0.56,
+ // $0.56 per million tokens (cache miss) - Updated Sept 5, 2025
+ cacheReadsPrice: 0.07,
+ // $0.07 per million tokens (cache hit) - Updated Sept 5, 2025
  description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`
  }
  };
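
Both DeepSeek entries now carry the same September 5, 2025 price card: $0.56 per million input tokens on a cache miss, $1.68 per million output tokens, and $0.07 per million on a cache hit. A worked example at those rates (token counts are illustrative):

    // Illustrative cost of one deepseek-chat call at the updated prices.
    const perMillion = { input: 0.56, cacheRead: 0.07, output: 1.68 } // USD per 1M tokens
    const usage = { uncachedInput: 90_000, cachedInput: 10_000, output: 4_000 }
    const usd =
      (usage.uncachedInput / 1e6) * perMillion.input +   // 0.0504
      (usage.cachedInput / 1e6) * perMillion.cacheRead + // 0.0007
      (usage.output / 1e6) * perMillion.output           // 0.00672
    console.log(usd.toFixed(4)) // "0.0578"
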
@@ -1568,8 +1605,18 @@ var featherlessModels = {
  var featherlessDefaultModelId = "deepseek-ai/DeepSeek-R1-0528";

  // src/providers/fireworks.ts
- var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct";
+ var fireworksDefaultModelId = "accounts/fireworks/models/kimi-k2-instruct-0905";
  var fireworksModels = {
+ "accounts/fireworks/models/kimi-k2-instruct-0905": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheReadsPrice: 0.15,
+ description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+ },
  "accounts/fireworks/models/kimi-k2-instruct": {
  maxTokens: 16384,
  contextWindow: 128e3,
@@ -1978,7 +2025,7 @@ var glamaDefaultModelInfo = {
  var GLAMA_DEFAULT_TEMPERATURE = 0;

  // src/providers/groq.ts
- var groqDefaultModelId = "llama-3.3-70b-versatile";
+ var groqDefaultModelId = "moonshotai/kimi-k2-instruct-0905";
  var groqModels = {
  // Models based on API response: https://api.groq.com/openai/v1/models
  "llama-3.1-8b-instant": {
@@ -2064,6 +2111,16 @@ var groqModels = {
  // 50% discount for cached input tokens
  description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context."
  },
+ "moonshotai/kimi-k2-instruct-0905": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheReadsPrice: 0.15,
+ description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+ },
  "openai/gpt-oss-120b": {
  maxTokens: 32766,
  contextWindow: 131072,
@@ -2274,7 +2331,7 @@ var mistralModels = {
  var MISTRAL_DEFAULT_TEMPERATURE = 0;

  // src/providers/moonshot.ts
- var moonshotDefaultModelId = "kimi-k2-0711-preview";
+ var moonshotDefaultModelId = "kimi-k2-0905-preview";
  var moonshotModels = {
  "kimi-k2-0711-preview": {
  maxTokens: 32e3,
@@ -2290,6 +2347,31 @@ var moonshotModels = {
  cacheReadsPrice: 0.15,
  // $0.15 per million tokens (cache hit)
  description: `Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters.`
+ },
+ "kimi-k2-0905-preview": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.6,
+ outputPrice: 2.5,
+ cacheReadsPrice: 0.15,
+ description: "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support."
+ },
+ "kimi-k2-turbo-preview": {
+ maxTokens: 32e3,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 2.4,
+ // $2.40 per million tokens (cache miss)
+ outputPrice: 10,
+ // $10.00 per million tokens
+ cacheWritesPrice: 0,
+ // $0 per million tokens (cache miss)
+ cacheReadsPrice: 0.6,
+ // $0.60 per million tokens (cache hit)
+ description: `Kimi K2 Turbo is a high-speed version of the state-of-the-art Kimi K2 mixture-of-experts (MoE) language model, with the same 32 billion activated parameters and 1 trillion total parameters, optimized for output speeds of up to 60 tokens per second, peaking at 100 tokens per second.`
  }
  };
  var MOONSHOT_DEFAULT_TEMPERATURE = 0.6;
@@ -2337,7 +2419,11 @@ var openAiNativeModels = {
  description: "GPT-5: The best model for coding and agentic tasks across domains",
  // supportsVerbosity is a new capability; ensure ModelInfo includes it
  supportsVerbosity: true,
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 4e5, inputPrice: 0.625, outputPrice: 5, cacheReadsPrice: 0.0625 },
+ { name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }
+ ]
  },
  "gpt-5-mini-2025-08-07": {
  maxTokens: 128e3,
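
Reading the flex tier above with the per-million-token unit used by the pricing comments elsewhere in this file ($0.625 input, $5 output, $0.0625 cache reads), a rough cost sketch for a single large request:

    // Illustrative gpt-5 request cost at the "flex" tier rates above.
    const flex = { input: 0.625, cacheRead: 0.0625, output: 5 } // USD per 1M tokens
    const tokens = { freshInput: 150_000, cachedInput: 50_000, output: 10_000 }
    const cost =
      (tokens.freshInput / 1e6) * flex.input +      // 0.09375
      (tokens.cachedInput / 1e6) * flex.cacheRead + // 0.003125
      (tokens.output / 1e6) * flex.output           // 0.05
    console.log(cost.toFixed(6)) // "0.146875"
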
@@ -2351,7 +2437,11 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.03,
  description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
  supportsVerbosity: true,
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 4e5, inputPrice: 0.125, outputPrice: 1, cacheReadsPrice: 0.0125 },
+ { name: "priority", contextWindow: 4e5, inputPrice: 0.45, outputPrice: 3.6, cacheReadsPrice: 0.045 }
+ ]
  },
  "gpt-5-nano-2025-08-07": {
  maxTokens: 128e3,
@@ -2365,7 +2455,8 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.01,
  description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
  supportsVerbosity: true,
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [{ name: "flex", contextWindow: 4e5, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 25e-4 }]
  },
  "gpt-4.1": {
  maxTokens: 32768,
@@ -2375,7 +2466,10 @@ var openAiNativeModels = {
  inputPrice: 2,
  outputPrice: 8,
  cacheReadsPrice: 0.5,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 1047576, inputPrice: 3.5, outputPrice: 14, cacheReadsPrice: 0.875 }
+ ]
  },
  "gpt-4.1-mini": {
  maxTokens: 32768,
@@ -2385,7 +2479,10 @@ var openAiNativeModels = {
  inputPrice: 0.4,
  outputPrice: 1.6,
  cacheReadsPrice: 0.1,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 1047576, inputPrice: 0.7, outputPrice: 2.8, cacheReadsPrice: 0.175 }
+ ]
  },
  "gpt-4.1-nano": {
  maxTokens: 32768,
@@ -2395,7 +2492,10 @@ var openAiNativeModels = {
  inputPrice: 0.1,
  outputPrice: 0.4,
  cacheReadsPrice: 0.025,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 1047576, inputPrice: 0.2, outputPrice: 0.8, cacheReadsPrice: 0.05 }
+ ]
  },
  o3: {
  maxTokens: 1e5,
@@ -2407,7 +2507,11 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.5,
  supportsReasoningEffort: true,
  reasoningEffort: "medium",
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 2e5, inputPrice: 1, outputPrice: 4, cacheReadsPrice: 0.25 },
+ { name: "priority", contextWindow: 2e5, inputPrice: 3.5, outputPrice: 14, cacheReadsPrice: 0.875 }
+ ]
  },
  "o3-high": {
  maxTokens: 1e5,
@@ -2441,7 +2545,11 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.275,
  supportsReasoningEffort: true,
  reasoningEffort: "medium",
- supportsTemperature: false
+ supportsTemperature: false,
+ tiers: [
+ { name: "flex", contextWindow: 2e5, inputPrice: 0.55, outputPrice: 2.2, cacheReadsPrice: 0.138 },
+ { name: "priority", contextWindow: 2e5, inputPrice: 2, outputPrice: 8, cacheReadsPrice: 0.5 }
+ ]
  },
  "o4-mini-high": {
  maxTokens: 1e5,
@@ -2537,7 +2645,10 @@ var openAiNativeModels = {
  inputPrice: 2.5,
  outputPrice: 10,
  cacheReadsPrice: 1.25,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 128e3, inputPrice: 4.25, outputPrice: 17, cacheReadsPrice: 2.125 }
+ ]
  },
  "gpt-4o-mini": {
  maxTokens: 16384,
@@ -2547,7 +2658,10 @@ var openAiNativeModels = {
  inputPrice: 0.15,
  outputPrice: 0.6,
  cacheReadsPrice: 0.075,
- supportsTemperature: true
+ supportsTemperature: true,
+ tiers: [
+ { name: "priority", contextWindow: 128e3, inputPrice: 0.25, outputPrice: 1, cacheReadsPrice: 0.125 }
+ ]
  },
  "codex-mini-latest": {
  maxTokens: 16384,
@@ -3088,6 +3202,60 @@ var vertexModels = {
  inputPrice: 0.35,
  outputPrice: 1.15,
  description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
+ },
+ "deepseek-r1-0528-maas": {
+ maxTokens: 32768,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 1.35,
+ outputPrice: 5.4,
+ description: "DeepSeek R1 (0528). Available in us-central1"
+ },
+ "deepseek-v3.1-maas": {
+ maxTokens: 32768,
+ contextWindow: 163840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.6,
+ outputPrice: 1.7,
+ description: "DeepSeek V3.1. Available in us-west2"
+ },
+ "gpt-oss-120b-maas": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.15,
+ outputPrice: 0.6,
+ description: "OpenAI gpt-oss 120B. Available in us-central1"
+ },
+ "gpt-oss-20b-maas": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.075,
+ outputPrice: 0.3,
+ description: "OpenAI gpt-oss 20B. Available in us-central1"
+ },
+ "qwen3-coder-480b-a35b-instruct-maas": {
+ maxTokens: 32768,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 1,
+ outputPrice: 4,
+ description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1"
+ },
+ "qwen3-235b-a22b-instruct-2507-maas": {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.25,
+ outputPrice: 1,
+ description: "Qwen3 235B A22B Instruct. Available in us-south1"
  }
  };
  var VERTEX_REGIONS = [
@@ -3096,6 +3264,7 @@ var VERTEX_REGIONS = [
  { value: "us-east1", label: "us-east1" },
  { value: "us-east4", label: "us-east4" },
  { value: "us-east5", label: "us-east5" },
+ { value: "us-south1", label: "us-south1" },
  { value: "us-west1", label: "us-west1" },
  { value: "us-west2", label: "us-west2" },
  { value: "us-west3", label: "us-west3" },
@@ -3588,6 +3757,28 @@ var mainlandZAiModels = {
  }
  };
  var ZAI_DEFAULT_TEMPERATURE = 0;
+ var zaiApiLineConfigs = {
+ international_coding: {
+ name: "International Coding Plan",
+ baseUrl: "https://api.z.ai/api/coding/paas/v4",
+ isChina: false
+ },
+ international: { name: "International Standard", baseUrl: "https://api.z.ai/api/paas/v4", isChina: false },
+ china_coding: { name: "China Coding Plan", baseUrl: "https://open.bigmodel.cn/api/coding/paas/v4", isChina: true },
+ china: { name: "China Standard", baseUrl: "https://open.bigmodel.cn/api/paas/v4", isChina: true }
+ };
+
+ // src/providers/deepinfra.ts
+ var deepInfraDefaultModelId = "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo";
+ var deepInfraDefaultModelInfo = {
+ maxTokens: 16384,
+ contextWindow: 262144,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.3,
+ outputPrice: 1.2,
+ description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context."
+ };

  // src/provider-settings.ts
  var providerNames = [
@@ -3607,6 +3798,7 @@ var providerNames = [
  "mistral",
  "moonshot",
  "deepseek",
+ "deepinfra",
  "doubao",
  "qwen-code",
  "unbound",
@@ -3747,7 +3939,10 @@ var geminiCliSchema = apiModelIdProviderModelSchema.extend({
  });
  var openAiNativeSchema = apiModelIdProviderModelSchema.extend({
  openAiNativeApiKey: z7.string().optional(),
- openAiNativeBaseUrl: z7.string().optional()
+ openAiNativeBaseUrl: z7.string().optional(),
+ // OpenAI Responses API service tier for openai-native provider only.
+ // UI should only expose this when the selected model supports flex/priority.
+ openAiNativeServiceTier: serviceTierSchema.optional()
  });
  var mistralSchema = apiModelIdProviderModelSchema.extend({
  mistralApiKey: z7.string().optional(),
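
openAiNativeServiceTier reuses the serviceTierSchema defined earlier; per the inline comment, it applies to the openai-native provider only and the UI decides when to surface it. A sketch of settings this schema would accept, assuming providerSettingsSchema is exported as in previous releases (values are placeholders):

    import { providerSettingsSchema } from "@roo-code/types"

    // Hypothetical openai-native settings including the new service tier.
    const settings = providerSettingsSchema.parse({
      apiProvider: "openai-native",
      apiModelId: "gpt-5-2025-08-07",
      openAiNativeApiKey: "sk-...",    // placeholder secret
      openAiNativeServiceTier: "flex", // must be "default" | "flex" | "priority"
    })
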
@@ -3757,6 +3952,11 @@ var deepSeekSchema = apiModelIdProviderModelSchema.extend({
  deepSeekBaseUrl: z7.string().optional(),
  deepSeekApiKey: z7.string().optional()
  });
+ var deepInfraSchema = apiModelIdProviderModelSchema.extend({
+ deepInfraBaseUrl: z7.string().optional(),
+ deepInfraApiKey: z7.string().optional(),
+ deepInfraModelId: z7.string().optional()
+ });
  var doubaoSchema = apiModelIdProviderModelSchema.extend({
  doubaoBaseUrl: z7.string().optional(),
  doubaoApiKey: z7.string().optional()
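
The new DeepInfra schema follows the house pattern for dynamic providers: an optional base URL, an API key (also registered in SECRET_STATE_KEYS further down), and a provider-specific model id key. A sketch of a settings object it would accept (values are placeholders):

    // Hypothetical DeepInfra provider settings; every field is optional.
    const deepInfraSettings = {
      apiProvider: "deepinfra",
      deepInfraApiKey: "...", // stored via SECRET_STATE_KEYS
      deepInfraModelId: "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
      // deepInfraBaseUrl: "https://...", // optional endpoint override
    }
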
@@ -3804,9 +4004,10 @@ var cerebrasSchema = apiModelIdProviderModelSchema.extend({
  var sambaNovaSchema = apiModelIdProviderModelSchema.extend({
  sambaNovaApiKey: z7.string().optional()
  });
+ var zaiApiLineSchema = z7.enum(["international_coding", "international", "china_coding", "china"]);
  var zaiSchema = apiModelIdProviderModelSchema.extend({
  zaiApiKey: z7.string().optional(),
- zaiApiLine: z7.union([z7.literal("china"), z7.literal("international")]).optional()
+ zaiApiLine: zaiApiLineSchema.optional()
  });
  var fireworksSchema = apiModelIdProviderModelSchema.extend({
  fireworksApiKey: z7.string().optional()
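
zaiApiLineSchema widens the old china/international union to four lines, each of which keys into the zaiApiLineConfigs table added earlier in this diff. Both names are exported (see the final export hunk), so endpoint resolution reduces to a lookup:

    // Validate a Z.ai line and resolve its endpoint configuration.
    import { zaiApiLineSchema, zaiApiLineConfigs } from "@roo-code/types"

    const line = zaiApiLineSchema.parse("china_coding")
    const { name, baseUrl, isChina } = zaiApiLineConfigs[line]
    // "China Coding Plan", "https://open.bigmodel.cn/api/coding/paas/v4", true
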
@@ -3847,6 +4048,7 @@ var providerSettingsSchemaDiscriminated = z7.discriminatedUnion("apiProvider", [
  openAiNativeSchema.merge(z7.object({ apiProvider: z7.literal("openai-native") })),
  mistralSchema.merge(z7.object({ apiProvider: z7.literal("mistral") })),
  deepSeekSchema.merge(z7.object({ apiProvider: z7.literal("deepseek") })),
+ deepInfraSchema.merge(z7.object({ apiProvider: z7.literal("deepinfra") })),
  doubaoSchema.merge(z7.object({ apiProvider: z7.literal("doubao") })),
  moonshotSchema.merge(z7.object({ apiProvider: z7.literal("moonshot") })),
  unboundSchema.merge(z7.object({ apiProvider: z7.literal("unbound") })),
@@ -3886,6 +4088,7 @@ var providerSettingsSchema = z7.object({
  ...openAiNativeSchema.shape,
  ...mistralSchema.shape,
  ...deepSeekSchema.shape,
+ ...deepInfraSchema.shape,
  ...doubaoSchema.shape,
  ...moonshotSchema.shape,
  ...unboundSchema.shape,
@@ -3926,7 +4129,8 @@ var MODEL_ID_KEYS = [
  "litellmModelId",
  "huggingFaceModelId",
  "ioIntelligenceModelId",
- "vercelAiGatewayModelId"
+ "vercelAiGatewayModelId",
+ "deepInfraModelId"
  ];
  var getModelId = (settings) => {
  const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key]);
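
getModelId scans MODEL_ID_KEYS in order and uses the first key that is set, so array position is precedence; deepInfraModelId is appended at the lowest priority. A sketch of that lookup (the key list is abbreviated to the entries visible in this hunk, and the return shape is assumed):

    // First populated MODEL_ID_KEYS entry wins.
    const keys = ["litellmModelId", "huggingFaceModelId", "ioIntelligenceModelId", "vercelAiGatewayModelId", "deepInfraModelId"]
    const getModelIdSketch = (settings: Record<string, string | undefined>) => {
      const key = keys.find((k) => settings[k])
      return key ? settings[key] : undefined
    }

    getModelIdSketch({ deepInfraModelId: "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo" })
    // "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo"
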
@@ -4035,6 +4239,7 @@ var MODELS_BY_PROVIDER = {
  openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
  requesty: { id: "requesty", label: "Requesty", models: [] },
  unbound: { id: "unbound", label: "Unbound", models: [] },
+ deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] },
  "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }
  };
  var dynamicProviders = [
@@ -4044,6 +4249,7 @@ var dynamicProviders = [
  "openrouter",
  "requesty",
  "unbound",
+ "deepinfra",
  "vercel-ai-gateway"
  ];
  var isDynamicProvider = (key) => dynamicProviders.includes(key);
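
Adding "deepinfra" to both MODELS_BY_PROVIDER (with an empty static models list) and dynamicProviders marks it as a provider whose model catalog is fetched at runtime; isDynamicProvider is just a membership test. A local sketch (the list is abbreviated to the entries visible in this hunk):

    // Narrowing guard over the dynamic provider list.
    const dynamicProvidersSketch = ["openrouter", "requesty", "unbound", "deepinfra", "vercel-ai-gateway"] as const
    type DynamicProvider = (typeof dynamicProvidersSketch)[number]
    const isDynamic = (key: string): key is DynamicProvider =>
      (dynamicProvidersSketch as readonly string[]).includes(key)

    isDynamic("deepinfra") // true: models come from the provider's API
    isDynamic("moonshot")  // false: models come from the static table above
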
@@ -4411,6 +4617,7 @@ var globalSettingsSchema = z13.object({
  lastShownAnnouncementId: z13.string().optional(),
  customInstructions: z13.string().optional(),
  taskHistory: z13.array(historyItemSchema).optional(),
+ dismissedUpsells: z13.array(z13.string()).optional(),
  // Image generation settings (experimental) - flattened for simplicity
  openRouterImageApiKey: z13.string().optional(),
  openRouterImageGenerationSelectedModel: z13.string().optional(),
@@ -4492,7 +4699,6 @@ var globalSettingsSchema = z13.object({
  telemetrySetting: telemetrySettingsSchema.optional(),
  mcpEnabled: z13.boolean().optional(),
  enableMcpServerCreation: z13.boolean().optional(),
- remoteControlEnabled: z13.boolean().optional(),
  mode: z13.string().optional(),
  modeApiConfigs: z13.record(z13.string(), z13.string()).optional(),
  customModes: z13.array(modeConfigSchema).optional(),
@@ -4531,6 +4737,7 @@ var SECRET_STATE_KEYS = [
  "groqApiKey",
  "chutesApiKey",
  "litellmApiKey",
+ "deepInfraApiKey",
  "codeIndexOpenAiKey",
  "codeIndexQdrantApiKey",
  "codebaseIndexOpenAiCompatibleApiKey",
@@ -4614,7 +4821,6 @@ var EVALS_SETTINGS = {
  language: "en",
  telemetrySetting: "enabled",
  mcpEnabled: false,
- remoteControlEnabled: false,
  mode: "code",
  // "architect",
  customModes: []
@@ -4724,7 +4930,8 @@ var userFeaturesSchema = z15.object({
  roomoteControlEnabled: z15.boolean().optional()
  });
  var userSettingsConfigSchema = z15.object({
- extensionBridgeEnabled: z15.boolean().optional()
+ extensionBridgeEnabled: z15.boolean().optional(),
+ taskSyncEnabled: z15.boolean().optional()
  });
  var userSettingsDataSchema = z15.object({
  features: userFeaturesSchema,
@@ -5027,6 +5234,12 @@ var TaskSocketEvents = /* @__PURE__ */ ((TaskSocketEvents2) => {
  return TaskSocketEvents2;
  })(TaskSocketEvents || {});

+ // src/cookie-consent.ts
+ var CONSENT_COOKIE_NAME = "roo-code-cookie-consent";
+ var COOKIE_CONSENT_EVENTS = {
+ CHANGED: "cookieConsentChanged"
+ };
+
  // src/followup.ts
  import { z as z16 } from "zod";
  var suggestionItemSchema = z16.object({
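
CONSENT_COOKIE_NAME and COOKIE_CONSENT_EVENTS are plain constants, both added to the export list below. A hedged browser-side sketch of how a consumer might use them; whether the event is dispatched as a DOM CustomEvent, and its payload shape, are assumptions, not defined by this package:

    import { CONSENT_COOKIE_NAME, COOKIE_CONSENT_EVENTS } from "@roo-code/types"

    // Assumed: the host app dispatches a CustomEvent under this name.
    window.addEventListener(COOKIE_CONSENT_EVENTS.CHANGED, (e) => {
      console.log("consent changed:", (e as CustomEvent).detail) // payload shape assumed
    })

    const hasConsent = document.cookie
      .split("; ")
      .some((c) => c.startsWith(`${CONSENT_COOKIE_NAME}=`))
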
@@ -5187,6 +5400,8 @@ export {
  BEDROCK_REGIONS,
  CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
  CODEBASE_INDEX_DEFAULTS,
+ CONSENT_COOKIE_NAME,
+ COOKIE_CONSENT_EVENTS,
  ConnectionState,
  DEEP_SEEK_DEFAULT_TEMPERATURE,
  DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
@@ -5279,6 +5494,8 @@ export {
  customModePromptsSchema,
  customModesSettingsSchema,
  customSupportPromptsSchema,
+ deepInfraDefaultModelId,
+ deepInfraDefaultModelInfo,
  deepSeekDefaultModelId,
  deepSeekModels,
  discriminatedProviderSettingsWithIdSchema,
@@ -5386,6 +5603,8 @@ export {
  rooModels,
  sambaNovaDefaultModelId,
  sambaNovaModels,
+ serviceTierSchema,
+ serviceTiers,
  shareResponseSchema,
  shouldUseSingleFileRead,
  staticAppPropertiesSchema,
@@ -5423,6 +5642,8 @@ export {
  vscodeLlmDefaultModelId,
  vscodeLlmModels,
  xaiDefaultModelId,
- xaiModels
+ xaiModels,
+ zaiApiLineConfigs,
+ zaiApiLineSchema
  };
  //# sourceMappingURL=index.js.map