@roo-code/types 1.96.0 → 1.100.0

This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -55,6 +55,7 @@ var clineSays = [
   "api_req_finished",
   "api_req_retried",
   "api_req_retry_delayed",
+  "api_req_rate_limit_wait",
   "api_req_deleted",
   "text",
   "image",
@@ -1199,39 +1200,6 @@ var bedrockModels = {
     inputPrice: 0.25,
     outputPrice: 1.25
   },
-  "anthropic.claude-2-1-v1:0": {
-    maxTokens: 4096,
-    contextWindow: 1e5,
-    supportsImages: false,
-    supportsPromptCache: false,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
-    inputPrice: 8,
-    outputPrice: 24,
-    description: "Claude 2.1"
-  },
-  "anthropic.claude-2-0-v1:0": {
-    maxTokens: 4096,
-    contextWindow: 1e5,
-    supportsImages: false,
-    supportsPromptCache: false,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
-    inputPrice: 8,
-    outputPrice: 24,
-    description: "Claude 2.0"
-  },
-  "anthropic.claude-instant-v1:0": {
-    maxTokens: 4096,
-    contextWindow: 1e5,
-    supportsImages: false,
-    supportsPromptCache: false,
-    supportsNativeTools: true,
-    defaultToolProtocol: "native",
-    inputPrice: 0.8,
-    outputPrice: 2.4,
-    description: "Claude Instant"
-  },
   "deepseek.r1-v1:0": {
     maxTokens: 32768,
     contextWindow: 128e3,
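
The three retired Claude 2.x / Instant entries no longer exist in bedrockModels, so a Bedrock model id saved by an older profile may no longer resolve. A minimal defensive lookup (sketch only; bedrockModels is exported by the package, and the fallback id is a caller-supplied placeholder):

import { bedrockModels } from "@roo-code/types";

// Return the saved Bedrock model id if it still exists in this release,
// otherwise fall back to a caller-supplied default.
function resolveBedrockModelId(savedId: string, fallbackId: string): string {
  return savedId in bedrockModels ? savedId : fallbackId;
}

// "anthropic.claude-2-1-v1:0" was removed in this release, so this returns the fallback:
resolveBedrockModelId("anthropic.claude-2-1-v1:0", "your-default-bedrock-model-id");
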
@@ -1531,7 +1499,19 @@ var cerebrasModels = {
     defaultToolProtocol: "native",
     inputPrice: 0,
     outputPrice: 0,
-    description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
+    description: "Fast general-purpose model on Cerebras (up to 1,000 tokens/s). To be deprecated soon."
+  },
+  "zai-glm-4.7": {
+    maxTokens: 16384,
+    // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront)
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    supportsNativeTools: true,
+    defaultToolProtocol: "native",
+    inputPrice: 0,
+    outputPrice: 0,
+    description: "Highly capable general-purpose model on Cerebras (up to 1,000 tokens/s), competitive with leading proprietary models on coding tasks."
   },
   "qwen-3-235b-a22b-instruct-2507": {
     maxTokens: 16384,
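
The new zai-glm-4.7 entry can be read from the exported cerebrasModels map like any other model. A short sketch (field values as declared above; the clamping logic is illustrative, not part of the package):

import { cerebrasModels } from "@roo-code/types";

const glm = cerebrasModels["zai-glm-4.7"];
console.log(glm.contextWindow);       // 131072
console.log(glm.supportsNativeTools); // true

// maxTokens is deliberately conservative (16384) because Cerebras reserves
// quota up front, so clamp completion requests to it:
const requestedCompletionTokens = 32768;
const completionTokens = Math.min(requestedCompletionTokens, glm.maxTokens); // 16384
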
@@ -2282,6 +2262,20 @@ var fireworksModels = {
     outputPrice: 2.5,
     description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
   },
+  "accounts/fireworks/models/kimi-k2-thinking": {
+    maxTokens: 16e3,
+    contextWindow: 256e3,
+    supportsImages: false,
+    supportsPromptCache: true,
+    supportsNativeTools: true,
+    supportsTemperature: true,
+    preserveReasoning: true,
+    defaultTemperature: 1,
+    inputPrice: 0.6,
+    outputPrice: 2.5,
+    cacheReadsPrice: 0.15,
+    description: "The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems."
+  },
   "accounts/fireworks/models/minimax-m2": {
     maxTokens: 4096,
     contextWindow: 204800,
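
A rough cost estimate from the new entry's pricing fields. This sketch assumes fireworksModels is exported like the other model maps in this diff and that prices are USD per million tokens (the usual convention for these maps, but an assumption here):

import { fireworksModels } from "@roo-code/types";

const kimi = fireworksModels["accounts/fireworks/models/kimi-k2-thinking"];

// Hypothetical request: 40k uncached input tokens, 200k cache reads, 8k output tokens.
const inputTokens = 40_000;
const cacheReadTokens = 200_000;
const outputTokens = 8_000;

const estimatedCostUsd =
  (inputTokens / 1_000_000) * (kimi.inputPrice ?? 0) +          // 40k  * $0.60/M = $0.024
  (cacheReadTokens / 1_000_000) * (kimi.cacheReadsPrice ?? 0) + // 200k * $0.15/M = $0.030
  (outputTokens / 1_000_000) * (kimi.outputPrice ?? 0);         // 8k   * $2.50/M = $0.020
console.log(estimatedCostUsd); // ≈ 0.074
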
@@ -5239,7 +5233,6 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
       return vercelAiGatewayDefaultModelId;
     case "anthropic":
     case "gemini-cli":
-    case "human-relay":
     case "fake-ai":
     default:
       return anthropicDefaultModelId;
@@ -5267,7 +5260,7 @@ var internalProviders = ["vscode-lm"];
 var isInternalProvider = (key) => internalProviders.includes(key);
 var customProviders = ["openai"];
 var isCustomProvider = (key) => customProviders.includes(key);
-var fauxProviders = ["fake-ai", "human-relay"];
+var fauxProviders = ["fake-ai"];
 var isFauxProvider = (key) => fauxProviders.includes(key);
 var providerNames = [
   ...dynamicProviders,
@@ -5458,7 +5451,6 @@ var requestySchema = baseProviderSettingsSchema.extend({
   requestyApiKey: z8.string().optional(),
   requestyModelId: z8.string().optional()
 });
-var humanRelaySchema = baseProviderSettingsSchema;
 var fakeAiSchema = baseProviderSettingsSchema.extend({
   fakeAi: z8.unknown().optional()
 });
@@ -5507,7 +5499,8 @@ var qwenCodeSchema = apiModelIdProviderModelSchema.extend({
   qwenCodeOauthPath: z8.string().optional()
 });
 var rooSchema = apiModelIdProviderModelSchema.extend({
-  // No additional fields needed - uses cloud authentication.
+  // Can use cloud authentication or provide an API key (cli).
+  rooApiKey: z8.string().optional()
 });
 var vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
   vercelAiGatewayApiKey: z8.string().optional(),
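
rooSchema now accepts an optional rooApiKey, so a Roo provider profile can validate either with cloud authentication alone or with an explicit API key (e.g. from the CLI). The sketch below mirrors the extended shape rather than importing the bundle-internal rooSchema; the apiModelId field is an assumed inheritance from apiModelIdProviderModelSchema:

import { z } from "zod";

// Illustrative mirror of the updated rooSchema shape.
const rooSettingsSketch = z.object({
  apiModelId: z.string().optional(), // assumed inherited field
  rooApiKey: z.string().optional(),  // new in this release: API-key auth for the CLI
});

rooSettingsSketch.parse({});                               // still valid: cloud auth, no key
rooSettingsSketch.parse({ rooApiKey: "roo-example-key" }); // valid: explicit API key
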
@@ -5540,7 +5533,6 @@ var providerSettingsSchemaDiscriminated = z8.discriminatedUnion("apiProvider", [
   minimaxSchema.merge(z8.object({ apiProvider: z8.literal("minimax") })),
   unboundSchema.merge(z8.object({ apiProvider: z8.literal("unbound") })),
   requestySchema.merge(z8.object({ apiProvider: z8.literal("requesty") })),
-  humanRelaySchema.merge(z8.object({ apiProvider: z8.literal("human-relay") })),
   fakeAiSchema.merge(z8.object({ apiProvider: z8.literal("fake-ai") })),
   xaiSchema.merge(z8.object({ apiProvider: z8.literal("xai") })),
   groqSchema.merge(z8.object({ apiProvider: z8.literal("groq") })),
@@ -5581,7 +5573,6 @@ var providerSettingsSchema = z8.object({
   ...minimaxSchema.shape,
   ...unboundSchema.shape,
   ...requestySchema.shape,
-  ...humanRelaySchema.shape,
   ...fakeAiSchema.shape,
   ...xaiSchema.shape,
   ...groqSchema.shape,
@@ -5798,6 +5789,8 @@ var historyItemSchema = z9.object({
    * This ensures task resumption works correctly even when NTC settings change.
    */
   toolProtocol: z9.enum(["xml", "native"]).optional(),
+  apiConfigName: z9.string().optional(),
+  // Provider profile name for sticky profile feature
   status: z9.enum(["active", "completed", "delegated"]).optional(),
   delegatedToId: z9.string().optional(),
   // Last child this parent delegated to
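
The new apiConfigName field records which provider profile a task was created with, so resuming the task can stick to that profile. historyItemSchema is exported, so the field can be checked directly; .partial() is used below only to avoid listing the schema's required fields in this sketch:

import { historyItemSchema } from "@roo-code/types";

// Validate just the sticky-profile field without supplying a full history item.
const parsed = historyItemSchema.partial().parse({ apiConfigName: "my-provider-profile" });
console.log(parsed.apiConfigName); // "my-provider-profile"
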
@@ -6250,10 +6243,6 @@ var commandIds = [
   "cloudButtonClicked",
   "settingsButtonClicked",
   "openInNewTab",
-  "showHumanRelayDialog",
-  "registerHumanRelayCallback",
-  "unregisterHumanRelayCallback",
-  "handleHumanRelayResponse",
   "newTask",
   "setCustomStoragePath",
   "importSettings",
@@ -6371,6 +6360,7 @@ var globalSettingsSchema = z14.object({
   maxOpenTabsContext: z14.number().optional(),
   maxWorkspaceFiles: z14.number().optional(),
   showRooIgnoredFiles: z14.boolean().optional(),
+  enableSubfolderRules: z14.boolean().optional(),
   maxReadFileLine: z14.number().optional(),
   maxImageFileSize: z14.number().optional(),
   maxTotalImageSize: z14.number().optional(),
@@ -7185,6 +7175,24 @@ var commandExecutionStatusSchema = z22.discriminatedUnion("status", [
     status: z22.literal("timeout")
   })
 ]);
+
+// src/vscode-extension-host.ts
+import { z as z23 } from "zod";
+var checkoutDiffPayloadSchema = z23.object({
+  ts: z23.number().optional(),
+  previousCommitHash: z23.string().optional(),
+  commitHash: z23.string(),
+  mode: z23.enum(["full", "checkpoint", "from-init", "to-current"])
+});
+var checkoutRestorePayloadSchema = z23.object({
+  ts: z23.number(),
+  commitHash: z23.string(),
+  mode: z23.enum(["preview", "restore"])
+});
+var installMarketplaceItemWithParametersPayloadSchema = z23.object({
+  item: marketplaceItemSchema,
+  parameters: z23.record(z23.string(), z23.any())
+});
 export {
   ANTHROPIC_DEFAULT_MAX_TOKENS,
   ANTHROPIC_STYLE_PROVIDERS,
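
Both checkout payload schemas are added to the package exports (see the export hunks below), so host messages can be validated before use. A small sketch with made-up commit hashes:

import { checkoutDiffPayloadSchema, checkoutRestorePayloadSchema } from "@roo-code/types";

// Valid diff payload: only commitHash and mode are required.
checkoutDiffPayloadSchema.parse({
  commitHash: "abc1234",
  mode: "checkpoint",
});

// Restore payloads also require a timestamp and only allow "preview" or "restore":
const restore = checkoutRestorePayloadSchema.safeParse({
  ts: Date.now(),
  commitHash: "abc1234",
  mode: "rollback", // not in the enum, so success === false
});
console.log(restore.success); // false
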
@@ -7285,6 +7293,8 @@ export {
   bedrockModels,
   cerebrasDefaultModelId,
   cerebrasModels,
+  checkoutDiffPayloadSchema,
+  checkoutRestorePayloadSchema,
   chutesDefaultModelId,
   chutesDefaultModelInfo,
   chutesModels,
@@ -7353,6 +7363,7 @@ export {
   historyItemSchema,
   idleAsks,
   installMarketplaceItemOptionsSchema,
+  installMarketplaceItemWithParametersPayloadSchema,
   interactiveAsks,
   internalProviders,
   internationalZAiDefaultModelId,