@roo-code/types 1.105.0 → 1.106.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -251,6 +251,8 @@ __export(index_exports, {
  normalizeClaudeCodeModelId: () => normalizeClaudeCodeModelId,
  ollamaDefaultModelId: () => ollamaDefaultModelId,
  ollamaDefaultModelInfo: () => ollamaDefaultModelInfo,
+ openAiCodexDefaultModelId: () => openAiCodexDefaultModelId,
+ openAiCodexModels: () => openAiCodexModels,
  openAiModelInfoSaneDefaults: () => openAiModelInfoSaneDefaults,
  openAiNativeDefaultModelId: () => openAiNativeDefaultModelId,
  openAiNativeModels: () => openAiNativeModels,
@@ -3368,6 +3370,25 @@ var openAiNativeModels = {
  ],
  description: "GPT-5.2: Our flagship model for coding and agentic tasks across industries"
  },
+ "gpt-5.2-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ promptCacheRetention: "24h",
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 1.75,
+ outputPrice: 14,
+ cacheReadsPrice: 0.175,
+ supportsTemperature: false,
+ tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 3.5, outputPrice: 28, cacheReadsPrice: 0.35 }],
+ description: "GPT-5.2 Codex: Our most intelligent coding model optimized for long-horizon, agentic coding tasks"
+ },
  "gpt-5.2-chat-latest": {
  maxTokens: 16384,
  contextWindow: 128e3,
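
The "gpt-5.2-codex" entry added above is the only new model in this release with per-token pricing; the Codex subscription models further down set all prices to 0. A minimal cost-estimation sketch, assuming the price fields are USD per million tokens (a convention not stated in this diff):

// Illustrative only — price-field semantics are an assumption, not package documentation.
import { openAiNativeModels } from "@roo-code/types";

const codex = openAiNativeModels["gpt-5.2-codex"];

// Estimated USD cost of one request, assuming prices are per 1M tokens.
function estimateCostUSD(inputTokens: number, cachedInputTokens: number, outputTokens: number): number {
	const uncachedInput = inputTokens - cachedInputTokens;
	return (
		(uncachedInput * (codex.inputPrice ?? 0) +
			cachedInputTokens * (codex.cacheReadsPrice ?? 0) +
			outputTokens * (codex.outputPrice ?? 0)) /
		1_000_000
	);
}

// 100k prompt tokens, 80k served from the prompt cache, 10k completion tokens:
// (20000 * 1.75 + 80000 * 0.175 + 10000 * 14) / 1e6 ≈ 0.189
console.log(estimateCostUSD(100_000, 80_000, 10_000));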
@@ -3874,6 +3895,76 @@ var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
  var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
  var OPENAI_AZURE_AI_INFERENCE_PATH = "/models/chat/completions";
 
+ // src/providers/openai-codex.ts
+ var openAiCodexDefaultModelId = "gpt-5.2-codex";
+ var openAiCodexModels = {
+ "gpt-5.1-codex-max": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "xhigh",
+ // Subscription-based: no per-token costs
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.1 Codex Max: Maximum capability coding model via ChatGPT subscription"
+ },
+ "gpt-5.2-codex": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.2 Codex: OpenAI's flagship coding model via ChatGPT subscription"
+ },
+ "gpt-5.1-codex-mini": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["low", "medium", "high"],
+ reasoningEffort: "medium",
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.1 Codex Mini: Faster version for coding tasks via ChatGPT subscription"
+ },
+ "gpt-5.2": {
+ maxTokens: 128e3,
+ contextWindow: 4e5,
+ supportsNativeTools: true,
+ defaultToolProtocol: "native",
+ includedTools: ["apply_patch"],
+ excludedTools: ["apply_diff", "write_to_file"],
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+ reasoningEffort: "medium",
+ inputPrice: 0,
+ outputPrice: 0,
+ supportsTemperature: false,
+ description: "GPT-5.2: Latest GPT model via ChatGPT subscription"
+ }
+ };
+
  // src/providers/openrouter.ts
  var openRouterDefaultModelId = "anthropic/claude-sonnet-4.5";
  var openRouterDefaultModelInfo = {
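
For orientation, a consumer-side sketch of the two new exports introduced by this module (illustrative; the values shown come straight from the definitions above):

import { openAiCodexDefaultModelId, openAiCodexModels } from "@roo-code/types";

// The default id points at the "gpt-5.2-codex" entry defined above.
const info = openAiCodexModels[openAiCodexDefaultModelId];

console.log(info.contextWindow);       // 400000 (4e5)
console.log(info.maxTokens);           // 128000 (128e3)
console.log(info.reasoningEffort);     // "medium"
console.log(info.supportsTemperature); // false
console.log(info.inputPrice);          // 0 — subscription-based, no per-token cost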
@@ -5475,6 +5566,8 @@ function getProviderDefaultModelId(provider, options = { isChina: false }) {
  case "openai-native":
  return "gpt-4o";
  // Based on openai-native patterns
+ case "openai-codex":
+ return openAiCodexDefaultModelId;
  case "mistral":
  return mistralDefaultModelId;
  case "openai":
@@ -5560,6 +5653,7 @@ var providerNames = [
  "mistral",
  "moonshot",
  "minimax",
+ "openai-codex",
  "openai-native",
  "qwen-code",
  "roo",
@@ -5686,6 +5780,9 @@ var geminiCliSchema = apiModelIdProviderModelSchema.extend({
  geminiCliOAuthPath: import_zod8.z.string().optional(),
  geminiCliProjectId: import_zod8.z.string().optional()
  });
+ var openAiCodexSchema = apiModelIdProviderModelSchema.extend({
+ // No additional settings needed - uses OAuth authentication
+ });
  var openAiNativeSchema = apiModelIdProviderModelSchema.extend({
  openAiNativeApiKey: import_zod8.z.string().optional(),
  openAiNativeBaseUrl: import_zod8.z.string().optional(),
@@ -5800,6 +5897,7 @@ var providerSettingsSchemaDiscriminated = import_zod8.z.discriminatedUnion("apiP
  lmStudioSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("lmstudio") })),
  geminiSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("gemini") })),
  geminiCliSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("gemini-cli") })),
+ openAiCodexSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openai-codex") })),
  openAiNativeSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("openai-native") })),
  mistralSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("mistral") })),
  deepSeekSchema.merge(import_zod8.z.object({ apiProvider: import_zod8.z.literal("deepseek") })),
@@ -5840,6 +5938,7 @@ var providerSettingsSchema = import_zod8.z.object({
  ...lmStudioSchema.shape,
  ...geminiSchema.shape,
  ...geminiCliSchema.shape,
+ ...openAiCodexSchema.shape,
  ...openAiNativeSchema.shape,
  ...mistralSchema.shape,
  ...deepSeekSchema.shape,
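
The new openAiCodexSchema adds no fields of its own (authentication is OAuth-based), so an "openai-codex" profile needs only the discriminator plus the apiModelId it inherits from apiModelIdProviderModelSchema. A validation sketch, assuming the discriminated-union schema is exported under the name used in the bundle above:

import { providerSettingsSchemaDiscriminated } from "@roo-code/types";

const result = providerSettingsSchemaDiscriminated.safeParse({
	apiProvider: "openai-codex", // new literal added to the union above
	apiModelId: "gpt-5.2-codex",
});
console.log(result.success);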
@@ -5898,6 +5997,7 @@ var modelIdKeysByProvider = {
  openrouter: "openRouterModelId",
  bedrock: "apiModelId",
  vertex: "apiModelId",
+ "openai-codex": "apiModelId",
  "openai-native": "openAiModelId",
  ollama: "ollamaModelId",
  lmstudio: "lmStudioModelId",
@@ -5999,6 +6099,11 @@ var MODELS_BY_PROVIDER = {
  label: "MiniMax",
  models: Object.keys(minimaxModels)
  },
+ "openai-codex": {
+ id: "openai-codex",
+ label: "OpenAI - ChatGPT Plus/Pro",
+ models: Object.keys(openAiCodexModels)
+ },
  "openai-native": {
  id: "openai-native",
  label: "OpenAI",
@@ -6910,7 +7015,8 @@ var userFeaturesSchema = import_zod16.z.object({
  });
  var userSettingsConfigSchema = import_zod16.z.object({
  extensionBridgeEnabled: import_zod16.z.boolean().optional(),
- taskSyncEnabled: import_zod16.z.boolean().optional()
+ taskSyncEnabled: import_zod16.z.boolean().optional(),
+ llmEnhancedFeaturesEnabled: import_zod16.z.boolean().optional()
  });
  var userSettingsDataSchema = import_zod16.z.object({
  features: userFeaturesSchema,
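
The new llmEnhancedFeaturesEnabled flag is optional, so existing settings payloads keep validating. A sketch, assuming userSettingsConfigSchema is exported:

import { userSettingsConfigSchema } from "@roo-code/types";

console.log(userSettingsConfigSchema.safeParse({}).success);                                   // true — every field is optional
console.log(userSettingsConfigSchema.safeParse({ llmEnhancedFeaturesEnabled: true }).success); // true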
@@ -7716,6 +7822,8 @@ var browserActions = [
  normalizeClaudeCodeModelId,
  ollamaDefaultModelId,
  ollamaDefaultModelInfo,
+ openAiCodexDefaultModelId,
+ openAiCodexModels,
  openAiModelInfoSaneDefaults,
  openAiNativeDefaultModelId,
  openAiNativeModels,