@roo-code/types 1.62.0 → 1.63.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -424,7 +424,8 @@ var toolNames = [
  "new_task",
  "fetch_instructions",
  "codebase_search",
- "update_todo_list"
+ "update_todo_list",
+ "generate_image"
  ];
  var toolNamesSchema = import_zod3.z.enum(toolNames);
  var toolUsageSchema = import_zod3.z.record(
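
The practical effect of this hunk is that "generate_image" becomes a valid member of the toolName enum. A minimal sketch, assuming toolNamesSchema is exported from the package entry point:

  const { toolNamesSchema } = require("@roo-code/types");
  // Accepted in 1.63.0; in 1.62.0 this would throw a ZodError because
  // "generate_image" was not part of the toolNames enum.
  toolNamesSchema.parse("generate_image");
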
@@ -603,12 +604,18 @@ var taskEventSchema = import_zod4.z.discriminatedUnion("eventName", [

  // src/experiment.ts
  var import_zod5 = require("zod");
- var experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption"];
+ var experimentIds = [
+ "powerSteering",
+ "multiFileApplyDiff",
+ "preventFocusDisruption",
+ "imageGeneration"
+ ];
  var experimentIdsSchema = import_zod5.z.enum(experimentIds);
  var experimentsSchema = import_zod5.z.object({
  powerSteering: import_zod5.z.boolean().optional(),
  multiFileApplyDiff: import_zod5.z.boolean().optional(),
- preventFocusDisruption: import_zod5.z.boolean().optional()
+ preventFocusDisruption: import_zod5.z.boolean().optional(),
+ imageGeneration: import_zod5.z.boolean().optional()
  });

  // src/followup.ts
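
The experiments schema gains a matching optional flag, so existing settings objects keep validating. A hedged sketch, again assuming experimentsSchema is part of the package's public exports:

  const { experimentsSchema } = require("@roo-code/types");
  // Every experiment flag is an optional boolean, so both of these parse.
  experimentsSchema.parse({ imageGeneration: true });
  experimentsSchema.parse({ powerSteering: true }); // imageGeneration simply omitted
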
@@ -648,6 +655,8 @@ var modelInfoSchema = import_zod7.z.object({
  // Capability flag to indicate whether the model supports an output verbosity parameter
  supportsVerbosity: import_zod7.z.boolean().optional(),
  supportsReasoningBudget: import_zod7.z.boolean().optional(),
+ // Capability flag to indicate whether the model supports temperature parameter
+ supportsTemperature: import_zod7.z.boolean().optional(),
  requiredReasoningBudget: import_zod7.z.boolean().optional(),
  supportsReasoningEffort: import_zod7.z.boolean().optional(),
  supportedParameters: import_zod7.z.array(modelParametersSchema).optional(),
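
Because supportsTemperature is optional on modelInfoSchema, model definitions that omit it remain valid. One way a consumer might use the flag is to drop the temperature parameter for models that reject it; the buildRequestOptions helper below is purely illustrative and not part of this package:

  // Hypothetical helper: gate the temperature option on the new capability flag.
  function buildRequestOptions(modelInfo, temperature) {
    const options = {};
    // Treat an undefined flag as "supported" so older model definitions keep working.
    if (modelInfo.supportsTemperature !== false && temperature !== undefined) {
      options.temperature = temperature;
    }
    return options;
  }
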
@@ -2559,7 +2568,8 @@ var openAiNativeModels = {
  cacheReadsPrice: 0.13,
  description: "GPT-5: The best model for coding and agentic tasks across domains",
  // supportsVerbosity is a new capability; ensure ModelInfo includes it
- supportsVerbosity: true
+ supportsVerbosity: true,
+ supportsTemperature: false
  },
  "gpt-5-mini-2025-08-07": {
  maxTokens: 128e3,
@@ -2572,7 +2582,8 @@ var openAiNativeModels = {
  outputPrice: 2,
  cacheReadsPrice: 0.03,
  description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
- supportsVerbosity: true
+ supportsVerbosity: true,
+ supportsTemperature: false
  },
  "gpt-5-nano-2025-08-07": {
  maxTokens: 128e3,
@@ -2585,7 +2596,8 @@ var openAiNativeModels = {
  outputPrice: 0.4,
  cacheReadsPrice: 0.01,
  description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
- supportsVerbosity: true
+ supportsVerbosity: true,
+ supportsTemperature: false
  },
  "gpt-4.1": {
  maxTokens: 32768,
@@ -2594,7 +2606,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 2,
  outputPrice: 8,
- cacheReadsPrice: 0.5
+ cacheReadsPrice: 0.5,
+ supportsTemperature: true
  },
  "gpt-4.1-mini": {
  maxTokens: 32768,
@@ -2603,7 +2616,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 0.4,
  outputPrice: 1.6,
- cacheReadsPrice: 0.1
+ cacheReadsPrice: 0.1,
+ supportsTemperature: true
  },
  "gpt-4.1-nano": {
  maxTokens: 32768,
@@ -2612,7 +2626,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 0.1,
  outputPrice: 0.4,
- cacheReadsPrice: 0.025
+ cacheReadsPrice: 0.025,
+ supportsTemperature: true
  },
  o3: {
  maxTokens: 1e5,
@@ -2623,7 +2638,8 @@ var openAiNativeModels = {
  outputPrice: 8,
  cacheReadsPrice: 0.5,
  supportsReasoningEffort: true,
- reasoningEffort: "medium"
+ reasoningEffort: "medium",
+ supportsTemperature: false
  },
  "o3-high": {
  maxTokens: 1e5,
@@ -2633,7 +2649,8 @@ var openAiNativeModels = {
  inputPrice: 2,
  outputPrice: 8,
  cacheReadsPrice: 0.5,
- reasoningEffort: "high"
+ reasoningEffort: "high",
+ supportsTemperature: false
  },
  "o3-low": {
  maxTokens: 1e5,
@@ -2643,7 +2660,8 @@ var openAiNativeModels = {
  inputPrice: 2,
  outputPrice: 8,
  cacheReadsPrice: 0.5,
- reasoningEffort: "low"
+ reasoningEffort: "low",
+ supportsTemperature: false
  },
  "o4-mini": {
  maxTokens: 1e5,
@@ -2654,7 +2672,8 @@ var openAiNativeModels = {
  outputPrice: 4.4,
  cacheReadsPrice: 0.275,
  supportsReasoningEffort: true,
- reasoningEffort: "medium"
+ reasoningEffort: "medium",
+ supportsTemperature: false
  },
  "o4-mini-high": {
  maxTokens: 1e5,
@@ -2664,7 +2683,8 @@ var openAiNativeModels = {
  inputPrice: 1.1,
  outputPrice: 4.4,
  cacheReadsPrice: 0.275,
- reasoningEffort: "high"
+ reasoningEffort: "high",
+ supportsTemperature: false
  },
  "o4-mini-low": {
  maxTokens: 1e5,
@@ -2674,7 +2694,8 @@ var openAiNativeModels = {
  inputPrice: 1.1,
  outputPrice: 4.4,
  cacheReadsPrice: 0.275,
- reasoningEffort: "low"
+ reasoningEffort: "low",
+ supportsTemperature: false
  },
  "o3-mini": {
  maxTokens: 1e5,
@@ -2685,7 +2706,8 @@ var openAiNativeModels = {
  outputPrice: 4.4,
  cacheReadsPrice: 0.55,
  supportsReasoningEffort: true,
- reasoningEffort: "medium"
+ reasoningEffort: "medium",
+ supportsTemperature: false
  },
  "o3-mini-high": {
  maxTokens: 1e5,
@@ -2695,7 +2717,8 @@ var openAiNativeModels = {
  inputPrice: 1.1,
  outputPrice: 4.4,
  cacheReadsPrice: 0.55,
- reasoningEffort: "high"
+ reasoningEffort: "high",
+ supportsTemperature: false
  },
  "o3-mini-low": {
  maxTokens: 1e5,
@@ -2705,7 +2728,8 @@ var openAiNativeModels = {
  inputPrice: 1.1,
  outputPrice: 4.4,
  cacheReadsPrice: 0.55,
- reasoningEffort: "low"
+ reasoningEffort: "low",
+ supportsTemperature: false
  },
  o1: {
  maxTokens: 1e5,
@@ -2714,7 +2738,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 15,
  outputPrice: 60,
- cacheReadsPrice: 7.5
+ cacheReadsPrice: 7.5,
+ supportsTemperature: false
  },
  "o1-preview": {
  maxTokens: 32768,
@@ -2723,7 +2748,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 15,
  outputPrice: 60,
- cacheReadsPrice: 7.5
+ cacheReadsPrice: 7.5,
+ supportsTemperature: false
  },
  "o1-mini": {
  maxTokens: 65536,
@@ -2732,7 +2758,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 1.1,
  outputPrice: 4.4,
- cacheReadsPrice: 0.55
+ cacheReadsPrice: 0.55,
+ supportsTemperature: false
  },
  "gpt-4o": {
  maxTokens: 16384,
@@ -2741,7 +2768,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 2.5,
  outputPrice: 10,
- cacheReadsPrice: 1.25
+ cacheReadsPrice: 1.25,
+ supportsTemperature: true
  },
  "gpt-4o-mini": {
  maxTokens: 16384,
@@ -2750,7 +2778,8 @@ var openAiNativeModels = {
  supportsPromptCache: true,
  inputPrice: 0.15,
  outputPrice: 0.6,
- cacheReadsPrice: 0.075
+ cacheReadsPrice: 0.075,
+ supportsTemperature: true
  },
  "codex-mini-latest": {
  maxTokens: 16384,
@@ -2760,6 +2789,7 @@ var openAiNativeModels = {
  inputPrice: 1.5,
  outputPrice: 6,
  cacheReadsPrice: 0,
+ supportsTemperature: false,
  description: "Codex Mini: Cloud-based software engineering agent powered by codex-1, a version of o3 optimized for coding tasks. Trained with reinforcement learning to generate human-style code, adhere to instructions, and iteratively run tests."
  }
  };
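
Taken together, these hunks mark the reasoning models (the o1, o3, o4-mini and codex-mini entries) with supportsTemperature: false and the gpt-4.1 and gpt-4o families with supportsTemperature: true. A quick sketch, assuming openAiNativeModels is re-exported from the package entry point:

  const { openAiNativeModels } = require("@roo-code/types");
  console.log(openAiNativeModels["gpt-4o"].supportsTemperature);  // true
  console.log(openAiNativeModels["o3-mini"].supportsTemperature); // false
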
@@ -3876,7 +3906,12 @@ var openRouterSchema = baseProviderSettingsSchema.extend({
  openRouterModelId: import_zod8.z.string().optional(),
  openRouterBaseUrl: import_zod8.z.string().optional(),
  openRouterSpecificProvider: import_zod8.z.string().optional(),
- openRouterUseMiddleOutTransform: import_zod8.z.boolean().optional()
+ openRouterUseMiddleOutTransform: import_zod8.z.boolean().optional(),
+ // Image generation settings (experimental)
+ openRouterImageGenerationSettings: import_zod8.z.object({
+ openRouterApiKey: import_zod8.z.string().optional(),
+ selectedModel: import_zod8.z.string().optional()
+ }).optional()
  });
  var bedrockSchema = apiModelIdProviderModelSchema.extend({
  awsAccessKey: import_zod8.z.string().optional(),
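
The new openRouterImageGenerationSettings field is an optional nested object, so existing OpenRouter profiles continue to validate unchanged. A sketch of the shape it accepts (all values below are placeholders):

  const openRouterSettings = {
    openRouterModelId: "some/openrouter-model",
    openRouterUseMiddleOutTransform: true,
    // New in 1.63.0: experimental image generation settings; both keys optional.
    openRouterImageGenerationSettings: {
      openRouterApiKey: "<openrouter-api-key>",
      selectedModel: "<image-generation-model-id>",
    },
  };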