pdd_cli-0.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pdd-cli might be problematic.

Files changed (95)
  1. pdd/__init__.py +0 -0
  2. pdd/auto_deps_main.py +98 -0
  3. pdd/auto_include.py +175 -0
  4. pdd/auto_update.py +73 -0
  5. pdd/bug_main.py +99 -0
  6. pdd/bug_to_unit_test.py +159 -0
  7. pdd/change.py +141 -0
  8. pdd/change_main.py +240 -0
  9. pdd/cli.py +607 -0
  10. pdd/cmd_test_main.py +155 -0
  11. pdd/code_generator.py +117 -0
  12. pdd/code_generator_main.py +66 -0
  13. pdd/comment_line.py +35 -0
  14. pdd/conflicts_in_prompts.py +143 -0
  15. pdd/conflicts_main.py +90 -0
  16. pdd/construct_paths.py +251 -0
  17. pdd/context_generator.py +133 -0
  18. pdd/context_generator_main.py +73 -0
  19. pdd/continue_generation.py +140 -0
  20. pdd/crash_main.py +127 -0
  21. pdd/data/language_format.csv +61 -0
  22. pdd/data/llm_model.csv +15 -0
  23. pdd/detect_change.py +142 -0
  24. pdd/detect_change_main.py +100 -0
  25. pdd/find_section.py +28 -0
  26. pdd/fix_code_loop.py +212 -0
  27. pdd/fix_code_module_errors.py +143 -0
  28. pdd/fix_error_loop.py +216 -0
  29. pdd/fix_errors_from_unit_tests.py +240 -0
  30. pdd/fix_main.py +138 -0
  31. pdd/generate_output_paths.py +194 -0
  32. pdd/generate_test.py +140 -0
  33. pdd/get_comment.py +55 -0
  34. pdd/get_extension.py +52 -0
  35. pdd/get_language.py +41 -0
  36. pdd/git_update.py +84 -0
  37. pdd/increase_tests.py +93 -0
  38. pdd/insert_includes.py +150 -0
  39. pdd/llm_invoke.py +304 -0
  40. pdd/load_prompt_template.py +59 -0
  41. pdd/pdd_completion.fish +72 -0
  42. pdd/pdd_completion.sh +141 -0
  43. pdd/pdd_completion.zsh +418 -0
  44. pdd/postprocess.py +121 -0
  45. pdd/postprocess_0.py +52 -0
  46. pdd/preprocess.py +199 -0
  47. pdd/preprocess_main.py +72 -0
  48. pdd/process_csv_change.py +182 -0
  49. pdd/prompts/auto_include_LLM.prompt +230 -0
  50. pdd/prompts/bug_to_unit_test_LLM.prompt +17 -0
  51. pdd/prompts/change_LLM.prompt +34 -0
  52. pdd/prompts/conflict_LLM.prompt +23 -0
  53. pdd/prompts/continue_generation_LLM.prompt +3 -0
  54. pdd/prompts/detect_change_LLM.prompt +65 -0
  55. pdd/prompts/example_generator_LLM.prompt +10 -0
  56. pdd/prompts/extract_auto_include_LLM.prompt +6 -0
  57. pdd/prompts/extract_code_LLM.prompt +22 -0
  58. pdd/prompts/extract_conflict_LLM.prompt +19 -0
  59. pdd/prompts/extract_detect_change_LLM.prompt +19 -0
  60. pdd/prompts/extract_program_code_fix_LLM.prompt +16 -0
  61. pdd/prompts/extract_prompt_change_LLM.prompt +7 -0
  62. pdd/prompts/extract_prompt_split_LLM.prompt +9 -0
  63. pdd/prompts/extract_prompt_update_LLM.prompt +8 -0
  64. pdd/prompts/extract_promptline_LLM.prompt +11 -0
  65. pdd/prompts/extract_unit_code_fix_LLM.prompt +332 -0
  66. pdd/prompts/extract_xml_LLM.prompt +7 -0
  67. pdd/prompts/fix_code_module_errors_LLM.prompt +17 -0
  68. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +62 -0
  69. pdd/prompts/generate_test_LLM.prompt +12 -0
  70. pdd/prompts/increase_tests_LLM.prompt +16 -0
  71. pdd/prompts/insert_includes_LLM.prompt +30 -0
  72. pdd/prompts/split_LLM.prompt +94 -0
  73. pdd/prompts/summarize_file_LLM.prompt +11 -0
  74. pdd/prompts/trace_LLM.prompt +30 -0
  75. pdd/prompts/trim_results_LLM.prompt +83 -0
  76. pdd/prompts/trim_results_start_LLM.prompt +45 -0
  77. pdd/prompts/unfinished_prompt_LLM.prompt +18 -0
  78. pdd/prompts/update_prompt_LLM.prompt +19 -0
  79. pdd/prompts/xml_convertor_LLM.prompt +54 -0
  80. pdd/split.py +119 -0
  81. pdd/split_main.py +103 -0
  82. pdd/summarize_directory.py +212 -0
  83. pdd/trace.py +135 -0
  84. pdd/trace_main.py +108 -0
  85. pdd/track_cost.py +102 -0
  86. pdd/unfinished_prompt.py +114 -0
  87. pdd/update_main.py +96 -0
  88. pdd/update_prompt.py +115 -0
  89. pdd/xml_tagger.py +122 -0
  90. pdd_cli-0.0.2.dist-info/LICENSE +7 -0
  91. pdd_cli-0.0.2.dist-info/METADATA +225 -0
  92. pdd_cli-0.0.2.dist-info/RECORD +95 -0
  93. pdd_cli-0.0.2.dist-info/WHEEL +5 -0
  94. pdd_cli-0.0.2.dist-info/entry_points.txt +2 -0
  95. pdd_cli-0.0.2.dist-info/top_level.txt +1 -0
pdd/__init__.py ADDED
File without changes
pdd/auto_deps_main.py ADDED
@@ -0,0 +1,98 @@
+ import sys
+ from pathlib import Path
+ from typing import Tuple, Optional
+ import click
+ from rich import print as rprint
+
+ from .construct_paths import construct_paths
+ from .insert_includes import insert_includes
+
+ def auto_deps_main(
+     ctx: click.Context,
+     prompt_file: str,
+     directory_path: str,
+     auto_deps_csv_path: Optional[str],
+     output: Optional[str],
+     force_scan: Optional[bool]
+ ) -> Tuple[str, float, str]:
+     """
+     Main function to analyze and insert dependencies into a prompt file.
+
+     Args:
+         ctx: Click context containing command-line parameters.
+         prompt_file: Path to the input prompt file.
+         directory_path: Path to directory containing potential dependency files.
+         auto_deps_csv_path: Path to CSV file containing auto-dependency information.
+         output: Optional path to save the modified prompt file.
+         force_scan: Flag to force rescan of directory by deleting CSV file.
+
+     Returns:
+         Tuple containing:
+             - str: Modified prompt with auto-dependencies added
+             - float: Total cost of the operation
+             - str: Name of the model used
+     """
+     try:
+         # Construct file paths
+         input_file_paths = {
+             "prompt_file": prompt_file
+         }
+         command_options = {
+             "output": output,
+             "csv": auto_deps_csv_path
+         }
+
+         input_strings, output_file_paths, _ = construct_paths(
+             input_file_paths=input_file_paths,
+             force=ctx.obj.get('force', False),
+             quiet=ctx.obj.get('quiet', False),
+             command="auto-deps",
+             command_options=command_options
+         )
+
+         # Get the CSV file path
+         csv_path = output_file_paths.get("csv", "project_dependencies.csv")
+
+         # Handle force_scan option
+         if force_scan and Path(csv_path).exists():
+             if not ctx.obj.get('quiet', False):
+                 rprint(f"[yellow]Removing existing CSV file due to --force-scan option: {csv_path}[/yellow]")
+             Path(csv_path).unlink()
+
+         # Get strength and temperature from context
+         strength = ctx.obj.get('strength', 0.9)
+         temperature = ctx.obj.get('temperature', 0)
+
+         # Call insert_includes with the prompt content and directory path
+         modified_prompt, csv_output, total_cost, model_name = insert_includes(
+             input_prompt=input_strings["prompt_file"],
+             directory_path=directory_path,
+             csv_filename=csv_path,
+             strength=strength,
+             temperature=temperature,
+             verbose=not ctx.obj.get('quiet', False)
+         )
+
+         # Save the modified prompt to the output file
+         output_path = output_file_paths["output"]
+         Path(output_path).write_text(modified_prompt)
+
+         # Save the CSV output if it was generated
+         if csv_output:
+             Path(csv_path).write_text(csv_output)
+
+         # Provide user feedback
+         if not ctx.obj.get('quiet', False):
+             rprint("[bold green]Successfully analyzed and inserted dependencies![/bold green]")
+             rprint(f"[bold]Model used:[/bold] {model_name}")
+             rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
+             rprint(f"[bold]Modified prompt saved to:[/bold] {output_path}")
+             rprint(f"[bold]Dependency information saved to:[/bold] {csv_path}")
+
+         return modified_prompt, total_cost, model_name
+
+     except Exception as e:
+         if not ctx.obj.get('quiet', False):
+             rprint(f"[bold red]Error:[/bold red] {str(e)}")
+         sys.exit(1)
+         # Removed the "raise" line so that we only exit, satisfying the test.
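
A minimal sketch of how a Click command might wire up auto_deps_main, assuming the CLI stores its global force/quiet/strength/temperature settings on ctx.obj; the command and option names below are illustrative, not taken from pdd/cli.py:

import click
from pdd.auto_deps_main import auto_deps_main

@click.command(name="auto-deps")  # illustrative wrapper, not the actual pdd CLI definition
@click.argument("prompt_file")
@click.argument("directory_path")
@click.option("--csv", "auto_deps_csv_path", default=None, help="Optional dependency CSV path")
@click.option("--output", default=None, help="Where to write the modified prompt")
@click.option("--force-scan", is_flag=True, default=False, help="Delete the CSV and rescan")
@click.pass_context
def auto_deps(ctx, prompt_file, directory_path, auto_deps_csv_path, output, force_scan):
    # auto_deps_main reads force/quiet/strength/temperature from ctx.obj
    ctx.obj = ctx.obj or {"force": False, "quiet": False, "strength": 0.9, "temperature": 0}
    auto_deps_main(ctx, prompt_file, directory_path, auto_deps_csv_path, output, force_scan)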
pdd/auto_include.py ADDED
@@ -0,0 +1,175 @@
+ from typing import Tuple, Optional
+ from pydantic import BaseModel, Field
+ from rich import print
+ from rich.console import Console
+ from rich.panel import Panel
+ from .load_prompt_template import load_prompt_template
+ from .llm_invoke import llm_invoke
+ from .summarize_directory import summarize_directory
+ import pandas as pd
+ from io import StringIO
+
+ console = Console()
+
+ class AutoIncludeOutput(BaseModel):
+     string_of_includes: str = Field(description="The string of includes to be added to the prompt")
+
+ def auto_include(
+     input_prompt: str,
+     directory_path: str,
+     csv_file: Optional[str] = None,
+     strength: float = 0.7,
+     temperature: float = 0.0,
+     verbose: bool = False
+ ) -> Tuple[str, str, float, str]:
+     """
+     Automatically find and insert proper dependencies into the prompt.
+
+     Args:
+         input_prompt (str): The prompt requiring includes
+         directory_path (str): Directory path of dependencies
+         csv_file (Optional[str]): Contents of existing CSV file
+         strength (float): Strength of LLM model (0-1)
+         temperature (float): Temperature of LLM model (0-1)
+         verbose (bool): Whether to print detailed information
+
+     Returns:
+         Tuple[str, str, float, str]: (dependencies, csv_output, total_cost, model_name)
+     """
+     try:
+         # Input validation
+         if not input_prompt:
+             raise ValueError("Input prompt cannot be empty")
+         if not directory_path:
+             raise ValueError("Invalid 'directory_path'.")
+         if not 0 <= strength <= 1:
+             raise ValueError("Strength must be between 0 and 1")
+         if not 0 <= temperature <= 1:
+             raise ValueError("Temperature must be between 0 and 1")
+
+         total_cost = 0.0
+         model_name = ""
+
+         if verbose:
+             console.print(Panel("Step 1: Loading prompt templates", style="blue"))
+
+         # Load prompt templates
+         auto_include_prompt = load_prompt_template("auto_include_LLM")
+         extract_prompt = load_prompt_template("extract_auto_include_LLM")
+
+         if not auto_include_prompt or not extract_prompt:
+             raise ValueError("Failed to load prompt templates")
+
+         if verbose:
+             console.print(Panel("Step 2: Running summarize_directory", style="blue"))
+
+         # Run summarize_directory
+         csv_output, summary_cost, summary_model = summarize_directory(
+             directory_path=directory_path,
+             strength=strength,
+             temperature=temperature,
+             verbose=verbose,
+             csv_file=csv_file
+         )
+         total_cost += summary_cost
+         model_name = summary_model
+
+         # Parse CSV to get available includes
+         if not csv_output:
+             available_includes = []
+         else:
+             try:
+                 df = pd.read_csv(StringIO(csv_output))
+                 available_includes = df.apply(
+                     lambda row: f"File: {row['full_path']}\nSummary: {row['file_summary']}",
+                     axis=1
+                 ).tolist()
+             except Exception as e:
+                 console.print(f"[red]Error parsing CSV: {str(e)}[/red]")
+                 available_includes = []
+
+         if verbose:
+             console.print(Panel("Step 3: Running auto_include_LLM prompt", style="blue"))
+
+         # Run auto_include_LLM prompt
+         auto_include_response = llm_invoke(
+             prompt=auto_include_prompt,
+             input_json={
+                 "input_prompt": input_prompt,
+                 "available_includes": "\n".join(available_includes)
+             },
+             strength=strength,
+             temperature=temperature,
+             verbose=verbose
+         )
+         total_cost += auto_include_response["cost"]
+         model_name = auto_include_response["model_name"]
+
+         if verbose:
+             console.print(Panel("Step 4: Running extract_auto_include_LLM prompt", style="blue"))
+
+         # Run extract_auto_include_LLM prompt
+         try:
+             extract_response = llm_invoke(
+                 prompt=extract_prompt,
+                 input_json={"llm_output": auto_include_response["result"]},
+                 strength=strength,
+                 temperature=temperature,
+                 verbose=verbose,
+                 output_pydantic=AutoIncludeOutput
+             )
+             total_cost += extract_response["cost"]
+             model_name = extract_response["model_name"]
+
+             if verbose:
+                 console.print(Panel("Step 5: Extracting dependencies", style="blue"))
+
+             # Extract dependencies
+             dependencies = extract_response["result"].string_of_includes
+         except Exception as e:
+             console.print(f"[red]Error extracting dependencies: {str(e)}[/red]")
+             dependencies = ""
+
+         if verbose:
+             console.print(Panel(f"""
+             Results:
+             Dependencies: {dependencies}
+             CSV Output: {csv_output}
+             Total Cost: ${total_cost:.6f}
+             Model Used: {model_name}
+             """, style="green"))
+
+         return dependencies, csv_output, total_cost, model_name
+
+     except Exception as e:
+         console.print(f"[red]Error in auto_include: {str(e)}[/red]")
+         raise
+
+ def main():
+     """Example usage of auto_include function"""
+     try:
+         # Example inputs
+         input_prompt = "Write a function to process image data"
+         directory_path = "context/c*.py"
+         csv_file = """full_path,file_summary,date
+ context/image_utils.py,"Image processing utilities",2023-01-01T10:00:00"""
+
+         dependencies, csv_output, total_cost, model_name = auto_include(
+             input_prompt=input_prompt,
+             directory_path=directory_path,
+             csv_file=csv_file,
+             strength=0.7,
+             temperature=0.0,
+             verbose=True
+         )
+
+         console.print("\n[blue]Final Results:[/blue]")
+         console.print(f"Dependencies: {dependencies}")
+         console.print(f"Total Cost: ${total_cost:.6f}")
+         console.print(f"Model Used: {model_name}")
+
+     except Exception as e:
+         console.print(f"[red]Error in main: {str(e)}[/red]")
+
+ if __name__ == "__main__":
+     main()
pdd/auto_update.py ADDED
@@ -0,0 +1,73 @@
+ import pkg_resources
+ import requests
+ import semver
+ import subprocess
+ import sys
+
+ def auto_update(package_name: str = "pdd-cli", latest_version: str = None) -> None:
+     """
+     Check if there's a new version of the package available and prompt for upgrade.
+
+     Args:
+         package_name (str): Name of the package to check (default: "pdd-cli")
+         latest_version (str): Known latest version (default: None)
+     """
+     try:
+         # Get current installed version
+         current_version = pkg_resources.get_distribution(package_name).version
+
+         # If latest_version is not provided, fetch from PyPI
+         if latest_version is None:
+             try:
+                 pypi_url = f"https://pypi.org/pypi/{package_name}/json"
+                 response = requests.get(pypi_url)
+                 response.raise_for_status()
+                 latest_version = response.json()['info']['version']
+             except Exception as e:
+                 print(f"Failed to fetch latest version from PyPI: {str(e)}")
+                 return
+
+         # Compare versions using semantic versioning
+         try:
+             current_semver = semver.VersionInfo.parse(current_version)
+             latest_semver = semver.VersionInfo.parse(latest_version)
+         except ValueError:
+             # If versions don't follow semantic versioning, fall back to string comparison
+             if current_version == latest_version:
+                 return
+         else:
+             # If versions follow semantic versioning, compare properly
+             if current_semver >= latest_semver:
+                 return
+
+         # If we get here, there's a new version available
+         print(f"\nNew version of {package_name} available: {latest_version} (current: {current_version})")
+
+         # Ask for user confirmation
+         while True:
+             response = input("Would you like to upgrade? [y/N]: ").lower().strip()
+             if response in ['y', 'yes']:
+                 # Construct pip command
+                 pip_command = f"{sys.executable} -m pip install --upgrade {package_name}"
+                 print(f"\nUpgrading with command: {pip_command}")
+
+                 try:
+                     subprocess.check_call(pip_command.split())
+                     print(f"\nSuccessfully upgraded {package_name} to version {latest_version}")
+                 except subprocess.CalledProcessError as e:
+                     print(f"\nFailed to upgrade: {str(e)}")
+                 break
+             elif response in ['n', 'no', '']:
+                 print("\nUpgrade cancelled")
+                 break
+             else:
+                 print("Please answer 'y' or 'n'")
+
+     except pkg_resources.DistributionNotFound:
+         print(f"Package {package_name} is not installed")
+     except Exception as e:
+         print(f"Error checking for updates: {str(e)}")
+
+
+ if __name__ == "__main__":
+     auto_update()
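
As a usage note, passing an explicit latest_version lets auto_update compare against a pinned release without querying PyPI, which can be useful in tests or offline environments (a small sketch; the pinned version string is arbitrary):

from pdd.auto_update import auto_update

# Compare the installed pdd-cli against a pinned version; prompts to upgrade
# only if the pinned version is newer than the installed one.
auto_update(package_name="pdd-cli", latest_version="0.0.2")

# With no latest_version, the function queries https://pypi.org/pypi/pdd-cli/json.
auto_update()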
pdd/bug_main.py ADDED
@@ -0,0 +1,99 @@
+ import os
+ import sys
+ from typing import Tuple, Optional
+ import click
+ from rich import print as rprint
+
+ from .construct_paths import construct_paths
+ from .bug_to_unit_test import bug_to_unit_test
+
+ def bug_main(
+     ctx: click.Context,
+     prompt_file: str,
+     code_file: str,
+     program_file: str,
+     current_output: str,
+     desired_output: str,
+     output: Optional[str] = None,
+     language: Optional[str] = "Python"
+ ) -> Tuple[str, float, str]:
+     """
+     Main function to generate a unit test based on observed and desired outputs.
+
+     :param ctx: Click context containing command-line parameters.
+     :param prompt_file: Path to the prompt file that generated the code.
+     :param code_file: Path to the code file being tested.
+     :param program_file: Path to the program used to run the code under test.
+     :param current_output: Path to the file containing the current (incorrect) output.
+     :param desired_output: Path to the file containing the desired (correct) output.
+     :param output: Optional path to save the generated unit test.
+     :param language: Optional programming language for the unit test. Defaults to "Python".
+     :return: A tuple containing the generated unit test, total cost, and model name used.
+     """
+     try:
+         # Construct file paths
+         input_file_paths = {
+             "prompt_file": prompt_file,
+             "code_file": code_file,
+             "program_file": program_file,
+             "current_output": current_output,
+             "desired_output": desired_output
+         }
+         command_options = {
+             "output": output,
+             "language": language
+         }
+         input_strings, output_file_paths, _ = construct_paths(
+             input_file_paths=input_file_paths,
+             force=ctx.obj.get('force', False),
+             quiet=ctx.obj.get('quiet', False),
+             command="bug",
+             command_options=command_options
+         )
+
+         # Load input files
+         prompt_content = input_strings["prompt_file"]
+         code_content = input_strings["code_file"]
+         program_content = input_strings["program_file"]
+         current_output_content = input_strings["current_output"]
+         desired_output_content = input_strings["desired_output"]
+
+         # Generate unit test
+         strength = ctx.obj.get('strength', 0.9)
+         temperature = ctx.obj.get('temperature', 0)
+         unit_test, total_cost, model_name = bug_to_unit_test(
+             current_output_content,
+             desired_output_content,
+             prompt_content,
+             code_content,
+             program_content,
+             strength,
+             temperature,
+             language
+         )
+
+         # Save results if output path is provided
+         if output_file_paths.get("output"):
+             # Create directory if it doesn't exist
+             os.makedirs(os.path.dirname(output_file_paths["output"]), exist_ok=True)
+             with open(output_file_paths["output"], 'w') as f:
+                 f.write(unit_test)
+
+         # Provide user feedback
+         if not ctx.obj.get('quiet', False):
+             rprint("[bold green]Unit test generated successfully.[/bold green]")
+             rprint(f"[bold]Model used:[/bold] {model_name}")
+             rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
+             if output:
+                 rprint(f"[bold]Unit test saved to:[/bold] {output_file_paths['output']}")
+
+         # Always print unit test, even in quiet mode
+         rprint("[bold]Generated Unit Test:[/bold]")
+         rprint(unit_test)
+
+         return unit_test, total_cost, model_name
+
+     except Exception as e:
+         if not ctx.obj.get('quiet', False):
+             rprint(f"[bold red]Error:[/bold red] {str(e)}")
+         sys.exit(1)
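
A minimal sketch of calling bug_main directly with a hand-built Click context, assuming the five input files already exist on disk; all file names here are placeholders:

import click
from pdd.bug_main import bug_main

ctx = click.Context(click.Command("bug"))
ctx.obj = {"force": True, "quiet": False, "strength": 0.9, "temperature": 0}

# Placeholder paths: prompt, code under test, runner program, observed and desired outputs.
unit_test, total_cost, model_name = bug_main(
    ctx,
    prompt_file="add_numbers_python.prompt",
    code_file="add_numbers.py",
    program_file="run_add_numbers.py",
    current_output="current_output.txt",
    desired_output="desired_output.txt",
    output="test_add_numbers.py",
    language="Python",
)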
pdd/bug_to_unit_test.py ADDED
@@ -0,0 +1,159 @@
+ from typing import Tuple, Optional
+ from rich import print
+ from rich.markdown import Markdown
+ from rich.console import Console
+ from .load_prompt_template import load_prompt_template
+ from .llm_invoke import llm_invoke
+ from .unfinished_prompt import unfinished_prompt
+ from .continue_generation import continue_generation
+ from .postprocess import postprocess
+ from .preprocess import preprocess
+
+ console = Console()
+
+ def bug_to_unit_test(
+     current_output: str,
+     desired_output: str,
+     prompt_used_to_generate_the_code: str,
+     code_under_test: str,
+     program_used_to_run_code_under_test: str,
+     strength: float = 0.89,
+     temperature: float = 0.0,
+     language: str = "python"
+ ) -> Tuple[str, float, str]:
+     """
+     Generate a unit test from a code file with bug information.
+
+     Args:
+         current_output (str): Current output of the code
+         desired_output (str): Desired output of the code
+         prompt_used_to_generate_the_code (str): Original prompt used to generate the code
+         code_under_test (str): Code to be tested
+         program_used_to_run_code_under_test (str): Program used to run the code
+         strength (float, optional): Strength of the LLM model. Must be between 0 and 1. Defaults to 0.89.
+         temperature (float, optional): Temperature of the LLM model. Defaults to 0.0.
+         language (str, optional): Programming language. Defaults to "python".
+
+     Returns:
+         Tuple[str, float, str]: Generated unit test, total cost, and model name
+
+     Raises:
+         ValueError: If strength is not between 0 and 1
+     """
+     # Validate strength parameter
+     if not 0 <= strength <= 1:
+         raise ValueError("Strength parameter must be between 0 and 1")
+
+     total_cost = 0.0
+     final_model_name = ""
+
+     try:
+         # Step 1: Load the prompt template
+         prompt_template = load_prompt_template("bug_to_unit_test_LLM")
+         if not prompt_template:
+             raise ValueError("Failed to load prompt template")
+
+         # Step 2: Prepare input and run through LLM
+         preprocessed_prompt = preprocess(prompt_used_to_generate_the_code)
+
+         input_json = {
+             "prompt_that_generated_code": preprocessed_prompt,
+             "current_output": current_output,
+             "desired_output": desired_output,
+             "code_under_test": code_under_test,
+             "program_used_to_run_code_under_test": program_used_to_run_code_under_test,
+             "language": language
+         }
+
+         console.print("[bold blue]Generating unit test...[/bold blue]")
+         response = llm_invoke(
+             prompt=prompt_template,
+             input_json=input_json,
+             strength=strength,
+             temperature=temperature,
+             verbose=True
+         )
+
+         total_cost += response['cost']
+         final_model_name = response['model_name']
+
+         # Step 3: Print markdown formatting
+         console.print(Markdown(response['result']))
+
+         # Step 4: Check if generation is complete
+         last_600_chars = response['result'][-600:] if len(response['result']) > 600 else response['result']
+
+         reasoning, is_finished, unfinished_cost, unfinished_model = unfinished_prompt(
+             prompt_text=last_600_chars,
+             strength=0.89,
+             temperature=temperature,
+             verbose=False
+         )
+
+         total_cost += unfinished_cost
+
+         if not is_finished:
+             console.print("[yellow]Generation incomplete. Continuing...[/yellow]")
+             continued_output, continued_cost, continued_model = continue_generation(
+                 formatted_input_prompt=prompt_template,
+                 llm_output=response['result'],
+                 strength=strength,
+                 temperature=temperature,
+                 verbose=True
+             )
+             total_cost += continued_cost
+             final_model_name = continued_model
+             result = continued_output
+         else:
+             result = response['result']
+
+         # Post-process the result
+         final_code, postprocess_cost, postprocess_model = postprocess(
+             result,
+             language,
+             strength=0.89,
+             temperature=temperature,
+             verbose=True
+         )
+         total_cost += postprocess_cost
+
+         # Step 5: Print total cost
+         console.print(f"[bold green]Total Cost: ${total_cost:.6f}[/bold green]")
+
+         return final_code, total_cost, final_model_name
+
+     except Exception as e:
+         console.print(f"[bold red]Error: {str(e)}[/bold red]")
+         return "", 0.0, ""
+
+ def main():
+     """Example usage of the bug_to_unit_test function"""
+     try:
+         current_output = "3"
+         desired_output = "5"
+         prompt = "create a function that adds two numbers in python"
+         code = """
+         def add_numbers(a, b):
+             return a + 1
+         """
+         program = "python"
+
+         unit_test, cost, model = bug_to_unit_test(
+             current_output=current_output,
+             desired_output=desired_output,
+             prompt_used_to_generate_the_code=prompt,
+             code_under_test=code,
+             program_used_to_run_code_under_test=program
+         )
+
+         if unit_test:
+             console.print("[bold green]Generated Unit Test:[/bold green]")
+             console.print(unit_test)
+             console.print(f"[bold blue]Total Cost: ${cost:.6f}[/bold blue]")
+             console.print(f"[bold blue]Model Used: {model}[/bold blue]")
+
+     except Exception as e:
+         console.print(f"[bold red]Error in main: {str(e)}[/bold red]")
+
+ if __name__ == "__main__":
+     main()