pdd-cli 0.0.20-py3-none-any.whl → 0.0.21-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pdd-cli might be problematic.
- pdd/cli.py +1 -1
- pdd/context_generator.py +1 -1
- pdd/data/llm_model.csv +1 -1
- pdd/edit_file.py +783 -0
- pdd/fix_error_loop.py +218 -66
- pdd/fix_errors_from_unit_tests.py +366 -206
- pdd/fix_main.py +25 -6
- pdd/increase_tests.py +6 -3
- pdd/mcp_config.json +7 -0
- pdd/preprocess.py +0 -26
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +2 -2
- pdd/prompts/generate_test_LLM.prompt +11 -4
- {pdd_cli-0.0.20.dist-info → pdd_cli-0.0.21.dist-info}/METADATA +5 -4
- {pdd_cli-0.0.20.dist-info → pdd_cli-0.0.21.dist-info}/RECORD +18 -18
- {pdd_cli-0.0.20.dist-info → pdd_cli-0.0.21.dist-info}/WHEEL +1 -1
- pdd/preprocess copy.py +0 -234
- pdd/preprocess_copy_bahrat.py +0 -287
- {pdd_cli-0.0.20.dist-info → pdd_cli-0.0.21.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.20.dist-info → pdd_cli-0.0.21.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.20.dist-info → pdd_cli-0.0.21.dist-info}/top_level.txt +0 -0
pdd/fix_errors_from_unit_tests.py

@@ -1,246 +1,406 @@
 import os
-import
+import re
+import json
+import asyncio
+import tempfile
 from datetime import datetime
-from typing import Tuple, Optional
-
-
-from rich.markdown import Markdown
+from typing import Dict, Tuple, Any, Optional, List, Union
+import psutil # Add psutil import for process management
+
 from rich.console import Console
+from rich.markdown import Markdown
 from rich.panel import Panel
-from
+from rich.text import Text

-from .preprocess import preprocess
 from .load_prompt_template import load_prompt_template
 from .llm_invoke import llm_invoke
+from .preprocess import preprocess
+from .edit_file import edit_file, run_edit_in_subprocess
+from langchain_mcp_adapters.client import MultiServerMCPClient

 console = Console()

-
-    update_unit_test: bool = Field(description="Whether the unit test needs to be updated")
-    update_code: bool = Field(description="Whether the code needs to be updated")
-    fixed_unit_test: str = Field(description="The fixed unit test code")
-    fixed_code: str = Field(description="The fixed code under test")
-
-def validate_inputs(strength: float, temperature: float) -> None:
-    """Validate strength and temperature parameters."""
-    if not 0 <= strength <= 1:
-        raise ValueError("Strength must be between 0 and 1")
-    if not 0 <= temperature <= 1:
-        raise ValueError("Temperature must be between 0 and 1")
-
-def write_to_error_file(file_path: str, content: str) -> None:
-    """Write content to error file with timestamp and separator."""
-    try:
-        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        separator = f"\n{'='*80}\n{timestamp}\n{'='*80}\n"
-
-        # Ensure parent directory exists
-        parent_dir = os.path.dirname(file_path)
-        use_fallback = False
-
-        if parent_dir:
-            try:
-                os.makedirs(parent_dir, exist_ok=True)
-            except Exception as e:
-                console.print(f"[yellow]Warning: Could not create directory {parent_dir}: {str(e)}[/yellow]")
-                # Fallback to system temp directory
-                use_fallback = True
-                parent_dir = None
-
-        # Use atomic write with temporary file
-        try:
-            # First read existing content if file exists
-            existing_content = ""
-            if os.path.exists(file_path):
-                try:
-                    with open(file_path, 'r') as f:
-                        existing_content = f.read()
-                except Exception as e:
-                    console.print(f"[yellow]Warning: Could not read existing file {file_path}: {str(e)}[/yellow]")
-
-            # Write both existing and new content to temp file
-            with NamedTemporaryFile(mode='w', dir=parent_dir, delete=False) as tmp_file:
-                if existing_content:
-                    tmp_file.write(existing_content)
-                tmp_file.write(f"{separator}{content}\n")
-                tmp_path = tmp_file.name
-
-            # Only attempt atomic move if not using fallback
-            if not use_fallback:
-                try:
-                    os.replace(tmp_path, file_path)
-                except Exception as e:
-                    console.print(f"[yellow]Warning: Could not move file to {file_path}: {str(e)}[/yellow]")
-                    use_fallback = True
-
-            if use_fallback:
-                # Write to fallback location in system temp directory
-                fallback_path = os.path.join(tempfile.gettempdir(), os.path.basename(file_path))
-                try:
-                    os.replace(tmp_path, fallback_path)
-                    console.print(f"[yellow]Warning: Using fallback location: {fallback_path}[/yellow]")
-                except Exception as e:
-                    console.print(f"[red]Error writing to fallback location: {str(e)}[/red]")
-                    try:
-                        os.unlink(tmp_path)
-                    except:
-                        pass
-                    raise
-        except Exception as e:
-            console.print(f"[red]Error writing to error file: {str(e)}[/red]")
-            try:
-                os.unlink(tmp_path)
-            except:
-                pass
-            raise
-    except Exception as e:
-        console.print(f"[red]Error in write_to_error_file: {str(e)}[/red]")
-        raise
-
-def fix_errors_from_unit_tests(
+async def _fix_errors_from_unit_tests_async(
     unit_test: str,
     code: str,
     prompt: str,
     error: str,
     error_file: str,
     strength: float,
-    temperature: float,
+    temperature: float = 0.0,
     verbose: bool = False
-) -> Tuple[bool, bool, str, str, float, str]:
+) -> Tuple[bool, bool, str, str, str, float, str]:
     """
-    Fix
-
+    Fix unit test errors and warnings in code files.
+
     Args:
-        unit_test
-        code
-        prompt
-        error
-        error_file
-        strength
-        temperature
-        verbose
-
+        unit_test: The unit test code as a string
+        code: The code under test as a string
+        prompt: The prompt that generated the code under test
+        error: Errors and warnings that need to be fixed
+        error_file: Path to the file where error logs will be appended
+        strength: Strength of the LLM model to use (0-1)
+        temperature: Temperature for LLM output (0-1)
+        verbose: Whether to print detailed information
+
     Returns:
-        Tuple containing
+        Tuple containing:
+            - update_unit_test: Boolean indicating if unit test was updated
+            - update_code: Boolean indicating if code was updated
+            - fixed_unit_test: The fixed unit test code
+            - fixed_code: The fixed code under test
+            - analysis_results: The raw output of the LLM analysis
+            - total_cost: Total cost of LLM invocations
+            - model_name: Name of the LLM model used
     """
-    #
-    if not all([unit_test, code, prompt, error, error_file]):
-        raise ValueError("All input parameters must be non-empty")
-
-    validate_inputs(strength, temperature)
-
+    # Initialize variables to track costs and model
     total_cost = 0.0
     model_name = ""
-
-
-
-
-
+
+    # Step 1: Load the prompt template
+    if verbose:
+        console.print("[bold blue]Step 1: Loading prompt template...[/bold blue]")
+
+    console.print("[bold yellow]DEBUG: About to load prompt template[/bold yellow]")
+    prompt_template = load_prompt_template("fix_errors_from_unit_tests_LLM")
+    console.print(f"[bold yellow]DEBUG: Prompt template loaded: {'Success' if prompt_template else 'Failed'}[/bold yellow]")
+
+    if not prompt_template:
+        error_msg = "Failed to load prompt template 'fix_errors_from_unit_tests_LLM'"
+        if verbose:
+            console.print(f"[bold red]{error_msg}[/bold red]")
+        raise ValueError(error_msg)

-
-
-
-
-
-
-
-
-
-
+    if verbose:
+        console.print("[bold green]Prompt template loaded successfully[/bold green]")
+
+    # Step 2: Read contents of error_file and parse any previous fix attempts
+    if verbose:
+        console.print("[bold blue]Step 2: Reading error file for previous fixes...[/bold blue]")
+
+    prior_fixes = ""
+    try:
+        if os.path.exists(error_file):
+            console.print("[bold yellow]DEBUG: Reading error file[/bold yellow]")
+            with open(error_file, 'r') as f:
+                prior_fixes = f.read()
+
             if verbose:
-                console.print(f"[
-
-
+                console.print(f"[bold green]Found existing error file: {error_file}[/bold green]")
+        else:
+            if verbose:
+                console.print(f"[bold yellow]Creating new error file: {error_file}[/bold yellow]")
+
+            # Ensure directory exists
+            os.makedirs(os.path.dirname(os.path.abspath(error_file)), exist_ok=True)
+    except Exception as e:
+        if verbose:
+            console.print(f"[bold red]Error reading error file: {str(e)}[/bold red]")
+        prior_fixes = f"Error reading prior fixes: {str(e)}"
+
+    # Step 3: Run the LLM analysis prompt through llm_invoke
+    if verbose:
+        console.print("[bold blue]Step 3: Running LLM analysis...[/bold blue]")
+
+    # Preprocess the prompt
+    try:
+        console.print("[bold yellow]DEBUG: Preprocessing prompt[/bold yellow]")
         processed_prompt = preprocess(
             prompt,
             recursive=False,
-            double_curly_brackets=True
-        )
-
-    processed_fix_errors_prompt = preprocess(
-        fix_errors_prompt,
-        recursive=False,
-        double_curly_brackets=True,
-        exclude_keys=['unit_test', 'code', 'errors', 'prompt']
-    )
-
-    if verbose:
-        console.print(Panel("[bold green]Running fix_errors_from_unit_tests...[/bold green]"))
-
-    response1 = llm_invoke(
-        prompt=processed_fix_errors_prompt,
-        input_json={
-            "unit_test": unit_test,
-            "code": code,
-            "prompt": processed_prompt,
-            "errors": error
-        },
-        strength=strength,
-        temperature=temperature,
-        verbose=verbose
-    )
-
-    total_cost += response1['cost']
-    model_name = response1['model_name']
-    result1 = response1['result']
-
-    # Step 4: Pretty print results and log to error file
-    if verbose:
-        console.print(Markdown(result1))
-        console.print(f"Cost of first run: ${response1['cost']:.6f}")
-
-    write_to_error_file(error_file, f"Model: {model_name}\nResult:\n{result1}")
-
-    # Step 5: Preprocess extract_fix prompt
-    processed_extract_prompt = preprocess(
-        extract_fix_prompt,
-        recursive=False,
             double_curly_brackets=True,
             exclude_keys=['unit_test', 'code', 'unit_test_fix']
         )
-
-
+        console.print("[bold yellow]DEBUG: Prompt preprocessed successfully[/bold yellow]")
+    except Exception as e:
+        processed_prompt = prompt
         if verbose:
-            console.print(
-
-
-
-
-
-
-
-
+            console.print(f"[bold yellow]Error during prompt preprocessing, using original prompt: {str(e)}[/bold yellow]")
+
+    # Prepare input for LLM
+    llm_input = {
+        'unit_test': unit_test,
+        'code': code,
+        'prompt': processed_prompt,
+        'errors': error,
+        'prior_fixes': prior_fixes
+    }
+
+    # Log to console if verbose
+    if verbose:
+        console.print(Panel(
+            Text("Running LLM analysis", style="bold white"),
+            subtitle=f"Strength: {strength}, Temperature: {temperature}"
+        ))
+        console.print(f"Input tokens: {len(unit_test.split()) + len(code.split()) + len(processed_prompt.split()) + len(error.split())}")
+
+    # Run the LLM analysis
+    try:
+        console.print("[bold yellow]DEBUG: About to invoke LLM[/bold yellow]")
+        llm_response = llm_invoke(
+            prompt=prompt_template,
+            input_json=llm_input,
+            strength=strength,
             temperature=temperature,
-            output_pydantic=CodeFix,
             verbose=verbose
         )
-
-
-
-
+        console.print("[bold yellow]DEBUG: LLM invocation completed[/bold yellow]")
+
+        # Update tracking variables
+        total_cost += llm_response['cost']
+        model_name = llm_response['model_name']
+
+        # Extract response
+        analysis_results = llm_response['result']
+
+        # Display response if verbose
         if verbose:
-            console.print(
-            console.print(
-
-
-
-
-
-            result2.fixed_code,
-            total_cost,
-            model_name
-        )
-
-    except ValidationError as e:
-        error_msg = f"Validation error in fix_errors_from_unit_tests: {str(e)}"
+            console.print("\n[bold green]LLM Analysis Complete[/bold green]")
+            console.print(Markdown(analysis_results))
+            console.print(f"[bold]Output tokens: {llm_response.get('output_tokens', 'unknown')}[/bold]")
+            console.print(f"[bold]Cost: ${llm_response['cost']:.6f}[/bold]")
+
+    except Exception as e:
+        error_msg = f"Error during LLM analysis: {str(e)}"
         if verbose:
             console.print(f"[bold red]{error_msg}[/bold red]")
-
-
+
+        # Log the error to the error file
+        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        error_log = f"\n\n{'='*50}\nERROR LOG - {timestamp}\n{'='*50}\n{error_msg}\n"
+
+        try:
+            with open(error_file, 'a') as f:
+                f.write(error_log)
+        except Exception as file_err:
+            if verbose:
+                console.print(f"[bold red]Failed to write to error file: {str(file_err)}[/bold red]")
+
+        # Return default values
+        return False, False, unit_test, code, "", total_cost, model_name
+
+    # Extract corrected code sections using regex
+    if verbose:
+        console.print("[bold blue]Step 3d: Extracting code sections...[/bold blue]")
+
+    # Extract sections using regex
+    corrected_code_match = re.search(r'<corrected_code_under_test>(.*?)</corrected_code_under_test>', analysis_results, re.DOTALL)
+    corrected_unit_test_match = re.search(r'<corrected_unit_test>(.*?)</corrected_unit_test>', analysis_results, re.DOTALL)
+
+    # Extract corrected code sections from the regex matches
+    corrected_code_text = ""
+    corrected_unit_test_text = ""
+
+    if corrected_code_match:
+        corrected_code_text = corrected_code_match.group(1).strip()
+
+    if corrected_unit_test_match:
+        corrected_unit_test_text = corrected_unit_test_match.group(1).strip()
+
+    if verbose:
+        console.print(f"[bold yellow]Extracted code text: {bool(corrected_code_text)}[/bold yellow]")
+        console.print(f"[bold yellow]Extracted test text: {bool(corrected_unit_test_text)}[/bold yellow]")
+
+    # Step 3c: Append the output to error_file
+    if verbose:
+        console.print("[bold blue]Step 3c: Logging analysis results...[/bold blue]")
+
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    log_entry = f"\n\n{'='*50}\nANALYSIS LOG - {timestamp}\n{'='*50}\n{analysis_results}\n"
+
+    try:
+        with open(error_file, 'a') as f:
+            f.write(log_entry)
+
+        if verbose:
+            console.print(f"[bold green]Analysis logged to {error_file}[/bold green]")
     except Exception as e:
-        error_msg = f"Error in fix_errors_from_unit_tests: {str(e)}"
         if verbose:
-            console.print(f"[bold red]{
-
-
+            console.print(f"[bold red]Failed to write to error file: {str(e)}[/bold red]")
+
+    # Step 4: Pretty print the analysis results if verbose
+    if verbose:
+        console.print("[bold blue]Step 4: Displaying analysis results...[/bold blue]")
+        console.print(Panel(
+            Markdown(analysis_results),
+            title="Analysis Results",
+            expand=False
+        ))
+
+    # Initialize variables for return values
+    update_unit_test = False
+    update_code = False
+    fixed_unit_test = unit_test
+    fixed_code = code
+
+    # Step 5: Use edit_file to apply the fixes
+    if verbose:
+        console.print("[bold blue]Step 5: Applying fixes...[/bold blue]")
+
+    # Step 5a: Apply unit test fixes if available
+    if corrected_unit_test_text:
+        try:
+            # Create a temporary file for the unit test
+            with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file:
+                temp_test_file = temp_file.name
+                temp_file.write(unit_test)
+
+            if verbose:
+                console.print(f"[bold]Applying unit test fixes...[/bold]")
+
+            # Apply fixes using run_edit_in_subprocess for process isolation
+            test_success, test_error = run_edit_in_subprocess(
+                file_path=temp_test_file,
+                edit_instructions=corrected_unit_test_text
+            )
+
+            # Read the modified file
+            if test_success and os.path.exists(temp_test_file):
+                with open(temp_test_file, 'r') as f:
+                    fixed_unit_test = f.read()
+                update_unit_test = True
+
+                if verbose:
+                    console.print(f"[bold green]Unit test fixes applied successfully[/bold green]")
+            else:
+                if verbose:
+                    console.print(f"[bold red]Failed to apply unit test fixes: {test_error}[/bold red]")
+
+            # Clean up
+            if os.path.exists(temp_test_file):
+                os.remove(temp_test_file)
+
+        except Exception as e:
+            if verbose:
+                console.print(f"[bold red]Error applying unit test fixes: {str(e)}[/bold red]")
+    else:
+        if verbose:
+            console.print("[bold yellow]No unit test fixes required or provided[/bold yellow]")
+
+    # Step 5b: Apply code fixes if available
+    if corrected_code_text:
+        try:
+            # Create a temporary file for the code
+            with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file:
+                temp_code_file = temp_file.name
+                temp_file.write(code)
+
+            if verbose:
+                console.print(f"[bold]Applying code fixes...[/bold]")
+
+            # Apply fixes using run_edit_in_subprocess for process isolation
+            code_success, code_error = run_edit_in_subprocess(
+                file_path=temp_code_file,
+                edit_instructions=corrected_code_text
+            )
+
+            # Read the modified file
+            if code_success and os.path.exists(temp_code_file):
+                with open(temp_code_file, 'r') as f:
+                    fixed_code = f.read()
+                update_code = True
+
+                if verbose:
+                    console.print(f"[bold green]Code fixes applied successfully[/bold green]")
+            else:
+                if verbose:
+                    console.print(f"[bold red]Failed to apply code fixes: {code_error}[/bold red]")
+
+            # Clean up
+            if os.path.exists(temp_code_file):
+                os.remove(temp_code_file)
+
+        except Exception as e:
+            if verbose:
+                console.print(f"[bold red]Error applying code fixes: {str(e)}[/bold red]")
+    else:
+        if verbose:
+            console.print("[bold yellow]No code fixes required or provided[/bold yellow]")
+
+    # Step 6: Return the results
+    if verbose:
+        console.print("[bold blue]Step 6: Returning results...[/bold blue]")
+        console.print(f"[bold green]Fix process completed[/bold green]")
+        console.print(f"[bold]Update unit test: {update_unit_test}[/bold]")
+        console.print(f"[bold]Update code: {update_code}[/bold]")
+        console.print(f"[bold]Total cost: ${total_cost:.6f}[/bold]")
+        console.print(f"[bold]Model used: {model_name}[/bold]")
+
+    # One final cleanup of any lingering processes before returning
+    # terminate_mcp_processes() # Removed as this function doesn't exist in edit_file.py
+
+    return (
+        update_unit_test,
+        update_code,
+        fixed_unit_test,
+        fixed_code,
+        analysis_results,
+        total_cost,
+        model_name
+    )
+
+def fix_errors_from_unit_tests(
+    unit_test: str,
+    code: str,
+    prompt: str,
+    error: str,
+    error_file: str,
+    strength: float,
+    temperature: float = 0.0,
+    verbose: bool = False
+) -> Tuple[bool, bool, str, str, str, float, str]:
+    """
+    Synchronous wrapper for fixing unit test errors and warnings in code files.
+
+    Args:
+        unit_test: The unit test code as a string
+        code: The code under test as a string
+        prompt: The prompt that generated the code under test
+        error: Errors and warnings that need to be fixed
+        error_file: Path to the file where error logs will be appended
+        strength: Strength of the LLM model to use (0-1)
+        temperature: Temperature for LLM output (0-1)
+        verbose: Whether to print detailed information
+
+    Returns:
+        Tuple containing:
+            - update_unit_test: Boolean indicating if unit test was updated
+            - update_code: Boolean indicating if code was updated
+            - fixed_unit_test: The fixed unit test code
+            - fixed_code: The fixed code under test
+            - analysis_results: The raw output of the LLM analysis
+            - total_cost: Total cost of LLM invocations
+            - model_name: Name of the LLM model used
+    """
+    # Input validation
+    if not isinstance(unit_test, str) or not isinstance(code, str) or not isinstance(prompt, str) or not isinstance(error, str):
+        raise ValueError("Input parameters must be strings")
+
+    if not isinstance(error_file, str) or not error_file:
+        raise ValueError("error_file must be a non-empty string")
+
+    if not isinstance(strength, float) or strength < 0 or strength > 1:
+        strength = max(0, min(strength, 1)) # Clamp to 0-1 range instead of raising error
+
+    if not isinstance(temperature, float) or temperature < 0 or temperature > 1:
+        temperature = max(0, min(temperature, 1)) # Clamp to 0-1 range instead of raising error
+
+    # Create and use new event loop instead of trying to get the current one (which causes deprecation warning)
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+
+    try:
+        # Run the async function and return results
+        return loop.run_until_complete(_fix_errors_from_unit_tests_async(
+            unit_test=unit_test,
+            code=code,
+            prompt=prompt,
+            error=error,
+            error_file=error_file,
+            strength=strength,
+            temperature=temperature,
+            verbose=verbose
+        ))
+    finally:
+        # Clean up the loop
+        loop.close()
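The headline API change in this diff: fix_errors_from_unit_tests is now a synchronous wrapper around a new async implementation, and its return value grows from a six-element tuple to seven (the raw analysis_results string is inserted before total_cost), so callers that unpack the old tuple will break. A minimal calling sketch, assuming the module layout shown above; the inputs and the strength value are purely illustrative:

from pdd.fix_errors_from_unit_tests import fix_errors_from_unit_tests

# Hypothetical failing test and code under test, for illustration only.
unit_test = "def test_add():\n    assert add(2, 2) == 5\n"
code = "def add(a, b):\n    return a + b\n"

# 0.0.21 returns a 7-tuple; 0.0.20 returned 6 (no analysis_results).
(update_unit_test, update_code, fixed_unit_test, fixed_code,
 analysis_results, total_cost, model_name) = fix_errors_from_unit_tests(
    unit_test=unit_test,
    code=code,
    prompt="Write an add(a, b) function.",  # prompt that generated the code
    error="AssertionError: assert 4 == 5",  # captured test output
    error_file="fix_errors.log",            # log file; created if missing
    strength=0.5,                           # illustrative; out-of-range values are clamped to [0, 1]
    temperature=0.0,
    verbose=True,
)

if update_code:
    print(f"Code was fixed by {model_name} at a cost of ${total_cost:.6f}")

Note that the wrapper creates and closes its own event loop via asyncio.new_event_loop() and run_until_complete, so it must be called from synchronous code, not from inside an already-running asyncio event loop.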