pdd-cli 0.0.23__py3-none-any.whl → 0.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pdd-cli might be problematic.

Files changed (49)
  1. pdd/__init__.py +7 -1
  2. pdd/bug_main.py +21 -3
  3. pdd/bug_to_unit_test.py +16 -5
  4. pdd/change.py +2 -1
  5. pdd/change_main.py +407 -189
  6. pdd/cli.py +853 -301
  7. pdd/code_generator.py +2 -1
  8. pdd/conflicts_in_prompts.py +2 -1
  9. pdd/construct_paths.py +377 -222
  10. pdd/context_generator.py +2 -1
  11. pdd/continue_generation.py +3 -2
  12. pdd/crash_main.py +55 -20
  13. pdd/data/llm_model.csv +8 -8
  14. pdd/detect_change.py +2 -1
  15. pdd/fix_code_loop.py +465 -160
  16. pdd/fix_code_module_errors.py +7 -4
  17. pdd/fix_error_loop.py +9 -9
  18. pdd/fix_errors_from_unit_tests.py +207 -365
  19. pdd/fix_main.py +31 -4
  20. pdd/fix_verification_errors.py +285 -0
  21. pdd/fix_verification_errors_loop.py +975 -0
  22. pdd/fix_verification_main.py +412 -0
  23. pdd/generate_output_paths.py +427 -183
  24. pdd/generate_test.py +3 -2
  25. pdd/increase_tests.py +2 -2
  26. pdd/llm_invoke.py +18 -8
  27. pdd/pdd_completion.zsh +38 -1
  28. pdd/preprocess.py +3 -3
  29. pdd/process_csv_change.py +466 -154
  30. pdd/prompts/extract_prompt_split_LLM.prompt +7 -4
  31. pdd/prompts/extract_prompt_update_LLM.prompt +11 -5
  32. pdd/prompts/extract_unit_code_fix_LLM.prompt +2 -2
  33. pdd/prompts/find_verification_errors_LLM.prompt +25 -0
  34. pdd/prompts/fix_code_module_errors_LLM.prompt +29 -0
  35. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +5 -5
  36. pdd/prompts/fix_verification_errors_LLM.prompt +20 -0
  37. pdd/prompts/generate_test_LLM.prompt +9 -3
  38. pdd/prompts/split_LLM.prompt +3 -3
  39. pdd/prompts/update_prompt_LLM.prompt +3 -3
  40. pdd/split.py +13 -12
  41. pdd/split_main.py +22 -13
  42. pdd/trace_main.py +7 -0
  43. pdd/xml_tagger.py +2 -1
  44. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/METADATA +4 -4
  45. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/RECORD +49 -44
  46. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/WHEEL +1 -1
  47. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/entry_points.txt +0 -0
  48. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/licenses/LICENSE +0 -0
  49. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/top_level.txt +0 -0
pdd/fix_errors_from_unit_tests.py

@@ -1,406 +1,248 @@
  import os
- import re
- import json
- import asyncio
- import tempfile
+ import tempfile # Added missing import
  from datetime import datetime
- from typing import Dict, Tuple, Any, Optional, List, Union
- import psutil # Add psutil import for process management
-
- from rich.console import Console
+ from typing import Tuple, Optional
+ from pydantic import BaseModel, Field, ValidationError
+ from rich import print as rprint
  from rich.markdown import Markdown
+ from rich.console import Console
  from rich.panel import Panel
- from rich.text import Text
+ from tempfile import NamedTemporaryFile

+ from . import DEFAULT_STRENGTH
+ from .preprocess import preprocess
  from .load_prompt_template import load_prompt_template
  from .llm_invoke import llm_invoke
- from .preprocess import preprocess
- from .edit_file import edit_file, run_edit_in_subprocess
- from langchain_mcp_adapters.client import MultiServerMCPClient

  console = Console()

- async def _fix_errors_from_unit_tests_async(
+ class CodeFix(BaseModel):
+     update_unit_test: bool = Field(description="Whether the unit test needs to be updated")
+     update_code: bool = Field(description="Whether the code needs to be updated")
+     fixed_unit_test: str = Field(description="The fixed unit test code")
+     fixed_code: str = Field(description="The fixed code under test")
+
+ def validate_inputs(strength: float, temperature: float) -> None:
+     """Validate strength and temperature parameters."""
+     if not 0 <= strength <= 1:
+         raise ValueError("Strength must be between 0 and 1")
+     if not 0 <= temperature <= 1:
+         raise ValueError("Temperature must be between 0 and 1")
+
+ def write_to_error_file(file_path: str, content: str) -> None:
+     """Write content to error file with timestamp and separator."""
+     try:
+         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         separator = f"\n{'='*80}\n{timestamp}\n{'='*80}\n"
+
+         # Ensure parent directory exists
+         parent_dir = os.path.dirname(file_path)
+         use_fallback = False
+
+         if parent_dir:
+             try:
+                 os.makedirs(parent_dir, exist_ok=True)
+             except Exception as e:
+                 console.print(f"[yellow]Warning: Could not create directory {parent_dir}: {str(e)}[/yellow]")
+                 # Fallback to system temp directory
+                 use_fallback = True
+                 parent_dir = None
+
+         # Use atomic write with temporary file
+         try:
+             # First read existing content if file exists
+             existing_content = ""
+             if os.path.exists(file_path):
+                 try:
+                     with open(file_path, 'r') as f:
+                         existing_content = f.read()
+                 except Exception as e:
+                     console.print(f"[yellow]Warning: Could not read existing file {file_path}: {str(e)}[/yellow]")
+
+             # Write both existing and new content to temp file
+             with NamedTemporaryFile(mode='w', dir=parent_dir, delete=False) as tmp_file:
+                 if existing_content:
+                     tmp_file.write(existing_content)
+                 tmp_file.write(f"{separator}{content}\n")
+                 tmp_path = tmp_file.name
+
+             # Only attempt atomic move if not using fallback
+             if not use_fallback:
+                 try:
+                     os.replace(tmp_path, file_path)
+                 except Exception as e:
+                     console.print(f"[yellow]Warning: Could not move file to {file_path}: {str(e)}[/yellow]")
+                     use_fallback = True
+
+             if use_fallback:
+                 # Write to fallback location in system temp directory
+                 fallback_path = os.path.join(tempfile.gettempdir(), os.path.basename(file_path))
+                 try:
+                     os.replace(tmp_path, fallback_path)
+                     console.print(f"[yellow]Warning: Using fallback location: {fallback_path}[/yellow]")
+                 except Exception as e:
+                     console.print(f"[red]Error writing to fallback location: {str(e)}[/red]")
+                     try:
+                         os.unlink(tmp_path)
+                     except:
+                         pass
+                     raise
+         except Exception as e:
+             console.print(f"[red]Error writing to error file: {str(e)}[/red]")
+             try:
+                 os.unlink(tmp_path)
+             except:
+                 pass
+             raise
+     except Exception as e:
+         console.print(f"[red]Error in write_to_error_file: {str(e)}[/red]")
+         raise
+
+ def fix_errors_from_unit_tests(
      unit_test: str,
      code: str,
      prompt: str,
      error: str,
      error_file: str,
      strength: float,
-     temperature: float = 0.0,
+     temperature: float,
      verbose: bool = False
  ) -> Tuple[bool, bool, str, str, str, float, str]:
      """
-     Fix unit test errors and warnings in code files.
-
+     Fix errors in unit tests using LLM models and log the process.
+
      Args:
-         unit_test: The unit test code as a string
-         code: The code under test as a string
-         prompt: The prompt that generated the code under test
-         error: Errors and warnings that need to be fixed
-         error_file: Path to the file where error logs will be appended
-         strength: Strength of the LLM model to use (0-1)
-         temperature: Temperature for LLM output (0-1)
-         verbose: Whether to print detailed information
-
+         unit_test (str): The unit test code
+         code (str): The code under test
+         prompt (str): The prompt that generated the code
+         error (str): The error message
+         error_file (str): Path to error log file
+         strength (float): LLM model strength (0-1)
+         temperature (float): LLM temperature (0-1)
+         verbose (bool): Whether to print detailed output
+
      Returns:
-         Tuple containing:
-             - update_unit_test: Boolean indicating if unit test was updated
-             - update_code: Boolean indicating if code was updated
-             - fixed_unit_test: The fixed unit test code
-             - fixed_code: The fixed code under test
-             - analysis_results: The raw output of the LLM analysis
-             - total_cost: Total cost of LLM invocations
-             - model_name: Name of the LLM model used
+         Tuple containing update flags, fixed code/tests, total cost, and model name
      """
-     # Initialize variables to track costs and model
+     # Input validation
+     if not all([unit_test, code, prompt, error, error_file]):
+         raise ValueError("All input parameters must be non-empty")
+
+     validate_inputs(strength, temperature)
+
      total_cost = 0.0
      model_name = ""
-
-     # Step 1: Load the prompt template
-     if verbose:
-         console.print("[bold blue]Step 1: Loading prompt template...[/bold blue]")
-
-     console.print("[bold yellow]DEBUG: About to load prompt template[/bold yellow]")
-     prompt_template = load_prompt_template("fix_errors_from_unit_tests_LLM")
-     console.print(f"[bold yellow]DEBUG: Prompt template loaded: {'Success' if prompt_template else 'Failed'}[/bold yellow]")
-
-     if not prompt_template:
-         error_msg = "Failed to load prompt template 'fix_errors_from_unit_tests_LLM'"
-         if verbose:
-             console.print(f"[bold red]{error_msg}[/bold red]")
-         raise ValueError(error_msg)
-
-     if verbose:
-         console.print("[bold green]Prompt template loaded successfully[/bold green]")
-
-     # Step 2: Read contents of error_file and parse any previous fix attempts
-     if verbose:
-         console.print("[bold blue]Step 2: Reading error file for previous fixes...[/bold blue]")
-
-     prior_fixes = ""
+
      try:
-         if os.path.exists(error_file):
-             console.print("[bold yellow]DEBUG: Reading error file[/bold yellow]")
-             with open(error_file, 'r') as f:
-                 prior_fixes = f.read()
-
-             if verbose:
-                 console.print(f"[bold green]Found existing error file: {error_file}[/bold green]")
-         else:
+         # Step 1: Load prompt templates
+         fix_errors_prompt = load_prompt_template("fix_errors_from_unit_tests_LLM")
+         extract_fix_prompt = load_prompt_template("extract_unit_code_fix_LLM")
+
+         if not fix_errors_prompt or not extract_fix_prompt:
+             raise ValueError("Failed to load prompt templates")
+
+         # Step 2: Read error file content
+         existing_errors = ""
+         try:
+             if os.path.exists(error_file):
+                 with open(error_file, 'r', encoding='utf-8') as f:
+                     existing_errors = f.read()
+         except Exception as e:
             if verbose:
-                 console.print(f"[bold yellow]Creating new error file: {error_file}[/bold yellow]")
-
-             # Ensure directory exists
-             os.makedirs(os.path.dirname(os.path.abspath(error_file)), exist_ok=True)
-     except Exception as e:
-         if verbose:
-             console.print(f"[bold red]Error reading error file: {str(e)}[/bold red]")
-         prior_fixes = f"Error reading prior fixes: {str(e)}"
-
-     # Step 3: Run the LLM analysis prompt through llm_invoke
-     if verbose:
-         console.print("[bold blue]Step 3: Running LLM analysis...[/bold blue]")
-
-     # Preprocess the prompt
-     try:
-         console.print("[bold yellow]DEBUG: Preprocessing prompt[/bold yellow]")
+                 console.print(f"[yellow]Warning: Could not read error file: {str(e)}[/yellow]")
+
+         # Step 3: Run first prompt through llm_invoke
          processed_prompt = preprocess(
              prompt,
              recursive=False,
+             double_curly_brackets=True
+         )
+
+         processed_fix_errors_prompt = preprocess(
+             fix_errors_prompt,
+             recursive=False,
              double_curly_brackets=True,
-             exclude_keys=['unit_test', 'code', 'unit_test_fix']
+             exclude_keys=['unit_test', 'code', 'errors', 'prompt']
          )
-         console.print("[bold yellow]DEBUG: Prompt preprocessed successfully[/bold yellow]")
-     except Exception as e:
-         processed_prompt = prompt
+
         if verbose:
-             console.print(f"[bold yellow]Error during prompt preprocessing, using original prompt: {str(e)}[/bold yellow]")
-
-     # Prepare input for LLM
-     llm_input = {
-         'unit_test': unit_test,
-         'code': code,
-         'prompt': processed_prompt,
-         'errors': error,
-         'prior_fixes': prior_fixes
-     }
-
-     # Log to console if verbose
-     if verbose:
-         console.print(Panel(
-             Text("Running LLM analysis", style="bold white"),
-             subtitle=f"Strength: {strength}, Temperature: {temperature}"
-         ))
-         console.print(f"Input tokens: {len(unit_test.split()) + len(code.split()) + len(processed_prompt.split()) + len(error.split())}")
-
-     # Run the LLM analysis
-     try:
-         console.print("[bold yellow]DEBUG: About to invoke LLM[/bold yellow]")
-         llm_response = llm_invoke(
-             prompt=prompt_template,
-             input_json=llm_input,
+             console.print(Panel("[bold green]Running fix_errors_from_unit_tests...[/bold green]"))
+
+         response1 = llm_invoke(
+             prompt=processed_fix_errors_prompt,
+             input_json={
+                 "unit_test": unit_test,
+                 "code": code,
+                 "prompt": processed_prompt,
+                 "errors": error
+             },
             strength=strength,
             temperature=temperature,
             verbose=verbose
         )
-         console.print("[bold yellow]DEBUG: LLM invocation completed[/bold yellow]")
-
-         # Update tracking variables
-         total_cost += llm_response['cost']
-         model_name = llm_response['model_name']
-
-         # Extract response
-         analysis_results = llm_response['result']
-
-         # Display response if verbose
-         if verbose:
-             console.print("\n[bold green]LLM Analysis Complete[/bold green]")
-             console.print(Markdown(analysis_results))
-             console.print(f"[bold]Output tokens: {llm_response.get('output_tokens', 'unknown')}[/bold]")
-             console.print(f"[bold]Cost: ${llm_response['cost']:.6f}[/bold]")
-
-     except Exception as e:
-         error_msg = f"Error during LLM analysis: {str(e)}"
-         if verbose:
-             console.print(f"[bold red]{error_msg}[/bold red]")
-
-         # Log the error to the error file
-         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-         error_log = f"\n\n{'='*50}\nERROR LOG - {timestamp}\n{'='*50}\n{error_msg}\n"
-
-         try:
-             with open(error_file, 'a') as f:
-                 f.write(error_log)
-         except Exception as file_err:
-             if verbose:
-                 console.print(f"[bold red]Failed to write to error file: {str(file_err)}[/bold red]")
-
-         # Return default values
-         return False, False, unit_test, code, "", total_cost, model_name
-
-     # Extract corrected code sections using regex
-     if verbose:
-         console.print("[bold blue]Step 3d: Extracting code sections...[/bold blue]")
-
-     # Extract sections using regex
-     corrected_code_match = re.search(r'<corrected_code_under_test>(.*?)</corrected_code_under_test>', analysis_results, re.DOTALL)
-     corrected_unit_test_match = re.search(r'<corrected_unit_test>(.*?)</corrected_unit_test>', analysis_results, re.DOTALL)
-
-     # Extract corrected code sections from the regex matches
-     corrected_code_text = ""
-     corrected_unit_test_text = ""
-
-     if corrected_code_match:
-         corrected_code_text = corrected_code_match.group(1).strip()
-
-     if corrected_unit_test_match:
-         corrected_unit_test_text = corrected_unit_test_match.group(1).strip()
-
-     if verbose:
-         console.print(f"[bold yellow]Extracted code text: {bool(corrected_code_text)}[/bold yellow]")
-         console.print(f"[bold yellow]Extracted test text: {bool(corrected_unit_test_text)}[/bold yellow]")
-
-     # Step 3c: Append the output to error_file
-     if verbose:
-         console.print("[bold blue]Step 3c: Logging analysis results...[/bold blue]")
-
-     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-     log_entry = f"\n\n{'='*50}\nANALYSIS LOG - {timestamp}\n{'='*50}\n{analysis_results}\n"
-
-     try:
-         with open(error_file, 'a') as f:
-             f.write(log_entry)
-
-         if verbose:
-             console.print(f"[bold green]Analysis logged to {error_file}[/bold green]")
-     except Exception as e:
-         if verbose:
-             console.print(f"[bold red]Failed to write to error file: {str(e)}[/bold red]")
-
-     # Step 4: Pretty print the analysis results if verbose
-     if verbose:
-         console.print("[bold blue]Step 4: Displaying analysis results...[/bold blue]")
-         console.print(Panel(
-             Markdown(analysis_results),
-             title="Analysis Results",
-             expand=False
-         ))
-
-     # Initialize variables for return values
-     update_unit_test = False
-     update_code = False
-     fixed_unit_test = unit_test
-     fixed_code = code
-
-     # Step 5: Use edit_file to apply the fixes
-     if verbose:
-         console.print("[bold blue]Step 5: Applying fixes...[/bold blue]")
-
-     # Step 5a: Apply unit test fixes if available
-     if corrected_unit_test_text:
-         try:
-             # Create a temporary file for the unit test
-             with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file:
-                 temp_test_file = temp_file.name
-                 temp_file.write(unit_test)
-
-             if verbose:
-                 console.print(f"[bold]Applying unit test fixes...[/bold]")
-
-             # Apply fixes using run_edit_in_subprocess for process isolation
-             test_success, test_error = run_edit_in_subprocess(
-                 file_path=temp_test_file,
-                 edit_instructions=corrected_unit_test_text
-             )
-
-             # Read the modified file
-             if test_success and os.path.exists(temp_test_file):
-                 with open(temp_test_file, 'r') as f:
-                     fixed_unit_test = f.read()
-                 update_unit_test = True
-
-                 if verbose:
-                     console.print(f"[bold green]Unit test fixes applied successfully[/bold green]")
-             else:
-                 if verbose:
-                     console.print(f"[bold red]Failed to apply unit test fixes: {test_error}[/bold red]")
-
-             # Clean up
-             if os.path.exists(temp_test_file):
-                 os.remove(temp_test_file)
-
-         except Exception as e:
-             if verbose:
-                 console.print(f"[bold red]Error applying unit test fixes: {str(e)}[/bold red]")
-     else:
+
+         total_cost += response1['cost']
+         model_name = response1['model_name']
+         result1 = response1['result']
+
+         # Step 4: Pretty print results and log to error file
         if verbose:
-             console.print("[bold yellow]No unit test fixes required or provided[/bold yellow]")
-
-     # Step 5b: Apply code fixes if available
-     if corrected_code_text:
-         try:
-             # Create a temporary file for the code
-             with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file:
-                 temp_code_file = temp_file.name
-                 temp_file.write(code)
-
-             if verbose:
-                 console.print(f"[bold]Applying code fixes...[/bold]")
-
-             # Apply fixes using run_edit_in_subprocess for process isolation
-             code_success, code_error = run_edit_in_subprocess(
-                 file_path=temp_code_file,
-                 edit_instructions=corrected_code_text
-             )
-
-             # Read the modified file
-             if code_success and os.path.exists(temp_code_file):
-                 with open(temp_code_file, 'r') as f:
-                     fixed_code = f.read()
-                 update_code = True
-
-                 if verbose:
-                     console.print(f"[bold green]Code fixes applied successfully[/bold green]")
-             else:
-                 if verbose:
-                     console.print(f"[bold red]Failed to apply code fixes: {code_error}[/bold red]")
-
-             # Clean up
-             if os.path.exists(temp_code_file):
-                 os.remove(temp_code_file)
-
-         except Exception as e:
-             if verbose:
-                 console.print(f"[bold red]Error applying code fixes: {str(e)}[/bold red]")
-     else:
+             console.print(Markdown(result1))
+             console.print(f"Cost of first run: ${response1['cost']:.6f}")
+
+         write_to_error_file(error_file, f"Model: {model_name}\nResult:\n{result1}")
+
+         # Step 5: Preprocess extract_fix prompt
+         processed_extract_prompt = preprocess(
+             extract_fix_prompt,
+             recursive=False,
+             double_curly_brackets=True,
+             exclude_keys=['unit_test', 'code', 'unit_test_fix']
+         )
+
+         # Step 6: Run second prompt through llm_invoke with fixed strength
         if verbose:
-             console.print("[bold yellow]No code fixes required or provided[/bold yellow]")
-
-     # Step 6: Return the results
-     if verbose:
-         console.print("[bold blue]Step 6: Returning results...[/bold blue]")
-         console.print(f"[bold green]Fix process completed[/bold green]")
-         console.print(f"[bold]Update unit test: {update_unit_test}[/bold]")
-         console.print(f"[bold]Update code: {update_code}[/bold]")
-         console.print(f"[bold]Total cost: ${total_cost:.6f}[/bold]")
-         console.print(f"[bold]Model used: {model_name}[/bold]")
-
-     # One final cleanup of any lingering processes before returning
-     # terminate_mcp_processes() # Removed as this function doesn't exist in edit_file.py
-
-     return (
-         update_unit_test,
-         update_code,
-         fixed_unit_test,
-         fixed_code,
-         analysis_results,
-         total_cost,
-         model_name
-     )
+             console.print(Panel("[bold green]Running extract_unit_code_fix...[/bold green]"))

- def fix_errors_from_unit_tests(
-     unit_test: str,
-     code: str,
-     prompt: str,
-     error: str,
-     error_file: str,
-     strength: float,
-     temperature: float = 0.0,
-     verbose: bool = False
- ) -> Tuple[bool, bool, str, str, str, float, str]:
-     """
-     Synchronous wrapper for fixing unit test errors and warnings in code files.
-
-     Args:
-         unit_test: The unit test code as a string
-         code: The code under test as a string
-         prompt: The prompt that generated the code under test
-         error: Errors and warnings that need to be fixed
-         error_file: Path to the file where error logs will be appended
-         strength: Strength of the LLM model to use (0-1)
-         temperature: Temperature for LLM output (0-1)
-         verbose: Whether to print detailed information
-
-     Returns:
-         Tuple containing:
-             - update_unit_test: Boolean indicating if unit test was updated
-             - update_code: Boolean indicating if code was updated
-             - fixed_unit_test: The fixed unit test code
-             - fixed_code: The fixed code under test
-             - analysis_results: The raw output of the LLM analysis
-             - total_cost: Total cost of LLM invocations
-             - model_name: Name of the LLM model used
-     """
-     # Input validation
-     if not isinstance(unit_test, str) or not isinstance(code, str) or not isinstance(prompt, str) or not isinstance(error, str):
-         raise ValueError("Input parameters must be strings")
-
-     if not isinstance(error_file, str) or not error_file:
-         raise ValueError("error_file must be a non-empty string")
-
-     if not isinstance(strength, float) or strength < 0 or strength > 1:
-         strength = max(0, min(strength, 1)) # Clamp to 0-1 range instead of raising error
-
-     if not isinstance(temperature, float) or temperature < 0 or temperature > 1:
-         temperature = max(0, min(temperature, 1)) # Clamp to 0-1 range instead of raising error
-
-     # Create and use new event loop instead of trying to get the current one (which causes deprecation warning)
-     loop = asyncio.new_event_loop()
-     asyncio.set_event_loop(loop)
-
-     try:
-         # Run the async function and return results
-         return loop.run_until_complete(_fix_errors_from_unit_tests_async(
-             unit_test=unit_test,
-             code=code,
-             prompt=prompt,
-             error=error,
-             error_file=error_file,
-             strength=strength,
+         response2 = llm_invoke(
+             prompt=processed_extract_prompt,
+             input_json={
+                 "unit_test_fix": result1,
+                 "unit_test": unit_test,
+                 "code": code
+             },
+             strength=DEFAULT_STRENGTH, # Fixed strength as per requirements
             temperature=temperature,
+             output_pydantic=CodeFix,
             verbose=verbose
-         ))
-     finally:
-         # Clean up the loop
-         loop.close()
+         )
+
+         total_cost += response2['cost']
+         result2: CodeFix = response2['result']
+
+         if verbose:
+             console.print(f"Total cost: ${total_cost:.6f}")
+             console.print(f"Model used: {model_name}")
+
+         return (
+             result2.update_unit_test,
+             result2.update_code,
+             result2.fixed_unit_test,
+             result2.fixed_code,
+             result1,
+             total_cost,
+             model_name
+         )
+
+     except ValidationError as e:
+         error_msg = f"Validation error in fix_errors_from_unit_tests: {str(e)}"
+         if verbose:
+             console.print(f"[bold red]{error_msg}[/bold red]")
+         write_to_error_file(error_file, error_msg)
+         return False, False, "", "", "", 0.0, ""
+     except Exception as e:
+         error_msg = f"Error in fix_errors_from_unit_tests: {str(e)}"
+         if verbose:
+             console.print(f"[bold red]{error_msg}[/bold red]")
+         write_to_error_file(error_file, error_msg)
+         return False, False, "", "", "", 0.0, ""