pdd-cli 0.0.25__py3-none-any.whl → 0.0.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pdd-cli might be problematic.

pdd/__init__.py CHANGED
@@ -1,7 +1,14 @@
-__version__ = "0.0.25"
+__version__ = "0.0.26"
 
 # Strength parameter used for LLM extraction across the codebase
 # Used in postprocessing, XML tagging, code generation, and other extraction operations. The module should have a large context window and be affordable.
-EXTRACTION_STRENGTH = 0.97
+EXTRACTION_STRENGTH = 0.9
 
-DEFAULT_STRENGTH = 0.8
+DEFAULT_STRENGTH = 0.9
+
+"""PDD - Prompt Driven Development"""
+
+# Define constants used across the package
+DEFAULT_LLM_MODEL = "gpt-4.1-nano"
+
+# You can add other package-level initializations or imports here
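
For orientation, a minimal usage sketch of these module-level constants. Only the names shown in this diff are assumed (the `from . import EXTRACTION_STRENGTH` pattern appears further down); everything else is illustrative:

```python
# Hedged sketch: assumes only that pdd exposes these names at package level
# and that strength values are fractions in [0, 1], as the diff suggests.
from pdd import DEFAULT_LLM_MODEL, DEFAULT_STRENGTH, EXTRACTION_STRENGTH, __version__

print(f"pdd {__version__} defaults to {DEFAULT_LLM_MODEL}")
# As of 0.0.26 the extraction and default strengths converge on 0.9:
assert EXTRACTION_STRENGTH == DEFAULT_STRENGTH == 0.9
```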
pdd/bug_to_unit_test.py CHANGED
@@ -91,7 +91,7 @@ def bug_to_unit_test(
 
     reasoning, is_finished, unfinished_cost, unfinished_model = unfinished_prompt(
         prompt_text=last_600_chars,
-        strength=strength,
+        strength=0.75,
         temperature=temperature,
         verbose=False
     )
@@ -11,9 +11,11 @@ from . import EXTRACTION_STRENGTH
 console = Console()
 
 class TrimResultsStartOutput(BaseModel):
+    explanation: str = Field(description="The explanation of how you determined what to cut out")
     code_block: str = Field(description="The trimmed code block from the start")
 
 class TrimResultsOutput(BaseModel):
+    explanation: str = Field(description="The explanation of the code block")
     trimmed_continued_generation: str = Field(description="The trimmed continuation of the generation")
 
 def continue_generation(
@@ -70,7 +72,7 @@ def continue_generation(
     trim_start_response = llm_invoke(
         prompt=processed_prompts['trim_start'],
         input_json={"LLM_OUTPUT": llm_output},
-        strength=0.8,
+        strength=0.75,
         temperature=0,
         output_pydantic=TrimResultsStartOutput,
         verbose=verbose
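
The notable change here is prepending an `explanation` field to each output schema. Pydantic preserves field declaration order in the generated JSON schema, so a structured-output call is nudged to produce its reasoning before the payload field. A small runnable sketch of the idea (pydantic v2; the sample JSON is invented):

```python
from pydantic import BaseModel, Field

class TrimResultsStartOutput(BaseModel):
    explanation: str = Field(description="The explanation of how you determined what to cut out")
    code_block: str = Field(description="The trimmed code block from the start")

# Field order in the schema mirrors declaration order, so 'explanation' comes first:
print(list(TrimResultsStartOutput.model_json_schema()["properties"]))
# -> ['explanation', 'code_block']

obj = TrimResultsStartOutput.model_validate_json(
    '{"explanation": "Dropped the prose preamble before the first def.", "code_block": "def f():\\n    return 1"}'
)
print(obj.code_block)
```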
pdd/data/llm_model.csv CHANGED
@@ -1,17 +1,18 @@
-provider,model,input,output,coding_arena_elo,base_url,api_key,counter,encoder,max_tokens,max_completion_tokens,structured_output
-OpenAI,"gpt-4.1-nano",0.1,0.40,1246,,OPENAI_API_KEY,tiktoken,o200k_base,,32768,True
-OpenAI,"grok-3-beta",3,15,1255,"https://api.x.ai/v1",XAI_API_KEY,tiktoken,o200k_base,131072,,False
-Anthropic,"claude-3-5-haiku-20241022",1,5,1259,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,8192,,False
-OpenAI,"deepseek-coder",0.14,0.28,1279,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
-Google,"gemini-2.5-flash-preview-04-17",.15,3.5,1291,,GOOGLE_API_KEY,,,65535,,False
-GoogleVertexAI,"gemini-2.5-pro-exp-03-25",1.25,10,1299,,VERTEX_AI_API_KEY,,,65535,,False
-Anthropic,claude-3-7-sonnet-20250219,3,15,1312,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,64000,,False
-Google,gemini-2.5-pro-exp-03-25,1.25,10,1313,,GOOGLE_API_KEY,,,65535,,False
-OpenAI,"deepseek-r1-distill-llama-70b-specdec",5,5,1314,https://api.groq.com/openai/v1,GROQ_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,16384,,False
-Ollama,"deepseek-r1:70b-llama-distill-q8_0",0.0,0.0,1315,,PWD,,,,,False
-Ollama,deepseek-r1:32b-qwen-distill-fp16,0.0,0.0,1316,,PWD,,,,,False
-OpenAI,"o4-mini",1.1,4.4,1319,,OPENAI_API_KEY,tiktoken,o200k_base,,100000,True
-OpenAI,"o3",10,40,1331,,OPENAI_API_KEY,tiktoken,o200k_base,,100000,True
-OpenAI,"gpt-4.1",2,8,1332,,OPENAI_API_KEY,tiktoken,o200k_base,,32768,True
-OpenAI,"deepseek-reasoner",0.55,2.19,1336,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
-Fireworks,accounts/fireworks/models/deepseek-r1,3,8,1338,,FIREWORKS_API_KEY,,,8192,,False
+provider,model,input,output,coding_arena_elo,base_url,api_key,max_reasoning_tokens,structured_output,reasoning_type
+OpenAI,gpt-4.1-nano,0.1,0.4,1249,,OPENAI_API_KEY,0,True,none
+xai,xai/grok-3-beta,3.0,15.0,1332,https://api.x.ai/v1,XAI_API_KEY,0,False,none
+Anthropic,claude-3-5-haiku-20241022,.8,4,1261,,ANTHROPIC_API_KEY,0,True,none
+OpenAI,deepseek/deepseek-chat,.27,1.1,1353,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,0,False,none
+Google,vertex_ai/gemini-2.5-flash-preview-04-17,0.15,0.6,1330,,VERTEX_CREDENTIALS,0,True,effort
+Google,gemini-2.5-pro-exp-03-25,1.25,10.0,1360,,GOOGLE_API_KEY,0,True,none
+Anthropic,claude-3-7-sonnet-20250219,3.0,15.0,1340,,ANTHROPIC_API_KEY,64000,True,budget
+Google,vertex_ai/gemini-2.5-pro-preview-05-06,1.25,10.0,1361,,VERTEX_CREDENTIALS,0,True,none
+OpenAI,o4-mini,1.1,4.4,1333,,OPENAI_API_KEY,0,True,effort
+OpenAI,o3,10.0,40.0,1389,,OPENAI_API_KEY,0,True,effort
+OpenAI,gpt-4.1,2.0,8.0,1335,,OPENAI_API_KEY,0,True,none
+OpenAI,deepseek/deepseek-reasoner,0.55,2.19,1337,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,0,False,none
+Fireworks,fireworks_ai/accounts/fireworks/models/deepseek-r1,3.0,8.0,1338,,FIREWORKS_API_KEY,0,False,none
+OpenAI,chatgpt-4o-latest,5,15,1369,,OPENAI_API_KEY,0,False,none
+Anthropic,bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0,3.0,15.0,1339,,,64000,True,budget
+OpenAI,azure/o4-mini,1.1,4.4,1334,,OPENAI_API_KEY,0,True,effort
+OpenAI,openai/mlx-community/Qwen3-30B-A3B-4bit,0,0,1293,http://localhost:8080,,0,False,none
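
The schema change is easy to miss in the row noise: the tokenizer-oriented columns (counter, encoder, max_tokens, max_completion_tokens) are gone, replaced by max_reasoning_tokens, structured_output, and a reasoning_type of none, effort, or budget. A hypothetical loader for the new header, to make the shape concrete (the class and function names, and the per-million-token price units, are assumptions, not part of the package):

```python
import csv
from dataclasses import dataclass

@dataclass
class ModelRow:
    provider: str
    model: str
    input_price: float           # assumed $ per 1M input tokens
    output_price: float          # assumed $ per 1M output tokens
    coding_arena_elo: int
    base_url: str                # empty when the provider's default endpoint is used
    api_key: str                 # name of the env var holding the key
    max_reasoning_tokens: int
    structured_output: bool
    reasoning_type: str          # "none", "effort", or "budget"

def load_models(path: str) -> list[ModelRow]:
    with open(path, newline="") as f:
        return [
            ModelRow(
                provider=row["provider"],
                model=row["model"],
                input_price=float(row["input"]),
                output_price=float(row["output"]),
                coding_arena_elo=int(row["coding_arena_elo"]),
                base_url=row["base_url"],
                api_key=row["api_key"],
                max_reasoning_tokens=int(row["max_reasoning_tokens"]),
                structured_output=row["structured_output"] == "True",
                reasoning_type=row["reasoning_type"],
            )
            for row in csv.DictReader(f)
        ]
```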
pdd/fix_main.py CHANGED
@@ -289,6 +289,7 @@ def fix_main(
             rprint(f"[bold red]Markup Error in fix_main:[/bold red]")
             rprint(escape(str(e)))
         else:
-            # Print other errors normally (might still fail if they contain markup)
-            rprint(f"[bold red]Error:[/bold red] {str(e)}")
+            # Print other errors normally, escaping the error string
+            from rich.markup import escape  # Ensure escape is imported
+            rprint(f"[bold red]Error:[/bold red] {escape(str(e))}")
         sys.exit(1)
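
The fix matters because Rich parses square brackets in printed strings as markup: an exception message containing a stray closing tag like "[/bold]" raises a MarkupError, and an opening tag silently restyles the rest of the line. A small runnable illustration:

```python
from rich import print as rprint
from rich.markup import escape

err = ValueError("unexpected token [bold] near column 3")
# rprint(f"Error: {str(err)}")  # would render "near column 3" in bold
rprint(f"[bold red]Error:[/bold red] {escape(str(err))}")  # prints the brackets literally
```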
@@ -1,10 +1,22 @@
 import re
-from typing import Dict, Any
+from typing import Dict, Any, Optional
 from rich import print as rprint
 from rich.markdown import Markdown
+from pydantic import BaseModel, Field
 from .load_prompt_template import load_prompt_template
 from .llm_invoke import llm_invoke
 
+# Define Pydantic model for structured LLM output for VERIFICATION
+class VerificationOutput(BaseModel):
+    issues_count: int = Field(description="The number of issues found during verification.")
+    details: Optional[str] = Field(description="Detailed explanation of any discrepancies or issues found. Can be null or empty if issues_count is 0.", default=None)
+
+# Define Pydantic model for structured LLM output for FIXES
+class FixerOutput(BaseModel):
+    explanation: str = Field(description="Detailed explanation of the analysis and fixes applied.")
+    fixed_code: str = Field(description="The complete, runnable, and fixed code module.")
+    fixed_program: str = Field(description="The complete, runnable, and fixed program that uses the code module.")
+
 def fix_verification_errors(
     program: str,
     prompt: str,
@@ -41,18 +53,17 @@ def fix_verification_errors(
     verification_issues_count = 0
     verification_details = None
     fix_explanation = None
-    fixed_program = program
-    fixed_code = code
+    fixed_program = program # Initialize with original program
+    fixed_code = code # Initialize with original code
     final_explanation = None
 
     # Check only essential inputs, allow empty output
     if not all([program, prompt, code]):
-        # Keep the error print for program, prompt, code missing
         rprint("[bold red]Error:[/bold red] Missing one or more required inputs (program, prompt, code).")
         return {
             "explanation": None,
-            "fixed_program": program, # Return original if possible
-            "fixed_code": code, # Return original if possible
+            "fixed_program": program,
+            "fixed_code": code,
             "total_cost": 0.0,
             "model_name": None,
             "verification_issues_count": 0,
@@ -104,11 +115,11 @@ def fix_verification_errors(
         input_json=verification_input_json,
         strength=strength,
         temperature=temperature,
-        verbose=False, # Keep internal llm_invoke verbose off unless needed
+        verbose=False,
+        output_pydantic=VerificationOutput
     )
     total_cost += verification_response.get('cost', 0.0)
     model_name = verification_response.get('model_name', model_name)
-    verification_result = verification_response.get('result', '')
 
     if verbose:
         rprint(f"[cyan]Verification LLM call complete.[/cyan]")
@@ -123,73 +134,97 @@ def fix_verification_errors(
             "fixed_code": code,
             "total_cost": total_cost,
             "model_name": model_name,
-            "verification_issues_count": verification_issues_count,
+            "verification_issues_count": 0, # Reset on LLM call error
         }
 
-    if verbose:
-        rprint("\n[blue]Verification Result:[/blue]")
-        # Markdown object handles its own rendering, no extra needed here
-        rprint(Markdown(verification_result))
-
     issues_found = False
-    try:
-        # Attempt to match and extract digits directly
-        count_match = re.search(r"<issues_count>(\d+)</issues_count>", verification_result)
-        if count_match:
-            verification_issues_count = int(count_match.group(1)) # Safe due to \d+
-        else:
-            # Specific match failed, check if tag exists with invalid content or is missing
-            generic_count_match = re.search(r"<issues_count>(.*?)</issues_count>", verification_result, re.DOTALL)
-            if generic_count_match:
-                # Tag found, but content is not \d+ -> Parsing Error
-                rprint("[bold red]Error:[/bold red] Could not parse integer value from <issues_count> tag.")
-                # Return the specific error structure for parsing errors after verification call
-                return {
-                    "explanation": None,
-                    "fixed_program": program,
-                    "fixed_code": code,
-                    "total_cost": total_cost, # Cost incurred so far
-                    "model_name": model_name, # Model used so far
-                    "verification_issues_count": 0, # Reset count on parsing error
-                }
+    verification_result_obj = verification_response.get('result')
+
+    if isinstance(verification_result_obj, VerificationOutput):
+        verification_issues_count = verification_result_obj.issues_count
+        verification_details = verification_result_obj.details
+        if verbose:
+            rprint("[green]Successfully parsed structured output from verification LLM.[/green]")
+            rprint("\n[blue]Verification Result (parsed):[/blue]")
+            rprint(f" Issues Count: {verification_issues_count}")
+            if verification_details:
+                rprint(Markdown(f"**Details:**\n{verification_details}"))
             else:
-                # Tag truly not found -> Warning
-                rprint("[yellow]Warning:[/yellow] Could not find <issues_count> tag in verification result. Assuming 0 issues.")
-                verification_issues_count = 0
+                rprint(" Details: None provided or no issues found.")
 
-        # Proceed to check for details tag if count > 0
         if verification_issues_count > 0:
-            details_match = re.search(r"<details>(.*?)</details>", verification_result, re.DOTALL)
-            if details_match:
-                verification_details = details_match.group(1).strip()
-                if verification_details:
-                    issues_found = True
-                    if verbose:
-                        rprint(f"\n[yellow]Found {verification_issues_count} potential issues. Proceeding to fix step.[/yellow]")
-                else:
-                    # Count > 0, but details empty -> Warning
-                    rprint("[yellow]Warning:[/yellow] <issues_count> is > 0, but <details> tag is empty. Treating as no issues found.")
-                    verification_issues_count = 0 # Reset count
+            if verification_details and verification_details.strip():
+                issues_found = True
+                if verbose:
+                    rprint(f"\n[yellow]Found {verification_issues_count} potential issues. Proceeding to fix step.[/yellow]")
             else:
-                # Count > 0, but no details tag -> Warning
-                rprint("[yellow]Warning:[/yellow] <issues_count> is > 0, but could not find <details> tag. Treating as no issues found.")
-                verification_issues_count = 0 # Reset count
+                rprint(f"[yellow]Warning:[/yellow] <issues_count> is {verification_issues_count}, but <details> field is empty or missing. Treating as no actionable issues found.")
+                verification_issues_count = 0
         else:
-            # verification_issues_count is 0 (either parsed as 0 or defaulted after warning)
             if verbose:
-                rprint("\n[green]No issues found during verification.[/green]")
+                rprint("\n[green]No issues found during verification based on structured output.[/green]")
+    elif isinstance(verification_result_obj, str):
+        try:
+            issues_match = re.search(r'<issues_count>(\d+)</issues_count>', verification_result_obj)
+            if issues_match:
+                parsed_issues_count = int(issues_match.group(1))
+                details_match = re.search(r'<details>(.*?)</details>', verification_result_obj, re.DOTALL)
+                parsed_verification_details = details_match.group(1).strip() if (details_match and details_match.group(1)) else None
 
-        # Removed ValueError catch as it's handled by the logic above
-    except Exception as e:
-        # Generic catch for other potential parsing issues
-        rprint(f"[bold red]Error parsing verification result:[/bold red] {e}")
+
+                if parsed_issues_count > 0:
+                    if parsed_verification_details: # Check if details exist and are not empty
+                        issues_found = True
+                        verification_issues_count = parsed_issues_count
+                        verification_details = parsed_verification_details
+                        if verbose:
+                            rprint(f"\n[yellow]Found {verification_issues_count} potential issues in string response. Proceeding to fix step.[/yellow]")
+                    else:
+                        rprint(f"[yellow]Warning:[/yellow] <issues_count> is {parsed_issues_count} in string response, but <details> field is empty or missing. Treating as no actionable issues found.")
+                        verification_issues_count = 0
+                        issues_found = False
+                else: # parsed_issues_count == 0
+                    verification_issues_count = 0
+                    issues_found = False
+                    if verbose:
+                        rprint("\n[green]No issues found in string verification based on <issues_count> being 0.[/green]")
+            else: # issues_match is None (tag not found or content not digits)
+                rprint("[bold red]Error:[/bold red] Could not find or parse integer value from <issues_count> tag in string response.")
+                return {
+                    "explanation": None,
+                    "fixed_program": program,
+                    "fixed_code": code,
+                    "total_cost": total_cost,
+                    "model_name": model_name,
+                    "verification_issues_count": 0,
+                }
+        except ValueError: # Should not be hit if regex is \d+, but as a safeguard
+            rprint("[bold red]Error:[/bold red] Invalid non-integer value in <issues_count> tag in string response.")
+            return {
+                "explanation": None,
+                "fixed_program": program,
+                "fixed_code": code,
+                "total_cost": total_cost,
+                "model_name": model_name,
+                "verification_issues_count": 0,
+            }
+    else: # Not VerificationOutput and not a successfully parsed string
+        rprint(f"[bold red]Error:[/bold red] Verification LLM call did not return the expected structured output (e.g., parsing failed).")
+        rprint(f" [dim]Expected type:[/dim] {VerificationOutput} or str")
+        rprint(f" [dim]Received type:[/dim] {type(verification_result_obj)}")
+        content_str = str(verification_result_obj)
+        rprint(f" [dim]Received content:[/dim] {content_str[:500]}{'...' if len(content_str) > 500 else ''}")
+        raw_text = verification_response.get('result_text')
+        if raw_text:
+            raw_text_str = str(raw_text)
+            rprint(f" [dim]Raw LLM text (if available from llm_invoke):[/dim] {raw_text_str[:500]}{'...' if len(raw_text_str) > 500 else ''}")
         return {
             "explanation": None,
             "fixed_program": program,
             "fixed_code": code,
             "total_cost": total_cost,
             "model_name": model_name,
-            "verification_issues_count": 0, # Reset count on parsing error
+            "verification_issues_count": 0,
         }
 
     if issues_found and verification_details:
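
Taken together, the rewritten block assumes a contract where llm_invoke's 'result' is either a parsed VerificationOutput (structured path) or a raw string (tag-scraping fallback). A condensed, runnable restatement of that branching, with a stubbed result in place of a real LLM call (the helper name is hypothetical):

```python
import re
from typing import Optional
from pydantic import BaseModel, Field

class VerificationOutput(BaseModel):
    issues_count: int = Field(description="The number of issues found during verification.")
    details: Optional[str] = Field(default=None)

def extract_issues(result) -> int:
    # Structured path: llm_invoke already parsed the response into the model.
    if isinstance(result, VerificationOutput):
        return result.issues_count
    # Fallback path: scrape the <issues_count> tag out of raw text.
    if isinstance(result, str):
        m = re.search(r"<issues_count>(\d+)</issues_count>", result)
        return int(m.group(1)) if m else 0
    return 0  # unexpected type: treated as an error case upstream

print(extract_issues(VerificationOutput(issues_count=2, details="off-by-one")))  # 2
print(extract_issues("<issues_count>0</issues_count>"))                          # 0
```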
@@ -210,71 +245,81 @@ def fix_verification_errors(
             input_json=fix_input_json,
             strength=strength,
             temperature=temperature,
-            verbose=False, # Keep internal llm_invoke verbose off unless needed
+            verbose=False,
+            output_pydantic=FixerOutput
         )
         total_cost += fix_response.get('cost', 0.0)
-        model_name = fix_response.get('model_name', model_name) # Update model name to the last one used
-        fix_result = fix_response.get('result', '')
+        model_name = fix_response.get('model_name', model_name)
 
         if verbose:
             rprint(f"[cyan]Fix LLM call complete.[/cyan]")
             rprint(f" [dim]Model Used:[/dim] {fix_response.get('model_name', 'N/A')}")
             rprint(f" [dim]Cost:[/dim] ${fix_response.get('cost', 0.0):.6f}")
-            rprint("\n[blue]Fix Result:[/blue]")
-            # Markdown object handles its own rendering, no extra needed here
-            rprint(Markdown(fix_result))
 
-        fixed_program_match = re.search(r"<fixed_program>(.*?)</fixed_program>", fix_result, re.DOTALL)
-        fixed_code_match = re.search(r"<fixed_code>(.*?)</fixed_code>", fix_result, re.DOTALL)
-        explanation_match = re.search(r"<explanation>(.*?)</explanation>", fix_result, re.DOTALL)
+        fix_result_obj = fix_response.get('result')
+        parsed_fix_successfully = False
 
-        if fixed_program_match:
-            fixed_program = fixed_program_match.group(1).strip()
-            if verbose: rprint("[green]Extracted fixed program.[/green]")
-        else:
-            if verbose: rprint("[yellow]Warning:[/yellow] Could not find <fixed_program> tag in fix result. Using original program.")
+        if isinstance(fix_result_obj, FixerOutput):
+            fixed_program = fix_result_obj.fixed_program
+            fixed_code = fix_result_obj.fixed_code
+            fix_explanation = fix_result_obj.explanation
+            parsed_fix_successfully = True
+            if verbose:
+                rprint("[green]Successfully parsed structured output for fix.[/green]")
+                rprint(Markdown(f"**Explanation from LLM:**\n{fix_explanation}"))
+        elif isinstance(fix_result_obj, str):
+            program_match = re.search(r'<fixed_program>(.*?)</fixed_program>', fix_result_obj, re.DOTALL)
+            code_match = re.search(r'<fixed_code>(.*?)</fixed_code>', fix_result_obj, re.DOTALL)
+            explanation_match = re.search(r'<explanation>(.*?)</explanation>', fix_result_obj, re.DOTALL)
 
-        if fixed_code_match:
-            fixed_code = fixed_code_match.group(1).strip()
-            if verbose: rprint("[green]Extracted fixed code module.[/green]")
-        else:
-            if verbose: rprint("[yellow]Warning:[/yellow] Could not find <fixed_code> tag in fix result. Using original code module.")
+            if program_match or code_match or explanation_match: # If any tag is found, attempt to parse
+                fixed_program_candidate = program_match.group(1).strip() if (program_match and program_match.group(1)) else None
+                fixed_code_candidate = code_match.group(1).strip() if (code_match and code_match.group(1)) else None
+                fix_explanation_candidate = explanation_match.group(1).strip() if (explanation_match and explanation_match.group(1)) else None
 
-        if explanation_match:
-            fix_explanation = explanation_match.group(1).strip()
-            if verbose: rprint("[green]Extracted fix explanation.[/green]")
-        else:
-            if verbose: rprint("[yellow]Warning:[/yellow] Could not find <explanation> tag in fix result.")
-            fix_explanation = "[Fix explanation not provided by LLM]"
+                fixed_program = fixed_program_candidate if fixed_program_candidate else program
+                fixed_code = fixed_code_candidate if fixed_code_candidate else code
+                fix_explanation = fix_explanation_candidate if fix_explanation_candidate else "[Fix explanation not provided by LLM]"
+                parsed_fix_successfully = True
+
+                if verbose:
+                    if not program_match or not fixed_program_candidate:
+                        rprint("[yellow]Warning:[/yellow] Could not find or parse <fixed_program> tag in fix result string. Using original program.")
+                    if not code_match or not fixed_code_candidate:
+                        rprint("[yellow]Warning:[/yellow] Could not find or parse <fixed_code> tag in fix result string. Using original code module.")
+                    if not explanation_match or not fix_explanation_candidate:
+                        rprint("[yellow]Warning:[/yellow] Could not find or parse <explanation> tag in fix result string. Using default explanation.")
+            # else: string, but no relevant tags. Will fall to parsed_fix_successfully = False below
+
+        if not parsed_fix_successfully:
+            rprint(f"[bold red]Error:[/bold red] Fix generation LLM call did not return the expected structured output (e.g., parsing failed).")
+            rprint(f" [dim]Expected type:[/dim] {FixerOutput} or str (with XML tags)")
+            rprint(f" [dim]Received type:[/dim] {type(fix_result_obj)}")
+            content_str = str(fix_result_obj)
+            rprint(f" [dim]Received content:[/dim] {content_str[:500]}{'...' if len(content_str) > 500 else ''}")
+            raw_text = fix_response.get('result_text')
+            if raw_text:
+                raw_text_str = str(raw_text)
+                rprint(f" [dim]Raw LLM text (if available from llm_invoke):[/dim] {raw_text_str[:500]}{'...' if len(raw_text_str) > 500 else ''}")
+            fix_explanation = "[Error: Failed to parse structured output from LLM for fix explanation]"
+            # fixed_program and fixed_code remain original (already initialized)
 
     except Exception as e:
-        rprint(f"[bold red]Error during fix LLM call or extraction:[/bold red] {e}")
-        # Combine verification details with the error message if fix failed
-        final_explanation = f"<error>Error during fix generation: {str(e)}</error>\n"
-        if verification_details:
-            fix_explanation = f"[Error during fix generation: {e}]"
-        # Note: verification_issues_count should retain its value from the verification step
+        rprint(f"[bold red]Error during fix LLM call or processing structured output:[/bold red] {e}")
+        fix_explanation = f"[Error during fix generation: {e}]"
+        # fixed_program and fixed_code remain original
+
+    if issues_found:
+        final_explanation = (
+            f"<verification_details>{verification_details}</verification_details>\n"
+            f"<fix_explanation>{fix_explanation}</fix_explanation>"
+        )
+    else:
+        final_explanation = None # Or "" if an empty list/None is preferred per prompt for "no issues"
 
     if verbose:
         rprint(f"\n[bold blue]Total Cost for fix_verification_errors run:[/bold blue] ${total_cost:.6f}")
 
-    # Construct final explanation only if issues were initially found and processed
-    if verification_details:
-        if fix_explanation:
-            final_explanation = (
-                f"<verification_details>{verification_details}</verification_details>\n"
-                f"<fix_explanation>{fix_explanation}</fix_explanation>"
-            )
-        else:
-            # This case might occur if fix step wasn't run due to parsing issues after verification,
-            # or if fix_explanation extraction failed silently (though we added a default).
-            # Let's ensure we always provide some context if details were found.
-            final_explanation = (
-                f"<verification_details>{verification_details}</verification_details>\n"
-                f"<fix_explanation>[Fix explanation not available or fix step skipped]</fix_explanation>"
-            )
-    # If no issues were found initially (verification_details is None), final_explanation remains None
-
     return {
         "explanation": final_explanation,
         "fixed_program": fixed_program,
@@ -282,4 +327,4 @@ def fix_verification_errors(
         "total_cost": total_cost,
         "model_name": model_name,
         "verification_issues_count": verification_issues_count,
-    }
+    }
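
One behavioral nuance in this hunk: final_explanation is now built only when issues_found is true, and it always pairs the two XML-style sections rather than falling back to a placeholder. A runnable illustration of the resulting shape (the sample strings are invented):

```python
# Sketch of the string fix_verification_errors now returns under "explanation"
# when issues were found; None is returned when no issues were found.
verification_details = "The program prints the raw dict instead of the formatted total."
fix_explanation = "Formatted the total before printing and returned it from main()."
final_explanation = (
    f"<verification_details>{verification_details}</verification_details>\n"
    f"<fix_explanation>{fix_explanation}</fix_explanation>"
)
print(final_explanation)
```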