pdd-cli 0.0.24__py3-none-any.whl → 0.0.25__py3-none-any.whl
This diff compares the content of two publicly released versions of the package. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
Potentially problematic release: this version of pdd-cli might be problematic.
- pdd/__init__.py +7 -1
- pdd/bug_main.py +5 -1
- pdd/bug_to_unit_test.py +16 -5
- pdd/change.py +2 -1
- pdd/change_main.py +407 -189
- pdd/cli.py +853 -301
- pdd/code_generator.py +2 -1
- pdd/conflicts_in_prompts.py +2 -1
- pdd/construct_paths.py +377 -222
- pdd/context_generator.py +2 -1
- pdd/continue_generation.py +3 -2
- pdd/crash_main.py +55 -20
- pdd/detect_change.py +2 -1
- pdd/fix_code_loop.py +465 -160
- pdd/fix_code_module_errors.py +7 -4
- pdd/fix_error_loop.py +9 -9
- pdd/fix_errors_from_unit_tests.py +207 -365
- pdd/fix_main.py +31 -4
- pdd/fix_verification_errors.py +60 -34
- pdd/fix_verification_errors_loop.py +842 -768
- pdd/fix_verification_main.py +412 -0
- pdd/generate_output_paths.py +427 -189
- pdd/generate_test.py +3 -2
- pdd/increase_tests.py +2 -2
- pdd/llm_invoke.py +14 -3
- pdd/preprocess.py +3 -3
- pdd/process_csv_change.py +466 -154
- pdd/prompts/extract_prompt_update_LLM.prompt +11 -5
- pdd/prompts/extract_unit_code_fix_LLM.prompt +2 -2
- pdd/prompts/fix_code_module_errors_LLM.prompt +29 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +5 -5
- pdd/prompts/generate_test_LLM.prompt +9 -3
- pdd/prompts/update_prompt_LLM.prompt +3 -3
- pdd/split.py +6 -5
- pdd/split_main.py +13 -4
- pdd/trace_main.py +7 -0
- pdd/xml_tagger.py +2 -1
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.25.dist-info}/METADATA +4 -4
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.25.dist-info}/RECORD +43 -42
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.25.dist-info}/WHEEL +1 -1
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.25.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.25.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.25.dist-info}/top_level.txt +0 -0
pdd/fix_main.py
CHANGED
@@ -2,10 +2,12 @@ import sys
 from typing import Tuple, Optional
 import click
 from rich import print as rprint
+from rich.markup import MarkupError, escape
 
 import requests
 import asyncio
 import os
+from pathlib import Path
 
 from .preprocess import preprocess
 
@@ -15,6 +17,9 @@ from .fix_error_loop import fix_error_loop
 from .get_jwt_token import get_jwt_token
 from .get_language import get_language
 
+# Import DEFAULT_STRENGTH from the package
+from . import DEFAULT_STRENGTH
+
 def fix_main(
     ctx: click.Context,
     prompt_file: str,
@@ -65,6 +70,12 @@ def fix_main(
     analysis_results = None
 
     try:
+        # Verify error file exists if not in loop mode
+        if not loop:
+            error_path = Path(error_file)
+            if not error_path.exists():
+                raise FileNotFoundError(f"Error file '{error_file}' does not exist.")
+
         # Construct file paths
         input_file_paths = {
             "prompt_file": prompt_file,
@@ -85,11 +96,12 @@ def fix_main(
             force=ctx.obj.get('force', False),
             quiet=ctx.obj.get('quiet', False),
             command="fix",
-            command_options=command_options
+            command_options=command_options,
+            create_error_file=loop  # Only create error file if in loop mode
         )
 
         # Get parameters from context
-        strength = ctx.obj.get('strength',
+        strength = ctx.obj.get('strength', DEFAULT_STRENGTH)
         temperature = ctx.obj.get('temperature', 0)
         verbose = ctx.obj.get('verbose', False)
         if loop:
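The notable change in this hunk is the fallback default: `strength = ctx.obj.get('strength', DEFAULT_STRENGTH)` replaces a previously truncated expression. Below is a minimal sketch of the pattern, assuming a hypothetical group/command wiring; the `0.5` placeholder stands in for whatever value `pdd/__init__.py` actually defines.

import click

DEFAULT_STRENGTH = 0.5  # placeholder; the real value lives in pdd/__init__.py

@click.group()
@click.option("--strength", type=float, default=None)
@click.pass_context
def cli(ctx: click.Context, strength: float) -> None:
    # Shared options are stashed on ctx.obj for subcommands to read.
    ctx.ensure_object(dict)
    if strength is not None:
        ctx.obj["strength"] = strength

@cli.command()
@click.pass_context
def fix(ctx: click.Context) -> None:
    # Missing key -> fall back to the package-wide default, as fix_main now does.
    strength = ctx.obj.get("strength", DEFAULT_STRENGTH)
    click.echo(f"strength={strength}")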
@@ -139,7 +151,16 @@ def fix_main(
             if verbose and analysis_results:
                 # Log the first 200 characters of analysis if in verbose mode
                 analysis_preview = analysis_results[:200] + "..." if len(analysis_results) > 200 else analysis_results
-
+                try:
+                    # Attempt to print the preview using rich markup parsing
+                    rprint(f"[bold]Analysis preview:[/bold] {analysis_preview}")
+                except MarkupError as me:
+                    # If markup fails, print a warning and the escaped preview
+                    rprint(f"[bold yellow]Warning:[/bold yellow] Analysis preview contained invalid markup: {me}")
+                    rprint(f"[bold]Raw Analysis preview (escaped):[/bold] {escape(analysis_preview)}")
+                except Exception as e:
+                    # Handle other potential errors during preview printing
+                    rprint(f"[bold red]Error printing analysis preview: {e}[/bold red]")
             if success:
                 rprint("[bold green]Fixed files saved:[/bold green]")
                 rprint(f" Test file: {output_file_paths['output_test']}")
@@ -263,5 +284,11 @@ def fix_main(
 
     except Exception as e:
         if not ctx.obj.get('quiet', False):
-
+            # Safely handle and print MarkupError
+            if isinstance(e, MarkupError):
+                rprint(f"[bold red]Markup Error in fix_main:[/bold red]")
+                rprint(escape(str(e)))
+            else:
+                # Print other errors normally (might still fail if they contain markup)
+                rprint(f"[bold red]Error:[/bold red] {str(e)}")
         sys.exit(1)
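Taken together, the fix_main.py hunks wire rich's markup safety into the user-facing prints: LLM output can contain bracketed text that rich would try to parse as markup tags. A self-contained sketch of the pattern (the function name is illustrative, not pdd's):

from rich import print as rprint
from rich.markup import MarkupError, escape

def print_preview(preview: str) -> None:
    try:
        # rich parses [tags] in the string; malformed tags raise MarkupError.
        rprint(f"[bold]Analysis preview:[/bold] {preview}")
    except MarkupError as err:
        # Fall back to an escaped rendering so the text still reaches the user.
        rprint(f"[bold yellow]Warning:[/bold yellow] invalid markup: {err}")
        rprint(f"[bold]Raw preview (escaped):[/bold] {escape(preview)}")

# A stray closing tag, as an LLM might emit, triggers the fallback path.
print_preview("fine until[/bold] a mismatched closing tag appears")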
pdd/fix_verification_errors.py
CHANGED
@@ -45,12 +45,14 @@ def fix_verification_errors(
     fixed_code = code
     final_explanation = None
 
-
-
+    # Check only essential inputs, allow empty output
+    if not all([program, prompt, code]):
+        # Keep the error print for program, prompt, code missing
+        rprint("[bold red]Error:[/bold red] Missing one or more required inputs (program, prompt, code).")
         return {
             "explanation": None,
-            "fixed_program": program,
-            "fixed_code": code,
+            "fixed_program": program, # Return original if possible
+            "fixed_code": code, # Return original if possible
             "total_cost": 0.0,
             "model_name": None,
             "verification_issues_count": 0,
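Several hunks in this file return the same early-exit dictionary. A hedged sketch of that result contract, with the keys taken verbatim from the diff and a hypothetical helper name:

from typing import Any, Dict, Optional

def unchanged_result(program: str, code: str,
                     total_cost: float = 0.0,
                     model_name: Optional[str] = None) -> Dict[str, Any]:
    # Early exits hand back the inputs untouched plus zeroed bookkeeping.
    return {
        "explanation": None,
        "fixed_program": program,
        "fixed_code": code,
        "total_cost": total_cost,
        "model_name": model_name,
        "verification_issues_count": 0,
    }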
@@ -102,7 +104,7 @@ def fix_verification_errors(
         input_json=verification_input_json,
         strength=strength,
         temperature=temperature,
-        verbose=False,
+        verbose=False, # Keep internal llm_invoke verbose off unless needed
     )
     total_cost += verification_response.get('cost', 0.0)
     model_name = verification_response.get('model_name', model_name)
@@ -126,17 +128,36 @@ def fix_verification_errors(
 
     if verbose:
         rprint("\n[blue]Verification Result:[/blue]")
+        # Markdown object handles its own rendering, no extra needed here
         rprint(Markdown(verification_result))
 
     issues_found = False
     try:
+        # Attempt to match and extract digits directly
        count_match = re.search(r"<issues_count>(\d+)</issues_count>", verification_result)
         if count_match:
-            verification_issues_count = int(count_match.group(1))
+            verification_issues_count = int(count_match.group(1)) # Safe due to \d+
         else:
-
-
+            # Specific match failed, check if tag exists with invalid content or is missing
+            generic_count_match = re.search(r"<issues_count>(.*?)</issues_count>", verification_result, re.DOTALL)
+            if generic_count_match:
+                # Tag found, but content is not \d+ -> Parsing Error
+                rprint("[bold red]Error:[/bold red] Could not parse integer value from <issues_count> tag.")
+                # Return the specific error structure for parsing errors after verification call
+                return {
+                    "explanation": None,
+                    "fixed_program": program,
+                    "fixed_code": code,
+                    "total_cost": total_cost, # Cost incurred so far
+                    "model_name": model_name, # Model used so far
+                    "verification_issues_count": 0, # Reset count on parsing error
+                }
+            else:
+                # Tag truly not found -> Warning
+                rprint("[yellow]Warning:[/yellow] Could not find <issues_count> tag in verification result. Assuming 0 issues.")
+                verification_issues_count = 0
 
+        # Proceed to check for details tag if count > 0
         if verification_issues_count > 0:
             details_match = re.search(r"<details>(.*?)</details>", verification_result, re.DOTALL)
             if details_match:
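The parsing change above is worth isolating: a strict `\d+` match distinguishes a well-formed count from a malformed tag, and only a genuinely missing tag degrades to a warning. A standalone sketch (names illustrative, not pdd's):

import re
from typing import Optional

def parse_issues_count(result: str) -> Optional[int]:
    strict = re.search(r"<issues_count>(\d+)</issues_count>", result)
    if strict:
        return int(strict.group(1))  # int() cannot fail: the group is all digits
    generic = re.search(r"<issues_count>(.*?)</issues_count>", result, re.DOTALL)
    if generic:
        return None  # tag present but non-numeric -> hard parsing error
    return 0  # tag absent entirely -> assume zero issues and warn upstream

assert parse_issues_count("<issues_count>3</issues_count>") == 3
assert parse_issues_count("<issues_count>three</issues_count>") is None
assert parse_issues_count("no tags at all") == 0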
@@ -146,26 +167,21 @@ def fix_verification_errors(
                     if verbose:
                         rprint(f"\n[yellow]Found {verification_issues_count} potential issues. Proceeding to fix step.[/yellow]")
                 else:
+                    # Count > 0, but details empty -> Warning
                     rprint("[yellow]Warning:[/yellow] <issues_count> is > 0, but <details> tag is empty. Treating as no issues found.")
-                    verification_issues_count = 0
+                    verification_issues_count = 0 # Reset count
             else:
+                # Count > 0, but no details tag -> Warning
                 rprint("[yellow]Warning:[/yellow] <issues_count> is > 0, but could not find <details> tag. Treating as no issues found.")
-                verification_issues_count = 0
+                verification_issues_count = 0 # Reset count
         else:
+            # verification_issues_count is 0 (either parsed as 0 or defaulted after warning)
             if verbose:
                 rprint("\n[green]No issues found during verification.[/green]")
 
-
-        rprint("[bold red]Error:[/bold red] Could not parse integer value from <issues_count> tag.")
-        return {
-            "explanation": None,
-            "fixed_program": program,
-            "fixed_code": code,
-            "total_cost": total_cost,
-            "model_name": model_name,
-            "verification_issues_count": 0,
-        }
+        # Removed ValueError catch as it's handled by the logic above
     except Exception as e:
+        # Generic catch for other potential parsing issues
         rprint(f"[bold red]Error parsing verification result:[/bold red] {e}")
         return {
             "explanation": None,
@@ -173,7 +189,7 @@ def fix_verification_errors(
             "fixed_code": code,
             "total_cost": total_cost,
             "model_name": model_name,
-            "verification_issues_count": 0,
+            "verification_issues_count": 0, # Reset count on parsing error
         }
 
     if issues_found and verification_details:
@@ -194,10 +210,10 @@ def fix_verification_errors(
             input_json=fix_input_json,
             strength=strength,
             temperature=temperature,
-            verbose=False,
+            verbose=False, # Keep internal llm_invoke verbose off unless needed
         )
         total_cost += fix_response.get('cost', 0.0)
-        model_name = fix_response.get('model_name', model_name)
+        model_name = fix_response.get('model_name', model_name) # Update model name to the last one used
         fix_result = fix_response.get('result', '')
 
         if verbose:
@@ -205,6 +221,7 @@ def fix_verification_errors(
             rprint(f" [dim]Model Used:[/dim] {fix_response.get('model_name', 'N/A')}")
             rprint(f" [dim]Cost:[/dim] ${fix_response.get('cost', 0.0):.6f}")
             rprint("\n[blue]Fix Result:[/blue]")
+            # Markdown object handles its own rendering, no extra needed here
             rprint(Markdown(fix_result))
 
         fixed_program_match = re.search(r"<fixed_program>(.*?)</fixed_program>", fix_result, re.DOTALL)
@@ -232,22 +249,31 @@ def fix_verification_errors(
 
     except Exception as e:
         rprint(f"[bold red]Error during fix LLM call or extraction:[/bold red] {e}")
-
+        # Combine verification details with the error message if fix failed
+        final_explanation = f"<error>Error during fix generation: {str(e)}</error>\n"
+        if verification_details:
             fix_explanation = f"[Error during fix generation: {e}]"
+        # Note: verification_issues_count should retain its value from the verification step
 
     if verbose:
         rprint(f"\n[bold blue]Total Cost for fix_verification_errors run:[/bold blue] ${total_cost:.6f}")
 
-    if
-
-
-
-
-
-
-
-
+    # Construct final explanation only if issues were initially found and processed
+    if verification_details:
+        if fix_explanation:
+            final_explanation = (
+                f"<verification_details>{verification_details}</verification_details>\n"
+                f"<fix_explanation>{fix_explanation}</fix_explanation>"
+            )
+        else:
+            # This case might occur if fix step wasn't run due to parsing issues after verification,
+            # or if fix_explanation extraction failed silently (though we added a default).
+            # Let's ensure we always provide some context if details were found.
+            final_explanation = (
+                f"<verification_details>{verification_details}</verification_details>\n"
+                f"<fix_explanation>[Fix explanation not available or fix step skipped]</fix_explanation>"
+            )
+    # If no issues were found initially (verification_details is None), final_explanation remains None
 
     return {
         "explanation": final_explanation,
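The final hunk replaces a block that is truncated in this view with an explicit two-branch assembly of final_explanation. Condensed into a standalone sketch, with a hypothetical helper name and the logic taken from the added lines:

from typing import Optional

def build_explanation(verification_details: Optional[str],
                      fix_explanation: Optional[str]) -> Optional[str]:
    if not verification_details:
        return None  # no issues found -> no explanation at all
    body = fix_explanation or "[Fix explanation not available or fix step skipped]"
    return (
        f"<verification_details>{verification_details}</verification_details>\n"
        f"<fix_explanation>{body}</fix_explanation>"
    )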