pdd-cli 0.0.45__py3-none-any.whl → 0.0.90__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +4 -4
- pdd/agentic_common.py +863 -0
- pdd/agentic_crash.py +534 -0
- pdd/agentic_fix.py +1179 -0
- pdd/agentic_langtest.py +162 -0
- pdd/agentic_update.py +370 -0
- pdd/agentic_verify.py +183 -0
- pdd/auto_deps_main.py +15 -5
- pdd/auto_include.py +63 -5
- pdd/bug_main.py +3 -2
- pdd/bug_to_unit_test.py +2 -0
- pdd/change_main.py +11 -4
- pdd/cli.py +22 -1181
- pdd/cmd_test_main.py +73 -21
- pdd/code_generator.py +58 -18
- pdd/code_generator_main.py +672 -25
- pdd/commands/__init__.py +42 -0
- pdd/commands/analysis.py +248 -0
- pdd/commands/fix.py +140 -0
- pdd/commands/generate.py +257 -0
- pdd/commands/maintenance.py +174 -0
- pdd/commands/misc.py +79 -0
- pdd/commands/modify.py +230 -0
- pdd/commands/report.py +144 -0
- pdd/commands/templates.py +215 -0
- pdd/commands/utility.py +110 -0
- pdd/config_resolution.py +58 -0
- pdd/conflicts_main.py +8 -3
- pdd/construct_paths.py +258 -82
- pdd/context_generator.py +10 -2
- pdd/context_generator_main.py +113 -11
- pdd/continue_generation.py +47 -7
- pdd/core/__init__.py +0 -0
- pdd/core/cli.py +503 -0
- pdd/core/dump.py +554 -0
- pdd/core/errors.py +63 -0
- pdd/core/utils.py +90 -0
- pdd/crash_main.py +44 -11
- pdd/data/language_format.csv +71 -63
- pdd/data/llm_model.csv +20 -18
- pdd/detect_change_main.py +5 -4
- pdd/fix_code_loop.py +330 -76
- pdd/fix_error_loop.py +207 -61
- pdd/fix_errors_from_unit_tests.py +4 -3
- pdd/fix_main.py +75 -18
- pdd/fix_verification_errors.py +12 -100
- pdd/fix_verification_errors_loop.py +306 -272
- pdd/fix_verification_main.py +28 -9
- pdd/generate_output_paths.py +93 -10
- pdd/generate_test.py +16 -5
- pdd/get_jwt_token.py +9 -2
- pdd/get_run_command.py +73 -0
- pdd/get_test_command.py +68 -0
- pdd/git_update.py +70 -19
- pdd/incremental_code_generator.py +2 -2
- pdd/insert_includes.py +11 -3
- pdd/llm_invoke.py +1269 -103
- pdd/load_prompt_template.py +36 -10
- pdd/pdd_completion.fish +25 -2
- pdd/pdd_completion.sh +30 -4
- pdd/pdd_completion.zsh +79 -4
- pdd/postprocess.py +10 -3
- pdd/preprocess.py +228 -15
- pdd/preprocess_main.py +8 -5
- pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
- pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
- pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
- pdd/prompts/agentic_update_LLM.prompt +1071 -0
- pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
- pdd/prompts/auto_include_LLM.prompt +100 -905
- pdd/prompts/detect_change_LLM.prompt +122 -20
- pdd/prompts/example_generator_LLM.prompt +22 -1
- pdd/prompts/extract_code_LLM.prompt +5 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +7 -1
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/extract_promptline_LLM.prompt +17 -11
- pdd/prompts/find_verification_errors_LLM.prompt +6 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +4 -2
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +8 -0
- pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
- pdd/prompts/generate_test_LLM.prompt +21 -6
- pdd/prompts/increase_tests_LLM.prompt +1 -5
- pdd/prompts/insert_includes_LLM.prompt +228 -108
- pdd/prompts/trace_LLM.prompt +25 -22
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/prompts/update_prompt_LLM.prompt +22 -1
- pdd/pytest_output.py +127 -12
- pdd/render_mermaid.py +236 -0
- pdd/setup_tool.py +648 -0
- pdd/simple_math.py +2 -0
- pdd/split_main.py +3 -2
- pdd/summarize_directory.py +49 -6
- pdd/sync_determine_operation.py +543 -98
- pdd/sync_main.py +81 -31
- pdd/sync_orchestration.py +1334 -751
- pdd/sync_tui.py +848 -0
- pdd/template_registry.py +264 -0
- pdd/templates/architecture/architecture_json.prompt +242 -0
- pdd/templates/generic/generate_prompt.prompt +174 -0
- pdd/trace.py +168 -12
- pdd/trace_main.py +4 -3
- pdd/track_cost.py +151 -61
- pdd/unfinished_prompt.py +49 -3
- pdd/update_main.py +549 -67
- pdd/update_model_costs.py +2 -2
- pdd/update_prompt.py +19 -4
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/METADATA +19 -6
- pdd_cli-0.0.90.dist-info/RECORD +153 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/licenses/LICENSE +1 -1
- pdd_cli-0.0.45.dist-info/RECORD +0 -116
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/top_level.txt +0 -0
pdd/commands/__init__.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Command registration module.
|
|
3
|
+
"""
|
|
4
|
+
import click
|
|
5
|
+
|
|
6
|
+
from .generate import generate, test, example
|
|
7
|
+
from .fix import fix
|
|
8
|
+
from .modify import split, change, update
|
|
9
|
+
from .maintenance import sync, auto_deps, setup
|
|
10
|
+
from .analysis import detect_change, conflicts, bug, crash, trace
|
|
11
|
+
from .misc import preprocess
|
|
12
|
+
from .report import report_core
|
|
13
|
+
from .templates import templates_group
|
|
14
|
+
from .utility import install_completion_cmd, verify
|
|
15
|
+
|
|
16
|
+
def register_commands(cli: click.Group) -> None:
    """Register all subcommands with the main CLI group.

    Args:
        cli: The top-level click group that all leaf commands and the
            nested ``templates`` group are attached to.
    """
    # Leaf commands.
    cli.add_command(generate)
    cli.add_command(test)
    cli.add_command(example)
    cli.add_command(fix)
    cli.add_command(split)
    cli.add_command(change)
    cli.add_command(update)
    cli.add_command(sync)
    cli.add_command(auto_deps)
    cli.add_command(setup)
    cli.add_command(detect_change)
    cli.add_command(conflicts)
    cli.add_command(bug)
    cli.add_command(crash)
    cli.add_command(trace)
    cli.add_command(preprocess)
    cli.add_command(report_core)
    # Explicit name keeps the historical "install_completion" spelling.
    cli.add_command(install_completion_cmd, name="install_completion")
    cli.add_command(verify)

    # add_command accepts groups as well as commands, so the nested
    # "templates" group is registered the same way as the leaves.
    cli.add_command(templates_group)
|
pdd/commands/analysis.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Analysis commands (detect-change, conflicts, bug, crash, trace).
|
|
3
|
+
"""
|
|
4
|
+
import click
|
|
5
|
+
from typing import Optional, Tuple, List
|
|
6
|
+
|
|
7
|
+
from ..detect_change_main import detect_change_main
|
|
8
|
+
from ..conflicts_main import conflicts_main
|
|
9
|
+
from ..bug_main import bug_main
|
|
10
|
+
from ..crash_main import crash_main
|
|
11
|
+
from ..trace_main import trace_main
|
|
12
|
+
from ..track_cost import track_cost
|
|
13
|
+
from ..core.errors import handle_error
|
|
14
|
+
|
|
15
|
+
@click.command("detect")
|
|
16
|
+
@click.argument("files", nargs=-1, type=click.Path(exists=True, dir_okay=False))
|
|
17
|
+
@click.option(
|
|
18
|
+
"--output",
|
|
19
|
+
type=click.Path(writable=True),
|
|
20
|
+
default=None,
|
|
21
|
+
help="Specify where to save the analysis results (CSV file).",
|
|
22
|
+
)
|
|
23
|
+
@click.pass_context
|
|
24
|
+
@track_cost
|
|
25
|
+
def detect_change(
|
|
26
|
+
ctx: click.Context,
|
|
27
|
+
files: Tuple[str, ...],
|
|
28
|
+
output: Optional[str],
|
|
29
|
+
) -> Optional[Tuple[List, float, str]]:
|
|
30
|
+
"""Detect if prompts need to be changed based on a description.
|
|
31
|
+
|
|
32
|
+
Usage: pdd detect [PROMPT_FILES...] CHANGE_FILE
|
|
33
|
+
"""
|
|
34
|
+
try:
|
|
35
|
+
if len(files) < 2:
|
|
36
|
+
raise click.UsageError("Requires at least one PROMPT_FILE and one CHANGE_FILE.")
|
|
37
|
+
|
|
38
|
+
# According to usage conventions (and README), the last file is the change file
|
|
39
|
+
change_file = files[-1]
|
|
40
|
+
prompt_files = list(files[:-1])
|
|
41
|
+
|
|
42
|
+
result, total_cost, model_name = detect_change_main(
|
|
43
|
+
ctx=ctx,
|
|
44
|
+
prompt_files=prompt_files,
|
|
45
|
+
change_file=change_file,
|
|
46
|
+
output=output,
|
|
47
|
+
)
|
|
48
|
+
return result, total_cost, model_name
|
|
49
|
+
except click.Abort:
|
|
50
|
+
raise
|
|
51
|
+
except Exception as exception:
|
|
52
|
+
handle_error(exception, "detect", ctx.obj.get("quiet", False))
|
|
53
|
+
return None
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@click.command("conflicts")
|
|
57
|
+
@click.argument("prompt1", type=click.Path(exists=True, dir_okay=False))
|
|
58
|
+
@click.argument("prompt2", type=click.Path(exists=True, dir_okay=False))
|
|
59
|
+
@click.option(
|
|
60
|
+
"--output",
|
|
61
|
+
type=click.Path(writable=True),
|
|
62
|
+
default=None,
|
|
63
|
+
help="Specify where to save the conflict analysis results (CSV file).",
|
|
64
|
+
)
|
|
65
|
+
@click.pass_context
|
|
66
|
+
@track_cost
|
|
67
|
+
def conflicts(
|
|
68
|
+
ctx: click.Context,
|
|
69
|
+
prompt1: str,
|
|
70
|
+
prompt2: str,
|
|
71
|
+
output: Optional[str],
|
|
72
|
+
) -> Optional[Tuple[List, float, str]]:
|
|
73
|
+
"""Check for conflicts between two prompt files."""
|
|
74
|
+
try:
|
|
75
|
+
result, total_cost, model_name = conflicts_main(
|
|
76
|
+
ctx=ctx,
|
|
77
|
+
prompt1=prompt1,
|
|
78
|
+
prompt2=prompt2,
|
|
79
|
+
output=output,
|
|
80
|
+
verbose=ctx.obj.get("verbose", False),
|
|
81
|
+
)
|
|
82
|
+
return result, total_cost, model_name
|
|
83
|
+
except click.Abort:
|
|
84
|
+
raise
|
|
85
|
+
except Exception as exception:
|
|
86
|
+
handle_error(exception, "conflicts", ctx.obj.get("quiet", False))
|
|
87
|
+
return None
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@click.command("bug")
|
|
91
|
+
@click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
|
|
92
|
+
@click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
|
|
93
|
+
@click.argument("program_file", type=click.Path(exists=True, dir_okay=False))
|
|
94
|
+
@click.argument("current_output", type=click.Path(exists=True, dir_okay=False))
|
|
95
|
+
@click.argument("desired_output", type=click.Path(exists=True, dir_okay=False))
|
|
96
|
+
@click.option(
|
|
97
|
+
"--output",
|
|
98
|
+
type=click.Path(writable=True),
|
|
99
|
+
default=None,
|
|
100
|
+
help="Specify where to save the generated unit test (file or directory).",
|
|
101
|
+
)
|
|
102
|
+
@click.option(
|
|
103
|
+
"--language",
|
|
104
|
+
type=str,
|
|
105
|
+
default="Python",
|
|
106
|
+
help="Programming language for the unit test.",
|
|
107
|
+
)
|
|
108
|
+
@click.pass_context
|
|
109
|
+
@track_cost
|
|
110
|
+
def bug(
|
|
111
|
+
ctx: click.Context,
|
|
112
|
+
prompt_file: str,
|
|
113
|
+
code_file: str,
|
|
114
|
+
program_file: str,
|
|
115
|
+
current_output: str,
|
|
116
|
+
desired_output: str,
|
|
117
|
+
output: Optional[str],
|
|
118
|
+
language: str,
|
|
119
|
+
) -> Optional[Tuple[str, float, str]]:
|
|
120
|
+
"""Generate a unit test reproducing a bug from inputs and outputs."""
|
|
121
|
+
try:
|
|
122
|
+
result, total_cost, model_name = bug_main(
|
|
123
|
+
ctx=ctx,
|
|
124
|
+
prompt_file=prompt_file,
|
|
125
|
+
code_file=code_file,
|
|
126
|
+
program_file=program_file,
|
|
127
|
+
current_output=current_output,
|
|
128
|
+
desired_output=desired_output,
|
|
129
|
+
output=output,
|
|
130
|
+
language=language,
|
|
131
|
+
)
|
|
132
|
+
return result, total_cost, model_name
|
|
133
|
+
except click.Abort:
|
|
134
|
+
raise
|
|
135
|
+
except Exception as exception:
|
|
136
|
+
handle_error(exception, "bug", ctx.obj.get("quiet", False))
|
|
137
|
+
return None
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
@click.command("crash")
|
|
141
|
+
@click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
|
|
142
|
+
@click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
|
|
143
|
+
@click.argument("program_file", type=click.Path(exists=True, dir_okay=False))
|
|
144
|
+
@click.argument("error_file", type=click.Path(exists=True, dir_okay=False))
|
|
145
|
+
@click.option(
|
|
146
|
+
"--output",
|
|
147
|
+
type=click.Path(writable=True),
|
|
148
|
+
default=None,
|
|
149
|
+
help="Specify where to save the fixed code file (file or directory).",
|
|
150
|
+
)
|
|
151
|
+
@click.option(
|
|
152
|
+
"--output-program",
|
|
153
|
+
type=click.Path(writable=True),
|
|
154
|
+
default=None,
|
|
155
|
+
help="Specify where to save the fixed program file (file or directory).",
|
|
156
|
+
)
|
|
157
|
+
@click.option(
|
|
158
|
+
"--loop",
|
|
159
|
+
is_flag=True,
|
|
160
|
+
default=False,
|
|
161
|
+
help="Enable iterative fixing process.",
|
|
162
|
+
)
|
|
163
|
+
@click.option(
|
|
164
|
+
"--max-attempts",
|
|
165
|
+
type=int,
|
|
166
|
+
default=None,
|
|
167
|
+
help="Maximum number of fix attempts (default: 3).",
|
|
168
|
+
)
|
|
169
|
+
@click.option(
|
|
170
|
+
"--budget",
|
|
171
|
+
type=float,
|
|
172
|
+
default=None,
|
|
173
|
+
help="Maximum cost allowed for the fixing process (default: 5.0).",
|
|
174
|
+
)
|
|
175
|
+
@click.pass_context
|
|
176
|
+
@track_cost
|
|
177
|
+
def crash(
|
|
178
|
+
ctx: click.Context,
|
|
179
|
+
prompt_file: str,
|
|
180
|
+
code_file: str,
|
|
181
|
+
program_file: str,
|
|
182
|
+
error_file: str,
|
|
183
|
+
output: Optional[str],
|
|
184
|
+
output_program: Optional[str],
|
|
185
|
+
loop: bool,
|
|
186
|
+
max_attempts: Optional[int],
|
|
187
|
+
budget: Optional[float],
|
|
188
|
+
) -> Optional[Tuple[str, float, str]]:
|
|
189
|
+
"""Analyze a crash and fix the code and program."""
|
|
190
|
+
try:
|
|
191
|
+
# crash_main returns: success, final_code, final_program, attempts, cost, model
|
|
192
|
+
success, final_code, final_program, attempts, total_cost, model_name = crash_main(
|
|
193
|
+
ctx=ctx,
|
|
194
|
+
prompt_file=prompt_file,
|
|
195
|
+
code_file=code_file,
|
|
196
|
+
program_file=program_file,
|
|
197
|
+
error_file=error_file,
|
|
198
|
+
output=output,
|
|
199
|
+
output_program=output_program,
|
|
200
|
+
loop=loop,
|
|
201
|
+
max_attempts=max_attempts,
|
|
202
|
+
budget=budget,
|
|
203
|
+
)
|
|
204
|
+
# Return a summary string as the result for track_cost/CLI output
|
|
205
|
+
result = f"Success: {success}, Attempts: {attempts}"
|
|
206
|
+
return result, total_cost, model_name
|
|
207
|
+
except click.Abort:
|
|
208
|
+
raise
|
|
209
|
+
except Exception as exception:
|
|
210
|
+
handle_error(exception, "crash", ctx.obj.get("quiet", False))
|
|
211
|
+
return None
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
@click.command("trace")
|
|
215
|
+
@click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
|
|
216
|
+
@click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
|
|
217
|
+
@click.argument("code_line", type=int)
|
|
218
|
+
@click.option(
|
|
219
|
+
"--output",
|
|
220
|
+
type=click.Path(writable=True),
|
|
221
|
+
default=None,
|
|
222
|
+
help="Specify where to save the trace analysis results.",
|
|
223
|
+
)
|
|
224
|
+
@click.pass_context
|
|
225
|
+
@track_cost
|
|
226
|
+
def trace(
|
|
227
|
+
ctx: click.Context,
|
|
228
|
+
prompt_file: str,
|
|
229
|
+
code_file: str,
|
|
230
|
+
code_line: int,
|
|
231
|
+
output: Optional[str],
|
|
232
|
+
) -> Optional[Tuple[str, float, str]]:
|
|
233
|
+
"""Trace execution flow back to the prompt."""
|
|
234
|
+
try:
|
|
235
|
+
# trace_main returns: prompt_line, total_cost, model_name
|
|
236
|
+
result, total_cost, model_name = trace_main(
|
|
237
|
+
ctx=ctx,
|
|
238
|
+
prompt_file=prompt_file,
|
|
239
|
+
code_file=code_file,
|
|
240
|
+
code_line=code_line,
|
|
241
|
+
output=output,
|
|
242
|
+
)
|
|
243
|
+
return str(result), total_cost, model_name
|
|
244
|
+
except click.Abort:
|
|
245
|
+
raise
|
|
246
|
+
except Exception as exception:
|
|
247
|
+
handle_error(exception, "trace", ctx.obj.get("quiet", False))
|
|
248
|
+
return None
|
pdd/commands/fix.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Fix command.
|
|
3
|
+
"""
|
|
4
|
+
import click
|
|
5
|
+
from typing import Dict, List, Optional, Tuple, Any
|
|
6
|
+
|
|
7
|
+
from ..fix_main import fix_main
|
|
8
|
+
from ..track_cost import track_cost
|
|
9
|
+
from ..core.errors import handle_error
|
|
10
|
+
|
|
11
|
+
@click.command("fix")
|
|
12
|
+
@click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
|
|
13
|
+
@click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
|
|
14
|
+
@click.argument("unit_test_files", nargs=-1, type=click.Path(exists=True, dir_okay=False))
|
|
15
|
+
@click.argument("error_file", type=click.Path(dir_okay=False)) # Allow non-existent for loop mode
|
|
16
|
+
@click.option(
|
|
17
|
+
"--output-test",
|
|
18
|
+
type=click.Path(writable=True),
|
|
19
|
+
default=None,
|
|
20
|
+
help="Specify where to save the fixed unit test file (file or directory).",
|
|
21
|
+
)
|
|
22
|
+
@click.option(
|
|
23
|
+
"--output-code",
|
|
24
|
+
type=click.Path(writable=True),
|
|
25
|
+
default=None,
|
|
26
|
+
help="Specify where to save the fixed code file (file or directory).",
|
|
27
|
+
)
|
|
28
|
+
@click.option(
|
|
29
|
+
"--output-results",
|
|
30
|
+
type=click.Path(writable=True),
|
|
31
|
+
default=None,
|
|
32
|
+
help="Specify where to save the results log (file or directory).",
|
|
33
|
+
)
|
|
34
|
+
@click.option(
|
|
35
|
+
"--loop",
|
|
36
|
+
is_flag=True,
|
|
37
|
+
default=False,
|
|
38
|
+
help="Enable iterative fixing process."
|
|
39
|
+
)
|
|
40
|
+
@click.option(
|
|
41
|
+
"--verification-program",
|
|
42
|
+
type=click.Path(exists=True, dir_okay=False),
|
|
43
|
+
default=None,
|
|
44
|
+
help="Path to a Python program that verifies the fix.",
|
|
45
|
+
)
|
|
46
|
+
@click.option(
|
|
47
|
+
"--max-attempts",
|
|
48
|
+
type=int,
|
|
49
|
+
default=3,
|
|
50
|
+
show_default=True,
|
|
51
|
+
help="Maximum number of fix attempts.",
|
|
52
|
+
)
|
|
53
|
+
@click.option(
|
|
54
|
+
"--budget",
|
|
55
|
+
type=float,
|
|
56
|
+
default=5.0,
|
|
57
|
+
show_default=True,
|
|
58
|
+
help="Maximum cost allowed for the fixing process.",
|
|
59
|
+
)
|
|
60
|
+
@click.option(
|
|
61
|
+
"--auto-submit",
|
|
62
|
+
is_flag=True,
|
|
63
|
+
default=False,
|
|
64
|
+
help="Automatically submit the example if all unit tests pass.",
|
|
65
|
+
)
|
|
66
|
+
@click.option(
|
|
67
|
+
"--agentic-fallback/--no-agentic-fallback",
|
|
68
|
+
is_flag=True,
|
|
69
|
+
default=True,
|
|
70
|
+
help="Enable agentic fallback if the primary fix mechanism fails.",
|
|
71
|
+
)
|
|
72
|
+
@click.pass_context
|
|
73
|
+
@track_cost
|
|
74
|
+
def fix(
|
|
75
|
+
ctx: click.Context,
|
|
76
|
+
prompt_file: str,
|
|
77
|
+
code_file: str,
|
|
78
|
+
unit_test_files: Tuple[str, ...],
|
|
79
|
+
error_file: str,
|
|
80
|
+
output_test: Optional[str],
|
|
81
|
+
output_code: Optional[str],
|
|
82
|
+
output_results: Optional[str],
|
|
83
|
+
loop: bool,
|
|
84
|
+
verification_program: Optional[str],
|
|
85
|
+
max_attempts: int,
|
|
86
|
+
budget: float,
|
|
87
|
+
auto_submit: bool,
|
|
88
|
+
agentic_fallback: bool,
|
|
89
|
+
) -> Optional[Tuple[Dict[str, Any], float, str]]:
|
|
90
|
+
"""Fix code based on a prompt and unit test errors.
|
|
91
|
+
|
|
92
|
+
Accepts one or more UNIT_TEST_FILES. Each test file is processed separately,
|
|
93
|
+
allowing the AI to run and fix tests individually rather than as a concatenated blob.
|
|
94
|
+
"""
|
|
95
|
+
try:
|
|
96
|
+
all_results: List[Dict[str, Any]] = []
|
|
97
|
+
total_cost = 0.0
|
|
98
|
+
model_name = ""
|
|
99
|
+
|
|
100
|
+
# Process each unit test file separately
|
|
101
|
+
for unit_test_file in unit_test_files:
|
|
102
|
+
success, fixed_unit_test, fixed_code, attempts, cost, model = fix_main(
|
|
103
|
+
ctx=ctx,
|
|
104
|
+
prompt_file=prompt_file,
|
|
105
|
+
code_file=code_file,
|
|
106
|
+
unit_test_file=unit_test_file,
|
|
107
|
+
error_file=error_file,
|
|
108
|
+
output_test=output_test,
|
|
109
|
+
output_code=output_code,
|
|
110
|
+
output_results=output_results,
|
|
111
|
+
loop=loop,
|
|
112
|
+
verification_program=verification_program,
|
|
113
|
+
max_attempts=max_attempts,
|
|
114
|
+
budget=budget,
|
|
115
|
+
auto_submit=auto_submit,
|
|
116
|
+
agentic_fallback=agentic_fallback,
|
|
117
|
+
)
|
|
118
|
+
all_results.append({
|
|
119
|
+
"success": success,
|
|
120
|
+
"fixed_unit_test": fixed_unit_test,
|
|
121
|
+
"fixed_code": fixed_code,
|
|
122
|
+
"attempts": attempts,
|
|
123
|
+
"unit_test_file": unit_test_file,
|
|
124
|
+
})
|
|
125
|
+
total_cost += cost
|
|
126
|
+
model_name = model
|
|
127
|
+
|
|
128
|
+
# Aggregate results
|
|
129
|
+
overall_success = all(r["success"] for r in all_results)
|
|
130
|
+
result = {
|
|
131
|
+
"success": overall_success,
|
|
132
|
+
"results": all_results,
|
|
133
|
+
"total_attempts": sum(r["attempts"] for r in all_results),
|
|
134
|
+
}
|
|
135
|
+
return result, total_cost, model_name
|
|
136
|
+
except click.Abort:
|
|
137
|
+
raise
|
|
138
|
+
except Exception as exception:
|
|
139
|
+
handle_error(exception, "fix", ctx.obj.get("quiet", False))
|
|
140
|
+
ctx.exit(1)
|
pdd/commands/generate.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Generate, test, and example commands.
|
|
3
|
+
"""
|
|
4
|
+
import click
|
|
5
|
+
from typing import Dict, Optional, Tuple, List
|
|
6
|
+
|
|
7
|
+
from ..code_generator_main import code_generator_main
|
|
8
|
+
from ..context_generator_main import context_generator_main
|
|
9
|
+
from ..cmd_test_main import cmd_test_main
|
|
10
|
+
from ..track_cost import track_cost
|
|
11
|
+
from ..core.errors import handle_error, console
|
|
12
|
+
|
|
13
|
+
class GenerateCommand(click.Command):
    """Ensure help shows PROMPT_FILE as required even when validated at runtime."""

    def collect_usage_pieces(self, ctx: click.Context) -> List[str]:
        def _mark_required(piece: str) -> str:
            # click brackets optional arguments; strip them for PROMPT_FILE
            # because the command validates its presence itself.
            return "PROMPT_FILE" if piece == "[PROMPT_FILE]" else piece

        return [_mark_required(piece) for piece in super().collect_usage_pieces(ctx)]
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@click.command("generate", cls=GenerateCommand)
|
|
22
|
+
@click.argument("prompt_file", required=False, type=click.Path(exists=True, dir_okay=False))
|
|
23
|
+
@click.option(
|
|
24
|
+
"--output",
|
|
25
|
+
type=click.Path(writable=True),
|
|
26
|
+
default=None,
|
|
27
|
+
help="Specify where to save the generated code (file or directory).",
|
|
28
|
+
)
|
|
29
|
+
@click.option(
|
|
30
|
+
"--original-prompt",
|
|
31
|
+
"original_prompt_file_path",
|
|
32
|
+
type=click.Path(exists=True, dir_okay=False),
|
|
33
|
+
default=None,
|
|
34
|
+
help="Path to the original prompt file for incremental generation.",
|
|
35
|
+
)
|
|
36
|
+
@click.option(
|
|
37
|
+
"--incremental",
|
|
38
|
+
"incremental_flag",
|
|
39
|
+
is_flag=True,
|
|
40
|
+
default=False,
|
|
41
|
+
help="Force incremental patching even if changes are significant (requires existing output).",
|
|
42
|
+
)
|
|
43
|
+
@click.option(
|
|
44
|
+
"-e",
|
|
45
|
+
"--env",
|
|
46
|
+
"env_kv",
|
|
47
|
+
multiple=True,
|
|
48
|
+
help="Set template variable (KEY=VALUE) or read KEY from env",
|
|
49
|
+
)
|
|
50
|
+
@click.option(
|
|
51
|
+
"--template",
|
|
52
|
+
"template_name",
|
|
53
|
+
type=str,
|
|
54
|
+
default=None,
|
|
55
|
+
help="Use a packaged/project template by name (e.g., architecture/architecture_json)",
|
|
56
|
+
)
|
|
57
|
+
@click.option(
|
|
58
|
+
"--unit-test",
|
|
59
|
+
"unit_test_file",
|
|
60
|
+
type=click.Path(exists=True, dir_okay=False),
|
|
61
|
+
default=None,
|
|
62
|
+
help="Path to a unit test file to include in the prompt.",
|
|
63
|
+
)
|
|
64
|
+
@click.option(
|
|
65
|
+
"--exclude-tests",
|
|
66
|
+
"exclude_tests",
|
|
67
|
+
is_flag=True,
|
|
68
|
+
default=False,
|
|
69
|
+
help="Do not automatically include test files found in the default tests directory.",
|
|
70
|
+
)
|
|
71
|
+
@click.pass_context
|
|
72
|
+
@track_cost
|
|
73
|
+
def generate(
|
|
74
|
+
ctx: click.Context,
|
|
75
|
+
prompt_file: Optional[str],
|
|
76
|
+
output: Optional[str],
|
|
77
|
+
original_prompt_file_path: Optional[str],
|
|
78
|
+
incremental_flag: bool,
|
|
79
|
+
env_kv: Tuple[str, ...],
|
|
80
|
+
template_name: Optional[str],
|
|
81
|
+
unit_test_file: Optional[str],
|
|
82
|
+
exclude_tests: bool,
|
|
83
|
+
) -> Optional[Tuple[str, float, str]]:
|
|
84
|
+
"""
|
|
85
|
+
Generate code from a prompt file.
|
|
86
|
+
|
|
87
|
+
\b
|
|
88
|
+
Related commands:
|
|
89
|
+
test Generate unit tests for a prompt.
|
|
90
|
+
example Generate example code for a prompt.
|
|
91
|
+
|
|
92
|
+
\b
|
|
93
|
+
Note:
|
|
94
|
+
Global options (for example ``--force``, ``--temperature``, ``--time``)
|
|
95
|
+
can be placed either before or after the subcommand. For example:
|
|
96
|
+
|
|
97
|
+
pdd generate my.prompt --force --temperature 0.5
|
|
98
|
+
"""
|
|
99
|
+
try:
|
|
100
|
+
# Resolve template to a prompt path when requested
|
|
101
|
+
if template_name and prompt_file:
|
|
102
|
+
raise click.UsageError("Provide either --template or a PROMPT_FILE path, not both.")
|
|
103
|
+
if template_name:
|
|
104
|
+
try:
|
|
105
|
+
from .. import template_registry as _tpl
|
|
106
|
+
meta = _tpl.load_template(template_name)
|
|
107
|
+
prompt_file = meta.get("path")
|
|
108
|
+
if not prompt_file:
|
|
109
|
+
raise click.UsageError(f"Template '{template_name}' did not return a valid path")
|
|
110
|
+
except Exception as e:
|
|
111
|
+
raise click.UsageError(f"Failed to load template '{template_name}': {e}")
|
|
112
|
+
if not template_name and not prompt_file:
|
|
113
|
+
raise click.UsageError("Missing PROMPT_FILE. To use a template, pass --template NAME instead.")
|
|
114
|
+
# Parse -e/--env arguments into a dict
|
|
115
|
+
env_vars: Dict[str, str] = {}
|
|
116
|
+
import os as _os
|
|
117
|
+
for item in env_kv or ():
|
|
118
|
+
if "=" in item:
|
|
119
|
+
key, value = item.split("=", 1)
|
|
120
|
+
key = key.strip()
|
|
121
|
+
if key:
|
|
122
|
+
env_vars[key] = value
|
|
123
|
+
else:
|
|
124
|
+
key = item.strip()
|
|
125
|
+
if key:
|
|
126
|
+
val = _os.environ.get(key)
|
|
127
|
+
if val is not None:
|
|
128
|
+
env_vars[key] = val
|
|
129
|
+
else:
|
|
130
|
+
if ctx.obj.get("verbose") and not ctx.obj.get("quiet"):
|
|
131
|
+
console.print(f"[warning]-e {key} not found in environment; skipping[/warning]")
|
|
132
|
+
generated_code, incremental, total_cost, model_name = code_generator_main(
|
|
133
|
+
ctx=ctx,
|
|
134
|
+
prompt_file=prompt_file, # resolved template path or user path
|
|
135
|
+
output=output,
|
|
136
|
+
original_prompt_file_path=original_prompt_file_path,
|
|
137
|
+
force_incremental_flag=incremental_flag,
|
|
138
|
+
env_vars=env_vars or None,
|
|
139
|
+
unit_test_file=unit_test_file,
|
|
140
|
+
exclude_tests=exclude_tests,
|
|
141
|
+
)
|
|
142
|
+
return generated_code, total_cost, model_name
|
|
143
|
+
except click.Abort:
|
|
144
|
+
# Let user cancellation (e.g., pressing 'no' on overwrite prompt) propagate
|
|
145
|
+
# to PDDCLI.invoke() for graceful handling (fix for issue #186)
|
|
146
|
+
raise
|
|
147
|
+
except Exception as exception:
|
|
148
|
+
handle_error(exception, "generate", ctx.obj.get("quiet", False))
|
|
149
|
+
return None
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
@click.command("example")
|
|
153
|
+
@click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
|
|
154
|
+
@click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
|
|
155
|
+
@click.option(
|
|
156
|
+
"--output",
|
|
157
|
+
type=click.Path(writable=True),
|
|
158
|
+
default=None,
|
|
159
|
+
help="Specify where to save the generated example code (file or directory).",
|
|
160
|
+
)
|
|
161
|
+
@click.pass_context
|
|
162
|
+
@track_cost
|
|
163
|
+
def example(
|
|
164
|
+
ctx: click.Context,
|
|
165
|
+
prompt_file: str,
|
|
166
|
+
code_file: str,
|
|
167
|
+
output: Optional[str]
|
|
168
|
+
) -> Optional[Tuple[str, float, str]]:
|
|
169
|
+
"""Generate example code for a given prompt and implementation."""
|
|
170
|
+
try:
|
|
171
|
+
example_code, total_cost, model_name = context_generator_main(
|
|
172
|
+
ctx=ctx,
|
|
173
|
+
prompt_file=prompt_file,
|
|
174
|
+
code_file=code_file,
|
|
175
|
+
output=output,
|
|
176
|
+
)
|
|
177
|
+
return example_code, total_cost, model_name
|
|
178
|
+
except click.Abort:
|
|
179
|
+
raise
|
|
180
|
+
except Exception as exception:
|
|
181
|
+
handle_error(exception, "example", ctx.obj.get("quiet", False))
|
|
182
|
+
return None
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
@click.command("test")
|
|
186
|
+
@click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
|
|
187
|
+
@click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
|
|
188
|
+
@click.option(
|
|
189
|
+
"--output",
|
|
190
|
+
type=click.Path(writable=True),
|
|
191
|
+
default=None,
|
|
192
|
+
help="Specify where to save the generated test file (file or directory).",
|
|
193
|
+
)
|
|
194
|
+
@click.option(
|
|
195
|
+
"--language",
|
|
196
|
+
type=str,
|
|
197
|
+
default=None,
|
|
198
|
+
help="Specify the programming language."
|
|
199
|
+
)
|
|
200
|
+
@click.option(
|
|
201
|
+
"--coverage-report",
|
|
202
|
+
type=click.Path(exists=True, dir_okay=False),
|
|
203
|
+
default=None,
|
|
204
|
+
help="Path to the coverage report file for existing tests.",
|
|
205
|
+
)
|
|
206
|
+
@click.option(
|
|
207
|
+
"--existing-tests",
|
|
208
|
+
type=click.Path(exists=True, dir_okay=False),
|
|
209
|
+
multiple=True,
|
|
210
|
+
help="Path to existing unit test file(s). Can be specified multiple times.",
|
|
211
|
+
)
|
|
212
|
+
@click.option(
|
|
213
|
+
"--target-coverage",
|
|
214
|
+
type=click.FloatRange(0.0, 100.0),
|
|
215
|
+
default=None, # Use None, default handled in cmd_test_main or env var
|
|
216
|
+
help="Desired code coverage percentage (default: 10.0 or PDD_TEST_COVERAGE_TARGET).",
|
|
217
|
+
)
|
|
218
|
+
@click.option(
|
|
219
|
+
"--merge",
|
|
220
|
+
is_flag=True,
|
|
221
|
+
default=False,
|
|
222
|
+
help="Merge new tests with existing test file instead of creating a separate file.",
|
|
223
|
+
)
|
|
224
|
+
@click.pass_context
|
|
225
|
+
@track_cost
|
|
226
|
+
def test(
|
|
227
|
+
ctx: click.Context,
|
|
228
|
+
prompt_file: str,
|
|
229
|
+
code_file: str,
|
|
230
|
+
output: Optional[str],
|
|
231
|
+
language: Optional[str],
|
|
232
|
+
coverage_report: Optional[str],
|
|
233
|
+
existing_tests: Tuple[str, ...],
|
|
234
|
+
target_coverage: Optional[float],
|
|
235
|
+
merge: bool,
|
|
236
|
+
) -> Optional[Tuple[str, float, str]]:
|
|
237
|
+
"""Generate unit tests for a given prompt and implementation."""
|
|
238
|
+
try:
|
|
239
|
+
# Convert empty tuple to None for cmd_test_main compatibility
|
|
240
|
+
existing_tests_list = list(existing_tests) if existing_tests else None
|
|
241
|
+
test_code, total_cost, model_name = cmd_test_main(
|
|
242
|
+
ctx=ctx,
|
|
243
|
+
prompt_file=prompt_file,
|
|
244
|
+
code_file=code_file,
|
|
245
|
+
output=output,
|
|
246
|
+
language=language,
|
|
247
|
+
coverage_report=coverage_report,
|
|
248
|
+
existing_tests=existing_tests_list,
|
|
249
|
+
target_coverage=target_coverage,
|
|
250
|
+
merge=merge,
|
|
251
|
+
)
|
|
252
|
+
return test_code, total_cost, model_name
|
|
253
|
+
except click.Abort:
|
|
254
|
+
raise
|
|
255
|
+
except Exception as exception:
|
|
256
|
+
handle_error(exception, "test", ctx.obj.get("quiet", False))
|
|
257
|
+
return None
|