pdd-cli 0.0.90__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +38 -6
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +521 -786
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +118 -3
- pdd/agentic_update.py +25 -8
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +63 -53
- pdd/auto_include.py +185 -3
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +195 -23
- pdd/cmd_test_main.py +345 -197
- pdd/code_generator.py +4 -2
- pdd/code_generator_main.py +118 -32
- pdd/commands/__init__.py +6 -0
- pdd/commands/analysis.py +87 -29
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +136 -113
- pdd/commands/maintenance.py +3 -2
- pdd/commands/misc.py +8 -0
- pdd/commands/modify.py +190 -164
- pdd/commands/sessions.py +284 -0
- pdd/construct_paths.py +334 -32
- pdd/context_generator_main.py +167 -170
- pdd/continue_generation.py +6 -3
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +27 -3
- pdd/core/cloud.py +237 -0
- pdd/core/errors.py +4 -0
- pdd/core/remote_session.py +61 -0
- pdd/crash_main.py +219 -23
- pdd/data/llm_model.csv +4 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +208 -34
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +291 -38
- pdd/fix_main.py +204 -4
- pdd/fix_verification_errors_loop.py +235 -26
- pdd/fix_verification_main.py +269 -83
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +46 -5
- pdd/generate_test.py +212 -151
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +309 -20
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +7 -5
- pdd/insert_includes.py +2 -1
- pdd/llm_invoke.py +459 -95
- pdd/load_prompt_template.py +15 -34
- pdd/path_resolution.py +140 -0
- pdd/postprocess.py +4 -1
- pdd/preprocess.py +68 -12
- pdd/preprocess_main.py +33 -1
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +2 -2
- pdd/prompts/agentic_update_LLM.prompt +192 -338
- pdd/prompts/auto_include_LLM.prompt +22 -0
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +571 -14
- pdd/prompts/fix_code_module_errors_LLM.prompt +8 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +1 -0
- pdd/prompts/generate_test_LLM.prompt +20 -1
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/insert_includes_LLM.prompt +262 -252
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/remote_session.py +876 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/summarize_directory.py +236 -237
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +329 -47
- pdd/sync_main.py +272 -28
- pdd/sync_orchestration.py +136 -75
- pdd/template_expander.py +161 -0
- pdd/templates/architecture/architecture_json.prompt +41 -46
- pdd/trace.py +1 -1
- pdd/track_cost.py +0 -13
- pdd/unfinished_prompt.py +2 -1
- pdd/update_main.py +23 -5
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +15 -10
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- pdd_cli-0.0.90.dist-info/RECORD +0 -153
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
pdd/sync_orchestration.py
CHANGED
@@ -38,6 +38,7 @@ from .sync_determine_operation import (
 read_run_report,
 calculate_sha256,
 calculate_current_hashes,
+_safe_basename,
 )
 from .auto_deps_main import auto_deps_main
 from .code_generator_main import code_generator_main
@@ -53,6 +54,10 @@ from .pytest_output import extract_failing_files_from_output
 from . import DEFAULT_STRENGTH


+# --- Helper Functions ---
+# Note: _safe_basename is imported from sync_determine_operation
+
+
 # --- Atomic State Update (Issue #159 Fix) ---

 @dataclass
@@ -151,7 +156,7 @@ class AtomicStateUpdate:

 def load_sync_log(basename: str, language: str) -> List[Dict[str, Any]]:
 """Load sync log entries for a basename and language."""
-log_file = META_DIR / f"{basename}_{language}_sync.log"
+log_file = META_DIR / f"{_safe_basename(basename)}_{language}_sync.log"
 if not log_file.exists():
 return []
 try:
@@ -193,7 +198,7 @@ def update_sync_log_entry(entry: Dict[str, Any], result: Dict[str, Any], duratio

 def append_sync_log(basename: str, language: str, entry: Dict[str, Any]):
 """Append completed log entry to the sync log file."""
-log_file = META_DIR / f"{basename}_{language}_sync.log"
+log_file = META_DIR / f"{_safe_basename(basename)}_{language}_sync.log"
 META_DIR.mkdir(parents=True, exist_ok=True)
 with open(log_file, 'a') as f:
 f.write(json.dumps(entry) + '\n')
@@ -217,7 +222,7 @@ def save_run_report(report: Dict[str, Any], basename: str, language: str,
 language: The programming language.
 atomic_state: Optional AtomicStateUpdate for atomic writes (Issue #159 fix).
 """
-report_file = META_DIR / f"{basename}_{language}_run.json"
+report_file = META_DIR / f"{_safe_basename(basename)}_{language}_run.json"
 if atomic_state:
 # Buffer for atomic write
 atomic_state.set_run_report(report, report_file)
@@ -257,7 +262,7 @@ def _save_operation_fingerprint(basename: str, language: str, operation: str,
 test_files=current_hashes.get('test_files'), # Bug #156
 )

-fingerprint_file = META_DIR / f"{basename}_{language}.json"
+fingerprint_file = META_DIR / f"{_safe_basename(basename)}_{language}.json"
 if atomic_state:
 # Buffer for atomic write
 atomic_state.set_fingerprint(asdict(fingerprint), fingerprint_file)
@@ -574,7 +579,7 @@ def _try_auto_fix_import_error(
 def _run_example_with_error_detection(
 cmd_parts: list[str],
 env: dict,
-cwd: str,
+cwd: Optional[str] = None,
 timeout: int = 60
 ) -> tuple[int, str, str]:
 """
@@ -635,24 +640,23 @@ def _run_example_with_error_detection(
 # Check for errors in output
 has_errors, error_summary = _detect_example_errors(combined)

-# Determine result:
-# -
+# Determine result (check returncode first, then use error detection for signal-killed):
+# - Zero exit code → success (trust the exit code)
 # - Positive exit code (process failed normally, e.g., sys.exit(1)) → failure
 # - Negative exit code (killed by signal, e.g., -9 for SIGKILL) → check output
-# - Zero exit code → success
 #
 # IMPORTANT: When we kill the process after timeout, returncode is negative
 # (the signal number). This is NOT a failure if output has no errors.
-if
-return
+if proc.returncode is not None and proc.returncode == 0:
+return 0, stdout, stderr # Clean exit = success (trust exit code)
 elif proc.returncode is not None and proc.returncode > 0:
 return proc.returncode, stdout, stderr # Process exited with error
 else:
-#
-# -
-
-
-return 0, stdout, stderr
+# Killed by signal (returncode < 0 or None) - use error detection
+# Server-style examples may run until timeout, need to check output
+if has_errors:
+return 1, stdout, stderr # Errors detected in output
+return 0, stdout, stderr # No errors, server was running fine


 def _execute_tests_and_create_run_report(
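The hunk above rewrites the result handling in `_run_example_with_error_detection`. As a reading aid only, here is a minimal standalone sketch of that decision rule; the helper name is ours, and `has_errors` stands in for the module's `_detect_example_errors` check.

```python
from typing import Optional

def classify_example_run(returncode: Optional[int], has_errors: bool) -> int:
    """Sketch of the new rule: trust exit codes first, fall back to output scanning."""
    if returncode is not None and returncode == 0:
        return 0                 # clean exit: success, regardless of noisy output
    if returncode is not None and returncode > 0:
        return returncode        # normal failure, e.g. sys.exit(1)
    # Negative (killed by a signal, such as the post-timeout kill) or None:
    # server-style examples land here, so only detected errors count as failure.
    return 1 if has_errors else 0

# Illustrative checks of the behaviour described in the diff comments:
assert classify_example_run(0, has_errors=True) == 0     # exit code wins
assert classify_example_run(2, has_errors=False) == 2
assert classify_example_run(-9, has_errors=False) == 0   # timed-out server, no errors
assert classify_example_run(-9, has_errors=True) == 1
```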
@@ -801,7 +805,7 @@ def _create_mock_context(**kwargs) -> click.Context:

 def _display_sync_log(basename: str, language: str, verbose: bool = False) -> Dict[str, Any]:
 """Displays the sync log for a given basename and language."""
-log_file = META_DIR / f"{basename}_{language}_sync.log"
+log_file = META_DIR / f"{_safe_basename(basename)}_{language}_sync.log"
 if not log_file.exists():
 print(f"No sync log found for '{basename}' in language '{language}'.")
 return {'success': False, 'errors': ['Log file not found.'], 'log_entries': []}
@@ -904,6 +908,14 @@ def sync_orchestration(
 """
 Orchestrates the complete PDD sync workflow with parallel animation.
 """
+# Handle None values from CLI (Issue #194) - defense in depth
+if target_coverage is None:
+target_coverage = 90.0
+if budget is None:
+budget = 10.0
+if max_attempts is None:
+max_attempts = 3
+
 # Import get_extension at function scope
 from .sync_determine_operation import get_extension

@@ -967,6 +979,10 @@ def sync_orchestration(
 """Get the confirmation callback from the app if available.

 Once user confirms, we remember it so subsequent operations don't ask again.
+
+Fix for Issue #277: In headless mode, we now return a wrapper callback
+that uses click.confirm AND sets user_confirmed_overwrite[0] = True,
+so subsequent calls auto-confirm instead of prompting repeatedly.
 """
 if user_confirmed_overwrite[0]:
 # User already confirmed, return a callback that always returns True
@@ -979,6 +995,26 @@ def sync_orchestration(
 user_confirmed_overwrite[0] = True
 return result
 return confirming_callback
+
+# Fix #277: In headless mode (app_ref is None), create a wrapper callback
+# that sets the flag after confirmation, preventing repeated prompts
+if confirm_callback is None:
+def headless_confirming_callback(msg: str, title: str) -> bool:
+"""Headless mode callback that remembers user confirmation."""
+try:
+prompt = msg or "Overwrite existing files?"
+result = click.confirm(
+click.style(prompt, fg="yellow"),
+default=True,
+show_default=True
+)
+except (click.Abort, EOFError):
+return False
+if result:
+user_confirmed_overwrite[0] = True
+return result
+return headless_confirming_callback
+
 return confirm_callback # Fall back to provided callback

 def sync_worker_logic():
@@ -1196,12 +1232,18 @@ def sync_orchestration(
 Path(temp_output).unlink()
 result = (new_content, 0.0, 'no-changes')
 elif operation == 'generate':
-
+# Ensure code directory exists before generating
+pdd_files['code'].parent.mkdir(parents=True, exist_ok=True)
+# Use absolute paths to avoid path_resolution_mode mismatch between sync (cwd) and generate (config_base)
+result = code_generator_main(ctx, prompt_file=str(pdd_files['prompt'].resolve()), output=str(pdd_files['code'].resolve()), original_prompt_file_path=None, force_incremental_flag=False)
 # Clear stale run_report so crash/verify is required for newly generated code
-run_report_file = META_DIR / f"{basename}_{language}_run.json"
+run_report_file = META_DIR / f"{_safe_basename(basename)}_{language}_run.json"
 run_report_file.unlink(missing_ok=True)
 elif operation == 'example':
-
+# Ensure example directory exists before generating
+pdd_files['example'].parent.mkdir(parents=True, exist_ok=True)
+# Use absolute paths to avoid path_resolution_mode mismatch between sync (cwd) and example (config_base)
+result = context_generator_main(ctx, prompt_file=str(pdd_files['prompt'].resolve()), code_file=str(pdd_files['code'].resolve()), output=str(pdd_files['example'].resolve()))
 elif operation == 'crash':
 required_files = [pdd_files['code'], pdd_files['example']]
 missing_files = [f for f in required_files if not f.exists()]
@@ -1226,20 +1268,15 @@ def sync_orchestration(
 # Remove TUI-specific env vars that might contaminate subprocess
 for var in ['FORCE_COLOR', 'COLUMNS']:
 env.pop(var, None)
-#
-
-
-
-
-cmd_parts = run_cmd.split()
-else:
-# Fallback to Python if no run command found
-cmd_parts = ['python', example_path]
+# Bug fix: Use sys.executable to match crash_main's Python interpreter
+# and do NOT set cwd - inherit from pdd invocation directory
+# to match crash_main behavior. Setting cwd to example's parent breaks imports.
+example_path = str(pdd_files['example'].resolve())
+cmd_parts = [sys.executable, example_path]
 # Use error-detection runner that handles server-style examples
 returncode, stdout, stderr = _run_example_with_error_detection(
 cmd_parts,
 env=env,
-cwd=str(pdd_files['example'].parent),
 timeout=60
 )

@@ -1285,7 +1322,6 @@ def sync_orchestration(
 retry_returncode, retry_stdout, retry_stderr = _run_example_with_error_detection(
 cmd_parts,
 env=env,
-cwd=str(pdd_files['example'].parent),
 timeout=60
 )
 if retry_returncode == 0:
@@ -1314,7 +1350,10 @@ def sync_orchestration(

 Path("crash.log").write_text(crash_log_content)
 try:
-
+# For non-Python languages, set max_attempts=0 to skip iterative loop
+# and go directly to agentic fallback
+effective_max_attempts = 0 if language.lower() != 'python' else max_attempts
+result = crash_main(ctx, prompt_file=str(pdd_files['prompt']), code_file=str(pdd_files['code']), program_file=str(pdd_files['example']), error_file="crash.log", output=str(pdd_files['code']), output_program=str(pdd_files['example']), loop=True, max_attempts=effective_max_attempts, budget=budget - current_cost_ref[0], strength=strength, temperature=temperature)
 except Exception as e:
 print(f"Crash fix failed: {e}")
 skipped_operations.append('crash')
@@ -1324,7 +1363,10 @@ def sync_orchestration(
 if not pdd_files['example'].exists():
 skipped_operations.append('verify')
 continue
-
+# For non-Python languages, set max_attempts=0 to skip iterative loop
+# and go directly to agentic fallback
+effective_max_attempts = 0 if language.lower() != 'python' else max_attempts
+result = fix_verification_main(ctx, prompt_file=str(pdd_files['prompt']), code_file=str(pdd_files['code']), program_file=str(pdd_files['example']), output_results=f"{basename}_verify_results.log", output_code=str(pdd_files['code']), output_program=str(pdd_files['example']), loop=True, verification_program=str(pdd_files['example']), max_attempts=effective_max_attempts, budget=budget - current_cost_ref[0], strength=strength, temperature=temperature)
 elif operation == 'test':
 pdd_files['test'].parent.mkdir(parents=True, exist_ok=True)
 # Use merge=True when test file exists to preserve fixes and append new tests
@@ -1402,11 +1444,13 @@ def sync_orchestration(
 # Bug #156: Run pytest on ALL matching test files
 test_files = pdd_files.get('test_files', [pdd_files['test']])
 pytest_args = [python_executable, '-m', 'pytest'] + [str(f) for f in test_files] + ['-v', '--tb=short']
+# Bug fix: Run from project root (no cwd), matching _run_tests_and_report pattern
+# Using cwd=test.parent with paths like 'backend/tests/test_foo.py' causes
+# pytest to look for 'backend/tests/backend/tests/test_foo.py' (not found)
 test_result = subprocess.run(
 pytest_args,
 capture_output=True, text=True, timeout=300,
-stdin=subprocess.DEVNULL, env=clean_env, start_new_session=True
-cwd=str(pdd_files['test'].parent)
+stdin=subprocess.DEVNULL, env=clean_env, start_new_session=True
 )
 else:
 # Use shell command for non-Python
@@ -1460,7 +1504,10 @@ def sync_orchestration(
 unit_test_file_for_fix = str(ff_path.resolve())
 break

-
+# For non-Python languages, set max_attempts=0 to skip iterative loop
+# and go directly to agentic fallback
+effective_max_attempts = 0 if language.lower() != 'python' else max_attempts
+result = fix_main(ctx, prompt_file=str(pdd_files['prompt']), code_file=str(pdd_files['code']), unit_test_file=unit_test_file_for_fix, error_file=str(error_file_path), output_test=str(pdd_files['test']), output_code=str(pdd_files['code']), output_results=f"{basename}_fix_results.log", loop=True, verification_program=str(pdd_files['example']), max_attempts=effective_max_attempts, budget=budget - current_cost_ref[0], auto_submit=True, strength=strength, temperature=temperature)
 elif operation == 'update':
 result = update_main(ctx, input_prompt_file=str(pdd_files['prompt']), modified_code_file=str(pdd_files['code']), input_code_file=None, output=str(pdd_files['prompt']), use_git=True, strength=strength, temperature=temperature)
 else:
@@ -1509,18 +1556,18 @@ def sync_orchestration(
 clean_env = os.environ.copy()
 for var in ['FORCE_COLOR', 'COLUMNS']:
 clean_env.pop(var, None)
-#
-
-
-
-
-
-
+# Bug fix: Use sys.executable to ensure same Python interpreter as
+# crash_main (fix_code_loop.py:477). When both venv and conda are
+# active, PATH lookup for 'python' may resolve to a different
+# interpreter, causing infinite crash loops.
+# Bug fix: Do NOT set cwd - inherit from pdd invocation directory
+# to match crash_main behavior. Setting cwd to example's parent breaks imports.
+example_path = str(pdd_files['example'].resolve())
+cmd_parts = [sys.executable, example_path]
 # Use error-detection runner that handles server-style examples
 returncode, stdout, stderr = _run_example_with_error_detection(
 cmd_parts,
 env=clean_env,
-cwd=str(pdd_files['example'].parent),
 timeout=60
 )
 # Include test_hash for staleness detection
@@ -1574,48 +1621,62 @@ def sync_orchestration(
 'model_name': last_model_name,
 }

-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Detect headless mode (no TTY, CI environment, or quiet mode)
+headless = quiet or not sys.stdout.isatty() or os.environ.get('CI')
+
+if headless:
+# Set PDD_FORCE to also skip API key prompts in headless mode
+os.environ['PDD_FORCE'] = '1'
+# Run worker logic directly without TUI in headless mode
+if not quiet:
+print(f"Running sync in headless mode (CI/non-TTY environment)...")
+result = sync_worker_logic()
+# No TUI app, so no worker_exception to check
+worker_exception = None
+else:
+# Instantiate and run Textual App
+app = SyncApp(
+basename=basename,
+budget=budget,
+worker_func=sync_worker_logic,
+function_name_ref=current_function_name_ref,
+cost_ref=current_cost_ref,
+prompt_path_ref=prompt_path_ref,
+code_path_ref=code_path_ref,
+example_path_ref=example_path_ref,
+tests_path_ref=tests_path_ref,
+prompt_color_ref=prompt_box_color_ref,
+code_color_ref=code_box_color_ref,
+example_color_ref=example_box_color_ref,
+tests_color_ref=tests_box_color_ref,
+stop_event=stop_event,
+progress_callback_ref=progress_callback_ref
+)

-
-
+# Store app reference so worker can access request_confirmation
+app_ref[0] = app

-
-
-
-if not quiet:
+result = app.run()
+
+# Show exit animation if not quiet
 from .sync_tui import show_exit_animation
 show_exit_animation()
-
-
-
-
-
+
+worker_exception = app.worker_exception
+
+# Check for worker exception that might have caused a crash (TUI mode only)
+if not headless and worker_exception:
+print(f"\n[Error] Worker thread crashed with exception: {worker_exception}", file=sys.stderr)
+
 if hasattr(app, 'captured_logs') and app.captured_logs:
 print("\n[Captured Logs (last 20 lines)]", file=sys.stderr)
 for line in app.captured_logs[-20:]: # Print last 20 lines
 print(f" {line}", file=sys.stderr)
-
+
 import traceback
 # Use trace module to print the stored exception's traceback if available
-if hasattr(
-traceback.print_exception(type(
+if hasattr(worker_exception, '__traceback__'):
+traceback.print_exception(type(worker_exception), worker_exception, worker_exception.__traceback__, file=sys.stderr)

 if result is None:
 return {
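The final hunk above replaces the unconditional Textual TUI launch with a headless branch. As a reading aid, a minimal sketch of just the detection condition shown in that hunk follows; the helper name is ours, not part of the package.

```python
import os
import sys

def is_headless(quiet: bool = False) -> bool:
    """Sketch of the condition from the diff: quiet flag, no TTY, or a CI env var."""
    return bool(quiet or not sys.stdout.isatty() or os.environ.get("CI"))

if __name__ == "__main__":
    if is_headless():
        print("headless: run sync_worker_logic() directly (PDD_FORCE=1 is set)")
    else:
        print("interactive: launch the SyncApp TUI")
```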
pdd/template_expander.py
ADDED
@@ -0,0 +1,161 @@
+# pdd/template_expander.py
+"""
+Template expansion utility for output path configuration.
+
+This module provides a function to expand path templates with placeholders
+like {name}, {category}, {ext}, etc. It enables extensible project layouts
+for different languages and frameworks (Python, TypeScript, Vue, Go, etc.).
+
+Supported placeholders:
+    {name} - Base name (last segment of input path)
+    {category} - Parent path segments (empty if none)
+    {dir_prefix} - Full input directory prefix with trailing /
+    {ext} - File extension from language (e.g., "py", "tsx")
+    {language} - Full language name (e.g., "python", "typescript")
+    {name_snake} - snake_case version of name
+    {name_pascal} - PascalCase version of name
+    {name_kebab} - kebab-case version of name
+
+Example:
+    >>> expand_template(
+    ...     "frontend/src/components/{category}/{name}/{name}.tsx",
+    ...     {"name": "AssetCard", "category": "marketplace"}
+    ... )
+    'frontend/src/components/marketplace/AssetCard/AssetCard.tsx'
+"""
+
+import re
+import os
+from typing import Dict, Any
+
+
+def _to_snake_case(s: str) -> str:
+    """
+    Convert string to snake_case.
+
+    Handles PascalCase, camelCase, and existing snake_case.
+
+    Examples:
+        AssetCard -> asset_card
+        assetCard -> asset_card
+        already_snake -> already_snake
+    """
+    if not s:
+        return s
+    # Insert underscore before uppercase letters (except at start)
+    result = re.sub(r'(?<!^)(?=[A-Z])', '_', s)
+    return result.lower()
+
+
+def _to_pascal_case(s: str) -> str:
+    """
+    Convert string to PascalCase.
+
+    Handles snake_case, kebab-case, and existing PascalCase.
+
+    Examples:
+        asset_card -> AssetCard
+        asset-card -> AssetCard
+        AssetCard -> Assetcard (note: re-capitalizes)
+    """
+    if not s:
+        return s
+    # Split on underscores, hyphens, or other common delimiters
+    parts = re.split(r'[_\-\s]+', s)
+    return ''.join(part.title() for part in parts if part)
+
+
+def _to_kebab_case(s: str) -> str:
+    """
+    Convert string to kebab-case.
+
+    Handles PascalCase, camelCase, and existing kebab-case.
+
+    Examples:
+        AssetCard -> asset-card
+        assetCard -> asset-card
+        already-kebab -> already-kebab
+    """
+    if not s:
+        return s
+    # Insert hyphen before uppercase letters (except at start)
+    result = re.sub(r'(?<!^)(?=[A-Z])', '-', s)
+    return result.lower()
+
+
+def _normalize_path(path: str) -> str:
+    """
+    Normalize a path to remove double slashes and resolve . and ..
+
+    This handles edge cases like empty {category} producing paths like:
+    "src/components//Button" -> "src/components/Button"
+
+    Unlike os.path.normpath, this preserves relative paths without
+    converting them to absolute paths.
+    """
+    if not path:
+        return path
+
+    # Split path and filter empty segments (which cause double slashes)
+    parts = path.split('/')
+    normalized_parts = [p for p in parts if p]
+
+    # Rejoin with single slashes
+    result = '/'.join(normalized_parts)
+
+    # Use os.path.normpath for additional cleanup (handles . and ..)
+    # but it converts to OS-specific separators, so convert back
+    result = os.path.normpath(result)
+
+    # On Windows, normpath uses backslashes; convert back to forward slashes
+    result = result.replace('\\', '/')
+
+    return result
+
+
+def expand_template(template: str, context: Dict[str, Any]) -> str:
+    """
+    Expand a path template with placeholder values.
+
+    Args:
+        template: Path template with {placeholder} syntax
+        context: Dictionary of values to substitute
+
+    Returns:
+        Expanded path with normalized slashes
+
+    Example:
+        >>> expand_template(
+        ...     "frontend/src/components/{category}/{name}/{name}.tsx",
+        ...     {"name": "AssetCard", "category": "marketplace"}
+        ... )
+        'frontend/src/components/marketplace/AssetCard/AssetCard.tsx'
+    """
+    # Get base values from context (with empty string defaults)
+    name = context.get('name', '')
+    category = context.get('category', '')
+    dir_prefix = context.get('dir_prefix', '')
+    ext = context.get('ext', '')
+    language = context.get('language', '')
+
+    # Build the full set of available placeholders
+    placeholders = {
+        'name': name,
+        'category': category,
+        'dir_prefix': dir_prefix,
+        'ext': ext,
+        'language': language,
+        'name_snake': _to_snake_case(name),
+        'name_pascal': _to_pascal_case(name),
+        'name_kebab': _to_kebab_case(name),
+    }
+
+    # Perform substitution
+    result = template
+    for key, value in placeholders.items():
+        result = result.replace(f'{{{key}}}', str(value))
+
+    # Normalize the path to handle empty segments (double slashes)
+    result = _normalize_path(result)
+
+    return result
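Because this module is entirely new in 0.0.118, a short usage sketch may help. The template strings and context values below are invented for illustration; the import path, placeholder names, and normalization behaviour come from the file above.

```python
# Illustrative use of the new template expander (example values only).
from pdd.template_expander import expand_template

# Case-conversion placeholders are derived from the provided name.
print(expand_template(
    "frontend/src/components/{category}/{name}/{name_kebab}.{ext}",
    {"name": "AssetCard", "category": "marketplace", "ext": "tsx"},
))
# -> frontend/src/components/marketplace/AssetCard/asset-card.tsx

# An empty {category} leaves a double slash, which _normalize_path collapses.
print(expand_template(
    "src/{category}/{name_snake}.{ext}",
    {"name": "AssetCard", "category": "", "ext": "py"},
))
# -> src/asset_card.py
```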
@@ -140,56 +140,51 @@ INSTRUCTIONS:
 - When interface.type is "page", each entry in `dataSources` must be an object with at least `kind` and `source` (e.g., URL or identifier). The `kind` field MUST be exactly one of: `"api"`, `"query"`, `"stream"`, `"file"`, `"cache"`, `"message"`, `"job"`, or `"other"`. Do not invent new values like `"api/mutation"`; instead, use `"api"` (for any HTTP/REST/GraphQL endpoint) or `"other"` and describe details such as queries vs. mutations in `description` or `notes`. Provide `method`, `description`, and any other useful metadata when known.
 - Valid JSON only. No comments or trailing commas.

-OUTPUT FORMAT
+OUTPUT FORMAT - CRITICAL: Return a raw JSON array, NOT an object with "items" or "data" wrapper:
 ```json
-
-
-
-"
-"
-"
-
-
-
-
-"
-"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
-}
-},
-"module": {"type": "object"},
-"api": {"type": "object"},
-"graphql": {"type": "object"},
-"cli": {"type": "object"},
-"job": {"type": "object"},
-"message": {"type": "object"},
-"config": {"type": "object"}
-}
+[
+  {
+    "reason": "Core data models needed by all other modules",
+    "description": "Defines Order, User, and Item data models with validation",
+    "dependencies": [],
+    "priority": 1,
+    "filename": "models_Python.prompt",
+    "filepath": "src/models.py",
+    "tags": ["backend", "data"],
+    "interface": {
+      "type": "module",
+      "module": {
+        "functions": [
+          {"name": "Order", "signature": "class Order(BaseModel)", "returns": "Order instance"}
+        ]
+      }
+    }
+  },
+  {
+    "reason": "API endpoints for order management",
+    "description": "REST API for creating, reading, updating orders",
+    "dependencies": ["models_Python.prompt"],
+    "priority": 2,
+    "filename": "orders_api_Python.prompt",
+    "filepath": "src/api/orders.py",
+    "tags": ["backend", "api"],
+    "interface": {
+      "type": "api",
+      "api": {
+        "endpoints": [
+          {"method": "POST", "path": "/orders", "auth": "jwt"},
+          {"method": "GET", "path": "/orders/{id}", "auth": "jwt"}
+        ]
 }
 }
 }
-
+]
+```
+WRONG (do NOT do this):
+```json
+{"items": [...]} // WRONG - no wrapper objects!
+{"data": [...]} // WRONG - no wrapper objects!
+{"type": "array", "items": [...]} // WRONG - this is schema, not output!
 ```

 INTERFACE TYPES (emit only applicable):
|