pdd-cli 0.0.45-py3-none-any.whl → 0.0.118-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- pdd/__init__.py +40 -8
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +598 -0
- pdd/agentic_crash.py +534 -0
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +1294 -0
- pdd/agentic_langtest.py +162 -0
- pdd/agentic_update.py +387 -0
- pdd/agentic_verify.py +183 -0
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +71 -51
- pdd/auto_include.py +245 -5
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +196 -23
- pdd/bug_to_unit_test.py +2 -0
- pdd/change_main.py +11 -4
- pdd/cli.py +22 -1181
- pdd/cmd_test_main.py +350 -150
- pdd/code_generator.py +60 -18
- pdd/code_generator_main.py +790 -57
- pdd/commands/__init__.py +48 -0
- pdd/commands/analysis.py +306 -0
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +163 -0
- pdd/commands/generate.py +257 -0
- pdd/commands/maintenance.py +175 -0
- pdd/commands/misc.py +87 -0
- pdd/commands/modify.py +256 -0
- pdd/commands/report.py +144 -0
- pdd/commands/sessions.py +284 -0
- pdd/commands/templates.py +215 -0
- pdd/commands/utility.py +110 -0
- pdd/config_resolution.py +58 -0
- pdd/conflicts_main.py +8 -3
- pdd/construct_paths.py +589 -111
- pdd/context_generator.py +10 -2
- pdd/context_generator_main.py +175 -76
- pdd/continue_generation.py +53 -10
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +527 -0
- pdd/core/cloud.py +237 -0
- pdd/core/dump.py +554 -0
- pdd/core/errors.py +67 -0
- pdd/core/remote_session.py +61 -0
- pdd/core/utils.py +90 -0
- pdd/crash_main.py +262 -33
- pdd/data/language_format.csv +71 -63
- pdd/data/llm_model.csv +20 -18
- pdd/detect_change_main.py +5 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +523 -95
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +491 -92
- pdd/fix_errors_from_unit_tests.py +4 -3
- pdd/fix_main.py +278 -21
- pdd/fix_verification_errors.py +12 -100
- pdd/fix_verification_errors_loop.py +529 -286
- pdd/fix_verification_main.py +294 -89
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +139 -15
- pdd/generate_test.py +218 -146
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +318 -22
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +75 -0
- pdd/get_test_command.py +68 -0
- pdd/git_update.py +70 -19
- pdd/incremental_code_generator.py +2 -2
- pdd/insert_includes.py +13 -4
- pdd/llm_invoke.py +1711 -181
- pdd/load_prompt_template.py +19 -12
- pdd/path_resolution.py +140 -0
- pdd/pdd_completion.fish +25 -2
- pdd/pdd_completion.sh +30 -4
- pdd/pdd_completion.zsh +79 -4
- pdd/postprocess.py +14 -4
- pdd/preprocess.py +293 -24
- pdd/preprocess_main.py +41 -6
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
- pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
- pdd/prompts/agentic_update_LLM.prompt +925 -0
- pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
- pdd/prompts/auto_include_LLM.prompt +122 -905
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +686 -27
- pdd/prompts/example_generator_LLM.prompt +22 -1
- pdd/prompts/extract_code_LLM.prompt +5 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +7 -1
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/extract_promptline_LLM.prompt +17 -11
- pdd/prompts/find_verification_errors_LLM.prompt +6 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +12 -2
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +9 -0
- pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
- pdd/prompts/generate_test_LLM.prompt +41 -7
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/increase_tests_LLM.prompt +1 -5
- pdd/prompts/insert_includes_LLM.prompt +316 -186
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/prompts/trace_LLM.prompt +25 -22
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/prompts/update_prompt_LLM.prompt +22 -1
- pdd/pytest_output.py +127 -12
- pdd/remote_session.py +876 -0
- pdd/render_mermaid.py +236 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/setup_tool.py +648 -0
- pdd/simple_math.py +2 -0
- pdd/split_main.py +3 -2
- pdd/summarize_directory.py +237 -195
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +839 -112
- pdd/sync_main.py +351 -57
- pdd/sync_orchestration.py +1400 -756
- pdd/sync_tui.py +848 -0
- pdd/template_expander.py +161 -0
- pdd/template_registry.py +264 -0
- pdd/templates/architecture/architecture_json.prompt +237 -0
- pdd/templates/generic/generate_prompt.prompt +174 -0
- pdd/trace.py +168 -12
- pdd/trace_main.py +4 -3
- pdd/track_cost.py +140 -63
- pdd/unfinished_prompt.py +51 -4
- pdd/update_main.py +567 -67
- pdd/update_model_costs.py +2 -2
- pdd/update_prompt.py +19 -4
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +29 -11
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +1 -1
- pdd_cli-0.0.45.dist-info/RECORD +0 -116
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
pdd/agentic_change_orchestrator.py (new file)
@@ -0,0 +1,526 @@
"""
Orchestrator for the 12-step agentic change workflow.

Runs each step as a separate agentic task, accumulates context, tracks progress/cost,
and supports resuming from saved state. Includes a review loop (steps 10-11).
"""

import os
import re
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any

from rich.console import Console
from rich.markup import escape

from pdd.agentic_common import (
    run_agentic_task,
    load_workflow_state,
    save_workflow_state,
    clear_workflow_state,
)
from pdd.load_prompt_template import load_prompt_template

# Initialize console for rich output
console = Console()

# Per-Step Timeouts (Workflow specific)
CHANGE_STEP_TIMEOUTS: Dict[int, float] = {
    1: 240.0,    # Duplicate Check
    2: 240.0,    # Docs Comparison
    3: 340.0,    # Research
    4: 340.0,    # Clarify
    5: 340.0,    # Docs Changes
    6: 340.0,    # Identify Dev Units
    7: 340.0,    # Architecture Review
    8: 600.0,    # Analyze Prompt Changes (Complex)
    9: 1000.0,   # Implement Changes (Most Complex)
    10: 340.0,   # Identify Issues
    11: 600.0,   # Fix Issues (Complex)
    12: 340.0,   # Create PR
}

MAX_REVIEW_ITERATIONS = 5

def _get_git_root(cwd: Path) -> Optional[Path]:
    """Get repo root via git rev-parse."""
    try:
        result = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            cwd=cwd,
            capture_output=True,
            text=True,
            check=True
        )
        return Path(result.stdout.strip())
    except subprocess.CalledProcessError:
        return None

def _setup_worktree(cwd: Path, issue_number: int, quiet: bool) -> Tuple[Optional[Path], Optional[str]]:
    """
    Create an isolated git worktree for the issue.
    Returns (worktree_path, error_message).
    """
    git_root = _get_git_root(cwd)
    if not git_root:
        return None, "Not a git repository"

    branch_name = f"change/issue-{issue_number}"
    worktree_rel_path = Path(".pdd") / "worktrees" / f"change-issue-{issue_number}"
    worktree_path = git_root / worktree_rel_path

    # Clean up existing directory if it exists but isn't a valid worktree
    if worktree_path.exists():
        # Check if it's a valid worktree
        is_worktree = False
        try:
            wt_list = subprocess.run(
                ["git", "worktree", "list", "--porcelain"],
                cwd=git_root,
                capture_output=True,
                text=True
            ).stdout
            if str(worktree_path) in wt_list:
                is_worktree = True
        except Exception:
            pass

        if is_worktree:
            # Remove existing worktree to start fresh or ensure clean state
            subprocess.run(
                ["git", "worktree", "remove", "--force", str(worktree_path)],
                cwd=git_root,
                capture_output=True
            )
        else:
            # Just a directory
            shutil.rmtree(worktree_path)

    # Clean up branch if it exists
    try:
        subprocess.run(
            ["git", "branch", "-D", branch_name],
            cwd=git_root,
            capture_output=True
        )
    except Exception:
        pass

    # Create worktree
    try:
        worktree_path.parent.mkdir(parents=True, exist_ok=True)
        subprocess.run(
            ["git", "worktree", "add", "-b", branch_name, str(worktree_path), "HEAD"],
            cwd=git_root,
            capture_output=True,
            check=True
        )
        if not quiet:
            console.print(f"[blue]Working in worktree: {worktree_path}[/blue]")
        return worktree_path, None
    except subprocess.CalledProcessError as e:
        return None, f"Git worktree creation failed: {e}"

def _parse_changed_files(output: str) -> List[str]:
    """Extract file paths from FILES_CREATED or FILES_MODIFIED lines."""
    files = []
    # Look for FILES_CREATED: path, path
    created_match = re.search(r"FILES_CREATED:\s*(.*)", output)
    if created_match:
        files.extend([f.strip() for f in created_match.group(1).split(",") if f.strip()])

    # Look for FILES_MODIFIED: path, path
    modified_match = re.search(r"FILES_MODIFIED:\s*(.*)", output)
    if modified_match:
        files.extend([f.strip() for f in modified_match.group(1).split(",") if f.strip()])

    return list(set(files))  # Deduplicate

def _check_hard_stop(step_num: int, output: str) -> Optional[str]:
    """Check output for hard stop conditions."""
    if step_num == 1 and "Duplicate of #" in output:
        return "Issue is a duplicate"
    if step_num == 2 and "Already Implemented" in output:
        return "Already implemented"
    if step_num == 4 and "Clarification Needed" in output:
        return "Clarification needed"
    if step_num == 6 and "No Dev Units Found" in output:
        return "No dev units found"
    if step_num == 7 and "Architectural Decision Needed" in output:
        return "Architectural decision needed"
    if step_num == 8 and "No Changes Required" in output:
        return "No changes needed"
    if step_num == 9:
        if "FAIL:" in output:
            return "Implementation failed"
        # Note: Missing FILES_... check is handled in logic, not just string match
    return None

def _get_state_dir(cwd: Path) -> Path:
    """Get the state directory relative to git root."""
    root = _get_git_root(cwd) or cwd
    return root / ".pdd" / "change-state"

def run_agentic_change_orchestrator(
    issue_url: str,
    issue_content: str,
    repo_owner: str,
    repo_name: str,
    issue_number: int,
    issue_author: str,
    issue_title: str,
    *,
    cwd: Path,
    verbose: bool = False,
    quiet: bool = False,
    timeout_adder: float = 0.0,
    use_github_state: bool = True
) -> Tuple[bool, str, float, str, List[str]]:
    """
    Orchestrates the 12-step agentic change workflow.

    Returns:
        (success, final_message, total_cost, model_used, changed_files)
    """

    if not quiet:
        console.print(f"Implementing change for issue #{issue_number}: \"{issue_title}\"")

    state_dir = _get_state_dir(cwd)

    # Load state
    state, loaded_gh_id = load_workflow_state(
        cwd, issue_number, "change", state_dir, repo_owner, repo_name, use_github_state
    )

    # Initialize variables from state or defaults
    if state is not None:
        last_completed_step = state.get("last_completed_step", 0)
        step_outputs = state.get("step_outputs", {})
        total_cost = state.get("total_cost", 0.0)
        model_used = state.get("model_used", "unknown")
        github_comment_id = loaded_gh_id  # Use the ID returned by load_workflow_state
        worktree_path_str = state.get("worktree_path")
        worktree_path = Path(worktree_path_str) if worktree_path_str else None
    else:
        # Initialize fresh state dict for new workflow
        state = {"step_outputs": {}}
        last_completed_step = 0
        step_outputs = state["step_outputs"]
        total_cost = 0.0
        model_used = "unknown"
        github_comment_id = None
        worktree_path = None

    # Context accumulation dictionary
    context = {
        "issue_url": issue_url,
        "issue_content": issue_content,
        "repo_owner": repo_owner,
        "repo_name": repo_name,
        "issue_number": issue_number,
        "issue_author": issue_author,
        "issue_title": issue_title,
    }

    # Populate context with cached outputs
    for s_num, s_out in step_outputs.items():
        context[f"step{s_num}_output"] = s_out

    # Determine start step
    start_step = last_completed_step + 1

    if last_completed_step > 0 and not quiet:
        console.print(f"Resuming change workflow for issue #{issue_number}")
        console.print(f" Steps 1-{last_completed_step} already complete (cached)")
        console.print(f" Starting from Step {start_step}")

    # --- Steps 1 through 9 ---

    # Step definitions for 1-9
    steps_config = [
        (1, "duplicate", "Search for duplicate issues"),
        (2, "docs", "Check if already implemented"),
        (3, "research", "Research to clarify specifications"),
        (4, "clarify", "Verify requirements are clear"),
        (5, "docs_change", "Analyze documentation changes needed"),
        (6, "devunits", "Identify dev units involved"),
        (7, "architecture", "Review architecture"),
        (8, "analyze", "Analyze prompt changes"),
        (9, "implement", "Implement the prompt changes"),
    ]

    current_work_dir = cwd
    changed_files = []

    # If we are resuming at step 9 or later, we need to ensure the worktree exists/is active
    if start_step >= 9:
        # If we have a path in state, verify it exists, otherwise recreate
        if worktree_path and worktree_path.exists():
            if not quiet:
                console.print(f"[blue]Reusing existing worktree: {worktree_path}[/blue]")
            current_work_dir = worktree_path
        else:
            # Re-create worktree if missing
            wt_path, err = _setup_worktree(cwd, issue_number, quiet)
            if not wt_path:
                return False, f"Failed to restore worktree: {err}", total_cost, model_used, []
            worktree_path = wt_path
            current_work_dir = worktree_path
            # Update state with new path
            state["worktree_path"] = str(worktree_path)

    for step_num, name, description in steps_config:
        # Skip if already done
        if step_num < start_step:
            continue

        # Special handling before Step 9: Create Worktree
        if step_num == 9:
            # Check current branch before creating worktree
            try:
                current_branch = subprocess.run(
                    ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                    cwd=cwd,
                    capture_output=True,
                    text=True,
                    check=True
                ).stdout.strip()

                if current_branch not in ["main", "master"] and not quiet:
                    console.print(f"[yellow]Note: Creating branch from HEAD ({current_branch}), not origin/main. PR will include commits from this branch. Run from main for independent changes.[/yellow]")
            except subprocess.CalledProcessError:
                pass  # Ignore if git command fails, worktree setup will likely catch issues

            wt_path, err = _setup_worktree(cwd, issue_number, quiet)
            if not wt_path:
                return False, f"Failed to create worktree: {err}", total_cost, model_used, []
            worktree_path = wt_path
            current_work_dir = worktree_path
            state["worktree_path"] = str(worktree_path)
            context["worktree_path"] = str(worktree_path)

        if not quiet:
            console.print(f"[bold][Step {step_num}/12][/bold] {description}...")

        # Load Prompt
        template_name = f"agentic_change_step{step_num}_{name}_LLM"
        prompt_template = load_prompt_template(template_name)
        if not prompt_template:
            return False, f"Missing prompt template: {template_name}", total_cost, model_used, []

        # Format Prompt
        try:
            formatted_prompt = prompt_template.format(**context)
        except KeyError as e:
            return False, f"Context missing key for step {step_num}: {e}", total_cost, model_used, []

        # Run Task
        timeout = CHANGE_STEP_TIMEOUTS.get(step_num, 340.0) + timeout_adder
        step_success, step_output, step_cost, step_model = run_agentic_task(
            instruction=formatted_prompt,
            cwd=current_work_dir,
            verbose=verbose,
            quiet=quiet,
            timeout=timeout,
            label=f"step{step_num}"
        )

        # Update tracking
        total_cost += step_cost
        model_used = step_model
        state["total_cost"] = total_cost
        state["model_used"] = model_used

        if not step_success:
            # Check if it's a hard stop condition that caused "failure" or just agent error
            stop_reason = _check_hard_stop(step_num, step_output)
            if stop_reason:
                if not quiet:
                    console.print(f"[yellow]Investigation stopped at Step {step_num}: {stop_reason}[/yellow]")
                # Save state so we don't re-run previous steps
                state["last_completed_step"] = step_num
                state["step_outputs"][str(step_num)] = step_output
                save_workflow_state(cwd, issue_number, "change", state, state_dir, repo_owner, repo_name, use_github_state, github_comment_id)
                return False, f"Stopped at step {step_num}: {stop_reason}", total_cost, model_used, []

            # Soft failure
            console.print(f"[yellow]Warning: Step {step_num} reported failure but continuing...[/yellow]")

        # Check hard stops on success too
        stop_reason = _check_hard_stop(step_num, step_output)
        if stop_reason:
            if not quiet:
                console.print(f"[yellow]Investigation stopped at Step {step_num}: {stop_reason}[/yellow]")
            state["last_completed_step"] = step_num
            state["step_outputs"][str(step_num)] = step_output
            save_workflow_state(cwd, issue_number, "change", state, state_dir, repo_owner, repo_name, use_github_state, github_comment_id)
            return False, f"Stopped at step {step_num}: {stop_reason}", total_cost, model_used, []

        # Step 9 specific: Parse files
        if step_num == 9:
            extracted_files = _parse_changed_files(step_output)
            changed_files = extracted_files
            context["files_to_stage"] = ", ".join(changed_files)

            if not changed_files:
                # Hard stop if implementation produced no file changes
                return False, "Stopped at step 9: Implementation produced no file changes", total_cost, model_used, []

        # Update Context & State
        context[f"step{step_num}_output"] = step_output
        state["step_outputs"][str(step_num)] = step_output
        state["last_completed_step"] = step_num

        # Save State
        save_result = save_workflow_state(cwd, issue_number, "change", state, state_dir, repo_owner, repo_name, use_github_state, github_comment_id)
        if save_result:
            github_comment_id = save_result
            state["github_comment_id"] = github_comment_id

        if not quiet:
            # Brief result summary
            lines = step_output.strip().split('\n')
            brief = lines[-1] if lines else "Done"
            if len(brief) > 80: brief = brief[:77] + "..."
            console.print(f" -> {escape(brief)}")

    # --- Review Loop (Steps 10-11) ---

    # Ensure we have files_to_stage if we resumed after step 9
    if "files_to_stage" not in context:
        # Try to recover from step 9 output
        s9_out = context.get("step9_output", "")
        c_files = _parse_changed_files(s9_out)
        changed_files = c_files
        context["files_to_stage"] = ", ".join(c_files)

    review_iteration = state.get("review_iteration", 0)
    previous_fixes = state.get("previous_fixes", "")

    # If we haven't finished the review loop (i.e., we haven't reached step 12 yet)
    if last_completed_step < 12:
        while review_iteration < MAX_REVIEW_ITERATIONS:
            review_iteration += 1
            state["review_iteration"] = review_iteration

            # --- Step 10: Identify Issues ---
            if not quiet:
                console.print(f"[bold][Step 10/12][/bold] Identifying issues (iteration {review_iteration}/{MAX_REVIEW_ITERATIONS})...")

            s10_template = load_prompt_template("agentic_change_step10_identify_issues_LLM")
            context["review_iteration"] = review_iteration
            context["previous_fixes"] = previous_fixes

            s10_prompt = s10_template.format(**context)

            timeout10 = CHANGE_STEP_TIMEOUTS.get(10, 340.0) + timeout_adder
            s10_success, s10_output, s10_cost, s10_model = run_agentic_task(
                instruction=s10_prompt,
                cwd=current_work_dir,
                verbose=verbose,
                quiet=quiet,
                timeout=timeout10,
                label=f"step10_iter{review_iteration}"
            )

            total_cost += s10_cost
            model_used = s10_model
            state["total_cost"] = total_cost

            if "No Issues Found" in s10_output:
                if not quiet:
                    console.print(" -> No issues found. Proceeding to PR.")
                break

            if not quiet:
                console.print(" -> Issues found. Proceeding to fix.")

            # --- Step 11: Fix Issues ---
            if not quiet:
                console.print(f"[bold][Step 11/12][/bold] Fixing issues (iteration {review_iteration}/{MAX_REVIEW_ITERATIONS})...")

            s11_template = load_prompt_template("agentic_change_step11_fix_issues_LLM")
            context["step10_output"] = s10_output

            s11_prompt = s11_template.format(**context)

            timeout11 = CHANGE_STEP_TIMEOUTS.get(11, 600.0) + timeout_adder
            s11_success, s11_output, s11_cost, s11_model = run_agentic_task(
                instruction=s11_prompt,
                cwd=current_work_dir,
                verbose=verbose,
                quiet=quiet,
                timeout=timeout11,
                label=f"step11_iter{review_iteration}"
            )

            total_cost += s11_cost
            model_used = s11_model
            state["total_cost"] = total_cost

            previous_fixes += f"\n\nIteration {review_iteration}:\n{s11_output}"
            state["previous_fixes"] = previous_fixes

            # Save state inside loop
            save_result = save_workflow_state(cwd, issue_number, "change", state, state_dir, repo_owner, repo_name, use_github_state, github_comment_id)
            if save_result:
                github_comment_id = save_result
                state["github_comment_id"] = github_comment_id

        if review_iteration >= MAX_REVIEW_ITERATIONS:
            console.print("[yellow]Warning: Maximum review iterations reached. Proceeding to PR creation.[/yellow]")

    # --- Step 12: Create PR ---
    if last_completed_step < 12:
        if not quiet:
            console.print("[bold][Step 12/12][/bold] Create PR and link to issue...")

        s12_template = load_prompt_template("agentic_change_step12_create_pr_LLM")
        s12_prompt = s12_template.format(**context)

        timeout12 = CHANGE_STEP_TIMEOUTS.get(12, 340.0) + timeout_adder
        s12_success, s12_output, s12_cost, s12_model = run_agentic_task(
            instruction=s12_prompt,
            cwd=current_work_dir,
            verbose=verbose,
            quiet=quiet,
            timeout=timeout12,
            label="step12"
        )

        total_cost += s12_cost
        model_used = s12_model
        state["total_cost"] = total_cost

        if not s12_success:
            console.print("[red]Step 12 (PR Creation) failed.[/red]")
            # Save state to allow retry
            save_workflow_state(cwd, issue_number, "change", state, state_dir, repo_owner, repo_name, use_github_state, github_comment_id)
            return False, "PR Creation failed", total_cost, model_used, changed_files

        # Extract PR URL if possible (simple heuristic)
        pr_url = "Unknown"
        url_match = re.search(r"https://github.com/\S+/pull/\d+", s12_output)
        if url_match:
            pr_url = url_match.group(0)

        # Final Success
        if not quiet:
            console.print("\n[green]Change workflow complete[/green]")
            console.print(f" Total cost: ${total_cost:.4f}")
            console.print(f" Files changed: {', '.join(changed_files)}")
            console.print(f" PR: {pr_url}")
            console.print(f" Review iterations: {review_iteration}")
            console.print("\nNext steps:")
            console.print(" 1. Review and merge the PR")
            console.print(" 2. Run `pdd sync <module>` after merge")

        # Clear state on success
        clear_workflow_state(cwd, issue_number, "change", state_dir, repo_owner, repo_name, use_github_state)

        return True, f"PR Created: {pr_url}", total_cost, model_used, changed_files

    return True, "Workflow already completed", total_cost, model_used, changed_files