pdd-cli 0.0.118__py3-none-any.whl → 0.0.121__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. pdd/__init__.py +1 -1
  2. pdd/agentic_bug_orchestrator.py +15 -6
  3. pdd/agentic_change_orchestrator.py +18 -7
  4. pdd/agentic_common.py +68 -40
  5. pdd/agentic_crash.py +2 -1
  6. pdd/agentic_e2e_fix_orchestrator.py +165 -9
  7. pdd/agentic_update.py +2 -1
  8. pdd/agentic_verify.py +3 -2
  9. pdd/auto_include.py +51 -0
  10. pdd/commands/analysis.py +32 -25
  11. pdd/commands/connect.py +69 -1
  12. pdd/commands/fix.py +31 -13
  13. pdd/commands/generate.py +5 -0
  14. pdd/commands/modify.py +47 -11
  15. pdd/commands/utility.py +12 -7
  16. pdd/core/cli.py +17 -4
  17. pdd/core/dump.py +68 -20
  18. pdd/fix_main.py +4 -2
  19. pdd/frontend/dist/assets/index-CUWd8al1.js +450 -0
  20. pdd/frontend/dist/index.html +1 -1
  21. pdd/llm_invoke.py +82 -12
  22. pdd/operation_log.py +342 -0
  23. pdd/postprocess.py +122 -100
  24. pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +11 -2
  25. pdd/prompts/generate_test_LLM.prompt +0 -1
  26. pdd/prompts/generate_test_from_example_LLM.prompt +251 -0
  27. pdd/prompts/prompt_code_diff_LLM.prompt +29 -25
  28. pdd/server/routes/prompts.py +26 -1
  29. pdd/server/terminal_spawner.py +15 -7
  30. pdd/sync_orchestration.py +164 -147
  31. pdd/sync_order.py +304 -0
  32. pdd/update_main.py +48 -24
  33. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/METADATA +3 -3
  34. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/RECORD +37 -35
  35. pdd/frontend/dist/assets/index-DQ3wkeQ2.js +0 -449
  36. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/WHEEL +0 -0
  37. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/entry_points.txt +0 -0
  38. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/licenses/LICENSE +0 -0
  39. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/top_level.txt +0 -0
pdd/__init__.py CHANGED
@@ -2,7 +2,7 @@
 
 import os
 
-__version__ = "0.0.118"
+__version__ = "0.0.121"
 
 # Strength parameter used for LLM extraction across the codebase
 # Used in postprocessing, XML tagging, code generation, and other extraction
pdd/agentic_bug_orchestrator.py CHANGED
@@ -12,7 +12,8 @@ from .agentic_common import (
     run_agentic_task,
     load_workflow_state,
     save_workflow_state,
-    clear_workflow_state
+    clear_workflow_state,
+    DEFAULT_MAX_RETRIES
 )
 from .load_prompt_template import load_prompt_template
 
@@ -357,6 +358,7 @@ def run_agentic_bug_orchestrator(
             quiet=quiet,
             label=f"step{step_num}",
             timeout=BUG_STEP_TIMEOUTS.get(step_num, 340.0) + timeout_adder,
+            max_retries=DEFAULT_MAX_RETRIES,
         )
 
         # Update tracking
@@ -441,17 +443,24 @@ def run_agentic_bug_orchestrator(
         console.print(f" → Step {step_num} complete.")
 
         # Save state after each step (for resume support)
-        step_outputs[str(step_num)] = output
-
+        # Only mark step completed if it succeeded; failed steps get "FAILED:" prefix
+        # and last_completed_step stays at previous step (ensures resume re-runs failed step)
+        if success:
+            step_outputs[str(step_num)] = output
+            last_completed_step_to_save = step_num
+        else:
+            step_outputs[str(step_num)] = f"FAILED: {output}"
+            last_completed_step_to_save = step_num - 1
+
         new_state = {
             "workflow": "bug",
             "issue_number": issue_number,
             "issue_url": issue_url,
-            "last_completed_step": step_num,
-            "step_outputs": step_outputs,
+            "last_completed_step": last_completed_step_to_save,
+            "step_outputs": step_outputs.copy(),  # Copy to avoid shared reference
             "total_cost": total_cost,
             "model_used": last_model_used,
-            "changed_files": changed_files,
+            "changed_files": changed_files.copy(),  # Copy to avoid shared reference
            "worktree_path": str(worktree_path) if worktree_path else None,
            "github_comment_id": github_comment_id
        }
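A note on the resume semantics introduced above: because a failed step never advances last_completed_step, a resumed run re-executes the step that failed instead of skipping past it. A minimal sketch of the implied arithmetic (the helper is hypothetical; the state shape mirrors the new_state dict above):

    def next_step_to_run(state: dict) -> int:
        # A failure leaves last_completed_step at the prior step,
        # so resume recomputes the step that failed.
        return state["last_completed_step"] + 1

    state = {"last_completed_step": 4, "step_outputs": {"5": "FAILED: timeout"}}
    assert next_step_to_run(state) == 5  # step 5 is re-run on resume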
pdd/agentic_change_orchestrator.py CHANGED
@@ -20,6 +20,7 @@ from pdd.agentic_common import (
     load_workflow_state,
     save_workflow_state,
     clear_workflow_state,
+    DEFAULT_MAX_RETRIES,
 )
 from pdd.load_prompt_template import load_prompt_template
 
@@ -325,7 +326,8 @@ def run_agentic_change_orchestrator(
             verbose=verbose,
             quiet=quiet,
             timeout=timeout,
-            label=f"step{step_num}"
+            label=f"step{step_num}",
+            max_retries=DEFAULT_MAX_RETRIES,
         )
 
         # Update tracking
@@ -370,10 +372,16 @@ def run_agentic_change_orchestrator(
             return False, "Stopped at step 9: Implementation produced no file changes", total_cost, model_used, []
 
         # Update Context & State
+        # Only mark step completed if it succeeded; failed steps get "FAILED:" prefix
+        # and last_completed_step stays at previous step (ensures resume re-runs failed step)
         context[f"step{step_num}_output"] = step_output
-        state["step_outputs"][str(step_num)] = step_output
-        state["last_completed_step"] = step_num
-
+        if step_success:
+            state["step_outputs"][str(step_num)] = step_output
+            state["last_completed_step"] = step_num
+        else:
+            state["step_outputs"][str(step_num)] = f"FAILED: {step_output}"
+            # Don't update last_completed_step - keep it at previous value
+
         # Save State
         save_result = save_workflow_state(cwd, issue_number, "change", state, state_dir, repo_owner, repo_name, use_github_state, github_comment_id)
         if save_result:
@@ -423,7 +431,8 @@ def run_agentic_change_orchestrator(
             verbose=verbose,
             quiet=quiet,
             timeout=timeout10,
-            label=f"step10_iter{review_iteration}"
+            label=f"step10_iter{review_iteration}",
+            max_retries=DEFAULT_MAX_RETRIES,
         )
 
         total_cost += s10_cost
@@ -454,7 +463,8 @@ def run_agentic_change_orchestrator(
             verbose=verbose,
             quiet=quiet,
             timeout=timeout11,
-            label=f"step11_iter{review_iteration}"
+            label=f"step11_iter{review_iteration}",
+            max_retries=DEFAULT_MAX_RETRIES,
         )
 
         total_cost += s11_cost
@@ -488,7 +498,8 @@ def run_agentic_change_orchestrator(
             verbose=verbose,
             quiet=quiet,
             timeout=timeout12,
-            label="step12"
+            label="step12",
+            max_retries=DEFAULT_MAX_RETRIES,
        )
 
         total_cost += s12_cost
pdd/agentic_common.py CHANGED
@@ -6,6 +6,7 @@ import json
 import shutil
 import subprocess
 import tempfile
+import time
 import uuid
 import re
 from pathlib import Path
@@ -24,6 +25,8 @@ except ImportError:
 AGENT_PROVIDER_PREFERENCE: List[str] = ["anthropic", "google", "openai"]
 DEFAULT_TIMEOUT_SECONDS: float = 240.0
 MIN_VALID_OUTPUT_LENGTH: int = 50
+DEFAULT_MAX_RETRIES: int = 3
+DEFAULT_RETRY_DELAY: float = 5.0
 
 # GitHub State Markers
 GITHUB_STATE_MARKER_START = "<!-- PDD_WORKFLOW_STATE:"
@@ -122,25 +125,37 @@ def _calculate_codex_cost(usage: Dict[str, Any]) -> float:
     return input_cost + cached_cost + output_cost
 
 def run_agentic_task(
-    instruction: str,
-    cwd: Path,
-    *,
-    verbose: bool = False,
-    quiet: bool = False,
-    label: str = "",
-    timeout: Optional[float] = None
+    instruction: str,
+    cwd: Path,
+    *,
+    verbose: bool = False,
+    quiet: bool = False,
+    label: str = "",
+    timeout: Optional[float] = None,
+    max_retries: int = 1,
+    retry_delay: float = DEFAULT_RETRY_DELAY
 ) -> Tuple[bool, str, float, str]:
     """
     Runs an agentic task using available providers in preference order.
-
+
+    Args:
+        instruction: The task instruction
+        cwd: Working directory
+        verbose: Show detailed output
+        quiet: Suppress all non-error output
+        label: Task label for logging
+        timeout: Optional timeout override
+        max_retries: Number of attempts per provider before fallback (default: 1 = no retries)
+        retry_delay: Base delay in seconds for exponential backoff (default: DEFAULT_RETRY_DELAY)
+
     Returns:
         (success, output_text, cost_usd, provider_used)
     """
     agents = get_available_agents()
-
+
     # Filter agents based on preference order
     candidates = [p for p in AGENT_PROVIDER_PREFERENCE if p in agents]
-
+
     if not candidates:
         msg = "No agent providers are available (check CLI installation and API keys)"
         if not quiet:
@@ -148,11 +163,11 @@ def run_agentic_task(
         return False, msg, 0.0, ""
 
     effective_timeout = timeout if timeout is not None else DEFAULT_TIMEOUT_SECONDS
-
+
     # Create a unique temp file for the prompt
     prompt_filename = f".agentic_prompt_{uuid.uuid4().hex[:8]}.txt"
     prompt_path = cwd / prompt_filename
-
+
     full_instruction = (
         f"{instruction}\n\n"
         f"Read the file {prompt_filename} for instructions. "
@@ -168,34 +183,47 @@ def run_agentic_task(
         if verbose:
             console.print(f"[dim]Attempting provider: {provider} for task '{label}'[/dim]")
 
-        success, output, cost = _run_with_provider(
-            provider, prompt_path, cwd, effective_timeout, verbose, quiet
-        )
-
-        # False Positive Detection
-        if success:
-            is_false_positive = (cost == 0.0 and len(output.strip()) < MIN_VALID_OUTPUT_LENGTH)
-
-            if is_false_positive:
-                if not quiet:
-                    console.print(f"[bold red]Provider '{provider}' returned success but appears to be a false positive (Cost: {cost}, Len: {len(output)})[/bold red]")
-                # Treat as failure, try next provider
-                continue
-
-            # Check for suspicious files (C, E, T)
-            suspicious = []
-            for name in ["C", "E", "T"]:
-                if (cwd / name).exists():
-                    suspicious.append(name)
-
-            if suspicious:
-                console.print(f"[bold red]SUSPICIOUS FILES DETECTED: {', '.join(['- ' + s for s in suspicious])}[/bold red]")
-
-            # Real success
-            return True, output, cost, provider
-        else:
-            if verbose:
-                console.print(f"[yellow]Provider {provider} failed: {output}[/yellow]")
+        last_output = ""
+        for attempt in range(1, max_retries + 1):
+            if verbose and attempt > 1:
+                console.print(f"[dim]Retry {attempt}/{max_retries} for {provider} (task: {label})[/dim]")
+
+            success, output, cost = _run_with_provider(
+                provider, prompt_path, cwd, effective_timeout, verbose, quiet
+            )
+            last_output = output
+
+            # False Positive Detection
+            if success:
+                is_false_positive = (cost == 0.0 and len(output.strip()) < MIN_VALID_OUTPUT_LENGTH)
+
+                if is_false_positive:
+                    if not quiet:
+                        console.print(f"[yellow]Provider '{provider}' returned false positive (attempt {attempt})[/yellow]")
+                    # Treat as failure, retry
+                else:
+                    # Check for suspicious files (C, E, T)
+                    suspicious = []
+                    for name in ["C", "E", "T"]:
+                        if (cwd / name).exists():
+                            suspicious.append(name)
+
+                    if suspicious:
+                        console.print(f"[bold red]SUSPICIOUS FILES DETECTED: {', '.join(['- ' + s for s in suspicious])}[/bold red]")
+
+                    # Real success
+                    return True, output, cost, provider
+
+            # Failed - retry with backoff if attempts remain
+            if attempt < max_retries:
+                backoff = retry_delay * attempt
+                if verbose:
+                    console.print(f"[dim]Waiting {backoff}s before retry...[/dim]")
+                time.sleep(backoff)
+
+        # All retries exhausted for this provider
+        if verbose:
+            console.print(f"[yellow]Provider {provider} failed after {max_retries} attempts: {last_output}[/yellow]")
 
     return False, "All agent providers failed", 0.0, ""
 
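A usage sketch for the retry parameters added above, assuming the run_agentic_task signature in this diff (the instruction and path are placeholders). Note that as released the backoff grows linearly (retry_delay * attempt, i.e. 5s then 10s at the defaults), although the docstring describes it as exponential:

    from pathlib import Path

    success, output, cost, provider = run_agentic_task(
        "Investigate the failing test and summarize the root cause",
        Path("."),
        label="demo",
        timeout=120.0,
        max_retries=3,    # up to 3 attempts per provider before falling back
        retry_delay=5.0,  # sleeps 5s after attempt 1, 10s after attempt 2
    )
    if not success:
        print(f"All providers failed: {output}")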
pdd/agentic_crash.py CHANGED
@@ -8,7 +8,7 @@ from typing import Any, Iterable, Mapping
 
 from rich.console import Console
 
-from .agentic_common import get_available_agents, run_agentic_task
+from .agentic_common import get_available_agents, run_agentic_task, DEFAULT_MAX_RETRIES
 from .get_run_command import get_run_command_for_file
 from .load_prompt_template import load_prompt_template
 
@@ -458,6 +458,7 @@ def run_agentic_crash(
             verbose=verbose,
             quiet=quiet,
             label="agentic_crash_explore",
+            max_retries=DEFAULT_MAX_RETRIES,
         )
     except Exception as exc:  # noqa: BLE001
         msg = f"Agentic CLI invocation failed: {exc}"
pdd/agentic_e2e_fix_orchestrator.py CHANGED
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
+import hashlib
 import os
+import subprocess
 import sys
 import time
 import json
@@ -15,6 +17,7 @@ from .agentic_common import (
     load_workflow_state,
     save_workflow_state,
     clear_workflow_state,
+    DEFAULT_MAX_RETRIES,
 )
 from .load_prompt_template import load_prompt_template
 
@@ -145,6 +148,131 @@ def _check_staleness(state: Dict[str, Any], cwd: Path) -> None:
     if stale:
         console.print("[yellow]Warning: Codebase may have changed since last run. Consider --no-resume for fresh start.[/yellow]")
 
+
+def _get_modified_and_untracked(cwd: Path) -> Set[str]:
+    """Returns set of modified tracked files plus untracked files."""
+    files: Set[str] = set()
+
+    # Get modified tracked files
+    result = subprocess.run(
+        ["git", "diff", "--name-only", "HEAD"],
+        cwd=cwd,
+        capture_output=True,
+        text=True
+    )
+    if result.returncode == 0:
+        files.update(f for f in result.stdout.strip().split("\n") if f)
+
+    # Get untracked files
+    result = subprocess.run(
+        ["git", "ls-files", "--others", "--exclude-standard"],
+        cwd=cwd,
+        capture_output=True,
+        text=True
+    )
+    if result.returncode == 0:
+        files.update(f for f in result.stdout.strip().split("\n") if f)
+
+    return files
+
+
+def _get_file_hashes(cwd: Path) -> Dict[str, Optional[str]]:
+    """
+    Returns {filepath: md5_hash} for all modified and untracked files.
+
+    If a file is deleted or unreadable, stores None for that file.
+    """
+    hashes: Dict[str, Optional[str]] = {}
+    for filepath in _get_modified_and_untracked(cwd):
+        path = cwd / filepath
+        if path.exists() and path.is_file():
+            try:
+                hashes[filepath] = hashlib.md5(path.read_bytes()).hexdigest()
+            except (IOError, OSError):
+                hashes[filepath] = None
+        else:
+            hashes[filepath] = None  # Deleted or not a file
+    return hashes
+
+
+def _commit_and_push(
+    cwd: Path,
+    issue_number: int,
+    issue_title: str,
+    initial_file_hashes: Dict[str, Optional[str]],
+    quiet: bool = False
+) -> Tuple[bool, str]:
+    """
+    Commits only files that changed during the workflow and pushes.
+
+    Uses hash comparison to detect actual content changes, avoiding
+    staging pre-existing modified/untracked files.
+
+    The PR was already created by `pdd bug`, so pushing
+    automatically updates it.
+
+    Args:
+        cwd: Working directory
+        issue_number: GitHub issue number
+        issue_title: Issue title for commit message
+        initial_file_hashes: File hashes from before workflow started
+        quiet: Suppress output
+
+    Returns:
+        (success, message)
+    """
+    # Get current file hashes
+    current_hashes = _get_file_hashes(cwd)
+
+    # Find files that changed during workflow
+    files_to_commit: List[str] = []
+    for filepath, current_hash in current_hashes.items():
+        if filepath not in initial_file_hashes:
+            # New file created during workflow
+            files_to_commit.append(filepath)
+        elif initial_file_hashes[filepath] != current_hash:
+            # Content changed during workflow
+            files_to_commit.append(filepath)
+
+    if not files_to_commit:
+        return True, "No changes to commit"
+
+    # Stage only workflow-changed files
+    for filepath in files_to_commit:
+        stage_result = subprocess.run(
+            ["git", "add", filepath],
+            cwd=cwd,
+            capture_output=True,
+            text=True
+        )
+        if stage_result.returncode != 0:
+            return False, f"Failed to stage {filepath}: {stage_result.stderr}"
+
+    # Commit with message referencing issue
+    commit_msg = f"fix: {issue_title}\n\nFixes #{issue_number}"
+    commit_result = subprocess.run(
+        ["git", "commit", "-m", commit_msg],
+        cwd=cwd,
+        capture_output=True,
+        text=True
+    )
+    if commit_result.returncode != 0:
+        return False, f"Failed to commit: {commit_result.stderr}"
+
+    # Push to remote (branch already exists from pdd bug)
+    push_result = subprocess.run(
+        ["git", "push"],
+        cwd=cwd,
+        capture_output=True,
+        text=True
+    )
+
+    if push_result.returncode == 0:
+        return True, f"Committed and pushed {len(files_to_commit)} file(s)"
+    else:
+        return False, f"Push failed: {push_result.stderr}"
+
+
 def run_agentic_e2e_fix_orchestrator(
     issue_url: str,
     issue_content: str,
@@ -213,6 +341,9 @@ def run_agentic_e2e_fix_orchestrator(
 
     console.print(f"Fixing e2e tests for issue #{issue_number}: \"{issue_title}\"")
 
+    # Snapshot file state before workflow (for hash-based commit detection)
+    initial_file_hashes = _get_file_hashes(cwd)
+
     success = False
     final_message = ""
 
@@ -273,21 +404,30 @@ def run_agentic_e2e_fix_orchestrator(
     # 3. Run Task
     base_timeout = E2E_FIX_STEP_TIMEOUTS.get(step_num, 340.0)
     timeout = base_timeout + timeout_adder
-
+
     step_success, step_output, step_cost, step_model = run_agentic_task(
         instruction=formatted_prompt,
         cwd=cwd,
         verbose=verbose,
         quiet=quiet,
         timeout=timeout,
-        label=f"cycle{current_cycle}_step{step_num}"
+        label=f"cycle{current_cycle}_step{step_num}",
+        max_retries=DEFAULT_MAX_RETRIES,
     )
 
     # 4. Store Output & Accumulate
-    step_outputs[str(step_num)] = step_output
+    # Only mark step completed if it succeeded; failed steps get "FAILED:" prefix
+    # and last_completed_step stays at previous step (ensures resume re-runs failed step)
+    if step_success:
+        step_outputs[str(step_num)] = step_output
+        last_completed_step = step_num
+    else:
+        step_outputs[str(step_num)] = f"FAILED: {step_output}"
+        # Don't update last_completed_step - keep it at previous value
+
     total_cost += step_cost
     model_used = step_model if step_model else model_used
-
+
     # Parse changed files
     new_files = _parse_changed_files(step_output)
     for f in new_files:
@@ -301,21 +441,23 @@ def run_agentic_e2e_fix_orchestrator(
     dev_unit_states = _update_dev_unit_states(step_output, dev_unit_states, dev_units_str)
 
     # Print brief result
-    console.print(f" -> Step {step_num} complete. Cost: ${step_cost:.4f}")
+    if step_success:
+        console.print(f" -> Step {step_num} complete. Cost: ${step_cost:.4f}")
+    else:
+        console.print(f" -> Step {step_num} [red]failed[/red]. Cost: ${step_cost:.4f}")
 
     # 5. Save State
-    last_completed_step = step_num
     state_data = {
         "workflow": workflow_name,
         "issue_url": issue_url,
         "issue_number": issue_number,
        "current_cycle": current_cycle,
        "last_completed_step": last_completed_step,
-        "step_outputs": step_outputs,
-        "dev_unit_states": dev_unit_states,
+        "step_outputs": step_outputs.copy(),  # Copy to avoid shared reference
+        "dev_unit_states": dev_unit_states.copy(),  # Copy to avoid shared reference
        "total_cost": total_cost,
        "model_used": model_used,
-        "changed_files": changed_files,
+        "changed_files": changed_files.copy(),  # Copy to avoid shared reference
        "last_saved_at": datetime.now().isoformat(),
        "github_comment_id": github_comment_id
    }
@@ -371,6 +513,20 @@ def run_agentic_e2e_fix_orchestrator(
         console.print(f" Files changed: {', '.join(changed_files)}")
         fixed_units = [u for u, s in dev_unit_states.items() if s.get("fixed")]
         console.print(f" Dev units fixed: {', '.join(fixed_units)}")
+
+        # Commit and push changes to update the existing PR
+        commit_success, commit_message = _commit_and_push(
+            cwd=cwd,
+            issue_number=issue_number,
+            issue_title=issue_title,
+            initial_file_hashes=initial_file_hashes,
+            quiet=quiet
+        )
+        if commit_success:
+            console.print(f" [green]{commit_message}[/green]")
+        else:
+            console.print(f" [yellow]Warning: {commit_message}[/yellow]")
+
         return True, final_message, total_cost, model_used, changed_files
     else:
         final_message = f"Max cycles ({max_cycles}) reached without all tests passing"
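The commit step above works by comparing per-file MD5 snapshots taken before and after the workflow, so files that were already dirty before the run are never staged. A reduced sketch of that comparison, using the helper names from the diff (illustrative only):

    # Snapshot before the workflow, compare after; commit only real changes.
    before = _get_file_hashes(cwd)
    # ... workflow steps mutate files here ...
    after = _get_file_hashes(cwd)
    changed = [path for path, digest in after.items()
               if path not in before or before[path] != digest]
    # Only `changed` reaches `git add`: a file dirty before the run keeps
    # the same hash, so unrelated local edits stay out of the commit.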
pdd/agentic_update.py CHANGED
@@ -18,7 +18,7 @@ import traceback
 from rich.console import Console
 from rich.markdown import Markdown
 
-from .agentic_common import get_available_agents, run_agentic_task
+from .agentic_common import get_available_agents, run_agentic_task, DEFAULT_MAX_RETRIES
 from .load_prompt_template import load_prompt_template
 
 # Optional globals from package root; ignore if not present.
@@ -337,6 +337,7 @@ def run_agentic_update(
             verbose=bool(verbose and not quiet),
             quiet=quiet,
             label=f"agentic_update:{code_path.stem}",
+            max_retries=DEFAULT_MAX_RETRIES,
         )
     except Exception as exc:
         message = f"Agentic task failed with an exception: {exc}"
pdd/agentic_verify.py CHANGED
@@ -8,7 +8,7 @@ from typing import Any
 
 from rich.console import Console
 
-from .agentic_common import run_agentic_task
+from .agentic_common import run_agentic_task, DEFAULT_MAX_RETRIES
 from .load_prompt_template import load_prompt_template
 
 console = Console()
@@ -133,7 +133,8 @@ def run_agentic_verify(
         cwd=project_root,
         verbose=verbose,
         quiet=quiet,
-        label="verify-explore"
+        label="verify-explore",
+        max_retries=DEFAULT_MAX_RETRIES,
     )
 
     # 6. Record State After Execution & Detect Changes
pdd/auto_include.py CHANGED
@@ -321,6 +321,54 @@ def _filter_circular_dependencies(dependencies: str, cycles: List[List[str]]) ->
     return result
 
 
+def _extract_includes(content: str) -> Set[str]:
+    """Extract all paths from <include> tags in the content.
+
+    Args:
+        content: The string content to search.
+
+    Returns:
+        A set of paths found in <include> tags.
+    """
+    pattern = r'<include>(.*?)</include>'
+    matches = re.findall(pattern, content, re.DOTALL)
+    return {m.strip() for m in matches}
+
+
+def _filter_existing_includes(input_prompt: str, dependencies: str) -> str:
+    """Remove includes from dependencies that already exist in the input prompt.
+
+    If the input prompt already has <include>path/to/file</include>, and the
+    generated dependencies also have <wrapper><include>path/to/file</include></wrapper>,
+    the duplicate in dependencies should be removed.
+
+    Args:
+        input_prompt: The original input prompt.
+        dependencies: The generated dependencies string.
+
+    Returns:
+        The dependencies string with duplicates removed.
+    """
+    existing_includes = _extract_includes(input_prompt)
+    if not existing_includes:
+        return dependencies
+
+    result = dependencies
+    for include_path in existing_includes:
+        # Remove any include block that contains this path
+        # Pattern matches: <wrapper><include>path</include></wrapper>
+        # We use re.escape for the path to handle special chars
+        pattern = rf'<[^>]+><include>{re.escape(include_path)}</include></[^>]+>\s*'
+        result = re.sub(pattern, '', result)
+
+        # Also try to remove bare includes if they exist in the dependencies string
+        # Pattern matches: <include>path</include> surrounded by whitespace
+        pattern_bare = rf'\s*<include>{re.escape(include_path)}</include>\s*'
+        result = re.sub(pattern_bare, '', result)
+
+    return result
+
+
 def auto_include(
     input_prompt: str,
     directory_path: str,
@@ -408,6 +456,9 @@ def auto_include(
                 f"{' -> '.join(cycle)}[/yellow]"
             )
 
+    # Filter out includes that already exist in the input prompt
+    dependencies = _filter_existing_includes(input_prompt, dependencies)
+
     total_cost = summary_cost + llm_cost
     model_name = llm_model_name or summary_model
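To make the dedup behavior of _filter_existing_includes concrete, a small illustration assuming the regexes shown above (the <dep> wrapper tag is made up for the example):

    prompt = "Context: <include>src/utils.py</include>"
    deps = (
        "<dep><include>src/utils.py</include></dep>\n"
        "<dep><include>src/other.py</include></dep>"
    )
    # The src/utils.py block duplicates an include already in the prompt,
    # so it is stripped; src/other.py survives.
    print(_filter_existing_includes(prompt, deps))
    # -> <dep><include>src/other.py</include></dep>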