pdd-cli 0.0.42__py3-none-any.whl → 0.0.90__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +4 -4
- pdd/agentic_common.py +863 -0
- pdd/agentic_crash.py +534 -0
- pdd/agentic_fix.py +1179 -0
- pdd/agentic_langtest.py +162 -0
- pdd/agentic_update.py +370 -0
- pdd/agentic_verify.py +183 -0
- pdd/auto_deps_main.py +15 -5
- pdd/auto_include.py +63 -5
- pdd/bug_main.py +3 -2
- pdd/bug_to_unit_test.py +2 -0
- pdd/change_main.py +11 -4
- pdd/cli.py +22 -1181
- pdd/cmd_test_main.py +80 -19
- pdd/code_generator.py +58 -18
- pdd/code_generator_main.py +672 -25
- pdd/commands/__init__.py +42 -0
- pdd/commands/analysis.py +248 -0
- pdd/commands/fix.py +140 -0
- pdd/commands/generate.py +257 -0
- pdd/commands/maintenance.py +174 -0
- pdd/commands/misc.py +79 -0
- pdd/commands/modify.py +230 -0
- pdd/commands/report.py +144 -0
- pdd/commands/templates.py +215 -0
- pdd/commands/utility.py +110 -0
- pdd/config_resolution.py +58 -0
- pdd/conflicts_main.py +8 -3
- pdd/construct_paths.py +281 -81
- pdd/context_generator.py +10 -2
- pdd/context_generator_main.py +113 -11
- pdd/continue_generation.py +47 -7
- pdd/core/__init__.py +0 -0
- pdd/core/cli.py +503 -0
- pdd/core/dump.py +554 -0
- pdd/core/errors.py +63 -0
- pdd/core/utils.py +90 -0
- pdd/crash_main.py +44 -11
- pdd/data/language_format.csv +71 -62
- pdd/data/llm_model.csv +20 -18
- pdd/detect_change_main.py +5 -4
- pdd/fix_code_loop.py +331 -77
- pdd/fix_error_loop.py +209 -60
- pdd/fix_errors_from_unit_tests.py +4 -3
- pdd/fix_main.py +75 -18
- pdd/fix_verification_errors.py +12 -100
- pdd/fix_verification_errors_loop.py +319 -272
- pdd/fix_verification_main.py +57 -17
- pdd/generate_output_paths.py +93 -10
- pdd/generate_test.py +16 -5
- pdd/get_jwt_token.py +48 -9
- pdd/get_run_command.py +73 -0
- pdd/get_test_command.py +68 -0
- pdd/git_update.py +70 -19
- pdd/increase_tests.py +7 -0
- pdd/incremental_code_generator.py +2 -2
- pdd/insert_includes.py +11 -3
- pdd/llm_invoke.py +1278 -110
- pdd/load_prompt_template.py +36 -10
- pdd/pdd_completion.fish +25 -2
- pdd/pdd_completion.sh +30 -4
- pdd/pdd_completion.zsh +79 -4
- pdd/postprocess.py +10 -3
- pdd/preprocess.py +228 -15
- pdd/preprocess_main.py +8 -5
- pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
- pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
- pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
- pdd/prompts/agentic_update_LLM.prompt +1071 -0
- pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
- pdd/prompts/auto_include_LLM.prompt +98 -101
- pdd/prompts/change_LLM.prompt +1 -3
- pdd/prompts/detect_change_LLM.prompt +562 -3
- pdd/prompts/example_generator_LLM.prompt +22 -1
- pdd/prompts/extract_code_LLM.prompt +5 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +14 -2
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/extract_promptline_LLM.prompt +17 -11
- pdd/prompts/find_verification_errors_LLM.prompt +6 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +16 -4
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +6 -41
- pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
- pdd/prompts/generate_test_LLM.prompt +21 -6
- pdd/prompts/increase_tests_LLM.prompt +1 -2
- pdd/prompts/insert_includes_LLM.prompt +1181 -6
- pdd/prompts/split_LLM.prompt +1 -62
- pdd/prompts/trace_LLM.prompt +25 -22
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/prompts/update_prompt_LLM.prompt +22 -1
- pdd/prompts/xml_convertor_LLM.prompt +3246 -7
- pdd/pytest_output.py +188 -21
- pdd/python_env_detector.py +151 -0
- pdd/render_mermaid.py +236 -0
- pdd/setup_tool.py +648 -0
- pdd/simple_math.py +2 -0
- pdd/split_main.py +3 -2
- pdd/summarize_directory.py +56 -7
- pdd/sync_determine_operation.py +918 -186
- pdd/sync_main.py +82 -32
- pdd/sync_orchestration.py +1456 -453
- pdd/sync_tui.py +848 -0
- pdd/template_registry.py +264 -0
- pdd/templates/architecture/architecture_json.prompt +242 -0
- pdd/templates/generic/generate_prompt.prompt +174 -0
- pdd/trace.py +168 -12
- pdd/trace_main.py +4 -3
- pdd/track_cost.py +151 -61
- pdd/unfinished_prompt.py +49 -3
- pdd/update_main.py +549 -67
- pdd/update_model_costs.py +2 -2
- pdd/update_prompt.py +19 -4
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/METADATA +20 -7
- pdd_cli-0.0.90.dist-info/RECORD +153 -0
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/licenses/LICENSE +1 -1
- pdd_cli-0.0.42.dist-info/RECORD +0 -115
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/top_level.txt +0 -0
pdd/core/dump.py
ADDED
@@ -0,0 +1,554 @@

"""
Core dump generation and replay logic.
"""
import os
import sys
import json
import platform
import datetime
import shlex
import subprocess
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import click
import requests

from .. import __version__
from .errors import console, get_core_dump_errors

def _write_core_dump(
    ctx: click.Context,
    normalized_results: List[Any],
    invoked_subcommands: List[str],
    total_cost: float,
    terminal_output: Optional[str] = None,
) -> None:
    """Write a JSON core dump for this run if --core-dump is enabled."""
    if not ctx.obj.get("core_dump"):
        return

    try:
        core_dump_dir = Path.cwd() / ".pdd" / "core_dumps"
        core_dump_dir.mkdir(parents=True, exist_ok=True)

        timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y%m%dT%H%M%SZ")
        dump_path = core_dump_dir / f"pdd-core-{timestamp}.json"

        steps: List[Dict[str, Any]] = []
        for i, result_tuple in enumerate(normalized_results):
            command_name = (
                invoked_subcommands[i] if i < len(invoked_subcommands) else f"Unknown Command {i+1}"
            )

            cost = None
            model_name = None
            if isinstance(result_tuple, tuple) and len(result_tuple) == 3:
                _result_data, cost, model_name = result_tuple

            steps.append(
                {
                    "step": i + 1,
                    "command": command_name,
                    "cost": cost,
                    "model": model_name,
                }
            )

        # Only capture a limited subset of env vars to avoid leaking API keys
        sensitive_markers = ("KEY", "TOKEN", "SECRET", "PASSWORD")

        interesting_env = {}
        for k, v in os.environ.items():
            if k.startswith("PDD_") or k in ("VIRTUAL_ENV", "PYTHONPATH", "PATH"):
                # Redact obviously sensitive vars
                if any(m in k.upper() for m in sensitive_markers):
                    interesting_env[k] = "<redacted>"
                else:
                    interesting_env[k] = v

        # Collect file contents from tracked files
        file_contents = {}
        core_dump_files = ctx.obj.get("core_dump_files", set())

        if not ctx.obj.get("quiet"):
            console.print(f"[info]Core dump: Found {len(core_dump_files)} tracked files[/info]")

        # Auto-include relevant meta files for the invoked commands
        meta_dir = Path.cwd() / ".pdd" / "meta"
        if meta_dir.exists():
            for cmd in invoked_subcommands:
                # Look for meta files related to this command
                for meta_file in meta_dir.glob(f"*_{cmd}.json"):
                    core_dump_files.add(str(meta_file.resolve()))
                # Also include general meta files (without command suffix)
                for meta_file in meta_dir.glob("*.json"):
                    if meta_file.stem.endswith(f"_{cmd}") or not any(
                        meta_file.stem.endswith(f"_{c}") for c in ["generate", "test", "run", "fix", "update"]
                    ):
                        core_dump_files.add(str(meta_file.resolve()))

        # Auto-include PDD config files if they exist
        config_files = [
            Path.cwd() / ".pdd" / "config.json",
            Path.cwd() / ".pddconfig",
            Path.cwd() / "pdd.json",
        ]
        for config_file in config_files:
            if config_file.exists() and config_file.is_file():
                core_dump_files.add(str(config_file.resolve()))

        for file_path in core_dump_files:
            try:
                path = Path(file_path)
                if not ctx.obj.get("quiet"):
                    console.print(f"[info]Core dump: Checking file {file_path}[/info]")

                if path.exists() and path.is_file():
                    if path.stat().st_size < 50000:  # 50KB limit
                        try:
                            # Use relative path if possible for cleaner keys
                            try:
                                key = str(path.relative_to(Path.cwd()))
                            except ValueError:
                                key = str(path)

                            file_contents[key] = path.read_text(encoding='utf-8')
                            if not ctx.obj.get("quiet"):
                                console.print(f"[info]Core dump: Added content for {key}[/info]")
                        except UnicodeDecodeError:
                            file_contents[str(path)] = "<binary>"
                            if not ctx.obj.get("quiet"):
                                console.print(f"[warning]Core dump: Binary file {path}[/warning]")
                    else:
                        file_contents[str(path)] = "<too large>"
                        if not ctx.obj.get("quiet"):
                            console.print(f"[warning]Core dump: File too large {path}[/warning]")
                else:
                    if not ctx.obj.get("quiet"):
                        console.print(f"[warning]Core dump: File not found or not a file: {file_path}[/warning]")
            except Exception as e:
                file_contents[str(file_path)] = f"<error reading file: {e}>"
                if not ctx.obj.get("quiet"):
                    console.print(f"[warning]Core dump: Error reading {file_path}: {e}[/warning]")

        payload: Dict[str, Any] = {
            "schema_version": 1,
            "pdd_version": __version__,
            "timestamp_utc": timestamp,
            "argv": sys.argv[1:],  # without the 'pdd' binary name
            "cwd": str(Path.cwd()),
            "platform": {
                "system": platform.system(),
                "release": platform.release(),
                "version": platform.version(),
                "python": sys.version,
            },
            "global_options": {
                "force": ctx.obj.get("force"),
                "strength": ctx.obj.get("strength"),
                "temperature": ctx.obj.get("temperature"),
                "time": ctx.obj.get("time"),
                "verbose": ctx.obj.get("verbose"),
                "quiet": ctx.obj.get("quiet"),
                "local": ctx.obj.get("local"),
                "context": ctx.obj.get("context"),
                "output_cost": ctx.obj.get("output_cost"),
                "review_examples": ctx.obj.get("review_examples"),
            },
            "invoked_subcommands": invoked_subcommands,
            "total_cost": total_cost,
            "steps": steps,
            "errors": get_core_dump_errors(),
            "environment": interesting_env,
            "file_contents": file_contents,
            "terminal_output": terminal_output,
        }

        dump_path.write_text(json.dumps(payload, indent=2), encoding="utf-8")

        if not ctx.obj.get("quiet"):
            console.print(
                f"[info]Core dump written to [path]{dump_path}[/path]. "
                "You can attach this file when reporting a bug.[/info]"
            )
    except Exception as exc:
        # Never let core dumping itself crash the CLI
        if not ctx.obj.get("quiet"):
            console.print(f"[warning]Failed to write core dump: {exc}[/warning]", style="warning")


def _get_github_token() -> Optional[str]:
    """
    Get GitHub token using standard authentication methods.

    Tries in order:
    1. GitHub CLI (gh) if available
    2. GITHUB_TOKEN environment variable (standard in GitHub Actions)
    3. GH_TOKEN environment variable (alternative standard)
    4. PDD_GITHUB_TOKEN (backwards compatibility)

    Returns None if no token found.
    """
    # Try GitHub CLI first
    try:
        result = subprocess.run(
            ["gh", "auth", "token"],
            capture_output=True,
            text=True,
            timeout=5,
            check=False
        )
        if result.returncode == 0 and result.stdout.strip():
            token = result.stdout.strip()
            if token:
                return token
    except (subprocess.TimeoutExpired, FileNotFoundError):
        pass

    # Try standard environment variables
    token = os.getenv("GITHUB_TOKEN") or os.getenv("GH_TOKEN") or os.getenv("PDD_GITHUB_TOKEN")
    if token:
        return token

    return None


def _github_config(repo: Optional[str] = None) -> Optional[Tuple[str, str]]:
    """
    Return (token, repo) if GitHub issue posting is configured, otherwise None.

    Args:
        repo: Optional repository in format "owner/repo". If not provided,
              will try PDD_GITHUB_REPO env var or default to "promptdriven/pdd"
    """
    token = _get_github_token()
    if not token:
        return None

    if not repo:
        repo = os.getenv("PDD_GITHUB_REPO", "promptdriven/pdd")

    return token, repo


def _create_gist_with_files(token: str, payload: Dict[str, Any], core_path: Path) -> Optional[str]:
    """
    Create a GitHub Gist with core dump and all tracked files.

    Returns the Gist URL on success, None on failure.
    """
    try:
        # Prepare files for gist
        gist_files = {}

        # Add the core dump JSON
        gist_files["core-dump.json"] = {
            "content": json.dumps(payload, indent=2)
        }

        # Add all tracked files
        file_contents = payload.get("file_contents", {})
        for filename, content in file_contents.items():
            # GitHub gist filenames can't have slashes, replace with underscores
            safe_filename = filename.replace("/", "_").replace("\\", "_")
            gist_files[safe_filename] = {
                "content": content if not content.startswith("<") else f"# {content}"
            }

        # Add terminal output as a separate file if available
        terminal_output = payload.get("terminal_output")
        if terminal_output:
            gist_files["terminal_output.txt"] = {
                "content": terminal_output
            }

        # Create the gist
        url = "https://api.github.com/gists"
        headers = {
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
        }

        gist_data = {
            "description": f"PDD Core Dump - {core_path.name}",
            "public": False,  # Private gist
            "files": gist_files
        }

        resp = requests.post(url, headers=headers, json=gist_data, timeout=30)
        if 200 <= resp.status_code < 300:
            data = resp.json()
            return data.get("html_url")
    except Exception as e:
        console.print(f"[warning]Failed to create gist: {e}[/warning]", style="warning")
        return None
    return None


def _post_issue_to_github(token: str, repo: str, title: str, body: str) -> Optional[str]:
    """Post an issue to GitHub, returning the issue URL on success, otherwise None."""
    try:
        url = f"https://api.github.com/repos/{repo}/issues"
        headers = {
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
        }
        resp = requests.post(url, headers=headers, json={"title": title, "body": body}, timeout=10)
        if 200 <= resp.status_code < 300:
            data = resp.json()
            return data.get("html_url")
    except Exception:
        return None
    return None


def _write_replay_script(core_path: Path, payload: Dict[str, Any]) -> Optional[Path]:
    """Create a small shell script to replay the original core-dumped command."""
    cwd = payload.get("cwd")
    argv = payload.get("argv", [])
    env = payload.get("environment", {})

    if not cwd or not argv:
        return None

    script_path = core_path.with_suffix(".replay.sh")

    lines: List[str] = []
    lines.append("#!/usr/bin/env bash")
    lines.append("set -euo pipefail")
    lines.append("")
    lines.append(f"cd {shlex.quote(str(cwd))}")
    lines.append("")

    for key, value in env.items():
        lines.append(f"export {key}={shlex.quote(str(value))}")

    lines.append("")
    arg_str = " ".join(shlex.quote(str(a)) for a in argv)
    lines.append(f"pdd {arg_str}")
    lines.append("")

    script_path.write_text("\n".join(lines), encoding="utf-8")
    try:
        mode = script_path.stat().st_mode
        script_path.chmod(mode | 0o111)
    except OSError:
        pass

    return script_path

def _build_issue_markdown(
    payload: Dict[str, Any],
    description: str,
    core_path: Path,
    replay_path: Optional[Path],
    attachments: List[str],
    truncate_files: bool = False,
    gist_url: Optional[str] = None,
) -> Tuple[str, str]:
    """
    Build a GitHub issue title and markdown body from a core dump payload.

    Args:
        truncate_files: If True, truncate file contents aggressively for URL length limits.
            Use True for browser-based submission, False for API submission.
        gist_url: If provided, link to a GitHub Gist containing all files instead of
            including them in the body.
    """
    platform_info = payload.get("platform", {})
    system = platform_info.get("system", "unknown")
    release = platform_info.get("release", "")
    invoked = payload.get("invoked_subcommands") or []
    cmd_summary = " ".join(invoked) if invoked else "command"

    title = f"[core-dump] {cmd_summary} failed on {system}"

    argv = payload.get("argv", [])
    argv_str = " ".join(str(a) for a in argv)
    cwd = payload.get("cwd", "")
    total_cost = payload.get("total_cost", None)
    errors = payload.get("errors") or []
    pyver = platform_info.get("python")
    pdd_ver = payload.get("pdd_version")

    lines: List[str] = []

    lines.append(f"Core dump file: `{core_path}`")
    lines.append("")
    lines.append("## What happened")
    lines.append("")
    desc = (description or "").strip()
    if desc:
        lines.append(desc)
    else:
        lines.append("_(no additional description provided by user)_")
    lines.append("")
    lines.append("## Environment")
    lines.append("")
    if cwd:
        lines.append(f"- Working directory: `{cwd}`")
    if argv_str:
        lines.append(f"- CLI arguments: `{argv_str}`")
    if system or release:
        lines.append(f"- Platform: `{system} {release}`".strip())
    if pyver:
        lines.append(f"- Python: `{pyver}`")
    if pdd_ver:
        lines.append(f"- PDD version: `{pdd_ver}`")
    if total_cost is not None:
        try:
            lines.append(f"- Total estimated cost: `${float(total_cost):.6f}`")
        except (TypeError, ValueError):
            lines.append(f"- Total estimated cost: `{total_cost}`")
    lines.append("")
    lines.append("## Reproduction")
    lines.append("")

    # No more replay script mention – just show how to rerun the original command
    if cwd or argv:
        lines.append("To reproduce this issue in a similar environment, run:")
        lines.append("")
        lines.append("```bash")
        if cwd:
            lines.append(f"cd {shlex.quote(str(cwd))}")
        if argv:
            cmd_line = "pdd " + " ".join(shlex.quote(str(a)) for a in argv)
            lines.append(cmd_line)
        lines.append("```")
    else:
        lines.append(
            "Re-run the original PDD command in the same repository with `--core-dump` enabled."
        )
    lines.append("")

    if errors:
        lines.append("## Errors")
        lines.append("")
        for err in errors:
            cmd = err.get("command", "unknown")
            etype = err.get("type", "Error")
            lines.append(f"### {cmd} ({etype})")
            lines.append("")
            tb = err.get("traceback") or err.get("message") or ""
            lines.append("```text")
            lines.append(tb)
            lines.append("```")
            lines.append("")

    # Add terminal output section if available
    terminal_output = payload.get("terminal_output")
    if terminal_output:
        lines.append("## Terminal Output")
        lines.append("")
        if gist_url:
            # Link to gist for full output
            lines.append(f"**Full terminal output is available in the Gist:** [{gist_url}]({gist_url})")
            lines.append("")
            lines.append("(See `terminal_output.txt` in the gist)")
            lines.append("")
        elif truncate_files:
            # Truncate for browser mode
            MAX_OUTPUT_CHARS = 500
            lines.append("```text")
            if len(terminal_output) > MAX_OUTPUT_CHARS:
                lines.append(terminal_output[:MAX_OUTPUT_CHARS])
                lines.append(f"\n... (truncated, {len(terminal_output)} total chars)")
            else:
                lines.append(terminal_output)
            lines.append("```")
            lines.append("")
        else:
            # Include full output for API mode
            lines.append("```text")
            lines.append(terminal_output)
            lines.append("```")
            lines.append("")

    if attachments:
        lines.append("## Attachments (local paths)")
        lines.append("")
        for p in attachments:
            lines.append(f"- `{p}`")
        lines.append("")

    file_contents = payload.get("file_contents", {})
    if file_contents:
        lines.append("## File Contents")
        lines.append("")

        if gist_url:
            # Link to gist instead of embedding files
            lines.append(f"**All files are attached in this Gist:** [{gist_url}]({gist_url})")
            lines.append("")
            lines.append("Files included:")
            for filename in file_contents.keys():
                lines.append(f"- `{filename}`")
            lines.append("")
        elif truncate_files:
            # For browser-based submission, truncate to avoid URL length limits
            MAX_FILE_CHARS = 300  # Limit per file
            for filename, content in file_contents.items():
                lines.append(f"### {filename}")
                lines.append("```")
                if len(content) > MAX_FILE_CHARS:
                    lines.append(content[:MAX_FILE_CHARS])
                    lines.append(f"\n... (truncated, {len(content)} total chars)")
                else:
                    lines.append(content)
                lines.append("```")
                lines.append("")
        else:
            # For API-based submission without gist, include full contents
            for filename, content in file_contents.items():
                lines.append(f"### {filename}")
                lines.append("```")
                lines.append(content)
                lines.append("```")
                lines.append("")

    # --- Raw core dump JSON at the bottom ---
    if gist_url:
        # If we have a gist, no need for raw JSON (it's in the gist)
        pass
    elif truncate_files:
        # For browser-based submission, skip or heavily truncate raw JSON to save URL space
        lines.append("## Raw core dump (JSON)")
        lines.append("")
        lines.append("_Core dump JSON omitted to reduce URL length. Full dump available in the attached core file._")
        lines.append("")
    else:
        # For API-based submission, include more of the JSON
        try:
            raw_json = json.dumps(payload, indent=2, sort_keys=True)
        except TypeError:
            # Fallback: make values JSON-safe by stringifying non-serializable objects
            def _safe(obj: Any) -> Any:
                try:
                    json.dumps(obj)
                    return obj
                except TypeError:
                    return str(obj)

            safe_payload = {k: _safe(v) for k, v in payload.items()}
            raw_json = json.dumps(safe_payload, indent=2, sort_keys=True)

        MAX_JSON_CHARS = 8000  # guard so huge dumps don't blow up the issue body
        if len(raw_json) > MAX_JSON_CHARS:
            raw_display = raw_json[:MAX_JSON_CHARS] + (
                "\n... (truncated; see core file on disk for full dump)\n"
            )
        else:
            raw_display = raw_json

        lines.append("## Raw core dump (JSON)")
        lines.append("")
        lines.append("```json")
        lines.append(raw_display)
        lines.append("```")
        lines.append("")
    # ----------------------------------------

    lines.append("<!-- Generated by `pdd report-core` -->")

    body = "\n".join(lines)
    return title, body
pdd/core/errors.py
ADDED
@@ -0,0 +1,63 @@

"""
Error handling logic for PDD CLI.
"""
import traceback
from typing import Any, Dict, List
import click
from rich.console import Console
from rich.markup import MarkupError, escape
from rich.theme import Theme

# --- Initialize Rich Console ---
# Define a custom theme for consistent styling
custom_theme = Theme({
    "info": "cyan",
    "warning": "yellow",
    "error": "bold red",
    "success": "green",
    "path": "dim blue",
    "command": "bold magenta",
})
console = Console(theme=custom_theme)

# Buffer to collect errors for optional core dumps
_core_dump_errors: List[Dict[str, Any]] = []

def get_core_dump_errors() -> List[Dict[str, Any]]:
    """Return the list of collected errors."""
    return _core_dump_errors

def clear_core_dump_errors() -> None:
    """Clear the list of collected errors."""
    _core_dump_errors.clear()

def handle_error(exception: Exception, command_name: str, quiet: bool):
    """Prints error messages using Rich console."""
    # Record error details for potential core dump
    _core_dump_errors.append(
        {
            "command": command_name,
            "type": type(exception).__name__,
            "message": str(exception),
            "traceback": "".join(
                traceback.format_exception(type(exception), exception, exception.__traceback__)
            ),
        }
    )

    if not quiet:
        console.print(f"[error]Error during '{command_name}' command:[/error]", style="error")
        if isinstance(exception, FileNotFoundError):
            console.print(f" [error]File not found:[/error] {exception}", style="error")
        elif isinstance(exception, (ValueError, IOError)):
            console.print(f" [error]Input/Output Error:[/error] {exception}", style="error")
        elif isinstance(exception, click.UsageError):  # Handle Click usage errors explicitly if needed
            console.print(f" [error]Usage Error:[/error] {exception}", style="error")
            # click.UsageError should typically exit with 2, but we are handling it.
        elif isinstance(exception, MarkupError):
            console.print(" [error]Markup Error:[/error] Invalid Rich markup encountered.", style="error")
            # Print the error message safely escaped
            console.print(escape(str(exception)))
        else:
            console.print(f" [error]An unexpected error occurred:[/error] {exception}", style="error")
    # Do NOT re-raise e here. Let the command function return None.