pdd-cli 0.0.42__py3-none-any.whl → 0.0.90__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.
- pdd/__init__.py +4 -4
- pdd/agentic_common.py +863 -0
- pdd/agentic_crash.py +534 -0
- pdd/agentic_fix.py +1179 -0
- pdd/agentic_langtest.py +162 -0
- pdd/agentic_update.py +370 -0
- pdd/agentic_verify.py +183 -0
- pdd/auto_deps_main.py +15 -5
- pdd/auto_include.py +63 -5
- pdd/bug_main.py +3 -2
- pdd/bug_to_unit_test.py +2 -0
- pdd/change_main.py +11 -4
- pdd/cli.py +22 -1181
- pdd/cmd_test_main.py +80 -19
- pdd/code_generator.py +58 -18
- pdd/code_generator_main.py +672 -25
- pdd/commands/__init__.py +42 -0
- pdd/commands/analysis.py +248 -0
- pdd/commands/fix.py +140 -0
- pdd/commands/generate.py +257 -0
- pdd/commands/maintenance.py +174 -0
- pdd/commands/misc.py +79 -0
- pdd/commands/modify.py +230 -0
- pdd/commands/report.py +144 -0
- pdd/commands/templates.py +215 -0
- pdd/commands/utility.py +110 -0
- pdd/config_resolution.py +58 -0
- pdd/conflicts_main.py +8 -3
- pdd/construct_paths.py +281 -81
- pdd/context_generator.py +10 -2
- pdd/context_generator_main.py +113 -11
- pdd/continue_generation.py +47 -7
- pdd/core/__init__.py +0 -0
- pdd/core/cli.py +503 -0
- pdd/core/dump.py +554 -0
- pdd/core/errors.py +63 -0
- pdd/core/utils.py +90 -0
- pdd/crash_main.py +44 -11
- pdd/data/language_format.csv +71 -62
- pdd/data/llm_model.csv +20 -18
- pdd/detect_change_main.py +5 -4
- pdd/fix_code_loop.py +331 -77
- pdd/fix_error_loop.py +209 -60
- pdd/fix_errors_from_unit_tests.py +4 -3
- pdd/fix_main.py +75 -18
- pdd/fix_verification_errors.py +12 -100
- pdd/fix_verification_errors_loop.py +319 -272
- pdd/fix_verification_main.py +57 -17
- pdd/generate_output_paths.py +93 -10
- pdd/generate_test.py +16 -5
- pdd/get_jwt_token.py +48 -9
- pdd/get_run_command.py +73 -0
- pdd/get_test_command.py +68 -0
- pdd/git_update.py +70 -19
- pdd/increase_tests.py +7 -0
- pdd/incremental_code_generator.py +2 -2
- pdd/insert_includes.py +11 -3
- pdd/llm_invoke.py +1278 -110
- pdd/load_prompt_template.py +36 -10
- pdd/pdd_completion.fish +25 -2
- pdd/pdd_completion.sh +30 -4
- pdd/pdd_completion.zsh +79 -4
- pdd/postprocess.py +10 -3
- pdd/preprocess.py +228 -15
- pdd/preprocess_main.py +8 -5
- pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
- pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
- pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
- pdd/prompts/agentic_update_LLM.prompt +1071 -0
- pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
- pdd/prompts/auto_include_LLM.prompt +98 -101
- pdd/prompts/change_LLM.prompt +1 -3
- pdd/prompts/detect_change_LLM.prompt +562 -3
- pdd/prompts/example_generator_LLM.prompt +22 -1
- pdd/prompts/extract_code_LLM.prompt +5 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +14 -2
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/extract_promptline_LLM.prompt +17 -11
- pdd/prompts/find_verification_errors_LLM.prompt +6 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +16 -4
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +6 -41
- pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
- pdd/prompts/generate_test_LLM.prompt +21 -6
- pdd/prompts/increase_tests_LLM.prompt +1 -2
- pdd/prompts/insert_includes_LLM.prompt +1181 -6
- pdd/prompts/split_LLM.prompt +1 -62
- pdd/prompts/trace_LLM.prompt +25 -22
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/prompts/update_prompt_LLM.prompt +22 -1
- pdd/prompts/xml_convertor_LLM.prompt +3246 -7
- pdd/pytest_output.py +188 -21
- pdd/python_env_detector.py +151 -0
- pdd/render_mermaid.py +236 -0
- pdd/setup_tool.py +648 -0
- pdd/simple_math.py +2 -0
- pdd/split_main.py +3 -2
- pdd/summarize_directory.py +56 -7
- pdd/sync_determine_operation.py +918 -186
- pdd/sync_main.py +82 -32
- pdd/sync_orchestration.py +1456 -453
- pdd/sync_tui.py +848 -0
- pdd/template_registry.py +264 -0
- pdd/templates/architecture/architecture_json.prompt +242 -0
- pdd/templates/generic/generate_prompt.prompt +174 -0
- pdd/trace.py +168 -12
- pdd/trace_main.py +4 -3
- pdd/track_cost.py +151 -61
- pdd/unfinished_prompt.py +49 -3
- pdd/update_main.py +549 -67
- pdd/update_model_costs.py +2 -2
- pdd/update_prompt.py +19 -4
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/METADATA +20 -7
- pdd_cli-0.0.90.dist-info/RECORD +153 -0
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/licenses/LICENSE +1 -1
- pdd_cli-0.0.42.dist-info/RECORD +0 -115
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/top_level.txt +0 -0
pdd/pytest_output.py
CHANGED
@@ -1,14 +1,93 @@
 import argparse
 import json
 import io
+import re
 import sys
 import pytest
+import subprocess
+from pathlib import Path
 from rich.console import Console
 from rich.pretty import pprint
 import os
+from .python_env_detector import detect_host_python_executable

 console = Console()

+
+def _find_project_root(test_file: Path) -> Path | None:
+    """
+    Find the project root directory by looking for .pddrc (definitive PDD marker).
+
+    Only .pddrc is used as the project marker to ensure we don't incorrectly
+    identify project roots for non-PDD projects. This is a conservative approach
+    that maintains backward compatibility.
+
+    Args:
+        test_file: Path to the test file
+
+    Returns:
+        The project root directory if .pddrc is found, None otherwise.
+        When None is returned, the caller should use original behavior.
+    """
+    current = test_file.resolve().parent
+
+    # Walk up the directory tree looking for .pddrc only
+    while current != current.parent:
+        if (current / ".pddrc").exists():
+            return current
+        current = current.parent
+
+    # No .pddrc found - return None to signal original behavior should be used
+    return None
+
+
+_ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-?]*[ -/]*[@-~]")
+
+
+def _strip_ansi(text: str) -> str:
+    """Remove ANSI escape sequences from text for reliable parsing."""
+    return _ANSI_ESCAPE_RE.sub("", text)
+
+
+def extract_failing_files_from_output(pytest_output: str) -> list[str]:
+    """
+    Extract unique file paths from pytest FAILED output lines.
+
+    Parses patterns like:
+    - FAILED tests/test_foo.py::test_name - error message
+    - tests/test_foo.py::test_name FAILED
+
+    Args:
+        pytest_output: The combined stdout/stderr from a pytest run
+
+    Returns:
+        List of unique file paths (without ::test_name suffix) that had failures,
+        in the order they were first encountered.
+    """
+    cleaned_output = _strip_ansi(pytest_output)
+
+    failing_files = []
+    seen = set()
+
+    # Pattern 1: FAILED path/file.py::test_name (with optional error)
+    pattern1 = r'FAILED\s+([^\s:]+\.py)::'
+    for match in re.finditer(pattern1, cleaned_output):
+        file_path = match.group(1)
+        if file_path not in seen:
+            failing_files.append(file_path)
+            seen.add(file_path)
+
+    # Pattern 2: path/file.py::test_name FAILED (verbose output)
+    pattern2 = r'([^\s:]+\.py)::\S+\s+FAILED'
+    for match in re.finditer(pattern2, cleaned_output):
+        file_path = match.group(1)
+        if file_path not in seen:
+            failing_files.append(file_path)
+            seen.add(file_path)
+
+    return failing_files
+
+
 class TestResultCollector:
     __test__ = False  # Prevent pytest from collecting this plugin as a test

@@ -80,28 +159,116 @@ def run_pytest_and_capture_output(test_file: str) -> dict:
         )
         return {}

-
-
-
-
-
-
-
-
-
-                "
-
-
-
-                "return_code": int(result),
-                "warnings": collector.warnings,
-                "errors": collector.errors,
-                "failures": collector.failures,
-                "passed": collector.passed,
-            }
-        ],
+    # Use environment-aware Python executable for pytest execution
+    python_executable = detect_host_python_executable()
+
+    # Find the project root directory for proper pytest execution (PDD projects only)
+    test_path = Path(test_file).resolve()
+    project_root = _find_project_root(test_path)
+
+    # Build subprocess kwargs - only modify cwd/env for PDD projects (.pddrc found)
+    subprocess_kwargs = {
+        "capture_output": True,
+        "text": True,
+        "timeout": 300,
+        "stdin": subprocess.DEVNULL,
     }

+    pytest_args = [python_executable, "-B", "-m", "pytest", str(test_path), "-v"]
+
+    if project_root is not None:
+        # PDD project detected - set up proper environment
+        subprocess_kwargs["cwd"] = str(project_root)
+
+        # Build PYTHONPATH with both project root and src/ if it exists
+        paths_to_add = [str(project_root)]
+        src_dir = project_root / "src"
+        if src_dir.is_dir():
+            paths_to_add.insert(0, str(src_dir))  # src/ takes priority
+
+        env = os.environ.copy()
+        existing_pythonpath = env.get("PYTHONPATH", "")
+        if existing_pythonpath:
+            paths_to_add.append(existing_pythonpath)
+        env["PYTHONPATH"] = os.pathsep.join(paths_to_add)
+        subprocess_kwargs["env"] = env
+
+        # Add --rootdir to ensure pytest uses project's config
+        pytest_args.append(f"--rootdir={project_root}")
+
+    try:
+        # Run pytest using subprocess with the detected Python executable
+        # Use -B flag to disable bytecode caching, ensuring fresh imports
+        result = subprocess.run(pytest_args, **subprocess_kwargs)
+
+        stdout = result.stdout
+        stderr = result.stderr
+        return_code = result.returncode
+        parse_stdout = _strip_ansi(stdout or "")
+
+        # Parse the output to extract test results
+        # Count passed, failed, and skipped tests from the output
+        passed = parse_stdout.count(" PASSED")
+        failures = parse_stdout.count(" FAILED") + parse_stdout.count(" ERROR")
+        errors = 0  # Will be included in failures for subprocess execution
+        warnings = parse_stdout.lower().count("warning")
+
+        # If return code is 2, it indicates a pytest error
+        if return_code == 2:
+            errors = 1
+        # Safety net: if parsing missed failures due to formatting (e.g., ANSI colors),
+        # never report a passing result on a non-zero return code.
+        if return_code != 0 and failures == 0 and errors == 0:
+            if return_code == 1:
+                failures = 1
+            else:
+                errors = 1
+
+        return {
+            "test_file": test_file,
+            "test_results": [
+                {
+                    "standard_output": stdout,
+                    "standard_error": stderr,
+                    "return_code": return_code,
+                    "warnings": warnings,
+                    "errors": errors,
+                    "failures": failures,
+                    "passed": passed,
+                }
+            ],
+        }
+    except subprocess.TimeoutExpired:
+        return {
+            "test_file": test_file,
+            "test_results": [
+                {
+                    "standard_output": "",
+                    "standard_error": "Test execution timed out",
+                    "return_code": -1,
+                    "warnings": 0,
+                    "errors": 1,
+                    "failures": 0,
+                    "passed": 0,
+                }
+            ],
+        }
+    except Exception as e:
+        return {
+            "test_file": test_file,
+            "test_results": [
+                {
+                    "standard_output": "",
+                    "standard_error": f"Error running pytest: {str(e)}",
+                    "return_code": -1,
+                    "warnings": 0,
+                    "errors": 1,
+                    "failures": 0,
+                    "passed": 0,
+                }
+            ],
+        }
+
 def save_output_to_json(output: dict, output_file: str = "pytest.json"):
     """
     Saves the pytest output to a JSON file.
@@ -147,4 +314,4 @@ def main():
     save_output_to_json(pytest_output)

 if __name__ == "__main__":
-    main()
+    main()
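A minimal usage sketch for the reworked module, assuming the pdd package is importable; the test path is hypothetical, and the dict keys mirror the result built in the diff above:

    from pdd.pytest_output import (
        extract_failing_files_from_output,
        run_pytest_and_capture_output,
    )

    # Run one test file through the host environment's interpreter (path is illustrative).
    report = run_pytest_and_capture_output("tests/test_example.py")
    result = report["test_results"][0]
    print(result["return_code"], result["passed"], result["failures"])

    # Recover which files failed from the captured pytest text.
    print(extract_failing_files_from_output(result["standard_output"]))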
pdd/python_env_detector.py
ADDED
@@ -0,0 +1,151 @@
+"""
+Python Environment Detector
+
+Detects the host shell's Python environment (conda, venv, poetry, pipenv, etc.)
+and returns the appropriate Python executable for subprocess calls.
+
+This ensures that PDD operations use the same Python environment as the shell
+that launched PDD, rather than the uv tools environment where PDD is installed.
+"""
+
+import os
+import sys
+import shutil
+from pathlib import Path
+from typing import Optional
+
+
+def detect_host_python_executable() -> str:
+    """
+    Detect the host shell's Python executable.
+
+    This function checks for various virtual environment indicators
+    and returns the appropriate Python executable path.
+
+    Returns:
+        str: Path to the Python executable that should be used for subprocess calls.
+             Falls back to sys.executable if no host environment is detected.
+
+    Detection order:
+    1. VIRTUAL_ENV (works for venv, virtualenv, poetry, pipenv)
+    2. CONDA_PREFIX (conda-specific)
+    3. PATH resolution with shutil.which('python')
+    4. sys.executable (fallback)
+    """
+
+    # Check for virtual environment (venv, virtualenv, poetry, pipenv)
+    virtual_env = os.environ.get('VIRTUAL_ENV')
+    if virtual_env:
+        # Try common Python executable locations within the virtual environment
+        for python_name in ['python', 'python3']:
+            # Unix-like systems
+            venv_python = Path(virtual_env) / 'bin' / python_name
+            if venv_python.is_file():
+                return str(venv_python)
+
+            # Windows
+            venv_python = Path(virtual_env) / 'Scripts' / f'{python_name}.exe'
+            if venv_python.is_file():
+                return str(venv_python)
+
+    # Check for conda environment
+    conda_prefix = os.environ.get('CONDA_PREFIX')
+    if conda_prefix:
+        # Try common Python executable locations within conda environment
+        for python_name in ['python', 'python3']:
+            # Unix-like systems
+            conda_python = Path(conda_prefix) / 'bin' / python_name
+            if conda_python.is_file():
+                return str(conda_python)
+
+            # Windows
+            conda_python = Path(conda_prefix) / f'{python_name}.exe'
+            if conda_python.is_file():
+                return str(conda_python)
+
+    # Use PATH resolution as fallback (respects shell's PATH modifications)
+    which_python = shutil.which('python')
+    if which_python and Path(which_python).resolve() != Path(sys.executable).resolve():
+        # Only use if it's different from the current sys.executable
+        # This helps detect when we're in a different environment
+        return which_python
+
+    # Try python3 as well
+    which_python3 = shutil.which('python3')
+    if which_python3 and Path(which_python3).resolve() != Path(sys.executable).resolve():
+        return which_python3
+
+    # Final fallback to current executable
+    return sys.executable
+
+
+def get_environment_info() -> dict:
+    """
+    Get detailed information about the current Python environment.
+
+    Returns:
+        dict: Dictionary containing environment information for debugging
+    """
+    return {
+        'sys_executable': sys.executable,
+        'detected_executable': detect_host_python_executable(),
+        'virtual_env': os.environ.get('VIRTUAL_ENV'),
+        'conda_prefix': os.environ.get('CONDA_PREFIX'),
+        'conda_default_env': os.environ.get('CONDA_DEFAULT_ENV'),
+        'poetry_active': os.environ.get('POETRY_ACTIVE'),
+        'pipenv_active': os.environ.get('PIPENV_ACTIVE'),
+        'which_python': shutil.which('python'),
+        'which_python3': shutil.which('python3'),
+        'path': os.environ.get('PATH', '').split(os.pathsep)[:3],  # First 3 PATH entries
+    }
+
+
+def is_in_virtual_environment() -> bool:
+    """
+    Check if we're currently running in any kind of virtual environment.
+
+    Returns:
+        bool: True if in a virtual environment, False otherwise
+    """
+    return bool(
+        os.environ.get('VIRTUAL_ENV') or
+        os.environ.get('CONDA_PREFIX') or
+        os.environ.get('POETRY_ACTIVE') or
+        os.environ.get('PIPENV_ACTIVE')
+    )
+
+
+def get_environment_type() -> str:
+    """
+    Determine the type of virtual environment we're in.
+
+    Returns:
+        str: Type of environment ('conda', 'venv', 'poetry', 'pipenv', 'system', 'unknown')
+    """
+    if os.environ.get('CONDA_PREFIX'):
+        return 'conda'
+    elif os.environ.get('POETRY_ACTIVE'):
+        return 'poetry'
+    elif os.environ.get('PIPENV_ACTIVE'):
+        return 'pipenv'
+    elif os.environ.get('VIRTUAL_ENV'):
+        return 'venv'
+    elif is_in_virtual_environment():
+        return 'unknown'
+    else:
+        return 'system'
+
+
+if __name__ == '__main__':
+    # Demo/test functionality
+    print("Python Environment Detection")
+    print("=" * 40)
+
+    env_info = get_environment_info()
+    for key, value in env_info.items():
+        print(f"{key}: {value}")
+
+    print()
+    print(f"Environment type: {get_environment_type()}")
+    print(f"In virtual environment: {is_in_virtual_environment()}")
+    print(f"Detected Python executable: {detect_host_python_executable()}")
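A short sketch of how the new detector might be used; it relies only on the functions defined above plus the standard-library subprocess module:

    import subprocess

    from pdd.python_env_detector import (
        detect_host_python_executable,
        get_environment_type,
    )

    # Prefer the shell's active environment (venv/conda/poetry/pipenv) over the
    # interpreter PDD itself runs under, then invoke it for a subprocess call.
    python_exe = detect_host_python_executable()
    print(f"{get_environment_type()} environment -> {python_exe}")
    subprocess.run([python_exe, "-c", "import sys; print(sys.executable)"], check=True)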
pdd/render_mermaid.py
ADDED
@@ -0,0 +1,236 @@
+#!/usr/bin/env python3
+"""
+Render architecture.json as an interactive HTML Mermaid diagram.
+Usage:
+    python render_mermaid.py architecture.json "App Name" [output.html]
+Features:
+- Direct browser rendering (no external tools)
+- Beautiful modern UI with statistics
+- Color-coded module categories
+- Interactive Mermaid diagram
+- Self-contained HTML (works offline)
+"""
+import json
+import sys
+import html
+from pathlib import Path
+
+# Indentation constants for better maintainability
+INDENT = '    '  # 4 spaces per level
+LEVELS = {
+    'root': 0,
+    'subgraph': 1,
+    'node': 2,
+    'connection': 1,
+    'style': 1
+}
+
+def write_pretty_architecture_json(arch_file, architecture):
+    """Rewrite architecture JSON with consistent formatting so diffs stay stable."""
+    path = Path(arch_file)
+    formatted = json.dumps(architecture, indent=2)
+    if not formatted.endswith("\n"):
+        formatted += "\n"
+    path.write_text(formatted, encoding="utf-8")
+
+
+def generate_mermaid_code(architecture, app_name="System"):
+    """Generate Mermaid flowchart code from architecture JSON."""
+    # Escape quotes for Mermaid label, which uses HTML entities
+    escaped_app_name = app_name.replace('"', '&quot;')
+    # Match test expectation: add a trailing space only if quotes were present
+    prd_label = f'{escaped_app_name} ' if '&quot;' in escaped_app_name else escaped_app_name
+
+    lines = ["flowchart TB", f'{INDENT * LEVELS["node"]}PRD["{prd_label}"]', INDENT]
+
+    if not architecture:
+        lines.append(INDENT)
+
+    # Categorize modules by tags (frontend takes priority over backend)
+    frontend = [
+        m
+        for m in architecture
+        if any(t in m.get('tags', []) for t in ['frontend', 'react', 'nextjs', 'ui', 'page', 'component'])
+    ]
+    backend = [
+        m
+        for m in architecture
+        if m not in frontend
+        and any(t in m.get('tags', []) for t in ['backend', 'api', 'database', 'sqlalchemy', 'fastapi'])
+    ]
+    shared = [m for m in architecture if m not in frontend and m not in backend]
+
+    # Generate subgraphs
+    for group_name, modules in [("Frontend", frontend), ("Backend", backend), ("Shared", shared)]:
+        if modules:
+            lines.append(f"{INDENT * LEVELS['subgraph']}subgraph {group_name}")
+            for m in modules:
+                name = Path(m['filename']).stem
+                pri = m.get('priority', 0)
+                lines.append(f'{INDENT * LEVELS["node"]}{name}["{name} ({pri})"]')
+            lines.append(f"{INDENT * LEVELS['subgraph']}end")
+            lines.append(INDENT)
+
+    # PRD connections
+    if frontend:
+        lines.append(f"{INDENT * LEVELS['connection']}PRD --> Frontend")
+    if backend:
+        lines.append(f"{INDENT * LEVELS['connection']}PRD --> Backend")
+
+    # Add newline between PRD connections and dependencies
+    if frontend or backend:
+        lines.append("")
+
+    # Dependencies
+    for m in architecture:
+        src = Path(m['filename']).stem
+        for dep in m.get('dependencies', []):
+            dst = Path(dep).stem
+            lines.append(f'{INDENT * LEVELS["connection"]}{src} -->|uses| {dst}')
+
+    # Add newline after dependencies
+    if any(m.get('dependencies', []) for m in architecture):
+        lines.append(INDENT)
+
+    # Styles
+    lines.extend([f"{INDENT * LEVELS['style']}classDef frontend fill:#FFF3E0,stroke:#F57C00,stroke-width:2px",
+                  f"{INDENT * LEVELS['style']}classDef backend fill:#E3F2FD,stroke:#1976D2,stroke-width:2px",
+                  f"{INDENT * LEVELS['style']}classDef shared fill:#E8F5E9,stroke:#388E3C,stroke-width:2px",
+                  f"{INDENT * LEVELS['style']}classDef system fill:#E0E0E0,stroke:#616161,stroke-width:3px", INDENT])
+
+    # Apply classes
+    if frontend:
+        lines.append(f"{INDENT * LEVELS['style']}class {','.join([Path(m['filename']).stem for m in frontend])} frontend")
+    if backend:
+        lines.append(f"{INDENT * LEVELS['style']}class {','.join([Path(m['filename']).stem for m in backend])} backend")
+    if shared:
+        lines.append(f"{INDENT * LEVELS['style']}class {','.join([Path(m['filename']).stem for m in shared])} shared")
+    lines.append(f"{INDENT * LEVELS['style']}class PRD system")
+
+    return "\n".join(lines)
+
+
+def generate_html(mermaid_code, architecture, app_name):
+    """Generate interactive HTML with hover tooltips."""
+
+    # Create module data as JSON for tooltips
+    module_data = {}
+    for m in architecture:
+        module_id = Path(m['filename']).stem
+        module_data[module_id] = {
+            'filename': m['filename'],
+            'priority': m.get('priority', 'N/A'),
+            'description': m.get('description', 'No description'),
+            'dependencies': m.get('dependencies', []),
+            'tags': m.get('tags', []),
+            'filepath': m.get('filepath', ''),
+        }
+
+    module_json = json.dumps(module_data)
+    escaped_app_name = html.escape(app_name)
+
+    return f"""<!DOCTYPE html>
+<html><head><meta charset="UTF-8"><title>{escaped_app_name}</title>
+<script type=\"module\">
+import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';
+mermaid.initialize({{startOnLoad:true,theme:'default'}});
+window.addEventListener('load', () => {{
+    const moduleData = {module_json};
+
+    // Add hover listeners to all nodes
+    setTimeout(() => {{
+        const nodes = document.querySelectorAll('.node');
+        nodes.forEach(node => {{
+            const text = node.querySelector('.nodeLabel');
+            if (!text) return;
+
+            const nodeText = text.textContent.trim();
+            const moduleId = nodeText.split(' ')[0];
+            const data = moduleData[moduleId];
+
+            if (data) {{
+                node.style.cursor = 'pointer';
+
+                node.addEventListener('mouseenter', (e) => {{
+                    showTooltip(e, data);
+                }});
+
+                node.addEventListener('mouseleave', () => {{
+                    hideTooltip();
+                }});
+            }}
+        }});
+    }}, 500);
+}});
+function showTooltip(e, data) {{
+    hideTooltip();
+
+    const tooltip = document.createElement('div');
+    tooltip.id = 'module-tooltip';
+    tooltip.innerHTML = `
+        <div style="font-weight:600;margin-bottom:8px;font-size:1.1em;">${{data.filename}}</div>
+        <div style="margin-bottom:6px;"><strong>Priority:</strong> ${{data.priority}}</div>
+        <div style="margin-bottom:6px;"><strong>Path:</strong> ${{data.filepath}}</div>
+        <div style="margin-bottom:6px;"><strong>Tags:</strong> ${{data.tags.join(', ')}}</div>
+        <div style="margin-bottom:6px;"><strong>Dependencies:</strong> ${{data.dependencies.length > 0 ? data.dependencies.join(', ') : 'None'}}</div>
+        <div style="margin-top:8px;padding-top:8px;border-top:1px solid #ddd;font-size:0.9em;color:#444;">${{data.description}}</div>
+    `;
+
+    document.body.appendChild(tooltip);
+
+    const rect = e.target.closest('.node').getBoundingClientRect();
+    tooltip.style.left = rect.right + 10 + 'px';
+    tooltip.style.top = rect.top + window.scrollY + 'px';
+}}
+function hideTooltip() {{
+    const existing = document.getElementById('module-tooltip');
+    if (existing) existing.remove();
+}}
+</script>
+<style>
+*{{margin:0;padding:0;box-sizing:border-box}}
+body{{font-family:system-ui,sans-serif;background:#fff;color:#000;padding:2rem;max-width:1400px;margin:0 auto}}
+h1{{font-size:2rem;font-weight:600;margin-bottom:2rem;padding-bottom:1rem;border-bottom:2px solid #000}}
+.diagram{{border:1px solid #000;padding:2rem;margin:2rem 0;overflow-x:auto;position:relative}}
+.mermaid{{display:flex;justify-content:center}}
+#module-tooltip{{
+    position:absolute;
+    background:#fff;
+    border:2px solid #000;
+    padding:1rem;
+    max-width:400px;
+    z-index:1000;
+    box-shadow:4px 4px 0 rgba(0,0,0,0.1);
+    font-size:0.9rem;
+    line-height:1.5;
+}}
+.node{{transition:opacity 0.2s}}
+.node:hover{{opacity:0.8}}
+</style></head><body>
+<h1>{escaped_app_name}</h1>
+<div class="diagram"><pre class="mermaid">{mermaid_code}</pre></div>
+</body></html>"""
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("Usage: python render_mermaid.py <architecture.json> [app_name] [output.html]")
+        sys.exit(1)
+
+    arch_file = sys.argv[1]
+    app_name = sys.argv[2] if len(sys.argv) > 2 else "System Architecture"
+    output_file = sys.argv[3] if len(sys.argv) > 3 else f"{Path(arch_file).stem}_diagram.html"
+
+    with open(arch_file) as f:
+        architecture = json.load(f)
+    write_pretty_architecture_json(arch_file, architecture)
+
+    mermaid_code = generate_mermaid_code(architecture, app_name)
+    html_content = generate_html(mermaid_code, architecture, app_name)
+
+    with open(output_file, 'w', encoding='utf-8') as f:
+        f.write(html_content)
+
+    print(f"✅ Generated: {output_file}")
+    print(f"📊 Modules: {len(architecture)}")
+    print(f"🌐 Open {output_file} in your browser!")
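For context, a hedged sketch of driving the new renderer from Python rather than the CLI shown in its docstring; the module entries are invented, but they use only the keys the script reads ('filename', 'filepath', 'priority', 'description', 'tags', 'dependencies'):

    from pdd.render_mermaid import generate_html, generate_mermaid_code

    # Hypothetical architecture entries; tags drive the Frontend/Backend/Shared grouping.
    architecture = [
        {"filename": "user_api.py", "filepath": "backend/user_api.py", "priority": 1,
         "description": "REST endpoints for user management",
         "tags": ["backend", "fastapi"], "dependencies": ["db_models.py"]},
        {"filename": "db_models.py", "filepath": "backend/db_models.py", "priority": 2,
         "description": "SQLAlchemy models", "tags": ["backend", "database", "sqlalchemy"],
         "dependencies": []},
    ]

    mermaid_code = generate_mermaid_code(architecture, "My App")
    with open("my_app_diagram.html", "w", encoding="utf-8") as f:
        f.write(generate_html(mermaid_code, architecture, "My App"))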