pdd-cli 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pdd-cli might be problematic.
- pdd/auto_update.py +3 -3
- pdd/cli.py +1 -1
- pdd/data/llm_model.csv +1 -1
- pdd/fix_error_loop.py +52 -60
- pdd/pytest_output.py +150 -0
- {pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/METADATA +3 -3
- {pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/RECORD +11 -10
- {pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/LICENSE +0 -0
- {pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/top_level.txt +0 -0
pdd/auto_update.py
CHANGED
@@ -1,4 +1,4 @@
-import …
+import importlib.metadata
 import requests
 import semver
 import subprocess
@@ -14,7 +14,7 @@ def auto_update(package_name: str = "pdd-cli", latest_version: str = None) -> None:
     """
     try:
         # Get current installed version
-        current_version = …
+        current_version = importlib.metadata.version(package_name)
 
         # If latest_version is not provided, fetch from PyPI
         if latest_version is None:
@@ -63,7 +63,7 @@ def auto_update(package_name: str = "pdd-cli", latest_version: str = None) -> None:
             else:
                 print("Please answer 'y' or 'n'")
 
-    except …
+    except importlib.metadata.PackageNotFoundError:
        print(f"Package {package_name} is not installed")
    except Exception as e:
        print(f"Error checking for updates: {str(e)}")
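
Note: the replacement API here is the standard library's importlib.metadata, which raises PackageNotFoundError for uninstalled distributions, exactly as the new except clause expects. A minimal sketch of the pattern, standalone and not the package's exact code:

import importlib.metadata

def installed_version(package_name: str = "pdd-cli") -> str | None:
    # importlib.metadata.version returns the distribution's version string.
    try:
        return importlib.metadata.version(package_name)
    except importlib.metadata.PackageNotFoundError:
        # Raised when the distribution is not installed in the environment.
        return None

print(installed_version())  # e.g. "0.0.15", or None if pdd-cli is absent
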
pdd/cli.py
CHANGED
@@ -46,7 +46,7 @@ console = Console()
 @click.option("--review-examples", is_flag=True,
               help="Review and optionally exclude few-shot examples before command execution.")
 @click.option('--local', is_flag=True, help='Run commands locally instead of in the cloud.')
-@click.version_option(version="0.0.13")
+@click.version_option(version="0.0.15")
 @click.pass_context
 def cli(
     ctx,
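
Note: click.version_option attaches an eager --version flag that prints the version and exits before the command body runs, which is why the bump to 0.0.15 lives in a decorator. A minimal sketch of the mechanism, using a toy command rather than pdd's real CLI:

import click

@click.command()
@click.version_option(version="0.0.15")
def cli():
    """Toy command: running `cli --version` prints 0.0.15 and exits."""

if __name__ == "__main__":
    cli()
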
pdd/data/llm_model.csv
CHANGED
@@ -12,6 +12,6 @@ Ollama,"deepseek-r1:70b-llama-distill-q8_0",0.0,0.0,1315,,PWD,,,,,False
 Ollama,deepseek-r1:32b-qwen-distill-fp16,0.0,0.0,1316,,PWD,,,,,False
 OpenAI,"o3-mini",1.1,4.4,1319,,OPENAI_API_KEY,tiktoken,o200k_base,,100000,True
 OpenAI,"o1-2024-12-17",15,60,1331,,OPENAI_API_KEY,tiktoken,o200k_base,,32768,True
-OpenAI,"gpt-4o",2.5,10,1332,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
+OpenAI,"gpt-4o-2024-11-20",2.5,10,1332,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
 OpenAI,"deepseek-reasoner",0.55,2.19,1336,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
 Fireworks,accounts/fireworks/models/deepseek-r1,3,8,1338,,FIREWORKS_API_KEY,,,8192,,False
pdd/fix_error_loop.py
CHANGED
@@ -1,10 +1,14 @@
 #!/usr/bin/env python3
 import os
 import sys
-import re
 import subprocess
 import shutil
 from datetime import datetime
+import json
+
+# Added for the new pytest-based reporting:
+# import pytest
+# import io
 
 from rich import print as rprint
 from rich.console import Console
@@ -18,30 +22,36 @@ def escape_brackets(text: str) -> str:
     """Escape square brackets so Rich doesn't misinterpret them."""
     return text.replace("[", "\\[").replace("]", "\\]")
 
-def …
+def run_pytest_on_file(test_file: str) -> (int, int, int, str):
     """
-    …
-    …
-    Returns a tuple: (fails, errors, warnings)
+    Run pytest on the specified test file using subprocess.
+    Returns a tuple: (failures, errors, warnings, logs)
     """
-    … (old implementation truncated in this view)
+    try:
+        # Include "--json-only" to ensure only valid JSON is printed.
+        cmd = [sys.executable, "-m", "pdd.pytest_output", "--json-only", test_file]
+        result = subprocess.run(cmd, capture_output=True, text=True)
+
+        # Parse the JSON output from stdout
+        try:
+            output = json.loads(result.stdout)
+            test_results = output.get('test_results', [{}])[0]
+
+            failures = test_results.get('failures', 0)
+            errors = test_results.get('errors', 0)
+            warnings = test_results.get('warnings', 0)
+
+            # Combine stdout and stderr from the test results
+            logs = test_results.get('standard_output', '') + '\n' + test_results.get('standard_error', '')
+
+            return failures, errors, warnings, logs
+
+        except json.JSONDecodeError:
+            # If JSON parsing fails, return the raw output
+            return 1, 1, 0, f"Failed to parse pytest output:\n{result.stdout}\n{result.stderr}"
+
+    except Exception as e:
+        return 1, 1, 0, f"Error running pytest: {str(e)}"
 
 def fix_error_loop(unit_test_file: str,
                    code_file: str,
@@ -55,10 +65,12 @@ def fix_error_loop(unit_test_file: str,
                    verbose: bool = False):
     """
     Attempt to fix errors in a unit test and corresponding code using repeated iterations,
-    counting only the number of times we actually call the LLM fix function.
-    are re-run in the same iteration after a fix to see if we've succeeded,
-    'attempts' matches the number of fix attempts (not the total test runs).
-
+    counting only the number of times we actually call the LLM fix function.
+    The tests are re-run in the same iteration after a fix to see if we've succeeded,
+    so that 'attempts' matches the number of fix attempts (not the total test runs).
+
+    This updated version uses pytest's API directly to retrieve failures, errors, and warnings.
+
     Inputs:
         unit_test_file: Path to the file containing unit tests.
         code_file: Path to the file containing the code under test.
@@ -124,11 +136,9 @@ def fix_error_loop(unit_test_file: str,
         with open(error_log_file, "a") as elog:
             elog.write(f"\n{iteration_header}\n")
 
-        # 1) Run the unit tests
+        # 1) Run the unit tests using pytest's API directly.
         try:
-            …
-            result = subprocess.run(pytest_cmd, capture_output=True, text=True)
-            pytest_output = result.stdout + "\n" + result.stderr
+            fails, errors, warnings, pytest_output = run_pytest_on_file(unit_test_file)
         except Exception as e:
             rprint(f"[red]Error running pytest:[/red] {e}")
             return False, "", "", fix_attempts, total_cost, model_name
@@ -139,12 +149,10 @@ def fix_error_loop(unit_test_file: str,
 
         # Print to console (escaped):
         rprint(f"[magenta]Pytest output:[/magenta]\n{escape_brackets(pytest_output)}")
-
-        fails, errors, warnings = extract_pytest_summary(pytest_output)
         if verbose:
             rprint(f"[cyan]Iteration summary: {fails} failed, {errors} errors, {warnings} warnings[/cyan]")
 
-        # If …
+        # If tests are fully successful, we break out:
         if fails == 0 and errors == 0 and warnings == 0:
             rprint("[green]All tests passed with no warnings! Exiting loop.[/green]")
             break
@@ -196,7 +204,7 @@ def fix_error_loop(unit_test_file: str,
 
         # Call fix:
         try:
-            # …
+            # Read error log file into pytest_output so it has history of all previous attempts:
             with open(error_log_file, "r") as f:
                 pytest_output = f.read()
 
@@ -222,10 +230,7 @@ def fix_error_loop(unit_test_file: str,
             rprint(f"[red]Exceeded the budget of ${budget:.6f}. Ending fixing loop.[/red]")
             break
 
-        # …
-        # so skip the old "break if no changes" logic.
-
-        # If updated_unit_test is True, write to file:
+        # Update unit test file if needed.
         if updated_unit_test:
             try:
                 with open(unit_test_file, "w") as f:
@@ -236,7 +241,7 @@ def fix_error_loop(unit_test_file: str,
                 rprint(f"[red]Error writing updated unit test:[/red] {e}")
                 break
 
-        # …
+        # Update code file and run verification if needed.
         if updated_code:
             try:
                 with open(code_file, "w") as f:
@@ -274,13 +279,10 @@ def fix_error_loop(unit_test_file: str,
             except Exception as e:
                 rprint(f"[red]Error restoring backup code file:[/red] {e}")
                 break
-
-
-        # IMPORTANT: Re-run the tests in the *same* iteration to see if we have fixed the problem:
-        # So that if the new code or new test is good, we can break out with exactly one fix_attempt.
+
+        # Re-run the tests in the same iteration:
         try:
-            …
-            second_run_output = second_run_result.stdout + "\n" + second_run_result.stderr
+            fails2, errors2, warnings2, second_run_output = run_pytest_on_file(unit_test_file)
         except Exception as e:
             rprint(f"[red]Error running second pytest attempt in iteration {iteration}:[/red] {e}")
             return False, "", "", fix_attempts, total_cost, model_name
@@ -291,12 +293,10 @@ def fix_error_loop(unit_test_file: str,
 
         rprint(f"[magenta]Second pytest check:[/magenta]\n{escape_brackets(second_run_output)}")
 
-        fails2, errors2, warnings2 = extract_pytest_summary(second_run_output)
         if fails2 == 0 and errors2 == 0 and warnings2 == 0:
             rprint("[green]All tests passed on the second run of this iteration! Exiting loop.[/green]")
             break
         else:
-            # Update best iteration if needed:
             if (errors2 < best_iteration_info["errors"] or
                 (errors2 == best_iteration_info["errors"] and fails2 < best_iteration_info["fails"]) or
                 (errors2 == best_iteration_info["errors"] and fails2 == best_iteration_info["fails"] and warnings2 < best_iteration_info["warnings"])):
@@ -308,16 +308,14 @@ def fix_error_loop(unit_test_file: str,
                     "unit_test_backup": unit_test_backup,
                     "code_backup": code_backup
                 }
-        # If still not passing, we simply continue to the next iteration in the while loop.
 
-    # …
+    # Final test run:
     try:
-        …
-        final_result = subprocess.run(final_pytest_cmd, capture_output=True, text=True)
-        final_output = final_result.stdout + "\n" + final_result.stderr
+        final_fails, final_errors, final_warnings, final_output = run_pytest_on_file(unit_test_file)
     except Exception as e:
         rprint(f"[red]Error running final pytest:[/red] {e}")
         final_output = f"Error: {e}"
+        final_fails = final_errors = final_warnings = sys.maxsize
 
     with open(error_log_file, "a") as elog:
         elog.write("\n=== Final Pytest Run ===\n")
@@ -325,13 +323,9 @@ def fix_error_loop(unit_test_file: str,
 
     rprint(f"[blue]Final pytest output:[/blue]\n{escape_brackets(final_output)}")
 
-    # Possibly restore best iteration if the final run is not …
-    # The prompt says: "If the last run isn't the best iteration, restore the best."
-    final_fails, final_errors, final_warnings = extract_pytest_summary(final_output)
+    # Possibly restore best iteration if the final run is not as good:
     if best_iteration_info["attempt"] is not None:
-        # Compare final run to best iteration:
         is_better_final = False
-        # If final has strictly fewer errors, or tie then fewer fails, or tie then fewer warnings => keep final
         if final_errors < best_iteration_info["errors"]:
             is_better_final = True
         elif final_errors == best_iteration_info["errors"] and final_fails < best_iteration_info["fails"]:
@@ -363,8 +357,6 @@ def fix_error_loop(unit_test_file: str,
         rprint(f"[red]Error reading final files:[/red] {e}")
         final_unit_test, final_code = "", ""
 
-    # Check final results for success (no fails, no errors, no warnings)
-    final_fails, final_errors, final_warnings = extract_pytest_summary(final_output)
     success = (final_fails == 0 and final_errors == 0 and final_warnings == 0)
     if success:
         rprint("[green]Final tests passed with no warnings.[/green]")
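
Note: run_pytest_on_file collapses an entire pytest run into the four values the loop compares against zero, so "success" means no failures, no errors, and no warnings. A sketch of how a caller might drive that contract (a hypothetical retry loop, not the package's fix_error_loop):

from pdd.fix_error_loop import run_pytest_on_file

def run_until_green(test_file: str, max_attempts: int = 3) -> bool:
    """Illustrative retry loop: stop as soon as a run is fully clean."""
    for attempt in range(1, max_attempts + 1):
        fails, errors, warnings, logs = run_pytest_on_file(test_file)
        if fails == 0 and errors == 0 and warnings == 0:
            return True  # same break condition the real loop uses
        print(f"attempt {attempt}: {fails} failed, {errors} errors, {warnings} warnings")
        # A real caller (like fix_error_loop) would patch the files here before retrying.
    return False
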
pdd/pytest_output.py
ADDED
@@ -0,0 +1,150 @@
import argparse
import json
import io
import sys
import pytest
from rich.console import Console
from rich.pretty import pprint
import os

console = Console()

class TestResultCollector:
    __test__ = False  # Prevent pytest from collecting this plugin as a test

    def __init__(self):
        self.failures = 0
        self.errors = 0
        self.warnings = 0
        self.passed = 0
        self.logs = io.StringIO()
        self.stdout = ""
        self.stderr = ""

    def pytest_runtest_logreport(self, report):
        """
        Treat any failing 'call' phase as a test failure (matching what Pytest calls 'failed'),
        and only count setup/teardown failures (or 'report.outcome == "error"') as errors.
        """
        # 'report.when' can be "setup", "call", or "teardown"
        if report.when == "call":
            if report.passed:
                self.passed += 1
            elif report.failed:
                # All exceptions that occur in the test body are 'failures'
                self.failures += 1
            elif report.outcome == "error":
                # Not frequently used, but included for completeness
                self.errors += 1
        elif report.when in ("setup", "teardown") and report.failed:
            # Setup/teardown failures are 'errors'
            self.errors += 1

    def pytest_sessionfinish(self, session):
        """Capture warnings from pytest session."""
        if hasattr(session.config, 'pluginmanager'):
            terminal_reporter = session.config.pluginmanager.get_plugin("terminalreporter")
            if terminal_reporter:
                self.warnings = len(terminal_reporter.stats.get("warnings", []))

    def capture_logs(self):
        """Redirect stdout and stderr to capture logs."""
        sys.stdout = self.logs
        sys.stderr = self.logs

    def get_logs(self):
        """Return captured logs and reset stdout/stderr."""
        self.stdout = self.logs.getvalue()
        self.stderr = self.logs.getvalue()
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        return self.stdout, self.stderr

def run_pytest_and_capture_output(test_file: str) -> dict:
    """
    Runs pytest on the given test file and captures the output.

    Args:
        test_file: The path to the test file.

    Returns:
        A dictionary containing the pytest output.
    """
    if not os.path.exists(test_file):
        console.print(f"[bold red]Error: Test file '{test_file}' not found.[/]")
        return {}

    if not test_file.endswith(".py"):
        console.print(
            f"[bold red]Error: Test file '{test_file}' must be a Python file (.py).[/]"
        )
        return {}

    collector = TestResultCollector()
    try:
        collector.capture_logs()
        result = pytest.main([test_file], plugins=[collector])
    finally:
        stdout, stderr = collector.get_logs()

    return {
        "test_file": test_file,
        "test_results": [
            {
                "standard_output": stdout,
                "standard_error": stderr,
                "return_code": int(result),
                "warnings": collector.warnings,
                "errors": collector.errors,
                "failures": collector.failures,
                "passed": collector.passed,
            }
        ],
    }

def save_output_to_json(output: dict, output_file: str = "pytest.json"):
    """
    Saves the pytest output to a JSON file.

    Args:
        output: The dictionary containing the pytest output.
        output_file: The name of the output JSON file. Defaults to "pytest.json".
    """
    try:
        with open(output_file, "w", encoding="utf-8") as f:
            json.dump(output, f, indent=4)
        console.print(
            f"[green]Pytest output saved to '{output_file}'.[/green]"
        )
    except Exception as e:
        console.print(
            f"[bold red]Error saving output to JSON: {e}[/]"
        )

def main():
    """
    Main function for the pytest_output CLI tool.
    """
    parser = argparse.ArgumentParser(
        description="Capture pytest output and save it to a JSON file."
    )
    parser.add_argument(
        "test_file", type=str, help="Path to the test file."
    )
    parser.add_argument(
        "--json-only", action="store_true", help="Output only JSON to stdout."
    )
    args = parser.parse_args()

    pytest_output = run_pytest_and_capture_output(args.test_file)

    if args.json_only:
        # Print only valid JSON to stdout.
        print(json.dumps(pytest_output))
    else:
        console.print(f"Running pytest on: [blue]{args.test_file}[/blue]")
        pprint(pytest_output, console=console)  # Pretty print the output
        save_output_to_json(pytest_output)

if __name__ == "__main__":
    main()
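
Note: with --json-only the module prints exactly one JSON document to stdout, which is the contract run_pytest_on_file in fix_error_loop relies on. A sketch of the round trip; the test path is a made-up example:

import json
import subprocess
import sys

# "tests/test_example.py" is a hypothetical path.
proc = subprocess.run(
    [sys.executable, "-m", "pdd.pytest_output", "--json-only", "tests/test_example.py"],
    capture_output=True, text=True,
)
report = json.loads(proc.stdout)
summary = report["test_results"][0]
# Keys mirror run_pytest_and_capture_output's return value.
print(summary["failures"], summary["errors"], summary["warnings"], summary["return_code"])
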
{pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: pdd-cli
-Version: 0.0.13
+Version: 0.0.15
 Summary: PDD (Prompt-Driven Development) Command Line Interface
 Author: Greg Tanaka
 Author-email: glt@alumni.caltech.edu
@@ -40,7 +40,7 @@ Requires-Dist: semver==3.0.2
 Requires-Dist: setuptools==75.1.0
 Requires-Dist: python-Levenshtein
 
-.. image:: https://img.shields.io/badge/pdd--cli-v0.0.13-blue
+.. image:: https://img.shields.io/badge/pdd--cli-v0.0.15-blue
    :alt: PDD-CLI Version
 
 PDD (Prompt-Driven Development) Command Line Interface
@@ -101,7 +101,7 @@ After installation, verify:
 
    pdd --version
 
-You'll see the current PDD version (e.g., 0.0.13).
+You'll see the current PDD version (e.g., 0.0.15).
 
 Advanced Installation Tips
 --------------------------
{pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/RECORD
CHANGED
@@ -1,12 +1,12 @@
 pdd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pdd/auto_deps_main.py,sha256=2lZ-8WqzrPVMnzuVC-O7y1gazRLbY66fbmOdKnkYKNg,3630
 pdd/auto_include.py,sha256=aCa2QXDlOdKbh4vS3uDjWptkHB_Qv3QBNCbZe6mGWoo,6074
-pdd/auto_update.py,sha256=…
+pdd/auto_update.py,sha256=Pfav1hrqQIDjZIPuIvryBeM7k-Rc72feVUTJZPtigaU,2889
 pdd/bug_main.py,sha256=myKU9--QWdkV4Wf3mD2PoLPJFNgRjwf4z8s7TC28G_s,3720
 pdd/bug_to_unit_test.py,sha256=dsJNm6qAwx-m7RvFF5RquFJRzxzZGCWT4IKYnzVCUws,5569
 pdd/change.py,sha256=iqjWS5DrQ73yMkuUQlwIRIFlofmKdaK6t6-v3zHKL-4,4985
 pdd/change_main.py,sha256=yL_i1Ws5vt4vAkWiC826csNi2cHP6wKbwe_PfMqbbPY,11407
-pdd/cli.py,sha256=…
+pdd/cli.py,sha256=bIFpSQUzv9lHgkxD6PXnk1RZbq7mMATsJ0VjezENE40,16593
 pdd/cmd_test_main.py,sha256=aSCxRnSurg15AvPcJDAPp9xy8p_qqnjU1oV14Hi2R54,5301
 pdd/code_generator.py,sha256=n5akrX7VPe71X4RsD6kKqAVvzBLMlciJI4RtJA1PcgA,4375
 pdd/code_generator_main.py,sha256=G2eRBPXc1cGszkk0PbIPmJZHPaf_dw5d2yZbsvQZA3c,4793
@@ -23,7 +23,7 @@ pdd/detect_change_main.py,sha256=1Z4ymhjJaVr2aliGyqkqeqSmQ7QMgcl23p0wdsmBas0,365
 pdd/find_section.py,sha256=lz_FPY4KDCRAGlL1pWVZiutUNv7E4KsDFK-ymDWA_Ec,962
 pdd/fix_code_loop.py,sha256=L0yxq2yAziPIyFGb8lIP2mvufu8a_gtc5nnN2LuMuKs,8596
 pdd/fix_code_module_errors.py,sha256=M6AnlR2jF5LI-nNg6gIO5LvSkxiaLIUGyTvfnUfe1cU,4625
-pdd/fix_error_loop.py,sha256=…
+pdd/fix_error_loop.py,sha256=sIIaQcWi-0gUSEFmnwQWDwP70lGEx8uR7QQODa4JD0M,16902
 pdd/fix_errors_from_unit_tests.py,sha256=8qCEyHZ6lUSBtV9vhQyhgAxDuhngmOy7vVy2HObckd0,8934
 pdd/fix_main.py,sha256=02OIViH12BcsykpDp4Osxw2ndEeThnNakMFkzdpYr48,5333
 pdd/generate_output_paths.py,sha256=zz42GTx9eGyWIYSl3jcWvtJRGnieC3eoPM6DIVcWz2k,7219
@@ -46,6 +46,7 @@ pdd/postprocess_0.py,sha256=OW17GyCFLYErCyWh2tL4syuho3q2yFf2wyekQ4BLdPM,2168
 pdd/preprocess.py,sha256=7_mkREBFlWjIUIyZsYBlnCvIGtpVgPeToHUpaq_ZHC0,8177
 pdd/preprocess_main.py,sha256=dAgFGmjuJB1taZl31c1sY2jMGtQgjnWLbpeB7EFtojY,2977
 pdd/process_csv_change.py,sha256=10XTzVFQ0rE4lPSF93yhIW7VJmxmfe-hk1B7ui_qxJI,8415
+pdd/pytest_output.py,sha256=kmKiMHaQItrDVi_hTCtM5pfCgBuyZVEVRbxdchpS5CY,4796
 pdd/split.py,sha256=aISO7DcD8UkE_r7w1Ii466RgxSlVDFfTCymJ7IWUhsw,4692
 pdd/split_main.py,sha256=dV9G2YJDp12ik6x1a_dgBtyu27BSt4Fyd2trgxL7qFI,4123
 pdd/summarize_directory.py,sha256=3KUOP30RgkBXpz0_btmpubnO1vWAQ3tKyVI84Zp-E9Q,9041
@@ -57,7 +58,7 @@ pdd/update_main.py,sha256=5a4nsOOaAXULdk0BS9pj4blZ_QHBFeET37uaAqoJI2g,3912
 pdd/update_prompt.py,sha256=OdPRIAMu7OBx7E4SOU95hWgdtBY4oO8XOe1dvPChMlU,4351
 pdd/xml_tagger.py,sha256=NcyWacoXarRi6_16pchMhh1M7V-Gfz1cQImO_If2ia4,4241
 pdd/data/language_format.csv,sha256=xUTmFHXSBVBRfPV-NKG3oWo5_ped5ukP-ekFcIlVzJk,877
-pdd/data/llm_model.csv,sha256=…
+pdd/data/llm_model.csv,sha256=B240BLwwcweHFrkYT4FA-yBcz7YgVnx4dQopSedQG9I,1702
 pdd/prompts/auto_include_LLM.prompt,sha256=0t-Jmm5o6vVTmqsISTUiewqPT8bB389UZnJoHZvgtu4,13967
 pdd/prompts/bug_to_unit_test_LLM.prompt,sha256=--ysObDv9WzOEyJMuaKEdDHkRrR_1j0dmOtlAFr4YRg,1205
 pdd/prompts/change_LLM.prompt,sha256=W3sE6XZ2fb35XdqOykK1hDPtqkHSv9MZGD3sT8B8WjY,2083
@@ -89,9 +90,9 @@ pdd/prompts/trim_results_start_LLM.prompt,sha256=WwFlOHha4wzMLtRHDMI6GtcNdl2toE8
 pdd/prompts/unfinished_prompt_LLM.prompt,sha256=-JgBpiPTQZdWOAwOG1XpfpD9waynFTAT3Jo84eQ4bTw,1543
 pdd/prompts/update_prompt_LLM.prompt,sha256=_lGaxeVP4oF8yGqiN6yj6UE0j79lxfGdjsYr5w5KSYk,1261
 pdd/prompts/xml_convertor_LLM.prompt,sha256=YGRGXJeg6EhM9690f-SKqQrKqSJjLFD51UrPOlO0Frg,2786
-pdd_cli-0.0.13.dist-info/LICENSE,…
-pdd_cli-0.0.13.dist-info/METADATA,…
-pdd_cli-0.0.13.dist-info/WHEEL,…
-pdd_cli-0.0.13.dist-info/entry_points.txt,…
-pdd_cli-0.0.13.dist-info/top_level.txt,…
-pdd_cli-0.0.13.dist-info/RECORD,…
+pdd_cli-0.0.15.dist-info/LICENSE,sha256=-1bjYH-CEjGEQ8VixtnRYuu37kN6F9NxmZSDkBuUQ9o,1062
+pdd_cli-0.0.15.dist-info/METADATA,sha256=y3Ro0BThGEaW7iLsbzQQFC64cC90QFinQczvZW6L-tY,6808
+pdd_cli-0.0.15.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+pdd_cli-0.0.15.dist-info/entry_points.txt,sha256=Kr8HtNVb8uHZtQJNH4DnF8j7WNgWQbb7_Pw5hECSR-I,36
+pdd_cli-0.0.15.dist-info/top_level.txt,sha256=xjnhIACeMcMeDfVNREgQZl4EbTni2T11QkL5r7E-sbE,4
+pdd_cli-0.0.15.dist-info/RECORD,,
{pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/LICENSE
File without changes

{pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/WHEEL
File without changes

{pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/entry_points.txt
File without changes

{pdd_cli-0.0.13.dist-info → pdd_cli-0.0.15.dist-info}/top_level.txt
File without changes