pdd-cli 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/cli.py +1 -1
- pdd/context_generator.py +12 -6
- pdd/data/llm_model.csv +17 -15
- pdd/fix_error_loop.py +335 -188
- pdd/llm_invoke.py +17 -5
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +3 -3
- {pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/METADATA +29 -23
- {pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/RECORD +12 -12
- {pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/LICENSE +0 -0
- {pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/top_level.txt +0 -0
pdd/cli.py
CHANGED
@@ -46,7 +46,7 @@ console = Console()
 @click.option("--review-examples", is_flag=True,
     help="Review and optionally exclude few-shot examples before command execution.")
 @click.option('--local', is_flag=True, help='Run commands locally instead of in the cloud.')
-@click.version_option(version="0.0.7")
+@click.version_option(version="0.0.9")
 @click.pass_context
 def cli(
     ctx,
pdd/context_generator.py
CHANGED
@@ -79,12 +79,18 @@ def context_generator(code_module: str, prompt: str, language: str = "python", s
 
     # Step 3: Detect if the generation is incomplete using the unfinished_prompt function
     last_600_chars = llm_response['result'][-600:]
-
-
-
-
-
-
+    try:
+        reasoning, is_finished, unfinished_cost, unfinished_model = unfinished_prompt(
+            prompt_text=last_600_chars,
+            strength=0.5,
+            temperature=temperature,
+            verbose=verbose
+        )
+    except Exception as e:
+        print(f"[red]Error in unfinished_prompt: {e}[/red]")
+        is_finished = True  # Treat as finished if unfinished_prompt fails
+        unfinished_cost = 0.0
+        unfinished_model = None
 
     if not is_finished:
         if verbose:
pdd/data/llm_model.csv
CHANGED
@@ -1,17 +1,19 @@
 provider,model,input,output,coding_arena_elo,base_url,api_key,counter,encoder,max_tokens,max_completion_tokens,structured_output
-OpenAI,"o3-mini",1.1,4.4,1301,,OPENAI_API_KEY,tiktoken,o200k_base,,100000,True
-OpenAI,"deepseek-coder",0.14,0.28,1256,"https://api.deepseek.com/beta",DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
-OpenAI,"deepseek-reasoner",0.55,2.19,1309,"https://api.deepseek.com/beta",DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
-Ollama,"qwen2.5-coder:32b-instruct-fp16",0.0,0.0,1227,,PWD,,,,,False
-Ollama,"athene-v2:72b-q8_0",0.0,0.0,1253,,PWD,,,,,False
-Anthropic,"claude-3-5-sonnet-20241022",3,15,1308,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,8192,,False
-Google,"gemini-2.0-flash-exp",0.15,0.60,1281,,GOOGLE_API_KEY,,,8192,,False
-Fireworks,"accounts/fireworks/models/llama-v3p3-70b-instruct",3,3,1280,,FIREWORKS_API_KEY,,,16384,,False
-Fireworks,"accounts/fireworks/models/qwen2p5-coder-32b-instruct",.9,.9,1226,,FIREWORKS_API_KEY,,,2048,,False
 OpenAI,"gpt-4o-mini",0.15,0.60,1246,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
-OpenAI,"…
-OpenAI,"…
-GoogleVertexAI,gemini-2.0-pro-exp-02-05,…
+OpenAI,"grok-2-1212",2,10,1255,"https://api.x.ai/v1",XAI_API_KEY,tiktoken,o200k_base,4096,,False
+Anthropic,"claude-3-5-haiku-20241022",1,5,1259,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,8192,,False
+OpenAI,"deepseek-coder",0.14,0.28,1279,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
+Google,"gemini-2.0-flash-thinking-exp-01-21",.1,.4,1291,,GOOGLE_API_KEY,,,8192,,False
+GoogleVertexAI,"gemini-2.0-pro-exp-02-05",1.25,5,1299,,VERTEX_AI_API_KEY,,,8192,,False
+Anthropic,claude-3-5-sonnet-20241022,3,15,1312,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,8192,,False
+Google,gemini-exp-1206,1.25,5,1313,,GOOGLE_API_KEY,,,8192,,False
+OpenAI,"deepseek-r1-distill-llama-70b-specdec",5,5,1314,https://api.groq.com/openai/v1,GROQ_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,16384,,False
+Ollama,"deepseek-r1:70b-llama-distill-q8_0",0.0,0.0,1315,,PWD,,,,,False
+Ollama,deepseek-r1:32b-qwen-distill-fp16,0.0,0.0,1316,,PWD,,,,,False
+Fireworks,"accounts/fireworks/models/deepseek-r1-distill-qwen-32b",.5,.5,1317,,FIREWORKS_API_KEY,,,8192,,True
+Fireworks,accounts/fireworks/models/deepseek-r1-distill-llama-70b,1.2,1.2,1318,,FIREWORKS_API_KEY,,,16384,,False
+OpenAI,"o3-mini",1.1,4.4,1319,,OPENAI_API_KEY,tiktoken,o200k_base,,100000,True
+OpenAI,"o1-2024-12-17",15,60,1331,,OPENAI_API_KEY,tiktoken,o200k_base,,32768,True
+OpenAI,"chatgpt-4o-latest",2.5,10,1332,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
+OpenAI,"deepseek-reasoner",0.55,2.19,1336,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
+Fireworks,accounts/fireworks/models/deepseek-r1,3,8,1338,,FIREWORKS_API_KEY,,,8192,,False
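
Note: each row follows the schema in the header line, where provider and model identify the backend, input and output are prices, and coding_arena_elo ranks coding quality. As a rough illustration of reading rows like these (this is not pdd-cli's actual loader, whose code is not shown in this diff), the standard csv module suffices:

    import csv

    # Illustrative sketch only: field names come from the CSV header above,
    # but pdd-cli's real loading code is not part of this diff.
    def load_models(path="pdd/data/llm_model.csv"):
        with open(path, newline="") as f:
            rows = list(csv.DictReader(f))
        for row in rows:
            # Numeric columns arrive as strings; empty cells mean "not set".
            row["coding_arena_elo"] = int(row["coding_arena_elo"])
            row["input"] = float(row["input"]) if row["input"] else 0.0
            row["output"] = float(row["output"]) if row["output"] else 0.0
        return rows

    # Example: the highest-ELO row in the 0.0.9 table is deepseek-r1 (1338).
    best = max(load_models(), key=lambda r: r["coding_arena_elo"])
    print(best["provider"], best["model"], best["coding_arena_elo"])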
pdd/fix_error_loop.py
CHANGED
@@ -1,216 +1,363 @@
+#!/usr/bin/env python3
 import os
+import sys
 import re
-import shutil
 import subprocess
-
-from …
+import shutil
+from datetime import datetime
+
 from rich import print as rprint
+from rich.console import Console
 
+# Relative import from an internal module.
 from .fix_errors_from_unit_tests import fix_errors_from_unit_tests
 
-
-
-
-
-
-
-
-    def is_better_than(self, other: Optional['IterationResult']) -> bool:
-        if other is None:
-            return True
-        if self.total_fails_and_errors < other.total_fails_and_errors:
-            return True
-        if self.total_fails_and_errors == other.total_fails_and_errors:
-            return self.errors < other.errors  # Prioritize fewer errors
-        return False
-
-def extract_test_results(pytest_output: str) -> Tuple[int, int]:
-    """Extract the number of fails and errors from pytest output.
-
-    Args:
-        pytest_output (str): The complete pytest output text
-
-    Returns:
-        Tuple[int, int]: Number of fails and errors respectively
+console = Console()
+
+def escape_brackets(text: str) -> str:
+    """Escape square brackets so Rich doesn't misinterpret them."""
+    return text.replace("[", "\\[").replace("]", "\\]")
+
+def extract_pytest_summary(log_contents: str) -> (int, int, int):
     """
-    fails…
-
-
-
-    if …
-
+    Extract the number of fails, errors and warnings from pytest output.
+    Try to match a typical summary line first; if not found fall back to individual regex searches.
+    Returns a tuple: (fails, errors, warnings)
+    """
+    fails, errors, warnings = sys.maxsize, sys.maxsize, sys.maxsize  # defaults if not found
+    summary_pattern = re.compile(
+        r"=+\s*(\d+)\s+failed.*?,.*?(\d+)\s+passed.*?,.*?(\d+)\s+warnings", re.IGNORECASE | re.DOTALL
+    )
+    match = summary_pattern.search(log_contents)
+    if match:
+        fails = int(match.group(1))
+        # In some pytest outputs, failures and errors may be reported separately.
+        errors = int(match.group(1))  # assume same value if no distinct errors are provided
+        warnings = int(match.group(3))
     else:
-
-
-
-
-
-
-        error_match = re.search(r'(\d+)\s+error', pytest_output)
-        if error_match:
-            errors = int(error_match.group(1))
-
-    return fails, errors
+        failed_match = re.search(r"(\d+)\s+failed", log_contents, re.IGNORECASE)
+        errors_match = re.search(r"(\d+)\s+error", log_contents, re.IGNORECASE)
+        warnings_match = re.search(r"(\d+)\s+warning", log_contents, re.IGNORECASE)
+        fails = int(failed_match.group(1)) if failed_match else 0
+        errors = int(errors_match.group(1)) if errors_match else 0
+        warnings = int(warnings_match.group(1)) if warnings_match else 0
 
-
-
-
-
-
-
-
-
-
-
-
-
-    unit_test_file: str,
-    code_file: str,
-    prompt: str,
-    verification_program: str,
-    strength: float,
-    temperature: float,
-    max_attempts: int,
-    budget: float,
-    error_log_file: str = "error_log.txt",
-    verbose: bool = False
-) -> Tuple[bool, str, str, int, float, str]:
-    """
-    Attempt to fix errors in a unit test and its corresponding code file through multiple iterations.
+    return fails, errors, warnings
+
+def fix_error_loop(unit_test_file: str,
+                   code_file: str,
+                   prompt: str,
+                   verification_program: str,
+                   strength: float,
+                   temperature: float,
+                   max_attempts: int,
+                   budget: float,
+                   error_log_file: str = "error_log.txt",
+                   verbose: bool = False):
     """
-
-    if not all([os.path.exists(f) for f in [unit_test_file, code_file, verification_program]]):
-        raise FileNotFoundError("One or more input files do not exist")
-    if not (0 <= strength <= 1 and 0 <= temperature <= 1):
-        raise ValueError("Strength and temperature must be between 0 and 1")
+    Attempt to fix errors in a unit test and corresponding code using repeated iterations.
 
-
-
-
+    Inputs:
+        unit_test_file: Path to the file containing unit tests.
+        code_file: Path to the file containing the code under test.
+        prompt: Prompt that generated the code under test.
+        verification_program: Path to a Python program that verifies the code still works.
+        strength: float [0,1] representing LLM fix strength.
+        temperature: float [0,1] representing LLM temperature.
+        max_attempts: Maximum number of iterations for fixes.
+        budget: Maximum cost allowed for the fixing process.
+        error_log_file: Path to file to log errors (default: "error_log.txt").
+        verbose: Enable verbose logging (default: False).
+
+    Outputs:
+        success: Boolean indicating if the overall process succeeded.
+        final_unit_test: String contents of the final unit test file.
+        final_code: String contents of the final code file.
+        total_attempts: Number of fix attempts made.
+        total_cost: Total cost accumulated.
+        model_name: Name of the LLM model used.
+    """
+    # Check if unit_test_file and code_file exist.
+    if not os.path.isfile(unit_test_file):
+        rprint(f"[red]Error:[/red] Unit test file '{unit_test_file}' does not exist.")
+        return False, "", "", 0, 0.0, ""
+    if not os.path.isfile(code_file):
+        rprint(f"[red]Error:[/red] Code file '{code_file}' does not exist.")
+        return False, "", "", 0, 0.0, ""
+    if verbose:
+        rprint("[cyan]Starting fix error loop process.[/cyan]")
+
+    # Remove existing error log file if it exists.
+    if os.path.exists(error_log_file):
+        try:
             os.remove(error_log_file)
-
-
-
-
-
+            if verbose:
+                rprint(f"[green]Removed old error log file:[/green] {error_log_file}")
+        except Exception as e:
+            rprint(f"[red]Error:[/red] Could not remove error log file: {e}")
+            return False, "", "", 0, 0.0, ""
+
+    attempt = 0
     total_cost = 0.0
-    best_iteration: Optional[IterationResult] = None
     model_name = ""
+    best_iteration_info = {
+        "attempt": None,
+        "fails": sys.maxsize,
+        "errors": sys.maxsize,
+        "warnings": sys.maxsize,
+        "unit_test_backup": None,
+        "code_backup": None
+    }
+
+    # Timestamp for backup naming.
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
 
-    while …
-
-
-
-
-
-
-        with open(error_log_file, 'a') as f:
-            result = subprocess.run(['python', '-m', 'pytest', '-vv', '--no-cov', unit_test_file],
-                                    capture_output=True, text=True)
-            f.write("\n****************************************************************************************************\n")
-            f.write("\nAttempt " + str(attempt_count) + ":\n")
-            f.write("\n****************************************************************************************************\n")
-            f.write(result.stdout + result.stderr)
+    while attempt < max_attempts and total_cost < budget:
+        attempt += 1
+        iteration_header = f"=== Attempt {attempt} ==="
+        rprint(f"[bold blue]{iteration_header}[/bold blue]")
+        # Append header to error log.
+        with open(error_log_file, "a") as elog:
+            elog.write(f"\n{iteration_header}\n")
 
-        # …
-
-
+        # Step 2a: Run pytest on the unit test file.
+        try:
+            # Run pytest via subprocess.
+            # Here we assume that the unit_test_file is discoverable or we pass it explicitly.
+            pytest_cmd = [sys.executable, "-m", "pytest", "-vv", "--no-cov", unit_test_file]
+            result = subprocess.run(pytest_cmd, capture_output=True, text=True)
+            pytest_output = result.stdout + "\n" + result.stderr
+        except Exception as e:
+            rprint(f"[red]Error running pytest:[/red] {e}")
+            return False, "", "", attempt, total_cost, model_name
+
+        # Append the pytest output to the error log file.
+        with open(error_log_file, "a") as elog:
+            elog.write(pytest_output + "\n")
 
-        # …
-
+        # Escape square brackets for safe rprint.
+        output_escaped = escape_brackets(pytest_output)
+        rprint(f"[magenta]Pytest output:[/magenta]\n{output_escaped}")
+
+        # Step 2b: Extract numbers of fails, errors, warnings.
+        fails, errors, warnings = extract_pytest_summary(pytest_output)
+        if verbose:
+            rprint(f"[cyan]Iteration summary: {fails} failed, {errors} errors, {warnings} warnings[/cyan]")
+
+        # Check if tests passed and there are no warnings.
+        if fails == 0 and errors == 0 and warnings == 0:
+            rprint("[green]All tests passed with no warnings! Exiting loop.[/green]")
             break
-
-        # Step …
-
-
-
-
-
-
-
-            unit_test_file,
-
+
+        # Step 2c: Create backup copies for unit_test_file and code_file.
+        unit_test_dir, unit_test_name = os.path.split(unit_test_file)
+        code_dir, code_name = os.path.split(code_file)
+        unit_test_backup = os.path.join(unit_test_dir,
+            f"{os.path.splitext(unit_test_name)[0]}_{attempt}_{errors}_{fails}_{warnings}_{timestamp}.py")
+        code_backup = os.path.join(code_dir,
+            f"{os.path.splitext(code_name)[0]}_{attempt}_{errors}_{fails}_{warnings}_{timestamp}.py")
+        try:
+            shutil.copy(unit_test_file, unit_test_backup)
+            shutil.copy(code_file, code_backup)
+            if verbose:
+                rprint(f"[green]Created backup for unit test:[/green] {unit_test_backup}")
+                rprint(f"[green]Created backup for code file:[/green] {code_backup}")
+        except Exception as e:
+            rprint(f"[red]Error creating backup files:[/red] {e}")
+            return False, "", "", attempt, total_cost, model_name
+
+        # Update best_iteration tracker if this iteration has fewer errors, fails, warnings.
+        if (errors < best_iteration_info["errors"] or
+            (errors == best_iteration_info["errors"] and fails < best_iteration_info["fails"]) or
+            (errors == best_iteration_info["errors"] and fails == best_iteration_info["fails"] and warnings < best_iteration_info["warnings"])):
+            best_iteration_info = {
+                "attempt": attempt,
+                "fails": fails,
+                "errors": errors,
+                "warnings": warnings,
+                "unit_test_backup": unit_test_backup,
+                "code_backup": code_backup
+            }
+            if verbose:
+                rprint(f"[cyan]Updated best iteration to attempt {attempt} (errors: {errors}, fails: {fails}, warnings: {warnings}).[/cyan]")
 
-        # Read …
-
-
-
-
-
-
-
-
-
-
+        # Step 2d: Read file contents.
+        try:
+            with open(unit_test_file, "r") as f:
+                unit_test_contents = f.read()
+            with open(code_file, "r") as f:
+                code_contents = f.read()
+        except Exception as e:
+            rprint(f"[red]Error reading input files:[/red] {e}")
+            return False, "", "", attempt, total_cost, model_name
+
+        # Call the internal fix_errors_from_unit_tests function.
+        try:
+            (updated_unit_test,
+             updated_code,
+             fixed_unit_test,
+             fixed_code,
+             cost,
+             model_name) = fix_errors_from_unit_tests(
+                unit_test_contents,
+                code_contents,
+                prompt,
+                pytest_output,
+                error_log_file,
+                strength,
+                temperature,
                 verbose=verbose
             )
-
-
-        total_cost += iteration_cost
-        if total_cost > budget:
-            rprint("[bold red]Budget exceeded![/bold red]")
-            break
-
-        if not (update_unit_test or update_code):
-            rprint("[bold yellow]No changes needed or possible.[/bold yellow]")
+        except Exception as e:
+            rprint(f"[red]Error during fix_errors_from_unit_tests call:[/red] {e}")
            break
-
-        # …
-
-
-
-
-        if update_code:
-            with open(code_file, 'w') as f:
-                f.write(fixed_code)
-
-        # Run verification
-        rprint("[bold yellow]Running Verification.[/bold yellow]")
-        verification_result = subprocess.run(['python', verification_program],
-                                             capture_output=True, text=True)
-
-        if verification_result.returncode != 0:
-            rprint("[bold red]Verification failed! Restoring previous code.[/bold red]")
-            shutil.copy2(backup_code, code_file)
-            with open(error_log_file, 'a') as f:
-                f.write("****************************************************************************************************\n")
-                f.write("\nVerification program failed! Here is the output and errors from the verification program that was running the code under test:\n" + verification_result.stdout + verification_result.stderr)
-                f.write("****************************************************************************************************\n")
-                f.write(f"\nRestoring previous working code.\n")
-            continue
-
-        # Update best iteration if current is better
-        if current_iteration.is_better_than(best_iteration):
-            best_iteration = current_iteration
-
-        # Check budget after increment
+
+        # Add cost.
+        total_cost += cost
+        if verbose:
+            rprint(f"[cyan]Iteration fix cost: ${cost:.6f}, Total cost: ${total_cost:.6f}[/cyan]")
         if total_cost > budget:
-            rprint("[…
+            rprint(f"[red]Exceeded the budget of ${budget:.6f}. Ending fixing loop.[/red]")
+            break
+
+        # If neither unit test nor code was updated, likely no changes were needed.
+        if not updated_unit_test and not updated_code:
+            rprint("[yellow]No changes were suggested by the LLM. Exiting loop.[/yellow]")
             break
-
-
-
-
-
-
-
-
+
+        # Step 2e: If updated_unit_test is True, write the updates back.
+        if updated_unit_test:
+            try:
+                with open(unit_test_file, "w") as f:
+                    f.write(fixed_unit_test)
+                if verbose:
+                    rprint(f"[green]Unit test file updated.[/green]")
+            except Exception as e:
+                rprint(f"[red]Error writing updated unit test file:[/red] {e}")
+                break
+
+        # Increment attempt counter is already performed at loop start.
+        # Step 2f: If updated_code is True, update code file and verify.
+        if updated_code:
+            try:
+                with open(code_file, "w") as f:
+                    f.write(fixed_code)
+                if verbose:
+                    rprint(f"[green]Code file updated.[/green]")
+            except Exception as e:
+                rprint(f"[red]Error writing updated code file:[/red] {e}")
+                break
+
+            # Run the verification program.
+            try:
+                verify_cmd = [sys.executable, verification_program]
+                verify_result = subprocess.run(verify_cmd, capture_output=True, text=True)
+                verify_output = verify_result.stdout + "\n" + verify_result.stderr
+            except Exception as e:
+                rprint(f"[red]Error running verification program:[/red] {e}")
+                verify_output = f"Verification program error: {e}"
+
+            # Log verification output.
+            with open(error_log_file, "a") as elog:
+                elog.write(f"\n[Verification attempt at iteration {attempt}]\n")
+                elog.write(verify_output + "\n")
+            rprint(f"[blue]Verification program output:[/blue]\n{escape_brackets(verify_output)}")
+
+            # Check if verification failed. Assume non-zero return code indicates failure.
+            if verify_result.returncode != 0:
+                rprint(f"[red]Verification failed. Restoring last working code file from backup.[/red]")
+                try:
+                    # Restore code file from the backup of this iteration.
+                    shutil.copy(code_backup, code_file)
+                    with open(error_log_file, "a") as elog:
+                        elog.write(f"Restored code file from backup: {code_backup}\n")
+                except Exception as e:
+                    rprint(f"[red]Error restoring backup code file:[/red] {e}")
+                    break
+                continue  # Continue next loop iteration after restore.
+
+        # End of while loop iteration.
 
-    # Step …
-
-
-
-
-
-
-
+    # Step 4: After loop, run pytest one last time.
+    try:
+        final_pytest_cmd = [sys.executable, "-m", "pytest", "-vv", "--no-cov", unit_test_file]
+        final_result = subprocess.run(final_pytest_cmd, capture_output=True, text=True)
+        final_output = final_result.stdout + "\n" + final_result.stderr
+    except Exception as e:
+        rprint(f"[red]Error running final pytest:[/red] {e}")
+        final_output = f"Error: {e}"
+
+    # Append final output to error log.
+    with open(error_log_file, "a") as elog:
+        elog.write("\n=== Final Pytest Run ===\n")
+        elog.write(final_output + "\n")
+    rprint(f"[blue]Final pytest output:[/blue]\n{escape_brackets(final_output)}")
 
-    # Step …
-
-
-
-
-
-
+    # Step 5: If the last iteration is not the best, restore the best iteration backups.
+    best_attempt = best_iteration_info.get("attempt")
+    if best_attempt is not None:
+        # Optionally compare the last iteration numbers with best_iteration_info here.
+        if verbose:
+            rprint(f"[cyan]Restoring best iteration ({best_attempt}) from backups.[/cyan]")
+        try:
+            if best_iteration_info["unit_test_backup"]:
+                shutil.copy(best_iteration_info["unit_test_backup"], unit_test_file)
+            if best_iteration_info["code_backup"]:
+                shutil.copy(best_iteration_info["code_backup"], code_file)
+        except Exception as e:
+            rprint(f"[red]Error restoring best iteration backups:[/red] {e}")
 
-
+    # Read final file contents.
+    try:
+        with open(unit_test_file, "r") as f:
+            final_unit_test = f.read()
+        with open(code_file, "r") as f:
+            final_code = f.read()
+    except Exception as e:
+        rprint(f"[red]Error reading final files:[/red] {e}")
+        final_unit_test, final_code = "", ""
+
+    # Determine success based on final pytest result: pass if no failures, errors or warnings.
+    final_fails, final_errors, final_warnings = extract_pytest_summary(final_output)
+    success = (final_fails == 0 and final_errors == 0 and final_warnings == 0)
+    if success:
+        rprint("[green]Final tests passed with no warnings.[/green]")
+    else:
+        rprint("[red]Final tests still failing or producing warnings.[/red]")
+
+    return success, final_unit_test, final_code, attempt, total_cost, model_name
+
+# If this module is run directly for testing purposes:
+if __name__ == "__main__":
+    # Example usage of fix_error_loop.
+    unit_test_file = "tests/test_example.py"
+    code_file = "src/code_example.py"
+    prompt = "Write a function that adds two numbers"
+    verification_program = "verify_code.py"  # Program that verifies the code
+    strength = 0.5
+    temperature = 0.0
+    max_attempts = 5
+    budget = 1.0  # Maximum cost budget
+    error_log_file = "error_log.txt"
+    verbose = True
+
+    success, final_unit_test, final_code, attempts, total_cost, model_name = fix_error_loop(
+        unit_test_file,
+        code_file,
+        prompt,
+        verification_program,
+        strength,
+        temperature,
+        max_attempts,
+        budget,
+        error_log_file,
+        verbose
+    )
+
+    rprint(f"\n[bold]Process complete.[/bold]")
+    rprint(f"Success: {success}")
+    rprint(f"Attempts: {attempts}")
+    rprint(f"Total cost: ${total_cost:.6f}")
+    rprint(f"Model used: {model_name}")
+    rprint(f"Final unit test contents:\n{final_unit_test}")
+    rprint(f"Final code contents:\n{final_code}")
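
Note: the rewritten extract_pytest_summary has two paths, a combined summary-line regex and per-count fallbacks that default to 0 when a count is absent. A quick sketch of both paths on made-up pytest footers (the sample strings are illustrative, not captured pdd output):

    # Illustrative only: exercising extract_pytest_summary on invented footers.
    from pdd.fix_error_loop import extract_pytest_summary

    print(extract_pytest_summary("=== 2 failed, 5 passed, 3 warnings in 0.42s ==="))
    # -> (2, 2, 3): on the summary path, errors mirror the failed count.

    print(extract_pytest_summary("=== 7 passed in 0.31s ==="))
    # -> (0, 0, 0): nothing matches on the fallback path, so all counts default to 0.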
pdd/llm_invoke.py
CHANGED
@@ -5,7 +5,7 @@ llm_invoke.py
 This module provides a single function, llm_invoke, that runs a prompt with a given input
 against a language model (LLM) using Langchain and returns the output, cost, and model name.
 The function supports model selection based on cost/ELO interpolation controlled by the
-…
+"strength" parameter. It also implements a retry mechanism: if a model invocation fails,
 it falls back to the next candidate (cheaper for strength < 0.5, or higher ELO for strength ≥ 0.5).
 
 Usage:

@@ -62,11 +62,23 @@ class CompletionStatusHandler(BaseCallbackHandler):
         self.is_complete = True
         if response.generations and response.generations[0]:
             generation = response.generations[0][0]
-
-
+            # Safely get generation_info; if it's None, default to {}
+            generation_info = generation.generation_info or {}
+            self.finish_reason = (generation_info.get('finish_reason') or "").lower()
+
+            # Attempt to get token usage from generation.message if available.
+            if (
+                hasattr(generation, "message")
+                and generation.message is not None
+                and hasattr(generation.message, "usage_metadata")
+                and generation.message.usage_metadata
+            ):
                 usage_metadata = generation.message.usage_metadata
-
-
+            else:
+                usage_metadata = generation_info.get("usage_metadata", {})
+
+            self.input_tokens = usage_metadata.get('input_tokens', 0)
+            self.output_tokens = usage_metadata.get('output_tokens', 0)
 
 class ModelInfo:
     """
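
Note: the docstring hunk above describes the selection and retry policy, but the selection code itself is outside this diff. A minimal sketch of the fallback ordering it describes, under stated assumptions: the names (cost, candidates, run) are illustrative, not llm_invoke's real internals, and candidate dicts are assumed to carry the same fields as rows of llm_model.csv:

    # Minimal sketch of the retry policy the docstring describes; names are
    # illustrative assumptions, not llm_invoke's actual implementation.
    def cost(m):
        return m["input"] + m["output"]  # crude per-token price proxy

    def fallback_candidates(candidates, selected, strength):
        """Order the remaining models as the docstring describes."""
        if strength < 0.5:
            cheaper = [m for m in candidates if cost(m) < cost(selected)]
            return sorted(cheaper, key=cost, reverse=True)  # next-cheapest first
        stronger = [m for m in candidates
                    if m["coding_arena_elo"] > selected["coding_arena_elo"]]
        return sorted(stronger, key=lambda m: m["coding_arena_elo"])  # next-highest ELO first

    def invoke_with_fallback(candidates, selected, strength, run):
        for model in [selected] + fallback_candidates(candidates, selected, strength):
            try:
                return run(model)  # run() performs the actual LLM call
            except Exception:
                continue           # on failure, fall back to the next candidate
        raise RuntimeError("All candidate models failed")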
pdd/prompts/fix_errors_from_unit_tests_LLM.prompt
CHANGED

@@ -55,8 +55,8 @@
 Step 1. Compare the prompt to the code_under_test and explain differences, if any.
 Step 2. Compare the prompt to the unit_test and explain differences, if any.
 Step 3. For each prior attempted fix (if any), explain in a few paragraphs for each attempt why it might not have worked.
-Step 4. Write several paragraphs explaining the root cause of each of the errors.
-Step 5. Explain in detail step by step how to solve each of the errors. For each error, there should be several paragraphs description of the solution steps. Sometimes logging or print statements can help debug the code.
-Step 6. Review the above steps and correct for any errors in the code under test or unit test.
+Step 4. Write several paragraphs explaining the root cause of each of the errors and each of the warnings in the code_under_test and unit_test.
+Step 5. Explain in detail step by step how to solve each of the errors and warnings. For each error and warning, there should be several paragraphs description of the solution steps. Sometimes logging or print statements can help debug the code.
+Step 6. Review the above steps and correct for any errors and warnings in the code under test or unit test.
 Step 7. For the code that need changes, write the corrected code_under_test and/or corrected unit_test in its/their entirety.
 </instructions>
{pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: pdd-cli
-Version: 0.0.7
+Version: 0.0.9
 Summary: PDD (Prompt-Driven Development) Command Line Interface
 Author: Greg Tanaka
 Author-email: glt@alumni.caltech.edu

@@ -40,7 +40,7 @@ Requires-Dist: semver==3.0.2
 Requires-Dist: setuptools==75.1.0
 Requires-Dist: python-Levenshtein
 
-.. image:: https://img.shields.io/badge/pdd--cli-v0.0.7-blue
+.. image:: https://img.shields.io/badge/pdd--cli-v0.0.9-blue
    :alt: PDD-CLI Version
 
 PDD (Prompt-Driven Development) Command Line Interface

@@ -51,34 +51,40 @@ PDD (Prompt-Driven Development) is a command-line interface that harnesses AI mo
 Key Features
 ------------
 
-- Cloud or Local Execution
-
+- Cloud or Local Execution
+
+• Run in the cloud (default) with no need to manage API keys.
 • Switch to local mode with the ``--local`` flag for full control using your own API keys.
 
-- GitHub Single Sign-On
-
+- GitHub Single Sign-On
+
+• Secure authentication with GitHub SSO in cloud mode.
 • Automatic token handling so you can focus on coding.
 
-- Comprehensive Command Suite
-
-• …
-• …
-• …
-• …
-• …
+- Comprehensive Command Suite
+
+• Generate: Create runnable code from prompt files.
+• Example: Build examples that showcase generated code usage.
+• Test: Produce or improve unit tests based on coverage goals.
+• Fix & Crash: Automatically identify and correct errors, iterating if necessary.
+• Update & Change: Keep your prompt files in sync with evolving codebases.
+• Split & Detect: Manage and analyze complex prompts at scale.
 • …and more!
 
-- Automated Testing & Cost Tracking
-• Generate coverage reports and additional test cases on the fly.
-• Optional cost-tracking (CSV) for AI usage.
+- Automated Testing & Cost Tracking
 
-
-• …
-
+• Generate coverage reports and additional test cases on the fly.
+• Optional cost-tracking (CSV) for AI usage.
+
+- Rich Configuration & Customization
+
+• Environment variables to define default output paths and settings.
+• Fine-tune AI model behavior with ``--strength`` and ``--temperature``.
 • Built-in auto-update (configurable via env var).
 
-- Cross-Language Support
-
+- Cross-Language Support
+
+• Python, Java, JavaScript, Ruby, Go, C++, and beyond.
 • Prompt naming conventions let PDD infer language automatically.
 
 

@@ -95,7 +101,7 @@ After installation, verify:
 
 pdd --version
 
-You…
+You'll see the current PDD version (e.g., 0.0.9).
 
 Advanced Installation Tips
 --------------------------

@@ -111,7 +117,7 @@ Create and activate a virtual environment, then install pdd-cli:
 
 # Activate environment
 # On Windows:
-…
+pdd-env\Scripts\activate
 # On Unix/MacOS:
 source pdd-env/bin/activate
 
{pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/RECORD
CHANGED

@@ -6,7 +6,7 @@ pdd/bug_main.py,sha256=myKU9--QWdkV4Wf3mD2PoLPJFNgRjwf4z8s7TC28G_s,3720
 pdd/bug_to_unit_test.py,sha256=o9bW065UxjvGAn9u8UIwYbIM6R-WcGB2YWuOSXAD484,5569
 pdd/change.py,sha256=JvHPxK9FfnuGJsEkzTpzWI4tg24OJahK0qTLDZSyNPg,4985
 pdd/change_main.py,sha256=yL_i1Ws5vt4vAkWiC826csNi2cHP6wKbwe_PfMqbbPY,11407
-pdd/cli.py,sha256=…
+pdd/cli.py,sha256=i3lekbCEiIDaG7ICYtkXaF6txmvrxc4Gvi97lj06v7I,16592
 pdd/cmd_test_main.py,sha256=aSCxRnSurg15AvPcJDAPp9xy8p_qqnjU1oV14Hi2R54,5301
 pdd/code_generator.py,sha256=_b5t9cZ6pExHKKAE2ulfdvfLfbsQj-uwrbk520n-R-Q,4375
 pdd/code_generator_main.py,sha256=G2eRBPXc1cGszkk0PbIPmJZHPaf_dw5d2yZbsvQZA3c,4793

@@ -14,7 +14,7 @@ pdd/comment_line.py,sha256=sX2hf4bG1fILi_rvI9MkkwCZ2IitgKkW7nOiw8aQKPY,1845
 pdd/conflicts_in_prompts.py,sha256=PyasqXGA92JkroXv8b4-B_fYYC4cDYTsGEl9fj01rlc,4654
 pdd/conflicts_main.py,sha256=O87s9baSa9DJMndxPIdsnYO_spoajcv9jii3XYt_-fM,3473
 pdd/construct_paths.py,sha256=8hxkTI_AF5XNpGR4JqCsF4olDBtL8NslXdOZGQt78WM,10039
-pdd/context_generator.py,sha256=…
+pdd/context_generator.py,sha256=_0We2ZxA3eiQRbMZ8Md_i5kTALq1zoC2OLMLO5puIi0,5798
 pdd/context_generator_main.py,sha256=TtsY3jHictdEjmB4cHyNwXmZW_LfHJp3KW3UXyzR2cU,2735
 pdd/continue_generation.py,sha256=hAVySc6oEsM_Zpj5AWBKEZqMWgoLlQBHcFtkAZ9sZ0E,5192
 pdd/crash_main.py,sha256=YngROG62ORLGm-IORLq1vlVVidBGc9g2k0GAmq1jFNM,5287

@@ -23,7 +23,7 @@ pdd/detect_change_main.py,sha256=1Z4ymhjJaVr2aliGyqkqeqSmQ7QMgcl23p0wdsmBas0,365
 pdd/find_section.py,sha256=lz_FPY4KDCRAGlL1pWVZiutUNv7E4KsDFK-ymDWA_Ec,962
 pdd/fix_code_loop.py,sha256=L0yxq2yAziPIyFGb8lIP2mvufu8a_gtc5nnN2LuMuKs,8596
 pdd/fix_code_module_errors.py,sha256=BAQ8UtJ61wCEaIwK3MvBVJsaOTDgCCQlFES1u47CVao,4625
-pdd/fix_error_loop.py,sha256=…
+pdd/fix_error_loop.py,sha256=EtjqF9e4DVFQ0hh8fsKGYMqYwmN24yOHtziPMZFcvrA,15889
 pdd/fix_errors_from_unit_tests.py,sha256=vul8X9S2U_dr9dbfjwy0x4K8jIX2B810KLOOhboyySQ,8935
 pdd/fix_main.py,sha256=02OIViH12BcsykpDp4Osxw2ndEeThnNakMFkzdpYr48,5333
 pdd/generate_output_paths.py,sha256=zz42GTx9eGyWIYSl3jcWvtJRGnieC3eoPM6DIVcWz2k,7219

@@ -36,7 +36,7 @@ pdd/git_update.py,sha256=Ya7eI7YFtGIpT7FdziFJfnFkiZlj8I9Lh98lqtXfClc,2855
 pdd/increase_tests.py,sha256=sqlfkx4v84Zx22wW6_qwIsupbpLgy6IyPwp-11B22bs,3243
 pdd/insert_includes.py,sha256=bocHBAzs4MAudtIw-JjyHO0kYajwlOLS9jBzV33_LRU,5224
 pdd/install_completion.py,sha256=joTIKRkx0e6kRrXj9NXtMODnIG-G0Twt7wBmj8TirmE,5102
-pdd/llm_invoke.py,sha256=…
+pdd/llm_invoke.py,sha256=XQt4FNysaKsGQVmqNFfsRob3lxXowl8LUMpfz2Kr5h4,16802
 pdd/load_prompt_template.py,sha256=4NH8_t5eon_vcyTznqtemJ_yAPkTJm_hSdTRgzj3qEQ,1907
 pdd/pdd_completion.fish,sha256=rs-43fa3kcDBN1uy4oxiofLAWmaqW0U2j5Mu4wCHh5M,6121
 pdd/pdd_completion.sh,sha256=qurWrEksqptjryBZszxHv6i0MqgnIqJenMBDrzMgI98,4535

@@ -57,7 +57,7 @@ pdd/update_main.py,sha256=5a4nsOOaAXULdk0BS9pj4blZ_QHBFeET37uaAqoJI2g,3912
 pdd/update_prompt.py,sha256=OdPRIAMu7OBx7E4SOU95hWgdtBY4oO8XOe1dvPChMlU,4351
 pdd/xml_tagger.py,sha256=LADAXgw15O0Jh6IxdKrbKIpL76QbEZB1mdq5BvuMmsY,4241
 pdd/data/language_format.csv,sha256=xUTmFHXSBVBRfPV-NKG3oWo5_ped5ukP-ekFcIlVzJk,877
-pdd/data/llm_model.csv,sha256=…
+pdd/data/llm_model.csv,sha256=OxQsKZX5tWpcxSFaXd1yfosQMJ1m6L7eVbDKjl4XDZk,1925
 pdd/prompts/auto_include_LLM.prompt,sha256=0t-Jmm5o6vVTmqsISTUiewqPT8bB389UZnJoHZvgtu4,13967
 pdd/prompts/bug_to_unit_test_LLM.prompt,sha256=--ysObDv9WzOEyJMuaKEdDHkRrR_1j0dmOtlAFr4YRg,1205
 pdd/prompts/change_LLM.prompt,sha256=W3sE6XZ2fb35XdqOykK1hDPtqkHSv9MZGD3sT8B8WjY,2083

@@ -77,7 +77,7 @@ pdd/prompts/extract_promptline_LLM.prompt,sha256=owIBRaF2bWwg3S64uyMKzOFMdvvmI_E
 pdd/prompts/extract_unit_code_fix_LLM.prompt,sha256=1gWS0-Qs6vMynNNqp1Xc-2hcsyH_NTLZPB1-lvyprm8,14143
 pdd/prompts/extract_xml_LLM.prompt,sha256=eRcHaL-khShpb7C1_b7wmBJHfo2Kh1Wvjo_aOcWZovU,561
 pdd/prompts/fix_code_module_errors_LLM.prompt,sha256=m-oqZ3cOkWbqke_l9z0Nmunf7NsnR9JWTNVVlfcteAY,1405
-pdd/prompts/fix_errors_from_unit_tests_LLM.prompt,sha256=…
+pdd/prompts/fix_errors_from_unit_tests_LLM.prompt,sha256=xkQPMu2BumUmb_YFf7kPKpVqY4lI5ajlZsEKUaRZh-E,4779
 pdd/prompts/generate_test_LLM.prompt,sha256=y9SZ40zrRDOdp9DJnqq5_IMpsTORhAOphlo3QZlq7Ac,895
 pdd/prompts/increase_tests_LLM.prompt,sha256=rekFzLRuZy99KifEKNlmPYoQdl8wa04112mtCdIY6S8,955
 pdd/prompts/insert_includes_LLM.prompt,sha256=g-p2gXKENsqvfK5Q9FYbqFsIJ5CP7rbxmd4rROA-W80,1453

@@ -89,9 +89,9 @@ pdd/prompts/trim_results_start_LLM.prompt,sha256=WwFlOHha4wzMLtRHDMI6GtcNdl2toE8
 pdd/prompts/unfinished_prompt_LLM.prompt,sha256=-JgBpiPTQZdWOAwOG1XpfpD9waynFTAT3Jo84eQ4bTw,1543
 pdd/prompts/update_prompt_LLM.prompt,sha256=_lGaxeVP4oF8yGqiN6yj6UE0j79lxfGdjsYr5w5KSYk,1261
 pdd/prompts/xml_convertor_LLM.prompt,sha256=YGRGXJeg6EhM9690f-SKqQrKqSJjLFD51UrPOlO0Frg,2786
-pdd_cli-0.0.…
-pdd_cli-0.0.…
-pdd_cli-0.0.…
-pdd_cli-0.0.…
-pdd_cli-0.0.…
-pdd_cli-0.0.…
+pdd_cli-0.0.9.dist-info/LICENSE,sha256=-1bjYH-CEjGEQ8VixtnRYuu37kN6F9NxmZSDkBuUQ9o,1062
+pdd_cli-0.0.9.dist-info/METADATA,sha256=PbKC8DSCYhJ_3_QY_APQl7zZcHdf6F1u0oa1bJvAAQQ,6805
+pdd_cli-0.0.9.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+pdd_cli-0.0.9.dist-info/entry_points.txt,sha256=Kr8HtNVb8uHZtQJNH4DnF8j7WNgWQbb7_Pw5hECSR-I,36
+pdd_cli-0.0.9.dist-info/top_level.txt,sha256=xjnhIACeMcMeDfVNREgQZl4EbTni2T11QkL5r7E-sbE,4
+pdd_cli-0.0.9.dist-info/RECORD,,
{pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/LICENSE: file without changes
{pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/WHEEL: file without changes
{pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/entry_points.txt: file without changes
{pdd_cli-0.0.7.dist-info → pdd_cli-0.0.9.dist-info}/top_level.txt: file without changes