pdd-cli 0.0.37__py3-none-any.whl → 0.0.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pdd-cli might be problematic.
- pdd/__init__.py +1 -1
- pdd/auto_deps_main.py +4 -1
- pdd/auto_include.py +8 -1
- pdd/auto_update.py +2 -2
- pdd/bug_main.py +5 -2
- pdd/bug_to_unit_test.py +9 -2
- pdd/change.py +32 -22
- pdd/change_main.py +14 -10
- pdd/cli.py +11 -1
- pdd/cmd_test_main.py +3 -0
- pdd/code_generator.py +7 -1
- pdd/code_generator_main.py +9 -3
- pdd/conflicts_in_prompts.py +7 -2
- pdd/conflicts_main.py +6 -2
- pdd/context_generator.py +20 -3
- pdd/context_generator_main.py +2 -0
- pdd/continue_generation.py +8 -2
- pdd/crash_main.py +51 -31
- pdd/detect_change.py +8 -4
- pdd/detect_change_main.py +3 -0
- pdd/fix_code_loop.py +7 -2
- pdd/fix_code_module_errors.py +5 -2
- pdd/fix_error_loop.py +6 -2
- pdd/fix_errors_from_unit_tests.py +11 -6
- pdd/fix_main.py +4 -0
- pdd/fix_verification_errors.py +8 -3
- pdd/fix_verification_errors_loop.py +9 -3
- pdd/fix_verification_main.py +37 -31
- pdd/generate_test.py +10 -4
- pdd/git_update.py +5 -3
- pdd/increase_tests.py +5 -2
- pdd/insert_includes.py +8 -2
- pdd/preprocess_main.py +10 -3
- pdd/process_csv_change.py +8 -2
- pdd/split.py +15 -7
- pdd/split_main.py +2 -0
- pdd/summarize_directory.py +4 -0
- pdd/trace.py +9 -5
- pdd/trace_main.py +5 -4
- pdd/unfinished_prompt.py +6 -1
- pdd/update_main.py +6 -3
- pdd/update_prompt.py +8 -4
- pdd/xml_tagger.py +10 -5
- {pdd_cli-0.0.37.dist-info → pdd_cli-0.0.39.dist-info}/METADATA +4 -4
- {pdd_cli-0.0.37.dist-info → pdd_cli-0.0.39.dist-info}/RECORD +49 -49
- {pdd_cli-0.0.37.dist-info → pdd_cli-0.0.39.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.37.dist-info → pdd_cli-0.0.39.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.37.dist-info → pdd_cli-0.0.39.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.37.dist-info → pdd_cli-0.0.39.dist-info}/top_level.txt +0 -0
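Most of the hunks below follow one pattern: a new `time` parameter, defaulting to the package-level `DEFAULT_TIME` (0.25 according to the comments in these hunks), is added to each helper's signature and forwarded to `llm_invoke`. A minimal sketch of that pattern, assuming the keyword names visible in the hunks; the helper name `run_step`, the `prompt` keyword, and the response keys other than `result` and `cost` are assumptions, not part of the published API:

```python
from typing import Tuple

from pdd import DEFAULT_STRENGTH, DEFAULT_TIME  # package-level defaults referenced throughout the hunks
from pdd.llm_invoke import llm_invoke           # keyword arguments below mirror the calls in the diffs


def run_step(
    prompt_template: str,
    payload: dict,
    strength: float = DEFAULT_STRENGTH,
    temperature: float = 0.0,
    time: float = DEFAULT_TIME,  # the parameter threaded through nearly every module in 0.0.39
    verbose: bool = False,
) -> Tuple[str, float]:
    """Hypothetical helper illustrating how `time` is forwarded to llm_invoke."""
    response = llm_invoke(
        prompt=prompt_template,   # assumed keyword; the prompt argument is not visible in the hunks
        input_json=payload,
        strength=strength,
        temperature=temperature,
        time=time,                # new keyword added throughout this release
        verbose=verbose,
    )
    return response["result"], response["cost"]
```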
pdd/fix_verification_main.py
CHANGED
```diff
@@ -16,7 +16,7 @@ from .construct_paths import construct_paths
 from .fix_verification_errors import fix_verification_errors
 from .fix_verification_errors_loop import fix_verification_errors_loop
 # Import DEFAULT_STRENGTH from the main package
-from . import DEFAULT_STRENGTH
+from . import DEFAULT_STRENGTH, DEFAULT_TIME

 # Default values from the README
 DEFAULT_TEMPERATURE = 0.0
@@ -135,6 +135,7 @@ def fix_verification_main(
     force: bool = ctx.obj.get('force', False)
     quiet: bool = ctx.obj.get('quiet', False)
     verbose: bool = ctx.obj.get('verbose', False)
+    time: float = ctx.obj.get('time', DEFAULT_TIME)  # Get time from context, default 0.25

     # --- Input Validation ---
     if loop and not verification_program:
@@ -250,35 +251,39 @@ def fix_verification_main(
     try:
         if loop:
             if not quiet:
-                rich_print("
-                … (removed lines 254-281 were not captured in this diff view)
+                rich_print("[dim]Running Iterative Verification (fix_verification_errors_loop)...[/dim]")
+            try:
+                # Call fix_verification_errors_loop for iterative fixing
+                loop_results = fix_verification_errors_loop(
+                    program_file=program_file,  # Changed to pass the program_file path
+                    code_file=code_file,  # Changed to pass the code_file path
+                    prompt=input_strings["prompt_file"],  # Correctly passing prompt content
+                    verification_program=verification_program,  # Path to the verifier program
+                    strength=strength,
+                    temperature=temperature,
+                    llm_time=time,  # Changed 'time' to 'llm_time'
+                    max_attempts=max_attempts,
+                    budget=budget,
+                    verification_log_file=output_results_path,  # Use resolved output_results_path
+                    # output_code_path should not be passed here
+                    # output_program_path should not be passed here
+                    verbose=verbose,
+                    program_args=[]  # Pass an empty list for program_args
+                )
+                success = loop_results.get('success', False)
+                final_program = loop_results.get('final_program', "")  # Use .get for safety
+                final_code = loop_results.get('final_code', "")  # Use .get for safety
+                attempts = loop_results.get('total_attempts', 0)  # Use .get for safety
+                total_cost = loop_results.get('total_cost', 0.0)  # Use .get for safety
+                model_name = loop_results.get('model_name', "N/A")  # Use .get for safety
+                # Capture full statistics if available
+                # statistics = loop_results.get('statistics', {})
+            except Exception as e:
+                rich_print(f"[bold red]Error during loop execution:[/bold red] {e}")
+                if verbose:
+                    import traceback
+                    rich_print(Panel(traceback.format_exc(), title="Traceback", border_style="red"))
+                success = False
         else: # Single pass verification
             if not quiet:
                 rich_print("\n[bold blue]Running Single Pass Verification (fix_verification_errors)...[/bold blue]")
@@ -309,7 +314,8 @@ def fix_verification_main(
                 output=program_output,
                 strength=strength,
                 temperature=temperature,
-                verbose=verbose
+                verbose=verbose,
+                time=time  # Pass time to single pass function
             )

         # Determine success: If no issues were found OR if fixes were applied
```
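The flow change here is reading the time budget from the Click context and renaming it to `llm_time` when calling the loop helper. A short sketch of that handoff, using a plain dict in place of `ctx.obj` (the option names are taken from the hunk above; the values are illustrative):

```python
from pdd import DEFAULT_TIME

# A plain dict standing in for click's ctx.obj inside fix_verification_main.
ctx_obj = {"strength": 0.85, "temperature": 0.0, "verbose": False}

time_budget = ctx_obj.get("time", DEFAULT_TIME)  # falls back to the package default (0.25)

# The same value is then forwarded under different keyword names:
#   loop mode:        fix_verification_errors_loop(..., llm_time=time_budget, ...)
#   single-pass mode: fix_verification_errors(..., time=time_budget, ...)
```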
pdd/generate_test.py
CHANGED
```diff
@@ -2,7 +2,7 @@ from typing import Tuple, Optional
 from rich import print
 from rich.markdown import Markdown
 from rich.console import Console
-from . import EXTRACTION_STRENGTH
+from . import EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME
 from .load_prompt_template import load_prompt_template
 from .preprocess import preprocess
 from .llm_invoke import llm_invoke
@@ -15,9 +15,10 @@ console = Console()
 def generate_test(
     prompt: str,
     code: str,
-    strength: float,
-    temperature: float,
-    language: str = "python",
+    strength: float = DEFAULT_STRENGTH,
+    temperature: float = 0.0,
+    time: float = DEFAULT_TIME,
+    language: str = "python",
     verbose: bool = False
 ) -> Tuple[str, float, str]:
     """
@@ -29,6 +30,7 @@ def generate_test(
         strength (float): The strength of the LLM model (0-1).
         temperature (float): The temperature of the LLM model.
         language (str): The programming language for the unit test.
+        time (float, optional): Time budget for LLM calls. Defaults to DEFAULT_TIME.
         verbose (bool): Whether to print detailed information.

     Returns:
@@ -62,6 +64,7 @@ def generate_test(
         input_json=input_json,
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose
     )

@@ -79,6 +82,7 @@ def generate_test(
         prompt_text=last_600_chars,
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose
     )
     total_cost += check_cost
@@ -92,6 +96,7 @@ def generate_test(
         llm_output=result,
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose
     )
     total_cost += continue_cost
@@ -104,6 +109,7 @@ def generate_test(
         language=language,
         strength=EXTRACTION_STRENGTH,
         temperature=temperature,
+        time=time,
         verbose=verbose
     )
     total_cost += post_cost
```
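With `strength`, `temperature`, and `time` now carrying defaults, `generate_test` can be called with just a prompt and code. A usage sketch under that assumption (the input strings are illustrative; running it requires a configured LLM backend):

```python
from pdd.generate_test import generate_test

prompt_text = "Write a function add(a, b) that returns a + b."  # illustrative inputs
code_text = "def add(a, b):\n    return a + b\n"

# strength defaults to DEFAULT_STRENGTH, temperature to 0.0, time to DEFAULT_TIME (0.25).
unit_test, total_cost, model_name = generate_test(
    prompt=prompt_text,
    code=code_text,
    time=0.5,            # override the default time budget for this call
    language="python",
)
print(model_name, f"${total_cost:.4f}")
print(unit_test)
```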
pdd/git_update.py
CHANGED
```diff
@@ -5,7 +5,7 @@ from rich.console import Console
 from rich.panel import Panel
 from .update_prompt import update_prompt
 import git
-
+from . import DEFAULT_TIME
 console = Console()

 def git_update(
@@ -13,7 +13,8 @@ def git_update(
     modified_code_file: str,
     strength: float,
     temperature: float,
-    verbose: bool = False
+    verbose: bool = False,
+    time: float = DEFAULT_TIME
 ) -> Tuple[Optional[str], float, str]:
     """
     Read in modified code, restore the prior checked-in version from GitHub,
@@ -61,7 +62,8 @@ def git_update(
         modified_code=modified_code,
         strength=strength,
         temperature=temperature,
-        verbose=verbose
+        verbose=verbose,
+        time=time
     )

     # Write back the modified code
```
pdd/increase_tests.py
CHANGED
```diff
@@ -1,6 +1,6 @@
-from typing import Tuple
+from typing import Tuple, Optional
 from rich.console import Console
-from . import EXTRACTION_STRENGTH
+from . import EXTRACTION_STRENGTH, DEFAULT_TIME
 from .load_prompt_template import load_prompt_template
 from .llm_invoke import llm_invoke
 from .postprocess import postprocess
@@ -13,6 +13,7 @@ def increase_tests(
     language: str = "python",
     strength: float = 0.5,
     temperature: float = 0.0,
+    time: Optional[float] = DEFAULT_TIME,
     verbose: bool = False
 ) -> Tuple[str, float, str]:
     """
@@ -26,6 +27,7 @@ def increase_tests(
         language (str, optional): Programming language. Defaults to "python".
         strength (float, optional): LLM model strength. Defaults to 0.5.
         temperature (float, optional): LLM model temperature. Defaults to 0.0.
+        time (Optional[float]): Time allocation for the LLM. Defaults to DEFAULT_TIME.
         verbose (bool, optional): Verbose output flag. Defaults to False.

     Returns:
@@ -73,6 +75,7 @@ def increase_tests(
         input_json=input_json,
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose
     )

```
pdd/insert_includes.py
CHANGED
```diff
@@ -7,6 +7,7 @@ from .llm_invoke import llm_invoke
 from .load_prompt_template import load_prompt_template
 from .auto_include import auto_include
 from .preprocess import preprocess
+from . import DEFAULT_TIME, DEFAULT_STRENGTH

 class InsertIncludesOutput(BaseModel):
     output_prompt: str = Field(description="The prompt with dependencies inserted")
@@ -15,8 +16,9 @@ def insert_includes(
     input_prompt: str,
     directory_path: str,
     csv_filename: str,
-    strength: float,
-    temperature: float,
+    strength: float = DEFAULT_STRENGTH,
+    temperature: float = 0.0,
+    time: float = DEFAULT_TIME,
     verbose: bool = False
 ) -> Tuple[str, str, float, str]:
     """
@@ -28,6 +30,7 @@ def insert_includes(
         csv_filename (str): Name of the CSV file containing dependencies
         strength (float): Strength parameter for the LLM model
         temperature (float): Temperature parameter for the LLM model
+        time (float): Time budget for the LLM model
         verbose (bool, optional): Whether to print detailed information. Defaults to False.

     Returns:
@@ -74,6 +77,7 @@ def insert_includes(
         csv_file=csv_content,
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose
     )

@@ -90,6 +94,7 @@ def insert_includes(
         },
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose,
         output_pydantic=InsertIncludesOutput
     )
@@ -135,6 +140,7 @@ def main():
         csv_filename=csv_filename,
         strength=strength,
         temperature=temperature,
+        time=0.25,
         verbose=True
     )

```
pdd/preprocess_main.py
CHANGED
```diff
@@ -7,7 +7,7 @@ from rich import print as rprint
 from .construct_paths import construct_paths
 from .preprocess import preprocess
 from .xml_tagger import xml_tagger
-
+from . import DEFAULT_TIME, DEFAULT_STRENGTH
 def preprocess_main(
     ctx: click.Context, prompt_file: str, output: Optional[str], xml: bool, recursive: bool, double: bool, exclude: list
 ) -> Tuple[str, float, str]:
@@ -40,10 +40,17 @@ def preprocess_main(

         if xml:
             # Use xml_tagger to add XML delimiters
-            strength = ctx.obj.get("strength",
+            strength = ctx.obj.get("strength", DEFAULT_STRENGTH)
             temperature = ctx.obj.get("temperature", 0.0)
             verbose = ctx.obj.get("verbose", False)
-            … (old line 46 was not captured in this diff view)
+            time = ctx.obj.get("time", DEFAULT_TIME)
+            xml_tagged, total_cost, model_name = xml_tagger(
+                prompt,
+                strength,
+                temperature,
+                verbose,
+                time=time
+            )
             processed_prompt = xml_tagged
         else:
             # Preprocess the prompt
```
pdd/process_csv_change.py
CHANGED
```diff
@@ -10,6 +10,7 @@ from .get_extension import get_extension
 # Assuming EXTRACTION_STRENGTH and DEFAULT_STRENGTH might be needed later,
 # or just acknowledging their existence as per the prompt.
 # from .. import EXTRACTION_STRENGTH, DEFAULT_STRENGTH
+from . import DEFAULT_TIME  # Added DEFAULT_TIME

 # No changes needed in the code_under_test based on these specific errors.

@@ -69,7 +70,8 @@ def process_csv_change(
     code_directory: str,
     language: str,  # Default language if not specified in prompt filename
     extension: str,  # Default extension (unused if language suffix found)
-    budget: float
+    budget: float,
+    time: float = DEFAULT_TIME  # Added time parameter
 ) -> Tuple[bool, List[Dict[str, str]], float, Optional[str]]:
     """
     Reads a CSV file, processes each row to modify associated code files using an LLM,
@@ -86,6 +88,7 @@ def process_csv_change(
         extension: Default file extension (including '.') if language cannot be inferred.
                    Note: This is less likely to be used if `get_extension` covers the default language.
         budget: Maximum allowed cost for all LLM operations. Must be non-negative.
+        time: Time budget for each LLM operation.

     Returns:
         A tuple containing:
@@ -296,7 +299,10 @@ def process_csv_change(
                 input_code=input_code,
                 change_prompt=change_instructions,
                 strength=strength,
-                temperature=temperature
+                temperature=temperature,
+                time=time,  # Pass time
+                budget=budget - total_cost,  # Pass per-row budget
+                quiet=True  # Suppress individual change prints for CSV mode
             )
             console.print(f"  [dim]Change cost:[/dim] ${cost:.6f}")
             console.print(f"  [dim]Model used:[/dim] {current_model_name}")
```
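Besides the new `time` parameter, each row's change call now receives `budget - total_cost`, so a row can only spend what is left of the overall budget. A minimal sketch of that accounting (the `run_change` callable and row structure are hypothetical, not part of the module):

```python
from typing import Callable, Iterable


def process_rows(rows: Iterable[dict], budget: float,
                 run_change: Callable[[dict, float], float]) -> float:
    """Charge each row against the remaining budget, mirroring process_csv_change."""
    total_cost = 0.0
    for row in rows:
        remaining = budget - total_cost   # analogous to budget=budget - total_cost in the hunk
        if remaining <= 0:
            break                         # stop once the budget is exhausted
        total_cost += run_change(row, remaining)
    return total_cost
```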
pdd/split.py
CHANGED
```diff
@@ -1,11 +1,12 @@
-from typing import Tuple
+from typing import Tuple, Optional
 from rich import print as rprint
 from rich.markdown import Markdown
 from pydantic import BaseModel, Field
 from .load_prompt_template import load_prompt_template
 from .preprocess import preprocess
 from .llm_invoke import llm_invoke
-
+
+from . import EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TEMPERATURE, DEFAULT_TIME

 class PromptSplit(BaseModel):
     extracted_functionality: str = Field(description="The extracted functionality as a sub-module prompt")
@@ -15,10 +16,11 @@ def split(
     input_prompt: str,
     input_code: str,
     example_code: str,
-    strength: float,
-    temperature: float,
+    strength: float = DEFAULT_STRENGTH,
+    temperature: float = DEFAULT_TEMPERATURE,
+    time: Optional[float] = DEFAULT_TIME,
     verbose: bool = False
-) -> Tuple[str, str, float, str]:
+) -> Tuple[Tuple[str, str], float, str]:
     """
     Split a prompt into extracted functionality and remaining prompt.

@@ -28,14 +30,18 @@ def split(
         example_code (str): Example code showing usage.
         strength (float): LLM strength parameter (0-1).
         temperature (float): LLM temperature parameter (0-1).
+        time (Optional[float]): Time allocation for the LLM.
         verbose (bool): Whether to print detailed information.

     Returns:
-        Tuple[str, str, float, str]:
+        Tuple[Tuple[str, str], float, str]:
+            ((extracted_functionality, remaining_prompt), total_cost, model_name)
         where model_name is the name of the model used (returned as the second to last tuple element)
         and total_cost is the aggregated cost from all LLM invocations.
     """
     total_cost = 0.0
+    model_name = ""
+

     # Input validation
     if not all([input_prompt, input_code, example_code]):
@@ -79,6 +85,7 @@ def split(
         },
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose
     )
     total_cost += split_response["cost"]
@@ -95,7 +102,8 @@ def split(
         strength=EXTRACTION_STRENGTH,  # Fixed strength for extraction
         temperature=temperature,
         output_pydantic=PromptSplit,
-        verbose=verbose
+        verbose=verbose,
+        time=time  # Pass time to the second llm_invoke call
     )
     total_cost += extract_response["cost"]

```
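Note that the return shape of `split` changed from `(extracted, remaining, cost, model)` to `((extracted, remaining), cost, model)`, which is how `split_main` unpacks it below. A usage sketch with illustrative inputs (running it requires a configured LLM backend):

```python
from pdd.split import split

prompt_text = "Prompt describing a module with a helper that should be factored out."  # illustrative
code_text = "def helper():\n    ...\n"
example_text = "helper()"

result_tuple, total_cost, model_name = split(
    input_prompt=prompt_text,
    input_code=code_text,
    example_code=example_text,
    time=0.25,  # optional; defaults to DEFAULT_TIME
)
extracted_functionality, remaining_prompt = result_tuple
```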
pdd/split_main.py
CHANGED
```diff
@@ -59,6 +59,7 @@ def split_main(
     # Get parameters from context
     strength = ctx.obj.get('strength', 0.5)
     temperature = ctx.obj.get('temperature', 0)
+    time = ctx.obj.get('time')

     # Call the split function with the standardized return pattern (result_data, cost, model_name)
     result_tuple, total_cost, model_name = split(
@@ -67,6 +68,7 @@ def split_main(
         example_code=input_strings["example_code"],
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=not ctx.obj.get('quiet', False)
     )

```
pdd/summarize_directory.py
CHANGED
```diff
@@ -11,6 +11,7 @@ from rich.progress import track

 from .load_prompt_template import load_prompt_template
 from .llm_invoke import llm_invoke
+from . import DEFAULT_TIME

 class FileSummary(BaseModel):
     file_summary: str = Field(description="The summary of the file")
@@ -80,6 +81,7 @@ def summarize_directory(
     strength: float,
     temperature: float,
     verbose: bool,
+    time: float = DEFAULT_TIME,
     csv_file: Optional[str] = None
 ) -> Tuple[str, float, str]:
     """
@@ -90,6 +92,7 @@ def summarize_directory(
         strength (float): Between 0 and 1 that is the strength of the LLM model to use.
         temperature (float): Controls the randomness of the LLM's output.
         verbose (bool): Whether to print out the details of the function.
+        time (float): Time budget for LLM calls.
         csv_file (Optional[str]): Current CSV file contents if it already exists.

     Returns:
@@ -184,6 +187,7 @@ def summarize_directory(
         input_json=input_params,
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose,
         output_pydantic=FileSummary
     )
```
pdd/trace.py
CHANGED
```diff
@@ -6,7 +6,7 @@ import difflib
 from .load_prompt_template import load_prompt_template
 from .preprocess import preprocess
 from .llm_invoke import llm_invoke
-
+from . import DEFAULT_TIME, DEFAULT_STRENGTH
 console = Console()

 class PromptLineOutput(BaseModel):
@@ -16,9 +16,10 @@ def trace(
     code_file: str,
     code_line: int,
     prompt_file: str,
-    strength: float =
+    strength: float = DEFAULT_STRENGTH,
     temperature: float = 0,
-    verbose: bool = False
+    verbose: bool = False,
+    time: float = DEFAULT_TIME
 ) -> Tuple[Optional[int], float, str]:
     """
     Trace a line of code back to its corresponding line in the prompt file.
@@ -30,6 +31,7 @@ def trace(
         strength (float, optional): Model strength. Defaults to 0.5
         temperature (float, optional): Model temperature. Defaults to 0
         verbose (bool, optional): Whether to print detailed information. Defaults to False
+        time (float, optional): Time parameter for LLM calls. Defaults to 0.25

     Returns:
         Tuple[Optional[int], float, str]: (prompt line number, total cost, model name)
@@ -67,7 +69,8 @@ def trace(
         },
         strength=strength,
         temperature=temperature,
-        verbose=verbose
+        verbose=verbose,
+        time=time
     )

     total_cost += trace_response['cost']
@@ -89,7 +92,8 @@ def trace(
         strength=strength,
         temperature=temperature,
         verbose=verbose,
-        output_pydantic=PromptLineOutput
+        output_pydantic=PromptLineOutput,
+        time=time
     )

     total_cost += extract_response['cost']
```
pdd/trace_main.py
CHANGED
```diff
@@ -5,7 +5,7 @@ import os
 import logging
 from .construct_paths import construct_paths
 from .trace import trace
-
+from . import DEFAULT_TIME, DEFAULT_STRENGTH
 logging.basicConfig(level=logging.WARNING)
 logger = logging.getLogger(__name__)

@@ -49,11 +49,12 @@ def trace_main(ctx: click.Context, prompt_file: str, code_file: str, code_line:
         logger.debug("Input files loaded")

         # Perform trace analysis
-        strength = ctx.obj.get('strength',
+        strength = ctx.obj.get('strength', DEFAULT_STRENGTH)
         temperature = ctx.obj.get('temperature', 0.0)
+        time = ctx.obj.get('time', DEFAULT_TIME)
         try:
             prompt_line, total_cost, model_name = trace(
-                code_content, code_line, prompt_content, strength, temperature
+                code_content, code_line, prompt_content, strength, temperature, time=time
             )
             logger.debug(f"Trace analysis completed: prompt_line={prompt_line}, total_cost={total_cost}, model_name={model_name}")

@@ -90,7 +91,7 @@ def trace_main(ctx: click.Context, prompt_file: str, code_file: str, code_line:
             logger.debug(f"Results saved to {output_path}")
         except IOError as e:
             if not quiet:
-                rprint(f"[bold red]
+                rprint(f"[bold red]Error saving trace results: {e}[/bold red]")
             logger.error(f"IOError while saving results: {e}")
             ctx.exit(1)

```
pdd/unfinished_prompt.py
CHANGED
```diff
@@ -3,6 +3,7 @@ from pydantic import BaseModel, Field
 from rich import print as rprint
 from .load_prompt_template import load_prompt_template
 from .llm_invoke import llm_invoke
+from . import DEFAULT_STRENGTH, DEFAULT_TIME

 class PromptAnalysis(BaseModel):
     reasoning: str = Field(description="Structured reasoning for the completeness assessment")
@@ -10,8 +11,9 @@ class PromptAnalysis(BaseModel):

 def unfinished_prompt(
     prompt_text: str,
-    strength: float =
+    strength: float = DEFAULT_STRENGTH,
     temperature: float = 0,
+    time: float = DEFAULT_TIME,
     verbose: bool = False
 ) -> Tuple[str, bool, float, str]:
     """
@@ -21,6 +23,7 @@ def unfinished_prompt(
         prompt_text (str): The prompt text to analyze
         strength (float, optional): Strength of the LLM model. Defaults to 0.5.
         temperature (float, optional): Temperature of the LLM model. Defaults to 0.
+        time (float, optional): Time budget for LLM calls. Defaults to DEFAULT_TIME.
         verbose (bool, optional): Whether to print detailed information. Defaults to False.

     Returns:
@@ -70,6 +73,7 @@ def unfinished_prompt(
         input_json=input_json,
         strength=strength,
         temperature=temperature,
+        time=time,
         verbose=verbose,
         output_pydantic=PromptAnalysis
     )
@@ -103,6 +107,7 @@ if __name__ == "__main__":
     try:
         reasoning, is_finished, cost, model = unfinished_prompt(
             prompt_text=sample_prompt,
+            time=DEFAULT_TIME,
             verbose=True
         )
         rprint("\n[blue]Results:[/blue]")
```
pdd/update_main.py
CHANGED
```diff
@@ -6,7 +6,7 @@ from rich import print as rprint
 from .construct_paths import construct_paths
 from .update_prompt import update_prompt
 from .git_update import git_update
-
+from . import DEFAULT_TIME
 def update_main(
     ctx: click.Context,
     input_prompt_file: str,
@@ -49,6 +49,7 @@ def update_main(
         input_prompt = input_strings["input_prompt_file"]
         modified_code = input_strings["modified_code_file"]
         input_code = input_strings.get("input_code_file")
+        time = ctx.obj.get('time', DEFAULT_TIME)

         # Update prompt using appropriate method
         if git:
@@ -59,7 +60,8 @@ def update_main(
                 modified_code_file=modified_code_file,
                 strength=ctx.obj.get("strength", 0.5),
                 temperature=ctx.obj.get("temperature", 0),
-                verbose=ctx.obj.get("verbose", False)
+                verbose=ctx.obj.get("verbose", False),
+                time=time
             )
         else:
             if input_code is None:
@@ -70,7 +72,8 @@ def update_main(
                 modified_code=modified_code,
                 strength=ctx.obj.get("strength", 0.5),
                 temperature=ctx.obj.get("temperature", 0),
-                verbose=ctx.obj.get("verbose", False)
+                verbose=ctx.obj.get("verbose", False),
+                time=time
             )

         # Save the modified prompt
```
pdd/update_prompt.py
CHANGED
```diff
@@ -5,7 +5,7 @@ from pydantic import BaseModel, Field
 from .load_prompt_template import load_prompt_template
 from .preprocess import preprocess
 from .llm_invoke import llm_invoke
-
+from . import DEFAULT_TIME
 class PromptUpdate(BaseModel):
     modified_prompt: str = Field(description="The updated prompt that will generate the modified code")

@@ -15,7 +15,8 @@ def update_prompt(
     modified_code: str,
     strength: float,
     temperature: float,
-    verbose: bool = False
+    verbose: bool = False,
+    time: float = DEFAULT_TIME
 ) -> Tuple[str, float, str]:
     """
     Update a prompt based on the original and modified code.
@@ -27,6 +28,7 @@ def update_prompt(
         strength (float): The strength parameter for the LLM model (0-1)
         temperature (float): The temperature parameter for the LLM model (0-1)
         verbose (bool, optional): Whether to print detailed output. Defaults to False.
+        time (float, optional): The time parameter for the LLM model. Defaults to 0.25.

     Returns:
         Tuple[str, float, str]: (modified_prompt, total_cost, model_name)
@@ -68,7 +70,8 @@ def update_prompt(
         },
         strength=strength,
         temperature=temperature,
-        verbose=verbose
+        verbose=verbose,
+        time=time
     )

     if not first_response or not isinstance(first_response, dict) or 'result' not in first_response:
@@ -84,7 +87,8 @@ def update_prompt(
         strength=0.5,
         temperature=temperature,
         output_pydantic=PromptUpdate,
-        verbose=verbose
+        verbose=verbose,
+        time=time
     )

     if not second_response or not isinstance(second_response, dict) or 'result' not in second_response:
```