pdd-cli 0.0.2 (pdd_cli-0.0.2-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pdd-cli might be problematic.
- pdd/__init__.py +0 -0
- pdd/auto_deps_main.py +98 -0
- pdd/auto_include.py +175 -0
- pdd/auto_update.py +73 -0
- pdd/bug_main.py +99 -0
- pdd/bug_to_unit_test.py +159 -0
- pdd/change.py +141 -0
- pdd/change_main.py +240 -0
- pdd/cli.py +607 -0
- pdd/cmd_test_main.py +155 -0
- pdd/code_generator.py +117 -0
- pdd/code_generator_main.py +66 -0
- pdd/comment_line.py +35 -0
- pdd/conflicts_in_prompts.py +143 -0
- pdd/conflicts_main.py +90 -0
- pdd/construct_paths.py +251 -0
- pdd/context_generator.py +133 -0
- pdd/context_generator_main.py +73 -0
- pdd/continue_generation.py +140 -0
- pdd/crash_main.py +127 -0
- pdd/data/language_format.csv +61 -0
- pdd/data/llm_model.csv +15 -0
- pdd/detect_change.py +142 -0
- pdd/detect_change_main.py +100 -0
- pdd/find_section.py +28 -0
- pdd/fix_code_loop.py +212 -0
- pdd/fix_code_module_errors.py +143 -0
- pdd/fix_error_loop.py +216 -0
- pdd/fix_errors_from_unit_tests.py +240 -0
- pdd/fix_main.py +138 -0
- pdd/generate_output_paths.py +194 -0
- pdd/generate_test.py +140 -0
- pdd/get_comment.py +55 -0
- pdd/get_extension.py +52 -0
- pdd/get_language.py +41 -0
- pdd/git_update.py +84 -0
- pdd/increase_tests.py +93 -0
- pdd/insert_includes.py +150 -0
- pdd/llm_invoke.py +304 -0
- pdd/load_prompt_template.py +59 -0
- pdd/pdd_completion.fish +72 -0
- pdd/pdd_completion.sh +141 -0
- pdd/pdd_completion.zsh +418 -0
- pdd/postprocess.py +121 -0
- pdd/postprocess_0.py +52 -0
- pdd/preprocess.py +199 -0
- pdd/preprocess_main.py +72 -0
- pdd/process_csv_change.py +182 -0
- pdd/prompts/auto_include_LLM.prompt +230 -0
- pdd/prompts/bug_to_unit_test_LLM.prompt +17 -0
- pdd/prompts/change_LLM.prompt +34 -0
- pdd/prompts/conflict_LLM.prompt +23 -0
- pdd/prompts/continue_generation_LLM.prompt +3 -0
- pdd/prompts/detect_change_LLM.prompt +65 -0
- pdd/prompts/example_generator_LLM.prompt +10 -0
- pdd/prompts/extract_auto_include_LLM.prompt +6 -0
- pdd/prompts/extract_code_LLM.prompt +22 -0
- pdd/prompts/extract_conflict_LLM.prompt +19 -0
- pdd/prompts/extract_detect_change_LLM.prompt +19 -0
- pdd/prompts/extract_program_code_fix_LLM.prompt +16 -0
- pdd/prompts/extract_prompt_change_LLM.prompt +7 -0
- pdd/prompts/extract_prompt_split_LLM.prompt +9 -0
- pdd/prompts/extract_prompt_update_LLM.prompt +8 -0
- pdd/prompts/extract_promptline_LLM.prompt +11 -0
- pdd/prompts/extract_unit_code_fix_LLM.prompt +332 -0
- pdd/prompts/extract_xml_LLM.prompt +7 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +17 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +62 -0
- pdd/prompts/generate_test_LLM.prompt +12 -0
- pdd/prompts/increase_tests_LLM.prompt +16 -0
- pdd/prompts/insert_includes_LLM.prompt +30 -0
- pdd/prompts/split_LLM.prompt +94 -0
- pdd/prompts/summarize_file_LLM.prompt +11 -0
- pdd/prompts/trace_LLM.prompt +30 -0
- pdd/prompts/trim_results_LLM.prompt +83 -0
- pdd/prompts/trim_results_start_LLM.prompt +45 -0
- pdd/prompts/unfinished_prompt_LLM.prompt +18 -0
- pdd/prompts/update_prompt_LLM.prompt +19 -0
- pdd/prompts/xml_convertor_LLM.prompt +54 -0
- pdd/split.py +119 -0
- pdd/split_main.py +103 -0
- pdd/summarize_directory.py +212 -0
- pdd/trace.py +135 -0
- pdd/trace_main.py +108 -0
- pdd/track_cost.py +102 -0
- pdd/unfinished_prompt.py +114 -0
- pdd/update_main.py +96 -0
- pdd/update_prompt.py +115 -0
- pdd/xml_tagger.py +122 -0
- pdd_cli-0.0.2.dist-info/LICENSE +7 -0
- pdd_cli-0.0.2.dist-info/METADATA +225 -0
- pdd_cli-0.0.2.dist-info/RECORD +95 -0
- pdd_cli-0.0.2.dist-info/WHEEL +5 -0
- pdd_cli-0.0.2.dist-info/entry_points.txt +2 -0
- pdd_cli-0.0.2.dist-info/top_level.txt +1 -0
pdd/cmd_test_main.py
ADDED
@@ -0,0 +1,155 @@
from typing import Optional, Tuple
import click
from rich import print
from rich.progress import track
import os

from .construct_paths import construct_paths
from .generate_test import generate_test
from .increase_tests import increase_tests

def cmd_test_main(
    ctx: click.Context,
    prompt_file: str,
    code_file: str,
    output: Optional[str],
    language: Optional[str],
    coverage_report: Optional[str],
    existing_tests: Optional[str],
    target_coverage: Optional[float],
    merge: Optional[bool],
) -> Tuple[str, float, str]:
    """
    CLI wrapper for generating or enhancing unit tests.

    Reads a prompt file and a code file, generates unit tests using the `generate_test` function,
    and handles the output location.

    Args:
        ctx (click.Context): The Click context object.
        prompt_file (str): Path to the prompt file.
        code_file (str): Path to the code file.
        output (Optional[str]): Path to save the generated test file.
        language (Optional[str]): Programming language.
        coverage_report (Optional[str]): Path to the coverage report file.
        existing_tests (Optional[str]): Path to the existing unit test file.
        target_coverage (Optional[float]): Desired code coverage percentage.
        merge (Optional[bool]): Whether to merge new tests with existing tests.

    Returns:
        Tuple[str, float, str]: Generated unit test code, total cost, and model name.
    """
    # Initialize variables
    unit_test = ""
    total_cost = 0.0
    model_name = ""
    output_file_paths = {"output": output}
    input_strings = {}

    verbose = ctx.obj["verbose"]
    strength = ctx.obj["strength"]
    temperature = ctx.obj["temperature"]

    if verbose:
        print(f"[bold blue]Prompt file:[/bold blue] {prompt_file}")
        print(f"[bold blue]Code file:[/bold blue] {code_file}")
        if output:
            print(f"[bold blue]Output:[/bold blue] {output}")
        if language:
            print(f"[bold blue]Language:[/bold blue] {language}")

    # Construct input strings, output file paths, and determine language
    try:
        input_file_paths = {
            "prompt_file": prompt_file,
            "code_file": code_file,
        }
        if coverage_report:
            input_file_paths["coverage_report"] = coverage_report
        if existing_tests:
            input_file_paths["existing_tests"] = existing_tests

        command_options = {
            "output": output,
            "language": language,
            "merge": merge,
            "target_coverage": target_coverage,
        }

        input_strings, output_file_paths, language = construct_paths(
            input_file_paths=input_file_paths,
            force=ctx.obj["force"],
            quiet=ctx.obj["quiet"],
            command="test",
            command_options=command_options,
        )
    except Exception as e:
        print(f"[bold red]Error constructing paths: {e}[/bold red]")
        ctx.exit(1)
        return "", 0.0, ""

    if verbose:
        print(f"[bold blue]Language detected:[/bold blue] {language}")

    # Generate or enhance unit tests
    if not coverage_report:
        try:
            unit_test, total_cost, model_name = generate_test(
                input_strings["prompt_file"],
                input_strings["code_file"],
                strength,
                temperature,
                language,
            )
        except Exception as e:
            print(f"[bold red]Error generating tests: {e}[/bold red]")
            ctx.exit(1)
            return "", 0.0, ""
    else:
        if not existing_tests:
            print(
                "[bold red]Error: --existing-tests is required when using --coverage-report[/bold red]"
            )
            ctx.exit(1)
            return "", 0.0, ""
        try:
            unit_test, total_cost, model_name = increase_tests(
                existing_unit_tests=input_strings["existing_tests"],
                coverage_report=input_strings["coverage_report"],
                code=input_strings["code_file"],
                prompt_that_generated_code=input_strings["prompt_file"],
                language=language,
                strength=strength,
                temperature=temperature,
                verbose=verbose,
            )
        except Exception as e:
            print(f"[bold red]Error increasing test coverage: {e}[/bold red]")
            ctx.exit(1)
            return "", 0.0, ""

    # Handle output
    output_file = output_file_paths["output"]
    if merge and existing_tests:
        output_file = existing_tests

    if not output_file:
        print("[bold red]Error: Output file path could not be determined.[/bold red]")
        ctx.exit(1)
        return "", 0.0, ""
    try:
        with open(output_file, "w") as f:
            f.write(unit_test)
        print(
            f"[bold green]Unit tests saved to:[/bold green] {output_file}"
        )
    except Exception as e:
        print(f"[bold red]Error saving tests to file: {e}[/bold red]")
        ctx.exit(1)
        return "", 0.0, ""

    if verbose:
        print(f"[bold blue]Total cost:[/bold blue] ${total_cost:.6f}")
        print(f"[bold blue]Model used:[/bold blue] {model_name}")

    return unit_test, total_cost, model_name
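For context, a minimal sketch of how cmd_test_main could be driven from a Click command. The command and option names here are illustrative assumptions (they are not taken from pdd/cli.py); the ctx.obj keys match the reads in the function above.

import click
from pdd.cmd_test_main import cmd_test_main

@click.command(name="test")
@click.argument("prompt_file")
@click.argument("code_file")
@click.option("--output", default=None)
@click.pass_context
def test_command(ctx, prompt_file, code_file, output):
    # Keys expected by cmd_test_main (see the ctx.obj reads above); values are placeholders.
    ctx.obj = {"verbose": True, "strength": 0.5, "temperature": 0.0, "force": False, "quiet": False}
    unit_test, cost, model = cmd_test_main(
        ctx, prompt_file, code_file, output,
        language=None, coverage_report=None, existing_tests=None,
        target_coverage=None, merge=False,
    )
    click.echo(f"{model}: ${cost:.6f}")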
pdd/code_generator.py
ADDED
@@ -0,0 +1,117 @@
from typing import Tuple
from rich.console import Console
from .preprocess import preprocess
from .llm_invoke import llm_invoke
from .unfinished_prompt import unfinished_prompt
from .continue_generation import continue_generation
from .postprocess import postprocess

console = Console()

def code_generator(
    prompt: str,
    language: str,
    strength: float,
    temperature: float = 0.0,
    verbose: bool = False
) -> Tuple[str, float, str]:
    """
    Generate code from a prompt using a language model.

    Args:
        prompt (str): The raw prompt to be processed
        language (str): The target programming language
        strength (float): The strength of the LLM model (0 to 1)
        temperature (float, optional): The temperature for the LLM model. Defaults to 0.0
        verbose (bool, optional): Whether to print detailed information. Defaults to False

    Returns:
        Tuple[str, float, str]: Tuple containing (runnable_code, total_cost, model_name)

    Raises:
        ValueError: If input parameters are invalid
        Exception: For other unexpected errors
    """
    try:
        # Input validation
        if not isinstance(prompt, str) or not prompt.strip():
            raise ValueError("Prompt must be a non-empty string")
        if not isinstance(language, str) or not language.strip():
            raise ValueError("Language must be a non-empty string")
        if not 0 <= strength <= 1:
            raise ValueError("Strength must be between 0 and 1")
        if not 0 <= temperature <= 2:
            raise ValueError("Temperature must be between 0 and 2")

        total_cost = 0.0
        model_name = ""

        # Step 1: Preprocess the prompt
        if verbose:
            console.print("[bold blue]Step 1: Preprocessing prompt[/bold blue]")
        processed_prompt = preprocess(prompt, recursive=False, double_curly_brackets=True)

        # Step 2: Generate initial response
        if verbose:
            console.print("[bold blue]Step 2: Generating initial response[/bold blue]")
        response = llm_invoke(
            prompt=processed_prompt,
            input_json={},
            strength=strength,
            temperature=temperature,
            verbose=verbose
        )
        initial_output = response['result']
        total_cost += response['cost']
        model_name = response['model_name']

        # Step 3: Check if generation is complete
        if verbose:
            console.print("[bold blue]Step 3: Checking completion status[/bold blue]")
        last_chunk = initial_output[-600:] if len(initial_output) > 600 else initial_output
        reasoning, is_finished, check_cost, _ = unfinished_prompt(
            prompt_text=last_chunk,
            strength=0.5,
            temperature=0.0,
            verbose=verbose
        )
        total_cost += check_cost

        # Step 3a: Continue generation if incomplete
        if not is_finished:
            if verbose:
                console.print("[bold yellow]Generation incomplete, continuing...[/bold yellow]")
            final_output, continue_cost, continue_model = continue_generation(
                formatted_input_prompt=processed_prompt,
                llm_output=initial_output,
                strength=strength,
                temperature=temperature,
                verbose=verbose
            )
            total_cost += continue_cost
            model_name = continue_model
        else:
            final_output = initial_output

        # Step 4: Postprocess the output
        if verbose:
            console.print("[bold blue]Step 4: Postprocessing output[/bold blue]")
        runnable_code, postprocess_cost, model_name_post = postprocess(
            llm_output=final_output,
            language=language,
            strength=0.895,
            temperature=0.0,
            verbose=verbose
        )
        total_cost += postprocess_cost

        return runnable_code, total_cost, model_name

    except ValueError as ve:
        if verbose:
            console.print(f"[bold red]Validation Error: {str(ve)}[/bold red]")
        raise
    except Exception as e:
        if verbose:
            console.print(f"[bold red]Unexpected Error: {str(e)}[/bold red]")
        raise
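A minimal direct-call sketch for code_generator, assuming the package is installed and whatever LLM credentials llm_invoke needs are already configured:

from pdd.code_generator import code_generator

code, total_cost, model_name = code_generator(
    prompt="Write a Python function that reverses a string.",
    language="python",
    strength=0.5,       # validated above to be in [0, 1]
    temperature=0.0,    # validated above to be in [0, 2]
    verbose=True,
)
print(model_name, f"${total_cost:.6f}")
print(code)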
pdd/code_generator_main.py
ADDED
@@ -0,0 +1,66 @@
import sys
from typing import Tuple, Optional
import click
from rich import print as rprint

from .construct_paths import construct_paths
from .code_generator import code_generator

def code_generator_main(ctx: click.Context, prompt_file: str, output: Optional[str]) -> Tuple[str, float, str]:
    """
    Main function to generate code from a prompt file.

    :param ctx: Click context containing command-line parameters.
    :param prompt_file: Path to the prompt file used to generate the code.
    :param output: Optional path to save the generated code.
    :return: A tuple containing the generated code, total cost, and model name used.
    """
    try:
        # Construct file paths
        input_file_paths = {
            "prompt_file": prompt_file
        }
        command_options = {
            "output": output
        }
        input_strings, output_file_paths, language = construct_paths(
            input_file_paths=input_file_paths,
            force=ctx.obj.get('force', False),
            quiet=ctx.obj.get('quiet', False),
            command="generate",
            command_options=command_options
        )

        # Load input file
        prompt_content = input_strings["prompt_file"]

        # Generate code
        strength = ctx.obj.get('strength', 0.5)
        temperature = ctx.obj.get('temperature', 0.0)
        generated_code, total_cost, model_name = code_generator(
            prompt_content,
            language,
            strength,
            temperature,
            verbose=not ctx.obj.get('quiet', False)
        )

        # Save results
        if output_file_paths["output"]:
            with open(output_file_paths["output"], 'w') as f:
                f.write(generated_code)

        # Provide user feedback
        if not ctx.obj.get('quiet', False):
            rprint("[bold green]Code generation completed successfully.[/bold green]")
            rprint(f"[bold]Model used:[/bold] {model_name}")
            rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
            if output:
                rprint(f"[bold]Code saved to:[/bold] {output_file_paths['output']}")

        return generated_code, total_cost, model_name

    except Exception as e:
        if not ctx.obj.get('quiet', False):
            rprint(f"[bold red]Error:[/bold red] {str(e)}")
        sys.exit(1)
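As above, a hypothetical sketch of wiring code_generator_main into a generate command; the command and option names are assumptions, not the package's documented CLI.

import click
from pdd.code_generator_main import code_generator_main

@click.command(name="generate")
@click.argument("prompt_file")
@click.option("--output", default=None)
@click.pass_context
def generate_command(ctx, prompt_file, output):
    # code_generator_main reads these keys via ctx.obj.get(...); values are placeholders.
    ctx.obj = {"force": False, "quiet": False, "strength": 0.5, "temperature": 0.0}
    code_generator_main(ctx, prompt_file, output)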
pdd/comment_line.py
ADDED
@@ -0,0 +1,35 @@
# To achieve the functionality described, we can write a Python function `comment_line` that handles the different scenarios for commenting out a line of code based on the provided `comment_characters`. Here's how you can implement this function:

# ```python
def comment_line(code_line, comment_characters):
    # Check if the language requires deletion of the line
    if comment_characters == 'del':
        return ''

    # Check if the language uses separate start and end comment characters
    if ' ' in comment_characters:
        start_comment, end_comment = comment_characters.split(' ', 1)
        return f"{start_comment}{code_line}{end_comment}"

    # For languages with a single comment character
    return f"{comment_characters}{code_line}"

# Example usage:
# Python style comment
# print(comment_line("print('Hello World!')", "#"))  # Output: "#print('Hello World!')"

# # HTML style comment
# print(comment_line("<h1>Hello World!</h1>", "<!-- -->"))  # Output: "<!--<h1>Hello World!</h1>-->"

# # Language with no comment character (deletion)
# print(comment_line("some code", "del"))  # Output: ""
# ```

# ### Explanation:
# 1. **Deletion Case**: If `comment_characters` is `'del'`, the function returns an empty string, effectively "deleting" the line.

# 2. **Encapsulating Comments**: If `comment_characters` contains a space, it indicates that the language uses separate start and end comment characters. The function splits the string into `start_comment` and `end_comment` and returns the line encapsulated by these.

# 3. **Single Comment Character**: For languages with a single comment character (like Python's `#`), the function simply prepends the `comment_characters` to the `code_line`.

# This function should handle the specified scenarios for commenting out lines in different programming languages.
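A quick sanity check of the three branches, mirroring the commented examples in the file (the import path assumes the installed package layout):

from pdd.comment_line import comment_line

# Single comment character, start/end pair, and the 'del' convention.
assert comment_line("print('Hello World!')", "#") == "#print('Hello World!')"
assert comment_line("<h1>Hello World!</h1>", "<!-- -->") == "<!--<h1>Hello World!</h1>-->"
assert comment_line("some code", "del") == ""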
pdd/conflicts_in_prompts.py
ADDED
@@ -0,0 +1,143 @@
from typing import List, Tuple
from pydantic import BaseModel, Field
from rich import print as rprint
from rich.markdown import Markdown
from .load_prompt_template import load_prompt_template
from .llm_invoke import llm_invoke

class ConflictChange(BaseModel):
    prompt_name: str = Field(description="Name of the prompt that needs to be changed")
    change_instructions: str = Field(description="Detailed instructions on how to change the prompt")

class ConflictResponse(BaseModel):
    changes_list: List[ConflictChange] = Field(description="List of changes needed to resolve conflicts")

def conflicts_in_prompts(
    prompt1: str,
    prompt2: str,
    strength: float = 0.5,
    temperature: float = 0,
    verbose: bool = False
) -> Tuple[List[dict], float, str]:
    """
    Analyze two prompts for conflicts and suggest resolutions.

    Args:
        prompt1 (str): First prompt to compare
        prompt2 (str): Second prompt to compare
        strength (float): Model strength (0-1)
        temperature (float): Model temperature (0-1)
        verbose (bool): Whether to print detailed information

    Returns:
        Tuple[List[dict], float, str]: (changes list, total cost, model name)
    """
    # Input validation - let these raise ValueError directly
    if not prompt1 or not prompt2:
        raise ValueError("Both prompts must be provided")
    if not (0 <= strength <= 1):
        raise ValueError("Strength must be between 0 and 1")
    if not (0 <= temperature <= 1):
        raise ValueError("Temperature must be between 0 and 1")

    total_cost = 0.0
    model_name = ""

    try:
        # Step 1: Load prompt templates
        conflict_prompt = load_prompt_template("conflict_LLM")
        extract_prompt = load_prompt_template("extract_conflict_LLM")

        if not conflict_prompt or not extract_prompt:
            raise ValueError("Failed to load prompt templates")

        # Step 2: First LLM call to analyze conflicts
        input_json = {
            "PROMPT1": prompt1,
            "PROMPT2": prompt2
        }

        if verbose:
            rprint("[blue]Analyzing prompts for conflicts...[/blue]")

        conflict_response = llm_invoke(
            prompt=conflict_prompt,
            input_json=input_json,
            strength=strength,
            temperature=temperature,
            verbose=verbose
        )

        total_cost += conflict_response['cost']
        model_name = conflict_response['model_name']

        if verbose:
            rprint(Markdown(conflict_response['result']))

        # Step 3: Second LLM call to extract structured conflicts
        extract_input = {
            "llm_output": conflict_response['result']
        }

        if verbose:
            rprint("[blue]Extracting structured conflict information...[/blue]")

        extract_response = llm_invoke(
            prompt=extract_prompt,
            input_json=extract_input,
            strength=0.89,  # As specified
            temperature=temperature,
            output_pydantic=ConflictResponse,
            verbose=verbose
        )

        total_cost += extract_response['cost']

        # Get the changes list from the Pydantic model
        changes_list = [
            change.dict()
            for change in extract_response['result'].changes_list
        ]

        # Step 4: Return results
        return changes_list, total_cost, model_name

    except Exception as e:
        error_msg = f"Error in conflicts_in_prompts: {str(e)}"
        if verbose:
            rprint(f"[red]{error_msg}[/red]")
        if isinstance(e, ValueError):
            raise e
        raise RuntimeError(error_msg)

def main():
    """
    Example usage of the conflicts_in_prompts function.
    """
    # Example prompts
    prompt1 = "Write a formal business email in a serious tone."
    prompt2 = "Write a casual, funny email with jokes."

    try:
        changes_list, total_cost, model_name = conflicts_in_prompts(
            prompt1=prompt1,
            prompt2=prompt2,
            strength=0.7,
            temperature=0,
            verbose=True
        )

        rprint("\n[green]Results:[/green]")
        rprint(f"[blue]Model Used:[/blue] {model_name}")
        rprint(f"[blue]Total Cost:[/blue] ${total_cost:.6f}")

        rprint("\n[blue]Suggested Changes:[/blue]")
        for change in changes_list:
            rprint(f"[yellow]Prompt:[/yellow] {change['prompt_name']}")
            rprint(f"[yellow]Instructions:[/yellow] {change['change_instructions']}\n")

    except Exception as e:
        rprint(f"[red]Error in main: {str(e)}[/red]")

if __name__ == "__main__":
    main()
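For illustration, the shape the second llm_invoke call is asked to produce via output_pydantic=ConflictResponse; the field values below are invented examples, not real model output.

from pdd.conflicts_in_prompts import ConflictChange, ConflictResponse

# Build the structured result by hand to show its shape.
example = ConflictResponse(changes_list=[
    ConflictChange(
        prompt_name="prompt_1",  # conflicts_main later maps 'prompt_1'/'prompt_2' back to file paths
        change_instructions="Drop the strictly serious tone so it no longer conflicts with prompt_2.",
    ),
])
print([change.dict() for change in example.changes_list])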
pdd/conflicts_main.py
ADDED
@@ -0,0 +1,90 @@
import csv
import sys
from typing import List, Dict, Tuple, Optional
import click
from rich import print as rprint

from .construct_paths import construct_paths
from .conflicts_in_prompts import conflicts_in_prompts

def conflicts_main(ctx: click.Context, prompt1: str, prompt2: str, output: Optional[str], verbose: bool = False) -> Tuple[List[Dict], float, str]:
    """
    Main function to analyze conflicts between two prompts.

    :param ctx: Click context containing command-line parameters.
    :param prompt1: Path to the first prompt file.
    :param prompt2: Path to the second prompt file.
    :param output: Optional path to save the output CSV file.
    :param verbose: Optional parameter to control verbosity (default: False).
    :return: A tuple containing the list of conflicts, total cost, and model name used.
    """
    try:
        # Construct file paths
        input_file_paths = {
            "prompt1": prompt1,
            "prompt2": prompt2
        }
        command_options = {
            "output": output
        }
        input_strings, output_file_paths, _ = construct_paths(
            input_file_paths=input_file_paths,
            force=ctx.obj.get('force', False),
            quiet=ctx.obj.get('quiet', False),
            command="conflicts",
            command_options=command_options
        )

        # Load input files
        prompt1_content = input_strings["prompt1"]
        prompt2_content = input_strings["prompt2"]

        # Analyze conflicts
        strength = ctx.obj.get('strength', 0.9)
        temperature = ctx.obj.get('temperature', 0)
        conflicts, total_cost, model_name = conflicts_in_prompts(
            prompt1_content,
            prompt2_content,
            strength,
            temperature
        )

        # Replace prompt1 and prompt2 with actual file paths
        for conflict in conflicts:
            if conflict['prompt_name'] == 'prompt_1':
                conflict['prompt_name'] = prompt1
            elif conflict['prompt_name'] == 'prompt_2':
                conflict['prompt_name'] = prompt2

        # Save results
        with open(output_file_paths["output"], 'w', newline='') as csvfile:
            fieldnames = ['prompt_name', 'change_instructions']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for conflict in conflicts:
                writer.writerow(conflict)

        # Provide user feedback
        if not ctx.obj.get('quiet', False):
            rprint("[bold green]Conflict analysis completed successfully.[/bold green]")
            rprint(f"[bold]Model used:[/bold] {model_name}")
            rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
            if output:
                rprint(f"[bold]Results saved to:[/bold] {output_file_paths['output']}")

        # Always print conflicts, even in quiet mode
        rprint("[bold]Conflicts detected:[/bold]")
        if conflicts:
            for conflict in conflicts:
                rprint(f"[bold]Prompt:[/bold] {conflict['prompt_name']}")
                rprint(f"[bold]Instructions:[/bold] {conflict['change_instructions']}")
                rprint("---")
        else:
            rprint("No conflicts detected or changes suggested.")

        return conflicts, total_cost, model_name

    except Exception as e:
        if not ctx.obj.get('quiet', False):
            rprint(f"[bold red]Error:[/bold red] {str(e)}")
        sys.exit(1)
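A small sketch of consuming the CSV that conflicts_main writes; the column names come from the fieldnames list above, while the file path is an assumption.

import csv

# Read back the conflict suggestions written by conflicts_main.
with open("conflicts.csv", newline="") as csvfile:  # hypothetical output path
    for row in csv.DictReader(csvfile):
        print(row["prompt_name"], "->", row["change_instructions"])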