pdd-cli 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pdd-cli might be problematic.

Files changed (95)
  1. pdd/__init__.py +0 -0
  2. pdd/auto_deps_main.py +98 -0
  3. pdd/auto_include.py +175 -0
  4. pdd/auto_update.py +73 -0
  5. pdd/bug_main.py +99 -0
  6. pdd/bug_to_unit_test.py +159 -0
  7. pdd/change.py +141 -0
  8. pdd/change_main.py +240 -0
  9. pdd/cli.py +607 -0
  10. pdd/cmd_test_main.py +155 -0
  11. pdd/code_generator.py +117 -0
  12. pdd/code_generator_main.py +66 -0
  13. pdd/comment_line.py +35 -0
  14. pdd/conflicts_in_prompts.py +143 -0
  15. pdd/conflicts_main.py +90 -0
  16. pdd/construct_paths.py +251 -0
  17. pdd/context_generator.py +133 -0
  18. pdd/context_generator_main.py +73 -0
  19. pdd/continue_generation.py +140 -0
  20. pdd/crash_main.py +127 -0
  21. pdd/data/language_format.csv +61 -0
  22. pdd/data/llm_model.csv +15 -0
  23. pdd/detect_change.py +142 -0
  24. pdd/detect_change_main.py +100 -0
  25. pdd/find_section.py +28 -0
  26. pdd/fix_code_loop.py +212 -0
  27. pdd/fix_code_module_errors.py +143 -0
  28. pdd/fix_error_loop.py +216 -0
  29. pdd/fix_errors_from_unit_tests.py +240 -0
  30. pdd/fix_main.py +138 -0
  31. pdd/generate_output_paths.py +194 -0
  32. pdd/generate_test.py +140 -0
  33. pdd/get_comment.py +55 -0
  34. pdd/get_extension.py +52 -0
  35. pdd/get_language.py +41 -0
  36. pdd/git_update.py +84 -0
  37. pdd/increase_tests.py +93 -0
  38. pdd/insert_includes.py +150 -0
  39. pdd/llm_invoke.py +304 -0
  40. pdd/load_prompt_template.py +59 -0
  41. pdd/pdd_completion.fish +72 -0
  42. pdd/pdd_completion.sh +141 -0
  43. pdd/pdd_completion.zsh +418 -0
  44. pdd/postprocess.py +121 -0
  45. pdd/postprocess_0.py +52 -0
  46. pdd/preprocess.py +199 -0
  47. pdd/preprocess_main.py +72 -0
  48. pdd/process_csv_change.py +182 -0
  49. pdd/prompts/auto_include_LLM.prompt +230 -0
  50. pdd/prompts/bug_to_unit_test_LLM.prompt +17 -0
  51. pdd/prompts/change_LLM.prompt +34 -0
  52. pdd/prompts/conflict_LLM.prompt +23 -0
  53. pdd/prompts/continue_generation_LLM.prompt +3 -0
  54. pdd/prompts/detect_change_LLM.prompt +65 -0
  55. pdd/prompts/example_generator_LLM.prompt +10 -0
  56. pdd/prompts/extract_auto_include_LLM.prompt +6 -0
  57. pdd/prompts/extract_code_LLM.prompt +22 -0
  58. pdd/prompts/extract_conflict_LLM.prompt +19 -0
  59. pdd/prompts/extract_detect_change_LLM.prompt +19 -0
  60. pdd/prompts/extract_program_code_fix_LLM.prompt +16 -0
  61. pdd/prompts/extract_prompt_change_LLM.prompt +7 -0
  62. pdd/prompts/extract_prompt_split_LLM.prompt +9 -0
  63. pdd/prompts/extract_prompt_update_LLM.prompt +8 -0
  64. pdd/prompts/extract_promptline_LLM.prompt +11 -0
  65. pdd/prompts/extract_unit_code_fix_LLM.prompt +332 -0
  66. pdd/prompts/extract_xml_LLM.prompt +7 -0
  67. pdd/prompts/fix_code_module_errors_LLM.prompt +17 -0
  68. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +62 -0
  69. pdd/prompts/generate_test_LLM.prompt +12 -0
  70. pdd/prompts/increase_tests_LLM.prompt +16 -0
  71. pdd/prompts/insert_includes_LLM.prompt +30 -0
  72. pdd/prompts/split_LLM.prompt +94 -0
  73. pdd/prompts/summarize_file_LLM.prompt +11 -0
  74. pdd/prompts/trace_LLM.prompt +30 -0
  75. pdd/prompts/trim_results_LLM.prompt +83 -0
  76. pdd/prompts/trim_results_start_LLM.prompt +45 -0
  77. pdd/prompts/unfinished_prompt_LLM.prompt +18 -0
  78. pdd/prompts/update_prompt_LLM.prompt +19 -0
  79. pdd/prompts/xml_convertor_LLM.prompt +54 -0
  80. pdd/split.py +119 -0
  81. pdd/split_main.py +103 -0
  82. pdd/summarize_directory.py +212 -0
  83. pdd/trace.py +135 -0
  84. pdd/trace_main.py +108 -0
  85. pdd/track_cost.py +102 -0
  86. pdd/unfinished_prompt.py +114 -0
  87. pdd/update_main.py +96 -0
  88. pdd/update_prompt.py +115 -0
  89. pdd/xml_tagger.py +122 -0
  90. pdd_cli-0.0.2.dist-info/LICENSE +7 -0
  91. pdd_cli-0.0.2.dist-info/METADATA +225 -0
  92. pdd_cli-0.0.2.dist-info/RECORD +95 -0
  93. pdd_cli-0.0.2.dist-info/WHEEL +5 -0
  94. pdd_cli-0.0.2.dist-info/entry_points.txt +2 -0
  95. pdd_cli-0.0.2.dist-info/top_level.txt +1 -0
pdd/trace.py ADDED
@@ -0,0 +1,135 @@
+ from typing import Tuple, Optional
+ from rich import print
+ from rich.console import Console
+ from pydantic import BaseModel, Field
+ import difflib
+ from .load_prompt_template import load_prompt_template
+ from .preprocess import preprocess
+ from .llm_invoke import llm_invoke
+
+ console = Console()
+
+ class PromptLineOutput(BaseModel):
+     prompt_line: str = Field(description="The line from the prompt file that matches the code")
+
+ def trace(
+     code_file: str,
+     code_line: int,
+     prompt_file: str,
+     strength: float = 0.5,
+     temperature: float = 0,
+     verbose: bool = False
+ ) -> Tuple[Optional[int], float, str]:
+     """
+     Trace a line of code back to its corresponding line in the prompt file.
+
+     Args:
+         code_file (str): Content of the code file
+         code_line (int): Line number in the code file
+         prompt_file (str): Content of the prompt file
+         strength (float, optional): Model strength. Defaults to 0.5
+         temperature (float, optional): Model temperature. Defaults to 0
+         verbose (bool, optional): Whether to print detailed information. Defaults to False
+
+     Returns:
+         Tuple[Optional[int], float, str]: (prompt line number, total cost, model name)
+     """
+     try:
+         # Input validation
+         if not all([code_file, prompt_file]) or not isinstance(code_line, int):
+             raise ValueError("Invalid input parameters")
+
+         total_cost = 0
+         model_name = ""
+
+         # Step 1: Extract the code line string
+         code_lines = code_file.splitlines()
+         if code_line < 1 or code_line > len(code_lines):
+             raise ValueError(f"Code line number {code_line} is out of range")
+         code_str = code_lines[code_line - 1]
+
+         # Step 2 & 3: Load and preprocess trace_LLM prompt
+         trace_prompt = load_prompt_template("trace_LLM")
+         if not trace_prompt:
+             raise ValueError("Failed to load trace_LLM prompt template")
+         trace_prompt = preprocess(trace_prompt, recursive=False, double_curly_brackets=False)
+
+         # Step 4: First LLM invocation
+         if verbose:
+             console.print("[bold blue]Running trace analysis...[/bold blue]")
+
+         trace_response = llm_invoke(
+             prompt=trace_prompt,
+             input_json={
+                 "CODE_FILE": code_file,
+                 "CODE_STR": code_str,
+                 "PROMPT_FILE": prompt_file
+             },
+             strength=strength,
+             temperature=temperature,
+             verbose=verbose
+         )
+
+         total_cost += trace_response['cost']
+         model_name = trace_response['model_name']
+
+         # Step 5: Load and preprocess extract_promptline_LLM prompt
+         extract_prompt = load_prompt_template("extract_promptline_LLM")
+         if not extract_prompt:
+             raise ValueError("Failed to load extract_promptline_LLM prompt template")
+         extract_prompt = preprocess(extract_prompt, recursive=False, double_curly_brackets=False)
+
+         # Step 6: Second LLM invocation
+         if verbose:
+             console.print("[bold blue]Extracting prompt line...[/bold blue]")
+
+         extract_response = llm_invoke(
+             prompt=extract_prompt,
+             input_json={"llm_output": trace_response['result']},
+             strength=strength,
+             temperature=temperature,
+             verbose=verbose,
+             output_pydantic=PromptLineOutput
+         )
+
+         total_cost += extract_response['cost']
+         prompt_line_str = extract_response['result'].prompt_line
+
+         # Step 7: Find matching line in prompt file using fuzzy matching
+         prompt_lines = prompt_file.splitlines()
+         best_match = None
+         highest_ratio = 0
+
+         if verbose:
+             console.print(f"Searching for line: {prompt_line_str}")
+
+         normalized_search = prompt_line_str.strip()
+
+         for i, line in enumerate(prompt_lines, 1):
+             normalized_line = line.strip()
+             ratio = difflib.SequenceMatcher(None, normalized_search, normalized_line).ratio()
+
+             if verbose:
+                 console.print(f"Line {i}: '{line}' - Match ratio: {ratio}")
+
+             # Require a ratio above 0.9 for more precise matching
+             if ratio > highest_ratio and ratio > 0.9:
+                 # Additional check for exact content match after normalization
+                 if normalized_search == normalized_line:
+                     highest_ratio = ratio
+                     best_match = i
+                     break  # Exit on exact match
+                 highest_ratio = ratio
+                 best_match = i
+
+         # Step 8: Return results
+         if verbose:
+             console.print(f"[green]Found matching line: {best_match}[/green]")
+             console.print(f"[green]Total cost: ${total_cost:.6f}[/green]")
+             console.print(f"[green]Model used: {model_name}[/green]")
+
+         return best_match, total_cost, model_name
+
+     except Exception as e:
+         console.print(f"[bold red]Error in trace function: {str(e)}[/bold red]")
+         return None, 0.0, ""
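
For orientation, here is a minimal sketch of calling trace directly. Note that it takes file contents, not paths; the sample.py and sample.prompt files and the line number below are made-up examples, and real invocations normally go through trace_main (next file).

from pdd.trace import trace

code_text = open("sample.py").read()        # contents of the generated code file
prompt_text = open("sample.prompt").read()  # contents of the prompt that produced it

# Ask which prompt line is responsible for line 12 of the generated code
line_no, cost, model = trace(code_text, 12, prompt_text, strength=0.5)
if line_no is None:
    print("No prompt line matched above the 0.9 similarity threshold (or an error occurred)")
else:
    print(f"Code line 12 traces to prompt line {line_no} (cost ${cost:.6f}, model {model})")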
pdd/trace_main.py ADDED
@@ -0,0 +1,108 @@
+ import click
+ from rich import print as rprint
+ from typing import Tuple, Optional
+ import os
+ import logging
+ from .construct_paths import construct_paths
+ from .trace import trace
+
+ logging.basicConfig(level=logging.WARNING)
+ logger = logging.getLogger(__name__)
+
+ def trace_main(ctx: click.Context, prompt_file: str, code_file: str, code_line: int, output: Optional[str]) -> Tuple[Optional[int], float, str]:
+     """
+     Handle the core logic for the 'trace' command in the pdd CLI.
+
+     Args:
+         ctx (click.Context): The Click context object containing CLI options and parameters.
+         prompt_file (str): Path to the prompt file.
+         code_file (str): Path to the generated code file.
+         code_line (int): Line number in the code file to trace back to the prompt.
+         output (Optional[str]): Path to save the trace analysis results.
+
+     Returns:
+         Tuple[Optional[int], float, str]: A tuple containing the prompt line number, total cost, and model name.
+     """
+     quiet = ctx.obj.get('quiet', False)
+     logger.debug(f"Starting trace_main with quiet={quiet}")
+     try:
+         # Construct file paths
+         input_file_paths = {
+             "prompt_file": prompt_file,
+             "code_file": code_file
+         }
+         command_options = {
+             "output": output
+         }
+         input_strings, output_file_paths, _ = construct_paths(
+             input_file_paths=input_file_paths,
+             force=ctx.obj.get('force', False),
+             quiet=quiet,
+             command="trace",
+             command_options=command_options
+         )
+         logger.debug("File paths constructed successfully")
+
+         # Load input files
+         prompt_content = input_strings["prompt_file"]
+         code_content = input_strings["code_file"]
+         logger.debug("Input files loaded")
+
+         # Perform trace analysis
+         strength = ctx.obj.get('strength', 0.5)
+         temperature = ctx.obj.get('temperature', 0.0)
+         try:
+             prompt_line, total_cost, model_name = trace(
+                 code_content, code_line, prompt_content, strength, temperature
+             )
+             logger.debug(f"Trace analysis completed: prompt_line={prompt_line}, total_cost={total_cost}, model_name={model_name}")
+         except ValueError as e:
+             if not quiet:
+                 rprint(f"[bold red]Invalid input: {e}[/bold red]")
+             logger.error(f"ValueError during trace analysis: {e}")
+             ctx.exit(1)
+
+         # Save results
+         if output:
+             output_path = output_file_paths.get("output")
+             output_dir = os.path.dirname(os.path.abspath(output_path))
+             if output_dir and not os.path.exists(output_dir):
+                 try:
+                     os.makedirs(output_dir, exist_ok=True)
+                     logger.debug(f"Created output directory: {output_dir}")
+                 except Exception as e:
+                     if not quiet:
+                         rprint(f"[bold red]Failed to create output directory: {e}[/bold red]")
+                     logger.error(f"Error creating output directory: {e}")
+                     ctx.exit(1)
+             try:
+                 with open(output_path, 'w') as f:
+                     f.write(f"Prompt Line: {prompt_line}\n")
+                     f.write(f"Total Cost: ${total_cost:.6f}\n")
+                     f.write(f"Model Used: {model_name}\n")
+                 logger.debug(f"Results saved to {output_path}")
+             except IOError as e:
+                 if not quiet:
+                     rprint(f"[bold red]Failed to write results: {e}[/bold red]")
+                 logger.error(f"IOError while saving results: {e}")
+                 ctx.exit(1)
+
+         # Provide user feedback
+         if not quiet:
+             rprint("[bold green]Trace Analysis Complete[/bold green]")
+             rprint(f"Corresponding prompt line: [cyan]{prompt_line}[/cyan]")
+             rprint(f"Total cost: [yellow]${total_cost:.6f}[/yellow]")
+             rprint(f"Model used: [magenta]{model_name}[/magenta]")
+
+         return prompt_line, total_cost, model_name
+
+     except FileNotFoundError as e:
+         if not quiet:
+             rprint(f"[bold red]File not found: {e}[/bold red]")
+         logger.error(f"FileNotFoundError: {e}")
+         ctx.exit(1)
+     except Exception as e:
+         if not quiet:
+             rprint(f"[bold red]An unexpected error occurred: {e}[/bold red]")
+         logger.error(f"Unexpected error: {e}")
+         ctx.exit(1)
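
The command registration itself lives in pdd/cli.py (also added in this release but not shown here). As a rough sketch, assuming illustrative option names, trace_main would be wired to a Click command along these lines:

import click
from pdd.trace_main import trace_main

@click.group()
@click.option("--strength", type=float, default=0.5)
@click.option("--quiet", is_flag=True)
@click.pass_context
def cli(ctx, strength, quiet):
    # trace_main reads these options back out of ctx.obj
    ctx.obj = {"strength": strength, "quiet": quiet, "force": False}

@cli.command("trace")
@click.argument("prompt_file")
@click.argument("code_file")
@click.argument("code_line", type=int)
@click.option("--output", default=None)
@click.pass_context
def trace_cmd(ctx, prompt_file, code_file, code_line, output):
    trace_main(ctx, prompt_file, code_file, code_line, output)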
pdd/track_cost.py ADDED
@@ -0,0 +1,102 @@
+ import functools
+ from datetime import datetime
+ import csv
+ import os
+ import click
+ from rich import print as rprint
+ from typing import Any, Tuple
+
+ def track_cost(func):
+     @functools.wraps(func)
+     def wrapper(*args, **kwargs):
+         # silent=True makes get_current_context return None instead of raising
+         ctx = click.get_current_context(silent=True)
+         if ctx is None:
+             return func(*args, **kwargs)
+
+         start_time = datetime.now()
+         result = func(*args, **kwargs)
+         end_time = datetime.now()  # currently unused; only start_time is logged
+
+         try:
+             if ctx.obj and hasattr(ctx.obj, 'get'):
+                 output_cost_path = ctx.obj.get('output_cost') or os.getenv('PDD_OUTPUT_COST_PATH')
+             else:
+                 output_cost_path = os.getenv('PDD_OUTPUT_COST_PATH')
+
+             if not output_cost_path:
+                 return result
+
+             command_name = ctx.command.name
+
+             cost, model_name = extract_cost_and_model(result)
+
+             input_files, output_files = collect_files(args, kwargs)
+
+             timestamp = start_time.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
+
+             row = {
+                 'timestamp': timestamp,
+                 'model': model_name,
+                 'command': command_name,
+                 'cost': cost,
+                 'input_files': ';'.join(input_files),
+                 'output_files': ';'.join(output_files),
+             }
+
+             file_exists = os.path.isfile(output_cost_path)
+             fieldnames = ['timestamp', 'model', 'command', 'cost', 'input_files', 'output_files']
+
+             with open(output_cost_path, 'a', newline='', encoding='utf-8') as csvfile:
+                 writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+                 if not file_exists:
+                     writer.writeheader()
+                 writer.writerow(row)
+
+         except Exception as e:
+             rprint(f"[red]Error tracking cost: {e}[/red]")
+
+         return result
+
+     return wrapper
+
+ def extract_cost_and_model(result: Any) -> Tuple[Any, str]:
+     # Commands return (..., cost, model_name); take the last two elements
+     if isinstance(result, tuple) and len(result) >= 3:
+         return result[-2], result[-1]
+     return '', ''
+
+ def collect_files(args, kwargs):
+     input_files = []
+     output_files = []
+
+     # Collect from args
+     for arg in args:
+         if isinstance(arg, str):
+             input_files.append(arg)
+         elif isinstance(arg, list):
+             input_files.extend([f for f in arg if isinstance(f, str)])
+
+     # Collect from kwargs
+     for k, v in kwargs.items():
+         if k == 'output_cost':
+             continue
+         if isinstance(v, str):
+             if k.startswith('output'):
+                 output_files.append(v)
+             else:
+                 input_files.append(v)
+         elif isinstance(v, list):
+             if k.startswith('output'):
+                 output_files.extend([f for f in v if isinstance(f, str)])
+             else:
+                 input_files.extend([f for f in v if isinstance(f, str)])
+
+     return input_files, output_files
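
A sketch of how the decorator is meant to be used. The command below is hypothetical; only the trailing (cost, model_name) convention in the return value and the PDD_OUTPUT_COST_PATH variable come from the code above.

import click
from pdd.track_cost import track_cost

@click.command()
@click.argument("prompt_file")
@track_cost  # applied below Click's decorators so it wraps the command callback itself
def generate(prompt_file):
    # ... invoke the LLM ...
    return "generated code", 0.000123, "example-model"  # (result, cost, model_name)

# With PDD_OUTPUT_COST_PATH=costs.csv set, each run appends a CSV row:
# timestamp, model, command, cost, input_files, output_files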
pdd/unfinished_prompt.py ADDED
@@ -0,0 +1,114 @@
+ from typing import Tuple
+ from pydantic import BaseModel, Field
+ from rich import print as rprint
+ from .load_prompt_template import load_prompt_template
+ from .llm_invoke import llm_invoke
+
+ class PromptAnalysis(BaseModel):
+     reasoning: str = Field(description="Structured reasoning for the completeness assessment")
+     is_finished: bool = Field(description="Boolean indicating whether the prompt is complete")
+
+ def unfinished_prompt(
+     prompt_text: str,
+     strength: float = 0.5,
+     temperature: float = 0,
+     verbose: bool = False
+ ) -> Tuple[str, bool, float, str]:
+     """
+     Analyze whether a given prompt is complete or needs to continue.
+
+     Args:
+         prompt_text (str): The prompt text to analyze
+         strength (float, optional): Strength of the LLM model. Defaults to 0.5.
+         temperature (float, optional): Temperature of the LLM model. Defaults to 0.
+         verbose (bool, optional): Whether to print detailed information. Defaults to False.
+
+     Returns:
+         Tuple[str, bool, float, str]: Contains:
+             - reasoning: Structured reasoning for the completeness assessment
+             - is_finished: Boolean indicating whether the prompt is complete
+             - total_cost: Total cost of the analysis
+             - model_name: Name of the LLM model used
+
+     Raises:
+         ValueError: If input parameters are invalid
+         Exception: If there's an error loading the prompt template or invoking the LLM
+     """
+     try:
+         # Input validation
+         if not isinstance(prompt_text, str) or not prompt_text.strip():
+             raise ValueError("Prompt text must be a non-empty string")
+
+         if not 0 <= strength <= 1:
+             raise ValueError("Strength must be between 0 and 1")
+
+         if not 0 <= temperature <= 1:
+             raise ValueError("Temperature must be between 0 and 1")
+
+         # Step 1: Load the prompt template
+         if verbose:
+             rprint("[blue]Loading prompt template...[/blue]")
+
+         prompt_template = load_prompt_template("unfinished_prompt_LLM")
+         if not prompt_template:
+             raise Exception("Failed to load prompt template")
+
+         # Step 2: Prepare input and invoke LLM
+         input_json = {"PROMPT_TEXT": prompt_text}
+
+         if verbose:
+             rprint("[blue]Invoking LLM model...[/blue]")
+             try:
+                 rprint(f"Input text: {prompt_text}")
+             except Exception:
+                 print(f"Input text: {prompt_text}")
+             rprint(f"Model strength: {strength}")
+             rprint(f"Temperature: {temperature}")
+
+         response = llm_invoke(
+             prompt=prompt_template,
+             input_json=input_json,
+             strength=strength,
+             temperature=temperature,
+             verbose=verbose,
+             output_pydantic=PromptAnalysis
+         )
+
+         # Step 3: Extract and return results
+         result: PromptAnalysis = response['result']
+         total_cost = response['cost']
+         model_name = response['model_name']
+
+         if verbose:
+             rprint("[green]Analysis complete![/green]")
+             rprint(f"Reasoning: {result.reasoning}")
+             rprint(f"Is finished: {result.is_finished}")
+             rprint(f"Total cost: ${total_cost:.6f}")
+             rprint(f"Model used: {model_name}")
+
+         return (
+             result.reasoning,
+             result.is_finished,
+             total_cost,
+             model_name
+         )
+
+     except Exception as e:
+         rprint("[red]Error in unfinished_prompt:[/red]", str(e))
+         raise
+
+ # Example usage
+ if __name__ == "__main__":
+     sample_prompt = "Write a function that"
+     try:
+         reasoning, is_finished, cost, model = unfinished_prompt(
+             prompt_text=sample_prompt,
+             verbose=True
+         )
+         rprint("\n[blue]Results:[/blue]")
+         rprint(f"Complete? {'Yes' if is_finished else 'No'}")
+         rprint(f"Reasoning: {reasoning}")
+         rprint(f"Cost: ${cost:.6f}")
+         rprint(f"Model: {model}")
+     except Exception as e:
+         rprint("[red]Error in example:[/red]", str(e))
pdd/update_main.py ADDED
@@ -0,0 +1,96 @@
+ import sys
+ from typing import Tuple, Optional
+ import click
+ from rich import print as rprint
+
+ from .construct_paths import construct_paths
+ from .update_prompt import update_prompt
+ from .git_update import git_update
+
+ def update_main(
+     ctx: click.Context,
+     input_prompt_file: str,
+     modified_code_file: str,
+     input_code_file: Optional[str],
+     output: Optional[str],
+     git: bool = False,
+ ) -> Tuple[str, float, str]:
+     """
+     CLI wrapper for updating prompts based on modified code.
+
+     :param ctx: Click context object containing CLI options and parameters.
+     :param input_prompt_file: Path to the original prompt file.
+     :param modified_code_file: Path to the modified code file.
+     :param input_code_file: Optional path to the original code file. If None, Git history is used if --git is True.
+     :param output: Optional path to save the updated prompt.
+     :param git: Use Git history to retrieve the original code if True.
+     :return: Tuple containing the updated prompt, total cost, and model name.
+     """
+     try:
+         # Construct file paths
+         input_file_paths = {"input_prompt_file": input_prompt_file, "modified_code_file": modified_code_file}
+         if input_code_file:
+             input_file_paths["input_code_file"] = input_code_file
+
+         # Validate input requirements
+         if not git and input_code_file is None:
+             raise ValueError("Must provide an input code file or use --git option.")
+
+         command_options = {"output": output}
+         input_strings, output_file_paths, _ = construct_paths(
+             input_file_paths=input_file_paths,
+             force=ctx.obj.get("force", False),
+             quiet=ctx.obj.get("quiet", False),
+             command="update",
+             command_options=command_options,
+         )
+
+         # Extract input strings
+         input_prompt = input_strings["input_prompt_file"]
+         modified_code = input_strings["modified_code_file"]
+         input_code = input_strings.get("input_code_file")
+
+         # Update prompt using appropriate method
+         if git:
+             if input_code_file:
+                 raise ValueError("Cannot use both --git and provide an input code file.")
+             modified_prompt, total_cost, model_name = git_update(
+                 input_prompt=input_prompt,
+                 modified_code_file=modified_code_file,
+                 strength=ctx.obj.get("strength", 0.5),
+                 temperature=ctx.obj.get("temperature", 0),
+                 verbose=ctx.obj.get("verbose", False)
+             )
+         else:
+             if input_code is None:
+                 raise ValueError("Must provide an input code file or use --git option.")
+             modified_prompt, total_cost, model_name = update_prompt(
+                 input_prompt=input_prompt,
+                 input_code=input_code,
+                 modified_code=modified_code,
+                 strength=ctx.obj.get("strength", 0.5),
+                 temperature=ctx.obj.get("temperature", 0),
+                 verbose=ctx.obj.get("verbose", False)
+             )
+
+         # Save the modified prompt
+         with open(output_file_paths["output"], "w") as f:
+             f.write(modified_prompt)
+
+         # Provide user feedback
+         if not ctx.obj.get("quiet", False):
+             rprint("[bold green]Prompt updated successfully.[/bold green]")
+             rprint(f"[bold]Model used:[/bold] {model_name}")
+             rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
+             rprint(f"[bold]Updated prompt saved to:[/bold] {output_file_paths['output']}")
+
+         return modified_prompt, total_cost, model_name
+
+     except ValueError as e:
+         if not ctx.obj.get("quiet", False):
+             rprint(f"[bold red]Input error:[/bold red] {str(e)}")
+         sys.exit(1)
+     except Exception as e:
+         if not ctx.obj.get("quiet", False):
+             rprint(f"[bold red]Error:[/bold red] {str(e)}")
+         sys.exit(1)
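
As a sketch of the calling convention (file names hypothetical): the original code must come from exactly one place, either an explicit input_code_file or Git history via git=True; passing both, or neither, raises the ValueErrors above.

# Inside a Click command whose ctx.obj carries the global options:
update_main(ctx, "calc_python.prompt", "calc_modified.py",
            input_code_file="calc_original.py", output="calc_updated.prompt")

# ...or let Git history supply the original code:
update_main(ctx, "calc_python.prompt", "calc_modified.py",
            input_code_file=None, output="calc_updated.prompt", git=True)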
pdd/update_prompt.py ADDED
@@ -0,0 +1,115 @@
+ from typing import Tuple
+ from rich.console import Console
+ from rich.markdown import Markdown
+ from pydantic import BaseModel, Field
+ from .load_prompt_template import load_prompt_template
+ from .preprocess import preprocess
+ from .llm_invoke import llm_invoke
+
+ class PromptUpdate(BaseModel):
+     modified_prompt: str = Field(description="The updated prompt that will generate the modified code")
+
+ def update_prompt(
+     input_prompt: str,
+     input_code: str,
+     modified_code: str,
+     strength: float,
+     temperature: float,
+     verbose: bool = False
+ ) -> Tuple[str, float, str]:
+     """
+     Update a prompt based on the original and modified code.
+
+     Args:
+         input_prompt (str): The original prompt that generated the code
+         input_code (str): The original generated code
+         modified_code (str): The modified code
+         strength (float): The strength parameter for the LLM model (0-1)
+         temperature (float): The temperature parameter for the LLM model (0-1)
+         verbose (bool, optional): Whether to print detailed output. Defaults to False.
+
+     Returns:
+         Tuple[str, float, str]: (modified_prompt, total_cost, model_name)
+
+     Raises:
+         ValueError: If input parameters are invalid
+         RuntimeError: If there's an error in LLM processing
+     """
+     console = Console()
+
+     # Input validation
+     if not all([input_prompt, input_code, modified_code]):
+         raise ValueError("All input strings (prompt, code, modified code) must be non-empty")
+
+     if not (0 <= strength <= 1 and 0 <= temperature <= 1):
+         raise ValueError("Strength and temperature must be between 0 and 1")
+
+     try:
+         # Step 1: Load and preprocess prompt templates
+         update_prompt_template = load_prompt_template("update_prompt_LLM")
+         extract_prompt_template = load_prompt_template("extract_prompt_update_LLM")
+
+         if not update_prompt_template or not extract_prompt_template:
+             raise RuntimeError("Failed to load prompt templates")
+
+         update_prompt_processed = preprocess(update_prompt_template, False, False)
+         extract_prompt_processed = preprocess(extract_prompt_template, False, False)
+
+         # Step 2: First LLM invocation
+         if verbose:
+             console.print("[bold blue]Running first LLM invocation...[/bold blue]")
+
+         first_response = llm_invoke(
+             prompt=update_prompt_processed,
+             input_json={
+                 "input_prompt": input_prompt,
+                 "input_code": input_code,
+                 "modified_code": modified_code
+             },
+             strength=strength,
+             temperature=temperature,
+             verbose=verbose
+         )
+
+         if not first_response or not isinstance(first_response, dict) or 'result' not in first_response:
+             raise RuntimeError("First LLM invocation failed")
+
+         # Step 3: Second LLM invocation
+         if verbose:
+             console.print("[bold blue]Running second LLM invocation...[/bold blue]")
+
+         second_response = llm_invoke(
+             prompt=extract_prompt_processed,
+             input_json={"llm_output": first_response['result']},
+             strength=0.5,
+             temperature=temperature,
+             output_pydantic=PromptUpdate,
+             verbose=verbose
+         )
+
+         if not second_response or not isinstance(second_response, dict) or 'result' not in second_response:
+             raise RuntimeError("Second LLM invocation failed")
+
+         # Step 4: Print modified prompt if verbose
+         if verbose:
+             console.print("\n[bold green]Modified Prompt:[/bold green]")
+             console.print(Markdown(second_response['result'].modified_prompt))
+
+         # Step 5: Calculate total cost
+         total_cost = first_response['cost'] + second_response['cost']
+
+         if verbose:
+             console.print(f"\n[bold yellow]Total Cost: ${total_cost:.6f}[/bold yellow]")
+             console.print(f"[bold cyan]Model Used: {first_response['model_name']}[/bold cyan]")
+
+         # Step 6: Return results
+         return (
+             second_response['result'].modified_prompt,
+             total_cost,
+             first_response['model_name']
+         )
+
+     except Exception as e:
+         error_msg = f"Error in update_prompt: {str(e)}"
+         console.print(f"[bold red]{error_msg}[/bold red]")
+         raise RuntimeError(error_msg) from e
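
Finally, a minimal sketch of update_prompt used on its own, with made-up inputs:

from pdd.update_prompt import update_prompt

original_prompt = "Write a Python function add(a, b) that returns their sum."
original_code = "def add(a, b):\n    return a + b\n"
modified_code = "def add(a: int, b: int) -> int:\n    return a + b\n"

new_prompt, cost, model = update_prompt(
    original_prompt, original_code, modified_code,
    strength=0.5, temperature=0.0,
)
print(new_prompt)  # the prompt, rewritten so it would regenerate the type-annotated version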