pdd-cli 0.0.2 (pdd_cli-0.0.2-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pdd-cli has been flagged as possibly problematic.
- pdd/__init__.py +0 -0
- pdd/auto_deps_main.py +98 -0
- pdd/auto_include.py +175 -0
- pdd/auto_update.py +73 -0
- pdd/bug_main.py +99 -0
- pdd/bug_to_unit_test.py +159 -0
- pdd/change.py +141 -0
- pdd/change_main.py +240 -0
- pdd/cli.py +607 -0
- pdd/cmd_test_main.py +155 -0
- pdd/code_generator.py +117 -0
- pdd/code_generator_main.py +66 -0
- pdd/comment_line.py +35 -0
- pdd/conflicts_in_prompts.py +143 -0
- pdd/conflicts_main.py +90 -0
- pdd/construct_paths.py +251 -0
- pdd/context_generator.py +133 -0
- pdd/context_generator_main.py +73 -0
- pdd/continue_generation.py +140 -0
- pdd/crash_main.py +127 -0
- pdd/data/language_format.csv +61 -0
- pdd/data/llm_model.csv +15 -0
- pdd/detect_change.py +142 -0
- pdd/detect_change_main.py +100 -0
- pdd/find_section.py +28 -0
- pdd/fix_code_loop.py +212 -0
- pdd/fix_code_module_errors.py +143 -0
- pdd/fix_error_loop.py +216 -0
- pdd/fix_errors_from_unit_tests.py +240 -0
- pdd/fix_main.py +138 -0
- pdd/generate_output_paths.py +194 -0
- pdd/generate_test.py +140 -0
- pdd/get_comment.py +55 -0
- pdd/get_extension.py +52 -0
- pdd/get_language.py +41 -0
- pdd/git_update.py +84 -0
- pdd/increase_tests.py +93 -0
- pdd/insert_includes.py +150 -0
- pdd/llm_invoke.py +304 -0
- pdd/load_prompt_template.py +59 -0
- pdd/pdd_completion.fish +72 -0
- pdd/pdd_completion.sh +141 -0
- pdd/pdd_completion.zsh +418 -0
- pdd/postprocess.py +121 -0
- pdd/postprocess_0.py +52 -0
- pdd/preprocess.py +199 -0
- pdd/preprocess_main.py +72 -0
- pdd/process_csv_change.py +182 -0
- pdd/prompts/auto_include_LLM.prompt +230 -0
- pdd/prompts/bug_to_unit_test_LLM.prompt +17 -0
- pdd/prompts/change_LLM.prompt +34 -0
- pdd/prompts/conflict_LLM.prompt +23 -0
- pdd/prompts/continue_generation_LLM.prompt +3 -0
- pdd/prompts/detect_change_LLM.prompt +65 -0
- pdd/prompts/example_generator_LLM.prompt +10 -0
- pdd/prompts/extract_auto_include_LLM.prompt +6 -0
- pdd/prompts/extract_code_LLM.prompt +22 -0
- pdd/prompts/extract_conflict_LLM.prompt +19 -0
- pdd/prompts/extract_detect_change_LLM.prompt +19 -0
- pdd/prompts/extract_program_code_fix_LLM.prompt +16 -0
- pdd/prompts/extract_prompt_change_LLM.prompt +7 -0
- pdd/prompts/extract_prompt_split_LLM.prompt +9 -0
- pdd/prompts/extract_prompt_update_LLM.prompt +8 -0
- pdd/prompts/extract_promptline_LLM.prompt +11 -0
- pdd/prompts/extract_unit_code_fix_LLM.prompt +332 -0
- pdd/prompts/extract_xml_LLM.prompt +7 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +17 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +62 -0
- pdd/prompts/generate_test_LLM.prompt +12 -0
- pdd/prompts/increase_tests_LLM.prompt +16 -0
- pdd/prompts/insert_includes_LLM.prompt +30 -0
- pdd/prompts/split_LLM.prompt +94 -0
- pdd/prompts/summarize_file_LLM.prompt +11 -0
- pdd/prompts/trace_LLM.prompt +30 -0
- pdd/prompts/trim_results_LLM.prompt +83 -0
- pdd/prompts/trim_results_start_LLM.prompt +45 -0
- pdd/prompts/unfinished_prompt_LLM.prompt +18 -0
- pdd/prompts/update_prompt_LLM.prompt +19 -0
- pdd/prompts/xml_convertor_LLM.prompt +54 -0
- pdd/split.py +119 -0
- pdd/split_main.py +103 -0
- pdd/summarize_directory.py +212 -0
- pdd/trace.py +135 -0
- pdd/trace_main.py +108 -0
- pdd/track_cost.py +102 -0
- pdd/unfinished_prompt.py +114 -0
- pdd/update_main.py +96 -0
- pdd/update_prompt.py +115 -0
- pdd/xml_tagger.py +122 -0
- pdd_cli-0.0.2.dist-info/LICENSE +7 -0
- pdd_cli-0.0.2.dist-info/METADATA +225 -0
- pdd_cli-0.0.2.dist-info/RECORD +95 -0
- pdd_cli-0.0.2.dist-info/WHEEL +5 -0
- pdd_cli-0.0.2.dist-info/entry_points.txt +2 -0
- pdd_cli-0.0.2.dist-info/top_level.txt +1 -0
pdd/change.py
ADDED
@@ -0,0 +1,141 @@
from typing import Tuple
from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel
from pydantic import BaseModel, Field
from .preprocess import preprocess
from .load_prompt_template import load_prompt_template
from .llm_invoke import llm_invoke

console = Console()

class ExtractedPrompt(BaseModel):
    modified_prompt: str = Field(description="The extracted modified prompt")

def change(
    input_prompt: str,
    input_code: str,
    change_prompt: str,
    strength: float,
    temperature: float,
    verbose: bool = False
) -> Tuple[str, float, str]:
    """
    Change a prompt according to specified modifications.

    Args:
        input_prompt (str): The original prompt to be modified
        input_code (str): The code generated from the input prompt
        change_prompt (str): Instructions for modifying the input prompt
        strength (float): The strength parameter for the LLM model (0-1)
        temperature (float): The temperature parameter for the LLM model
        verbose (bool): Whether to print detailed information

    Returns:
        Tuple[str, float, str]: (modified prompt, total cost, model name)
    """
    try:
        # Step 1: Load prompt templates
        change_llm_prompt = load_prompt_template("change_LLM")
        extract_prompt = load_prompt_template("extract_prompt_change_LLM")

        if not all([change_llm_prompt, extract_prompt]):
            raise ValueError("Failed to load prompt templates")

        # Step 2: Preprocess the change_LLM prompt
        processed_change_llm = preprocess(change_llm_prompt, recursive=False, double_curly_brackets=False)
        processed_change_prompt = preprocess(change_prompt, recursive=False, double_curly_brackets=False)

        # Input validation
        if not all([input_prompt, input_code, change_prompt]):
            raise ValueError("Missing required input parameters")
        if not (0 <= strength <= 1):
            raise ValueError("Strength must be between 0 and 1")

        total_cost = 0.0
        final_model_name = ""

        # Step 3: Run change prompt through model
        if verbose:
            console.print(Panel("Running change prompt through LLM...", style="blue"))

        change_response = llm_invoke(
            prompt=processed_change_llm,
            input_json={
                "input_prompt": input_prompt,
                "input_code": input_code,
                "change_prompt": processed_change_prompt
            },
            strength=strength,
            temperature=temperature,
            verbose=verbose
        )

        total_cost += change_response["cost"]
        final_model_name = change_response["model_name"]

        # Step 4: Print markdown formatting if verbose
        if verbose:
            console.print(Panel("Change prompt result:", style="green"))
            console.print(Markdown(change_response["result"]))

        # Step 5: Run extract prompt
        if verbose:
            console.print(Panel("Extracting modified prompt...", style="blue"))

        extract_response = llm_invoke(
            prompt=extract_prompt,
            input_json={"llm_output": change_response["result"]},
            strength=0.89,  # Fixed strength as specified
            temperature=temperature,
            verbose=verbose,
            output_pydantic=ExtractedPrompt
        )

        total_cost += extract_response["cost"]

        # Ensure we have a valid result
        if not isinstance(extract_response["result"], ExtractedPrompt):
            raise ValueError("Failed to extract modified prompt")

        modified_prompt = extract_response["result"].modified_prompt

        # Step 6: Print extracted prompt if verbose
        if verbose:
            console.print(Panel("Extracted modified prompt:", style="green"))
            console.print(Markdown(modified_prompt))

        # Step 7: Return results
        return modified_prompt, total_cost, final_model_name

    except Exception as e:
        console.print(f"[red]Error in change function: {str(e)}[/red]")
        raise

def main():
    """Example usage of the change function"""
    try:
        # Example inputs
        input_prompt = "Write a function that adds two numbers"
        input_code = "def add(a, b):\n return a + b"
        change_prompt = "Make the function handle negative numbers explicitly"

        modified_prompt, cost, model = change(
            input_prompt=input_prompt,
            input_code=input_code,
            change_prompt=change_prompt,
            strength=0.7,
            temperature=0.7,
            verbose=True
        )

        console.print("\n[bold green]Results:[/bold green]")
        console.print(f"Modified Prompt: {modified_prompt}")
        console.print(f"Total Cost: ${cost:.6f}")
        console.print(f"Model Used: {model}")

    except Exception as e:
        console.print(f"[red]Error in main: {str(e)}[/red]")

if __name__ == "__main__":
    main()
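For reference, a minimal sketch of driving this function from a separate script, beyond the bundled main() example. The file names below are placeholders rather than files shipped with pdd-cli; only the call signature and the (modified prompt, cost, model name) return tuple are taken from the code above.

# Hypothetical driver script; the prompt/code file names are placeholders.
from pathlib import Path
from pdd.change import change

original_prompt = Path("calculator_python.prompt").read_text()  # prompt that produced the code
generated_code = Path("calculator.py").read_text()              # code generated from that prompt

modified_prompt, cost, model = change(
    input_prompt=original_prompt,
    input_code=generated_code,
    change_prompt="Add explicit handling for floating point inputs",
    strength=0.7,      # must be between 0 and 1
    temperature=0.0,
    verbose=False,
)

Path("calculator_python_modified.prompt").write_text(modified_prompt)
print(f"Rewrote prompt with {model} for ${cost:.6f}")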
pdd/change_main.py
ADDED
@@ -0,0 +1,240 @@
import csv
import os
from typing import Optional, Tuple, List, Dict
import click
from rich import print as rprint
import logging

from .construct_paths import construct_paths
from .change import change as change_func
from .process_csv_change import process_csv_change

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)  # Changed from WARNING to DEBUG

def change_main(
    ctx: click.Context,
    change_prompt_file: str,
    input_code: str,
    input_prompt_file: Optional[str],
    output: Optional[str],
    use_csv: bool
) -> Tuple[str, float, str]:
    """
    Main function to handle the 'change' command logic.

    :param ctx: Click context containing command-line parameters.
    :param change_prompt_file: Path to the change prompt file.
    :param input_code: Path to the input code file or directory (when using '--csv').
    :param input_prompt_file: Path to the input prompt file. Optional and not used when '--csv' is specified.
    :param output: Optional path to save the modified prompt file. If not specified, it will be generated based on the input files.
    :param use_csv: Flag indicating whether to use CSV mode for batch changes.
    :return: A tuple containing the modified prompt or a message indicating multiple prompts were updated, total cost, and model name used.
    """
    logger.debug(f"Starting change_main with use_csv={use_csv}")
    try:
        # Validate arguments
        if not use_csv and not input_prompt_file:
            error_msg = "Error: 'input_prompt_file' is required when not using '--csv' mode."
            logger.error(error_msg)
            if not ctx.obj.get('quiet', False):
                rprint(f"[bold red]{error_msg}[/bold red]")
            return (error_msg, 0.0, "")

        # Check if input_code is a directory when using CSV mode
        if use_csv:
            try:
                if not os.path.isdir(input_code):
                    error_msg = f"In CSV mode, 'input_code' must be a directory. Got: {input_code}"
                    logger.error(error_msg)
                    if not ctx.obj.get('quiet', False):
                        rprint(f"[bold red]Error: {error_msg}[/bold red]")
                    return (error_msg, 0.0, "")
            except Exception as e:
                error_msg = f"Error checking input_code directory: {str(e)}"
                logger.error(error_msg)
                if not ctx.obj.get('quiet', False):
                    rprint(f"[bold red]Error: {error_msg}[/bold red]")
                return (error_msg, 0.0, "")

        # Construct file paths
        input_file_paths = {
            "change_prompt_file": change_prompt_file,
        }
        if not use_csv:
            input_file_paths["input_code"] = input_code
            input_file_paths["input_prompt_file"] = input_prompt_file

        command_options = {
            "output": output
        }

        logger.debug(f"Constructing paths with input_file_paths={input_file_paths}")
        input_strings, output_file_paths, _ = construct_paths(
            input_file_paths=input_file_paths,
            force=ctx.obj.get('force', False),
            quiet=ctx.obj.get('quiet', False),
            command="change",
            command_options=command_options
        )

        # Load input files
        change_prompt_content = input_strings["change_prompt_file"]
        logger.debug("Change prompt content loaded")

        # Get strength and temperature from context
        strength = ctx.obj.get('strength', 0.9)
        temperature = ctx.obj.get('temperature', 0)
        logger.debug(f"Using strength={strength} and temperature={temperature}")

        if use_csv:
            logger.debug(f"Using CSV mode with input_code={input_code}")
            # Validate CSV file format
            try:
                with open(change_prompt_file, mode='r', newline='', encoding='utf-8') as csvfile:
                    reader = csv.DictReader(csvfile)
                    if 'prompt_name' not in reader.fieldnames or 'change_instructions' not in reader.fieldnames:
                        error_msg = "CSV file must contain 'prompt_name' and 'change_instructions' columns."
                        logger.error(error_msg)
                        if not ctx.obj.get('quiet', False):
                            rprint(f"[bold red]Error: {error_msg}[/bold red]")
                        return (error_msg, 0.0, "")
                    logger.debug(f"CSV file validated. Columns: {reader.fieldnames}")
            except Exception as e:
                error_msg = f"Error reading CSV file: {str(e)}"
                logger.error(error_msg)
                if not ctx.obj.get('quiet', False):
                    rprint(f"[bold red]Error: {error_msg}[/bold red]")
                return (error_msg, 0.0, "")

            # Perform batch changes using CSV
            try:
                logger.debug("Calling process_csv_change")
                success, modified_prompts, total_cost, model_name = process_csv_change(
                    csv_file=change_prompt_file,
                    strength=strength,
                    temperature=temperature,
                    code_directory=input_code,
                    language=ctx.obj.get('language', 'python'),
                    extension=ctx.obj.get('extension', '.py'),
                    budget=ctx.obj.get('budget', 10.0)
                )
                logger.debug(f"process_csv_change completed. Success: {success}")
            except Exception as e:
                error_msg = f"Error during CSV processing: {str(e)}"
                logger.error(error_msg)
                if not ctx.obj.get('quiet', False):
                    rprint(f"[bold red]Error: {error_msg}[/bold red]")
                return (error_msg, 0.0, "")

            # Determine output path
            output_path = output or output_file_paths.get('output', "batch_modified_prompts.csv")
            logger.debug(f"Output path: {output_path}")

            # Save results
            if success:
                try:
                    if output is None:
                        # Save individual files
                        for item in modified_prompts:
                            file_name = item['file_name']
                            modified_prompt = item['modified_prompt']
                            individual_output_path = os.path.join(os.path.dirname(output_path), file_name)
                            with open(individual_output_path, 'w') as file:
                                file.write(modified_prompt)
                        logger.debug("Results saved as individual files successfully")
                    else:
                        # Save as CSV
                        with open(output_path, 'w', newline='') as csvfile:
                            fieldnames = ['file_name', 'modified_prompt']
                            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                            writer.writeheader()
                            for item in modified_prompts:
                                writer.writerow(item)
                        logger.debug("Results saved successfully")
                except Exception as e:
                    error_msg = f"Error writing output: {str(e)}"
                    logger.error(error_msg)
                    if not ctx.obj.get('quiet', False):
                        rprint(f"[bold red]Error: {error_msg}[/bold red]")
                    return (error_msg, total_cost, model_name)

            # Provide user feedback
            if not ctx.obj.get('quiet', False):
                if use_csv and success:
                    rprint("[bold green]Batch change operation completed successfully.[/bold green]")
                    rprint(f"[bold]Model used:[/bold] {model_name}")
                    rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
                    if output is None:
                        output_dir = os.path.dirname(output_path)
                        rprint(f"[bold]Results saved as individual files in:[/bold] {output_dir}")
                        for item in modified_prompts:
                            file_name = item['file_name']
                            individual_output_path = os.path.join(output_dir, file_name)
                            rprint(f" - {individual_output_path}")
                    else:
                        rprint(f"[bold]Results saved to CSV:[/bold] {output_path}")

            logger.debug("Returning success message for CSV mode")
            return ("Multiple prompts have been updated.", total_cost, model_name)

        else:
            logger.debug("Using non-CSV mode")
            input_code_content = input_strings["input_code"]
            input_prompt_content = input_strings["input_prompt_file"]

            # Perform single change
            logger.debug("Calling change_func")
            try:
                modified_prompt, total_cost, model_name = change_func(
                    input_prompt=input_prompt_content,
                    input_code=input_code_content,
                    change_prompt=change_prompt_content,
                    strength=strength,
                    temperature=temperature,
                    verbose=ctx.obj.get('verbose', False),
                )
                logger.debug("change_func completed")
            except Exception as e:
                error_msg = f"An unexpected error occurred: {str(e)}"
                logger.error(error_msg)
                if not ctx.obj.get('quiet', False):
                    rprint(f"[bold red]Error: {error_msg}[/bold red]")
                return (error_msg, 0.0, "")

            # Determine output path
            output_path = output or output_file_paths.get('output', f"modified_{os.path.basename(input_prompt_file)}")
            logger.debug(f"Output path: {output_path}")

            # Save the modified prompt
            try:
                with open(output_path, 'w') as f:
                    f.write(modified_prompt)
                logger.debug("Results saved successfully")
            except Exception as e:
                error_msg = f"Error writing output file: {str(e)}"
                logger.error(error_msg)
                if not ctx.obj.get('quiet', False):
                    rprint(f"[bold red]Error: {error_msg}[/bold red]")
                return (error_msg, total_cost, model_name)

            # Provide user feedback
            if not ctx.obj.get('quiet', False):
                rprint("[bold green]Prompt modification completed successfully.[/bold green]")
                rprint(f"[bold]Model used:[/bold] {model_name}")
                rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
                rprint(f"[bold]Modified prompt saved to:[/bold] {output_path}")

            logger.debug("Returning success message for non-CSV mode")
            return (modified_prompt, total_cost, model_name)

    except Exception as e:
        error_msg = f"An unexpected error occurred: {str(e)}"
        logger.error(error_msg)
        if not ctx.obj.get('quiet', False):
            rprint(f"[bold red]Error: {error_msg}[/bold red]")
        return (error_msg, 0.0, "")

    # This line should never be reached, but we'll log it just in case
    logger.warning("Reached end of change_main without returning")
    return ("An unknown error occurred", 0.0, "")
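As the validation in change_main shows, CSV mode expects the change prompt file to be a CSV with prompt_name and change_instructions columns, and batch results are written back either as individual prompt files or as a CSV with file_name and modified_prompt columns. A minimal sketch of producing a compatible input CSV follows; the prompt names and instructions are placeholders, and how each prompt_name is resolved against the code directory is defined in pdd/process_csv_change.py, which is not shown in this diff.

# Build a batch-change CSV in the shape that change_main's CSV mode validates:
# it must contain 'prompt_name' and 'change_instructions' columns.
import csv

rows = [
    # Placeholder rows; real entries would name prompts in your own project.
    {"prompt_name": "calculator_python.prompt",
     "change_instructions": "Handle division by zero explicitly"},
    {"prompt_name": "parser_python.prompt",
     "change_instructions": "Accept ISO-8601 timestamps"},
]

with open("changes.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=["prompt_name", "change_instructions"])
    writer.writeheader()
    writer.writerows(rows)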