pdd-cli 0.0.24__py3-none-any.whl → 0.0.26__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of pdd-cli might be problematic.
- pdd/__init__.py +14 -1
- pdd/bug_main.py +5 -1
- pdd/bug_to_unit_test.py +16 -5
- pdd/change.py +2 -1
- pdd/change_main.py +407 -189
- pdd/cli.py +853 -301
- pdd/code_generator.py +2 -1
- pdd/conflicts_in_prompts.py +2 -1
- pdd/construct_paths.py +377 -222
- pdd/context_generator.py +2 -1
- pdd/continue_generation.py +5 -2
- pdd/crash_main.py +55 -20
- pdd/data/llm_model.csv +18 -17
- pdd/detect_change.py +2 -1
- pdd/fix_code_loop.py +465 -160
- pdd/fix_code_module_errors.py +7 -4
- pdd/fix_error_loop.py +9 -9
- pdd/fix_errors_from_unit_tests.py +207 -365
- pdd/fix_main.py +32 -4
- pdd/fix_verification_errors.py +148 -77
- pdd/fix_verification_errors_loop.py +842 -768
- pdd/fix_verification_main.py +412 -0
- pdd/generate_output_paths.py +427 -189
- pdd/generate_test.py +3 -2
- pdd/increase_tests.py +2 -2
- pdd/llm_invoke.py +1167 -343
- pdd/preprocess.py +3 -3
- pdd/process_csv_change.py +466 -154
- pdd/prompts/bug_to_unit_test_LLM.prompt +11 -11
- pdd/prompts/extract_prompt_update_LLM.prompt +11 -5
- pdd/prompts/extract_unit_code_fix_LLM.prompt +2 -2
- pdd/prompts/find_verification_errors_LLM.prompt +11 -9
- pdd/prompts/fix_code_module_errors_LLM.prompt +29 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +5 -5
- pdd/prompts/fix_verification_errors_LLM.prompt +8 -1
- pdd/prompts/generate_test_LLM.prompt +9 -3
- pdd/prompts/trim_results_start_LLM.prompt +1 -1
- pdd/prompts/update_prompt_LLM.prompt +3 -3
- pdd/split.py +6 -5
- pdd/split_main.py +13 -4
- pdd/trace_main.py +7 -0
- pdd/update_model_costs.py +446 -0
- pdd/xml_tagger.py +2 -1
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.26.dist-info}/METADATA +8 -16
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.26.dist-info}/RECORD +49 -47
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.26.dist-info}/WHEEL +1 -1
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.26.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.26.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.24.dist-info → pdd_cli-0.0.26.dist-info}/top_level.txt +0 -0
pdd/prompts/bug_to_unit_test_LLM.prompt
CHANGED
@@ -1,17 +1,17 @@
-% You are an expert {language} Software Test Engineer. Your task is to generate a {language} unit test to
+% You are an expert {language} Software Test Engineer. Your task is to generate a {language} unit test to detect issue(s) in code_under_test. The test should compare the current output with the desired output and to ensure the code behaves as expected. If Python, use Pytest.
 
 % Inputs:
-• Current output:
-• Desired output:
-• Code under test:
-• Program used to run the code under test:
-• Prompt that generated the code:
+• Current output: <current_output>{current_output}</current_output>
+• Desired output: <desired_output>{desired_output}</desired_output>
+• Code under test: <code_under_test>{code_under_test}</code_under_test>
+• Program used to run the code under test: <program_used_to_run_code_under_test>{program_used_to_run_code_under_test}</program_used_to_run_code_under_test>
+• Prompt that generated the code: <prompt_that_generated_code>{prompt_that_generated_code}</prompt_that_generated_code>
 % Output:
-• A unit test that
+• A unit test that detects the problem(s) and ensures the code meets the expected behavior.
 
 % Follow these steps to generate the unit test:
-1. Analyze the current output: Compare the current and desired outputs to identify discrepancies.
-2.
-3.
+1. Analyze the current output: Compare the current and desired outputs to identify discrepancies and explain the issue in several paragraphs.
+2. Based on the above analysis explain in several paragraphs how the issues can be reproduced without having false positives.
+3. Write a test that properly detects the issue in the code_under_test so that if the test passes, the issue is fixed.
 
-% Focus exclusively on
+% Focus exclusively on writing a robust unit test to detect and identify the issue(s) in the code provided. The test should not focus on the internals of the code but rather the inputs and outputs so that the test can be reused if the code is regenerated.
pdd/prompts/extract_prompt_update_LLM.prompt
CHANGED
@@ -1,8 +1,14 @@
-% You are an expert Software Engineer. Your goal is to extract
+% You are an expert Software Engineer. Your goal is to extract the updated prompt from the LLM output.
 
-% Here is the generated llm_output:
+% Here is the generated llm_output: <llm_output>{llm_output}</llm_output>
 
-%
-
-
+% The LLM output contains the modified prompt that will generate the modified code, possibly with some additional commentary or explanation.
+% Your task is to identify and extract ONLY the modified prompt itself, without adding any JSON structure or additional formatting.
 
+% Ensure you:
+% 1. Remove any "# Modified Prompt" headers or similar text that isn't part of the actual prompt
+% 2. Preserve all markdown, code blocks, and formatting within the actual prompt
+% 3. Don't add any explanatory text, JSON wrappers, or your own commentary
+% 4. Return only the text that constitutes the actual prompt
+
+% The "modified_prompt" should be the complete, standalone prompt that could be used directly to generate the modified code.
pdd/prompts/extract_unit_code_fix_LLM.prompt
CHANGED
@@ -328,5 +328,5 @@
 - 'explanation': String explanation of whether the code under test needs to be fix and/or if the unit test needs to be fixed. Also, explain whether only a fragment of code was provided and the entire unit test and/or code under test needs to be reassembled from the original code and/or unit test.
 - 'update_unit_test': Boolean indicating whether the unit test needs to be updated.
 - 'update_code': Boolean indicating whether the code under test needs to be updated.
-- 'fixed_unit_test': The entire updated unit test code or empty String if no update is needed.
-- 'fixed_code': The entire updated code under test or empty String if no update is needed.
+- 'fixed_unit_test': The entire updated unit test code or empty String if no update is needed. Don't lose prior comments in the unit test unless they are no longer valid.
+- 'fixed_code': The entire updated code under test or empty String if no update is needed. Don't lose prior comments in the code under test unless they are no longer valid.
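The five output fields listed in this hunk map naturally onto a structured-output model. Below is a minimal sketch of that shape, assuming a Pydantic model in the style used elsewhere in the package; the class name, defaults, and descriptions are hypothetical, not code from the wheel.

from pydantic import BaseModel, Field

# Hypothetical model mirroring the output fields named in the prompt above.
class UnitCodeFix(BaseModel):
    explanation: str = Field(description="Whether the code under test and/or the unit test needs to be fixed, and why")
    update_unit_test: bool = Field(description="Whether the unit test needs to be updated")
    update_code: bool = Field(description="Whether the code under test needs to be updated")
    fixed_unit_test: str = Field(default="", description="Entire updated unit test, or empty string if no update is needed")
    fixed_code: str = Field(default="", description="Entire updated code under test, or empty string if no update is needed")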
pdd/prompts/find_verification_errors_LLM.prompt
CHANGED
@@ -1,4 +1,4 @@
-% You are an expert Software Engineer. Your goal is to
+% You are an expert Software Engineer. Your goal is to identify any discrepancies between a program, its code_module, and a prompt. You also need to check for any potential bugs or issues in the code.
 
 % Here is the program that is running the code_module: <program>{program}</program>
 
@@ -8,18 +8,20 @@
 
 % Here are the output logs from the program run: <output>{output}</output>
 
-% Follow these steps to
+% Follow these steps to identify any issues:
 Step 1. Compare the program and code_module against the prompt and explain any discrepancies.
 Step 2. Analyze the input/output behavior of the program and verify if it meets the expected behavior described in the prompt.
 Step 3. Identify any potential edge cases, error handling issues, or performance concerns that could cause problems in the future.
 Step 4. Check the code for potential bugs that haven't manifested yet.
 Step 5. If any issues are found, explain in detail the root cause of each issue and how it could impact the program's functioning.
 
-%
+% After your analysis, determine the number of distinct issues found. If no issues are found, the count should be 0.
 
-
-
-
-<
-
-%
+% Return your response as a single, valid JSON object.
+% The JSON object must conform to the following structure:
+% {{
+% "issues_count": <integer_count_of_issues_found>,
+% "details": "A detailed explanation of all steps taken during your analysis, including any discrepancies, bugs, or potential issues identified. If no issues are found, this can be a brief confirmation."
+% }}
+% Ensure the "details" field contains your complete textual analysis from Steps 1-5.
+% Ensure the "issues_count" is an integer representing the total number of distinct problems you've identified in your details.
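The rewritten ending of this prompt asks the model for a single JSON object with an integer "issues_count" and a string "details". A minimal sketch of how a caller might check that shape, assuming the response arrives as raw text; the example payload is made up and this is not code from the package.

import json

raw = '{"issues_count": 2, "details": "Step 1: ... Step 5: ..."}'  # made-up LLM response
data = json.loads(raw)
assert isinstance(data["issues_count"], int)   # integer count of distinct issues
assert isinstance(data["details"], str)        # full textual analysis
print(f"{data['issues_count']} issue(s) found")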
pdd/prompts/fix_code_module_errors_LLM.prompt
CHANGED
@@ -8,6 +8,35 @@
 
 % Here are the error log(s) from the program run and potentially from prior program run fixes: <errors>{errors}</errors>
 
+% NOTE: The errors field contains a structured history of previous fixing attempts with XML tags and human-readable content:
+<attempt number="X"> - Start of each attempt record
+<verification>
+Status: Success/failure status with return code
+Output: [Standard output text]
+Error: [Error message text]
+</verification>
+
+<current_error>
+[Current error message to be fixed]
+</current_error>
+
+<fixing>
+<llm_analysis>
+[Analysis from previous attempts in human-readable format]
+</llm_analysis>
+<decision>
+update_program: true/false
+update_code: true/false
+</decision>
+</fixing>
+</attempt>
+
+% When analyzing errors, you should:
+1. Review the history of previous attempts to understand what has been tried
+2. Pay attention to which fixes worked partially or not at all
+3. Avoid repeating approaches that failed in previous attempts
+4. Focus on solving the current error found within the <current_error> tags
+
 % Follow these steps to solve these errors:
 Step 1. Compare the prompt to the code_module and explain differences, if any.
 Step 2. Compare the prompt to the program and explain differences, if any.
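The NOTE added above documents a structured history of fixing attempts carried inside the errors field. A hypothetical helper (not code from the package) showing how one such <attempt> record could be assembled as plain text:

def format_attempt(number, status, output, error, current_error, analysis, update_program, update_code):
    # Builds one <attempt> record matching the structure described in the prompt above.
    return (
        f'<attempt number="{number}">\n'
        f"<verification>\nStatus: {status}\nOutput: {output}\nError: {error}\n</verification>\n\n"
        f"<current_error>\n{current_error}\n</current_error>\n\n"
        f"<fixing>\n<llm_analysis>\n{analysis}\n</llm_analysis>\n"
        f"<decision>\nupdate_program: {str(update_program).lower()}\n"
        f"update_code: {str(update_code).lower()}\n</decision>\n</fixing>\n"
        f"</attempt>"
    )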
pdd/prompts/fix_errors_from_unit_tests_LLM.prompt
CHANGED
@@ -9,7 +9,7 @@
 % This prompt is run iteratively. Here are the current errors and past potential fix attempts, if any, from the unit test and verification program run(s): <errors>{errors}</errors>
 
 % If the verfication program fails to run, the code_under_test and unit_test are unchanged from the previous iteration.
-
+<pdd>
 <examples>
 <example_1>
 % Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/1/test_conflicts_in_prompts.py</include></example_unit_test>
@@ -34,7 +34,7 @@
 
 % Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/3/context_generator_python.prompt</include></example_prompt>
 </example_3>
-
+
 
 <example_4>
 % Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/4/test_detect_change.py</include></example_unit_test>
@@ -51,8 +51,8 @@
 
 % Here is an example error/fix log showing how the issues were resolved: <example_error_fix_log><include>context/fix_errors_from_unit_tests/4/error.log</include></example_error_fix_log>
 </example_5>
-</pdd>
 </examples>
+</pdd>
 
 <instructions>
 % Follow these steps to solve these errors:
@@ -60,7 +60,7 @@
 Step 2. Compare the prompt to the unit_test and explain differences, if any.
 Step 3. For each prior attempted fix for the code_under_test and unit_test (if any), explain in a few paragraphs for each attempt why it might not have worked.
 Step 4. Write several paragraphs explaining the root cause of each of the errors and each of the warnings in the code_under_test and unit_test.
-Step 5. Explain in detail step by step how to solve each of the errors and warnings. For each error and warning, there should be several paragraphs description of the solution steps. Sometimes logging or print statements can help debug the code in subsequent iterations.
+Step 5. Explain in detail step by step how to solve each of the errors and warnings. For each error and warning, there should be several paragraphs description of the solution steps. Sometimes logging or print statements can help debug the code in subsequent iterations. It is important to make sure the tests are still sufficiently comprehensive to catch potential errors.
 Step 6. Review the above steps and correct for any errors and warnings in the code under test or unit test.
-Step 7. For the code that need changes, write the
+Step 7. For the code that need changes, write the corrected code_under_test and/or corrected unit_test in its/their entirety.
 </instructions>
pdd/prompts/fix_verification_errors_LLM.prompt
CHANGED
@@ -17,4 +17,11 @@
 Step 4. Provide the complete fixed code_module and program with explanations for each significant change made.
 Step 5. Verify that the fixed code meets all requirements from the original prompt and addresses all identified issues.
 
-%
+% Return your response as a single, valid JSON object.
+% The JSON object must conform to the following structure:
+% {{
+% "explanation": "Detailed explanation of all steps taken, including analysis of issues, solutions developed, and verification that the fixes are correct and meet prompt requirements.",
+% "fixed_code": "The complete, runnable, and fixed Python code for the code_module. This should ONLY be the code, with no additional text or commentary.",
+% "fixed_program": "The complete, runnable, and fixed Python code for the program. This should ONLY be the code, with no additional text or commentary."
+% }}
+% Ensure that the "fixed_code" and "fixed_program" fields contain only the raw source code. Do not include any markdown formatting, comments (unless part of the code itself), or any other explanatory text within these fields.
pdd/prompts/generate_test_LLM.prompt
CHANGED
@@ -1,4 +1,4 @@
-% You are an expert Software Test Engineer. Your goal is to generate
+% You are an expert Software Test Engineer. Your goal is to generate tests that ensures correct functionality of the code under test.
 
 % Here a description of what the code is supposed to do and was the prompt that generated the code: <prompt_that_generated_code>{prompt_that_generated_code}</prompt_that_generated_code>
 
@@ -9,12 +9,18 @@
 - The unit test should be in {language}. If Python, use pytest.
 - Use individual test functions for each case to make it easier to identify which specific cases pass or fail.
 - Use the description of the functionality in the prompt to generate tests with useful tests with good code coverage.
-- The code might get regenerated by a LLM so focus the
+- The code might get regenerated by a LLM so focus the tests on the functionality of the code, not the implementation details.
+- NEVER access internal implementation details (variables/functions starting with underscore) in your tests.
+- Setup and teardown methods should only use public APIs and environment variables, never reset internal module state directly.
+- Design tests to be independent of implementation details that might change when code is regenerated.
+- For test isolation, use fixtures and mocking of external dependencies rather than manipulating internal module state. In general minimize the amount of mocking needed so that the tests are more robust to changes in the code under test and more code is tested.
 <include>./context/test.prompt</include>
 
 <instructions>
 1. Carefully read the prompt that generated the code under test and determine what might be possible edge cases.
 2. For each edge case explain whether it is better to do the test using Z3 formal verification or unit tests.
 3. Develop a detailed test plan that will ensure the code under test is correct. This should involve both Z3 formal verification and unit tests.
-4.
+4. Now write the test file.
+a) The first part of the test file should be the detailed test plan from step 3 above in comments.
+b) Then write the tests and Z3 formal verification tests that are runnable as unit tests.
 </instructions>
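The updated instructions ask for functionality-focused tests plus Z3 formal verification checks that run as ordinary unit tests. A minimal sketch of what such a check can look like under pytest, using a made-up clamp specification; it requires the z3-solver package and none of it is code from the wheel.

from z3 import Int, If, Or, Solver, unsat

def test_clamp_spec_stays_within_bounds():
    # Symbolic model of a hypothetical clamp(x, lo, hi) function.
    x, lo, hi = Int("x"), Int("lo"), Int("hi")
    clamped = If(x < lo, lo, If(x > hi, hi, x))
    s = Solver()
    s.add(lo <= hi)
    s.add(Or(clamped < lo, clamped > hi))  # search for a violating assignment
    assert s.check() == unsat  # property holds for all inputs: no counterexample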
pdd/prompts/trim_results_start_LLM.prompt
CHANGED
@@ -1,4 +1,4 @@
-% You are an expert JSON
+% You are an expert editor and JSON creator. You will be processing the output of a language model (LLM) to extract the unfinished main code block being generated and provide an explanation of how you determined what to cut out. Here is the llm_output to process:
 <llm_output>
 {LLM_OUTPUT}
 </llm_output>
pdd/prompts/update_prompt_LLM.prompt
CHANGED
@@ -8,9 +8,9 @@
 Output:
 'modified_prompt' - A string that contains the updated prompt that will generate the modified code.
 
-% Here is the input_prompt to change:
-% Here is the input_code:
-% Here is the modified_code:
+% Here is the input_prompt to change: <input_prompt>{input_prompt}</input_prompt>
+% Here is the input_code: <input_code>{input_code}</input_code>
+% Here is the modified_code: <modified_code>{modified_code}</modified_code>
 
 % To generate the modified prompt, perform the following sequence of steps:
 1. Using the provided input_code and input_prompt, identify what the code does and how it was generated.
pdd/split.py
CHANGED
@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
 from .load_prompt_template import load_prompt_template
 from .preprocess import preprocess
 from .llm_invoke import llm_invoke
+from . import EXTRACTION_STRENGTH
 
 class PromptSplit(BaseModel):
     extracted_functionality: str = Field(description="The extracted functionality as a sub-module prompt")
@@ -17,7 +18,7 @@ def split(
     strength: float,
     temperature: float,
     verbose: bool = False
-) -> Tuple[str, str,
+) -> Tuple[str, str, float, str]:
     """
     Split a prompt into extracted functionality and remaining prompt.
 
@@ -30,7 +31,7 @@ def split(
         verbose (bool): Whether to print detailed information.
 
     Returns:
-        Tuple[str, str,
+        Tuple[str, str, float, str]: (extracted_functionality, remaining_prompt, model_name, total_cost)
             where model_name is the name of the model used (returned as the second to last tuple element)
             and total_cost is the aggregated cost from all LLM invocations.
     """
@@ -91,7 +92,7 @@ def split(
         extract_response = llm_invoke(
             prompt=processed_extract_prompt,
             input_json={"llm_output": split_response["result"]},
-            strength=
+            strength=EXTRACTION_STRENGTH, # Fixed strength for extraction
             temperature=temperature,
             output_pydantic=PromptSplit,
             verbose=verbose
@@ -111,8 +112,8 @@
         rprint(f"[bold cyan]Total Cost: ${total_cost:.6f}[/bold cyan]")
         rprint(f"[bold cyan]Model used: {model_name}[/bold cyan]")
 
-        # 6. Return results
-        return extracted_functionality, remaining_prompt,
+        # 6. Return results with standardized order: (result_data, cost, model_name)
+        return (extracted_functionality, remaining_prompt), total_cost, model_name
 
     except Exception as e:
         # Print an error message, then raise an exception that includes
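With this change, split() follows the standardized return order (result_data, cost, model_name), where result_data is itself a tuple. A usage sketch of unpacking it; the import path and argument values are placeholders, not taken from the wheel.

from pdd.split import split  # assumed import path

# Only the unpacking pattern is the point here.
(extracted_functionality, remaining_prompt), total_cost, model_name = split(
    input_prompt="...",
    input_code="...",
    example_code="...",
    strength=0.5,
    temperature=0,
    verbose=False,
)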
pdd/split_main.py
CHANGED
@@ -13,7 +13,7 @@ def split_main(
     example_code_file: str,
     output_sub: Optional[str],
     output_modified: Optional[str]
-) -> Tuple[str, str,
+) -> Tuple[str, str, float, str]:
     """
     CLI wrapper for splitting a prompt into extracted functionality and remaining prompt.
 
@@ -60,8 +60,8 @@
         strength = ctx.obj.get('strength', 0.5)
         temperature = ctx.obj.get('temperature', 0)
 
-        # Call the split function
-
+        # Call the split function with the standardized return pattern (result_data, cost, model_name)
+        result_tuple, total_cost, model_name = split(
             input_prompt=input_strings["input_prompt"],
             input_code=input_strings["input_code"],
             example_code=input_strings["example_code"],
@@ -69,6 +69,9 @@
             temperature=temperature,
             verbose=not ctx.obj.get('quiet', False)
         )
+
+        # Unpack the result tuple
+        extracted_functionality, remaining_prompt = result_tuple
 
         # Save the output files
         try:
@@ -87,7 +90,13 @@
             rprint(f"[bold]Model used:[/bold] {model_name}")
             rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
 
-
+        # Return with standardized order (result_data, cost, model_name)
+        return {
+            "sub_prompt_content": extracted_functionality,
+            "modified_prompt_content": remaining_prompt,
+            "output_sub": output_file_paths["output_sub"],
+            "output_modified": output_file_paths["output_modified"]
+        }, total_cost, model_name
 
     except Exception as e:
         # Handle errors and provide appropriate feedback
pdd/trace_main.py
CHANGED
@@ -56,6 +56,13 @@ def trace_main(ctx: click.Context, prompt_file: str, code_file: str, code_line:
             code_content, code_line, prompt_content, strength, temperature
         )
         logger.debug(f"Trace analysis completed: prompt_line={prompt_line}, total_cost={total_cost}, model_name={model_name}")
+
+        # Exit with error if trace returned None (indicating an error occurred)
+        if prompt_line is None:
+            if not quiet:
+                rprint(f"[bold red]Trace analysis failed[/bold red]")
+            logger.error("Trace analysis failed (prompt_line is None)")
+            ctx.exit(1)
     except ValueError as e:
         if not quiet:
             rprint(f"[bold red]Invalid input: {e}[/bold red]")