pdd-cli 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pdd-cli might be problematic.

Files changed (95)
  1. pdd/__init__.py +0 -0
  2. pdd/auto_deps_main.py +98 -0
  3. pdd/auto_include.py +175 -0
  4. pdd/auto_update.py +73 -0
  5. pdd/bug_main.py +99 -0
  6. pdd/bug_to_unit_test.py +159 -0
  7. pdd/change.py +141 -0
  8. pdd/change_main.py +240 -0
  9. pdd/cli.py +607 -0
  10. pdd/cmd_test_main.py +155 -0
  11. pdd/code_generator.py +117 -0
  12. pdd/code_generator_main.py +66 -0
  13. pdd/comment_line.py +35 -0
  14. pdd/conflicts_in_prompts.py +143 -0
  15. pdd/conflicts_main.py +90 -0
  16. pdd/construct_paths.py +251 -0
  17. pdd/context_generator.py +133 -0
  18. pdd/context_generator_main.py +73 -0
  19. pdd/continue_generation.py +140 -0
  20. pdd/crash_main.py +127 -0
  21. pdd/data/language_format.csv +61 -0
  22. pdd/data/llm_model.csv +15 -0
  23. pdd/detect_change.py +142 -0
  24. pdd/detect_change_main.py +100 -0
  25. pdd/find_section.py +28 -0
  26. pdd/fix_code_loop.py +212 -0
  27. pdd/fix_code_module_errors.py +143 -0
  28. pdd/fix_error_loop.py +216 -0
  29. pdd/fix_errors_from_unit_tests.py +240 -0
  30. pdd/fix_main.py +138 -0
  31. pdd/generate_output_paths.py +194 -0
  32. pdd/generate_test.py +140 -0
  33. pdd/get_comment.py +55 -0
  34. pdd/get_extension.py +52 -0
  35. pdd/get_language.py +41 -0
  36. pdd/git_update.py +84 -0
  37. pdd/increase_tests.py +93 -0
  38. pdd/insert_includes.py +150 -0
  39. pdd/llm_invoke.py +304 -0
  40. pdd/load_prompt_template.py +59 -0
  41. pdd/pdd_completion.fish +72 -0
  42. pdd/pdd_completion.sh +141 -0
  43. pdd/pdd_completion.zsh +418 -0
  44. pdd/postprocess.py +121 -0
  45. pdd/postprocess_0.py +52 -0
  46. pdd/preprocess.py +199 -0
  47. pdd/preprocess_main.py +72 -0
  48. pdd/process_csv_change.py +182 -0
  49. pdd/prompts/auto_include_LLM.prompt +230 -0
  50. pdd/prompts/bug_to_unit_test_LLM.prompt +17 -0
  51. pdd/prompts/change_LLM.prompt +34 -0
  52. pdd/prompts/conflict_LLM.prompt +23 -0
  53. pdd/prompts/continue_generation_LLM.prompt +3 -0
  54. pdd/prompts/detect_change_LLM.prompt +65 -0
  55. pdd/prompts/example_generator_LLM.prompt +10 -0
  56. pdd/prompts/extract_auto_include_LLM.prompt +6 -0
  57. pdd/prompts/extract_code_LLM.prompt +22 -0
  58. pdd/prompts/extract_conflict_LLM.prompt +19 -0
  59. pdd/prompts/extract_detect_change_LLM.prompt +19 -0
  60. pdd/prompts/extract_program_code_fix_LLM.prompt +16 -0
  61. pdd/prompts/extract_prompt_change_LLM.prompt +7 -0
  62. pdd/prompts/extract_prompt_split_LLM.prompt +9 -0
  63. pdd/prompts/extract_prompt_update_LLM.prompt +8 -0
  64. pdd/prompts/extract_promptline_LLM.prompt +11 -0
  65. pdd/prompts/extract_unit_code_fix_LLM.prompt +332 -0
  66. pdd/prompts/extract_xml_LLM.prompt +7 -0
  67. pdd/prompts/fix_code_module_errors_LLM.prompt +17 -0
  68. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +62 -0
  69. pdd/prompts/generate_test_LLM.prompt +12 -0
  70. pdd/prompts/increase_tests_LLM.prompt +16 -0
  71. pdd/prompts/insert_includes_LLM.prompt +30 -0
  72. pdd/prompts/split_LLM.prompt +94 -0
  73. pdd/prompts/summarize_file_LLM.prompt +11 -0
  74. pdd/prompts/trace_LLM.prompt +30 -0
  75. pdd/prompts/trim_results_LLM.prompt +83 -0
  76. pdd/prompts/trim_results_start_LLM.prompt +45 -0
  77. pdd/prompts/unfinished_prompt_LLM.prompt +18 -0
  78. pdd/prompts/update_prompt_LLM.prompt +19 -0
  79. pdd/prompts/xml_convertor_LLM.prompt +54 -0
  80. pdd/split.py +119 -0
  81. pdd/split_main.py +103 -0
  82. pdd/summarize_directory.py +212 -0
  83. pdd/trace.py +135 -0
  84. pdd/trace_main.py +108 -0
  85. pdd/track_cost.py +102 -0
  86. pdd/unfinished_prompt.py +114 -0
  87. pdd/update_main.py +96 -0
  88. pdd/update_prompt.py +115 -0
  89. pdd/xml_tagger.py +122 -0
  90. pdd_cli-0.0.2.dist-info/LICENSE +7 -0
  91. pdd_cli-0.0.2.dist-info/METADATA +225 -0
  92. pdd_cli-0.0.2.dist-info/RECORD +95 -0
  93. pdd_cli-0.0.2.dist-info/WHEEL +5 -0
  94. pdd_cli-0.0.2.dist-info/entry_points.txt +2 -0
  95. pdd_cli-0.0.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,230 @@
+ <overview>
+ You are a prompt expert that helps select the necessary subset of "includes" (a list of code files) out of a provided list of file paths. Your goal is to infer the purpose of each file based on its name so that just the proper includes are included.
+ </overview>
+
+ <definitions>
+ Here are the inputs and outputs of this prompt:
+ <input>
+ 'input_prompt' - A string that contains the prompt for which the includes must be selected.
+ 'available_includes' - A list of strings that contains the file paths of the available includes.
+ </input>
+ <output>
+ 'Step 1.' - A string of possible includes based on the input_prompt.
+ 'Step 2.' - A string explaining why an include might or might not be necessary for the prompt.
+ 'Step 3.' - A string of the minimum set of includes required to achieve the goal of the input_prompt.
+ 'Step 4.' - A string containing the string_of_includes based on Step 3.
+ </output>
+ </definitions>
+
+ <context>
+ Here is the input_prompt to find the includes for: <input_prompt>{input_prompt}</input_prompt>
+ Here are the available_includes: <available_includes>{available_includes}</available_includes>
+ </context>
+
+ Here are some examples of how to do this:
+ <examples>
+ <example_1>
+ <example_input_prompt>
+ % You are an expert Python Software Engineer. Your goal is to write a Python function, "process_csv_change", that will take in a CSV file name and call change_example for each of the lines.
+
+ <include>context/python_preamble.prompt</include>
+
+ % Here are the inputs and outputs of the function:
+ Inputs:
+ 'csv_file' - A string containing the path to the CSV file.
+ 'strength' - A float between 0 and 1 that represents the strength of the LLM model to use.
+ 'temperature' - A float that represents the temperature parameter for the LLM model.
+ 'code_directory' - A string containing the path to the directory where the code files are stored.
+ 'language' - A string representing the programming language of the code files.
+ 'extension' - A string representing the file extension of the code files. Includes the '.' in front of the extension.
+ 'budget' - A float representing the maximum cost allowed for the change process.
+ Outputs:
+ 'success' - A boolean indicating whether the changes were successfully made.
+ 'list_of_jsons' - A list of dictionaries containing Key: file_name, Value: modified_prompt.
+ 'total_cost' - A float representing the total cost of all fix attempts.
+ 'model_name' - A string representing the name of the LLM model used.
+
+ % This function will do the following:
+ Step 1. Read in the CSV file with columns prompt_name and change_instructions.
+ Step 2. Loop through each line in the CSV file:
+ a. Initialize variables:
+ - Initialize a list_of_jsons to store the modified prompts.
+ - Read the prompt from the prompt_name column (text file).
+ - Parse the prompt_name into an input_code name:
+ - remove the path and suffix _language.prompt from the prompt_name
+ - add the suffix extension to the prompt_name
+ - change the directory to code_directory
+ - Read the input_code from the input_code_name as a string
+ - Read the change_instructions from the change_instructions column
+ b. Call the change function with the input_prompt, input_code, and change_prompt.
+ c. Add the returned total_cost to the total cost accumulator.
+ d. If the total cost exceeds the budget, break the loop.
+ e. If the change was successful, add the modified prompt to the list_of_jsons.
+ Step 3. Return the success status, list of modified prompts, total cost, and model name.
+ </example_input_prompt>
+ <example_available_includes>
+ context/DSPy_example.py
+ context/anthropic_counter_example.py
+ context/autotokenizer_example.py
+ context/bug_to_unit_test_example.py
+ context/bug_to_unit_test_failure_example.py
+ context/change_example.py
+ context/cli_example.py
+ context/cli_python_preprocessed.prompt
+ context/click_example.py
+ context/cloud_function_call.py
+ context/code_generator_example.py
+ context/comment_line_example.py
+ context/conflicts_in_prompts_example.py
+ context/conflicts_in_prompts_python.prompt
+ context/construct_paths_example.py
+ context/context_generator_example.py
+ context/continue_generation_example.py
+ context/detect_change_example.py
+ context/execute_bug_to_unit_test_failure.py
+ context/final_llm_output.py
+ context/find_section_example.py
+ context/fix_code_module_errors_example.py
+ context/fix_error_loop_example.py
+ context/fix_errors_from_unit_tests_example.py
+ context/generate_output_paths_example.py
+ context/generate_test_example.py
+ context/get_comment_example.py
+ context/get_extension_example.py
+ context/get_language_example.py
+ context/git_update_example.py
+ context/langchain_lcel_example.py
+ context/llm_selector_example.py
+ context/llm_token_counter_example.py
+ context/postprocess_0_example.py
+ context/postprocess_example.py
+ context/postprocessed_runnable_llm_output.py
+ context/preprocess_example.py
+ context/process_csv_change_example.py
+ context/prompt_caching.ipynb
+ context/split_example.py
+ context/tiktoken_example.py
+ context/trace_example.py
+ context/unfinished_prompt_example.py
+ context/unrunnable_raw_llm_output.py
+ context/update_prompt_example.py
+ context/xml_tagger_example.py
+ </example_available_includes>
+ <example_string_of_includes>
+ % Here are examples of how to use internal modules:
+ <internal_example_modules>
+ % Here is an example of the change function that will be used: <change_example><include>context/change_example.py</include></change_example>
+ </internal_example_modules>
+ </example_string_of_includes>
+ </example_1>
+
+ <example_2>
+ <example_input_prompt>
+ % You are an expert Python Software Engineer. Your goal is to write a Python function, "generate_test", that will create a unit test from a code file.
+
+ <include>./context/python_preamble.prompt</include>
+
+ % Here are the inputs and outputs of the function:
+ Inputs:
+ 'prompt' - A string containing the prompt that generated the code file to be processed.
+ 'code' - A string containing the code to generate a unit test from.
+ 'strength' - A float between 0 and 1 that is the strength of the LLM model to use.
+ 'temperature' - A float that is the temperature of the LLM model to use.
+ 'language' - A string that is the language of the unit test to be generated.
+ Outputs:
+ 'unit_test' - A string that is the generated unit test code.
+ 'total_cost' - A float that is the total cost to generate the unit test code.
+ 'model_name' - A string that is the name of the selected LLM model.
+
+ % This program will use Langchain to do the following:
+ Step 1. Use the $PDD_PATH environment variable to get the path to the project. Load the '$PDD_PATH/prompts/generate_test_LLM.prompt' file.
+ Step 2. Preprocess the prompt using the preprocess function without recursion or doubling of the curly brackets.
+ Step 3. Then this will create a Langchain LCEL template from the test generator prompt.
+ Step 4. This will use llm_selector for the model.
+ Step 5. This will run the inputs through the model using Langchain LCEL.
+ 5a. Be sure to pass the following string parameters to the prompt during invoke:
+ - 'prompt_that_generated_code': preprocess the prompt using the preprocess function without recursion or doubling of the curly brackets.
+ - 'code'
+ - 'language'
+ 5b. Pretty print a message letting the user know it is running, how many tokens (using token_counter from llm_selector) are in the prompt, and the cost. The cost from llm_selector is in dollars per million tokens.
+ Step 6. This will pretty print the markdown formatting that is present in the result via the rich Markdown function. It will also pretty print the number of tokens in the result and the cost.
+ Step 7. Detect if the generation is incomplete using the unfinished_prompt function (strength .7) by passing in the last 600 characters of the output of Step 5.
+ - a. If incomplete, call the continue_generation function to complete the generation.
+ - b. Else, if complete, postprocess the model output result using the postprocess function from the postprocess module with a strength of 0.7.
+ Step 8. Print out the total_cost, including the input and output tokens and functions that incur cost (e.g. postprocessing).
+ Step 9. Return the unit_test, total_cost, and model_name.
+ </example_input_prompt>
+ <example_available_includes>
+ context/DSPy_example.py
+ context/anthropic_counter_example.py
+ context/autotokenizer_example.py
+ context/bug_to_unit_test_example.py
+ context/bug_to_unit_test_failure_example.py
+ context/change_example.py
+ context/cli_example.py
+ context/cli_python_preprocessed.prompt
+ context/click_example.py
+ context/cloud_function_call.py
+ context/code_generator_example.py
+ context/comment_line_example.py
+ context/conflicts_in_prompts_example.py
+ context/conflicts_in_prompts_python.prompt
+ context/construct_paths_example.py
+ context/context_generator_example.py
+ context/continue_generation_example.py
+ context/detect_change_example.py
+ context/execute_bug_to_unit_test_failure.py
+ context/final_llm_output.py
+ context/find_section_example.py
+ context/fix_code_module_errors_example.py
+ context/fix_error_loop_example.py
+ context/fix_errors_from_unit_tests_example.py
+ context/generate_output_paths_example.py
+ context/generate_test_example.py
+ context/get_comment_example.py
+ context/get_extension_example.py
+ context/get_language_example.py
+ context/git_update_example.py
+ context/langchain_lcel_example.py
+ context/llm_selector_example.py
+ context/llm_token_counter_example.py
+ context/postprocess_0_example.py
+ context/postprocess_example.py
+ context/postprocessed_runnable_llm_output.py
+ context/preprocess_example.py
+ context/process_csv_change_example.py
+ context/prompt_caching.ipynb
+ context/split_example.py
+ context/tiktoken_example.py
+ context/trace_example.py
+ context/unfinished_prompt_example.py
+ context/unrunnable_raw_llm_output.py
+ context/update_prompt_example.py
+ context/xml_tagger_example.py
+ </example_available_includes>
+ <example_string_of_includes>
+ % Here is an example of a LangChain Expression Language (LCEL) program: <lcel_example><include>context/langchain_lcel_example.py</include></lcel_example>
+
+ % Here are examples of how to use internal modules:
+ <internal_example_modules>
+ % Here is an example of how to preprocess the prompt from a file: <preprocess_example><include>./context/preprocess_example.py</include></preprocess_example>
+
+ % Example of selecting a Langchain LLM and counting tokens using llm_selector: <llm_selector_example><include>./context/llm_selector_example.py</include></llm_selector_example>
+
+ % Example usage of the unfinished_prompt function: <unfinished_prompt_example><include>./context/unfinished_prompt_example.py</include></unfinished_prompt_example>
+
+ % Here is an example of how to continue the generation of a model output: <continue_generation_example><include>context/continue_generation_example.py</include></continue_generation_example>
+
+ % Here is an example of how to postprocess the model output result: <postprocess_example><include>context/postprocess_example.py</include></postprocess_example>
+ </internal_example_modules>
+ </example_string_of_includes>
+ </example_2>
+ </examples>
+
+ <instructions>
+ Follow these instructions:
+ Step 1. Select possible includes from the available_includes based on the input_prompt.
+ Step 2. Explain why an include might or might not be necessary for the prompt.
+ Step 3. Determine the minimum set of includes required to achieve the goal of the input_prompt.
+ Step 4. Generate the string_of_includes based on Step 3.
+ </instructions>
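The `{input_prompt}` and `{available_includes}` placeholders above follow Python str.format conventions (the doubled braces in the extract prompts further down are consistent with this). As a minimal sketch, assuming a hypothetical render helper rather than pdd's actual loading code, filling the template might look like:

```python
# Minimal sketch of filling the auto_include template's placeholders.
# render_auto_include is a hypothetical helper, not part of pdd's API.
from pathlib import Path


def render_auto_include(template_path: str, input_prompt: str,
                        available_includes: list[str]) -> str:
    """Load the prompt template and fill its str.format placeholders."""
    template = Path(template_path).read_text(encoding="utf-8")
    return template.format(
        input_prompt=input_prompt,
        available_includes="\n".join(available_includes),
    )
```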
@@ -0,0 +1,17 @@
+ % You are an expert {language} Software Test Engineer. Your task is to generate a {language} unit test to identify issue(s) in a given piece of code. The test should compare the current output with the desired output and ensure the code behaves as expected. If Python, use Pytest.
+
+ % Inputs:
+ • Current output: ```{current_output}```
+ • Desired output: ```{desired_output}```
+ • Code under test: ```{code_under_test}```
+ • Program used to run the code under test: ```{program_used_to_run_code_under_test}```
+ • Prompt that generated the code: ```{prompt_that_generated_code}```
+ % Output:
+ • A unit test that covers the problem(s) and ensures the code meets the expected behavior.
+
+ % Follow these steps to generate the unit test:
+ 1. Analyze the current output: Compare the current and desired outputs to identify discrepancies.
+ 2. Generate a unit test: Write a test that highlights the issue in the current code and tests whether the function produces the correct output as specified.
+ 3. Ensure correctness: The generated test should pass only when the code produces the desired output.
+
+ % Focus exclusively on generating a robust unit test to detect and identify the issue(s) in the code provided.
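For illustration only, the kind of test this prompt asks the model to produce might look like the following (a hypothetical function and outputs, not drawn from the package):

```python
# Hypothetical illustration of the requested test shape: the test encodes the
# desired output, so it fails on the current (buggy) behavior and passes once
# the code under test is fixed. slugify stands in for the real code under test.
import re


def slugify(text: str) -> str:
    # Buggy current behavior: each space becomes a dash, so runs of spaces
    # produce runs of dashes ("hello--world" instead of "hello-world").
    return re.sub(r" ", "-", text.strip().lower())


def test_slugify_collapses_repeated_spaces():
    # Desired output uses a single dash between words.
    assert slugify("Hello  World") == "hello-world"
```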
@@ -0,0 +1,34 @@
+ <role>
+ You are an expert LLM Prompt Engineer. Your goal is to change the input_prompt into a modified_prompt according to the change_prompt.
+ </role>
+
+ <inputs_outputs_definitions>
+ Here are the inputs and outputs of this prompt:
+ <input>
+ 'input_prompt' - A string that contains the prompt that will be modified by the change_prompt.
+ 'input_code' - A string that contains the code that was generated from the input_prompt.
+ 'change_prompt' - A string that contains the instructions for how to modify the input_prompt.
+ </input>
+ <output>
+ 'modified_prompt' - A string that contains the modified prompt that was changed based on the change_prompt.
+ </output>
+ </inputs_outputs_definitions>
+
+ ```<./prompts/xml/change_example_partial_processed.prompt>```
+
+ <context>
+ Here is the input_prompt to change: <input_prompt>{input_prompt}</input_prompt>
+ Here is the input_code generated from the input_prompt: <input_code>{input_code}</input_code>
+ Here is the change_prompt to implement: <change_prompt>{change_prompt}</change_prompt>
+ </context>
+
+ <instructions>
+ Follow these instructions:
+ Step 1. Explain in detail, step by step, the ramifications of the change_prompt on the input_prompt.
+ Step 2. Explain in detail, step by step, what changes need to be made to the input_prompt to generate the modified_prompt based on Step 1. This step describes how to modify the input_prompt to generate the modified_prompt.
+ Step 3. Generate the modified_prompt based on Step 2. Except for the change, the rest of the existing functionality of the input_prompt should remain. Structure the prompt similar to the example prompts, especially including the descriptions of the inputs and outputs.
+ </instructions>
+
+ <important_notes>
+ Never ask if you should proceed with generating the modified_prompt, as this prompt has no human monitoring. Always assume that the change_prompt is correct and proceed with generating the modified_prompt. Also, for Step 3, output the modified prompt, not just how to modify the prompt.
+ </important_notes>
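Several of these prompts appear in generate/extract pairs: change_LLM.prompt produces free-form analysis, and extract_prompt_change_LLM.prompt (further below) distills it into JSON. A hedged sketch of that two-stage pattern, with `invoke` standing in for a real LLM call rather than pdd's llm_invoke machinery:

```python
# Sketch of the two-stage generate/extract pattern these prompt files suggest.
# invoke(prompt) -> str is a hypothetical LLM call, not pdd's actual API.
import json


def run_change(invoke, change_template: str, extract_template: str,
               input_prompt: str, input_code: str, change_prompt: str) -> str:
    # Stage 1: the change prompt reasons step by step and emits the modified prompt.
    raw = invoke(change_template.format(
        input_prompt=input_prompt,
        input_code=input_code,
        change_prompt=change_prompt,
    ))
    # Stage 2: the extract prompt reduces the free-form output to a JSON object.
    extracted = invoke(extract_template.format(llm_output=raw))
    return json.loads(extracted)["modified_prompt"]
```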
@@ -0,0 +1,23 @@
+ % You are a software architect and prompt engineering expert tasked with analyzing two prompts for potential conflicts and suggesting resolutions. Your goal is to identify any inconsistencies or contradictions between the prompts and provide constructive and detailed suggestions on how to resolve these conflicts.
+
+ <inputs>
+ Here are the two prompts you need to analyze:
+ <prompt_1>
+ {PROMPT1}
+ </prompt_1>
+
+ <prompt_2>
+ {PROMPT2}
+ </prompt_2>
+ </inputs>
+
+ % Follow these instructions:
+ 1. Carefully read and analyze both prompts. Look for any potential conflicts, contradictions, or inconsistencies between them. Consider aspects such as:
+ - Goals or objectives
+ - Specific instructions or requirements
+ - Assumptions or context
+ 2. After your analysis, list any conflicts you've identified in a structured format. Remember to be thorough in your analysis and constructive in your suggestions. Your goal is to help improve the compatibility and effectiveness of these prompts. For each conflict, provide the following:
+ - Detailed explanation of why this is a conflict
+ - Suggestion on how to resolve this conflict
+ - Determination of which prompt(s) would be best to change and how
+ 3. Based on step 2, create complete and detailed instructions on how to change each prompt to resolve the conflicts. Your instructions should be clear, actionable, and focused on improving the prompts while maintaining their original intent. Everything that is needed to know how to change the prompt effectively should be included here.
@@ -0,0 +1,3 @@
+ {FORMATTED_INPUT_PROMPT}
+ {LLM_OUTPUT}
+ continue by outputting only the rest of the code and no other commentary
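This three-line template supports a simple continuation loop. A minimal sketch, assuming a hypothetical `complete(prompt) -> (text, finished)` client rather than pdd's continue_generation internals:

```python
# Continuation-loop sketch built on the three-line template above.
# complete(prompt) -> (text, finished) is a hypothetical LLM client.
CONTINUE_TEMPLATE = (
    "{FORMATTED_INPUT_PROMPT}\n"
    "{LLM_OUTPUT}\n"
    "continue by outputting only the rest of the code and no other commentary"
)


def generate_to_completion(complete, formatted_input_prompt: str,
                           max_rounds: int = 5) -> str:
    output, finished = complete(formatted_input_prompt)
    for _ in range(max_rounds):
        if finished:
            break
        # Re-send the original prompt plus the partial output, ask for the rest.
        more, finished = complete(CONTINUE_TEMPLATE.format(
            FORMATTED_INPUT_PROMPT=formatted_input_prompt,
            LLM_OUTPUT=output,
        ))
        output += more  # stitch the continuation onto the partial output
    return output
```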
@@ -0,0 +1,65 @@
+ % You are an expert prompt engineer. You will be given a list of LLM prompts and a change description. Your task is to analyze which prompts need to be changed based on the change description, and provide detailed instructions on how they should be changed.
+
+ % Here are the inputs:
+ <input>
+ <prompt_list>
+ {PROMPT_LIST}
+ </prompt_list>
+
+ <change_description>
+ {CHANGE_DESCRIPTION}
+ </change_description>
+ </input>
+
+
+ % Here is an example of an output for a given input:
+ <example>
+ <input_example>
+ <prompt_list_example>
+ <include>context/detect_change/2/prompt_list.json</include>
+ </prompt_list_example>
+
+ <change_description_example>
+ <include>context/detect_change/2/change.prompt</include>
+ </change_description_example>
+ </input_example>
+
+ <output_example>
+ <include>context/detect_change/2/detect_change_output.txt</include>
+ </output_example>
+ </example>
+
+ % Follow these steps to complete the task:
+ <task>
+ Step 1. Carefully read and analyze the change description. Consider its implications and how it might affect different types of prompts.
+ Step 2. Review each prompt in the prompt list. For each prompt, determine if it needs to be changed based on the change description. Some prompts may be unaffected by the change description or already have the changes applied.
+ Step 3. In your analysis, consider the following:
+ - How does the change description impact each prompt?
+ - Are there any potential issues or conflicts that might arise from implementing the change?
+ - What are different ways the change could be implemented for affected prompts?
+ - Where is the best place to implement the change to minimize issues and maximize effectiveness?
+ Step 4. Prepare your response in the following format:
+ <analysis>
+ 1. Provide a detailed description of the impact of the change and potential issues.
+ 2. Generate at least three different possible implementation plans. Discuss the pros and cons of each plan.
+ 3. Analyze the potential issues and the different plans. Explain step by step which plan is the best and why.
+ 4. For each prompt, explain if it needs to be changed based on the selected plan.
+ 5. List the prompts that need to be changed based on the selected plan. For each prompt that needs to be changed, include:
+ a. The prompt's name
+ b. Detailed and complete instructions for an LLM on how the prompt should be changed. Everything that is needed to know how to change the prompt effectively should be included here.
+ - When instructing that content from another file should be included vs. actually intending to include file contents:
+ 1. Mention the filename that should be included.
+ 2. Describe where in the prompt the file's contents should be inserted.
+ 3. Do not use XML-like syntax (such as angle brackets) when referring to includes, as this may interfere with preprocessing that will happen later.
+ For example:
+ "Insert the contents of the file './context/python_preamble.prompt' immediately after the role and goal statement using 'include' XML tags. The format for this is 'include' in angle brackets, followed by the file path, then closed with 'include' in angle brackets."
+ - If multiple files need to be included, list each one separately with clear instructions on where each should be placed.
+ - When actually intending to include file contents, use the include XML tags. This is common when the include will be replacing existing content.
+ - Provide instructions on which parts of the existing prompt should be removed, modified, or retained. Focus on describing the changes conceptually rather than referencing specific text that might be altered by preprocessing.
+ - Ensure that any unique instructions or logic specific to the prompt being modified are retained and remain clear.
+ - Remember to include any other relevant instructions for modifying the prompt that are not related to file inclusions.
+ - When finished, review the instructions to ensure they will make sense after any preprocessing steps that may occur.
+ </analysis>
+ </task>
+
+ % Remember to be thorough in your analysis and clear in your explanations. Consider all aspects of the change description and its potential impacts on the prompts.
@@ -0,0 +1,10 @@
+ % You are an expert software engineer. Generate a concise example of how to use the following module properly: <code_module>{code_module}</code_module>
+
+ % Here was the prompt used to generate the module: <prompt_for_code>{processed_prompt}</prompt_for_code>
+
+ % The language of the example should be: <language_for_example>{language}</language_for_example>
+
+ % Make sure the following happens:
+ - Document in detail what the input and output parameters are in the doc strings.
+ - Someone needs to be able to fully understand how to use the module from the example.
+ <include>./context/example.prompt</include>
@@ -0,0 +1,6 @@
+ % You are an expert Prompt Engineer. Your goal is to extract the dependencies string from the output of an LLM.
+
+ % Here is the generated llm_output: <llm_output>{llm_output}</llm_output>
+
+ % Output a JSON object with the following keys:
+ - 'string_of_includes': String containing the verbatim output of Step 4.
@@ -0,0 +1,22 @@
+ % You are an expert Software Engineer. Your goal is to extract the block of text (usually code) from llm_output, to be output in JSON format.
+
+ % Here is the llm_output to parse: <llm_output>{llm_output}</llm_output>
+
+ % Here is the type of the text block to extract: <block_type>{language}</block_type>. If the type of the block is 'prompt', then the focus is the prompt itself and that is what should be extracted. If the type is 'log' or 'restructuredtext', then the focus is the report itself and that is what should be extracted.
+
+ % Otherwise, when not extracting 'prompt' or 'log', you are extracting a code block from llm_output; consider and correct the following for the extracted code:
+ - Should be the block of code typically delimited by triple backticks followed by the name of the language of the block. There can be sub-blocks of code within the main block, which should still be extracted.
+ - Should be the primary focus of the LLM prompt that generated llm_output. Sometimes the primary focus of the generation was to create a prompt. If so, this is the code to be extracted. Generated prompts are often not in triple backticks but should still be extracted.
+ - Should be runnable (if not a prompt), with non-runnable text commented or cut out, and without the initial triple backticks that start or end the code block. Sub code blocks that have triple backticks should still be included.
+ - Should be complete, not missing any necessary components, and free of errors.
+ - Should handle any errors or exceptions that may occur.
+ - Should have clear and concise variable and function names and be fully typed.
+ - Should be properly documented, with comments that explain why something is done, and have doc strings or the equivalent.
+ - Should be properly formatted and indented (PEP 8, if Python) with the proper naming conventions.
+ - Never add an example call unless it is already in the code block, and if it is a submodule, have the conditional main execution for the example.
+ - All the functionality of the code block should still be present.
+
+ % Output a JSON object with the following keys:
+ - 'focus': String containing the focus of the generation.
+ - 'explanation': String explanation of why this block_type was the focus of the generation, explaining any errors detected in the code if it is a code type of block.
+ - 'extracted_code': String containing the entire generated and corrected block_type of focus.
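On the consuming side, a response honoring this JSON contract could be parsed defensively; a sketch with a hypothetical helper (models sometimes wrap JSON in code fences, so the object is located first):

```python
# Hypothetical consumer of the extract_code prompt's JSON contract.
import json
import re


def parse_extract_code_response(response: str) -> dict:
    """Pull the JSON object out of the model response and check its keys."""
    match = re.search(r"\{.*\}", response, flags=re.DOTALL)
    if match is None:
        raise ValueError("no JSON object found in model response")
    data = json.loads(match.group(0))
    missing = {"focus", "explanation", "extracted_code"} - data.keys()
    if missing:
        raise ValueError(f"response is missing keys: {missing}")
    return data
```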
@@ -0,0 +1,19 @@
+ % You are an expert Software Engineer. Your goal is to extract a JSON object, which contains a list of JSON objects, from the output of an LLM. This LLM generated a list of prompts that need to be changed to resolve conflicts.
+
+ % Here is the generated llm_output: <llm_output>{llm_output}</llm_output>
+
+ % Output a JSON object with a changes_list key whose value is the list of JSON objects with the following keys:
+ - 'prompt_name': Indicate with 'prompt_1' or 'prompt_2' which prompt needs to be changed.
+ - 'change_instructions': Detailed instructions on how the prompt should be changed. This should have as much information as the LLM output provides for this prompt.
+
+ % Example output:
+ <output_example>{{"changes_list":[
+ {{
+ "prompt_name": "prompt_1",
+ "change_instructions": "Change prompt_1 to include the new information about the product."
+ }},
+ {{
+ "prompt_name": "prompt_2",
+ "change_instructions": "Update prompt_2 to reflect the new pricing structure."
+ }}
+ ]}}</output_example>
@@ -0,0 +1,19 @@
+ % You are an expert Software Engineer. Your goal is to extract a JSON object, which contains a list of JSON objects, from the output of an LLM. This LLM generated a list of prompts that need to be changed.
+
+ % Here is the generated llm_output: <llm_output>{llm_output}</llm_output>
+
+ % Output a JSON object with a changes_list key whose value is the list of JSON objects with the following keys:
+ - 'prompt_name': String which contains the exact name of the prompt that needs to be changed.
+ - 'change_instructions': Detailed instructions on how the prompt should be changed. This should have as much information as the LLM output provides for this prompt.
+
+ % Example output:
+ <output_example>{{"changes_list":[
+ {{
+ "prompt_name": "prompt_1",
+ "change_instructions": "Change prompt_1 to include the new information about the product."
+ }},
+ {{
+ "prompt_name": "prompt_2",
+ "change_instructions": "Update prompt_2 to reflect the new pricing structure."
+ }}
+ ]}}</output_example>
@@ -0,0 +1,16 @@
+ % You are an expert Software Engineer. Your goal is to extract a JSON from an analysis of a program and code module bug-fix report. If there is a choice of updating the program or the code module, you should choose to update the code module.
+
+ % Here is the original program: <program>{program}</program>
+
+ % Here is the original code module: <code_module>{code}</code_module>
+
+ % Here is the program/code module bug-fix report: ```{program_code_fix}```
+
+ % Sometimes the fix may only contain partial code. In these cases, you need to incorporate the fix into the original program and/or original code module.
+
+ % Output a JSON object with the following keys:
+ - 'explanation': String explanation of whether the code under test needs to be fixed and/or whether the unit test needs to be fixed.
+ - 'update_program': Boolean indicating whether the program needs to be updated.
+ - 'update_code': Boolean indicating whether the code module needs to be updated.
+ - 'fixed_program': The entire updated program code, or an empty String if no update is needed.
+ - 'fixed_code': The entire updated code module, or an empty String if no update is needed.
@@ -0,0 +1,7 @@
+ % You are an expert Software Engineer. Your goal is to extract a JSON from the output of an LLM. This LLM changed an input_prompt into a modified_prompt.
+
+ % Here is the generated llm_output: ```{llm_output}```
+
+ % Output a JSON object with the following keys:
+ - 'modified_prompt': String containing the modified prompt derived from the input_prompt via the change_prompt.
+
@@ -0,0 +1,9 @@
+ % You are an expert Software Engineer. Your goal is to extract a JSON from the output of an LLM. This LLM split a prompt into a sub_prompt and a modified_prompt.
+
+ % Here is the generated llm_output: ```{llm_output}```
+
+ % Output a JSON object with the following keys:
+ - 'explaination': String containing the explanation of the split and how the prompts should be extracted properly so that both prompts are complete and functional. For instance, sometimes there will be messages like '[... other internal module examples ...]', which means text is missing and should be copied from the input prompt.
+ - 'sub_prompt': String containing the sub_prompt that was split from the input_prompt.
+ - 'modified_prompt': String containing the complete modified prompt from the input_prompt split from the sub_prompt. Sometimes only the changed portion of the modified prompt is given, so you may need to combine it with parts of the input prompt to get the complete modified prompt.
+
@@ -0,0 +1,8 @@
+ % You are an expert Software Engineer. Your goal is to extract a JSON from the output of an LLM. This LLM changed an input_prompt into a modified_prompt.
+
+ % Here is the generated llm_output: ```{llm_output}```
+
+ % Output a JSON object with the following keys:
+ - 'modified_prompt': String containing the modified prompt that will generate the modified code.
+
+
@@ -0,0 +1,11 @@
+ % You are an expert Software Engineer. Your goal is to extract the closest matching substring described by the prompt's output, to be output in JSON format.
+
+ % Here is the llm_output to parse: <llm_output>{llm_output}</llm_output>
+
+ % When extracting the closest matching substring from llm_output, consider and correct the following for the extracted code:
+ - Should be a substring of prompt_file.
+ - Should be a substring that closely matches code_str in content.
+
+ % Output a JSON object with the following keys:
+ - 'explanation': String explanation of why this prompt_line matches the code_str, explaining any errors detected in the code.
+ - 'prompt_line': String containing the closest matching verbatim substring of the prompt_file that matches code_str in content. This is not the line number.
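The closest-substring task this last prompt delegates to the model can be roughly approximated locally; a sketch using difflib, purely for illustration (the package itself, per this diff, relies on the LLM):

```python
# Rough local approximation of the closest-matching-substring task, using
# difflib over the prompt file's lines. Illustrative only; pdd delegates the
# real matching to an LLM via the prompt above.
import difflib


def closest_prompt_line(prompt_file_text: str, code_str: str) -> str:
    """Return the non-empty line of the prompt file most similar to code_str."""
    lines = [line for line in prompt_file_text.splitlines() if line.strip()]
    matches = difflib.get_close_matches(code_str, lines, n=1, cutoff=0.0)
    return matches[0] if matches else ""
```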