pdd_cli-0.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pdd-cli has been flagged as possibly problematic.
- pdd/__init__.py +0 -0
- pdd/auto_deps_main.py +98 -0
- pdd/auto_include.py +175 -0
- pdd/auto_update.py +73 -0
- pdd/bug_main.py +99 -0
- pdd/bug_to_unit_test.py +159 -0
- pdd/change.py +141 -0
- pdd/change_main.py +240 -0
- pdd/cli.py +607 -0
- pdd/cmd_test_main.py +155 -0
- pdd/code_generator.py +117 -0
- pdd/code_generator_main.py +66 -0
- pdd/comment_line.py +35 -0
- pdd/conflicts_in_prompts.py +143 -0
- pdd/conflicts_main.py +90 -0
- pdd/construct_paths.py +251 -0
- pdd/context_generator.py +133 -0
- pdd/context_generator_main.py +73 -0
- pdd/continue_generation.py +140 -0
- pdd/crash_main.py +127 -0
- pdd/data/language_format.csv +61 -0
- pdd/data/llm_model.csv +15 -0
- pdd/detect_change.py +142 -0
- pdd/detect_change_main.py +100 -0
- pdd/find_section.py +28 -0
- pdd/fix_code_loop.py +212 -0
- pdd/fix_code_module_errors.py +143 -0
- pdd/fix_error_loop.py +216 -0
- pdd/fix_errors_from_unit_tests.py +240 -0
- pdd/fix_main.py +138 -0
- pdd/generate_output_paths.py +194 -0
- pdd/generate_test.py +140 -0
- pdd/get_comment.py +55 -0
- pdd/get_extension.py +52 -0
- pdd/get_language.py +41 -0
- pdd/git_update.py +84 -0
- pdd/increase_tests.py +93 -0
- pdd/insert_includes.py +150 -0
- pdd/llm_invoke.py +304 -0
- pdd/load_prompt_template.py +59 -0
- pdd/pdd_completion.fish +72 -0
- pdd/pdd_completion.sh +141 -0
- pdd/pdd_completion.zsh +418 -0
- pdd/postprocess.py +121 -0
- pdd/postprocess_0.py +52 -0
- pdd/preprocess.py +199 -0
- pdd/preprocess_main.py +72 -0
- pdd/process_csv_change.py +182 -0
- pdd/prompts/auto_include_LLM.prompt +230 -0
- pdd/prompts/bug_to_unit_test_LLM.prompt +17 -0
- pdd/prompts/change_LLM.prompt +34 -0
- pdd/prompts/conflict_LLM.prompt +23 -0
- pdd/prompts/continue_generation_LLM.prompt +3 -0
- pdd/prompts/detect_change_LLM.prompt +65 -0
- pdd/prompts/example_generator_LLM.prompt +10 -0
- pdd/prompts/extract_auto_include_LLM.prompt +6 -0
- pdd/prompts/extract_code_LLM.prompt +22 -0
- pdd/prompts/extract_conflict_LLM.prompt +19 -0
- pdd/prompts/extract_detect_change_LLM.prompt +19 -0
- pdd/prompts/extract_program_code_fix_LLM.prompt +16 -0
- pdd/prompts/extract_prompt_change_LLM.prompt +7 -0
- pdd/prompts/extract_prompt_split_LLM.prompt +9 -0
- pdd/prompts/extract_prompt_update_LLM.prompt +8 -0
- pdd/prompts/extract_promptline_LLM.prompt +11 -0
- pdd/prompts/extract_unit_code_fix_LLM.prompt +332 -0
- pdd/prompts/extract_xml_LLM.prompt +7 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +17 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +62 -0
- pdd/prompts/generate_test_LLM.prompt +12 -0
- pdd/prompts/increase_tests_LLM.prompt +16 -0
- pdd/prompts/insert_includes_LLM.prompt +30 -0
- pdd/prompts/split_LLM.prompt +94 -0
- pdd/prompts/summarize_file_LLM.prompt +11 -0
- pdd/prompts/trace_LLM.prompt +30 -0
- pdd/prompts/trim_results_LLM.prompt +83 -0
- pdd/prompts/trim_results_start_LLM.prompt +45 -0
- pdd/prompts/unfinished_prompt_LLM.prompt +18 -0
- pdd/prompts/update_prompt_LLM.prompt +19 -0
- pdd/prompts/xml_convertor_LLM.prompt +54 -0
- pdd/split.py +119 -0
- pdd/split_main.py +103 -0
- pdd/summarize_directory.py +212 -0
- pdd/trace.py +135 -0
- pdd/trace_main.py +108 -0
- pdd/track_cost.py +102 -0
- pdd/unfinished_prompt.py +114 -0
- pdd/update_main.py +96 -0
- pdd/update_prompt.py +115 -0
- pdd/xml_tagger.py +122 -0
- pdd_cli-0.0.2.dist-info/LICENSE +7 -0
- pdd_cli-0.0.2.dist-info/METADATA +225 -0
- pdd_cli-0.0.2.dist-info/RECORD +95 -0
- pdd_cli-0.0.2.dist-info/WHEEL +5 -0
- pdd_cli-0.0.2.dist-info/entry_points.txt +2 -0
- pdd_cli-0.0.2.dist-info/top_level.txt +1 -0
pdd/prompts/extract_unit_code_fix_LLM.prompt
@@ -0,0 +1,332 @@
+% You are an expert Software Engineer. Your goal is to extract a JSON (containing keys that hold the entire updated code under test and/or unit test) from an analysis of a unit test bug fix report. If there is a choice between updating the unit test or the code under test to resolve the problem, you should choose to update the code under test.
+
+% Here is the original unit test code: <unit_test>{unit_test}</unit_test>
+
+% Here is the original code under test: <code_under_test>{code}</code_under_test>
+
+% Here is the unit test bug fix report: <fix_report>{unit_test_fix}</fix_report>
+
+% Sometimes the fix report may contain only partial code snippet(s) of the unit test and/or code under test. In these cases, you need to incorporate the partial fix(es) into the original unit test and/or original code under test so the test and code files are always complete. Read the text around the code block to determine this. Often this is signified by comments in the code such as:
+- `# Rest of the code remains the same`,
+- `# Other tests remain unchanged`
+- `# ... [remaining test functions remain unchanged]` or
+- `# ... rest of the test file remains unchanged ...` at the end of code snippets.
+
+% Here are examples of inputs and proper outputs of this prompt:
+<examples>
+<example_1>
+<input_example>
+<unit_test_example>
+import pytest
+from auth_service import AuthService
+
+@pytest.fixture
+def auth_service():
+    return AuthService()
+
+def test_valid_login(auth_service):
+    result = auth_service.login("user@example.com", "password123")
+    assert result.success
+    assert result.token is not None
+
+def test_invalid_password(auth_service):
+    result = auth_service.login("user@example.com", "wrongpass")
+    assert not result.success
+    assert result.token is None
+</unit_test_example>
+
+<fix_report_example>
+```python
+# Code above this line remains the same
+def test_invalid_email_format(auth_service):
+    result = auth_service.login("invalid-email", "password123")
+    assert not result.success
+    assert result.error == "Invalid email format"
+
+def test_empty_credentials(auth_service):
+    result = auth_service.login("", "")
+    assert not result.success
+    assert result.error == "Email and password required"
+```
+</fix_report_example>
+</input_example>
+
+<output_example>
+<fixed_unit_test_example>
+import pytest
+from auth_service import AuthService
+
+@pytest.fixture
+def auth_service():
+    return AuthService()
+
+def test_valid_login(auth_service):
+    result = auth_service.login("user@example.com", "password123")
+    assert result.success
+    assert result.token is not None
+
+def test_invalid_password(auth_service):
+    result = auth_service.login("user@example.com", "wrongpass")
+    assert not result.success
+    assert result.token is None
+
+def test_invalid_email_format(auth_service):
+    result = auth_service.login("invalid-email", "password123")
+    assert not result.success
+    assert result.error == "Invalid email format"
+
+def test_empty_credentials(auth_service):
+    result = auth_service.login("", "")
+    assert not result.success
+    assert result.error == "Email and password required"
+</fixed_unit_test_example>
+</output_example>
+</example_1>
+
+<example_2>
+<input_example>
+<unit_test_example>
+import pytest
+from auth_service import AuthService
+
+@pytest.fixture
+def auth_service():
+    return AuthService()
+
+def test_valid_login(auth_service):
+    result = auth_service.login("user@example.com", "password123")
+    assert result.success
+    assert result.token is not None
+
+def test_invalid_password(auth_service):
+    result = auth_service.login("user@example.com", "wrongpass")
+    assert not result.success
+    assert result.token is None
+</unit_test_example>
+
+<fix_report_example>
+```python
+import pytest
+from auth_service import AuthService
+
+# ... [other tests] ...
+
+def test_invalid_email_format(auth_service):
+    result = auth_service.login("invalid-email", "password123")
+    assert not result.success
+    assert result.error == "Invalid email format"
+
+def test_empty_credentials(auth_service):
+    result = auth_service.login("", "")
+    assert not result.success
+    assert result.error == "Email and password required"
+```
+</fix_report_example>
+</input_example>
+
+<output_example>
+<fixed_unit_test_example>
+import pytest
+from auth_service import AuthService
+
+@pytest.fixture
+def auth_service():
+    return AuthService()
+
+def test_valid_login(auth_service):
+    result = auth_service.login("user@example.com", "password123")
+    assert result.success
+    assert result.token is not None
+
+def test_invalid_password(auth_service):
+    result = auth_service.login("user@example.com", "wrongpass")
+    assert not result.success
+    assert result.token is None
+
+def test_invalid_email_format(auth_service):
+    result = auth_service.login("invalid-email", "password123")
+    assert not result.success
+    assert result.error == "Invalid email format"
+
+def test_empty_credentials(auth_service):
+    result = auth_service.login("", "")
+    assert not result.success
+    assert result.error == "Email and password required"
+</fixed_unit_test_example>
+</output_example>
+</example_2>
+
+<example_3>
+<input_example>
+<unit_test_example>
+import pytest
+from calculator import Calculator
+
+@pytest.fixture
+def calc():
+    return Calculator()
+
+def test_add(calc):
+    assert calc.add(2, 3) == 5
+    assert calc.add(-1, 1) == 0
+
+def test_subtract(calc):
+    assert calc.subtract(5, 3) == 2
+    assert calc.subtract(1, 1) == 0
+</unit_test_example>
+
+<fix_report_example>
+```python
+import pytest
+from calculator import Calculator
+
+@pytest.fixture
+def calc():
+    return Calculator()
+
+def test_add(calc):
+    assert calc.add(2, 3) == 5
+    assert calc.add(-1, 1) == 0
+
+def test_subtract(calc):
+    assert calc.subtract(5, 3) == 2
+    assert calc.subtract(1, 1) == 0
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.1, 0.2, 0.3),
+    (-0.5, 0.5, 0.0),
+])
+def test_add_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.add(a, b)) == expected
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.3, 0.1, 0.2),
+    (1.0, 0.5, 0.5),
+])
+def test_subtract_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.subtract(a, b)) == expected
+```
+</fix_report_example>
+</input_example>
+
+<output_example>
+<fixed_unit_test_example>
+import pytest
+from calculator import Calculator
+
+@pytest.fixture
+def calc():
+    return Calculator()
+
+def test_add(calc):
+    assert calc.add(2, 3) == 5
+    assert calc.add(-1, 1) == 0
+
+def test_subtract(calc):
+    assert calc.subtract(5, 3) == 2
+    assert calc.subtract(1, 1) == 0
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.1, 0.2, 0.3),
+    (-0.5, 0.5, 0.0),
+])
+def test_add_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.add(a, b)) == expected
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.3, 0.1, 0.2),
+    (1.0, 0.5, 0.5),
+])
+def test_subtract_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.subtract(a, b)) == expected
+</fixed_unit_test_example>
+</output_example>
+</example_3>
+
+<example_4>
+<input_example>
+<unit_test_example>
+import pytest
+from calculator import Calculator
+
+@pytest.fixture
+def calc():
+    return Calculator()
+
+def test_add(calc):
+    assert calc.add(2, 3) == 5
+    assert calc.add(-1, 1) == 0
+
+def test_subtract(calc):
+    assert calc.subtract(5, 3) == 2
+    assert calc.subtract(1, 1) == 0
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.1, 0.2, 0.3),
+    (-0.5, 0.5, 0.0),
+])
+def test_add_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.add(a, b)) == expected
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.3, 0.1, 0.2),
+    (1.0, 0.5, 0.5),
+])
+def test_subtract_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.subtract(a, b)) == expected
+</unit_test_example>
+
+<fix_report_example>
+```python
+# Code above this line remains the same
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.5, 0.3, 0.2),
+    (-0.5, 0.7, 0.2),
+])
+# Rest of the test cases remain the same...
+```
+</fix_report_example>
+</input_example>
+
+<output_example>
+<fixed_unit_test_example>
+import pytest
+from calculator import Calculator
+
+@pytest.fixture
+def calc():
+    return Calculator()
+
+def test_add(calc):
+    assert calc.add(2, 3) == 5
+    assert calc.add(-1, 1) == 0
+
+def test_subtract(calc):
+    assert calc.subtract(5, 3) == 2
+    assert calc.subtract(1, 1) == 0
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.5, 0.3, 0.2),
+    (-0.5, 0.7, 0.2),
+])
+def test_add_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.add(a, b)) == expected
+
+@pytest.mark.parametrize("a,b,expected", [
+    (0.3, 0.1, 0.2),
+    (1.0, 0.5, 0.5),
+])
+def test_subtract_floating_point(calc, a, b, expected):
+    assert pytest.approx(calc.subtract(a, b)) == expected
+</fixed_unit_test_example>
+</output_example>
+</example_4>
+</examples>
+
+% Output a JSON object with the following keys:
+- 'explanation': String explanation of whether the code under test needs to be fixed and/or whether the unit test needs to be fixed. Also explain whether only a fragment of code was provided and the entire unit test and/or code under test needs to be reassembled from the original code and/or unit test.
+- 'update_unit_test': Boolean indicating whether the unit test needs to be updated.
+- 'update_code': Boolean indicating whether the code under test needs to be updated.
+- 'fixed_unit_test': The entire updated unit test code, or an empty String if no update is needed.
+- 'fixed_code': The entire updated code under test, or an empty String if no update is needed.
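The output contract above is concrete enough to validate mechanically. As a minimal illustration (not code from this package; pdd's own handling lives in modules like fix_errors_from_unit_tests.py, whose bodies this diff does not show), a model reply could be sanity-checked like this:

```python
import json

# Keys required by extract_unit_code_fix_LLM.prompt's output spec.
REQUIRED_KEYS = {"explanation", "update_unit_test", "update_code",
                 "fixed_unit_test", "fixed_code"}

def parse_fix_result(raw_reply: str) -> dict:
    """Parse an LLM reply expected to be the JSON object described above."""
    result = json.loads(raw_reply)
    missing = REQUIRED_KEYS - result.keys()
    if missing:
        raise ValueError(f"reply is missing keys: {sorted(missing)}")
    # Per the spec, a True flag should come with a non-empty payload.
    if result["update_unit_test"] and not result["fixed_unit_test"]:
        raise ValueError("update_unit_test is true but fixed_unit_test is empty")
    if result["update_code"] and not result["fixed_code"]:
        raise ValueError("update_code is true but fixed_code is empty")
    return result
```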
pdd/prompts/extract_xml_LLM.prompt
@@ -0,0 +1,7 @@
+% You are an expert Software Engineer. Your goal is to extract a JSON from an analysis of a prompt that contains inserted XML tags.
+
+% Here is the generated XML prompt_analysis: <prompt_analysis>{xml_generated_analysis}</prompt_analysis>
+
+% Output a JSON object with the following keys:
+- 'explanation': String explanation of why the extracted prompt is the prompt with inserted XML tags.
+- 'xml_tagged': String of just the entire prompt with inserted XML tags. Preserve the formatting so the text is easy to read. Don't include the prompt_analysis tags.
pdd/prompts/fix_code_module_errors_LLM.prompt
@@ -0,0 +1,17 @@
+% You are an expert Software Engineer. Your goal is to fix the errors in a code_module or program that are causing that program to crash.
+
+% Here is the program that is running the code_module that crashed and/or has errors: <program>{program}</program>
+
+% Here is the prompt that generated the code_module below: <prompt>{prompt}</prompt>
+
+% Here is the code_module that is being used by the program: <code_module>{code}</code_module>
+
+% Here are the error log(s) from the program run and potentially from prior program run fixes: <errors>{errors}</errors>
+
+% Follow these steps to solve these errors:
+Step 1. Compare the prompt to the code_module and explain differences, if any.
+Step 2. Compare the prompt to the program and explain differences, if any.
+Step 3. Explain in detail, step by step, why there might be an error and why prior attempted fixes, if any, may not have worked. Write several paragraphs explaining the root cause of each of the errors.
+Step 4. Explain in detail, step by step, how to solve each of the errors. For each error, there should be a several-paragraph description of the steps. Sometimes logging or print statements can help debug the code_module or program.
+Step 5. Review the above steps and correct for any errors in the logic.
+Step 6. For the code that needs changes, write the corrected code_module and/or corrected program in its/their entirety.
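Note the `{program}`, `{prompt}`, `{code}`, and `{errors}` fields: these prompt files are templates with named placeholders. A minimal sketch of how such a template might be filled (illustrative only; the package's actual loading and substitution live in load_prompt_template.py and llm_invoke.py, whose internals are not shown in this diff):

```python
from pathlib import Path

def fill_template(template_path: str, **fields: str) -> str:
    """Substitute named {placeholder} fields in a .prompt template.

    Caveat: str.format treats every brace as a field, so this simple
    approach assumes the template contains no other literal braces.
    """
    template = Path(template_path).read_text()
    return template.format(**fields)

# Hypothetical usage with this file's four placeholders:
filled = fill_template(
    "pdd/prompts/fix_code_module_errors_LLM.prompt",
    program="<program source here>",
    prompt="<generating prompt here>",
    code="<code_module source here>",
    errors="<error logs here>",
)
```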
pdd/prompts/fix_errors_from_unit_tests_LLM.prompt
@@ -0,0 +1,62 @@
+% You are an expert Software Engineer. Your goal is to diagnose and fix the errors from a unit_test run on the code_under_test. The error might be in the code_under_test or the unit_test or both.
+
+% Here is the unit_test for the code_under_test: <unit_test>{unit_test}</unit_test>
+
+% Here is the code_under_test: <code_under_test>{code}</code_under_test>
+
+% Here is the prompt that generated the code_under_test: <prompt>{prompt}</prompt>
+
+% This prompt is run iteratively. Here are the current errors and past potential fix attempts, if any, from the unit test and verification program run(s): <errors>{errors}</errors>
+
+<examples>
+<example_1>
+% Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/1/test_conflicts_in_prompts.py</include></example_unit_test>
+
+% Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/1/conflicts_in_prompts.py</include></example_code_under_test>
+
+% Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/1/conflicts_in_prompts_python.prompt</include></example_prompt>
+</example_1>
+
+<example_2>
+% Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/2/test_code_generator.py</include></example_unit_test>
+
+% Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/2/code_generator.py</include></example_code_under_test>
+
+% Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/2/code_generator_python.prompt</include></example_prompt>
+</example_2>
+
+<example_3>
+% Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/3/test_context_generator.py</include></example_unit_test>
+
+% Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/3/context_generator.py</include></example_code_under_test>
+
+% Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/3/context_generator_python.prompt</include></example_prompt>
+</example_3>
+
+<example_4>
+% Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/4/test_detect_change.py</include></example_unit_test>
+
+% Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/4/detect_change.py</include></example_code_under_test>
+
+% Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/4/detect_change_python.prompt</include></example_prompt>
+</example_4>
+
+<example_5>
+% Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/4/test_detect_change_1_0_1.py</include></example_unit_test>
+
+% Here is an example_code_under_test that didn't fully pass the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/4/detect_change_1_0_1.py</include></example_code_under_test>
+
+% Here is an example error/fix log showing how the issues were resolved: <example_error_fix_log><include>context/fix_errors_from_unit_tests/4/error.log</include></example_error_fix_log>
+</example_5>
+</examples>
+
+<instructions>
+% Follow these steps to solve these errors:
+Step 1. Compare the prompt to the code_under_test and explain differences, if any.
+Step 2. Compare the prompt to the unit_test and explain differences, if any.
+Step 3. For each prior attempted fix (if any), explain in a few paragraphs why it might not have worked.
+Step 4. Write several paragraphs explaining the root cause of each of the errors.
+Step 5. Explain in detail, step by step, how to solve each of the errors. For each error, there should be a several-paragraph description of the solution steps. Sometimes logging or print statements can help debug the code.
+Step 6. Review the above steps and correct for any errors in the code under test or unit test.
+Step 7. For the code that needs changes, write the corrected code_under_test and/or corrected unit_test in its/their entirety.
+</instructions>
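The `<include>` tags above pull example files in from the repository at preprocessing time. The diff lists preprocess.py and insert_includes.py but does not show their bodies, so the following is only a guess at the semantics: a minimal sketch that inlines each referenced file.

```python
import re
from pathlib import Path

INCLUDE_RE = re.compile(r"<include>(.*?)</include>", re.DOTALL)

def resolve_includes(prompt_text: str, base_dir: Path = Path(".")) -> str:
    """Replace each <include>path</include> with the contents of that file.

    Sketch only: the real preprocessor (pdd/preprocess.py) may handle
    nesting, escaping, and missing files differently.
    """
    def _inline(match: re.Match) -> str:
        return (base_dir / match.group(1).strip()).read_text()

    return INCLUDE_RE.sub(_inline, prompt_text)
```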
pdd/prompts/generate_test_LLM.prompt
@@ -0,0 +1,12 @@
+% You are an expert Software Test Engineer. Generate a unit test that ensures correct functionality of the code under test.
+
+% Here is a description of what the code is supposed to do, which was the prompt that generated the code: ```{prompt_that_generated_code}```
+
+% Here is the code under test: ```{code}```
+
+% Follow these rules:
+- The module name for the code under test will have the same name as the function name
+- The unit test should be in {language}. If Python, use pytest.
+- Use individual test functions for each case to make it easier to identify which specific cases pass or fail.
+- Use the description of the functionality in the prompt to generate useful tests with good code coverage.
+<include>./context/test.prompt</include>
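To make the rules concrete, here is an illustrative test file in the shape these rules describe, reusing the Calculator example that appears elsewhere in these prompts (not actual generated package output): the module is named after the code under test and each case gets its own test function.

```python
# test_calculator.py -- illustrative shape only.
import pytest
from calculator import Calculator  # rule: module named after the code under test

@pytest.fixture
def calc():
    return Calculator()

def test_add_positive_numbers(calc):
    assert calc.add(2, 3) == 5

def test_add_negative_and_positive(calc):
    assert calc.add(-1, 1) == 0

def test_add_floats_are_approximate(calc):
    assert pytest.approx(calc.add(0.1, 0.2)) == 0.3
```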
pdd/prompts/increase_tests_LLM.prompt
@@ -0,0 +1,16 @@
+% You are an expert Software Test Engineer. Given an existing set of unit tests along with their coverage reports, generate additional unit tests that provide more coverage for the code under test.
+
+% Here is a description of what the code is supposed to do, which was the prompt that generated the code: ```{prompt_that_generated_code}```
+
+% Here is the code under test: ```{code}```
+
+Here are the existing unit tests: ```{existing_unit_tests}```
+
+Here is the coverage report: ```{coverage_report}```
+
+% Follow these rules:
+- The module name for the code under test will have the same name as the function name
+- The unit test should be in {language}. If Python, use pytest.
+- Use individual test functions for each case to make it easier to identify which specific cases pass or fail.
+- Use the description of the functionality in the prompt to generate useful tests with good code coverage.
+<include>./context/test.prompt</include>
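The `{coverage_report}` input has to come from somewhere. One plausible way to produce it (an assumption on my part; this diff does not show how pdd's cmd_test_main.py actually gathers coverage) is to run the existing tests under the pytest-cov plugin and capture the terminal report:

```python
import subprocess

# Run the existing tests under coverage and capture the terminal report,
# which can then fill the {coverage_report} placeholder.
result = subprocess.run(
    ["python", "-m", "pytest", "--cov=calculator", "--cov-report=term-missing"],
    capture_output=True,
    text=True,
)
coverage_report = result.stdout
print(coverage_report)
```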
pdd/prompts/insert_includes_LLM.prompt
@@ -0,0 +1,30 @@
+% You are an expert prompt engineer. Your goal is to properly insert dependencies into a prompt.
+
+% Here are a few examples of how to properly insert dependencies into a prompt:
+<examples>
+<example id="1">
+INPUT:
+<prompt_to_update><include>context/insert/1/prompt_to_update.prompt</include></prompt_to_update>
+<dependencies_to_insert><include>context/insert/1/dependencies.prompt</include></dependencies_to_insert>
+
+OUTPUT:
+<updated_prompt><include>context/insert/1/updated_prompt.prompt</include></updated_prompt>
+</example>
+
+<example id="2">
+INPUT:
+<prompt_to_update><include>context/insert/2/prompt_to_update.prompt</include></prompt_to_update>
+<dependencies_to_insert><include>context/insert/2/dependencies.prompt</include></dependencies_to_insert>
+
+OUTPUT:
+<updated_prompt><include>context/insert/2/updated_prompt.prompt</include></updated_prompt>
+</example>
+</examples>
+
+% Generate the output for the following inputs based on the above examples:
+<prompt_to_update>{actual_prompt_to_update}</prompt_to_update>
+<dependencies_to_insert>{actual_dependencies_to_insert}</dependencies_to_insert>
+
+% The output prompt will be in JSON format with the following keys:
+- 'explanation': A string explaining why the dependencies were inserted at a certain location in the prompt.
+- 'output_prompt': A string containing the prompt with the dependencies inserted.
pdd/prompts/split_LLM.prompt
@@ -0,0 +1,94 @@
+% Here are some examples of splitting prompts:
+<examples>
+<pdd>
+<example_1>
+<1_input_prompt><include>./context/split/1/initial_pdd_python.prompt</include></1_input_prompt>
+<1_input_code><include>./context/split/1/pdd.py</include></1_input_code>
+<1_example_code><include>./context/split/1/split_get_extension.py</include></1_example_code>
+<1_sub_prompt><include>./context/split/1/sub_pdd_python.prompt</include></1_sub_prompt>
+<1_modified_prompt><include>./context/split/1/final_pdd_python.prompt</include></1_modified_prompt>
+</example_1>
+
+<example_2>
+<2_input_prompt><include>./context/split/2/initial_pdd_python.prompt</include></2_input_prompt>
+<2_input_code><include>./context/split/2/pdd.py</include></2_input_code>
+<2_example_code><include>./context/split/2/split_pdd_construct_output_path.py</include></2_example_code>
+<2_sub_prompt><include>./context/split/2/sub_pdd_python.prompt</include></2_sub_prompt>
+<2_modified_prompt><include>./context/split/2/final_pdd_python.prompt</include></2_modified_prompt>
+</example_2>
+
+<example_3>
+<3_input_prompt><include>./context/split/3/initial_postprocess_python.prompt</include></3_input_prompt>
+<3_input_code><include>./context/split/3/postprocess.py</include></3_input_code>
+<3_example_code><include>./context/split/3/split_postprocess_find_section.py</include></3_example_code>
+<3_sub_prompt><include>./context/split/3/sub_postprocess_python.prompt</include></3_sub_prompt>
+<3_modified_prompt><include>./context/split/3/final_postprocess_python.prompt</include></3_modified_prompt>
+</example_3>
+
+<example_4>
+<4_input_prompt><include>./context/split/4/initial_construct_paths_python.prompt</include></4_input_prompt>
+<4_input_code><include>./context/split/4/construct_paths.py</include></4_input_code>
+<4_example_code><include>./context/split/4/split_construct_paths_generate_output_filename.py</include></4_example_code>
+<4_sub_prompt><include>./context/split/4/sub_construct_paths_python.prompt</include></4_sub_prompt>
+<4_modified_prompt><include>./context/split/4/final_construct_paths_python.prompt</include></4_modified_prompt>
+</example_4>
+
+<example_5>
+<5_input_prompt><include>context/split/5/cli_python.prompt</include></5_input_prompt>
+<5_input_code><include>context/split/5/cli.py</include></5_input_code>
+<5_example_code><include>context/split/5/split_track_cost.py</include></5_example_code>
+<5_sub_prompt><include>context/split/5/track_cost_python.prompt</include></5_sub_prompt>
+<5_modified_prompt><include>context/split/5/modified_cli_python.prompt</include></5_modified_prompt>
+</example_5>
+</pdd>
+
+<example_6>
+<6_input_prompt><include>context/split/6/cli_python.prompt</include></6_input_prompt>
+<6_input_code><include>context/split/6/cli.py</include></6_input_code>
+<6_example_code><include>context/split/6/split_conflicts.py</include></6_example_code>
+<6_sub_prompt><include>context/split/6/conflicts_main_python.prompt</include></6_sub_prompt>
+<6_modified_prompt><include>context/split/6/modified_cli_python.prompt</include></6_modified_prompt>
+</example_6>
+
+<example_7>
+<7_input_prompt><include>context/split/7/cli_python.prompt</include></7_input_prompt>
+<7_input_code><include>context/split/7/cli.py</include></7_input_code>
+<7_example_code><include>context/split/7/split_trace_main.py</include></7_example_code>
+<7_sub_prompt><include>context/split/7/trace_main_python.prompt</include></7_sub_prompt>
+<7_modified_prompt><include>context/split/7/modified_cli_python.prompt</include></7_modified_prompt>
+</example_7>
+
+</examples>
+
+<context>
+% You are an expert LLM Prompt Engineer. Your goal is to split the input_prompt (a larger prompt) into a sub_prompt and a modified_prompt (two smaller prompts) with no loss of functionality. This makes the modules easier to generate and test.
+
+% Here are the inputs and outputs of this prompt:
+<input_definitions>
+Input:
+'input_prompt' - A string that contains the prompt that will be split into a sub_prompt and modified_prompt.
+'input_code' - A string that contains the code that was generated from the input_prompt.
+'example_code' - A string that contains a code example of how the code generated from the sub_prompt would be used by the code generated from the modified_prompt.
+</input_definitions>
+<output_definitions>
+Output:
+'sub_prompt' - A string that contains the sub_prompt that was split from the input_prompt.
+'modified_prompt' - A string that contains what remains of the input_prompt after the above sub_prompt is split out.
+</output_definitions>
+</context>
+
+<inputs>
+<input_prompt>{input_prompt}</input_prompt>
+<input_code>{input_code}</input_code>
+<example_code>{example_code}</example_code>
+</inputs>
+
+<instructions>
+% Follow these instructions:
+1. Write several paragraphs explaining, based on the example_code, how the input_prompt could be split into a sub_prompt and modified_prompt.
+2. Write out in detail, in several paragraphs, all the functionality of the sub_prompt by looking at the input_prompt and input_code.
+3. Write out the possible difficulties in splitting the prompt according to the input example_code. Write several paragraphs for each difficulty.
+4. Write out how to overcome the difficulties. Write several paragraphs for each difficulty.
+5. Write the sub_prompt which would generate the code that could be used by the example_code. This prompt should carefully consider all the prior steps and ensure enough detail is provided to generate the code properly. Internal modules need to be imported using include and other appropriate xml tags in the same style as the input_prompt.
+6. Write the complete modified_prompt which would incorporate the sub_prompt without any duplications or conflicting functionalities.
+</instructions>
pdd/prompts/summarize_file_LLM.prompt
@@ -0,0 +1,11 @@
+% Review the input string and provide a summary of its contents in about 200 words.
+
+% Here are the Inputs/Outputs:
+- input:
+    'file_contents': A string containing the contents of the file to summarize.
+- output: JSON output with the following key:
+    'file_summary': A string containing the summary of the file.
+
+<file_contents>
+{file_contents}
+</file_contents>
pdd/prompts/trace_LLM.prompt
@@ -0,0 +1,30 @@
+% Imagine you're an expert Python Software Engineer. Your goal is to find the corresponding part of a .prompt file. This prompt takes in three arguments: the text of the .prompt file, the text of the code file, and the line that the debugger is on in the code file. Your task is to find the equivalent line in the .prompt file that matches the line in the code file.
+
+% Here are the inputs and outputs of the prompt:
+Input:
+`code_file` (str) - A string that contains the text of the code file.
+`code_str` (str) - A substring of code_file that represents the line that the debugger is on in the code_file.
+`prompt_file` (str) - A string that contains the text of the .prompt file.
+Output:
+`prompt_line` (str) - A string that represents the equivalent line in the .prompt file that matches the code_str line in the code file.
+
+% Here is the code_file to reference:
+
+<code_file>
+{CODE_FILE}
+</code_file>
+
+% Here is the code_str to reference:
+
+<code_str>
+{CODE_STR}
+</code_str>
+
+% Here is the prompt_file to reference:
+
+<prompt_file>
+{PROMPT_FILE}
+</prompt_file>
+
+% To generate the prompt_line, find a substring of prompt_file that matches code_str, which is a substring of code_file.
+
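The matching task this prompt describes can be approximated without an LLM, which helps show what "equivalent line" means in practice. A rough fuzzy-matching sketch using difflib (illustrative only; pdd/trace.py presumably delegates the real matching to the model):

```python
import difflib

def closest_prompt_line(code_str: str, prompt_file: str) -> str:
    """Return the prompt_file line most similar to the debugger's code_str."""
    best_line, best_ratio = "", 0.0
    for line in prompt_file.splitlines():
        ratio = difflib.SequenceMatcher(None, code_str.strip(), line.strip()).ratio()
        if ratio > best_ratio:
            best_line, best_ratio = line, ratio
    return best_line

# Hypothetical usage: the prompt text and code line below are invented.
prompt_text = "% Return the file extension for a given language name."
print(closest_prompt_line("return extension_for(language)", prompt_text))
```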