pdd-cli 0.0.42__py3-none-any.whl → 0.0.90__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. pdd/__init__.py +4 -4
  2. pdd/agentic_common.py +863 -0
  3. pdd/agentic_crash.py +534 -0
  4. pdd/agentic_fix.py +1179 -0
  5. pdd/agentic_langtest.py +162 -0
  6. pdd/agentic_update.py +370 -0
  7. pdd/agentic_verify.py +183 -0
  8. pdd/auto_deps_main.py +15 -5
  9. pdd/auto_include.py +63 -5
  10. pdd/bug_main.py +3 -2
  11. pdd/bug_to_unit_test.py +2 -0
  12. pdd/change_main.py +11 -4
  13. pdd/cli.py +22 -1181
  14. pdd/cmd_test_main.py +80 -19
  15. pdd/code_generator.py +58 -18
  16. pdd/code_generator_main.py +672 -25
  17. pdd/commands/__init__.py +42 -0
  18. pdd/commands/analysis.py +248 -0
  19. pdd/commands/fix.py +140 -0
  20. pdd/commands/generate.py +257 -0
  21. pdd/commands/maintenance.py +174 -0
  22. pdd/commands/misc.py +79 -0
  23. pdd/commands/modify.py +230 -0
  24. pdd/commands/report.py +144 -0
  25. pdd/commands/templates.py +215 -0
  26. pdd/commands/utility.py +110 -0
  27. pdd/config_resolution.py +58 -0
  28. pdd/conflicts_main.py +8 -3
  29. pdd/construct_paths.py +281 -81
  30. pdd/context_generator.py +10 -2
  31. pdd/context_generator_main.py +113 -11
  32. pdd/continue_generation.py +47 -7
  33. pdd/core/__init__.py +0 -0
  34. pdd/core/cli.py +503 -0
  35. pdd/core/dump.py +554 -0
  36. pdd/core/errors.py +63 -0
  37. pdd/core/utils.py +90 -0
  38. pdd/crash_main.py +44 -11
  39. pdd/data/language_format.csv +71 -62
  40. pdd/data/llm_model.csv +20 -18
  41. pdd/detect_change_main.py +5 -4
  42. pdd/fix_code_loop.py +331 -77
  43. pdd/fix_error_loop.py +209 -60
  44. pdd/fix_errors_from_unit_tests.py +4 -3
  45. pdd/fix_main.py +75 -18
  46. pdd/fix_verification_errors.py +12 -100
  47. pdd/fix_verification_errors_loop.py +319 -272
  48. pdd/fix_verification_main.py +57 -17
  49. pdd/generate_output_paths.py +93 -10
  50. pdd/generate_test.py +16 -5
  51. pdd/get_jwt_token.py +48 -9
  52. pdd/get_run_command.py +73 -0
  53. pdd/get_test_command.py +68 -0
  54. pdd/git_update.py +70 -19
  55. pdd/increase_tests.py +7 -0
  56. pdd/incremental_code_generator.py +2 -2
  57. pdd/insert_includes.py +11 -3
  58. pdd/llm_invoke.py +1278 -110
  59. pdd/load_prompt_template.py +36 -10
  60. pdd/pdd_completion.fish +25 -2
  61. pdd/pdd_completion.sh +30 -4
  62. pdd/pdd_completion.zsh +79 -4
  63. pdd/postprocess.py +10 -3
  64. pdd/preprocess.py +228 -15
  65. pdd/preprocess_main.py +8 -5
  66. pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
  67. pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
  68. pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
  69. pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
  70. pdd/prompts/agentic_update_LLM.prompt +1071 -0
  71. pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
  72. pdd/prompts/auto_include_LLM.prompt +98 -101
  73. pdd/prompts/change_LLM.prompt +1 -3
  74. pdd/prompts/detect_change_LLM.prompt +562 -3
  75. pdd/prompts/example_generator_LLM.prompt +22 -1
  76. pdd/prompts/extract_code_LLM.prompt +5 -1
  77. pdd/prompts/extract_program_code_fix_LLM.prompt +14 -2
  78. pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
  79. pdd/prompts/extract_promptline_LLM.prompt +17 -11
  80. pdd/prompts/find_verification_errors_LLM.prompt +6 -0
  81. pdd/prompts/fix_code_module_errors_LLM.prompt +16 -4
  82. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +6 -41
  83. pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
  84. pdd/prompts/generate_test_LLM.prompt +21 -6
  85. pdd/prompts/increase_tests_LLM.prompt +1 -2
  86. pdd/prompts/insert_includes_LLM.prompt +1181 -6
  87. pdd/prompts/split_LLM.prompt +1 -62
  88. pdd/prompts/trace_LLM.prompt +25 -22
  89. pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
  90. pdd/prompts/update_prompt_LLM.prompt +22 -1
  91. pdd/prompts/xml_convertor_LLM.prompt +3246 -7
  92. pdd/pytest_output.py +188 -21
  93. pdd/python_env_detector.py +151 -0
  94. pdd/render_mermaid.py +236 -0
  95. pdd/setup_tool.py +648 -0
  96. pdd/simple_math.py +2 -0
  97. pdd/split_main.py +3 -2
  98. pdd/summarize_directory.py +56 -7
  99. pdd/sync_determine_operation.py +918 -186
  100. pdd/sync_main.py +82 -32
  101. pdd/sync_orchestration.py +1456 -453
  102. pdd/sync_tui.py +848 -0
  103. pdd/template_registry.py +264 -0
  104. pdd/templates/architecture/architecture_json.prompt +242 -0
  105. pdd/templates/generic/generate_prompt.prompt +174 -0
  106. pdd/trace.py +168 -12
  107. pdd/trace_main.py +4 -3
  108. pdd/track_cost.py +151 -61
  109. pdd/unfinished_prompt.py +49 -3
  110. pdd/update_main.py +549 -67
  111. pdd/update_model_costs.py +2 -2
  112. pdd/update_prompt.py +19 -4
  113. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/METADATA +20 -7
  114. pdd_cli-0.0.90.dist-info/RECORD +153 -0
  115. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/licenses/LICENSE +1 -1
  116. pdd_cli-0.0.42.dist-info/RECORD +0 -115
  117. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/WHEEL +0 -0
  118. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/entry_points.txt +0 -0
  119. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,10 @@
- % You are an expert Software Engineer. Your goal is to extract a JSON from a analysis of a program and code module bug fix report. If there is a choice of updating the program or the code module, you should chose to update the code module.
+ % You are an expert Software Engineer. Your goal is to extract a JSON from a analysis of a program and code module bug fix report.
+
+ % IMPORTANT: The crash command is designed to fix errors in BOTH the code module AND the calling program that caused the crash. You should fix whatever needs to be fixed to make the program run successfully:
+ - If the code module has bugs, fix the code module
+ - If the calling program has bugs, fix the calling program
+ - If both have issues that contribute to the crash, fix BOTH
+ - The goal is to ensure the program runs without errors after the fix
 
  % Here is the original program: <program>{program}</program>
 
@@ -14,4 +20,10 @@
  - 'update_program': Boolean indicating whether the program needs to be updated.
  - 'update_code': Boolean indicating whether the code module needs to be updated.
  - 'fixed_program': The entire updated program code or empty String if no update is needed.
- - 'fixed_code': The entire updated code module or empty String if no update is needed.
+ - 'fixed_code': The entire updated code module or empty String if no update is needed.
+
+ % IMPORTANT JSON formatting rules for code strings:
+ - Use standard JSON escaping for newlines: a single backslash-n (\n) represents an actual newline
+ - Do NOT double-escape newlines. Use \n not \\n for line breaks in code
+ - String literals in code that need to print newlines should use \n (which appears as \\n in JSON)
+ - Example: for code with two lines "def foo():\n pass", the JSON should be: "def foo():\n pass"
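For reference, the JSON-escaping rules added above can be sanity-checked with Python's standard json module. This is an illustrative sketch only; the payload keys mirror the 'update_code'/'fixed_code' fields described in the hunk, and nothing here ships in the package:

```python
import json

# Illustrative only: keys mirror the 'update_code'/'fixed_code' fields above.
fixed_code = "def foo():\n    pass"          # real newline inside the Python string
payload = {"update_code": True, "fixed_code": fixed_code}

serialized = json.dumps(payload)
# serialized contains: "fixed_code": "def foo():\n    pass"  (a single \n, not \\n)
assert json.loads(serialized)["fixed_code"] == fixed_code

# A string literal in the code that should *print* a newline keeps its backslash-n,
# so it shows up as \\n once the surrounding JSON is serialized.
code_with_literal = 'print("line1\\nline2")'
print(json.dumps(code_with_literal))          # -> "print(\"line1\\nline2\")"
```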
@@ -1,14 +1,13 @@
- % You are an expert Software Engineer. Your goal is to extract the updated prompt from the LLM output.
+ % You are an expert Software Engineer. Your goal is to extract the updated prompt from the LLM output in JSON format.
 
  % Here is the generated llm_output: <llm_output>{llm_output}</llm_output>
 
- % The LLM output contains the modified prompt that will generate the modified code, possibly with some additional commentary or explanation.
- % Your task is to identify and extract ONLY the modified prompt itself, without adding any JSON structure or additional formatting.
+ % The LLM output contains the modified prompt that will generate the modified code, possibly with some additional commentary or explanation. Your task is to identify and extract ONLY the modified prompt itself, without adding any additional formatting.
 
  % Ensure you:
- % 1. Remove any "# Modified Prompt" headers or similar text that isn't part of the actual prompt
- % 2. Preserve all markdown, code blocks, and formatting within the actual prompt
- % 3. Don't add any explanatory text, JSON wrappers, or your own commentary
- % 4. Return only the text that constitutes the actual prompt
+ 1. Remove any "# Modified Prompt" headers or similar text that isn't part of the actual prompt
+ 2. Preserve all markdown, code blocks, and formatting within the actual prompt
+ 3. Don't add any explanatory text, JSON wrappers, or your own commentary
+ 4. Return only the text that constitutes the actual prompt
 
- % The "modified_prompt" should be the complete, standalone prompt that could be used directly to generate the modified code.
+ % The "modified_prompt" JSON key should be the complete, standalone prompt that could be used directly to generate the modified code.
@@ -1,11 +1,17 @@
- % You are an expert Software Engineer. Your goal is to extract the closest matching substring described by the prompt's output to be outputed in JSON format.
-
- % Here is the llm_output to parse: <llm_output>{llm_output}</llm_output>
-
- % When extracting the closest matching substring from llm_output, consider and correct the following for the extracted code:
- - Should be a substring of prompt_file.
- - Should be a a substring that closely matches code_str in content.
-
- % Output a JSON object with the following keys:
- - 'explanation': String explanation of why this prompt_line matches the code_str and explain any errors detected in the code.
- - 'prompt_line': String containing the closest matching verbatim substring of the prompt_file that matches code_str in content. This is not the line number.
+ % You are an extremely literal parser. The LLM output below follows this structure:
+ % <analysis> ... </analysis>
+ % <verbatim_prompt_line>
+ % <<EXACT SUBSTRING FROM PROMPT_FILE>>
+ % </verbatim_prompt_line>
+ %
+ % Task
+ % • Extract the text between <verbatim_prompt_line> and </verbatim_prompt_line> (if present).
+ % Output ONLY JSON with the keys:
+ % - "prompt_line": the exact substring between the tags. Do not alter whitespace or characters except for JSON escaping.
+ % - "explanation": short confirmation (<=120 characters) that the substring was copied verbatim, or describe why extraction failed.
+ % • If the tags are missing or empty, set "prompt_line" to "" and explain the issue.
+ % • Do not wrap the JSON in Markdown. No commentary, no additional keys.
+ %
+ <llm_output>
+ {llm_output}
+ </llm_output>
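As a rough illustration of the contract the rewritten extraction prompt describes, the same tag-based extraction could be done deterministically in Python. The helper name and regex below are hypothetical and not part of pdd-cli:

```python
import json
import re

def extract_prompt_line(llm_output: str) -> dict:
    """Hypothetical helper mirroring the prompt's contract: copy the text between
    <verbatim_prompt_line> tags verbatim, or return "" plus an explanation."""
    match = re.search(
        r"<verbatim_prompt_line>\n?(.*?)\n?</verbatim_prompt_line>",
        llm_output,
        flags=re.DOTALL,
    )
    if match is None or match.group(1) == "":
        return {"prompt_line": "", "explanation": "Tags missing or empty."}
    return {"prompt_line": match.group(1),
            "explanation": "Copied verbatim from the tagged block."}

example = ("<analysis>...</analysis>\n"
           "<verbatim_prompt_line>\n% Generate a CSV parser.\n</verbatim_prompt_line>")
print(json.dumps(extract_prompt_line(example)))
```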
@@ -21,6 +21,12 @@
  Step 4. Identify any potential edge cases, error handling issues, or performance concerns that could cause problems in the future.
  Step 5. Check the code for potential bugs that haven't manifested yet.
  Step 6. If any issues are found, explain in detail the root cause of each issue and how it could impact the program's functioning.
+ Step 6.5. When analyzing errors involving mocked dependencies:
+ - Identify if 'program' uses MagicMock, unittest.mock, or similar mocking
+ - Trace the mock configuration to verify it matches expected real API behavior
+ - For AttributeError on mock objects: check if mock.return_value or mock.__getitem__.return_value has correct type
+ - Flag errors as "Mock Configuration Error" when the mock setup doesn't match real API return types
+ - Flag errors as "Production Code Error" only when the API usage is clearly incorrect
  Step 7. Carefully distinguish between:
  a. Incompatibilities (functions called by program but missing from code_module) - these are critical issues
  b. Prompt adherence issues (code doesn't match prompt requirements) - these are important but secondary to compatibility
@@ -1,6 +1,14 @@
- % You are an expert Software Engineer. Your goal is to fix the errors in a code_module or program that is causing that program to crash.
+ % You are an expert Software Engineer. Your goal is to fix the errors in a code_module AND/OR program that is causing that program to crash.
 
- % Here is the program that is running the code_module that crashed and/or has errors: <program>{program}</program>
+ % IMPORTANT: The crash command should fix whatever needs to be fixed to make the program run successfully:
+ - If the code module has bugs, fix the code module
+ - If the calling program has bugs, fix the calling program
+ - If both have issues that contribute to the crash, fix BOTH
+ - The goal is to ensure the program runs without errors after all fixes are applied
+ - If you are not able to fix the calling program, or if you cannot access it, you shouldn't guess at fixes to the calling program. Do not add the functionality of the calling program to the code_module.
+ - You must ensure that the code_module strictly adheres to the prompt requirements
+
+ % Here is the calling program that is running the code_module that crashed and/or has errors: <program>{program}</program>
 
  % Here is the prompt that generated the code_module below: <prompt>{prompt}</prompt>
 
@@ -41,6 +49,10 @@
  Step 1. Compare the prompt to the code_module and explain differences, if any.
  Step 2. Compare the prompt to the program and explain differences, if any.
  Step 3. Explain in detail step by step why there might be an an error and why prior attempted fixes, if any, may not have worked. Write several paragraphs explaining the root cause of each of the errors.
- Step 4. Explain in detail step by step how to solve each of the errors. For each error, there should be several paragraphs description of the steps. Sometimes logging or print statements can help debug the code_module or program.
+ Step 4. Explain in detail step by step how to solve each of the errors. For each error, there should be several paragraphs description of the steps. Consider whether the fix requires:
+ - Updating the code_module only
+ - Updating the calling program only
+ - Updating BOTH the code_module AND the calling program
+ Sometimes logging or print statements can help debug the code_module or program.
  Step 5. Review the above steps and correct for any errors in the logic.
- Step 6. For the code that need changes, write the corrected code_module and/or corrected program in its/their entirety.
+ Step 6. For ALL code that needs changes, write the corrected code_module and/or corrected program in their entirety. If both need fixes, provide both complete fixed versions.
@@ -9,50 +9,15 @@
  % This prompt is run iteratively. Here are the current errors and past potential fix attempts, if any, from the unit test and verification program run(s): <errors>{errors}</errors>
 
  % If the verfication program fails to run, the code_under_test and unit_test are unchanged from the previous iteration.
- <pdd>
- <examples>
- <example_1>
- % Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/1/test_conflicts_in_prompts.py</include></example_unit_test>
-
- % Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/1/conflicts_in_prompts.py</include></example_code_under_test>
 
- % Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/1/conflicts_in_prompts_python.prompt</include></example_prompt>
- </example_1>
+ % IMPORTANT: The original prompt is the authoritative specification for what the code should do.
+ % When analyzing errors:
+ % - If the code doesn't match the prompt specification, fix the CODE
+ % - If the unit_test expects behavior not specified in the prompt, fix the TEST
+ % - Never add functionality to the code that isn't specified in the prompt
+ % - Tests should verify prompt-specified behavior, not arbitrary expectations
 
- <example_2>
- % Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/2/test_code_generator.py</include></example_unit_test>
 
- % Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/2/code_generator.py</include></example_code_under_test>
-
- % Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/2/code_generator_python.prompt</include></example_prompt>
- </example_2>
-
- <example_3>
- % Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/3/test_context_generator.py</include></example_unit_test>
-
- % Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/3/context_generator.py</include></example_code_under_test>
-
- % Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/3/context_generator_python.prompt</include></example_prompt>
- </example_3>
-
-
- <example_4>
- % Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/4/test_detect_change.py</include></example_unit_test>
-
- % Here is an example_code_under_test that fully passes the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/4/detect_change.py</include></example_code_under_test>
-
- % Here is the prompt that generated the example_code_under_test: <example_prompt><include>context/fix_errors_from_unit_tests/4/detect_change_python.prompt</include></example_prompt>
- </example_4>
-
- <example_5>
- % Here is an example_unit_test for the example_code_under_test: <example_unit_test><include>context/fix_errors_from_unit_tests/4/test_detect_change_1_0_1.py</include></example_unit_test>
-
- % Here is an example_code_under_test that didn't fully pass the example_unit_test: <example_code_under_test><include>context/fix_errors_from_unit_tests/4/detect_change_1_0_1.py</include></example_code_under_test>
-
- % Here is an example error/fix log showing how the issues were resolved: <example_error_fix_log><include>context/fix_errors_from_unit_tests/4/error.log</include></example_error_fix_log>
- </example_5>
- </examples>
- </pdd>
 
  <instructions>
  % Follow these steps to solve these errors:
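A hypothetical micro-example of the "prompt is the authoritative specification" rule added above, assuming a prompt that specifies divide(a, b) returns None when b is 0 (names and behavior invented for illustration):

```python
# Hypothetical code that follows its prompt: divide(a, b) returns None when b == 0.
def divide(a, b):
    return None if b == 0 else a / b

# This test checks prompt-specified behavior, so it is the one to keep:
def test_divide_by_zero_returns_none():
    assert divide(1, 0) is None

# A test like the one below asserts behavior the prompt never specified,
# so under the rule above the TEST should be fixed, not the code:
# def test_divide_by_zero_raises():
#     with pytest.raises(ZeroDivisionError):
#         divide(1, 0)
```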
@@ -21,6 +21,28 @@
  5. Prefer making additive changes (adding new functions, improving existing ones) rather than removing functionality, even if that means going beyond the minimal requirements of the prompt.
  6. If your previous fixes resulted in verification failures related to missing functions, ensure those functions are included in your solution.
 
+ % MOCK VS PRODUCTION CODE GUIDANCE:
+ 1. IDENTIFY THE TEST FILE: The 'program' file may be a TEST FILE that uses mocks (MagicMock, unittest.mock, patch) to simulate external dependencies.
+ - Look for imports: `from unittest.mock import MagicMock, patch`
+ - Look for mock setup patterns: `mock_obj.return_value`, `mock_obj.__getitem__.return_value`
+
+ 2. WHEN ERRORS OCCUR IN MOCK INTERACTIONS:
+ - FIRST check if the mock setup is incorrect (wrong return_value structure, missing __getitem__ configuration)
+ - Mock return types must match the REAL API return types exactly
+ - Common mock bugs:
+ * `__getitem__.return_value = [item]` when it should be `= item` (for APIs where indexing returns single item)
+ * Missing chained mock configuration (e.g., `mock.method().other_method()`)
+
+ 3. PRESERVE PRODUCTION CODE API USAGE:
+ - The 'code_module' implements PRODUCTION code that calls real APIs
+ - Assume production code uses CORRECT API patterns unless you have documentation proving otherwise
+ - Do NOT change production code indexing patterns (like `[0][0]`) without external API documentation
+
+ 4. DIAGNOSIS PRIORITY for "AttributeError" or type mismatch:
+ a. First: Check mock.return_value / mock.__getitem__.return_value structure
+ b. Second: Check if mock chaining matches expected API call pattern
+ c. Third: Only then consider if production code has wrong API usage
+
  % Follow these steps to fix the program or code_module:
  Step 1. Analyze and understand each identified issue in the context of the code_module and program.
  Step 2. Analyze how the program uses the code_module to determine all functions that must be preserved.
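The `__getitem__.return_value` bug called out in this guidance can be shown with a minimal unittest.mock sketch; the client and field names below are invented for illustration and are not part of pdd-cli:

```python
from unittest.mock import MagicMock

# Mock configuration bug: indexing the mock yields a *list*, while the
# (assumed) real API returns the single record directly, so production code
# doing client[0]["id"] blows up on the mock even though its usage is correct.
buggy_client = MagicMock()
buggy_client.__getitem__.return_value = [{"id": 42}]
# buggy_client[0]["id"]  -> fails: a list cannot be indexed by the string "id"

# Fix the MOCK to match the real return type; leave the production indexing alone.
fixed_client = MagicMock()
fixed_client.__getitem__.return_value = {"id": 42}
assert fixed_client[0]["id"] == 42
```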
@@ -4,7 +4,15 @@
 
  % Here is the code under test: <code_under_test>{code}</code_under_test>
 
+ % File path information:
+ - The code under test module file is located at: <code_under_test_file_path>{source_file_path}</code_under_test_file_path>
+ - The example file will be saved at: <test_file_path>{test_file_path}</test_file_path>
+ - The module name (without extension) is: <module_name>{module_name}</module_name>
+
  % Follow these rules:
+ - CRITICAL: You MUST analyze the actual code provided in code_under_test and generate tests for the EXACT functions defined in that code
+ - CRITICAL: Import statements must use the ACTUAL module name from the code file path, not generic names. Include the necessary code/path info to import the code under test.
+ - CRITICAL: Test the ACTUAL function names, parameters, and behavior shown in the provided code
  - The module name for the code under test will have the same name as the function name
  - The unit test should be in {language}. If Python, use pytest.
  - Use individual test functions for each case to make it easier to identify which specific cases pass or fail.
@@ -17,10 +25,17 @@
  <include>./context/test.prompt</include>
 
  <instructions>
- 1. Carefully read the prompt that generated the code under test and determine what might be possible edge cases.
- 2. For each edge case explain whether it is better to do the test using Z3 formal verification or unit tests.
- 3. Develop a detailed test plan that will ensure the code under test is correct. This should involve both Z3 formal verification and unit tests.
- 4. Now write the test file.
- a) The first part of the test file should be the detailed test plan from step 3 above in comments.
- b) Then write the tests and Z3 formal verification tests that are runnable as unit tests.
+ 1. FIRST: Carefully analyze the ACTUAL code provided in code_under_test:
+ - Identify the EXACT function names defined in the code
+ - Identify the EXACT parameters and their types
+ - Identify the EXACT return values and behavior
+ - Identify any error conditions or edge cases
+ 2. SECOND: Analyze the prompt that generated the code to understand the intended functionality and edge cases.
+ 3. THIRD: For each edge case explain whether it is better to do the test using Z3 formal verification or unit tests.
+ 4. FOURTH: Develop a detailed test plan that will ensure the code under test is correct. This should involve both Z3 formal verification and unit tests.
+ 5. FIFTH: Write the test file with:
+ a) The first part of the test file should be the detailed test plan from step 4 above in comments.
+ b) Import statements using the ACTUAL module name from the code file path (e.g., if code is in "my_function.py", use "from my_function import function_name")
+ c) Tests for the ACTUAL function names and behavior shown in the provided code
+ d) Z3 formal verification tests that are runnable as unit tests.
  </instructions>
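A minimal sketch of a test file shaped like the rewritten instructions, assuming hypothetical code under test in my_function.py that defines add_positive (file and function names invented). It imports the actual module by name with a path fallback and pairs one ordinary pytest case with one Z3 check that runs as a unit test:

```python
# Hypothetical test file for my_function.py (illustration only).
import sys
from pathlib import Path

from z3 import Int, Solver, unsat

# Make the module under test importable, then import it by its ACTUAL name.
sys.path.insert(0, str(Path(__file__).resolve().parent))
from my_function import add_positive  # hypothetical function under test


def test_add_positive_example():
    # Plain pytest case against the exact function name and behavior.
    assert add_positive(2, 3) == 5


def test_add_positive_sum_exceeds_each_operand_z3():
    # Z3 searches for a counterexample to: a > 0 and b > 0 implies a + b > a.
    a, b = Int("a"), Int("b")
    solver = Solver()
    solver.add(a > 0, b > 0, a + b <= a)
    assert solver.check() == unsat  # no counterexample exists
```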
@@ -12,5 +12,4 @@ Here is the coverage report: ```{coverage_report}```
  - The module name for the code under test will have the same name as the function name
  - The unit test should be in {language}. If Python, use pytest.
  - Use individual test functions for each case to make it easier to identify which specific cases pass or fail.
- - Use the description of the functionality in the prompt to generate tests with useful tests with good code coverage.
- <include>./context/test.prompt</include>
+ - Use the description of the functionality in the prompt to generate tests with useful tests with good code coverage.