pdd-cli 0.0.90__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +38 -6
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +521 -786
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +118 -3
- pdd/agentic_update.py +25 -8
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +63 -53
- pdd/auto_include.py +185 -3
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +195 -23
- pdd/cmd_test_main.py +345 -197
- pdd/code_generator.py +4 -2
- pdd/code_generator_main.py +118 -32
- pdd/commands/__init__.py +6 -0
- pdd/commands/analysis.py +87 -29
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +136 -113
- pdd/commands/maintenance.py +3 -2
- pdd/commands/misc.py +8 -0
- pdd/commands/modify.py +190 -164
- pdd/commands/sessions.py +284 -0
- pdd/construct_paths.py +334 -32
- pdd/context_generator_main.py +167 -170
- pdd/continue_generation.py +6 -3
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +27 -3
- pdd/core/cloud.py +237 -0
- pdd/core/errors.py +4 -0
- pdd/core/remote_session.py +61 -0
- pdd/crash_main.py +219 -23
- pdd/data/llm_model.csv +4 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +208 -34
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +291 -38
- pdd/fix_main.py +204 -4
- pdd/fix_verification_errors_loop.py +235 -26
- pdd/fix_verification_main.py +269 -83
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +46 -5
- pdd/generate_test.py +212 -151
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +309 -20
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +7 -5
- pdd/insert_includes.py +2 -1
- pdd/llm_invoke.py +459 -95
- pdd/load_prompt_template.py +15 -34
- pdd/path_resolution.py +140 -0
- pdd/postprocess.py +4 -1
- pdd/preprocess.py +68 -12
- pdd/preprocess_main.py +33 -1
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +2 -2
- pdd/prompts/agentic_update_LLM.prompt +192 -338
- pdd/prompts/auto_include_LLM.prompt +22 -0
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +571 -14
- pdd/prompts/fix_code_module_errors_LLM.prompt +8 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +1 -0
- pdd/prompts/generate_test_LLM.prompt +20 -1
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/insert_includes_LLM.prompt +262 -252
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/remote_session.py +876 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/summarize_directory.py +236 -237
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +329 -47
- pdd/sync_main.py +272 -28
- pdd/sync_orchestration.py +136 -75
- pdd/template_expander.py +161 -0
- pdd/templates/architecture/architecture_json.prompt +41 -46
- pdd/trace.py +1 -1
- pdd/track_cost.py +0 -13
- pdd/unfinished_prompt.py +2 -1
- pdd/update_main.py +23 -5
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +15 -10
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- pdd_cli-0.0.90.dist-info/RECORD +0 -153
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
pdd/prompts/generate_test_LLM.prompt

@@ -9,6 +9,16 @@
 - The example file will be saved at: <test_file_path>{test_file_path}</test_file_path>
 - The module name (without extension) is: <module_name>{module_name}</module_name>
 
+% EXISTING TESTS (if provided - your output will be APPENDED to this file):
+<existing_tests>{existing_tests}</existing_tests>
+
+% If existing tests are provided above:
+- Generate ONLY NEW test functions (your output will be appended to the existing file)
+- Do NOT include import statements (they already exist in the file)
+- Do NOT duplicate any existing test function names
+- Maintain consistent style with existing tests (fixtures, naming conventions)
+- Focus on testing functionality NOT already covered by existing tests
+
 % Follow these rules:
 - CRITICAL: You MUST analyze the actual code provided in code_under_test and generate tests for the EXACT functions defined in that code
 - CRITICAL: Import statements must use the ACTUAL module name from the code file path, not generic names. Include the necessary code/path info to import the code under test.
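For illustration, a minimal sketch (not part of the package diff) of what the append-mode rules above ask the model to produce, assuming a hypothetical module `calculator` whose existing test file already holds the imports; only the functions after the second marker would be appended:

    # --- existing test file (imports and earlier tests already present) ---
    import pytest
    from calculator import add   # hypothetical module under test

    def test_add_basic():
        assert add(2, 3) == 5

    # --- appended output: new test functions only, no imports, no duplicated names ---
    def test_add_negative_numbers():
        assert add(-2, -3) == -5

    def test_add_zero_is_identity():
        assert add(7, 0) == 7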
@@ -22,7 +32,16 @@
 - Setup and teardown methods should only use public APIs and environment variables, never reset internal module state directly.
 - Design tests to be independent of implementation details that might change when code is regenerated.
 - For test isolation, use fixtures and mocking of external dependencies rather than manipulating internal module state. In general minimize the amount of mocking needed so that the tests are more robust to changes in the code under test and more code is tested.
-
+
+% TEST ISOLATION PRINCIPLE:
+% CRITICAL: Each test MUST be isolated and not pollute state for other tests.
+- Tests must clean up any state they modify (environment variables, global state, files, mocks)
+- Use the testing framework's built-in isolation mechanisms (fixtures, setup/teardown, context managers)
+- Avoid modifying global or module-level state directly; if unavoidable, always restore original state
+- Prefer function-scoped test resources over shared/module-scoped ones to ensure isolation
+
+<include>context/test.prompt</include>
+<include>context/pytest_isolation_example.py</include>
 
 <instructions>
 1. FIRST: Carefully analyze the ACTUAL code provided in code_under_test:
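For illustration, a minimal pytest sketch of the isolation principle added in the hunk above (fixture and names are hypothetical, not taken from the package): all state is created through `tmp_path` and `monkeypatch`, so pytest undoes it after each test.

    import os
    import pytest

    @pytest.fixture
    def temp_config(tmp_path, monkeypatch):
        # Function-scoped fixture: the temp file and the env var both
        # disappear automatically once the test using them finishes.
        config_file = tmp_path / "config.json"
        config_file.write_text('{"retries": 3}')
        monkeypatch.setenv("APP_CONFIG", str(config_file))
        yield config_file
        # extra teardown (if any) would run here, even if the test failed

    def test_config_path_exported_via_env(temp_config):
        assert os.environ["APP_CONFIG"] == str(temp_config)
        assert temp_config.read_text() == '{"retries": 3}'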
pdd/prompts/generate_test_from_example_LLM.prompt (new file)

@@ -0,0 +1,115 @@
+% You are an expert Software Test Engineer. Your goal is to generate tests based on the intended behavior described in a prompt and demonstrated in an example file.
+
+% Here a description of what the code is supposed to do and was the prompt that generated the code: <prompt_that_generated_code>{prompt_that_generated_code}</prompt_that_generated_code>
+
+% Here is an example showing how the module should be used: <example_usage>{example}</example_usage>
+
+% File path information:
+- The example file is located at: <example_file_path>{source_file_path}</example_file_path>
+- The test file will be saved at: <test_file_path>{test_file_path}</test_file_path>
+- The module name (without extension) is: <module_name>{module_name}</module_name>
+
+% EXISTING TESTS (if provided - your output will be APPENDED to this file):
+<existing_tests>{existing_tests}</existing_tests>
+
+% If existing tests are provided above:
+- Generate ONLY NEW test functions (your output will be appended to the existing file)
+- Do NOT include import statements (they already exist in the file)
+- Do NOT duplicate any existing test function names
+- Maintain consistent style with existing tests (fixtures, naming conventions)
+- Focus on testing functionality NOT already covered by existing tests
+
+% Follow these rules:
+- CRITICAL: Analyze the EXAMPLE to understand the API (function names, parameters, return values)
+- CRITICAL: Import statements must match the module structure shown in the example
+- CRITICAL: Test the intended function names and behavior based on the prompt
+- The module name for the code under test will have the same name as the function name
+- The unit test should be in {language}. If Python, use pytest.
+- Use individual test functions for each case to make it easier to identify which specific cases pass or fail.
+- Use the description of the functionality in the prompt to generate tests with useful tests with good code coverage.
+- Focus on testing the INTENDED FUNCTIONALITY, not implementation details.
+- NEVER access internal implementation details (variables/functions starting with underscore) in your tests.
+- Setup and teardown methods should only use public APIs and environment variables, never reset internal module state directly.
+- Design tests to be independent of implementation details.
+- For test isolation, use fixtures and mocking of external dependencies rather than manipulating internal module state. In general minimize the amount of mocking needed so that the tests are more robust to changes in the code under test and more code is tested.
+- Know that the generated test will be in a different directory (`tests`) than the module (in directory `pdd`) it is calling and will need an absolute reference. The module file name will be same as the function name.
+- Created files should be in the `output` directory.
+- Data files (language_format.csv and llm_model.csv) already exist in the PDD_PATH/`data` directory. Do not write over them. It already contains data for popular languages and LLM models and can be used for tests.
+- The PDD_PATH environment variable is already set.
+
+% PYTEST TEST ISOLATION AND ANTI-POLLUTION RULES:
+% CRITICAL: Generated tests MUST be isolated and not pollute state for other tests. Follow these rules strictly:
+
+% 1. ENVIRONMENT VARIABLES:
+- ALWAYS use monkeypatch.setenv() or monkeypatch.delenv() instead of os.environ["VAR"] = "value"
+- NEVER use direct os.environ manipulation - it persists beyond the test and pollutes other tests
+- BAD: os.environ["API_KEY"] = "test_key" # POLLUTION: persists after test ends
+- GOOD: monkeypatch.setenv("API_KEY", "test_key") # Auto-cleaned by pytest
+
+% 2. MOCKING EXTERNAL DEPENDENCIES:
+- Use context managers or monkeypatch for mocks - they auto-cleanup after the test
+- Prefer monkeypatch.setattr() over unittest.mock.patch() decorators at module level
+- BAD: @patch('module.func') at module/class level # Can leak if exception occurs
+- GOOD: monkeypatch.setattr('module.func', mock_func) # Always cleaned up
+- GOOD: with patch('module.func') as mock: # Context manager ensures cleanup
+
+% 3. FIXTURE CLEANUP WITH YIELD:
+- Use yield-based fixtures with cleanup code after yield for any resources
+- Prefer function-scoped fixtures over module or session scope to ensure isolation
+- BAD: @pytest.fixture(scope="module") without cleanup # State leaks between tests
+- GOOD: @pytest.fixture with yield and cleanup after yield # Always cleans up
+- Example of proper fixture:
+    @pytest.fixture
+    def temp_resource():
+        resource = setup_resource()
+        yield resource
+        resource.cleanup()  # Always runs after test, even on failure
+
+% 4. SYS.MODULES MANIPULATION:
+- AVOID manipulating sys.modules directly whenever possible
+- If unavoidable, ALWAYS save and restore in try/finally or fixture with yield
+- BAD: sys.modules["module"] = mock_module # Pollutes all subsequent tests
+- GOOD: Use a fixture that saves, mocks, and restores:
+    @pytest.fixture
+    def mock_module():
+        saved = sys.modules.get("module")
+        sys.modules["module"] = MagicMock()
+        yield
+        if saved is not None:
+            sys.modules["module"] = saved
+        elif "module" in sys.modules:
+            del sys.modules["module"]
+
+% 5. FILE SYSTEM OPERATIONS:
+- ALWAYS use the tmp_path fixture for creating temporary files and directories
+- NEVER create files in the working directory or fixed paths
+- BAD: with open("test_output.txt", "w") as f: ... # Leaves file behind
+- GOOD: def test_file(tmp_path): (tmp_path / "test_output.txt").write_text(...)
+
+% 6. GLOBAL/MODULE STATE:
+- Never modify global variables or module-level state directly in tests
+- Use monkeypatch.setattr() for any module-level variables that need changing
+- Reset any singleton instances using fixtures with proper teardown
+
+% SUMMARY OF GOOD PATTERNS:
+- Use tmp_path fixture for file operations
+- Use monkeypatch fixture for environment variables and attributes
+- Use pytest.raises() as context manager for exception testing
+- Prefer function-scoped fixtures over module or session scope
+- Use yield in fixtures to ensure cleanup runs even on test failure
+
+
+<instructions>
+1. FIRST: Carefully analyze the EXAMPLE to understand:
+   - How to import the module (exact import statements)
+   - What functions/classes are exposed
+   - How they are called (parameters, return values)
+2. SECOND: Analyze the prompt that describes the intended functionality and edge cases.
+3. THIRD: For each edge case explain whether it is better to do the test using Z3 formal verification or unit tests.
+4. FOURTH: Develop a detailed test plan that will ensure the intended functionality is correct. This should involve both Z3 formal verification and unit tests.
+5. FIFTH: Write the test file with:
+   a) The first part of the test file should be the detailed test plan from step 4 above in comments.
+   b) Import statements matching the module structure from the example
+   c) Tests for the intended function names and behavior from the prompt
+   d) Z3 formal verification tests that are runnable as unit tests.
+</instructions>
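For illustration of step 5(d) above, a minimal sketch of a Z3 formal-verification check that runs as an ordinary pytest test (the `clamp` function and the property are hypothetical, not taken from the package; requires the z3-solver package):

    from z3 import And, If, Implies, Int, Not, Solver, unsat

    def clamp(x, lo, hi):
        # Hypothetical function under test.
        return max(lo, min(x, hi))

    def test_clamp_stays_in_bounds_z3():
        # Symbolic mirror of clamp(); Z3 checks the property for all integers,
        # not just sampled inputs.
        x, lo, hi = Int("x"), Int("lo"), Int("hi")
        sym_clamp = If(x < lo, lo, If(x > hi, hi, x))
        prop = Implies(lo <= hi, And(sym_clamp >= lo, sym_clamp <= hi))
        solver = Solver()
        solver.add(Not(prop))           # look for a counterexample
        assert solver.check() == unsat  # none exists, so the property holds

    def test_clamp_concrete_examples():
        assert clamp(5, 0, 10) == 5
        assert clamp(-3, 0, 10) == 0
        assert clamp(42, 0, 10) == 10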