pdd-cli 0.0.90__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +38 -6
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +521 -786
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +118 -3
- pdd/agentic_update.py +25 -8
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +63 -53
- pdd/auto_include.py +185 -3
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +195 -23
- pdd/cmd_test_main.py +345 -197
- pdd/code_generator.py +4 -2
- pdd/code_generator_main.py +118 -32
- pdd/commands/__init__.py +6 -0
- pdd/commands/analysis.py +87 -29
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +136 -113
- pdd/commands/maintenance.py +3 -2
- pdd/commands/misc.py +8 -0
- pdd/commands/modify.py +190 -164
- pdd/commands/sessions.py +284 -0
- pdd/construct_paths.py +334 -32
- pdd/context_generator_main.py +167 -170
- pdd/continue_generation.py +6 -3
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +27 -3
- pdd/core/cloud.py +237 -0
- pdd/core/errors.py +4 -0
- pdd/core/remote_session.py +61 -0
- pdd/crash_main.py +219 -23
- pdd/data/llm_model.csv +4 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +208 -34
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +291 -38
- pdd/fix_main.py +204 -4
- pdd/fix_verification_errors_loop.py +235 -26
- pdd/fix_verification_main.py +269 -83
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +46 -5
- pdd/generate_test.py +212 -151
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +309 -20
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +7 -5
- pdd/insert_includes.py +2 -1
- pdd/llm_invoke.py +459 -95
- pdd/load_prompt_template.py +15 -34
- pdd/path_resolution.py +140 -0
- pdd/postprocess.py +4 -1
- pdd/preprocess.py +68 -12
- pdd/preprocess_main.py +33 -1
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +2 -2
- pdd/prompts/agentic_update_LLM.prompt +192 -338
- pdd/prompts/auto_include_LLM.prompt +22 -0
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +571 -14
- pdd/prompts/fix_code_module_errors_LLM.prompt +8 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +1 -0
- pdd/prompts/generate_test_LLM.prompt +20 -1
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/insert_includes_LLM.prompt +262 -252
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/remote_session.py +876 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/summarize_directory.py +236 -237
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +329 -47
- pdd/sync_main.py +272 -28
- pdd/sync_orchestration.py +136 -75
- pdd/template_expander.py +161 -0
- pdd/templates/architecture/architecture_json.prompt +41 -46
- pdd/trace.py +1 -1
- pdd/track_cost.py +0 -13
- pdd/unfinished_prompt.py +2 -1
- pdd/update_main.py +23 -5
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +15 -10
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- pdd_cli-0.0.90.dist-info/RECORD +0 -153
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
pdd/auth_service.py
ADDED
@@ -0,0 +1,210 @@
+"""Shared authentication service for PDD Cloud.
+
+This module provides common authentication functions used by both:
+- REST API endpoints (pdd/server/routes/auth.py) for the web frontend
+- CLI commands (pdd/commands/auth.py) for terminal-based auth management
+
+By centralizing auth logic here, we ensure consistent behavior across interfaces.
+"""
+from __future__ import annotations
+
+import json
+import time
+from pathlib import Path
+from typing import Optional, Tuple, Dict, Any
+
+
+# JWT file cache path
+JWT_CACHE_FILE = Path.home() / ".pdd" / "jwt_cache"
+
+# Keyring configuration (must match app_name="PDD CLI" used in commands/auth.py)
+KEYRING_SERVICE_NAME = "firebase-auth-PDD CLI"
+KEYRING_USER_NAME = "refresh_token"
+
+
+def get_jwt_cache_info() -> Tuple[bool, Optional[float]]:
+    """
+    Check JWT cache file for valid token.
+
+    Returns:
+        Tuple of (is_valid, expires_at). If valid, expires_at is the timestamp
+        when the token expires. If invalid or not found, returns (False, None).
+    """
+    if not JWT_CACHE_FILE.exists():
+        return False, None
+
+    try:
+        with open(JWT_CACHE_FILE, "r") as f:
+            cache = json.load(f)
+        expires_at = cache.get("expires_at", 0)
+        # Check if token is still valid (with 5 minute buffer)
+        if expires_at > time.time() + 300:
+            return True, expires_at
+    except (json.JSONDecodeError, IOError, KeyError):
+        pass
+
+    return False, None
+
+
+def get_cached_jwt() -> Optional[str]:
+    """
+    Get the cached JWT token if it exists and is valid.
+
+    Returns:
+        The JWT token string if valid, None otherwise.
+    """
+    if not JWT_CACHE_FILE.exists():
+        return None
+
+    try:
+        with open(JWT_CACHE_FILE, "r") as f:
+            cache = json.load(f)
+        expires_at = cache.get("expires_at", 0)
+        # Check if token is still valid (with 5 minute buffer)
+        if expires_at > time.time() + 300:
+            # Check both 'id_token' (new) and 'jwt' (legacy) keys for backwards compatibility
+            return cache.get("id_token") or cache.get("jwt")
+    except (json.JSONDecodeError, IOError, KeyError):
+        pass
+
+    return None
+
+
+def has_refresh_token() -> bool:
+    """
+    Check if there's a stored refresh token in keyring.
+
+    Returns:
+        True if a refresh token exists, False otherwise.
+    """
+    try:
+        import keyring
+
+        token = keyring.get_password(KEYRING_SERVICE_NAME, KEYRING_USER_NAME)
+        return token is not None
+    except ImportError:
+        # Try alternative keyring
+        try:
+            import keyrings.alt.file
+
+            kr = keyrings.alt.file.PlaintextKeyring()
+            token = kr.get_password(KEYRING_SERVICE_NAME, KEYRING_USER_NAME)
+            return token is not None
+        except ImportError:
+            pass
+    except Exception:
+        pass
+
+    return False
+
+
+def clear_jwt_cache() -> Tuple[bool, Optional[str]]:
+    """
+    Clear the JWT cache file.
+
+    Returns:
+        Tuple of (success, error_message). If successful, error_message is None.
+    """
+    if not JWT_CACHE_FILE.exists():
+        return True, None
+
+    try:
+        JWT_CACHE_FILE.unlink()
+        return True, None
+    except Exception as e:
+        return False, f"Failed to clear JWT cache: {e}"
+
+
+def clear_refresh_token() -> Tuple[bool, Optional[str]]:
+    """
+    Clear the refresh token from keyring.
+
+    Returns:
+        Tuple of (success, error_message). If successful, error_message is None.
+    """
+    try:
+        import keyring
+
+        keyring.delete_password(KEYRING_SERVICE_NAME, KEYRING_USER_NAME)
+        return True, None
+    except ImportError:
+        # Try alternative keyring
+        try:
+            import keyrings.alt.file
+
+            kr = keyrings.alt.file.PlaintextKeyring()
+            kr.delete_password(KEYRING_SERVICE_NAME, KEYRING_USER_NAME)
+            return True, None
+        except ImportError:
+            return True, None  # No keyring available, nothing to clear
+        except Exception as e:
+            return False, f"Failed to clear refresh token: {e}"
+    except Exception as e:
+        error_str = str(e)
+        # Ignore "not found" errors - token was already deleted
+        if "not found" in error_str.lower() or "no matching" in error_str.lower():
+            return True, None
+        return False, f"Failed to clear refresh token: {e}"
+
+
+def get_auth_status() -> Dict[str, Any]:
+    """
+    Get current authentication status.
+
+    Returns:
+        Dict with keys:
+        - authenticated: bool - True if user has valid auth
+        - cached: bool - True if using cached JWT (vs refresh token)
+        - expires_at: Optional[float] - JWT expiration timestamp if cached
+    """
+    # First check JWT cache
+    cache_valid, expires_at = get_jwt_cache_info()
+    if cache_valid:
+        return {
+            "authenticated": True,
+            "cached": True,
+            "expires_at": expires_at,
+        }
+
+    # Check for refresh token in keyring
+    has_refresh = has_refresh_token()
+    if has_refresh:
+        return {
+            "authenticated": True,
+            "cached": False,
+            "expires_at": None,
+        }
+
+    return {
+        "authenticated": False,
+        "cached": False,
+        "expires_at": None,
+    }
+
+
+def logout() -> Tuple[bool, Optional[str]]:
+    """
+    Clear all authentication tokens (logout).
+
+    Clears both the JWT cache file and the refresh token from keyring.
+
+    Returns:
+        Tuple of (success, error_message). If any error occurred,
+        success is False and error_message contains the details.
+    """
+    errors = []
+
+    # Clear JWT cache
+    jwt_success, jwt_error = clear_jwt_cache()
+    if not jwt_success and jwt_error:
+        errors.append(jwt_error)
+
+    # Clear refresh token from keyring
+    refresh_success, refresh_error = clear_refresh_token()
+    if not refresh_success and refresh_error:
+        errors.append(refresh_error)
+
+    if errors:
+        return False, "; ".join(errors)
+
+    return True, None
pdd/auto_deps_main.py
CHANGED
@@ -1,40 +1,36 @@
-
+from __future__ import annotations
 import sys
 from pathlib import Path
-from typing import
+from typing import Optional, Tuple, Callable
 import click
 from rich import print as rprint
+from filelock import FileLock
 
 from . import DEFAULT_STRENGTH, DEFAULT_TIME
 from .construct_paths import construct_paths
 from .insert_includes import insert_includes
 
-
+
+def auto_deps_main(
     ctx: click.Context,
     prompt_file: str,
     directory_path: str,
     auto_deps_csv_path: Optional[str],
     output: Optional[str],
-    force_scan: Optional[bool],
+    force_scan: Optional[bool] = False,
     progress_callback: Optional[Callable[[int, int], None]] = None
 ) -> Tuple[str, float, str]:
     """
-    Main function to analyze and insert dependencies
-
-    Args:
-        ctx: Click context containing command-line parameters.
-        prompt_file: Path to the input prompt file.
-        directory_path: Path to directory containing potential dependency files.
-        auto_deps_csv_path: Path to CSV file containing auto-dependency information.
-        output: Optional path to save the modified prompt file.
-        force_scan: Flag to force rescan of directory by deleting CSV file.
-        progress_callback: Callback for progress updates (current, total) for each file.
+    Main function to analyze a prompt file and insert dependencies found in a directory.
 
-
-
-
-
-
+    :param ctx: Click context containing command-line parameters.
+    :param prompt_file: Path to the input prompt file.
+    :param directory_path: Path to the directory or glob pattern containing potential dependency files.
+    :param auto_deps_csv_path: Preferred CSV file path for dependency info (may be overridden by resolved paths).
+    :param output: File path (or directory) to save the modified prompt file.
+    :param force_scan: Flag to force a rescan by deleting the existing CSV cache.
+    :param progress_callback: Optional callback for progress updates (current, total).
+    :return: A tuple containing the modified prompt, total cost, and model name used.
     """
     try:
         # Construct file paths
@@ -45,7 +41,7 @@ def auto_deps_main( # pylint: disable=too-many-arguments, too-many-locals
             "output": output,
             "csv": auto_deps_csv_path
         }
-
+
         resolved_config, input_strings, output_file_paths, _ = construct_paths(
             input_file_paths=input_file_paths,
             force=ctx.obj.get('force', False),
@@ -56,50 +52,64 @@ def auto_deps_main( # pylint: disable=too-many-arguments, too-many-locals
            confirm_callback=ctx.obj.get('confirm_callback')
         )
 
-        #
+        # Resolve CSV path
         csv_path = output_file_paths.get("csv", "project_dependencies.csv")
 
-        # Handle
+        # Handle force scan option
         if force_scan and Path(csv_path).exists():
             if not ctx.obj.get('quiet', False):
-                rprint(
-
-
-
-
+                rprint(f"[yellow]Removing existing CSV file due to --force-scan option: {csv_path}[/yellow]")
+            try:
+                Path(csv_path).unlink()
+            except OSError as e:
+                if not ctx.obj.get('quiet', False):
+                    rprint(f"[yellow]Warning: Could not delete CSV file: {e}[/yellow]")
 
-        #
-
-
-
+        # Acquire lock to prevent concurrent access to the CSV cache
+        lock_path = f"{csv_path}.lock"
+        lock = FileLock(lock_path)
+
+        with lock:
+            # Load input file
+            prompt_content = input_strings["prompt_file"]
 
-
-
-
-
-
-
-
-
-
-
-
-
+            # Get LLM parameters
+            strength = ctx.obj.get('strength', DEFAULT_STRENGTH)
+            temperature = ctx.obj.get('temperature', 0.0)
+            time_budget = ctx.obj.get('time', DEFAULT_TIME)
+            verbose = not ctx.obj.get('quiet', False)
+
+            # Run the dependency analysis and insertion
+            modified_prompt, csv_output, total_cost, model_name = insert_includes(
+                input_prompt=prompt_content,
+                directory_path=directory_path,
+                csv_filename=csv_path,
+                prompt_filename=prompt_file,
+                strength=strength,
+                temperature=temperature,
+                time=time_budget,
+                verbose=verbose,
+                progress_callback=progress_callback
+            )
 
-
-
-
+            # Save the modified prompt
+            output_path = output_file_paths["output"]
+            if output_path:
+                with open(output_path, 'w', encoding='utf-8') as f:
+                    f.write(modified_prompt)
 
-
-
-
+            # Save the CSV output if content exists
+            if csv_output:
+                with open(csv_path, 'w', encoding='utf-8') as f:
+                    f.write(csv_output)
 
         # Provide user feedback
         if not ctx.obj.get('quiet', False):
             rprint("[bold green]Successfully analyzed and inserted dependencies![/bold green]")
             rprint(f"[bold]Model used:[/bold] {model_name}")
             rprint(f"[bold]Total cost:[/bold] ${total_cost:.6f}")
-
+            if output_path:
+                rprint(f"[bold]Modified prompt saved to:[/bold] {output_path}")
             rprint(f"[bold]Dependency information saved to:[/bold] {csv_path}")
 
         return modified_prompt, total_cost, model_name
@@ -107,8 +117,8 @@ def auto_deps_main( # pylint: disable=too-many-arguments, too-many-locals
     except click.Abort:
         # User cancelled - re-raise to stop the sync loop
         raise
-    except Exception as
+    except Exception as e:
         if not ctx.obj.get('quiet', False):
-            rprint(f"[bold red]Error:[/bold red] {str(
+            rprint(f"[bold red]Error:[/bold red] {str(e)}")
         # Return error result instead of sys.exit(1) to allow orchestrator to handle gracefully
-        return "", 0.0, f"Error: {
+        return "", 0.0, f"Error: {e}"
pdd/auto_include.py
CHANGED
@@ -4,7 +4,8 @@ insert dependencies into a prompt.
 """
 import re
 from io import StringIO
-from
+from pathlib import Path
+from typing import Callable, List, Optional, Set, Tuple
 
 import pandas as pd
 from pydantic import BaseModel, Field
@@ -151,11 +152,175 @@ def _filter_self_references(dependencies: str, module_name: Optional[str]) -> st
     """
     if not module_name:
         return dependencies
-    # Pattern matches: <...><include>context/{module_name}_example.py</include></...>
-
+    # Pattern matches: <...><include>context/[subdirs/]{module_name}_example.py</include></...>
+    # The (?:[^/]+/)* matches zero or more subdirectory levels (e.g., backend/, frontend/)
+    pattern = rf'<[^>]+><include>context/(?:[^/]+/)*{re.escape(module_name)}_example\.py</include></[^>]+>\s*'
     return re.sub(pattern, '', dependencies)
 
 
+def _fix_malformed_includes(dependencies: str) -> str:
+    """Fix malformed [File: ...] patterns to proper <include>...</include> format.
+
+    The LLM sometimes outputs [File: path] instead of <include>path</include>.
+    This function corrects that error.
+
+    Args:
+        dependencies: The dependencies string containing potential malformed includes.
+
+    Returns:
+        The dependencies string with [File:] patterns converted to <include> tags.
+    """
+    # Pattern: <tag>[File: path]</tag> or <tag>\n[File: path]\n</tag>
+    pattern = r'(<[^>]+>)\s*\[File:\s*([^\]]+)\]\s*(</[^>]+>)'
+
+    def replacer(match: re.Match) -> str:
+        opening_tag = match.group(1)
+        path = match.group(2).strip()  # Strip whitespace from captured path
+        closing_tag = match.group(3)
+        return f'{opening_tag}<include>{path}</include>{closing_tag}'
+
+    fixed = re.sub(pattern, replacer, dependencies)
+    if fixed != dependencies:
+        console.print("[yellow]Warning: Fixed malformed [File:] patterns in dependencies[/yellow]")
+    return fixed
+
+
+def _extract_example_modules(content: str) -> Set[str]:
+    """Extract module names from _example.py includes.
+
+    Args:
+        content: The string content to search for include tags.
+
+    Returns:
+        A set of module names extracted from _example.py paths.
+        E.g., 'context/agentic_bug_example.py' -> 'agentic_bug'
+    """
+    pattern = r'<include>(.*?)</include>'
+    matches = re.findall(pattern, content, re.DOTALL)
+    modules = set()
+    for match in matches:
+        path = match.strip()
+        # Match pattern: context/[subdirs/]module_name_example.py
+        example_match = re.search(r'context/(?:[^/]+/)*([^/]+)_example\.py$', path)
+        if example_match:
+            modules.add(example_match.group(1))
+    return modules
+
+
+def _detect_circular_dependencies(
+    current_prompt: str,
+    new_dependencies: str,
+    prompts_dir: Optional[str] = None
+) -> List[List[str]]:
+    """Detect circular dependencies through example file includes.
+
+    Detects module-level circular dependencies where:
+    - Module A's prompt includes module B's example file
+    - Module B's prompt includes module A's example file
+
+    Args:
+        current_prompt: The current prompt file being processed.
+        new_dependencies: The new dependencies string to check.
+        prompts_dir: Optional base directory for resolving prompt paths.
+
+    Returns:
+        List of cycles found, where each cycle is a list of module names.
+    """
+    # Extract current module name from prompt filename
+    current_module = _extract_module_name(current_prompt)
+    if not current_module:
+        return []
+
+    # Extract module names from example includes in new dependencies
+    new_dep_modules = _extract_example_modules(new_dependencies)
+    if not new_dep_modules:
+        return []
+
+    cycles: List[List[str]] = []
+
+    # Determine base directory for prompts
+    if prompts_dir:
+        base_dir = Path(prompts_dir)
+    else:
+        # Try to find prompts directory relative to current prompt
+        current_path = Path(current_prompt)
+        if current_path.parent.name == 'prompts' or 'prompts' in str(current_path):
+            base_dir = current_path.parent
+        else:
+            base_dir = Path('prompts')
+
+    # Extract current prompt filename for cycle reporting
+    current_prompt_name = Path(current_prompt).name
+
+    # For each module we're about to depend on, check if it depends on us
+    for dep_module in new_dep_modules:
+        # Find the prompt file for this module (try common patterns)
+        prompt_patterns = [
+            f"{dep_module}_python.prompt",
+            f"{dep_module}_LLM.prompt",
+            f"{dep_module}.prompt",
+        ]
+
+        for pattern in prompt_patterns:
+            prompt_path = base_dir / pattern
+            if prompt_path.exists():
+                try:
+                    content = prompt_path.read_text(encoding='utf-8')
+                    # Check if this prompt includes our example file
+                    dep_modules = _extract_example_modules(content)
+                    if current_module in dep_modules:
+                        # Found circular dependency!
+                        # Use actual prompt filenames, not hardcoded suffixes
+                        cycles.append([
+                            current_prompt_name,
+                            pattern,
+                            current_prompt_name
+                        ])
+                except Exception:
+                    pass
+                break
+
+    return cycles
+
+
+def _filter_circular_dependencies(dependencies: str, cycles: List[List[str]]) -> str:
+    """Remove include tags that would create circular dependencies.
+
+    Args:
+        dependencies: The dependencies string containing include tags.
+        cycles: List of cycles, where each cycle is a list of prompt filenames.
+
+    Returns:
+        The dependencies string with circular dependency includes removed.
+    """
+    if not cycles:
+        return dependencies
+
+    # Extract module names from cycles (e.g., 'agentic_bug_python.prompt' -> 'agentic_bug')
+    problematic_modules: Set[str] = set()
+    for cycle in cycles:
+        for prompt_name in cycle:
+            # Extract module name from prompt filename using shared helper
+            module_name = _extract_module_name(prompt_name)
+            if module_name:
+                problematic_modules.add(module_name)
+
+    if not problematic_modules:
+        return dependencies
+
+    # Pattern to match include tags with _example.py files
+    # Matches: <wrapper><include>context/[subdirs/]module_example.py</include></wrapper>
+    # Using a simpler approach: find each include and check if it's problematic
+    result = dependencies
+    for module in problematic_modules:
+        # Remove includes for this module's example file
+        # Pattern: <wrapper><include>context/[subdirs/]module_example.py</include></wrapper>
+        pattern = rf'<[^>]+><include>context/(?:[^/]+/)*{re.escape(module)}_example\.py</include></[^>]+>\s*'
+        result = re.sub(pattern, '', result)
+
+    return result
+
+
 def auto_include(
     input_prompt: str,
     directory_path: str,
@@ -226,6 +391,23 @@ def auto_include(
     module_name = _extract_module_name(prompt_filename)
     dependencies = _filter_self_references(dependencies, module_name)
 
+    # Fix any malformed [File:] patterns from LLM output
+    dependencies = _fix_malformed_includes(dependencies)
+
+    # Detect and filter circular dependencies in prompt includes
+    if prompt_filename:
+        cycles = _detect_circular_dependencies(
+            current_prompt=prompt_filename,
+            new_dependencies=dependencies
+        )
+        if cycles:
+            dependencies = _filter_circular_dependencies(dependencies, cycles)
+            for cycle in cycles:
+                console.print(
+                    f"[yellow]Warning: Filtered circular dependency: "
+                    f"{' -> '.join(cycle)}[/yellow]"
+                )
+
     total_cost = summary_cost + llm_cost
     model_name = llm_model_name or summary_model
 
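
The two regexes doing most of the work in this diff are easy to sanity-check in isolation. The snippet below reproduces the [File: ...] repair and the _example.py module extraction on invented include strings (module names like "parser" are hypothetical); the real helpers additionally warn through the module's rich console.

# Standalone check of the regexes added to auto_include.py, using made-up input.
import re

FILE_TAG = r'(<[^>]+>)\s*\[File:\s*([^\]]+)\]\s*(</[^>]+>)'
EXAMPLE_PATH = r'context/(?:[^/]+/)*([^/]+)_example\.py$'


def fix_malformed(deps: str) -> str:
    # Rewrite <tag>[File: path]</tag> into <tag><include>path</include></tag>.
    return re.sub(
        FILE_TAG,
        lambda m: f"{m.group(1)}<include>{m.group(2).strip()}</include>{m.group(3)}",
        deps,
    )


def example_modules(deps: str) -> set:
    # Collect module names referenced via context/..._example.py includes.
    return {
        m.group(1)
        for path in re.findall(r'<include>(.*?)</include>', deps, re.DOTALL)
        if (m := re.search(EXAMPLE_PATH, path.strip()))
    }


deps = "<parser>[File: context/backend/parser_example.py]</parser>"
fixed = fix_malformed(deps)
print(fixed)                   # <parser><include>context/backend/parser_example.py</include></parser>
print(example_modules(fixed))  # {'parser'}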