pdd-cli 0.0.118__py3-none-any.whl → 0.0.121__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. pdd/__init__.py +1 -1
  2. pdd/agentic_bug_orchestrator.py +15 -6
  3. pdd/agentic_change_orchestrator.py +18 -7
  4. pdd/agentic_common.py +68 -40
  5. pdd/agentic_crash.py +2 -1
  6. pdd/agentic_e2e_fix_orchestrator.py +165 -9
  7. pdd/agentic_update.py +2 -1
  8. pdd/agentic_verify.py +3 -2
  9. pdd/auto_include.py +51 -0
  10. pdd/commands/analysis.py +32 -25
  11. pdd/commands/connect.py +69 -1
  12. pdd/commands/fix.py +31 -13
  13. pdd/commands/generate.py +5 -0
  14. pdd/commands/modify.py +47 -11
  15. pdd/commands/utility.py +12 -7
  16. pdd/core/cli.py +17 -4
  17. pdd/core/dump.py +68 -20
  18. pdd/fix_main.py +4 -2
  19. pdd/frontend/dist/assets/index-CUWd8al1.js +450 -0
  20. pdd/frontend/dist/index.html +1 -1
  21. pdd/llm_invoke.py +82 -12
  22. pdd/operation_log.py +342 -0
  23. pdd/postprocess.py +122 -100
  24. pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +11 -2
  25. pdd/prompts/generate_test_LLM.prompt +0 -1
  26. pdd/prompts/generate_test_from_example_LLM.prompt +251 -0
  27. pdd/prompts/prompt_code_diff_LLM.prompt +29 -25
  28. pdd/server/routes/prompts.py +26 -1
  29. pdd/server/terminal_spawner.py +15 -7
  30. pdd/sync_orchestration.py +164 -147
  31. pdd/sync_order.py +304 -0
  32. pdd/update_main.py +48 -24
  33. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/METADATA +3 -3
  34. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/RECORD +37 -35
  35. pdd/frontend/dist/assets/index-DQ3wkeQ2.js +0 -449
  36. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/WHEEL +0 -0
  37. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/entry_points.txt +0 -0
  38. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/licenses/LICENSE +0 -0
  39. {pdd_cli-0.0.118.dist-info → pdd_cli-0.0.121.dist-info}/top_level.txt +0 -0
pdd/frontend/dist/index.html CHANGED
@@ -367,7 +367,7 @@
       }
       }
     </script>
-    <script type="module" crossorigin src="/assets/index-DQ3wkeQ2.js"></script>
+    <script type="module" crossorigin src="/assets/index-CUWd8al1.js"></script>
     <link rel="stylesheet" crossorigin href="/assets/index-B5DZHykP.css">
   </head>
   <body class="bg-surface-950 text-gray-200">
pdd/llm_invoke.py CHANGED
@@ -146,20 +146,88 @@ class InsufficientCreditsError(Exception):
 # --- Cloud Execution Helpers ---
 
 def _ensure_all_properties_required(schema: Dict[str, Any]) -> Dict[str, Any]:
-    """Ensure ALL properties are in the required array (OpenAI strict mode requirement).
+    """Recursively ensure ALL properties are in the required array (OpenAI strict mode).
 
-    OpenAI's strict mode requires that all properties in a JSON schema are listed
-    in the 'required' array. Pydantic's model_json_schema() only includes fields
-    without default values in 'required', which causes OpenAI to reject the schema.
+    OpenAI's strict mode requires that all properties at ALL levels of a JSON schema
+    are listed in the 'required' array. Pydantic's model_json_schema() only includes
+    fields without default values in 'required', which causes OpenAI to reject the schema.
+
+    This function walks the entire schema tree and ensures every object type has all
+    its properties in the 'required' array.
 
     Args:
         schema: A JSON schema dictionary
 
     Returns:
-        The schema with all properties added to 'required'
+        The schema with all properties added to 'required' at all nesting levels
     """
-    if 'properties' in schema:
+    if not isinstance(schema, dict):
+        return schema
+
+    # If this is an object with properties, make all properties required
+    if schema.get('type') == 'object' and 'properties' in schema:
         schema['required'] = list(schema['properties'].keys())
+        # Recurse into each property
+        for prop_schema in schema['properties'].values():
+            _ensure_all_properties_required(prop_schema)
+
+    # Handle array items
+    if schema.get('type') == 'array' and 'items' in schema:
+        _ensure_all_properties_required(schema['items'])
+
+    # Handle anyOf/oneOf/allOf
+    for key in ('anyOf', 'oneOf', 'allOf'):
+        if key in schema:
+            for sub_schema in schema[key]:
+                _ensure_all_properties_required(sub_schema)
+
+    # Handle $defs
+    if '$defs' in schema:
+        for def_schema in schema['$defs'].values():
+            _ensure_all_properties_required(def_schema)
+
+    return schema
+
+
+def _add_additional_properties_false(schema: Dict[str, Any]) -> Dict[str, Any]:
+    """Recursively add additionalProperties: false to all object schemas.
+
+    OpenAI's strict mode requires additionalProperties: false on ALL object
+    schemas, including nested ones. This function walks the schema tree and
+    adds the property to every object type.
+
+    Args:
+        schema: A JSON schema dictionary
+
+    Returns:
+        The schema with additionalProperties: false on all objects
+    """
+    if not isinstance(schema, dict):
+        return schema
+
+    # If this is an object type, add additionalProperties: false
+    if schema.get('type') == 'object':
+        schema['additionalProperties'] = False
+        # Recursively process properties
+        if 'properties' in schema:
+            for prop_name, prop_schema in schema['properties'].items():
+                _add_additional_properties_false(prop_schema)
+
+    # Handle arrays - process items schema
+    if schema.get('type') == 'array' and 'items' in schema:
+        _add_additional_properties_false(schema['items'])
+
+    # Handle anyOf, oneOf, allOf
+    for key in ('anyOf', 'oneOf', 'allOf'):
+        if key in schema:
+            for sub_schema in schema[key]:
+                _add_additional_properties_false(sub_schema)
+
+    # Handle $defs (Pydantic's reference definitions)
+    if '$defs' in schema:
+        for def_name, def_schema in schema['$defs'].items():
+            _add_additional_properties_false(def_schema)
+
     return schema
 
 
@@ -1919,8 +1987,8 @@ def llm_invoke(
         schema = output_pydantic.model_json_schema()
         # Ensure all properties are in required array (OpenAI strict mode requirement)
         _ensure_all_properties_required(schema)
-        # Add additionalProperties: false for strict mode (required by OpenAI)
-        schema["additionalProperties"] = False
+        # Add additionalProperties: false recursively for strict mode (required by OpenAI)
+        _add_additional_properties_false(schema)
         response_format = {
             "type": "json_schema",
            "json_schema": {
@@ -1945,8 +2013,8 @@ def llm_invoke(
                 "strict": False
             }
         }
-        # Add additionalProperties: false for strict mode (required by OpenAI)
-        response_format["json_schema"]["schema"]["additionalProperties"] = False
+        # Ensure all properties are in required array (OpenAI strict mode requirement)
+        _ensure_all_properties_required(response_format["json_schema"]["schema"])
+        # Add additionalProperties: false recursively for strict mode (required by OpenAI)
+        _add_additional_properties_false(response_format["json_schema"]["schema"])
 
         litellm_kwargs["response_format"] = response_format
 
@@ -2133,8 +2203,8 @@ def llm_invoke(
 
         # Ensure all properties are in required array (OpenAI strict mode requirement)
        _ensure_all_properties_required(schema)
-        # Add additionalProperties: false for strict mode (required by OpenAI)
-        schema['additionalProperties'] = False
+        # Add additionalProperties: false recursively for strict mode (required by OpenAI)
+        _add_additional_properties_false(schema)
 
         # Use text.format with json_schema for structured output
         text_block = {
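
What changed functionally: the old code only normalized the top level of the schema, while OpenAI's strict json_schema mode also validates nested objects, array items, and $defs. A minimal sketch of what the two helpers now do to a nested Pydantic model; the Task/Plan models are illustrative only (not part of pdd), and the snippet assumes pdd 0.0.121 and Pydantic v2 are installed:

from typing import List, Optional

from pydantic import BaseModel

from pdd.llm_invoke import (
    _add_additional_properties_false,
    _ensure_all_properties_required,
)


class Task(BaseModel):
    # 'notes' has a default, so Pydantic leaves it out of 'required'
    name: str
    notes: Optional[str] = None


class Plan(BaseModel):
    title: str
    tasks: List[Task]


schema = Plan.model_json_schema()

# Pydantic only marks defaultless fields as required, and nested definitions
# under $defs are left untouched -- exactly what strict mode rejects.
print(schema["$defs"]["Task"]["required"])              # ['name']

_ensure_all_properties_required(schema)
_add_additional_properties_false(schema)

# Every object in the tree now requires all of its properties and forbids extras.
print(schema["$defs"]["Task"]["required"])              # ['name', 'notes']
print(schema["$defs"]["Task"]["additionalProperties"])  # False
print(schema["additionalProperties"])                   # False

Both helpers mutate the dict in place and return it, so llm_invoke can keep passing the same schema object into response_format after normalization.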
pdd/operation_log.py ADDED
@@ -0,0 +1,342 @@
+from __future__ import annotations
+
+import functools
+import json
+import os
+import re
+
+import time
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from rich.console import Console
+
+# We assume standard paths relative to the project root
+PDD_DIR = ".pdd"
+META_DIR = os.path.join(PDD_DIR, "meta")
+
+
+def ensure_meta_dir() -> None:
+    """Ensure the .pdd/meta directory exists."""
+    os.makedirs(META_DIR, exist_ok=True)
+
+
+def get_log_path(basename: str, language: str) -> Path:
+    """Get the path to the sync log for a specific module."""
+    ensure_meta_dir()
+    return Path(META_DIR) / f"{basename}_{language}_sync.log"
+
+
+def get_fingerprint_path(basename: str, language: str) -> Path:
+    """Get the path to the fingerprint JSON file for a specific module."""
+    ensure_meta_dir()
+    return Path(META_DIR) / f"{basename}_{language}.json"
+
+
+def get_run_report_path(basename: str, language: str) -> Path:
+    """Get the path to the run report file for a specific module."""
+    ensure_meta_dir()
+    return Path(META_DIR) / f"{basename}_{language}_run.json"
+
+
+def infer_module_identity(prompt_file_path: Union[str, Path]) -> Tuple[Optional[str], Optional[str]]:
+    """
+    Infer basename and language from a prompt file path.
+
+    Expected pattern: prompts/{basename}_{language}.prompt
+
+    Args:
+        prompt_file_path: Path to the prompt file.
+
+    Returns:
+        Tuple of (basename, language) or (None, None) if inference fails.
+    """
+    path_obj = Path(prompt_file_path)
+    filename = path_obj.stem  # e.g., "my_module_python" from "my_module_python.prompt"
+
+    # Try to split by the last underscore to separate language
+    # This is a heuristic; strict naming conventions are assumed
+    match = re.match(r"^(.*)_([^_]+)$", filename)
+    if match:
+        basename = match.group(1)
+        language = match.group(2)
+        return basename, language
+
+    return None, None
+
+
+def load_operation_log(basename: str, language: str) -> List[Dict[str, Any]]:
+    """
+    Load all log entries for a module.
+
+    Args:
+        basename: Module basename.
+        language: Module language.
+
+    Returns:
+        List of log entries (dictionaries).
+    """
+    log_path = get_log_path(basename, language)
+    entries = []
+
+    if log_path.exists():
+        try:
+            with open(log_path, 'r', encoding='utf-8') as f:
+                for line in f:
+                    if line.strip():
+                        try:
+                            entry = json.loads(line)
+                            # Backwards compatibility: defaulting invocation_mode to "sync"
+                            if "invocation_mode" not in entry:
+                                entry["invocation_mode"] = "sync"
+                            entries.append(entry)
+                        except json.JSONDecodeError:
+                            continue
+        except Exception:
+            # If log is corrupt or unreadable, return empty list rather than crashing
+            pass
+
+    return entries
+
+
+def append_log_entry(
+    basename: str,
+    language: str,
+    entry: Dict[str, Any]
+) -> None:
+    """
+    Append a single entry to the module's sync log.
+
+    Args:
+        basename: Module basename.
+        language: Module language.
+        entry: Dictionary of data to log.
+    """
+    log_path = get_log_path(basename, language)
+
+    # Ensure standard fields exist
+    if "timestamp" not in entry:
+        entry["timestamp"] = datetime.now().isoformat()
+
+    try:
+        with open(log_path, 'a', encoding='utf-8') as f:
+            f.write(json.dumps(entry) + "\n")
+    except Exception as e:
+        # Fallback console warning if logging fails
+        console = Console()
+        console.print(f"[yellow]Warning: Failed to write to log file {log_path}: {e}[/yellow]")
+
+
+def create_log_entry(
+    operation: str,
+    reason: str,
+    invocation_mode: str = "sync",
+    estimated_cost: float = 0.0,
+    confidence: float = 0.0,
+    decision_type: str = "unknown"
+) -> Dict[str, Any]:
+    """
+    Create a new log entry dictionary structure.
+    """
+    return {
+        "timestamp": datetime.now().isoformat(),
+        "operation": operation,
+        "reason": reason,
+        "invocation_mode": invocation_mode,
+        "estimated_cost": estimated_cost,
+        "confidence": confidence,
+        "decision_type": decision_type,
+        "success": False,
+        "duration": 0.0,
+        "actual_cost": 0.0,
+        "model": "unknown",
+        "error": None
+    }
+
+
+def create_manual_log_entry(operation: str) -> Dict[str, Any]:
+    """
+    Convenience function to create a manual invocation log entry dict.
+    """
+    return create_log_entry(
+        operation=operation,
+        reason="Manual invocation via CLI",
+        invocation_mode="manual"
+    )
+
+
+def update_log_entry(
+    entry: Dict[str, Any],
+    success: bool,
+    cost: float,
+    model: str,
+    duration: float,
+    error: Optional[str] = None
+) -> Dict[str, Any]:
+    """
+    Update a log entry with execution results.
+    """
+    entry["success"] = success
+    entry["actual_cost"] = cost
+    entry["model"] = model
+    entry["duration"] = duration
+    entry["error"] = error
+    return entry
+
+
+def log_event(
+    basename: str,
+    language: str,
+    event_type: str,
+    details: Any,
+    invocation_mode: str = "manual"
+) -> None:
+    """
+    Log a special event to the sync log.
+    """
+    entry = {
+        "timestamp": datetime.now().isoformat(),
+        "type": "event",
+        "event_type": event_type,
+        "details": details,
+        "invocation_mode": invocation_mode
+    }
+    append_log_entry(basename, language, entry)
+
+
+def save_fingerprint(
+    basename: str,
+    language: str,
+    operation: str,
+    paths: Optional[Dict[str, Path]] = None,
+    cost: float = 0.0,
+    model: str = "unknown"
+) -> None:
+    """
+    Save the current fingerprint/state to the state file.
+
+    Writes the full Fingerprint dataclass format compatible with read_fingerprint()
+    in sync_determine_operation.py. This ensures manual commands (generate, example)
+    don't break sync's fingerprint tracking.
+    """
+    from dataclasses import asdict
+    from datetime import timezone
+    from .sync_determine_operation import calculate_current_hashes, Fingerprint
+    from . import __version__
+
+    path = get_fingerprint_path(basename, language)
+
+    # Calculate file hashes from paths (if provided)
+    current_hashes = calculate_current_hashes(paths) if paths else {}
+
+    # Create Fingerprint with same format as _save_fingerprint_atomic
+    fingerprint = Fingerprint(
+        pdd_version=__version__,
+        timestamp=datetime.now(timezone.utc).isoformat(),
+        command=operation,
+        prompt_hash=current_hashes.get('prompt_hash'),
+        code_hash=current_hashes.get('code_hash'),
+        example_hash=current_hashes.get('example_hash'),
+        test_hash=current_hashes.get('test_hash'),
+        test_files=current_hashes.get('test_files'),
+    )
+
+    try:
+        with open(path, 'w', encoding='utf-8') as f:
+            json.dump(asdict(fingerprint), f, indent=2)
+    except Exception as e:
+        console = Console()
+        console.print(f"[yellow]Warning: Failed to save fingerprint to {path}: {e}[/yellow]")
+
+
+def save_run_report(basename: str, language: str, report_data: Dict[str, Any]) -> None:
+    """
+    Save a run report (test results) to the state file.
+    """
+    path = get_run_report_path(basename, language)
+    try:
+        with open(path, 'w', encoding='utf-8') as f:
+            json.dump(report_data, f, indent=2)
+    except Exception as e:
+        console = Console()
+        console.print(f"[yellow]Warning: Failed to save run report to {path}: {e}[/yellow]")
+
+
+def clear_run_report(basename: str, language: str) -> None:
+    """
+    Remove an existing run report if it exists.
+    """
+    path = get_run_report_path(basename, language)
+    if path.exists():
+        try:
+            os.remove(path)
+        except Exception:
+            pass
+
+
+def log_operation(
+    operation: str,
+    updates_fingerprint: bool = False,
+    updates_run_report: bool = False,
+    clears_run_report: bool = False
+) -> Callable:
+    """
+    Decorator for Click commands to automatically log operations and manage state.
+    """
+    def decorator(func: Callable) -> Callable:
+        @functools.wraps(func)
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            # Try to get prompt_file from named kwarg first
+            prompt_file = kwargs.get('prompt_file')
+
+            # If not found, check if there's an 'args' tuple (for commands using nargs=-1)
+            # and the first element looks like a prompt file path
+            if not prompt_file:
+                cli_args = kwargs.get('args')
+                if cli_args and len(cli_args) > 0:
+                    first_arg = str(cli_args[0])
+                    # Check if it looks like a prompt file (ends with .prompt)
+                    if first_arg.endswith('.prompt'):
+                        prompt_file = first_arg
+
+            basename, language = (None, None)
+            if prompt_file:
+                basename, language = infer_module_identity(prompt_file)
+
+            if basename and language and clears_run_report:
+                clear_run_report(basename, language)
+
+            entry = create_manual_log_entry(operation=operation)
+            start_time = time.time()
+            success = False
+            result = None
+            error_msg = None
+
+            try:
+                result = func(*args, **kwargs)
+                success = True
+                return result
+            except Exception as e:
+                success = False
+                error_msg = str(e)
+                raise
+            finally:
+                duration = time.time() - start_time
+                cost = 0.0
+                model = "unknown"
+                if success and result:
+                    if isinstance(result, tuple) and len(result) >= 3:
+                        if isinstance(result[1], (int, float)): cost = float(result[1])
+                        if isinstance(result[2], str): model = str(result[2])
+
+                update_log_entry(entry, success=success, cost=cost, model=model, duration=duration, error=error_msg)
+                if basename and language:
+                    append_log_entry(basename, language, entry)
+                    if success:
+                        if updates_fingerprint:
+                            save_fingerprint(basename, language, operation=operation, cost=cost, model=model)
+                        if updates_run_report and isinstance(result, dict):
+                            save_run_report(basename, language, result)
+        return wrapper
+    return decorator
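
pdd/operation_log.py is new in this range; the diff defines the decorator but does not show how the CLI consumes it. A minimal sketch of decorating a Click command and reading the resulting JSONL log, assuming pdd 0.0.121 is installed; the demo command, the calculator_python prompt name, and the hard-coded (result, cost, model) return tuple are hypothetical, not taken from the package:

# Hypothetical usage sketch of pdd.operation_log.log_operation.
import click

from pdd.operation_log import load_operation_log, log_operation


@click.command()
@click.argument("prompt_file")
@log_operation(operation="generate", clears_run_report=True)
def demo(prompt_file: str):
    """Stand-in command body; pdd's real commands return (result, cost, model)
    tuples, which the decorator reads to fill actual_cost and model."""
    return "generated code", 0.0123, "gpt-4o-mini"


if __name__ == "__main__":
    # e.g. `python demo_cli.py prompts/calculator_python.prompt`
    # The decorator infers basename="calculator", language="python" from the
    # filename, clears any stale run report, times the call, and appends one
    # JSON line to .pdd/meta/calculator_python_sync.log, roughly:
    #   {"operation": "generate", "invocation_mode": "manual", "success": true,
    #    "actual_cost": 0.0123, "model": "gpt-4o-mini", "duration": 0.002, ...}
    demo(standalone_mode=False)
    for entry in load_operation_log("calculator", "python"):
        print(entry["operation"], entry["invocation_mode"], entry["success"])

Note that the decorator only inspects the return value: commands returning a (result, cost, model) tuple get real cost and model fields in the log; anything else is recorded with cost 0.0 and model "unknown".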