pdd-cli 0.0.90__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- pdd/__init__.py +38 -6
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +521 -786
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +118 -3
- pdd/agentic_update.py +25 -8
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +63 -53
- pdd/auto_include.py +185 -3
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +195 -23
- pdd/cmd_test_main.py +345 -197
- pdd/code_generator.py +4 -2
- pdd/code_generator_main.py +118 -32
- pdd/commands/__init__.py +6 -0
- pdd/commands/analysis.py +87 -29
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +136 -113
- pdd/commands/maintenance.py +3 -2
- pdd/commands/misc.py +8 -0
- pdd/commands/modify.py +190 -164
- pdd/commands/sessions.py +284 -0
- pdd/construct_paths.py +334 -32
- pdd/context_generator_main.py +167 -170
- pdd/continue_generation.py +6 -3
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +27 -3
- pdd/core/cloud.py +237 -0
- pdd/core/errors.py +4 -0
- pdd/core/remote_session.py +61 -0
- pdd/crash_main.py +219 -23
- pdd/data/llm_model.csv +4 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +208 -34
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +291 -38
- pdd/fix_main.py +204 -4
- pdd/fix_verification_errors_loop.py +235 -26
- pdd/fix_verification_main.py +269 -83
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +46 -5
- pdd/generate_test.py +212 -151
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +309 -20
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +7 -5
- pdd/insert_includes.py +2 -1
- pdd/llm_invoke.py +459 -95
- pdd/load_prompt_template.py +15 -34
- pdd/path_resolution.py +140 -0
- pdd/postprocess.py +4 -1
- pdd/preprocess.py +68 -12
- pdd/preprocess_main.py +33 -1
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +2 -2
- pdd/prompts/agentic_update_LLM.prompt +192 -338
- pdd/prompts/auto_include_LLM.prompt +22 -0
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +571 -14
- pdd/prompts/fix_code_module_errors_LLM.prompt +8 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +1 -0
- pdd/prompts/generate_test_LLM.prompt +20 -1
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/insert_includes_LLM.prompt +262 -252
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/remote_session.py +876 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/summarize_directory.py +236 -237
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +329 -47
- pdd/sync_main.py +272 -28
- pdd/sync_orchestration.py +136 -75
- pdd/template_expander.py +161 -0
- pdd/templates/architecture/architecture_json.prompt +41 -46
- pdd/trace.py +1 -1
- pdd/track_cost.py +0 -13
- pdd/unfinished_prompt.py +2 -1
- pdd/update_main.py +23 -5
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +15 -10
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- pdd_cli-0.0.90.dist-info/RECORD +0 -153
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,7 @@
 % You are an expert PDD (Prompt-Driven Development) engineer. Your task is to update a prompt file to reflect code changes while making it as compact as possible.

 % Prompting Guide (follow these best practices):
+<pdd_prompting_guide>
 # Prompt‑Driven Development Prompting Guide

 This guide shows how to write effective prompts for Prompt‑Driven Development (PDD). It distills best practices from the PDD whitepaper, the PDD doctrine, and working patterns in this repo. It also contrasts PDD prompts with interactive agentic coding tools (e.g., Claude Code, Cursor) where prompts act as ad‑hoc patches instead of the source of truth.
@@ -15,19 +16,21 @@ If you are new to Prompt-Driven Development (PDD), follow this recipe:

 1. **Think "One Prompt = One Module":** Don't try to generate the whole app at once. Focus on one file (e.g., `user_service.py`).
 2. **Use a Template:** Start with a clear structure: Role, Requirements, Dependencies, Instructions.
-3. **Explicitly Include Context:** Use
+3. **Explicitly Include Context:** Use `<include>path/to/file</include>` to give the model *only* what it needs (e.g., a shared preamble or a dependency interface). This is a **PDD directive**, not just XML.
 4. **Regenerate, Don't Patch:** If the code is wrong, fix it using `pdd fix`. This updates the system's memory so the next `pdd generate` is grounded in the correct solution.
 5. **Verify:** Run the generated code/tests.

 *Tip: Treat your prompt like source code. It is the single source of truth.*

+*For the conceptual foundation of why this works, see [The Mold Paradigm](prompt-driven-development-doctrine.md#the-mold-paradigm) in the doctrine.*
+
 ---

 ## Glossary

 - **Context Engineering:** The art of curating exactly what information (code, docs, examples) fits into the LLM's limited "working memory" (context window) to get the best result.
 - **Shared Preamble:** A standard text file (e.g., `project_preamble.prompt`) included in every prompt to enforce common rules like coding style, forbidden libraries, and formatting.
-- **PDD Directive:** Special tags like
+- **PDD Directive:** Special tags like `<include>` or `<shell>` that the PDD tool processes *before* sending the text to the AI. The AI sees the *result* (the file content), not the tag.
 - **Source of Truth:** The definitive record. In PDD, the **Prompt** is the source of truth; the code is just a temporary artifact generated from it.
 - **Grounding (Few-Shot History):** The process where the PDD system automatically uses successful past pairs of (Prompt, Code) as "few-shot" examples during generation. This ensures that regenerated code adheres to the established style and logic of the previous version, preventing the model from hallucinating a completely different implementation.
 - **Drift:** When the generated code slowly diverges from the prompt's intent over time, or when manual edits to code make it inconsistent with the prompt.
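To make the recipe in the hunk above concrete, here is a minimal sketch of a single-module prompt that follows it, using only directives documented in this guide (the module name and file paths are hypothetical):

```text
% You are an expert Python engineer. Write `user_service.py`: a small module for creating and fetching users.

% Dependencies
<include>context/project_preamble.prompt</include>

% Requirements
Input: a user name (str). Output: a user record (dict) with `id` and `name`.
- Provide `create_user(name: str) -> dict` and `get_user(user_id: str) -> dict | None`.
- Raise `ValueError` on an empty name.

% Instructions
Analyze the requirements and think step-by-step before writing code.
```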
@@ -43,6 +46,8 @@ If you are new to Prompt-Driven Development (PDD), follow this recipe:

 Contrast with interactive patching (Claude Code, Cursor): prompts are ephemeral instructions for local diffs. They are great for short, local fixes, but tend to drift from original intent as context is implicit and often lost. In PDD, prompts are versioned, explicit, and designed for batch, reproducible generation.

+For a deeper exploration of why this paradigm shift matters—and an analogy to manufacturing's wood‑to‑plastic transition—see [The Mold Paradigm](prompt-driven-development-doctrine.md#the-mold-paradigm) in the doctrine.
+
 ---

 ## The PDD Mental Model
@@ -74,6 +79,7 @@ Notes:

 ---

+<a name="automated-grounding"></a>
 ## Automated Grounding (PDD Cloud)

 Unlike standard LLM interactions where every request is a blank slate, PDD Cloud uses **Automated Grounding** to prevent "implementation drift."
@@ -172,17 +178,16 @@ These patterns are used across prompts in this repo:

 - Preamble and role: start with a concise, authoritative description of the task and audience (e.g., “You are an expert Python engineer…”).
 - Includes for context: bring only what the model needs.
-- Single include: `<include>path/to/file
-- Multiple:
-[File not found: path2]
-[File not found: …]`
+- Single include: `<include>path/to/file</include>`. **Note:** This is a PDD directive, not standard XML. The PDD tool replaces this tag with the actual file content *before* the LLM sees it. (Handles both text and images).
+- Multiple: `<include-many>path1, path2, …</include-many>`
 - Grouping: wrap includes in a semantic tag to name the dependency or file they represent, for example:
 ```xml
 <render_js>
-
+<include>src/render.js</include>
 </render_js>
 ```
 - When including larger files inline for clarity, wrap with opening/closing tags named after the file, e.g. `<render.js>…</render.js>`.
+- Note: `<include>`, `<include-many>`, `<shell>`, and `<web>` inside fenced code blocks (``` or ~~~) or inline backticks are treated as literal text.
 - Inputs/outputs: state them explicitly (names, types, shapes). Prompts should define Inputs/Outputs and steps clearly.
 - Steps & Chain of Thought: Outline a short, deterministic plan. For complex logical tasks, explicitly instruct the model to "Analyze the requirements and think step-by-step before writing code." This improves accuracy on difficult reasoning problems.
 - Constraints: specify style, performance targets, security, and error handling.
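The include patterns documented in the hunk above typically appear together in a prompt's dependency section. A short sketch follows, reusing the shared preamble and `billing_service` example files named elsewhere in this guide; the `<include-many>` paths are hypothetical:

```text
% Dependencies
<include>context/project_preamble.prompt</include>

<billing_service>
<include>context/billing_service_example.py</include>
</billing_service>

% Reference docs
<include-many>docs/api_contract.md, docs/error_codes.md</include-many>
```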
@@ -192,360 +197,205 @@ Tip: Prefer small, named sections using XML‑style tags to make context scannab

 ### Special XML Tags: pdd, shell, web

-The PDD preprocessor supports additional XML‑style tags to keep prompts clean, reproducible, and self‑contained. Processing order (per spec) is: `pdd` → `include`/`include-many` → `shell` → `web`. When `recursive=True`, `<shell>` and
-- Purpose: fetch the page (via Firecrawl) and inline the markdown content.
-- Behavior: executes during non‑recursive preprocessing; on failure, inserts a bracketed error note.
-- Example: `[Skip to main content](https://docs.litellm.ai/docs/completion/json_mode#__docusaurus_skipToContent_fallback)
-
-On this page
-
-## Quick Start [](https://docs.litellm.ai/docs/completion/json_mode\#quick-start "Direct link to Quick Start")
-
-- SDK
-- PROXY
-
-```python
-from litellm import completion
-import os
-
-os.environ["OPENAI_API_KEY"] = ""
-
-response = completion(
-model="gpt-4o-mini",
-response_format={ "type": "json_object" },
-messages=[\
-{"role": "system", "content": "You are a helpful assistant designed to output JSON."},\
-{"role": "user", "content": "Who won the world series in 2020?"}\
-]
-)
-print(response.choices[0].message.content)
-```
-
-```bash
-curl http://0.0.0.0:4000/v1/chat/completions \
--H "Content-Type: application/json" \
--H "Authorization: Bearer $LITELLM_KEY" \
--d '{
-"model": "gpt-4o-mini",
-"response_format": { "type": "json_object" },
-"messages": [\
-{\
-"role": "system",\
-"content": "You are a helpful assistant designed to output JSON."\
-},\
-{\
-"role": "user",\
-"content": "Who won the world series in 2020?"\
-}\
-]
-}'
-```
-
-## Check Model Support [](https://docs.litellm.ai/docs/completion/json_mode\#check-model-support "Direct link to Check Model Support")
-
-### 1\. Check if model supports `response_format` [](https://docs.litellm.ai/docs/completion/json_mode\#1-check-if-model-supports-response_format "Direct link to 1-check-if-model-supports-response_format")
-
-Call `litellm.get_supported_openai_params` to check if a model/provider supports `response_format`.
-
-```python
-from litellm import get_supported_openai_params
-
-params = get_supported_openai_params(model="anthropic.claude-3", custom_llm_provider="bedrock")
-
-assert "response_format" in params
-```
-
-### 2\. Check if model supports `json_schema` [](https://docs.litellm.ai/docs/completion/json_mode\#2-check-if-model-supports-json_schema "Direct link to 2-check-if-model-supports-json_schema")
-
-This is used to check if you can pass
-
-- `response_format={ "type": "json_schema", "json_schema": … , "strict": true }`
-- `response_format=<Pydantic Model>`
-
-```python
-from litellm import supports_response_schema
-
-assert supports_response_schema(model="gemini-1.5-pro-preview-0215", custom_llm_provider="bedrock")
-```
+The PDD preprocessor supports additional XML‑style tags to keep prompts clean, reproducible, and self‑contained. Processing order (per spec) is: `pdd` → `include`/`include-many` → `shell` → `web`. When `recursive=True`, `<shell>` and `<web>` are deferred until a non‑recursive pass.

-
-
-
-
-To use Structured Outputs, simply specify
-
-```text
-response_format: { "type": "json_schema", "json_schema": … , "strict": true }
-```
+- `<pdd>…</pdd>`
+- Purpose: human‑only comment. Removed entirely during preprocessing.
+- Use: inline rationale or notes that should not reach the model.
+- Example: `Before step X <pdd>explain why we do this here</pdd>`

-
+- `<shell>…</shell>`
+- Purpose: run a shell command and inline stdout at that position.
+- Behavior: executes during non‑recursive preprocessing; on non‑zero exit, inserts a bracketed error with the exit code instead of failing the pipeline.
+- Example: `<shell>git config --get user.name</shell>`

--
--
--
--
-- Vertex AI models (Gemini + Anthropic)
-- Bedrock Models
-- Anthropic API Models
-- Groq Models
-- Ollama Models
-- Databricks Models
-
-- SDK
-- PROXY
-
-```python
-import os
-from litellm import completion
-from pydantic import BaseModel
+- `<web>URL</web>`
+- Purpose: fetch the page (via Firecrawl) and inline the markdown content.
+- Behavior: executes during non‑recursive preprocessing; on failure, inserts a bracketed error note.
+- Example: `<web>https://docs.litellm.ai/docs/completion/json_mode</web>`

-
-
+> ⚠️ **Warning: Non-Deterministic Tags**
+>
+> `<shell>` and `<web>` introduce **non-determinism**:
+> - `<shell>` output varies by environment (different machines, different results)
+> - `<web>` content changes over time (same URL, different content)
+>
+> **Impact:** Same prompt file → different generations on different machines/times
+>
+> **Prefer instead:** Capture output to a static file, then `<include>` that file. This ensures reproducible regeneration.

-
+Use these tags sparingly. When you must use them, prefer stable commands with bounded output (e.g., `head -n 20` in `<shell>`).

-
-name: str
-date: str
-participants: list[str]
+---

-
-events: list[CalendarEvent]
+## Architecture Metadata Tags

-
-model="gpt-4o-2024-08-06",
-messages=messages,
-response_format=EventsList
-)
+PDD prompts can include optional XML metadata tags that sync with `architecture.json`. These tags enable bidirectional sync between prompt files and the architecture visualization, keeping your project's architecture documentation automatically up-to-date.

-
+### Tag Format

-
-```
+Place architecture metadata tags at the **top of your prompt file** (after any `<include>` directives but before the main content):

-
+```xml
+<pdd-reason>Brief description of module's purpose (60-120 chars)</pdd-reason>
+
+<pdd-interface>
+{{
+"type": "module",
+"module": {{
+"functions": [
+{{"name": "function_name", "signature": "(...)", "returns": "Type"}}
+]
+}}
+}}
+</pdd-interface>

-
-
-- model_name: "gpt-4o"
-litellm_params:
-model: "gpt-4o-2024-08-06"
+<pdd-dependency>dependency_prompt_1.prompt</pdd-dependency>
+<pdd-dependency>dependency_prompt_2.prompt</pdd-dependency>
 ```

-
-
-
-
+### Tag Reference
+
+**`<pdd-reason>`**
+- **Purpose**: One-line description of why this module exists
+- **Maps to**: `architecture.json["reason"]`
+- **Format**: Single line string (recommended 60-120 characters)
+- **Example**: `<pdd-reason>Provides unified LLM invocation across all PDD operations.</pdd-reason>`
+
+**`<pdd-interface>`**
+- **Purpose**: JSON describing the module's public API (functions, commands, pages)
+- **Maps to**: `architecture.json["interface"]`
+- **Format**: Valid JSON matching one of four interface types (see below)
+- **Example**:
+```xml
+<pdd-interface>
+{{
+"type": "module",
+"module": {{
+"functions": [
+{{"name": "llm_invoke", "signature": "(prompt, strength, ...)", "returns": "Dict"}}
+]
+}}
+}}
+</pdd-interface>
+```
+
+**`<pdd-dependency>`**
+- **Purpose**: References other prompt files this module depends on
+- **Maps to**: `architecture.json["dependencies"]` array
+- **Format**: Prompt filename (e.g., `llm_invoke_python.prompt`)
+- **Multiple tags**: Use one `<pdd-dependency>` tag per dependency
+- **Example**:
+```xml
+<pdd-dependency>llm_invoke_python.prompt</pdd-dependency>
+<pdd-dependency>path_resolution_python.prompt</pdd-dependency>
+```
+
+### Interface Types
+
+The `<pdd-interface>` tag supports four interface types, matching the architecture.json schema:
+
+**Module Interface** (Python modules with functions):
+```json
+{{
+"type": "module",
+"module": {{
+"functions": [
+{{"name": "func_name", "signature": "(arg1, arg2)", "returns": "Type"}}
+]
+}}
+}}
 ```

-
-
-
-
-
-
-
-
-
-
-client = OpenAI(
-api_key="anything", # 👈 PROXY KEY (can be anything, if master_key not set)
-base_url="http://0.0.0.0:4000" # 👈 PROXY BASE URL
-)
-
-class Step(BaseModel):
-explanation: str
-output: str
-
-class MathReasoning(BaseModel):
-steps: list[Step]
-final_answer: str
-
-completion = client.beta.chat.completions.parse(
-model="gpt-4o",
-messages=[\
-{"role": "system", "content": "You are a helpful math tutor. Guide the user through the solution step by step."},\
-{"role": "user", "content": "how can I solve 8x + 7 = -23"}\
-],
-response_format=MathReasoning,
-)
-
-math_reasoning = completion.choices[0].message.parsed
+**CLI Interface** (Command-line interfaces):
+```json
+{{
+"type": "cli",
+"cli": {{
+"commands": [
+{{"name": "cmd_name", "description": "What it does"}}
+]
+}}
+}}
 ```

-**
-
-
-
-
-
-
-
-
-
-"role": "system",\
-"content": "You are a helpful math tutor. Guide the user through the solution step by step."\
-},\
-{\
-"role": "user",\
-"content": "how can I solve 8x + 7 = -23"\
-}\
-],
-"response_format": {
-"type": "json_schema",
-"json_schema": {
-"name": "math_reasoning",
-"schema": {
-"type": "object",
-"properties": {
-"steps": {
-"type": "array",
-"items": {
-"type": "object",
-"properties": {
-"explanation": { "type": "string" },
-"output": { "type": "string" }
-},
-"required": ["explanation", "output"],
-"additionalProperties": false
-}
-},
-"final_answer": { "type": "string" }
-},
-"required": ["steps", "final_answer"],
-"additionalProperties": false
-},
-"strict": true
-}
-}
-}'
+**Command Interface** (PDD commands):
+```json
+{{
+"type": "command",
+"command": {{
+"commands": [
+{{"name": "cmd_name", "description": "What it does"}}
+]
+}}
+}}
 ```

-
-
-
-
-
-
+**Frontend Interface** (UI pages):
+```json
+{{
+"type": "frontend",
+"frontend": {{
+"pages": [
+{{"name": "page_name", "route": "/path"}}
+]
+}}
+}}
 ```

-
-
-[**See Code**](https://github.com/BerriAI/litellm/blob/671d8ac496b6229970c7f2a3bdedd6cb84f0746b/litellm/litellm_core_utils/json_validation_rule.py#L4)
-
-- SDK
-- PROXY
-
-```python
-# !gcloud auth application-default login - run this to add vertex credentials to your env
-import litellm, os
-from litellm import completion
-from pydantic import BaseModel
+### Sync Workflow

-
-
-
-
+1. **Add/edit tags** in your prompt files using the format above
+2. **Click "Sync from Prompt"** in the PDD Connect Architecture page (or call the API endpoint)
+3. **Tags automatically update** `architecture.json` with your changes
+4. **Architecture visualization** reflects the updated dependencies and interfaces

-
-litellm.set_verbose = True # see the raw request made by litellm
+Prompts are the **source of truth** - tags in prompt files override what's in `architecture.json`. This aligns with PDD's core philosophy that prompts, not code or documentation, are authoritative.

-
-name: str
-date: str
-participants: list[str]
+### Validation

-
-
-
-
-
+Validation is **lenient**:
+- Missing tags are OK - only fields with tags get updated
+- Malformed XML/JSON is skipped without blocking sync
+- Circular dependencies are detected and prevent invalid updates
+- Missing dependency files generate warnings but don't block sync

-
-```
+### Best Practices

-
+**Keep `<pdd-reason>` concise** (60-120 chars)
+- Good: "Provides unified LLM invocation across all PDD operations."
+- Too long: "This module exists because we needed a way to call different LLM providers through a unified interface that supports both streaming and non-streaming modes while also handling rate limiting and retry logic..."

-
-
-
-
-model: "gemini/gemini-1.5-flash"
-api_key: os.environ/GEMINI_API_KEY
+**Use prompt filenames for dependencies**, not module names
+- Correct: `<pdd-dependency>llm_invoke_python.prompt</pdd-dependency>`
+- Wrong: `<pdd-dependency>pdd.llm_invoke</pdd-dependency>`
+- Wrong: `<pdd-dependency>context/example.py</pdd-dependency>`

-
-
-
+**Validate interface JSON before committing**
+- Use a JSON validator to check syntax
+- Ensure `type` field matches one of: `module`, `cli`, `command`, `frontend`
+- Include required nested keys (`functions`, `commands`, or `pages`)

-
-
-
-
-```
+**Run "Sync All" after bulk prompt updates**
+- If you've edited multiple prompts, sync all at once
+- Review the validation results for circular dependencies
+- Fix any warnings before committing changes

-
-
-```bash
-curl http://0.0.0.0:4000/v1/chat/completions \
--H "Content-Type: application/json" \
--H "Authorization: Bearer $LITELLM_API_KEY" \
--d '{
-"model": "gemini-1.5-flash",
-"messages": [\
-{"role": "system", "content": "Extract the event information."},\
-{"role": "user", "content": "Alice and Bob are going to a science fair on Friday."},\
-],
-"response_format": {
-"type": "json_schema",
-"json_schema": {
-"name": "math_reasoning",
-"schema": {
-"type": "object",
-"properties": {
-"steps": {
-"type": "array",
-"items": {
-"type": "object",
-"properties": {
-"explanation": { "type": "string" },
-"output": { "type": "string" }
-},
-"required": ["explanation", "output"],
-"additionalProperties": false
-}
-},
-"final_answer": { "type": "string" }
-},
-"required": ["steps", "final_answer"],
-"additionalProperties": false
-},
-"strict": true
-}
-},
-}'
-```
+### Relationship to Other Tags

--
--
-
-
-- [Pass in 'json\_schema'](https://docs.litellm.ai/docs/completion/json_mode#pass-in-json_schema)
-- [Validate JSON Schema](https://docs.litellm.ai/docs/completion/json_mode#validate-json-schema)
+**`<pdd-dependency>` vs `<include>`**:
+- `<pdd-dependency>`: Declares architectural dependency (updates `architecture.json`)
+- `<include>`: Injects content into prompt for LLM context (does NOT affect architecture)
+- Use both when appropriate - they serve different purposes

-
-
+**`<pdd-*>` tags vs `<pdd>` comments**:
+- `<pdd-reason>`, `<pdd-interface>`, `<pdd-dependency>`: Metadata tags (processed by sync tool)
+- `<pdd>...</pdd>`: Human-only comments (removed by preprocessor, never reach LLM)
+- Both are valid PDD directives with different purposes

-
->
-> `<shell>` and `<web>` introduce **non-determinism**:
-> - `<shell>` output varies by environment (different machines, different results)
-> - `<web>` content changes over time (same URL, different content)
->
-> **Impact:** Same prompt file → different generations on different machines/times
->
-> **Prefer instead:** Capture output to a static file, then `[Error processing include: ` that file. This ensures reproducible regeneration.
+### Example: Complete Prompt with Metadata Tags

-
+See `docs/examples/prompt_with_metadata.prompt` for a full example showing all three metadata tags in context.

 ---

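Pulling the metadata tags from the hunk above together, a prompt header combining them with a human-only `<pdd>` comment might look like the sketch below. The module name, signature, and dependency are hypothetical, and the sketch assumes the doubled braces above are template escaping, so a real prompt file uses plain JSON braces:

```text
<pdd-reason>Resolves output paths for generated code, tests, and examples.</pdd-reason>

<pdd-interface>
{
  "type": "module",
  "module": {
    "functions": [
      {"name": "resolve_output_path", "signature": "(basename, language)", "returns": "Path"}
    ]
  }
}
</pdd-interface>

<pdd-dependency>get_language_python.prompt</pdd-dependency>

<pdd>Metadata above is synced to architecture.json; everything below is regular prompt content.</pdd>
```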
@@ -553,11 +403,11 @@ Use these tags sparingly. When you must use them, prefer stable commands with bo

 ### Shared Preamble for Consistency

-Use a shared include (e.g., `<include>context/project_preamble.prompt
+Use a shared include (e.g., `<include>context/project_preamble.prompt</include>`) at the top of every prompt. You should create this file in your project's `context/` directory to define your "Constitution": consistent coding style (e.g., indentation, naming conventions), preferred linting rules, and forbidden libraries. This ensures all generated code speaks the same language without cluttering individual prompts.

 ### Automatic Update Propagation via Includes

-A key benefit of
+A key benefit of `<include>` directives is **automatic propagation**: when the included file changes, all prompts that reference it automatically reflect those changes on the next generation—without editing the prompts themselves.

 Use this pattern when:
 - **Authoritative documentation exists elsewhere** (e.g., a README that defines environment variables, API contracts, or configuration options). Include it rather than duplicating the content.
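As an illustration of the shared-preamble pattern described above, a `context/project_preamble.prompt` "Constitution" might contain rules like the following (the specific rules are illustrative, not prescribed by PDD):

```text
% Project conventions (shared preamble)
- Target Python 3.11; format with black defaults and 4-space indentation.
- Use type hints on all public functions; prefer dataclasses over ad-hoc dicts.
- Do not add new third-party dependencies without an explicit instruction.
- Raise specific exceptions; never swallow errors silently.
```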
@@ -687,7 +537,7 @@ This simplified example illustrates a minimal functional prompt:
 ```text
 % You are an expert Python engineer. Your goal is to write a function `get_extension` that returns the file extension for a given language.

-<include>context/python_preamble.prompt
+<include>context/python_preamble.prompt</include>

 % Inputs/Outputs
 Input: language (str), like "Python" or "Makefile".
@@ -706,7 +556,7 @@ This simplified example illustrates a minimal functional prompt:
 This style:
 - Declares role and outcome
 - Specifies IO, data sources, and steps
-- Uses an
+- Uses an `<include>` to pull a shared preamble

 ---

@@ -809,7 +659,7 @@ Include dependencies explicitly when:

 ```xml
 <billing_service>
-<include>context/billing_service_example.py
+<include>context/billing_service_example.py</include>
 </billing_service>
 ```

@@ -817,7 +667,7 @@ Include dependencies explicitly when:

 If you've successfully generated code that uses a dependency before, grounding often suffices—the usage pattern is already in the cloud database.

-**Prefer explicit
+**Prefer explicit `<include>` for:** External APIs, critical contracts, cross-team interfaces
 **Rely on grounding for:** Internal modules with established patterns

 ### Token Efficiency
@@ -932,9 +782,9 @@ Constraints:
 - Output a unified diff only

 Snippet:
-export function parseUserId(input: string) {
+export function parseUserId(input: string) {{
 return input.trim().split(":")[1];
-}
+}}
 ```

 PDD‑style prompt (source of truth):
@@ -956,7 +806,7 @@ PDD‑style prompt (source of truth):

 % Dependencies
 <logger>
-<include>context/logger_example.ts
+<include>context/logger_example.ts</include>
 </logger>

 % Instructions
@@ -1007,7 +857,7 @@ Key differences:

 ## Naming & Conventions (This Repo)

-- One prompt per module/file, named like `${BASENAME}_${LanguageOrFramework}.prompt` (see templates under `pdd/pdd/templates`).
+- One prompt per module/file, named like `${{BASENAME}}_${{LanguageOrFramework}}.prompt` (see templates under `pdd/pdd/templates`).
 - Follow codebase conventions from README.md for Python and TypeScript style.
 - Use curated examples under `context/` to encode interfaces and behaviors.

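Applied to the `get_extension` example used earlier in this guide, the naming convention might yield a layout like the sketch below; the exact directory names are an assumption:

```text
prompts/get_extension_python.prompt   # source of truth for the module
pdd/get_extension.py                  # generated module
tests/test_get_extension.py           # accumulated tests (test_get_extension_1.py, ...)
context/python_preamble.prompt        # shared preamble pulled in via <include>
```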
@@ -1017,6 +867,7 @@ Key differences:

 Think of prompts as your programming language. Keep them concise, explicit, and modular. Regenerate instead of patching, verify behavior with accumulating tests, and continuously back‑propagate implementation learnings into your prompts. That discipline is what converts maintenance from an endless patchwork into a compounding system of leverage.

+</pdd_prompting_guide>

 % Files to Read

@@ -1026,8 +877,11 @@ Think of prompts as your programming language. Keep them concise, explicit, and
 2. **Code file (modified)**: {code_path}
 - This contains the user's modifications that the prompt needs to capture

-3. **Test files
--
+3. **Test files**: {test_paths}
+- If tests are listed above, read ALL of them
+- If "No tests were found", search for test files yourself:
+- Look for files named `test_<module>*.py` in sibling `tests/` directory (../tests/)
+- Check common locations: `tests/`, `__tests__/`, `test/`
 - Behaviors verified by tests DON'T need to be explicitly specified in the prompt
 - Tests accumulate across numbered files (test_module.py, test_module_1.py, etc.)

@@ -1066,6 +920,6 @@ Think of prompts as your programming language. Keep them concise, explicit, and

 Write the updated prompt directly to: {prompt_path}

-If you create new shared include files, write them to the `context/` directory.
+If you create new shared include files, write them to the `context/` or equivalent directory. It may be different for different projects so you might need to explore the project to find the correct directory.

 If the prompt is already optimal, you may leave it unchanged.