pdd-cli 0.0.56__py3-none-any.whl → 0.0.57__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pdd-cli might be problematic. Click here for more details.

pdd/preprocess.py CHANGED
@@ -48,7 +48,9 @@ def process_backtick_includes(text: str, recursive: bool) -> str:
48
48
  return f"```{content}```"
49
49
  except FileNotFoundError:
50
50
  console.print(f"[bold red]Warning:[/bold red] File not found: {file_path}")
51
- return match.group(0)
51
+ # First pass (recursive=True): leave the tag so a later env expansion can resolve it
52
+ # Second pass (recursive=False): replace with a visible placeholder
53
+ return match.group(0) if recursive else f"```[File not found: {file_path}]```"
52
54
  except Exception as e:
53
55
  console.print(f"[bold red]Error processing include:[/bold red] {str(e)}")
54
56
  return f"```[Error processing include: {file_path}]```"
@@ -62,9 +64,9 @@ def process_backtick_includes(text: str, recursive: bool) -> str:
62
64
  def process_xml_tags(text: str, recursive: bool) -> str:
63
65
  text = process_pdd_tags(text)
64
66
  text = process_include_tags(text, recursive)
65
-
66
- text = process_shell_tags(text)
67
- text = process_web_tags(text)
67
+ text = process_include_many_tags(text, recursive)
68
+ text = process_shell_tags(text, recursive)
69
+ text = process_web_tags(text, recursive)
68
70
  return text
69
71
 
70
72
  def process_include_tags(text: str, recursive: bool) -> str:
@@ -81,7 +83,9 @@ def process_include_tags(text: str, recursive: bool) -> str:
81
83
  return content
82
84
  except FileNotFoundError:
83
85
  console.print(f"[bold red]Warning:[/bold red] File not found: {file_path}")
84
- return f"[File not found: {file_path}]"
86
+ # First pass (recursive=True): leave the tag so a later env expansion can resolve it
87
+ # Second pass (recursive=False): replace with a visible placeholder
88
+ return match.group(0) if recursive else f"[File not found: {file_path}]"
85
89
  except Exception as e:
86
90
  console.print(f"[bold red]Error processing include:[/bold red] {str(e)}")
87
91
  return f"[Error processing include: {file_path}]"
@@ -101,10 +105,13 @@ def process_pdd_tags(text: str) -> str:
101
105
  return "This is a test "
102
106
  return processed
103
107
 
104
- def process_shell_tags(text: str) -> str:
108
+ def process_shell_tags(text: str, recursive: bool) -> str:
105
109
  pattern = r'<shell>(.*?)</shell>'
106
110
  def replace_shell(match):
107
111
  command = match.group(1).strip()
112
+ if recursive:
113
+ # Defer execution until after env var expansion
114
+ return match.group(0)
108
115
  console.print(f"Executing shell command: [cyan]{escape(command)}[/cyan]")
109
116
  try:
110
117
  result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
@@ -118,10 +125,13 @@ def process_shell_tags(text: str) -> str:
118
125
  return f"[Shell execution error: {str(e)}]"
119
126
  return re.sub(pattern, replace_shell, text, flags=re.DOTALL)
120
127
 
121
- def process_web_tags(text: str) -> str:
128
+ def process_web_tags(text: str, recursive: bool) -> str:
122
129
  pattern = r'<web>(.*?)</web>'
123
130
  def replace_web(match):
124
131
  url = match.group(1).strip()
132
+ if recursive:
133
+ # Defer network operations until after env var expansion
134
+ return match.group(0)
125
135
  console.print(f"Scraping web content from: [cyan]{url}[/cyan]")
126
136
  try:
127
137
  try:
@@ -144,6 +154,34 @@ def process_web_tags(text: str) -> str:
144
154
  return f"[Web scraping error: {str(e)}]"
145
155
  return re.sub(pattern, replace_web, text, flags=re.DOTALL)
146
156
 
157
def process_include_many_tags(text: str, recursive: bool) -> str:
    """Process <include-many> blocks whose inner content is a comma- or newline-separated
    list of file paths (typically provided via variables after env expansion)."""
    def replace_many(match):
        if recursive:
            # Wait for env expansion to materialize the list
            return match.group(0)
        # Split by newlines or commas
        pieces = []
        for line in match.group(1).split('\n'):
            pieces.extend(segment.strip() for segment in line.split(','))
        rendered: list[str] = []
        for candidate in (item for item in pieces if item):
            try:
                resolved = get_file_path(candidate)
                console.print(f"Including (many): [cyan]{resolved}[/cyan]")
                with open(resolved, 'r', encoding='utf-8') as handle:
                    rendered.append(handle.read())
            except FileNotFoundError:
                console.print(f"[bold red]Warning:[/bold red] File not found: {candidate}")
                rendered.append(f"[File not found: {candidate}]")
            except Exception as e:
                console.print(f"[bold red]Error processing include-many:[/bold red] {str(e)}")
                rendered.append(f"[Error processing include: {candidate}]")
        return "\n".join(rendered)
    return re.sub(r'<include-many>(.*?)</include-many>', replace_many, text, flags=re.DOTALL)
184
+
147
185
  def double_curly(text: str, exclude_keys: Optional[List[str]] = None) -> str:
148
186
  if exclude_keys is None:
149
187
  exclude_keys = []
@@ -1,11 +1,17 @@
1
- % You are an expert Software Engineer. Your goal is to extract the closest matching substring described by the prompt's output to be output in JSON format.
2
-
3
- % Here is the llm_output to parse: <llm_output>{llm_output}</llm_output>
4
-
5
- % When extracting the closest matching substring from llm_output, consider and correct the following for the extracted code:
6
- - Should be a substring of prompt_file.
7
- Should be a substring that closely matches code_str in content.
8
-
9
- % Output a JSON object with the following keys:
10
- - 'explanation': String explanation of why this prompt_line matches the code_str and explain any errors detected in the code.
11
- - 'prompt_line': String containing the closest matching verbatim substring of the prompt_file that matches code_str in content. This is not the line number.
1
+ % You are an extremely literal parser. The LLM output below follows this structure:
2
+ % <analysis> ... </analysis>
3
+ % <verbatim_prompt_line>
4
+ % <<EXACT SUBSTRING FROM PROMPT_FILE>>
5
+ % </verbatim_prompt_line>
6
+ %
7
+ % Task
8
+ % • Extract the text between <verbatim_prompt_line> and </verbatim_prompt_line> (if present).
9
+ % Output ONLY JSON with the keys:
10
+ % - "prompt_line": the exact substring between the tags. Do not alter whitespace or characters except for JSON escaping.
11
+ % - "explanation": short confirmation (<=120 characters) that the substring was copied verbatim, or describe why extraction failed.
12
+ % • If the tags are missing or empty, set "prompt_line" to "" and explain the issue.
13
+ % • Do not wrap the JSON in Markdown. No commentary, no additional keys.
14
+ %
15
+ <llm_output>
16
+ {llm_output}
17
+ </llm_output>
@@ -1,30 +1,33 @@
1
- % Imagine you're an expert Python Software Engineer. Your goal is to find the part of the .prompt file. It will take in three arguments, the text of the .prompt file, the text of the code file, and the line that the debugger is on in the code file. Your task is to find the equivalent line in the .prompt file that matches with the line in the code file.
2
-
3
- % Here are the inputs and outputs of the prompt:
4
- Input:
5
- `code_file` (str) - A string that contains the text of the code file.
6
- `code_str` (str) - A substring of code_file that represents the line that the debugger is on in the code_file.
7
- `prompt_file` (str) - A string that contains the text of the .prompt file.
8
- Output:
9
- `prompt_line` (str) - An string that represents the equivalent line in the .prompt file that matches with the code_str line in the code file.
10
-
11
- % Here is the code_file to reference:
12
-
1
+ % You are a highly accurate Python Software Engineer. Your job is to locate the exact line (or smallest excerpt) in the prompt file that produced the current line in the generated code.
2
+
3
+ % Inputs
4
+ code_file (str) : full contents of the generated code file
5
+ code_str (str) : the single line from the code file currently under inspection
6
+ prompt_file (str) : full contents of the originating prompt file
7
+
8
+ % Rules
9
+ 1. Identify the minimal substring in prompt_file whose wording most directly corresponds to code_str. Copy it VERBATIM.
10
+ 2. Do not paraphrase, summarize, or reformat; the substring must appear exactly in prompt_file.
11
+ 3. If multiple lines apply, choose the most specific line or snippet (prefer the shortest exact match).
12
+ 4. Provide a short explanation of why the substring matches code_str.
13
+
14
+ % Output format (MUST follow exactly; no additional text)
15
+ <analysis>
16
+ Explain your reasoning here in plain text (no JSON). Reference the file sections you compared.
17
+ </analysis>
18
+ <verbatim_prompt_line>
19
+ <<PASTE THE EXACT SUBSTRING FROM prompt_file HERE>>
20
+ </verbatim_prompt_line>
21
+
22
+ % Reference materials
13
23
  <code_file>
14
- {CODE_FILE}
24
+ {CODE_FILE}
15
25
  </code_file>
16
26
 
17
- % Here is the code_str to reference:
18
-
19
27
  <code_str>
20
- {CODE_STR}
28
+ {CODE_STR}
21
29
  </code_str>
22
30
 
23
- % Here is the prompt_file to reference:
24
-
25
31
  <prompt_file>
26
- {PROMPT_FILE}
32
+ {PROMPT_FILE}
27
33
  </prompt_file>
28
-
29
- % To generate the prompt_line, find a substring of prompt_file that matches code_str, which is a substring of code_file.
30
-
@@ -0,0 +1,264 @@
1
+ from __future__ import annotations
2
+
3
+ import re
4
+ import shutil
5
+ from collections.abc import Iterable as IterableABC
6
+ from dataclasses import dataclass, field
7
+ from pathlib import Path
8
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
9
+
10
+ from importlib.resources import as_file
11
+
12
+ try:
13
+ from importlib.resources import files as pkg_files
14
+ except ImportError: # pragma: no cover
15
+ # Fallback for Python < 3.11 if needed.
16
+ from importlib_resources import files as pkg_files # type: ignore[attr-defined]
17
+
18
+
19
+ _FRONT_MATTER_PATTERN = re.compile(r"^---\s*\r?\n(.*?)\r?\n---\s*\r?\n?", re.DOTALL)
20
+ _SOURCE_PRIORITY = {"packaged": 0, "project": 1}
21
+
22
+
23
@dataclass
class TemplateMeta:
    """Metadata describing one prompt template, parsed from YAML front matter.

    ``name`` and ``path`` are required; every other attribute has a benign
    default so partially specified front matter still yields a usable record.
    """

    name: str
    path: Path
    description: str = ""
    version: str = ""
    tags: List[str] = field(default_factory=list)
    language: str = ""
    output: str = ""
    variables: Dict[str, Any] = field(default_factory=dict)
    usage: Dict[str, Any] = field(default_factory=dict)
    discover: Dict[str, Any] = field(default_factory=dict)
    output_schema: Dict[str, Any] = field(default_factory=dict)
    notes: str = ""
    source: str = "packaged"

    @property
    def alias(self) -> str:
        """Short lookup key: the template file's stem (filename minus suffix)."""
        return self.path.stem
42
+
43
+
44
def _safe_load_yaml(text: str) -> Optional[Dict[str, Any]]:
    """Parse *text* as YAML, returning a mapping or ``None`` on any parse issue.

    Raises:
        RuntimeError: if PyYAML is not installed at all.
    """
    try:
        import yaml  # type: ignore
    except ImportError as exc:  # pragma: no cover - PyYAML is an install requirement
        raise RuntimeError("PyYAML is required to parse template front matter") from exc

    try:
        loaded = yaml.safe_load(text) or {}
    except Exception:
        # Malformed YAML is treated as "no front matter", not an error.
        return None
    return loaded if isinstance(loaded, dict) else None
57
+
58
+
59
def _parse_front_matter(text: str) -> Tuple[Optional[Dict[str, Any]], str]:
    """Split *text* into ``(front_matter_dict, body)``; ``(None, text)`` when absent."""
    found = _FRONT_MATTER_PATTERN.match(text)
    if found is None:
        return None, text
    body = text[found.end():]
    return _safe_load_yaml(found.group(1)), body
67
+
68
+
69
def _normalize_tags(tags: Any) -> List[str]:
    """Coerce a raw YAML ``tags`` value into a list of lower-case strings.

    Accepts ``None`` (→ empty), a bare string (→ one-element list), or any
    iterable of values; ``None`` entries inside an iterable are dropped.
    Anything else yields an empty list.
    """
    if tags is None:
        return []
    if isinstance(tags, str):
        return [tags.lower()]
    if isinstance(tags, IterableABC):
        return [str(tag).lower() for tag in tags if tag is not None]
    return []
82
+
83
+
84
def _ensure_mapping(value: Any) -> Dict[str, Any]:
    """Return a shallow copy of *value* when it is a dict, otherwise an empty dict."""
    return dict(value) if isinstance(value, dict) else {}
88
+
89
+
90
def _normalize_meta(raw: Dict[str, Any], path: Path, source: str) -> TemplateMeta:
    """Build a :class:`TemplateMeta` from raw front matter, defaulting from *path*."""
    def _text(key: str, fallback: str = "") -> str:
        # Falsy values (None, "", 0) fall back, mirroring `str(raw.get(k) or fb)`.
        return str(raw.get(key) or fallback)

    notes_raw = raw.get("notes")
    return TemplateMeta(
        name=_text("name", path.stem),
        path=path.resolve(),
        description=_text("description"),
        version=_text("version"),
        tags=_normalize_tags(raw.get("tags")),
        language=_text("language"),
        output=_text("output"),
        variables=_ensure_mapping(raw.get("variables")),
        usage=_ensure_mapping(raw.get("usage")),
        discover=_ensure_mapping(raw.get("discover")),
        output_schema=_ensure_mapping(raw.get("output_schema")),
        notes="" if notes_raw is None else str(notes_raw),
        source=source,
    )
115
+
116
+
117
def _iter_project_templates() -> Iterable[Path]:
    """Yield every ``*.prompt`` file under ``./prompts`` in the working directory."""
    prompts_dir = Path.cwd() / "prompts"
    if not prompts_dir.exists():
        return ()
    return (candidate for candidate in prompts_dir.rglob("*.prompt") if candidate.is_file())
122
+
123
+
124
def _iter_packaged_templates() -> Iterable[Path]:
    """Collect the ``*.prompt`` files shipped inside the installed ``pdd`` package.

    Returns an empty tuple when the package or its ``templates`` directory is
    unavailable. NOTE(review): paths are captured inside the ``as_file``
    context, matching the original — for zip-packaged resources the extracted
    file may not outlive the context; confirmed intentional upstream? TODO.
    """
    try:
        pkg_root = pkg_files("pdd").joinpath("templates")
    except ModuleNotFoundError:  # pragma: no cover - package missing
        return ()
    if not pkg_root.is_dir():
        return ()

    collected: List[Path] = []
    for entry in pkg_root.rglob("*.prompt"):  # type: ignore[attr-defined]
        try:
            with as_file(entry) as concrete:
                candidate = Path(concrete)
                # is_file() is checked inside the context, as in the original.
                if candidate.is_file():
                    collected.append(candidate)
        except FileNotFoundError:
            continue
    return collected
142
+
143
+
144
def _load_meta_from_path(path: Path, source: str) -> Optional[TemplateMeta]:
    """Read *path* and return its parsed metadata; ``None`` if unreadable or headerless."""
    try:
        raw_text = path.read_text(encoding="utf-8")
    except OSError:
        return None
    front_matter, _ = _parse_front_matter(raw_text)
    return _normalize_meta(front_matter, path, source) if front_matter else None
153
+
154
+
155
def _index_templates() -> Tuple[Dict[str, TemplateMeta], Dict[str, TemplateMeta]]:
    """Scan packaged then project templates and index them.

    Returns ``(by_name, lookup)`` where ``by_name`` maps canonical template
    names to metadata (project templates override packaged ones of the same
    name) and ``lookup`` additionally maps path-stem aliases, provided the
    alias does not shadow a higher-priority entry.
    """
    by_name: Dict[str, TemplateMeta] = {}
    priority: Dict[str, int] = {}

    def register(meta: TemplateMeta) -> None:
        # An entry only replaces an existing one at equal or higher priority.
        incoming = _SOURCE_PRIORITY.get(meta.source, 0)
        if incoming < priority.get(meta.name, -1):
            return
        by_name[meta.name] = meta
        priority[meta.name] = incoming

    # Packaged first, project second, so project wins on ties.
    for source_name, iterate in (("packaged", _iter_packaged_templates),
                                 ("project", _iter_project_templates)):
        for path in iterate():
            meta = _load_meta_from_path(Path(path), source_name)
            if meta:
                register(meta)

    lookup = dict(by_name)
    lookup_priority = priority.copy()

    for meta in by_name.values():
        meta_priority = priority.get(meta.name, 0)
        if lookup_priority.get(meta.alias, -1) <= meta_priority:
            lookup[meta.alias] = meta
            lookup_priority[meta.alias] = meta_priority

    return by_name, lookup
189
+
190
+
191
def _meta_to_payload(meta: TemplateMeta) -> Dict[str, Any]:
    """Serialize a TemplateMeta into a plain dict, copying mutable members defensively."""
    payload: Dict[str, Any] = {
        "name": meta.name,
        "path": str(meta.path),
        "description": meta.description,
        "version": meta.version,
        "tags": list(meta.tags),
        "language": meta.language,
        "output": meta.output,
        "variables": dict(meta.variables),
        "usage": dict(meta.usage),
        "discover": dict(meta.discover),
        "output_schema": dict(meta.output_schema),
        "notes": meta.notes,
    }
    return payload
206
+
207
+
208
def list_templates(filter_tag: Optional[str] = None) -> List[Dict[str, Any]]:
    """Return brief summaries of known templates, sorted case-insensitively by name.

    Args:
        filter_tag: when given, only templates carrying this tag
            (case-insensitive) are included.
    """
    by_name, _ = _index_templates()
    wanted = filter_tag.lower() if filter_tag else None
    summaries = [
        {
            "name": meta.name,
            "path": str(meta.path),
            "description": meta.description,
            "version": meta.version,
            "tags": list(meta.tags),
        }
        for meta in by_name.values()
        if not wanted or wanted in meta.tags
    ]
    summaries.sort(key=lambda entry: entry["name"].lower())
    return summaries
224
+
225
+
226
def load_template(name: str) -> Dict[str, Any]:
    """Resolve *name* (canonical name or path-stem alias) to a payload dict.

    Raises:
        FileNotFoundError: when no template matches *name*.
    """
    _, lookup = _index_templates()
    meta = lookup.get(name)
    if meta is None:
        raise FileNotFoundError(f"Template '{name}' not found.")
    return _meta_to_payload(meta)
232
+
233
+
234
def show_template(name: str) -> Dict[str, Any]:
    """Return a structured view of one template: a summary plus detail sections.

    Raises:
        FileNotFoundError: propagated from :func:`load_template` for unknown names.
    """
    meta = load_template(name)
    summary: Dict[str, Any] = {"name": meta["name"], "path": meta["path"]}
    # Tags default to an empty list; every other summary field defaults to "".
    for key in ("description", "version", "tags", "language", "output"):
        summary[key] = meta.get(key, [] if key == "tags" else "")
    return {
        "summary": summary,
        "variables": meta.get("variables", {}),
        "usage": meta.get("usage", {}),
        "discover": meta.get("discover", {}),
        "output_schema": meta.get("output_schema", {}),
        "notes": meta.get("notes", ""),
    }
253
+
254
+
255
def copy_template(name: str, dest_dir: str) -> str:
    """Copy the prompt file of template *name* into *dest_dir*.

    Creates *dest_dir* (including parents) when needed and returns the
    absolute path of the copied file.

    Raises:
        FileNotFoundError: when the template is unknown, or its backing file
            has disappeared from disk.
    """
    meta = load_template(name)
    src = Path(meta["path"])
    if not src.exists():
        raise FileNotFoundError(f"Template '{name}' file is missing at {src}")
    destination = Path(dest_dir)
    destination.mkdir(parents=True, exist_ok=True)
    target = destination / src.name
    shutil.copy2(src, target)
    return str(target.resolve())
@@ -0,0 +1,183 @@
1
+ ---
2
+ name: architecture/architecture_json
3
+ description: Unified architecture template for multiple tech stacks
4
+ version: 1.0.0
5
+ tags: [architecture, template, json]
6
+ language: json
7
+ output: architecture.json
8
+ variables:
9
+ APP_NAME:
10
+ required: false
11
+ type: string
12
+ description: Optional app name for context.
13
+ example: Shop
14
+ PRD_FILE:
15
+ required: true
16
+ type: path
17
+ description: Primary product requirements document (PRD) describing scope and goals.
18
+ example_paths: [PRD.md, docs/specs.md, docs/product/prd.md]
19
+ example_content: |
20
+ Title: Order Management MVP
21
+ Goals: Enable customers to create and track orders end-to-end.
22
+ Key Features:
23
+ - Create Order: id, user_id, items[], total, status
24
+ - View Order: details page with status timeline
25
+ - List Orders: filter by status, date, user
26
+ Non-Functional Requirements:
27
+ - P95 latency < 300ms for read endpoints
28
+ - Error rate < 0.1%
29
+ TECH_STACK_FILE:
30
+ required: false
31
+ type: path
32
+ description: Tech stack overview (languages, frameworks, infrastructure, and tools).
33
+ example_paths: [docs/tech_stack.md, docs/architecture/stack.md]
34
+ example_content: |
35
+ Backend: Python (FastAPI), Postgres (SQLAlchemy), PyTest
36
+ Frontend: Next.js (TypeScript), shadcn/ui, Tailwind CSS
37
+ API: REST
38
+ Auth: Firebase Auth (GitHub Device Flow), JWT for API
39
+ Infra: Vercel (frontend), Cloud Run (backend), Cloud SQL (Postgres)
40
+ Observability: OpenTelemetry traces, Cloud Logging
41
+ DOC_FILES:
42
+ required: false
43
+ type: list
44
+ description: Additional documentation files (comma/newline-separated).
45
+ example_paths: [docs/ux.md, docs/components.md]
46
+ example_content: |
47
+ Design overview, patterns and constraints
48
+ INCLUDE_FILES:
49
+ required: false
50
+ type: list
51
+ description: Specific source files to include (comma/newline-separated).
52
+ example_paths: [src/app.py, src/api.py, frontend/app/layout.tsx, frontend/app/page.tsx]
53
+ usage:
54
+ generate:
55
+ - name: Minimal (PRD only)
56
+ command: pdd generate -e PRD_FILE=docs/specs.md --output architecture.json pdd/templates/architecture/architecture_json.prompt
57
+ - name: With tech stack overview
58
+ command: pdd generate -e PRD_FILE=docs/specs.md -e TECH_STACK_FILE=docs/tech_stack.md --output architecture.json pdd/templates/architecture/architecture_json.prompt
59
+
60
+ discover:
61
+ enabled: false
62
+ max_per_pattern: 5
63
+ max_total: 10
64
+
65
+ output_schema:
66
+ type: array
67
+ items:
68
+ type: object
69
+ required: [reason, description, dependencies, priority, filename]
70
+ properties:
71
+ reason: { type: string }
72
+ description: { type: string }
73
+ dependencies: { type: array, items: { type: string } }
74
+ priority: { type: integer, minimum: 1 }
75
+ filename: { type: string }
76
+ tags: { type: array, items: { type: string } }
77
+ interface:
78
+ type: object
79
+ properties:
80
+ type: { enum: [component, page, module, api, graphql, cli, job, message, config] }
81
+ component: { type: object }
82
+ page:
83
+ type: object
84
+ properties:
85
+ route: { type: string }
86
+ params: { type: array, items: { type: object } }
87
+ dataSources: { type: array, items: { type: object } }
88
+ layout: { type: object }
89
+ module: { type: object }
90
+ api: { type: object }
91
+ graphql: { type: object }
92
+ cli: { type: object }
93
+ job: { type: object }
94
+ message: { type: object }
95
+ config: { type: object }
96
+ ---
97
+
98
+ Purpose: Produce an architecture JSON that enumerates prompt files to generate code files for the project.
99
+
100
+ <PRD_FILE><include>${PRD_FILE}</include></PRD_FILE>
101
+ <TECH_STACK_FILE><include>${TECH_STACK_FILE}</include></TECH_STACK_FILE>
102
+ <DOC_FILES><include-many>${DOC_FILES}</include-many></DOC_FILES>
103
+
104
+ <INCLUDE_FILES><include-many>${INCLUDE_FILES}</include-many></INCLUDE_FILES>
105
+
106
+ INSTRUCTIONS:
107
+ - Use only the facts from the included documents and files. Do not invent technologies or filenames.
108
+ - If TECH_STACK_FILE is absent, infer a reasonable tech stack from the PRD and included files; state key assumptions within each item's description.
109
+ - Output a single top-level JSON array of items. Each item must include:
110
+ - reason, description, dependencies (filenames), priority (1 = highest), filename, optional tags.
111
+ - interface: include only the applicable sub-object (component, page, module, api, graphql, cli, job, message, or config). Omit all non-applicable sub-objects entirely.
112
+ - Valid JSON only. No comments or trailing commas.
113
+
114
+ OUTPUT FORMAT (authoritative):
115
+ ```json
116
+ {
117
+ "type": "array",
118
+ "items": {
119
+ "type": "object",
120
+ "required": ["reason", "description", "dependencies", "priority", "filename"],
121
+ "properties": {
122
+ "reason": {"type": "string"},
123
+ "description": {"type": "string"},
124
+ "dependencies": {"type": "array", "items": {"type": "string"}},
125
+ "priority": {"type": "integer", "minimum": 1},
126
+ "filename": {"type": "string"},
127
+ "tags": {"type": "array", "items": {"type": "string"}},
128
+ "interface": {
129
+ "type": "object",
130
+ "properties": {
131
+ "type": {"enum": ["component", "page", "module", "api", "graphql", "cli", "job", "message", "config"]},
132
+ "component": {"type": "object"},
133
+ "page": {"type": "object", "properties": {"route": {"type": "string"}}},
134
+ "module": {"type": "object"},
135
+ "api": {"type": "object"},
136
+ "graphql": {"type": "object"},
137
+ "cli": {"type": "object"},
138
+ "job": {"type": "object"},
139
+ "message": {"type": "object"},
140
+ "config": {"type": "object"}
141
+ }
142
+ }
143
+ }
144
+ }
145
+ }
146
+ ```
147
+
148
+ INTERFACE TYPES (emit only applicable):
149
+ - page: route (string), params? (array), dataSources? (array), layout? (object)
150
+ - component: props (array of {name, type, required?}), emits? (array), context? (array)
151
+ - module: functions (array of {name, signature, returns?, errors?, sideEffects?})
152
+ - api: endpoints (array of {method, path, auth?, requestSchema?, responseSchema?, errors?})
153
+ - graphql: sdl? (string) or operations {queries?[], mutations?[], subscriptions?[]}
154
+ - cli: commands (array of {name, args?[], flags?[], exitCodes?[]}), io? {stdin?, stdout?}
155
+ - job: trigger {schedule? | event?}, inputs? (array), outputs? (array), retryPolicy? (string)
156
+ - message: topics (array of {name, direction: "publish"|"subscribe", schema?, qos?})
157
+ - config: keys (array of {name, type, default?, required?, source: "env"|"file"|"secret"})
158
+
159
+ FILENAME CONVENTIONS:
160
+ - The "filename" field is the prompt filename to generate (not the code file). Use PDD convention: <base>_<LangOrFramework>.prompt where <LangOrFramework> matches the tech stack.
161
+ - Examples (adapt to your stack):
162
+ - Next.js (TypeScript React): page_TypeScriptReact.prompt -> generates page.tsx; layout_TypeScriptReact.prompt -> layout.tsx
163
+ - Python backend: api_Python.prompt -> api.py; orders_Python.prompt -> orders.py
164
+ - Choose descriptive <base> names (e.g., orders_page, orders_api) and keep names consistent across dependencies.
165
+
166
+ DEPENDENCY RULES:
167
+ - The "dependencies" array must list other items by their prompt filenames (the "filename" values), not code filenames.
168
+ - Do not reference files that are not part of this array unless they were explicitly provided via INCLUDE_FILES/DOC_FILES.
169
+ - Avoid cycles; if a cycle is necessary, justify it in the description and clarify initialization order.
170
+
171
+ PRIORITY AND ORDERING:
172
+ - Use unique integer priorities starting at 1 without gaps (1,2,3,...).
173
+ - Sort the top-level array by ascending priority.
174
+
175
+ TAGS (optional):
176
+ - Use short, lower-case tags for slicing (e.g., ["frontend","nextjs"], ["backend","api"], ["config"]).
177
+
178
+ CONTENT GUIDANCE:
179
+ - Descriptions must be architectural and actionable: responsibilities, interfaces, error handling, cross-cutting concerns.
180
+ - For API items, outline endpoints (method, path, auth) and high-level request/response shapes.
181
+ - For page/component items, include the route, key props, and data sources.
182
+
183
+ DO NOT INCLUDE the schema or these conventions in the output; return only the JSON array.