pdd-cli 0.0.55__py3-none-any.whl → 0.0.57__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pdd-cli might be problematic; see the registry's advisory page for more details.

@@ -209,7 +209,7 @@ def get_extension(language: str) -> str:
209
209
  return extensions.get(language.lower(), language.lower())
210
210
 
211
211
 
212
- def get_pdd_file_paths(basename: str, language: str, prompts_dir: str = "prompts") -> Dict[str, Path]:
212
+ def get_pdd_file_paths(basename: str, language: str, prompts_dir: str = "prompts", context_override: Optional[str] = None) -> Dict[str, Path]:
213
213
  """Returns a dictionary mapping file types to their expected Path objects."""
214
214
  import logging
215
215
  logger = logging.getLogger(__name__)
@@ -233,7 +233,8 @@ def get_pdd_file_paths(basename: str, language: str, prompts_dir: str = "prompts
233
233
  force=True,
234
234
  quiet=True,
235
235
  command="sync",
236
- command_options={"basename": basename, "language": language}
236
+ command_options={"basename": basename, "language": language},
237
+ context_override=context_override
237
238
  )
238
239
 
239
240
  import logging
@@ -299,7 +300,8 @@ def get_pdd_file_paths(basename: str, language: str, prompts_dir: str = "prompts
299
300
  force=True, # Use force=True to avoid interactive prompts during sync
300
301
  quiet=True,
301
302
  command="sync", # Use sync command to get more tolerant path handling
302
- command_options={"basename": basename, "language": language}
303
+ command_options={"basename": basename, "language": language},
304
+ context_override=context_override
303
305
  )
304
306
 
305
307
  # For sync command, output_file_paths contains the configured paths
@@ -332,7 +334,8 @@ def get_pdd_file_paths(basename: str, language: str, prompts_dir: str = "prompts
332
334
  # Get example path using example command
333
335
  _, _, example_output_paths, _ = construct_paths(
334
336
  input_file_paths={"prompt_file": prompt_path, "code_file": code_path},
335
- force=True, quiet=True, command="example", command_options={}
337
+ force=True, quiet=True, command="example", command_options={},
338
+ context_override=context_override
336
339
  )
337
340
  example_path = Path(example_output_paths.get('output', f"{basename}_example.{get_extension(language)}"))
338
341
 
@@ -340,7 +343,8 @@ def get_pdd_file_paths(basename: str, language: str, prompts_dir: str = "prompts
340
343
  try:
341
344
  _, _, test_output_paths, _ = construct_paths(
342
345
  input_file_paths={"prompt_file": prompt_path, "code_file": code_path},
343
- force=True, quiet=True, command="test", command_options={}
346
+ force=True, quiet=True, command="test", command_options={},
347
+ context_override=context_override
344
348
  )
345
349
  test_path = Path(test_output_paths.get('output', f"test_{basename}.{get_extension(language)}"))
346
350
  except FileNotFoundError:
@@ -365,14 +369,16 @@ def get_pdd_file_paths(basename: str, language: str, prompts_dir: str = "prompts
365
369
  # Get configured directories by using construct_paths with just the prompt file
366
370
  _, _, example_output_paths, _ = construct_paths(
367
371
  input_file_paths={"prompt_file": prompt_path},
368
- force=True, quiet=True, command="example", command_options={}
372
+ force=True, quiet=True, command="example", command_options={},
373
+ context_override=context_override
369
374
  )
370
375
  example_path = Path(example_output_paths.get('output', f"{basename}_example.{get_extension(language)}"))
371
376
 
372
377
  try:
373
378
  _, _, test_output_paths, _ = construct_paths(
374
379
  input_file_paths={"prompt_file": prompt_path},
375
- force=True, quiet=True, command="test", command_options={}
380
+ force=True, quiet=True, command="test", command_options={},
381
+ context_override=context_override
376
382
  )
377
383
  test_path = Path(test_output_paths.get('output', f"test_{basename}.{get_extension(language)}"))
378
384
  except Exception:
@@ -768,7 +774,7 @@ def _check_example_success_history(basename: str, language: str) -> bool:
768
774
  return False
769
775
 
770
776
 
771
- def sync_determine_operation(basename: str, language: str, target_coverage: float, budget: float = 10.0, log_mode: bool = False, prompts_dir: str = "prompts", skip_tests: bool = False, skip_verify: bool = False) -> SyncDecision:
777
+ def sync_determine_operation(basename: str, language: str, target_coverage: float, budget: float = 10.0, log_mode: bool = False, prompts_dir: str = "prompts", skip_tests: bool = False, skip_verify: bool = False, context_override: Optional[str] = None) -> SyncDecision:
772
778
  """
773
779
  Core decision-making function for sync operations with skip flag awareness.
774
780
 
@@ -788,14 +794,14 @@ def sync_determine_operation(basename: str, language: str, target_coverage: floa
788
794
 
789
795
  if log_mode:
790
796
  # Skip locking for read-only analysis
791
- return _perform_sync_analysis(basename, language, target_coverage, budget, prompts_dir, skip_tests, skip_verify)
797
+ return _perform_sync_analysis(basename, language, target_coverage, budget, prompts_dir, skip_tests, skip_verify, context_override)
792
798
  else:
793
799
  # Normal exclusive locking for actual operations
794
800
  with SyncLock(basename, language) as lock:
795
- return _perform_sync_analysis(basename, language, target_coverage, budget, prompts_dir, skip_tests, skip_verify)
801
+ return _perform_sync_analysis(basename, language, target_coverage, budget, prompts_dir, skip_tests, skip_verify, context_override)
796
802
 
797
803
 
798
- def _perform_sync_analysis(basename: str, language: str, target_coverage: float, budget: float, prompts_dir: str = "prompts", skip_tests: bool = False, skip_verify: bool = False) -> SyncDecision:
804
+ def _perform_sync_analysis(basename: str, language: str, target_coverage: float, budget: float, prompts_dir: str = "prompts", skip_tests: bool = False, skip_verify: bool = False, context_override: Optional[str] = None) -> SyncDecision:
799
805
  """
800
806
  Perform the sync state analysis without locking concerns.
801
807
 
@@ -846,7 +852,7 @@ def _perform_sync_analysis(basename: str, language: str, target_coverage: float,
846
852
  # Check test failures (after crash verification check)
847
853
  if run_report.tests_failed > 0:
848
854
  # First check if the test file actually exists
849
- pdd_files = get_pdd_file_paths(basename, language, prompts_dir)
855
+ pdd_files = get_pdd_file_paths(basename, language, prompts_dir, context_override=context_override)
850
856
  test_file = pdd_files.get('test')
851
857
 
852
858
  # Only suggest 'fix' if test file exists
@@ -945,7 +951,7 @@ def _perform_sync_analysis(basename: str, language: str, target_coverage: float,
945
951
  )
946
952
 
947
953
  # 2. Analyze File State
948
- paths = get_pdd_file_paths(basename, language, prompts_dir)
954
+ paths = get_pdd_file_paths(basename, language, prompts_dir, context_override=context_override)
949
955
  current_hashes = calculate_current_hashes(paths)
950
956
 
951
957
  # 3. Implement the Decision Tree
@@ -1264,7 +1270,14 @@ def _perform_sync_analysis(basename: str, language: str, target_coverage: float,
1264
1270
  )
1265
1271
 
1266
1272
 
1267
- def analyze_conflict_with_llm(basename: str, language: str, fingerprint: Fingerprint, changed_files: List[str], prompts_dir: str = "prompts") -> SyncDecision:
1273
+ def analyze_conflict_with_llm(
1274
+ basename: str,
1275
+ language: str,
1276
+ fingerprint: Fingerprint,
1277
+ changed_files: List[str],
1278
+ prompts_dir: str = "prompts",
1279
+ context_override: Optional[str] = None,
1280
+ ) -> SyncDecision:
1268
1281
  """
1269
1282
  Resolve complex sync conflicts using an LLM.
1270
1283
 
@@ -1297,7 +1310,7 @@ def analyze_conflict_with_llm(basename: str, language: str, fingerprint: Fingerp
1297
1310
  )
1298
1311
 
1299
1312
  # 2. Gather file paths and diffs
1300
- paths = get_pdd_file_paths(basename, language, prompts_dir)
1313
+ paths = get_pdd_file_paths(basename, language, prompts_dir, context_override=context_override)
1301
1314
 
1302
1315
  # Generate diffs for changed files
1303
1316
  diffs = {}
pdd/sync_main.py CHANGED
@@ -192,6 +192,7 @@ def sync_main(
192
192
  log=True,
193
193
  verbose=verbose,
194
194
  quiet=quiet,
195
+ context_override=context_override,
195
196
  )
196
197
  return {}, 0.0, ""
197
198
 
@@ -280,6 +281,7 @@ def sync_main(
280
281
  review_examples=review_examples,
281
282
  local=local,
282
283
  context_config=resolved_config,
284
+ context_override=context_override,
283
285
  )
284
286
 
285
287
  lang_cost = sync_result.get("total_cost", 0.0)
@@ -330,4 +332,4 @@ def sync_main(
330
332
  aggregated_results["total_cost"] = total_cost
331
333
  aggregated_results["primary_model"] = primary_model
332
334
 
333
- return aggregated_results, total_cost, primary_model
335
+ return aggregated_results, total_cost, primary_model
pdd/sync_orchestration.py CHANGED
@@ -341,6 +341,7 @@ def sync_orchestration(
341
341
  review_examples: bool = False,
342
342
  local: bool = False,
343
343
  context_config: Optional[Dict[str, str]] = None,
344
+ context_override: Optional[str] = None,
344
345
  ) -> Dict[str, Any]:
345
346
  """
346
347
  Orchestrates the complete PDD sync workflow with parallel animation.
@@ -358,7 +359,7 @@ def sync_orchestration(
358
359
 
359
360
  # --- Initialize State and Paths ---
360
361
  try:
361
- pdd_files = get_pdd_file_paths(basename, language, prompts_dir)
362
+ pdd_files = get_pdd_file_paths(basename, language, prompts_dir, context_override=context_override)
362
363
  # Debug: Print the paths we got
363
364
  print(f"DEBUG: get_pdd_file_paths returned:")
364
365
  print(f" test: {pdd_files.get('test', 'N/A')}")
@@ -457,7 +458,7 @@ def sync_orchestration(
457
458
  "percentage": (budget_remaining / budget) * 100
458
459
  })
459
460
 
460
- decision = sync_determine_operation(basename, language, target_coverage, budget_remaining, False, prompts_dir, skip_tests, skip_verify)
461
+ decision = sync_determine_operation(basename, language, target_coverage, budget_remaining, False, prompts_dir, skip_tests, skip_verify, context_override)
461
462
  operation = decision.operation
462
463
 
463
464
  # Create log entry with decision info
@@ -0,0 +1,264 @@
1
+ from __future__ import annotations
2
+
3
+ import re
4
+ import shutil
5
+ from collections.abc import Iterable as IterableABC
6
+ from dataclasses import dataclass, field
7
+ from pathlib import Path
8
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
9
+
10
+ from importlib.resources import as_file
11
+
12
+ try:
13
+ from importlib.resources import files as pkg_files
14
+ except ImportError: # pragma: no cover
15
+ # Fallback for Python < 3.11 if needed.
16
+ from importlib_resources import files as pkg_files # type: ignore[attr-defined]
17
+
18
+
19
+ _FRONT_MATTER_PATTERN = re.compile(r"^---\s*\r?\n(.*?)\r?\n---\s*\r?\n?", re.DOTALL)
20
+ _SOURCE_PRIORITY = {"packaged": 0, "project": 1}
21
+
22
+
23
+ @dataclass
24
+ class TemplateMeta:
25
+ name: str
26
+ path: Path
27
+ description: str = ""
28
+ version: str = ""
29
+ tags: List[str] = field(default_factory=list)
30
+ language: str = ""
31
+ output: str = ""
32
+ variables: Dict[str, Any] = field(default_factory=dict)
33
+ usage: Dict[str, Any] = field(default_factory=dict)
34
+ discover: Dict[str, Any] = field(default_factory=dict)
35
+ output_schema: Dict[str, Any] = field(default_factory=dict)
36
+ notes: str = ""
37
+ source: str = "packaged"
38
+
39
+ @property
40
+ def alias(self) -> str:
41
+ return self.path.stem
42
+
43
+
44
def _safe_load_yaml(text: str) -> Optional[Dict[str, Any]]:
    """Parse *text* as YAML and return a dict, or ``None`` when it is not one.

    Raises ``RuntimeError`` if PyYAML is not importable at all.
    """
    try:
        import yaml  # type: ignore
    except ImportError as exc:  # pragma: no cover - PyYAML is an install requirement
        raise RuntimeError("PyYAML is required to parse template front matter") from exc

    try:
        parsed = yaml.safe_load(text) or {}
    except Exception:
        # Malformed YAML is treated as "no front matter", not an error.
        return None
    return parsed if isinstance(parsed, dict) else None
57
+
58
+
59
def _parse_front_matter(text: str) -> Tuple[Optional[Dict[str, Any]], str]:
    """Split leading ``---``-fenced YAML front matter off *text*.

    Returns ``(meta, body)`` where ``meta`` is the parsed front-matter dict
    (or ``None`` when absent/unparseable) and ``body`` is the remaining text.
    """
    match = _FRONT_MATTER_PATTERN.match(text)
    if match is None:
        return None, text
    body = text[match.end():]
    return _safe_load_yaml(match.group(1)), body
67
+
68
+
69
+ def _normalize_tags(tags: Any) -> List[str]:
70
+ if tags is None:
71
+ return []
72
+ if isinstance(tags, str):
73
+ return [tags.lower()]
74
+ if isinstance(tags, IterableABC):
75
+ normalized: List[str] = []
76
+ for tag in tags:
77
+ if tag is None:
78
+ continue
79
+ normalized.append(str(tag).lower())
80
+ return normalized
81
+ return []
82
+
83
+
84
+ def _ensure_mapping(value: Any) -> Dict[str, Any]:
85
+ if isinstance(value, dict):
86
+ return dict(value)
87
+ return {}
88
+
89
+
90
def _normalize_meta(raw: Dict[str, Any], path: Path, source: str) -> TemplateMeta:
    """Build a ``TemplateMeta`` from raw front-matter data, filling defaults.

    Missing or falsy string fields collapse to ``""`` (except ``notes``,
    which is stringified whenever it is not ``None``); the template name
    defaults to the file stem.
    """

    def _text(key: str) -> str:
        # Falsy values (None, "", 0) collapse to the empty string.
        return str(raw.get(key) or "")

    notes_value = raw.get("notes")
    return TemplateMeta(
        name=str(raw.get("name") or path.stem),
        path=path.resolve(),
        description=_text("description"),
        version=_text("version"),
        tags=_normalize_tags(raw.get("tags")),
        language=_text("language"),
        output=_text("output"),
        variables=_ensure_mapping(raw.get("variables")),
        usage=_ensure_mapping(raw.get("usage")),
        discover=_ensure_mapping(raw.get("discover")),
        output_schema=_ensure_mapping(raw.get("output_schema")),
        notes="" if notes_value is None else str(notes_value),
        source=source,
    )
115
+
116
+
117
+ def _iter_project_templates() -> Iterable[Path]:
118
+ root = Path.cwd() / "prompts"
119
+ if not root.exists():
120
+ return ()
121
+ return (path for path in root.rglob("*.prompt") if path.is_file())
122
+
123
+
124
def _iter_packaged_templates() -> Iterable[Path]:
    """Return concrete filesystem paths for every ``*.prompt`` file shipped in
    the installed ``pdd`` package's ``templates`` directory.

    Returns an empty tuple when the package or its templates directory is
    missing.
    """
    try:
        pkg_root = pkg_files("pdd").joinpath("templates")
    except ModuleNotFoundError:  # pragma: no cover - package missing
        return ()
    if not pkg_root.is_dir():
        return ()

    resolved: List[Path] = []
    # NOTE(review): rglob() is not part of the importlib.resources Traversable
    # API -- this assumes files() returned a real pathlib.Path (normal
    # wheel/sdist installs); a zip-imported package would need iterdir()
    # instead. TODO confirm supported install modes.
    for entry in pkg_root.rglob("*.prompt"):  # type: ignore[attr-defined]
        try:
            # as_file() yields an on-disk path, extracting the resource first
            # if it is not already a plain file.
            with as_file(entry) as concrete:
                path = Path(concrete)
                if path.is_file():
                    resolved.append(path)
        except FileNotFoundError:
            # Resource vanished between listing and materialization; skip it.
            continue
    return resolved
142
+
143
+
144
def _load_meta_from_path(path: Path, source: str) -> Optional[TemplateMeta]:
    """Read *path* and build a ``TemplateMeta`` from its front matter.

    Returns ``None`` when the file is unreadable or has no usable front
    matter.
    """
    try:
        raw_text = path.read_text(encoding="utf-8")
    except OSError:
        return None
    meta_dict, _body = _parse_front_matter(raw_text)
    if not meta_dict:
        return None
    return _normalize_meta(meta_dict, path, source)
153
+
154
+
155
def _index_templates() -> Tuple[Dict[str, TemplateMeta], Dict[str, TemplateMeta]]:
    """Scan packaged and project templates and build two indexes.

    Returns ``(by_name, lookup)``: ``by_name`` maps canonical template names
    to metadata; ``lookup`` additionally maps filename-stem aliases to the
    same metadata. Project templates override packaged ones of the same name
    (``_SOURCE_PRIORITY``: packaged=0 < project=1).
    """
    by_name: Dict[str, TemplateMeta] = {}
    priority: Dict[str, int] = {}

    def register(meta: TemplateMeta) -> None:
        # Keep the highest-priority source per name; on equal priority the
        # later registration wins (the project scan runs after the packaged
        # scan, so project templates shadow packaged ones).
        current_priority = priority.get(meta.name, -1)
        new_priority = _SOURCE_PRIORITY.get(meta.source, 0)
        if new_priority < current_priority:
            return
        by_name[meta.name] = meta
        priority[meta.name] = new_priority

    for path in _iter_packaged_templates():
        meta = _load_meta_from_path(Path(path), "packaged")
        if meta:
            register(meta)

    for path in _iter_project_templates():
        meta = _load_meta_from_path(Path(path), "project")
        if meta:
            register(meta)

    lookup = dict(by_name)
    lookup_priority = priority.copy()

    # Add filename-stem aliases so templates can be addressed without a
    # directory-qualified name. An alias never displaces an entry with
    # strictly higher priority; on equal priority the alias takes the slot.
    for meta in by_name.values():
        alias = meta.alias
        alias_priority = lookup_priority.get(alias, -1)
        meta_priority = priority.get(meta.name, 0)
        if alias_priority <= meta_priority:
            lookup[alias] = meta
            lookup_priority[alias] = meta_priority

    return by_name, lookup
189
+
190
+
191
+ def _meta_to_payload(meta: TemplateMeta) -> Dict[str, Any]:
192
+ return {
193
+ "name": meta.name,
194
+ "path": str(meta.path),
195
+ "description": meta.description,
196
+ "version": meta.version,
197
+ "tags": list(meta.tags),
198
+ "language": meta.language,
199
+ "output": meta.output,
200
+ "variables": dict(meta.variables),
201
+ "usage": dict(meta.usage),
202
+ "discover": dict(meta.discover),
203
+ "output_schema": dict(meta.output_schema),
204
+ "notes": meta.notes,
205
+ }
206
+
207
+
208
def list_templates(filter_tag: Optional[str] = None) -> List[Dict[str, Any]]:
    """List known templates as summary dicts, sorted case-insensitively by
    name; when *filter_tag* is given, keep only templates carrying that tag
    (comparison is lower-cased)."""
    templates_by_name, _lookup = _index_templates()
    wanted_tag = filter_tag.lower() if filter_tag else None
    summaries = [
        {
            "name": meta.name,
            "path": str(meta.path),
            "description": meta.description,
            "version": meta.version,
            "tags": list(meta.tags),
        }
        for meta in templates_by_name.values()
        if wanted_tag is None or wanted_tag in meta.tags
    ]
    summaries.sort(key=lambda entry: entry["name"].lower())
    return summaries
224
+
225
+
226
def load_template(name: str) -> Dict[str, Any]:
    """Resolve *name* (canonical name or filename-stem alias) and return the
    template's payload dict.

    Raises ``FileNotFoundError`` when no template matches.
    """
    _by_name, lookup = _index_templates()
    meta = lookup.get(name)
    if meta is None:
        raise FileNotFoundError(f"Template '{name}' not found.")
    return _meta_to_payload(meta)
232
+
233
+
234
def show_template(name: str) -> Dict[str, Any]:
    """Return a detail view for template *name*: a scalar summary plus the
    richer front-matter sections (variables, usage, discover, schema, notes).

    Raises ``FileNotFoundError`` (via ``load_template``) when *name* is
    unknown.
    """
    payload = load_template(name)
    summary: Dict[str, Any] = {
        "name": payload["name"],
        "path": payload["path"],
    }
    for key in ("description", "version", "tags", "language", "output"):
        summary[key] = payload.get(key, [] if key == "tags" else "")
    detail: Dict[str, Any] = {"summary": summary}
    for key in ("variables", "usage", "discover", "output_schema"):
        detail[key] = payload.get(key, {})
    detail["notes"] = payload.get("notes", "")
    return detail
253
+
254
+
255
def copy_template(name: str, dest_dir: str) -> str:
    """Copy template *name* into *dest_dir* (created if needed) and return
    the absolute path of the copy.

    Raises ``FileNotFoundError`` when the template is unknown or its backing
    file has gone missing.
    """
    payload = load_template(name)
    source_path = Path(payload["path"])
    if not source_path.exists():
        raise FileNotFoundError(f"Template '{name}' file is missing at {source_path}")
    destination = Path(dest_dir)
    destination.mkdir(parents=True, exist_ok=True)
    target = destination / source_path.name
    # copy2 preserves metadata (timestamps/permissions) along with contents.
    shutil.copy2(source_path, target)
    return str(target.resolve())
@@ -0,0 +1,183 @@
1
+ ---
2
+ name: architecture/architecture_json
3
+ description: Unified architecture template for multiple tech stacks
4
+ version: 1.0.0
5
+ tags: [architecture, template, json]
6
+ language: json
7
+ output: architecture.json
8
+ variables:
9
+ APP_NAME:
10
+ required: false
11
+ type: string
12
+ description: Optional app name for context.
13
+ example: Shop
14
+ PRD_FILE:
15
+ required: true
16
+ type: path
17
+ description: Primary product requirements document (PRD) describing scope and goals.
18
+ example_paths: [PRD.md, docs/specs.md, docs/product/prd.md]
19
+ example_content: |
20
+ Title: Order Management MVP
21
+ Goals: Enable customers to create and track orders end-to-end.
22
+ Key Features:
23
+ - Create Order: id, user_id, items[], total, status
24
+ - View Order: details page with status timeline
25
+ - List Orders: filter by status, date, user
26
+ Non-Functional Requirements:
27
+ - P95 latency < 300ms for read endpoints
28
+ - Error rate < 0.1%
29
+ TECH_STACK_FILE:
30
+ required: false
31
+ type: path
32
+ description: Tech stack overview (languages, frameworks, infrastructure, and tools).
33
+ example_paths: [docs/tech_stack.md, docs/architecture/stack.md]
34
+ example_content: |
35
+ Backend: Python (FastAPI), Postgres (SQLAlchemy), PyTest
36
+ Frontend: Next.js (TypeScript), shadcn/ui, Tailwind CSS
37
+ API: REST
38
+ Auth: Firebase Auth (GitHub Device Flow), JWT for API
39
+ Infra: Vercel (frontend), Cloud Run (backend), Cloud SQL (Postgres)
40
+ Observability: OpenTelemetry traces, Cloud Logging
41
+ DOC_FILES:
42
+ required: false
43
+ type: list
44
+ description: Additional documentation files (comma/newline-separated).
45
+ example_paths: [docs/ux.md, docs/components.md]
46
+ example_content: |
47
+ Design overview, patterns and constraints
48
+ INCLUDE_FILES:
49
+ required: false
50
+ type: list
51
+ description: Specific source files to include (comma/newline-separated).
52
+ example_paths: [src/app.py, src/api.py, frontend/app/layout.tsx, frontend/app/page.tsx]
53
+ usage:
54
+ generate:
55
+ - name: Minimal (PRD only)
56
+ command: pdd generate -e PRD_FILE=docs/specs.md --output architecture.json pdd/templates/architecture/architecture_json.prompt
57
+ - name: With tech stack overview
58
+ command: pdd generate -e PRD_FILE=docs/specs.md -e TECH_STACK_FILE=docs/tech_stack.md --output architecture.json pdd/templates/architecture/architecture_json.prompt
59
+
60
+ discover:
61
+ enabled: false
62
+ max_per_pattern: 5
63
+ max_total: 10
64
+
65
+ output_schema:
66
+ type: array
67
+ items:
68
+ type: object
69
+ required: [reason, description, dependencies, priority, filename]
70
+ properties:
71
+ reason: { type: string }
72
+ description: { type: string }
73
+ dependencies: { type: array, items: { type: string } }
74
+ priority: { type: integer, minimum: 1 }
75
+ filename: { type: string }
76
+ tags: { type: array, items: { type: string } }
77
+ interface:
78
+ type: object
79
+ properties:
80
+ type: { enum: [component, page, module, api, graphql, cli, job, message, config] }
81
+ component: { type: object }
82
+ page:
83
+ type: object
84
+ properties:
85
+ route: { type: string }
86
+ params: { type: array, items: { type: object } }
87
+ dataSources: { type: array, items: { type: object } }
88
+ layout: { type: object }
89
+ module: { type: object }
90
+ api: { type: object }
91
+ graphql: { type: object }
92
+ cli: { type: object }
93
+ job: { type: object }
94
+ message: { type: object }
95
+ config: { type: object }
96
+ ---
97
+
98
+ Purpose: Produce an architecture JSON that enumerates prompt files to generate code files for the project.
99
+
100
+ <PRD_FILE><include>${PRD_FILE}</include></PRD_FILE>
101
+ <TECH_STACK_FILE><include>${TECH_STACK_FILE}</include></TECH_STACK_FILE>
102
+ <DOC_FILES><include-many>${DOC_FILES}</include-many></DOC_FILES>
103
+
104
+ <INCLUDE_FILES><include-many>${INCLUDE_FILES}</include-many></INCLUDE_FILES>
105
+
106
+ INSTRUCTIONS:
107
+ - Use only the facts from the included documents and files. Do not invent technologies or filenames.
108
+ - If TECH_STACK_FILE is absent, infer a reasonable tech stack from the PRD and included files; state key assumptions within each item's description.
109
+ - Output a single top-level JSON array of items. Each item must include:
110
+ - reason, description, dependencies (filenames), priority (1 = highest), filename, optional tags.
111
+ - interface: include only the applicable sub-object (component, page, module, api, graphql, cli, job, message, or config). Omit all non-applicable sub-objects entirely.
112
+ - Valid JSON only. No comments or trailing commas.
113
+
114
+ OUTPUT FORMAT (authoritative):
115
+ ```json
116
+ {
117
+ "type": "array",
118
+ "items": {
119
+ "type": "object",
120
+ "required": ["reason", "description", "dependencies", "priority", "filename"],
121
+ "properties": {
122
+ "reason": {"type": "string"},
123
+ "description": {"type": "string"},
124
+ "dependencies": {"type": "array", "items": {"type": "string"}},
125
+ "priority": {"type": "integer", "minimum": 1},
126
+ "filename": {"type": "string"},
127
+ "tags": {"type": "array", "items": {"type": "string"}},
128
+ "interface": {
129
+ "type": "object",
130
+ "properties": {
131
+ "type": {"enum": ["component", "page", "module", "api", "graphql", "cli", "job", "message", "config"]},
132
+ "component": {"type": "object"},
133
+ "page": {"type": "object", "properties": {"route": {"type": "string"}}},
134
+ "module": {"type": "object"},
135
+ "api": {"type": "object"},
136
+ "graphql": {"type": "object"},
137
+ "cli": {"type": "object"},
138
+ "job": {"type": "object"},
139
+ "message": {"type": "object"},
140
+ "config": {"type": "object"}
141
+ }
142
+ }
143
+ }
144
+ }
145
+ }
146
+ ```
147
+
148
+ INTERFACE TYPES (emit only applicable):
149
+ - page: route (string), params? (array), dataSources? (array), layout? (object)
150
+ - component: props (array of {name, type, required?}), emits? (array), context? (array)
151
+ - module: functions (array of {name, signature, returns?, errors?, sideEffects?})
152
+ - api: endpoints (array of {method, path, auth?, requestSchema?, responseSchema?, errors?})
153
+ - graphql: sdl? (string) or operations {queries?[], mutations?[], subscriptions?[]}
154
+ - cli: commands (array of {name, args?[], flags?[], exitCodes?[]}), io? {stdin?, stdout?}
155
+ - job: trigger {schedule? | event?}, inputs? (array), outputs? (array), retryPolicy? (string)
156
+ - message: topics (array of {name, direction: "publish"|"subscribe", schema?, qos?})
157
+ - config: keys (array of {name, type, default?, required?, source: "env"|"file"|"secret"})
158
+
159
+ FILENAME CONVENTIONS:
160
+ - The "filename" field is the prompt filename to generate (not the code file). Use PDD convention: <base>_<LangOrFramework>.prompt where <LangOrFramework> matches the tech stack.
161
+ - Examples (adapt to your stack):
162
+ - Next.js (TypeScript React): page_TypeScriptReact.prompt -> generates page.tsx; layout_TypeScriptReact.prompt -> layout.tsx
163
+ - Python backend: api_Python.prompt -> api.py; orders_Python.prompt -> orders.py
164
+ - Choose descriptive <base> names (e.g., orders_page, orders_api) and keep names consistent across dependencies.
165
+
166
+ DEPENDENCY RULES:
167
+ - The "dependencies" array must list other items by their prompt filenames (the "filename" values), not code filenames.
168
+ - Do not reference files that are not part of this array unless they were explicitly provided via INCLUDE_FILES/DOC_FILES.
169
+ - Avoid cycles; if a cycle is necessary, justify it in the description and clarify initialization order.
170
+
171
+ PRIORITY AND ORDERING:
172
+ - Use unique integer priorities starting at 1 without gaps (1,2,3,...).
173
+ - Sort the top-level array by ascending priority.
174
+
175
+ TAGS (optional):
176
+ - Use short, lower-case tags for slicing (e.g., ["frontend","nextjs"], ["backend","api"], ["config"]).
177
+
178
+ CONTENT GUIDANCE:
179
+ - Descriptions must be architectural and actionable: responsibilities, interfaces, error handling, cross-cutting concerns.
180
+ - For API items, outline endpoints (method, path, auth) and high-level request/response shapes.
181
+ - For page/component items, include the route, key props, and data sources.
182
+
183
+ DO NOT INCLUDE the schema or these conventions in the output; return only the JSON array.