codd-dev 0.2.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
codd/implementer.py ADDED
@@ -0,0 +1,846 @@
1
+ """CoDD implementer — design-to-code generation from implementation plans."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections import deque
6
+ from dataclasses import dataclass
7
+ import json
8
+ from pathlib import Path, PurePosixPath
9
+ import re
10
+ from typing import Any
11
+
12
+ import codd.generator as generator_module
13
+ from codd.generator import DependencyDocument, _load_project_config, _normalize_conventions
14
+ from codd.scanner import _extract_frontmatter, build_document_node_path_map
15
+
16
+
17
# Node ID assumed for the plan when its frontmatter does not declare one.
DEFAULT_IMPLEMENT_NODE_ID = "plan:implementation-plan"
# Matches the "=== FILE: <path> ===" delimiters emitted by the AI command.
FILE_BLOCK_RE = re.compile(r"^=== FILE: (?P<path>.+?) ===\s*$", re.MULTILINE)
# Sprint headings: "#### Sprint <n> (<window>): <title>", window/title optional.
# FIX: the parentheses around the window must be escaped (`\(` / `\)`) so they
# match the literal "(" and ")" in the heading.  Unescaped, they formed a regex
# group: windowed headings never matched, and for "#### Sprint 1: Title" the
# greedy `[^)]+` swallowed ": Title" into `window`, leaving `title` empty.
SPRINT_HEADING_RE = re.compile(
    r"^####\s+Sprint\s+(?P<number>\d+)(?:\s*\((?P<window>[^)]+)\))?(?:\s*:\s*(?P<title>.+))?\s*$",
    re.MULTILINE,
)
# Numbered top-level sections, e.g. "## 3. Milestones...".
SECTION_HEADING_RE = re.compile(r"^##\s+\d+\.\s+(?P<title>.+?)\s*$", re.MULTILINE)
# TypeScript export scanners used to summarize generated code for later prompts.
EXPORT_TYPE_RE = re.compile(
    r"^\s*export\s+(?:declare\s+)?(?:type|interface|enum)\s+(?P<name>[A-Za-z_][A-Za-z0-9_]*)",
    re.MULTILINE,
)
EXPORT_CLASS_RE = re.compile(
    r"^\s*export\s+(?:default\s+)?class\s+(?P<name>[A-Za-z_][A-Za-z0-9_]*)",
    re.MULTILINE,
)
EXPORT_FUNCTION_RE = re.compile(
    r"^\s*export\s+(?:default\s+)?(?:async\s+)?function\s+(?P<name>[A-Za-z_][A-Za-z0-9_]*)",
    re.MULTILINE,
)
EXPORT_VALUE_RE = re.compile(
    r"^\s*export\s+(?:const|let|var)\s+(?P<name>[A-Za-z_][A-Za-z0-9_]*)",
    re.MULTILINE,
)
# Named export blocks: `export { A, B as C } from '...'` (optionally `export type {...}`).
EXPORT_NAMED_BLOCK_RE = re.compile(
    r"^\s*export\s+(?P<type_prefix>type\s+)?{\s*(?P<body>[^}]+)\s*}(?:\s+from\s+['\"].+['\"])?\s*;?",
    re.MULTILINE,
)
44
+
45
+
46
@dataclass(frozen=True)
class ImplementationPlan:
    """Implementation plan document and its metadata."""

    # Stable document identifier (frontmatter ``node_id`` or the module default).
    node_id: str
    # Plan file path, relative to the project root.
    path: Path
    # Full Markdown text of the plan document.
    content: str
    # Normalized ``depends_on`` entries from the plan's frontmatter.
    depends_on: list[dict[str, Any]]
    # Normalized ``conventions`` entries from the plan's frontmatter.
    conventions: list[dict[str, Any]]
55
+
56
+
57
@dataclass(frozen=True)
class ImplementationTask:
    """Concrete implementation task for one sprint."""

    # 1-based sprint number this task belongs to.
    sprint: int
    # Task identifier (from the plan table, or synthesized like "1-2").
    task_id: str
    title: str
    # One-line description used in the generation prompt.
    summary: str
    # Free-text hint about which modules/areas the task covers.
    module_hint: str
    # Deliverable text copied from the plan/milestone row.
    deliverable: str
    sprint_title: str
    # Time window text for the sprint (may be empty when unspecified).
    sprint_window: str
    # Project-relative directory all generated files must live under.
    output_dir: str
    # Design-document node IDs whose content is fed into the prompt.
    dependency_node_ids: list[str]
    # Raw plan/milestone text giving the sprint's surrounding context.
    sprint_context: str
72
+
73
+
74
@dataclass(frozen=True)
class ImplementationResult:
    """Result of generating code for one implementation task."""

    # Sprint number the generation ran for.
    sprint: int
    task_id: str
    task_title: str
    # Absolute output directory (project_root / task.output_dir).
    output_dir: Path
    # Absolute paths of every file written for this task.
    generated_files: list[Path]
83
+
84
+
85
def implement_sprint(
    project_root: Path,
    sprint: int,
    *,
    task: str | None = None,
    ai_command: str | None = None,
) -> list[ImplementationResult]:
    """Generate code for one sprint from implementation plan context.

    Loads the implementation plan, resolves the sprint's tasks (optionally
    narrowed by *task*), then for each task builds a prompt from the plan,
    its dependency design documents, conventions, and coding principles,
    invokes the configured AI command, and writes the parsed output files.

    Raises:
        ValueError: if the sprint/task cannot be resolved, or generated
            output is malformed (via the helpers).
        FileNotFoundError: if the plan or configured files are missing.
    """
    project_root = project_root.resolve()
    config = _load_project_config(project_root)
    plan = _load_implementation_plan(project_root, config)
    selected_tasks = _select_tasks(plan, sprint, task)
    if not selected_tasks:
        if task:
            raise ValueError(f"no implementation task matched {task!r} in sprint {sprint}")
        raise ValueError(f"implementation plan does not define sprint {sprint}")

    resolved_ai_command = generator_module._resolve_ai_command(config, ai_command)
    global_conventions = _normalize_conventions(config.get("conventions", []))
    coding_principles = _load_coding_principles(project_root, config)
    node_paths = build_document_node_path_map(project_root, config)
    # Detailed-design documents the plan depends on are included for every task.
    detailed_design_node_ids = _select_detailed_design_dependency_node_ids(plan.depends_on, node_paths)

    results: list[ImplementationResult] = []
    # Summaries of earlier tasks in this run, fed into later prompts so the
    # model reuses (imports) prior symbols instead of redefining them.
    prior_task_outputs: list[dict[str, Any]] = []
    for selected_task in selected_tasks:
        dependency_node_ids = _ordered_unique(selected_task.dependency_node_ids + detailed_design_node_ids)
        dependency_documents, document_conventions = _collect_dependency_documents(
            project_root,
            dependency_node_ids,
            node_paths,
        )
        combined_conventions = _merge_conventions(
            global_conventions,
            plan.conventions,
            document_conventions,
        )
        prompt = _build_implementation_prompt(
            config=config,
            plan=plan,
            task=selected_task,
            dependency_documents=dependency_documents,
            conventions=combined_conventions,
            coding_principles=coding_principles,
            prior_task_outputs=prior_task_outputs,
        )
        raw_output = generator_module._invoke_ai_command(resolved_ai_command, prompt)
        generated_files = _write_generated_files(
            project_root=project_root,
            plan=plan,
            task=selected_task,
            dependency_documents=dependency_documents,
            output_dir=selected_task.output_dir,
            raw_output=raw_output,
        )
        # Record this task's exports before moving to the next task.
        prior_task_outputs.append(
            _summarize_generated_task_output(project_root, selected_task, generated_files)
        )
        results.append(
            ImplementationResult(
                sprint=sprint,
                task_id=selected_task.task_id,
                task_title=selected_task.title,
                output_dir=project_root / selected_task.output_dir,
                generated_files=generated_files,
            )
        )

    return results
154
+
155
+
156
def _load_implementation_plan(project_root: Path, config: dict[str, Any]) -> ImplementationPlan:
    """Load the implementation plan document plus its frontmatter metadata.

    Raises FileNotFoundError when the mapped plan file does not exist.
    """
    node_paths = build_document_node_path_map(project_root, config)
    rel_path = node_paths.get(DEFAULT_IMPLEMENT_NODE_ID, Path("docs/plan/implementation_plan.md"))
    plan_path = project_root / rel_path
    if not plan_path.exists():
        raise FileNotFoundError(f"implementation plan not found: {rel_path.as_posix()}")

    frontmatter = _extract_frontmatter(plan_path) or {}
    return ImplementationPlan(
        node_id=str(frontmatter.get("node_id") or DEFAULT_IMPLEMENT_NODE_ID),
        path=rel_path,
        content=plan_path.read_text(encoding="utf-8"),
        depends_on=generator_module._normalize_dependencies(frontmatter.get("depends_on", [])),
        conventions=_normalize_conventions(frontmatter.get("conventions", [])),
    )
172
+
173
+
174
+ def _load_coding_principles(project_root: Path, config: dict[str, Any]) -> str | None:
175
+ raw_path = config.get("coding_principles")
176
+ if raw_path is None:
177
+ return None
178
+ if not isinstance(raw_path, str) or not raw_path.strip():
179
+ raise ValueError("coding_principles must be a non-empty project-relative path when configured")
180
+
181
+ principles_path = project_root / raw_path
182
+ if not principles_path.exists():
183
+ raise FileNotFoundError(f"coding_principles file not found: {raw_path}")
184
+
185
+ return principles_path.read_text(encoding="utf-8")
186
+
187
+
188
def _select_tasks(plan: ImplementationPlan, sprint: int, task_filter: str | None) -> list[ImplementationTask]:
    """Return the sprint's tasks, optionally narrowed by a filter string.

    The filter matches (case-insensitively) against the task ID, the slugs of
    the title and output directory, or any substring of the title.
    """
    tasks = _parse_explicit_sprint_tasks(plan, sprint) or _infer_sprint_tasks_from_milestones(plan, sprint)
    if task_filter is None:
        return tasks

    needle = task_filter.strip().casefold()

    def _matches(candidate: ImplementationTask) -> bool:
        exact_keys = {
            candidate.task_id.casefold(),
            _slug_from_text(candidate.title).casefold(),
            _slug_from_text(candidate.output_dir).casefold(),
        }
        return needle in exact_keys or needle in candidate.title.casefold()

    return [candidate for candidate in tasks if _matches(candidate)]
207
+
208
+
209
def _parse_explicit_sprint_tasks(plan: ImplementationPlan, sprint: int) -> list[ImplementationTask]:
    """Parse tasks for *sprint* from an explicit "#### Sprint N" section table.

    Returns [] when the plan has no sprint headings at all, or when the
    matching sprint section exists but contains no task table — the caller
    then falls back to milestone-based inference.
    """
    matches = list(SPRINT_HEADING_RE.finditer(plan.content))
    if not matches:
        return []

    for index, match in enumerate(matches):
        if int(match.group("number")) != sprint:
            continue
        # Section body runs from this heading up to the next sprint heading
        # (or the end of the document).
        section_start = match.end()
        section_end = matches[index + 1].start() if index + 1 < len(matches) else len(plan.content)
        section_text = plan.content[section_start:section_end]
        table_rows = _parse_markdown_table(section_text)
        if not table_rows:
            # Section found but no table: give up on explicit parsing entirely.
            return []

        sprint_title = (match.group("title") or f"Sprint {sprint}").strip()
        sprint_window = (match.group("window") or "").strip()
        tasks: list[ImplementationTask] = []
        for row in table_rows:
            # Expected columns: ID | title | module hint | deliverable.
            if len(row) < 4:
                continue
            # Empty cells get synthesized defaults based on position.
            task_id = row[0] or f"{sprint}-{len(tasks) + 1}"
            title = row[1] or f"Sprint {sprint} Task {len(tasks) + 1}"
            module_hint = row[2]
            deliverable = row[3]
            slug = _derive_task_slug(title, module_hint, task_id)
            tasks.append(
                ImplementationTask(
                    sprint=sprint,
                    task_id=task_id,
                    title=title,
                    summary=title,
                    module_hint=module_hint,
                    deliverable=deliverable,
                    sprint_title=sprint_title,
                    sprint_window=sprint_window,
                    output_dir=f"src/generated/sprint_{sprint}/{slug}",
                    dependency_node_ids=_infer_dependency_node_ids(plan, title, module_hint, deliverable),
                    sprint_context=_clean_text_block(section_text),
                )
            )
        return tasks

    # Headings exist, but none matched the requested sprint number.
    return []
253
+
254
+
255
def _infer_sprint_tasks_from_milestones(plan: ImplementationPlan, sprint: int) -> list[ImplementationTask]:
    """Derive tasks for *sprint* from the milestones table.

    Fallback used when the plan has no explicit per-sprint task table.
    Sprint 1 gets a fixed, hand-curated foundation breakdown; other sprints
    get up to four tasks split from the milestone's deliverables text.
    """
    milestones = _parse_milestone_rows(plan.content)
    # Milestone N maps 1:1 onto sprint N (1-based).
    if sprint < 1 or sprint > len(milestones):
        return []

    milestone = milestones[sprint - 1]
    sprint_title = milestone["title"] or f"Sprint {sprint}"
    sprint_window = milestone["period"]
    sprint_context = (
        f"Milestone: {milestone['title']}\n"
        f"Period: {milestone['period']}\n"
        f"Deliverables: {milestone['deliverables']}"
    )

    if sprint == 1:
        # Hand-curated foundation tasks; dependency node IDs are hard-coded
        # to the corresponding design documents.
        return [
            ImplementationTask(
                sprint=sprint,
                task_id="1-project-initialization",
                title="Project initialization",
                summary="Bootstrap the Next.js/TypeScript application shell and runtime foundations for Sprint 1.",
                module_hint="Application bootstrap, providers, configuration defaults, app shell",
                deliverable=milestone["deliverables"],
                sprint_title=sprint_title,
                sprint_window=sprint_window,
                output_dir="src/generated/sprint_1/project_initialization",
                dependency_node_ids=["design:system-design", "design:ux-design"],
                sprint_context=sprint_context,
            ),
            ImplementationTask(
                sprint=sprint,
                task_id="1-database-foundation",
                title="Database foundation",
                summary="Create Prisma-oriented tenant-aware database foundations for Sprint 1.",
                module_hint="Prisma client helpers, tenant session context, RLS-aware data access scaffolding",
                deliverable=milestone["deliverables"],
                sprint_title=sprint_title,
                sprint_window=sprint_window,
                output_dir="src/generated/sprint_1/database_foundation",
                dependency_node_ids=["design:system-design", "design:database-design"],
                sprint_context=sprint_context,
            ),
            ImplementationTask(
                sprint=sprint,
                task_id="1-authentication",
                title="Authentication foundation",
                summary="Create authentication, session, and RBAC code foundations for Sprint 1.",
                module_hint="NextAuth-compatible configuration, session helpers, role guards",
                deliverable=milestone["deliverables"],
                sprint_title=sprint_title,
                sprint_window=sprint_window,
                output_dir="src/generated/sprint_1/authentication",
                dependency_node_ids=["design:system-design", "design:auth-authorization-design"],
                sprint_context=sprint_context,
            ),
            ImplementationTask(
                sprint=sprint,
                task_id="1-common-middleware",
                title="Common middleware",
                summary="Create shared request context, tenant guard, and audit middleware foundations for Sprint 1.",
                module_hint="Request ID, tenant status checks, audit hooks, role evaluation pipeline",
                deliverable=milestone["deliverables"],
                sprint_title=sprint_title,
                sprint_window=sprint_window,
                output_dir="src/generated/sprint_1/common_middleware",
                dependency_node_ids=[
                    "design:system-design",
                    "design:database-design",
                    "design:auth-authorization-design",
                    "design:api-design",
                ],
                sprint_context=sprint_context,
            ),
        ]

    # Later sprints: split the deliverables cell into at most four chunks,
    # one task per chunk; fall back to the whole cell (or title) when empty.
    title_slug = _slug_from_text(sprint_title) or f"sprint_{sprint}"
    summary_chunks = [chunk for chunk in _split_deliverable_chunks(milestone["deliverables"]) if chunk][:4]
    if not summary_chunks:
        summary_chunks = [milestone["deliverables"] or sprint_title]

    tasks: list[ImplementationTask] = []
    for index, chunk in enumerate(summary_chunks, start=1):
        slug = _derive_task_slug(chunk, "", f"{sprint}-{index}")
        tasks.append(
            ImplementationTask(
                sprint=sprint,
                task_id=f"{sprint}-{index}",
                title=chunk,
                summary=chunk,
                module_hint=f"src/generated/sprint_{sprint}/{slug}",
                deliverable=milestone["deliverables"],
                sprint_title=sprint_title,
                sprint_window=sprint_window,
                output_dir=f"src/generated/sprint_{sprint}/{slug or title_slug}",
                # No keyword match -> depend on everything the plan depends on.
                dependency_node_ids=[entry["id"] for entry in plan.depends_on] or ["design:system-design"],
                sprint_context=sprint_context,
            )
        )
    return tasks
354
+
355
+
356
+ def _parse_markdown_table(section_text: str) -> list[list[str]]:
357
+ rows: list[list[str]] = []
358
+ for line in section_text.splitlines():
359
+ stripped = line.strip()
360
+ if not stripped.startswith("|") or stripped.count("|") < 4:
361
+ continue
362
+ cells = [cell.strip() for cell in stripped.strip("|").split("|")]
363
+ if all(re.fullmatch(r"-{3,}", cell.replace(" ", "")) for cell in cells):
364
+ continue
365
+ if cells and cells[0] == "#":
366
+ continue
367
+ rows.append(cells)
368
+ return rows
369
+
370
+
371
def _parse_milestone_rows(content: str) -> list[dict[str, str]]:
    """Extract milestone rows from the "## 3. Milestones" section.

    Returns one ``{"period", "title", "deliverables"}`` dict per table row
    with at least three columns; [] when the section is absent.
    """
    # FIX: the parentheses around the Japanese label must be escaped — they
    # previously formed a regex capture group, so the literal heading
    # "## 3. Milestones(マイルストーン)" never matched.  The label is also made
    # optional so a plain "## 3. Milestones" heading is accepted.
    match = re.search(
        r"^##\s+3\.\s+Milestones(?:\s*\(マイルストーン\))?\s*$",
        content,
        re.MULTILINE,
    )
    if not match:
        return []

    # The milestones table ends at the next numbered section heading.
    remaining = content[match.end():]
    next_heading = SECTION_HEADING_RE.search(remaining)
    section_text = remaining[: next_heading.start()] if next_heading else remaining
    rows = _parse_markdown_table(section_text)
    milestones: list[dict[str, str]] = []
    for row in rows:
        if len(row) < 3:
            continue
        milestones.append(
            {
                "period": row[0],
                "title": row[1],
                "deliverables": row[2],
            }
        )
    return milestones
396
+
397
+
398
def _infer_dependency_node_ids(
    plan: ImplementationPlan,
    title: str,
    module_hint: str,
    deliverable: str,
) -> list[str]:
    """Pick the plan dependencies whose keywords appear in the task text.

    System design is always included first when the plan depends on it; when
    nothing matches, every declared plan dependency is returned.
    """
    declared = [entry["id"] for entry in plan.depends_on]
    haystack = " ".join([title, module_hint, deliverable]).casefold()

    keyword_index = {
        "design:system-design": ["system", "architecture", "基盤", "bootstrap", "project", "middleware"],
        "design:database-design": ["database", "db", "prisma", "schema", "rls", "sql"],
        "design:api-design": ["api", "endpoint", "request", "middleware", "route"],
        "design:auth-authorization-design": ["auth", "jwt", "rbac", "oauth", "session", "認証", "認可"],
        "design:ux-design": ["ui", "layout", "screen", "component", "ux", "frontend"],
        "design:integration-design": ["integration", "stripe", "line", "sendgrid", "4ms", "bunny"],
    }

    matched = [
        node_id
        for node_id in declared
        if any(keyword in haystack for keyword in keyword_index.get(node_id, []))
    ]

    if "design:system-design" in declared and "design:system-design" not in matched:
        matched.insert(0, "design:system-design")

    return _ordered_unique(matched or declared)
427
+
428
+
429
def _select_detailed_design_dependency_node_ids(
    dependencies: list[dict[str, Any]],
    node_paths: dict[str, Path],
) -> list[str]:
    """Return dependency node IDs that resolve to docs/detailed_design paths."""
    detailed = [
        dependency["id"]
        for dependency in dependencies
        if (mapped := node_paths.get(dependency["id"])) is not None
        and _is_detailed_design_path(mapped)
    ]
    return _ordered_unique(detailed)
442
+
443
+
444
def _collect_dependency_documents(
    project_root: Path,
    initial_node_ids: list[str],
    node_paths: dict[str, Path],
) -> tuple[list[DependencyDocument], list[dict[str, Any]]]:
    """Load the given documents plus their transitive ``depends_on`` closure.

    Breadth-first traversal starting from *initial_node_ids*.  Documents in
    the initial set are required (unresolvable/missing ones raise); documents
    discovered transitively are best-effort and silently skipped when they
    cannot be resolved.  Returns the documents (sorted by path) and the
    concatenated conventions collected from every visited frontmatter.
    """
    documents: list[DependencyDocument] = []
    conventions: list[dict[str, Any]] = []
    queue: deque[str] = deque(node_id for node_id in initial_node_ids if node_id)
    required_node_ids = set(initial_node_ids)
    seen: set[str] = set()
    missing: list[str] = []

    while queue:
        node_id = queue.popleft()
        if node_id in seen:
            continue
        seen.add(node_id)

        rel_path = node_paths.get(node_id)
        if rel_path is None:
            # Unmapped node: fatal only if it was explicitly requested.
            if node_id in required_node_ids:
                missing.append(node_id)
            continue

        doc_path = project_root / rel_path
        if not doc_path.exists():
            if node_id in required_node_ids:
                raise ValueError(
                    f"dependency document {node_id!r} maps to {rel_path.as_posix()}, but the file does not exist"
                )
            continue

        content = doc_path.read_text(encoding="utf-8")
        documents.append(DependencyDocument(node_id=node_id, path=rel_path, content=content))

        # Harvest conventions and enqueue this document's own dependencies.
        codd = _extract_frontmatter(doc_path) or {}
        conventions.extend(_normalize_conventions(codd.get("conventions", [])))
        for dependency in generator_module._normalize_dependencies(codd.get("depends_on", [])):
            if dependency["id"] not in seen:
                queue.append(dependency["id"])

    if missing:
        raise ValueError(f"unable to resolve dependency document paths for: {', '.join(sorted(set(missing)))}")

    # Deterministic ordering for stable prompts.
    documents.sort(key=lambda document: document.path.as_posix())
    return documents, conventions
490
+
491
+
492
+ def _merge_conventions(*groups: list[dict[str, Any]]) -> list[dict[str, Any]]:
493
+ merged: list[dict[str, Any]] = []
494
+ seen: set[str] = set()
495
+ for group in groups:
496
+ for convention in group:
497
+ normalized = {
498
+ "targets": [target for target in convention.get("targets", []) if isinstance(target, str)],
499
+ "reason": str(convention.get("reason") or "").strip(),
500
+ }
501
+ key = json.dumps(normalized, ensure_ascii=False, sort_keys=True)
502
+ if key in seen:
503
+ continue
504
+ seen.add(key)
505
+ merged.append(normalized)
506
+ return merged
507
+
508
+
509
def _build_implementation_prompt(
    *,
    config: dict[str, Any],
    plan: ImplementationPlan,
    task: ImplementationTask,
    dependency_documents: list[DependencyDocument],
    conventions: list[dict[str, Any]],
    coding_principles: str | None,
    prior_task_outputs: list[dict[str, Any]] | None = None,
) -> str:
    """Assemble the full text prompt sent to the AI command for one task.

    Sections, in order: project/task metadata, mandatory output instructions,
    sprint context, optional coding principles, optional conventions, optional
    prior-task summaries (to encourage import/reuse), then every dependency
    document inlined between BEGIN/END markers.
    """
    project = config.get("project") or {}
    frameworks = project.get("frameworks") or []
    language = project.get("language") or "typescript"
    framework_text = ", ".join(str(item) for item in frameworks) if frameworks else "(unspecified)"

    lines = [
        "You are generating implementation code from CoDD design documents.",
        f"Project name: {project.get('name') or '(unknown)'}",
        f"Primary language: {language}",
        f"Framework stack: {framework_text}",
        f"Implementation plan: {plan.path.as_posix()} ({plan.node_id})",
        f"Sprint: {task.sprint}",
        f"Sprint title: {task.sprint_title}",
        f"Sprint window: {task.sprint_window or '(unspecified)'}",
        f"Task ID: {task.task_id}",
        f"Task title: {task.title}",
        f"Task summary: {task.summary}",
        f"Module hint: {task.module_hint}",
        f"Deliverable: {task.deliverable}",
        f"Output directory: {task.output_dir}",
        "",
        "Mandatory instructions:",
        "- Generate concrete production-oriented TypeScript / TSX source files.",
        "- Use Next.js App Router, TypeScript, and Prisma-compatible patterns when relevant.",
        "- Reflect tenant isolation, RLS context propagation, authentication, authorization, and auditability explicitly where the design requires them.",
        "- The tool will prepend traceability comments to each generated file; do not emit separate metadata files.",
        "- Do not emit prose, explanations, Markdown headings, YAML, TODOs, placeholders, or file descriptions outside the required FILE blocks.",
        "- Every generated file path must stay under the output directory shown above.",
        "- If a React component is needed, emit .tsx files. Otherwise prefer .ts files.",
        "- Favor small coherent modules rather than one monolithic file.",
        "- Cross-file imports may use relative imports or '@/generated/...' style aliases, but keep the task internally coherent.",
        "",
        "Required output format (repeat this block for each file and output nothing else):",
        f"=== FILE: {task.output_dir}/<filename>.ts ===",
        "```ts",
        "// code",
        "```",
        "",
        "ABSOLUTE PROHIBITION: Outputting prose, planning notes, TODO markers, or files outside the output directory is a CRITICAL ERROR.",
        "",
        "Sprint context:",
        task.sprint_context,
    ]

    # Optional: project-wide coding principles document, verbatim.
    if coding_principles:
        lines.extend(
            [
                "",
                "Project coding principles (treat these as source-of-truth implementation rules):",
                coding_principles.rstrip(),
            ]
        )

    # Optional: merged conventions, numbered for reference.
    if conventions:
        lines.extend(
            [
                "",
                "Non-negotiable conventions:",
                "- These are release-blocking constraints. The code must embody them explicitly.",
                "- If a convention concerns security, RLS, tenant boundaries, or auth, implement a concrete control rather than only comments.",
            ]
        )
        for index, convention in enumerate(conventions, start=1):
            targets = ", ".join(target for target in convention.get("targets", []) if isinstance(target, str))
            reason = convention.get("reason") or "(no reason provided)"
            lines.append(f"{index}. Targets: {targets or '(no explicit targets)'}")
            lines.append(f"   Reason: {reason}")

    # Optional: summaries of earlier tasks so the model imports instead of
    # re-implementing already-generated symbols.
    if prior_task_outputs:
        lines.extend(
            [
                "",
                "Prior implementations (same sprint, earlier tasks only):",
                "- The following summaries describe code that was already generated earlier in this sprint.",
                "- ABSOLUTE PROHIBITION: Re-implementing the same type definitions, utility functions, classes, guards, middleware, or helpers is a CRITICAL ERROR and a release-blocking violation.",
                "- Reuse these implementations via imports. If a needed symbol already exists below, import it instead of redefining it.",
            ]
        )
        for summary in prior_task_outputs:
            lines.extend(_format_prior_task_summary(summary))

    lines.extend(
        [
            "",
            "Dependency documents:",
        ]
    )
    for document in dependency_documents:
        lines.extend(
            [
                f"--- BEGIN DEPENDENCY {document.path.as_posix()} ({document.node_id}) ---",
                document.content.rstrip(),
                f"--- END DEPENDENCY {document.path.as_posix()} ---",
                "",
            ]
        )

    # Single trailing newline regardless of how the last section ended.
    return "\n".join(lines).rstrip() + "\n"
617
+
618
+
619
def _write_generated_files(
    *,
    project_root: Path,
    plan: ImplementationPlan,
    task: ImplementationTask,
    dependency_documents: list[DependencyDocument],
    output_dir: str,
    raw_output: str,
) -> list[Path]:
    """Parse the AI output into files and write them with traceability headers.

    Returns the absolute paths of every file written.  Raises ValueError when
    the output is malformed (via ``_parse_file_payloads``).
    """
    file_payloads = _parse_file_payloads(raw_output, output_dir)
    header = _build_traceability_comment(plan, task, dependency_documents)
    written: list[Path] = []
    for relative_path, content in file_payloads:
        target = project_root / relative_path
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(
            _prepend_traceability_comment(relative_path, header, content),
            encoding="utf-8",
        )
        written.append(target)
    return written
637
+
638
+
639
def _parse_file_payloads(raw_output: str, output_dir: str) -> list[tuple[str, str]]:
    """Split raw AI output into validated (relative_path, content) pairs.

    Raises ValueError for empty output, empty files, or paths that are
    absolute, contain "..", fall outside ``src/``, or escape *output_dir*.
    """
    cleaned_output = raw_output.strip()
    matches = list(FILE_BLOCK_RE.finditer(cleaned_output))
    if not matches:
        # No "=== FILE: ... ===" markers: treat the whole output as one
        # fallback module written to <output_dir>/index.ts(x).
        fallback_content = _strip_code_fence(cleaned_output).strip()
        if not fallback_content:
            raise ValueError("AI command returned empty implementation output")
        extension = ".tsx" if _looks_like_tsx(fallback_content) else ".ts"
        return [(f"{output_dir}/index{extension}", fallback_content.rstrip() + "\n")]

    payloads: list[tuple[str, str]] = []
    output_prefix = PurePosixPath(output_dir)
    for index, match in enumerate(matches):
        # A file's content runs from its marker to the next marker (or EOF).
        start = match.end()
        end = matches[index + 1].start() if index + 1 < len(matches) else len(cleaned_output)
        block = cleaned_output[start:end].strip()
        path_text = match.group("path").strip()
        path = PurePosixPath(path_text)
        # Path-safety checks, most general first.
        if path.is_absolute() or ".." in path.parts:
            raise ValueError(f"generated file path must stay within project-relative src/, got {path_text!r}")
        if not path.parts or path.parts[0] != "src":
            raise ValueError(f"generated file path must stay under src/, got {path_text!r}")
        # The path must begin with every segment of the output directory.
        if tuple(path.parts[: len(output_prefix.parts)]) != output_prefix.parts:
            raise ValueError(
                f"generated file path {path_text!r} is outside the requested output directory {output_dir!r}"
            )

        content = _strip_code_fence(block).strip()
        if not content:
            raise ValueError(f"generated file {path_text!r} was empty")
        # Normalize to exactly one trailing newline.
        payloads.append((path.as_posix(), content.rstrip() + "\n"))

    return payloads
672
+
673
+
674
def _summarize_generated_task_output(
    project_root: Path,
    task: ImplementationTask,
    generated_files: list[Path],
) -> dict[str, Any]:
    """Summarize a finished task's files and exported symbols for later prompts."""
    buckets: dict[str, list[str]] = {
        "exported_types": [],
        "exported_functions": [],
        "exported_classes": [],
        "exported_values": [],
    }
    relative_files: list[str] = []

    for generated in generated_files:
        relative_files.append(generated.relative_to(project_root).as_posix())
        per_file = _extract_export_summary(generated.read_text(encoding="utf-8"))
        for key, names in buckets.items():
            names.extend(per_file[key])

    summary: dict[str, Any] = {
        "task_id": task.task_id,
        "task_title": task.title,
        "directory": task.output_dir,
        "files": relative_files,
    }
    # De-duplicate each export bucket while keeping first-seen order.
    summary.update({key: _ordered_unique(names) for key, names in buckets.items()})
    return summary
703
+
704
+
705
def _extract_export_summary(content: str) -> dict[str, list[str]]:
    """Scan TypeScript source for exported symbol names, bucketed by kind.

    Direct ``export type/class/function/const`` declarations are collected
    via the module-level regexes; named export blocks (``export { A as B }``)
    are then parsed item by item, classifying each as a type or a value.
    Each bucket is de-duplicated preserving first-seen order.
    """
    summary = {
        "exported_types": [match.group("name") for match in EXPORT_TYPE_RE.finditer(content)],
        "exported_functions": [match.group("name") for match in EXPORT_FUNCTION_RE.finditer(content)],
        "exported_classes": [match.group("name") for match in EXPORT_CLASS_RE.finditer(content)],
        "exported_values": [match.group("name") for match in EXPORT_VALUE_RE.finditer(content)],
    }

    for match in EXPORT_NAMED_BLOCK_RE.finditer(content):
        body = match.group("body")
        # `export type { ... }` marks every item in the block as a type.
        block_is_type = bool(match.group("type_prefix"))
        for raw_item in body.split(","):
            item = raw_item.strip()
            if not item:
                continue
            item_is_type = block_is_type
            # A per-item `type X` prefix also marks that item as a type.
            if item.startswith("type "):
                item_is_type = True
                item = item[5:].strip()
            # For `A as B`, the externally visible name is B.
            exported_name = item.split(" as ")[-1].strip()
            if not exported_name:
                continue
            bucket = "exported_types" if item_is_type else "exported_values"
            summary[bucket].append(exported_name)

    return {key: _ordered_unique(values) for key, values in summary.items()}
731
+
732
+
733
+ def _format_prior_task_summary(summary: dict[str, Any]) -> list[str]:
734
+ lines = [
735
+ f"- Task {summary.get('task_id') or '(unknown)'}: {summary.get('task_title') or '(untitled task)'}",
736
+ f" Directory: {summary.get('directory') or '(unknown directory)'}",
737
+ ]
738
+
739
+ files = [str(item) for item in summary.get("files", []) if str(item).strip()]
740
+ if files:
741
+ lines.append(f" Files: {', '.join(files)}")
742
+
743
+ for label, key in (
744
+ ("Exported types", "exported_types"),
745
+ ("Exported functions", "exported_functions"),
746
+ ("Exported classes", "exported_classes"),
747
+ ("Other exported values", "exported_values"),
748
+ ):
749
+ items = [str(item) for item in summary.get(key, []) if str(item).strip()]
750
+ if items:
751
+ lines.append(f" {label}: {', '.join(items)}")
752
+
753
+ return lines
754
+
755
+
756
+ def _build_traceability_comment(
757
+ plan: ImplementationPlan,
758
+ task: ImplementationTask,
759
+ dependency_documents: list[DependencyDocument],
760
+ ) -> str:
761
+ lines = [
762
+ "@generated-by: codd implement",
763
+ f"@generated-from: {plan.path.as_posix()} ({plan.node_id})",
764
+ f"@task-id: {task.task_id}",
765
+ f"@task-title: {task.title}",
766
+ ]
767
+ for document in dependency_documents:
768
+ lines.append(f"@generated-from: {document.path.as_posix()} ({document.node_id})")
769
+ return "\n".join(lines)
770
+
771
+
772
+ def _prepend_traceability_comment(relative_path: str, comment_block: str, content: str) -> str:
773
+ suffix = PurePosixPath(relative_path).suffix
774
+ if suffix not in {".ts", ".tsx", ".js", ".jsx"}:
775
+ return content
776
+
777
+ formatted_comment = "\n".join(f"// {line}" for line in comment_block.splitlines())
778
+ stripped_content = content.lstrip()
779
+ if stripped_content.startswith("// @generated-by: codd implement"):
780
+ return content
781
+ return f"{formatted_comment}\n\n{content.lstrip()}"
782
+
783
+
784
+ def _strip_code_fence(block: str) -> str:
785
+ stripped = block.strip()
786
+ fenced = re.match(r"^```(?:[a-zA-Z0-9_+-]+)?\s*\n(?P<body>.*)\n```$", stripped, re.DOTALL)
787
+ if fenced:
788
+ return fenced.group("body")
789
+ return stripped
790
+
791
+
792
+ def _looks_like_tsx(content: str) -> bool:
793
+ return bool(re.search(r"</?[A-Z][A-Za-z0-9]*|return\s*\(\s*<", content))
794
+
795
+
796
+ def _split_deliverable_chunks(text: str) -> list[str]:
797
+ chunks = re.split(r"[、/]", text or "")
798
+ return [re.sub(r"\s+", " ", chunk).strip(" ・") for chunk in chunks if chunk.strip(" ・")]
799
+
800
+
801
def _derive_task_slug(title: str, module_hint: str, task_id: str) -> str:
    """Map a task description onto a stable directory slug via keyword buckets."""
    haystack = " ".join([title, module_hint]).casefold()
    buckets = (
        ("project_initialization", ["bootstrap", "project", "基盤", "初期化"]),
        ("database_foundation", ["database", "db", "prisma", "schema", "rls", "sql"]),
        ("authentication", ["auth", "oauth", "jwt", "session", "認証", "認可", "rbac", "google"]),
        ("common_middleware", ["middleware", "request", "tenant", "監査", "role"]),
        ("ui_foundation", ["ui", "layout", "screen", "component", "ux"]),
        ("integration", ["integration", "stripe", "line", "sendgrid", "bunny", "4ms"]),
        ("testing", ["test", "lint", "eslint", "quality"]),
    )
    for slug, keywords in buckets:
        if any(keyword in haystack for keyword in keywords):
            return slug

    # No bucket matched: derive a generic slug from the text, then the ID.
    fallback = _slug_from_text(title) or _slug_from_text(module_hint)
    return fallback if fallback else f"task_{_slug_from_text(task_id)}"
820
+
821
+
822
+ def _slug_from_text(text: str) -> str:
823
+ ascii_text = re.sub(r"[^a-zA-Z0-9]+", "_", text).strip("_").lower()
824
+ ascii_text = re.sub(r"_+", "_", ascii_text)
825
+ return ascii_text
826
+
827
+
828
+ def _ordered_unique(items: list[str]) -> list[str]:
829
+ seen: set[str] = set()
830
+ ordered: list[str] = []
831
+ for item in items:
832
+ if item in seen:
833
+ continue
834
+ seen.add(item)
835
+ ordered.append(item)
836
+ return ordered
837
+
838
+
839
+ def _clean_text_block(text: str) -> str:
840
+ return "\n".join(line.rstrip() for line in text.strip().splitlines())
841
+
842
+
843
+ def _is_detailed_design_path(path: Path | str) -> bool:
844
+ path_text = path.as_posix() if isinstance(path, Path) else str(path)
845
+ parts = PurePosixPath(path_text).parts
846
+ return len(parts) >= 2 and parts[0] == "docs" and parts[1] == "detailed_design"