codd-dev 0.2.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
codd/verifier.py ADDED
@@ -0,0 +1,426 @@
1
+ """CoDD verifier for typecheck and test validation with design traceability."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import os
7
+ import re
8
+ import shlex
9
+ import subprocess
10
+ from dataclasses import dataclass
11
+ from datetime import datetime
12
+ from pathlib import Path
13
+ from typing import Any
14
+
15
+ from codd.config import load_project_config
16
+
17
+
18
@dataclass(frozen=True)
class TypecheckError:
    """A single tsc diagnostic parsed from typecheck output (see TSC_ERROR_RE)."""

    file_path: str  # path as reported by tsc (may be relative to the project root)
    line: int  # 1-based line number from the diagnostic
    col: int  # 1-based column number from the diagnostic
    code: str  # tsc error code, e.g. "TS2322"
    message: str  # diagnostic message text, stripped
25
+
26
+
27
@dataclass(frozen=True)
class TypecheckResult:
    """Aggregate outcome of one typecheck run."""

    success: bool  # True when the typecheck command exited with code 0
    error_count: int  # number of parsed diagnostics (len(errors))
    errors: tuple[TypecheckError, ...]  # parsed tsc diagnostics
32
+
33
+
34
@dataclass(frozen=True)
class TestFailure:
    """One failed jest assertion, identified by suite file and full test name."""

    test_file_path: str  # path of the failing test suite file
    test_name: str  # jest "fullName" of the failing assertion
    failure_messages: tuple[str, ...]  # raw jest failure-message strings
39
+
40
+
41
@dataclass(frozen=True)
class TestResult:
    """Aggregate outcome of one test run, parsed from jest's JSON payload."""

    success: bool  # jest's own "success" flag AND a zero exit code
    total: int  # jest numTotalTests
    passed: int  # jest numPassedTests
    failed: int  # jest numFailedTests
    skipped: int  # jest numPendingTests
    failures: tuple[TestFailure, ...]  # individual failed assertions
49
+
50
+
51
@dataclass(frozen=True)
class DesignRef:
    """A design-document reference traced from a failing source file."""

    node_id: str  # CoDD graph node id from the @generated-from header
    doc_path: str  # design document path from the @generated-from header
    trace_source: str  # "typecheck_error" or "test_failure"
    source_file: str  # path (as a string) of the file carrying the header
57
+
58
+
59
@dataclass(frozen=True)
class VerifyResult:
    """Final result of a verify run, including traced design references."""

    success: bool  # True when both typecheck and tests succeeded
    typecheck: TypecheckResult  # typecheck phase outcome
    tests: TestResult  # test phase outcome
    design_refs: tuple[DesignRef, ...]  # deduplicated traced references
    warnings: tuple[str, ...]  # deduplicated warnings gathered while tracing
    report_path: str  # path of the generated markdown report
67
+
68
+
69
class VerifyPreflightError(Exception):
    """Raised when the target project is missing required build/test inputs.

    Preflight checks for package.json, tsconfig.json, and node_modules under
    the project root before any commands run (see _Verifier._preflight_check).
    """
71
+
72
+
73
# Fallback verify configuration; project config may override any key
# via its "verify" section (see _load_project_config).
DEFAULT_VERIFY_CONFIG: dict[str, Any] = {
    "typecheck_command": "npx tsc --noEmit",
    "test_command": "npx jest --ci --json --outputFile=.codd/test-results.json",
    "test_output_file": ".codd/test-results.json",
    "report_output": "docs/test/verify_report.md",
    "test_pattern": "tests/unit/sprint_{sprint}/**/*.test.ts",
}

# Matches "// @generated-from: <doc path> (<node id>)" header lines in
# generated TypeScript files; used to trace failures to design documents.
GENERATED_FROM_RE = re.compile(
    r"^//\s*@generated-from:\s*(?P<path>.+?)\s*\((?P<node_id>[^)]+)\)\s*$",
    re.MULTILINE,
)
# Matches one tsc diagnostic line, e.g. "src/a.ts(3,5): error TS2322: ...".
TSC_ERROR_RE = re.compile(
    r"^(?P<file>[^(]+)\((?P<line>\d+),(?P<col>\d+)\):\s*error\s+(?P<code>TS\d+):\s*(?P<message>.+)$",
    re.MULTILINE,
)
# Matches ES-module "import ... from '<path>'" statements in test files.
TS_IMPORT_RE = re.compile(
    r"^\s*import\s+.*?\s+from\s+['\"](?P<path>[^'\"]+)['\"]",
    re.MULTILINE,
)
93
+
94
+
95
+ def _propagate_targets(design_refs: tuple[DesignRef, ...]) -> tuple[str, ...]:
96
+ return tuple(dict.fromkeys(ref.node_id for ref in design_refs))
97
+
98
+
99
def run_verify(
    project_root: Path,
    sprint: int | None = None,
) -> VerifyResult:
    """Run build + test verification and trace failures to design documents."""
    verify_config = _load_project_config(project_root)
    return _Verifier(project_root.resolve(), verify_config).run(sprint)
107
+
108
+
109
def _load_project_config(project_root: Path) -> dict[str, Any]:
    """Load merged CoDD config and extract the verify section."""
    merged = dict(DEFAULT_VERIFY_CONFIG)
    verify_section = load_project_config(project_root.resolve()).get("verify", {})
    # An explicit "verify: null" in the config means "use the defaults".
    if verify_section is None:
        return merged
    if not isinstance(verify_section, dict):
        raise ValueError("codd verify config must be a mapping")
    merged.update(verify_section)
    return merged
120
+
121
+
122
class _Verifier:
    """Runs one verify pass (typecheck + tests + design tracing) for a project."""

    def __init__(self, project_root: Path, config: dict[str, Any]):
        # project_root is resolved by run_verify() before construction.
        self.project_root = project_root
        self.config = config  # merged verify config (defaults + project overrides)

    def run(self, sprint: int | None = None) -> VerifyResult:
        """Execute typecheck and tests, trace failures, and write the report.

        Args:
            sprint: When given, restricts the test run to that sprint's tests
                via the configured test pattern.

        Returns:
            A VerifyResult whose report_path points at the generated markdown.

        Raises:
            VerifyPreflightError: If required project inputs are missing.
        """
        warnings: list[str] = []
        self._preflight_check()

        # Ensure the scratch directory for jest's JSON output file exists.
        (self.project_root / ".codd").mkdir(exist_ok=True)

        typecheck_result = self._run_typecheck()
        test_result = self._run_tests(sprint)

        # Only trace design references for the phases that failed.
        design_refs: list[DesignRef] = []
        if not typecheck_result.success:
            refs, new_warnings = self._trace_from_typecheck_errors(typecheck_result.errors)
            design_refs.extend(refs)
            warnings.extend(new_warnings)
        if not test_result.success:
            refs, new_warnings = self._trace_from_test_failures(test_result.failures)
            design_refs.extend(refs)
            warnings.extend(new_warnings)

        # Deduplicate refs by (node_id, source_file), keeping first-seen order.
        unique_refs: list[DesignRef] = []
        seen_ref_keys: set[tuple[str, str]] = set()
        for ref in design_refs:
            key = (ref.node_id, ref.source_file)
            if key in seen_ref_keys:
                continue
            seen_ref_keys.add(key)
            unique_refs.append(ref)

        # dict.fromkeys deduplicates while preserving insertion order.
        unique_warnings = tuple(dict.fromkeys(warnings))
        success = typecheck_result.success and test_result.success
        # Interim result (report_path not yet known) used only to render the report.
        interim = VerifyResult(
            success=success,
            typecheck=typecheck_result,
            tests=test_result,
            design_refs=tuple(unique_refs),
            warnings=unique_warnings,
            report_path="",
        )
        report_path = self._generate_report(interim)
        return VerifyResult(
            success=success,
            typecheck=typecheck_result,
            tests=test_result,
            design_refs=tuple(unique_refs),
            warnings=unique_warnings,
            report_path=report_path,
        )

    def _preflight_check(self) -> None:
        """Raise VerifyPreflightError unless the Node/TypeScript inputs exist."""
        missing = []
        for name in ("package.json", "tsconfig.json", "node_modules"):
            if not (self.project_root / name).exists():
                missing.append(name)
        if missing:
            raise VerifyPreflightError(f"Missing: {', '.join(missing)}. Run npm install first.")

    def _run_typecheck(self) -> TypecheckResult:
        """Run the configured typecheck command and parse tsc-style diagnostics."""
        proc = subprocess.run(
            shlex.split(self.config["typecheck_command"]),
            cwd=str(self.project_root),
            env=os.environ.copy(),
            capture_output=True,
            text=True,
        )
        # Scan both streams; diagnostics may land on stdout or stderr.
        output = proc.stdout + proc.stderr
        errors = tuple(
            TypecheckError(
                file_path=match.group("file").strip(),
                line=int(match.group("line")),
                col=int(match.group("col")),
                code=match.group("code"),
                message=match.group("message").strip(),
            )
            for match in TSC_ERROR_RE.finditer(output)
        )
        # Success is keyed on the exit code, not on whether any lines parsed.
        return TypecheckResult(
            success=proc.returncode == 0,
            error_count=len(errors),
            errors=errors,
        )

    def _run_tests(self, sprint: int | None) -> TestResult:
        """Run the configured test command and parse jest's JSON results."""
        output_path = self._resolve_path(self.config.get("test_output_file", ".codd/test-results.json"))
        # Remove stale output so a crashed run is not mistaken for fresh results.
        if output_path.exists():
            output_path.unlink()

        command = shlex.split(self.config["test_command"])
        if sprint is not None:
            pattern = self.config.get("test_pattern", "tests/unit/sprint_{sprint}/**/*.test.ts")
            # NOTE(review): jest's --testPathPattern expects a regex, but the
            # default pattern looks like a glob ("**") — confirm jest accepts it.
            command.append(f"--testPathPattern={pattern.format(sprint=sprint)}")

        proc = subprocess.run(
            command,
            cwd=str(self.project_root),
            env=os.environ.copy(),
            capture_output=True,
            text=True,
        )
        data = self._load_test_output(output_path, proc.stdout)

        # Collect per-assertion failures from failed suites only.
        failures: list[TestFailure] = []
        for suite in data.get("testResults", []):
            if suite.get("status") != "failed":
                continue
            for assertion in suite.get("assertionResults", []):
                if assertion.get("status") != "failed":
                    continue
                failures.append(
                    TestFailure(
                        test_file_path=str(suite.get("testFilePath") or suite.get("name") or ""),
                        test_name=str(assertion.get("fullName", "")),
                        failure_messages=tuple(assertion.get("failureMessages", [])),
                    )
                )

        # Require both jest's own success flag and a zero exit code.
        return TestResult(
            success=bool(data.get("success", False) and proc.returncode == 0),
            total=int(data.get("numTotalTests", 0)),
            passed=int(data.get("numPassedTests", 0)),
            failed=int(data.get("numFailedTests", 0)),
            skipped=int(data.get("numPendingTests", 0)),
            failures=tuple(failures),
        )

    def _load_test_output(self, output_path: Path, stdout: str) -> dict[str, Any]:
        """Load jest JSON from the output file, stdout, or a zeroed fallback."""
        if output_path.exists():
            payload = json.loads(output_path.read_text(encoding="utf-8"))
            if not isinstance(payload, dict):
                raise ValueError("jest output must be a JSON object")
            return payload

        # Fall back to stdout when the output file was not written.
        raw = stdout.strip()
        if raw:
            # NOTE(review): assumes stdout is pure JSON; any jest warnings mixed
            # into stdout would make json.loads raise — confirm against real runs.
            payload = json.loads(raw)
            if not isinstance(payload, dict):
                raise ValueError("jest stdout JSON must be an object")
            return payload

        # No output at all: report an empty, failed run.
        return {
            "success": False,
            "numTotalTests": 0,
            "numPassedTests": 0,
            "numFailedTests": 0,
            "numPendingTests": 0,
            "testResults": [],
        }

    def _trace_from_typecheck_errors(
        self, errors: tuple[TypecheckError, ...]
    ) -> tuple[list[DesignRef], list[str]]:
        """Map each typecheck error's file to its @generated-from design refs."""
        refs: list[DesignRef] = []
        warnings: list[str] = []
        for error in errors:
            ts_file = Path(error.file_path)
            if not ts_file.is_absolute():
                ts_file = self.project_root / ts_file
            new_refs, new_warnings = self._extract_design_refs(ts_file, "typecheck_error")
            refs.extend(new_refs)
            warnings.extend(new_warnings)
        return refs, warnings

    def _trace_from_test_failures(
        self, failures: tuple[TestFailure, ...]
    ) -> tuple[list[DesignRef], list[str]]:
        """Follow failing tests' relative imports to design-tagged source files."""
        refs: list[DesignRef] = []
        warnings: list[str] = []
        for failure in failures:
            test_file = Path(failure.test_file_path)
            if not test_file.is_absolute():
                test_file = self.project_root / test_file
            if not test_file.exists() or test_file.is_dir():
                continue

            content = test_file.read_text(encoding="utf-8")
            for import_match in TS_IMPORT_RE.finditer(content):
                import_path = import_match.group("path")
                # Only relative imports can point at project sources.
                if not import_path.startswith("."):
                    continue
                # Use the first candidate path that exists on disk.
                for candidate in self._resolve_import_candidates(test_file, import_path):
                    if not candidate.exists():
                        continue
                    new_refs, new_warnings = self._extract_design_refs(candidate, "test_failure")
                    refs.extend(new_refs)
                    warnings.extend(new_warnings)
                    break
        return refs, warnings

    def _resolve_import_candidates(self, test_file: Path, import_path: str) -> tuple[Path, ...]:
        """Expand a relative TS import into the file paths it could resolve to."""
        base = (test_file.parent / import_path).resolve()
        candidates = [base]
        if base.suffix:
            # Path already has a suffix; still try the .ts/.tsx variants.
            candidates.extend([base.with_suffix(".ts"), base.with_suffix(".tsx")])
        else:
            candidates.extend(
                [
                    base.with_suffix(".ts"),
                    base.with_suffix(".tsx"),
                    base / "index.ts",
                    base / "index.tsx",
                ]
            )

        # Deduplicate while preserving candidate priority order.
        unique: list[Path] = []
        seen: set[Path] = set()
        for candidate in candidates:
            if candidate in seen:
                continue
            seen.add(candidate)
            unique.append(candidate)
        return tuple(unique)

    def _extract_design_refs(
        self, ts_file: Path, trace_source: str
    ) -> tuple[list[DesignRef], list[str]]:
        """Parse @generated-from headers at the top of *ts_file* into DesignRefs.

        Only the first 30 lines are scanned. A missing header produces a
        warning rather than an error; an unreadable file yields nothing.
        """
        warnings: list[str] = []
        refs: list[DesignRef] = []
        if not ts_file.exists():
            return refs, warnings

        try:
            header = "\n".join(ts_file.read_text(encoding="utf-8").splitlines()[:30])
        except OSError:
            # Unreadable file: trace silently yields nothing.
            return refs, warnings

        matches = list(GENERATED_FROM_RE.finditer(header))
        if not matches:
            warnings.append(f"No @generated-from header in {ts_file} — manual review required")
            return refs, warnings

        for match in matches:
            refs.append(
                DesignRef(
                    node_id=match.group("node_id"),
                    doc_path=match.group("path"),
                    trace_source=trace_source,
                    source_file=str(ts_file),
                )
            )
        return refs, warnings

    def _generate_report(self, result: VerifyResult) -> str:
        """Render the verify report markdown to disk and return its path."""
        report_path = self._resolve_path(self.config.get("report_output", "docs/test/verify_report.md"))
        report_path.parent.mkdir(parents=True, exist_ok=True)

        # Summary section is always emitted; detail sections only when non-empty.
        lines = [
            "# CoDD Verify Report",
            "",
            f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "",
            f"## Result: {'PASS' if result.success else 'FAIL'}",
            "",
            "## Typecheck",
            f"- Status: {'PASS' if result.typecheck.success else 'FAIL'}",
            f"- Errors: {result.typecheck.error_count}",
            "",
            "## Tests",
            f"- Status: {'PASS' if result.tests.success else 'FAIL'}",
            (
                f"- Total: {result.tests.total} | Passed: {result.tests.passed} | "
                f"Failed: {result.tests.failed} | Skipped: {result.tests.skipped}"
            ),
        ]

        if result.typecheck.errors:
            lines.extend(["", "### Typecheck Errors"])
            for error in result.typecheck.errors:
                lines.append(
                    f"- `{error.file_path}:{error.line}:{error.col}` {error.code}: {error.message}"
                )

        if result.tests.failures:
            lines.extend(["", "### Test Failures"])
            for failure in result.tests.failures:
                lines.append(f"- `{failure.test_file_path}` - {failure.test_name}")

        if result.design_refs:
            lines.extend(["", "## Design Documents to Review"])
            for ref in result.design_refs:
                lines.append(
                    f"- **{ref.node_id}** -> `{ref.doc_path}` (from `{ref.source_file}`, via {ref.trace_source})"
                )
            targets = _propagate_targets(result.design_refs)
            if targets:
                lines.extend(["", "## Suggested Propagation Targets"])
                for target in targets:
                    lines.append(f"- `{target}`")

        if result.warnings:
            lines.extend(["", "## Warnings"])
            for warning in result.warnings:
                lines.append(f"- {warning}")

        report_path.write_text("\n".join(lines) + "\n", encoding="utf-8")
        return str(report_path)

    def _resolve_path(self, raw_path: str) -> Path:
        """Resolve *raw_path* against the project root unless already absolute."""
        path = Path(raw_path)
        if path.is_absolute():
            return path
        return self.project_root / path
@@ -0,0 +1,241 @@
1
+ Metadata-Version: 2.4
2
+ Name: codd-dev
3
+ Version: 0.2.0a1
4
+ Summary: CoDD: Coherence-Driven Development — cross-artifact change impact analysis
5
+ Project-URL: Homepage, https://github.com/yohey-w/shogun-codd
6
+ Project-URL: Repository, https://github.com/yohey-w/shogun-codd
7
+ Project-URL: Issues, https://github.com/yohey-w/shogun-codd/issues
8
+ Author-email: Yohei Watanabe <yohey-w@users.noreply.github.com>
9
+ License-Expression: MIT
10
+ License-File: LICENSE
11
+ Keywords: change-impact,claude-code,dependency-graph,plugin,software-engineering
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Topic :: Software Development :: Quality Assurance
17
+ Requires-Python: >=3.10
18
+ Requires-Dist: click>=8.0
19
+ Requires-Dist: pyyaml>=6.0
20
+ Provides-Extra: mcp
21
+ Provides-Extra: scan
22
+ Requires-Dist: tree-sitter-java>=0.22; extra == 'scan'
23
+ Requires-Dist: tree-sitter-python>=0.22; extra == 'scan'
24
+ Requires-Dist: tree-sitter-typescript>=0.22; extra == 'scan'
25
+ Requires-Dist: tree-sitter>=0.22; extra == 'scan'
26
+ Description-Content-Type: text/markdown
27
+
28
+ # CoDD — Coherence-Driven Development
29
+
30
+ **CoDD keeps AI-built systems coherent as requirements change.**
31
+
32
+ Give CoDD your requirements and constraints. AI generates the design top-down, derives implementation and test strategy from those artifacts, and traces change impact across a dependency graph — so nothing falls out of sync.
33
+
34
+ > *Harnesses tell agents how to work. CoDD keeps artifacts coherent.*
35
+
36
+ ```
37
+ Harness (CLAUDE.md, AGENTS.md, Hooks, Skills) ← Rules, guardrails, flow
38
+ └─ CoDD (methodology) ← Operates on the harness flow
39
+ └─ Design docs (docs/*.md) ← Artifacts CoDD generates and maintains
40
+ ```
41
+
42
+ **Public Alpha** — `pip install codd-dev` — init / scan / impact are stable today; validate is in alpha.
43
+
44
+ ## The Problem
45
+
46
+ AI can generate code from specs. But what happens when requirements change mid-project?
47
+
48
+ - Which design docs are affected?
49
+ - Which tests need updating?
50
+ - Which API contracts broke?
51
+ - Did anyone forget to update the database migration?
52
+
53
+ Spec-driven tools help you write specs first. They don't track what happens when those specs change. That's where CoDD comes in.
54
+
55
+ ### Why not just AGENTS.md or hooks?
56
+
57
+ AGENTS.md, CLAUDE.md, and hooks are **harness infrastructure** — they tell agents how to behave. CoDD is a **coherence layer** that sits on top of any harness and keeps design artifacts, implementation, and tests in sync when requirements change. CoDD is harness-agnostic: it works with Claude Code, GitHub Copilot, Cursor, or any agent framework.
58
+
59
+ ## Core Principle: Derive, Don't Configure
60
+
61
+ **Upstream artifacts + best practices = downstream is self-evident.**
62
+
63
+ - `system_design.md` says "Next.js + Supabase" → test strategy is vitest + Playwright. No config needed.
64
+ - `api_design.md` says "FastAPI" → pytest + httpx. No config needed.
65
+ - Requirements change → `codd impact` shows exactly what's affected.
66
+
67
+ You define requirements and constraints. AI derives everything else.
68
+
69
+ ## How It Works
70
+
71
+ ```
72
+ Phase 1: Requirements (human) ─┐
73
+ Phase 2: Design generation (AI) │ V-Model left side
74
+ Phase 3: Scan (auto) │
75
+ Phase 4: Implementation (AI) ─┘
76
+ Phase 5: Verification (AI + human) ─── V-Model right side
77
+ Phase 6: Change impact analysis ─┐
78
+ Phase 7: Change propagation │ Continuous coherence
79
+ Phase 8: Customer review ─┘
80
+ ```
81
+
82
+ Design docs are generated in **Wave order** — each wave depends on the previous:
83
+
84
+ ```
85
+ Wave 1: Acceptance criteria + ADR (← requirements only)
86
+ Wave 2: System design (← req + Wave 1)
87
+ Wave 3: Database design + API design (← req + Wave 1-2)
88
+ Wave 4: UI/UX design (← req + Wave 1-3)
89
+ Wave 5: Implementation plan (← all above)
90
+ ```
91
+
92
+ Verification runs bottom-up (IPA Common Frame):
93
+ ```
94
+ Unit tests ← verifies detailed design
95
+ Integration ← verifies system design
96
+ E2E / System ← verifies requirements + acceptance criteria
97
+ ```
98
+
99
+ ## Three Layers (Don't Confuse Them)
100
+
101
+ ```
102
+ Harness (CLAUDE.md, Hooks, Skills) ← Rules, guardrails, flow
103
+ └─ CoDD (methodology) ← Operates on the harness flow
104
+ └─ Design docs (docs/*.md) ← Artifacts CoDD generates and maintains
105
+ ```
106
+
107
+ - **Harness** = how agents work (any harness: Claude Code, Copilot, Cursor, etc.)
108
+ - **CoDD** = how artifacts stay coherent across changes
109
+ - **Docs** = what CoDD produces and maintains
110
+
111
+ CoDD is **harness-agnostic**. It runs on top of whatever agent framework you use.
112
+
113
+ ## Quick Start
114
+
115
+ ```bash
116
+ # Install
117
+ pip install codd-dev
118
+
119
+ # Initialize
120
+ codd init --project-name "my-project" --language "typescript"
121
+
122
+ # Scan — build dependency graph from frontmatter
123
+ codd scan
124
+
125
+ # Impact — what breaks if I change this?
126
+ codd impact --diff HEAD~1
127
+ ```
128
+
129
+ ## Real Project: Osato LMS
130
+
131
+ CoDD was dogfooded on a production LMS (Learning Management System). All design documents, implementation code, and tests were generated by AI following CoDD's workflow. No manual review by the client.
132
+
133
+ ```
134
+ docs/
135
+ ├── requirements/ # What to build (client agreement, SSoT)
136
+ ├── design/ # How to build it (system design, API, DB, UI)
137
+ ├── detailed_design/ # Module-level specs
138
+ ├── plan/ # WBS, schedule, RACI
139
+ ├── governance/ # ADR, meeting minutes, change requests
140
+ ├── test/ # Acceptance criteria, test plans
141
+ ├── operations/ # Runbooks, monitoring design
142
+ └── infra/ # Infrastructure specs
143
+ ```
144
+
145
+ Every doc has CoDD frontmatter declaring its dependencies:
146
+
147
+ ```yaml
148
+ ---
149
+ codd:
150
+ node_id: "design:api-design"
151
+ depends_on:
152
+ - id: "design:system-design"
153
+ relation: derives_from
154
+ - id: "req:lms-requirements-v2.0"
155
+ relation: implements
156
+ ---
157
+ ```
158
+
159
+ When the requirements changed mid-project, `codd impact` identified exactly which design docs, API endpoints, and test cases needed updating — and AI fixed them automatically.
160
+
161
+ ## How CoDD Differs from Spec Kit / OpenSpec
162
+
163
+ | | Spec Kit | OpenSpec | **CoDD** |
164
+ |--|----------|---------|----------|
165
+ | Write specs first | Yes | Yes | Yes |
166
+ | AI generates code from specs | Yes | Yes | Yes |
167
+ | **Change propagation** | No | No | **Dependency graph + impact analysis** |
168
+ | **Derive test strategy from architecture** | No | No | **Automatic (derive, don't configure)** |
169
+ | **V-Model verification** | No | No | **Unit → Integration → E2E** |
170
+ | **Impact analysis on change** | No | No | **codd impact --diff HEAD~1** |
171
+ | Harness-agnostic | GitHub Copilot focused | Multi-agent | **Any harness** |
172
+
173
+ **Spec Kit and OpenSpec answer "how do I start?" CoDD answers "how do I keep going when things change?"**
174
+
175
+ ## What's Available Now (v0.2.0-alpha.1)
176
+
177
+ | Command | Status | What it does |
178
+ |---------|--------|-------------|
179
+ | `codd init` | **Stable** | Initialize CoDD in any project |
180
+ | `codd scan` | **Stable** | Build dependency graph from frontmatter |
181
+ | `codd impact` | **Stable** | Analyze change impact (Green/Amber/Gray bands) |
182
+ | `codd validate` | **Alpha** | Check frontmatter integrity and graph consistency |
183
+ | `codd generate` | Experimental | Generate design docs in Wave order |
184
+ | `codd plan` | Experimental | Wave execution status and auto-initialization |
185
+ | `codd verify` | Experimental | V-Model verification (typecheck + tests → design tracing) |
186
+ | `codd implement` | Experimental | Design-to-code generation |
187
+
188
+ ### Alpha Scope: What We Promise / What We Don't
189
+
190
+ | We promise | We don't promise (yet) |
191
+ |------------|----------------------|
192
+ | Frontmatter-based dependency graph works | Full semantic dependency types beyond Wave order |
193
+ | `codd impact` correctly identifies affected nodes | Automatic fix of affected nodes |
194
+ | `codd validate` catches broken references and cycles | Exhaustive validation of all edge cases |
195
+ | Harness-agnostic (no vendor lock-in) | Turnkey integrations for every harness |
196
+ | Derivation principle: architecture → test strategy | Fully automated end-to-end generation pipeline |
197
+ | MIT license, stable CLI interface for core commands | API stability for experimental commands |
198
+
199
+ ## Frontmatter is the Single Source of Truth
200
+
201
+ CoDD uses YAML frontmatter in Markdown files to declare dependencies. `graph.db` is a derived cache — regenerated on every `codd scan`. No separate config files to maintain.
202
+
203
+ ```yaml
204
+ ---
205
+ codd:
206
+ node_id: "design:system-design"
207
+ type: design
208
+ depends_on:
209
+ - id: "req:lms-requirements-v2.0"
210
+ relation: implements
211
+ conventions:
212
+ - targets: ["db:rls_policies"]
213
+ reason: "Tenant isolation is non-negotiable"
214
+ ---
215
+ ```
216
+
217
+ ## Real-World Example: Derive, Don't Configure
218
+
219
+ ```
220
+ system_design.md says "Next.js + Supabase"
221
+ → Test strategy: vitest (unit) + Playwright (E2E). No config needed.
222
+
223
+ system_design.md says "FastAPI + Python"
224
+ → Test strategy: pytest (unit/integration) + httpx (API). No config needed.
225
+
226
+ system_design.md says "CLI tool in Go"
227
+ → Test strategy: go test (unit/integration). No config needed.
228
+ ```
229
+
230
+ The architecture determines the test strategy. CoDD derives it — you don't configure it.
231
+
232
+ ## Roadmap
233
+
234
+ - [ ] Semantic dependency types (requires, affects, verifies, implements)
235
+ - [ ] `codd verify` — full docs ↔ code ↔ tests coherence check
236
+ - [ ] Multi-agent integration examples (Claude Code, Copilot, Cursor)
237
+ - [ ] VS Code extension for impact visualization
238
+
239
+ ## License
240
+
241
+ MIT
@@ -0,0 +1,18 @@
1
+ codd/__init__.py,sha256=AfzNT2ZTbjpX8ZkkmfnaQ-z9Inc87NZQJembUY_ix4Q,73
2
+ codd/cli.py,sha256=3grEHt9wrVpS9nVf7hg655cbv0XIt-WEOFkaLXu1Uz8,12587
3
+ codd/config.py,sha256=oq05-BwllvyQvSyxjUoPqpw37-KVPU3Ioectl9Rclyw,1909
4
+ codd/defaults.yaml,sha256=Z43uS1bI00FO6-q37Qwr_pTkydgX4MOqA_njDmad7dk,512
5
+ codd/generator.py,sha256=qOq2jOoD0lPwTJpr10uRdt62wcGk8ibi6sLZBp8rwCI,24238
6
+ codd/graph.py,sha256=DnPID8kEnHsAMuSUq6-5H6P_145hBw295MtWWaJ14jc,10506
7
+ codd/hooks.py,sha256=qceJXLYvRGf7gDehKvth5HYpEy51xjoBtYcDPLXVy3I,3306
8
+ codd/implementer.py,sha256=BlOHrgGNHGrZhbhxg1PDthPj1BH1a4ViiGO3Jx217xg,33576
9
+ codd/planner.py,sha256=Cs3ewsOMajcOFw1YkRK54eLvCY1nd4C6uj1CLYPvHp4,22582
10
+ codd/propagate.py,sha256=k3UHInq7gSEafnlby1x-MaPkl_MzCcsIoNVrHhGPoSc,11869
11
+ codd/scanner.py,sha256=5cSIkrs9ozgPWVXEummbAEgTBwYNcxA4icxktrSBQNQ,18684
12
+ codd/validator.py,sha256=VXuvtqh4wjjxOlhWhkrwEke7OFOgfg5YE3sWXqHQkDg,16150
13
+ codd/verifier.py,sha256=Jp2ssijlKxbCzna6l9wsjKcXvu-UbNb9J5fSmH9WbsY,14645
14
+ codd_dev-0.2.0a1.dist-info/METADATA,sha256=jtt2DeB0kUkhYXuJfi-xk8BaTeFTht0v7qZNFmYunBQ,9560
15
+ codd_dev-0.2.0a1.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
16
+ codd_dev-0.2.0a1.dist-info/entry_points.txt,sha256=UDAbmLzJtylD_Ctz4DXjHbpVTAdgNTKOtp0EEYTR320,39
17
+ codd_dev-0.2.0a1.dist-info/licenses/LICENSE,sha256=lxe1LIqG7R8QrE4jJMD1IZvRzHTtcE5RnJHy75tSKz8,1062
18
+ codd_dev-0.2.0a1.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.29.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ codd = codd.cli:main