atdd 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atdd/__init__.py +0 -0
- atdd/cli.py +404 -0
- atdd/coach/__init__.py +0 -0
- atdd/coach/commands/__init__.py +0 -0
- atdd/coach/commands/add_persistence_metadata.py +215 -0
- atdd/coach/commands/analyze_migrations.py +188 -0
- atdd/coach/commands/consumers.py +720 -0
- atdd/coach/commands/infer_governance_status.py +149 -0
- atdd/coach/commands/initializer.py +177 -0
- atdd/coach/commands/interface.py +1078 -0
- atdd/coach/commands/inventory.py +565 -0
- atdd/coach/commands/migration.py +240 -0
- atdd/coach/commands/registry.py +1560 -0
- atdd/coach/commands/session.py +430 -0
- atdd/coach/commands/sync.py +405 -0
- atdd/coach/commands/test_interface.py +399 -0
- atdd/coach/commands/test_runner.py +141 -0
- atdd/coach/commands/tests/__init__.py +1 -0
- atdd/coach/commands/tests/test_telemetry_array_validation.py +235 -0
- atdd/coach/commands/traceability.py +4264 -0
- atdd/coach/conventions/session.convention.yaml +754 -0
- atdd/coach/overlays/__init__.py +2 -0
- atdd/coach/overlays/claude.md +2 -0
- atdd/coach/schemas/config.schema.json +34 -0
- atdd/coach/schemas/manifest.schema.json +101 -0
- atdd/coach/templates/ATDD.md +282 -0
- atdd/coach/templates/SESSION-TEMPLATE.md +327 -0
- atdd/coach/utils/__init__.py +0 -0
- atdd/coach/utils/graph/__init__.py +0 -0
- atdd/coach/utils/graph/urn.py +875 -0
- atdd/coach/validators/__init__.py +0 -0
- atdd/coach/validators/shared_fixtures.py +365 -0
- atdd/coach/validators/test_enrich_wagon_registry.py +167 -0
- atdd/coach/validators/test_registry.py +575 -0
- atdd/coach/validators/test_session_validation.py +1183 -0
- atdd/coach/validators/test_traceability.py +448 -0
- atdd/coach/validators/test_update_feature_paths.py +108 -0
- atdd/coach/validators/test_validate_contract_consumers.py +297 -0
- atdd/coder/__init__.py +1 -0
- atdd/coder/conventions/adapter.recipe.yaml +88 -0
- atdd/coder/conventions/backend.convention.yaml +460 -0
- atdd/coder/conventions/boundaries.convention.yaml +666 -0
- atdd/coder/conventions/commons.convention.yaml +460 -0
- atdd/coder/conventions/complexity.recipe.yaml +109 -0
- atdd/coder/conventions/component-naming.convention.yaml +178 -0
- atdd/coder/conventions/design.convention.yaml +327 -0
- atdd/coder/conventions/design.recipe.yaml +273 -0
- atdd/coder/conventions/dto.convention.yaml +660 -0
- atdd/coder/conventions/frontend.convention.yaml +542 -0
- atdd/coder/conventions/green.convention.yaml +1012 -0
- atdd/coder/conventions/presentation.convention.yaml +587 -0
- atdd/coder/conventions/refactor.convention.yaml +535 -0
- atdd/coder/conventions/technology.convention.yaml +206 -0
- atdd/coder/conventions/tests/__init__.py +0 -0
- atdd/coder/conventions/tests/test_adapter_recipe.py +302 -0
- atdd/coder/conventions/tests/test_complexity_recipe.py +289 -0
- atdd/coder/conventions/tests/test_component_taxonomy.py +278 -0
- atdd/coder/conventions/tests/test_component_urn_naming.py +165 -0
- atdd/coder/conventions/tests/test_thinness_recipe.py +286 -0
- atdd/coder/conventions/thinness.recipe.yaml +82 -0
- atdd/coder/conventions/train.convention.yaml +325 -0
- atdd/coder/conventions/verification.protocol.yaml +53 -0
- atdd/coder/schemas/design_system.schema.json +361 -0
- atdd/coder/validators/__init__.py +0 -0
- atdd/coder/validators/test_commons_structure.py +485 -0
- atdd/coder/validators/test_complexity.py +416 -0
- atdd/coder/validators/test_cross_language_consistency.py +431 -0
- atdd/coder/validators/test_design_system_compliance.py +413 -0
- atdd/coder/validators/test_dto_testing_patterns.py +268 -0
- atdd/coder/validators/test_green_cross_stack_layers.py +168 -0
- atdd/coder/validators/test_green_layer_dependencies.py +148 -0
- atdd/coder/validators/test_green_python_layer_structure.py +103 -0
- atdd/coder/validators/test_green_supabase_layer_structure.py +103 -0
- atdd/coder/validators/test_import_boundaries.py +396 -0
- atdd/coder/validators/test_init_file_urns.py +593 -0
- atdd/coder/validators/test_preact_layer_boundaries.py +221 -0
- atdd/coder/validators/test_presentation_convention.py +260 -0
- atdd/coder/validators/test_python_architecture.py +674 -0
- atdd/coder/validators/test_quality_metrics.py +420 -0
- atdd/coder/validators/test_station_master_pattern.py +244 -0
- atdd/coder/validators/test_train_infrastructure.py +454 -0
- atdd/coder/validators/test_train_urns.py +293 -0
- atdd/coder/validators/test_typescript_architecture.py +616 -0
- atdd/coder/validators/test_usecase_structure.py +421 -0
- atdd/coder/validators/test_wagon_boundaries.py +586 -0
- atdd/conftest.py +126 -0
- atdd/planner/__init__.py +1 -0
- atdd/planner/conventions/acceptance.convention.yaml +538 -0
- atdd/planner/conventions/appendix.convention.yaml +187 -0
- atdd/planner/conventions/artifact-naming.convention.yaml +852 -0
- atdd/planner/conventions/component.convention.yaml +670 -0
- atdd/planner/conventions/criteria.convention.yaml +141 -0
- atdd/planner/conventions/feature.convention.yaml +371 -0
- atdd/planner/conventions/interface.convention.yaml +382 -0
- atdd/planner/conventions/steps.convention.yaml +141 -0
- atdd/planner/conventions/train.convention.yaml +552 -0
- atdd/planner/conventions/wagon.convention.yaml +275 -0
- atdd/planner/conventions/wmbt.convention.yaml +258 -0
- atdd/planner/schemas/acceptance.schema.json +336 -0
- atdd/planner/schemas/appendix.schema.json +78 -0
- atdd/planner/schemas/component.schema.json +114 -0
- atdd/planner/schemas/feature.schema.json +197 -0
- atdd/planner/schemas/train.schema.json +192 -0
- atdd/planner/schemas/wagon.schema.json +281 -0
- atdd/planner/schemas/wmbt.schema.json +59 -0
- atdd/planner/validators/__init__.py +0 -0
- atdd/planner/validators/conftest.py +5 -0
- atdd/planner/validators/test_draft_wagon_registry.py +374 -0
- atdd/planner/validators/test_plan_cross_refs.py +240 -0
- atdd/planner/validators/test_plan_uniqueness.py +224 -0
- atdd/planner/validators/test_plan_urn_resolution.py +268 -0
- atdd/planner/validators/test_plan_wagons.py +174 -0
- atdd/planner/validators/test_train_validation.py +514 -0
- atdd/planner/validators/test_wagon_urn_chain.py +648 -0
- atdd/planner/validators/test_wmbt_consistency.py +327 -0
- atdd/planner/validators/test_wmbt_vocabulary.py +632 -0
- atdd/tester/__init__.py +1 -0
- atdd/tester/conventions/artifact.convention.yaml +257 -0
- atdd/tester/conventions/contract.convention.yaml +1009 -0
- atdd/tester/conventions/filename.convention.yaml +555 -0
- atdd/tester/conventions/migration.convention.yaml +509 -0
- atdd/tester/conventions/red.convention.yaml +797 -0
- atdd/tester/conventions/routing.convention.yaml +51 -0
- atdd/tester/conventions/telemetry.convention.yaml +458 -0
- atdd/tester/schemas/a11y.tmpl.json +17 -0
- atdd/tester/schemas/artifact.schema.json +189 -0
- atdd/tester/schemas/contract.schema.json +591 -0
- atdd/tester/schemas/contract.tmpl.json +95 -0
- atdd/tester/schemas/db.tmpl.json +20 -0
- atdd/tester/schemas/e2e.tmpl.json +17 -0
- atdd/tester/schemas/edge_function.tmpl.json +17 -0
- atdd/tester/schemas/event.tmpl.json +17 -0
- atdd/tester/schemas/http.tmpl.json +19 -0
- atdd/tester/schemas/job.tmpl.json +18 -0
- atdd/tester/schemas/load.tmpl.json +21 -0
- atdd/tester/schemas/metric.tmpl.json +19 -0
- atdd/tester/schemas/pack.schema.json +139 -0
- atdd/tester/schemas/realtime.tmpl.json +20 -0
- atdd/tester/schemas/rls.tmpl.json +18 -0
- atdd/tester/schemas/script.tmpl.json +16 -0
- atdd/tester/schemas/sec.tmpl.json +18 -0
- atdd/tester/schemas/storage.tmpl.json +18 -0
- atdd/tester/schemas/telemetry.schema.json +128 -0
- atdd/tester/schemas/telemetry_tracking_manifest.schema.json +143 -0
- atdd/tester/schemas/test_filename.schema.json +194 -0
- atdd/tester/schemas/test_intent.schema.json +179 -0
- atdd/tester/schemas/unit.tmpl.json +18 -0
- atdd/tester/schemas/visual.tmpl.json +18 -0
- atdd/tester/schemas/ws.tmpl.json +17 -0
- atdd/tester/utils/__init__.py +0 -0
- atdd/tester/utils/filename.py +300 -0
- atdd/tester/validators/__init__.py +0 -0
- atdd/tester/validators/cleanup_duplicate_headers.py +116 -0
- atdd/tester/validators/cleanup_duplicate_headers_v2.py +135 -0
- atdd/tester/validators/conftest.py +5 -0
- atdd/tester/validators/coverage_gap_report.py +321 -0
- atdd/tester/validators/fix_dual_ac_references.py +179 -0
- atdd/tester/validators/remove_duplicate_lines.py +93 -0
- atdd/tester/validators/test_acceptance_urn_filename_mapping.py +359 -0
- atdd/tester/validators/test_acceptance_urn_separator.py +166 -0
- atdd/tester/validators/test_artifact_naming_category.py +307 -0
- atdd/tester/validators/test_contract_schema_compliance.py +706 -0
- atdd/tester/validators/test_contracts_structure.py +200 -0
- atdd/tester/validators/test_coverage_adequacy.py +797 -0
- atdd/tester/validators/test_dual_ac_reference.py +225 -0
- atdd/tester/validators/test_fixture_validity.py +372 -0
- atdd/tester/validators/test_isolation.py +487 -0
- atdd/tester/validators/test_migration_coverage.py +204 -0
- atdd/tester/validators/test_migration_criteria.py +276 -0
- atdd/tester/validators/test_migration_generation.py +116 -0
- atdd/tester/validators/test_python_test_naming.py +410 -0
- atdd/tester/validators/test_red_layer_validation.py +95 -0
- atdd/tester/validators/test_red_python_layer_structure.py +87 -0
- atdd/tester/validators/test_red_supabase_layer_structure.py +90 -0
- atdd/tester/validators/test_telemetry_structure.py +634 -0
- atdd/tester/validators/test_typescript_test_naming.py +301 -0
- atdd/tester/validators/test_typescript_test_structure.py +84 -0
- atdd-0.1.0.dist-info/METADATA +191 -0
- atdd-0.1.0.dist-info/RECORD +183 -0
- atdd-0.1.0.dist-info/WHEEL +5 -0
- atdd-0.1.0.dist-info/entry_points.txt +2 -0
- atdd-0.1.0.dist-info/licenses/LICENSE +674 -0
- atdd-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1183 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Session file validation against session.convention.yaml.
|
|
3
|
+
|
|
4
|
+
Purpose: Validate session files before implementation starts, after design/planning phase.
|
|
5
|
+
Convention: src/atdd/coach/conventions/session.convention.yaml
|
|
6
|
+
Template: src/atdd/coach/templates/SESSION-TEMPLATE.md
|
|
7
|
+
|
|
8
|
+
Note: Sessions are created in the consuming repo, not in the ATDD package itself.
|
|
9
|
+
This validator runs against {consumer_repo}/sessions/ directory.
|
|
10
|
+
|
|
11
|
+
Supports two formats:
|
|
12
|
+
1. Hybrid (new): YAML frontmatter + Markdown body
|
|
13
|
+
2. Legacy: Pure Markdown with **Field:** patterns
|
|
14
|
+
|
|
15
|
+
Run: python3 -m pytest src/atdd/coach/validators/test_session_validation.py -v
|
|
16
|
+
"""
|
|
17
|
+
import pytest
|
|
18
|
+
import re
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import List, Dict, Optional, Set, Tuple, Any
|
|
21
|
+
import yaml
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# ============================================================================
# Configuration
# ============================================================================

# Package paths (relative to this file)
ATDD_PKG_ROOT = Path(__file__).parent.parent.parent  # src/atdd
CONVENTION_FILE = ATDD_PKG_ROOT / "coach" / "conventions" / "session.convention.yaml"
TEMPLATE_FILE = ATDD_PKG_ROOT / "coach" / "templates" / "SESSION-TEMPLAT" "E.md" if False else ATDD_PKG_ROOT / "coach" / "templates" / "SESSION-TEMPLATE.md"

# Consumer repo paths (where sessions are created via `atdd init`)
# Default to current working directory, can be overridden
REPO_ROOT = Path.cwd()
SESSIONS_DIR = REPO_ROOT / "atdd-sessions"

# Valid values from convention
# Lifecycle states a session may declare in its header/frontmatter.
VALID_STATUSES = {"INIT", "PLANNED", "ACTIVE", "BLOCKED", "COMPLETE", "OBSOLETE"}
# Kinds of work a session can represent.
VALID_TYPES = {"implementation", "migration", "refactor", "analysis", "planning", "cleanup", "tracking"}
# Architectural areas a session may touch (presumably mirrors the convention
# file's archetype list — TODO confirm against session.convention.yaml).
VALID_ARCHETYPES = {"db", "be", "fe", "contracts", "wmbt", "wagon", "train", "telemetry", "migrations"}
# Per-phase / per-WMBT progress markers used inside `progress:` frontmatter.
VALID_PROGRESS_STATUSES = {"TODO", "IN_PROGRESS", "DONE", "BLOCKED", "SKIPPED", "N/A"}
# Complexity is an integer 1-5 scale.
VALID_COMPLEXITIES = {1, 2, 3, 4, 5}

# Required markdown sections (in body) — matched against "## <name>" headings.
REQUIRED_BODY_SECTIONS = [
    "Context",
    "Architecture",
    "Phases",
    "Validation",
    "Session Log",
]

# Required frontmatter fields (hybrid format only).
REQUIRED_FRONTMATTER_FIELDS = [
    "session",
    "title",
    "date",
    "status",
    "branch",
    "type",
    "complexity",
    "archetypes",
]
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
# ============================================================================
|
|
68
|
+
# Parsing Functions
|
|
69
|
+
# ============================================================================
|
|
70
|
+
|
|
71
|
+
def parse_frontmatter(content: str) -> Tuple[Optional[Dict[str, Any]], str]:
    """
    Parse YAML frontmatter from content.

    The frontmatter is the YAML document between the leading "---" fence and
    the next "---" fence. Content that does not start with "---", lacks a
    closing fence, fails to parse, or parses to a non-mapping (e.g. a Markdown
    file that merely opens with a horizontal rule) is treated as having no
    frontmatter.

    Returns:
        Tuple of (frontmatter_dict or None, body_content)
    """
    if not content.startswith("---"):
        return None, content

    # Find the closing --- (maxsplit=2 keeps any later "---" inside the body)
    parts = content.split("---", 2)
    if len(parts) < 3:
        return None, content

    try:
        frontmatter = yaml.safe_load(parts[1])
    except yaml.YAMLError:
        return None, content

    # safe_load may yield None (empty block), a str, or a list; callers do
    # frontmatter.get(...), so only a mapping counts as real frontmatter.
    if not isinstance(frontmatter, dict):
        return None, content

    return frontmatter, parts[2]
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def parse_legacy_header(content: str) -> Dict[str, str]:
    """
    Extract header fields from a legacy pure-Markdown session file.

    Legacy sessions carry metadata as a "# SESSION-NN: Title" heading plus
    "**Field:** value" lines. Each recognised field is searched for
    independently; fields that are absent are simply omitted from the result.
    """
    field_patterns = (
        ("title", r"^#\s+SESSION-(\d+):\s+(.+)$"),
        ("date", r"\*\*Date:\*\*\s*(.+)"),
        ("status", r"\*\*Status:\*\*\s*(\S+)"),
        ("branch", r"\*\*Branch:\*\*\s*(.+)"),
        ("type", r"\*\*Type:\*\*\s*(\w+)"),
        ("complexity", r"\*\*Complexity:\*\*\s*(\d)"),
        ("archetypes", r"\*\*Archetypes:\*\*\s*(.+)"),
    )

    header: Dict[str, str] = {}
    for field, pattern in field_patterns:
        hit = re.search(pattern, content, re.MULTILINE)
        if hit is None:
            continue
        if field == "title":
            # The heading yields both the session number and its title.
            header["session"] = hit.group(1)
            header["title"] = hit.group(2).strip()
        else:
            header[field] = hit.group(1).strip()

    return header
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def parse_session_file(path: Path) -> Dict[str, Any]:
    """
    Parse a session file (hybrid or legacy format).

    Returns structured data with:
    - path: Path object
    - name: filename
    - content: raw file text
    - format: "hybrid" or "legacy"
    - frontmatter: dict (from YAML or parsed legacy)
    - body: markdown content
    - sections: set of ## headings in body
    """
    # Session files can contain non-ASCII (the naming convention even allows
    # a "✅" marker), so read explicitly as UTF-8 rather than relying on the
    # platform's locale-dependent default encoding.
    content = path.read_text(encoding="utf-8")

    result: Dict[str, Any] = {
        "path": path,
        "name": path.name,
        "content": content,
        "frontmatter": {},
        "body": "",
        "sections": set(),
        "format": "unknown",
    }

    # Try hybrid format first (YAML frontmatter)
    frontmatter, body = parse_frontmatter(content)

    if frontmatter:
        result["format"] = "hybrid"
        result["frontmatter"] = frontmatter
        result["body"] = body
    else:
        # Fall back to legacy format; the whole file is then the "body".
        result["format"] = "legacy"
        result["frontmatter"] = parse_legacy_header(content)
        result["body"] = content

    # Collect every "## Heading" in the body for section-presence checks.
    section_pattern = r"^##\s+(.+)$"
    for match in re.finditer(section_pattern, result["body"], re.MULTILINE):
        result["sections"].add(match.group(1).strip())

    return result
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
# ============================================================================
|
|
169
|
+
# Fixtures
|
|
170
|
+
# ============================================================================
|
|
171
|
+
|
|
172
|
+
@pytest.fixture
def convention() -> Dict:
    """Load and return the parsed session convention YAML."""
    if not CONVENTION_FILE.exists():
        pytest.skip(f"Convention file not found: {CONVENTION_FILE}")

    with CONVENTION_FILE.open() as handle:
        return yaml.safe_load(handle)
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
@pytest.fixture
def session_files() -> List[Path]:
    """Get all session files (excluding template and archive)."""
    if not SESSIONS_DIR.exists():
        pytest.skip(f"Sessions directory not found: {SESSIONS_DIR}. Run 'atdd init' first.")

    # Every SESSION-*.md except the template itself, in sorted order.
    return sorted(
        path
        for path in SESSIONS_DIR.glob("SESSION-*.md")
        if path.name != "SESSION-TEMPLATE.md"
    )
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
@pytest.fixture
def hybrid_session_files(session_files: List[Path]) -> List[Path]:
    """Session files using the hybrid format (YAML frontmatter)."""
    # Hybrid files open with the "---" frontmatter fence.
    return [path for path in session_files if path.read_text().startswith("---")]
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
@pytest.fixture
def active_session_files(session_files: List[Path]) -> List[Path]:
    """Session files that are still active (not COMPLETE or OBSOLETE)."""
    active: List[Path] = []
    for path in session_files:
        raw_status = str(parse_session_file(path)["frontmatter"].get("status", "")).upper()
        # Legacy statuses may carry trailing words; only the first one counts.
        first_word = raw_status.split()[0] if raw_status else ""
        if first_word not in {"COMPLETE", "OBSOLETE"}:
            active.append(path)
    return active
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
# ============================================================================
|
|
224
|
+
# Hybrid Format Validation Tests
|
|
225
|
+
# ============================================================================
|
|
226
|
+
|
|
227
|
+
def test_hybrid_sessions_have_valid_frontmatter(hybrid_session_files: List[Path]):
    """Hybrid sessions must carry parseable YAML frontmatter."""
    problems = [
        f"{path.name}: YAML frontmatter parse error"
        for path in hybrid_session_files
        if parse_frontmatter(path.read_text())[0] is None
    ]

    if problems:
        pytest.fail(f"Invalid YAML frontmatter:\n" + "\n".join(f" - {i}" for i in problems))
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def test_hybrid_sessions_have_required_frontmatter_fields(hybrid_session_files: List[Path]):
    """Hybrid sessions must declare every required frontmatter field."""
    missing = []

    for path in hybrid_session_files:
        parsed = parse_session_file(path)

        # Defensive: only files the parser itself classified as hybrid count.
        if parsed["format"] != "hybrid":
            continue

        frontmatter = parsed["frontmatter"]
        missing.extend(
            f"{path.name}: missing frontmatter field '{field}'"
            for field in REQUIRED_FRONTMATTER_FIELDS
            if field not in frontmatter
        )

    if missing:
        pytest.fail(f"Missing frontmatter fields:\n" + "\n".join(f" - {m}" for m in missing))
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
def test_hybrid_sessions_have_valid_progress_structure(hybrid_session_files: List[Path]):
    """
    Test that hybrid sessions have valid progress structure in frontmatter.

    Expected shape:
        progress:
          phases: [{id, status}, ...]
          wmbt:   [{id, red, green, refactor}, ...]

    Malformed YAML (e.g. an empty "progress:" key, which loads as None) is
    reported as a violation instead of crashing the test with a TypeError.
    """
    invalid = []

    for f in hybrid_session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]

        if "progress" not in fm:
            continue

        progress = fm["progress"]

        # An empty "progress:" key loads as None; any non-mapping is invalid.
        if not isinstance(progress, dict):
            invalid.append(f"{f.name}: progress is not a mapping")
            continue

        # Check phases
        if "phases" in progress:
            phases = progress["phases"]
            if not isinstance(phases, list):
                invalid.append(f"{f.name}: progress.phases is not a list")
                phases = []
            for phase in phases:
                if "id" not in phase:
                    invalid.append(f"{f.name}: phase missing 'id'")
                if "status" not in phase:
                    invalid.append(f"{f.name}: phase missing 'status'")
                elif phase["status"] not in VALID_PROGRESS_STATUSES:
                    invalid.append(f"{f.name}: phase status '{phase['status']}' invalid")

        # Check WMBT (for implementation sessions)
        if "wmbt" in progress:
            wmbt_entries = progress["wmbt"]
            if not isinstance(wmbt_entries, list):
                invalid.append(f"{f.name}: progress.wmbt is not a list")
                wmbt_entries = []
            for wmbt in wmbt_entries:
                if "id" not in wmbt:
                    invalid.append(f"{f.name}: WMBT missing 'id'")
                for phase in ["red", "green", "refactor"]:
                    if phase in wmbt and wmbt[phase] not in VALID_PROGRESS_STATUSES:
                        invalid.append(f"{f.name}: WMBT {wmbt.get('id')} {phase} status invalid")

    if invalid:
        pytest.fail(f"Invalid progress structure:\n" + "\n".join(f" - {i}" for i in invalid))
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
def test_hybrid_sessions_have_success_criteria(hybrid_session_files: List[Path]):
    """Hybrid sessions must declare a non-empty success_criteria list."""
    missing = []

    for path in hybrid_session_files:
        frontmatter = parse_session_file(path)["frontmatter"]

        if "success_criteria" not in frontmatter:
            missing.append(f"{path.name}: missing success_criteria")
            continue

        criteria = frontmatter["success_criteria"]
        if not isinstance(criteria, list):
            missing.append(f"{path.name}: success_criteria must be a list")
        elif not criteria:
            missing.append(f"{path.name}: success_criteria is empty")

    if missing:
        pytest.fail(f"Missing success criteria:\n" + "\n".join(f" - {m}" for m in missing))
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
# ============================================================================
|
|
325
|
+
# General Validation Tests (Both Formats)
|
|
326
|
+
# ============================================================================
|
|
327
|
+
|
|
328
|
+
def test_session_files_have_valid_naming(session_files: List[Path]):
    """
    Test that session files follow naming convention.

    Patterns:
    - SESSION-{NN}-{slug}.md (active)
    - SESSION-{NN}-{slug}-(completed).md (completed)
    - SESSION-{NN}-{slug}-✅.md (completed with checkmark)
    """
    name_rule = re.compile(r"^SESSION-(\d{2})-([a-z0-9-]+)(-\(completed\)|-✅)?\.md$")

    offenders = [path.name for path in session_files if not name_rule.match(path.name)]

    if offenders:
        pytest.fail(f"Invalid session file names:\n" + "\n".join(f" - {n}" for n in offenders))
|
|
346
|
+
|
|
347
|
+
|
|
348
|
+
def test_session_status_is_valid(session_files: List[Path]):
    """
    Test that session status is a valid value.

    Valid: INIT, PLANNED, ACTIVE, BLOCKED, COMPLETE, OBSOLETE
    """
    # Legacy spellings that map onto canonical statuses ("IN" covers the
    # first word of legacy "IN PROGRESS").
    status_aliases = {
        "IN_PROGRESS": "ACTIVE",
        "IN": "ACTIVE",
    }

    invalid = []
    for path in session_files:
        status_raw = str(parse_session_file(path)["frontmatter"].get("status", "")).upper()
        first_word = status_raw.split()[0] if status_raw else ""
        canonical = status_aliases.get(first_word, first_word)

        if canonical and canonical not in VALID_STATUSES:
            invalid.append(f"{path.name}: status '{status_raw}' not in {VALID_STATUSES}")

    if invalid:
        pytest.fail(f"Invalid status values:\n" + "\n".join(f" - {i}" for i in invalid))
|
|
375
|
+
|
|
376
|
+
|
|
377
|
+
def test_session_type_is_valid(session_files: List[Path]):
    """Session type must be one of the values allowed by the convention."""
    invalid = []

    for path in session_files:
        declared = str(parse_session_file(path)["frontmatter"].get("type", "")).lower()
        if declared and declared not in VALID_TYPES:
            invalid.append(f"{path.name}: type '{declared}' not in {VALID_TYPES}")

    if invalid:
        pytest.fail(f"Invalid type values:\n" + "\n".join(f" - {i}" for i in invalid))
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
def test_session_archetypes_are_valid(session_files: List[Path]):
    """Session archetypes must come from the convention's allowed set."""
    invalid = []

    for path in session_files:
        raw = parse_session_file(path)["frontmatter"].get("archetypes", [])

        # Hybrid files store a YAML list; legacy files a comma-joined string.
        if isinstance(raw, str):
            archetypes = [piece.strip().lower() for piece in raw.split(",")]
        elif isinstance(raw, list):
            archetypes = [str(item).lower() for item in raw]
        else:
            archetypes = []

        # Values starting with "{" are skipped — presumably unexpanded
        # template placeholders; verify against SESSION-TEMPLATE.md.
        invalid.extend(
            f"{path.name}: archetype '{arch}' not in {VALID_ARCHETYPES}"
            for arch in archetypes
            if arch and arch not in VALID_ARCHETYPES and not arch.startswith("{")
        )

    if invalid:
        pytest.fail(f"Invalid archetype values:\n" + "\n".join(f" - {i}" for i in invalid))
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def test_session_complexity_is_valid(session_files: List[Path]):
    """Session complexity, when present, must be an integer from 1 to 5."""
    invalid = []

    for path in session_files:
        complexity = parse_session_file(path)["frontmatter"].get("complexity")
        if complexity is None:
            continue

        try:
            value = int(complexity)
        except (ValueError, TypeError):
            invalid.append(f"{path.name}: complexity '{complexity}' not a number")
        else:
            if value not in VALID_COMPLEXITIES:
                invalid.append(f"{path.name}: complexity {value} not in 1-5")

    if invalid:
        pytest.fail(f"Invalid complexity values:\n" + "\n".join(f" - {i}" for i in invalid))
|
|
440
|
+
|
|
441
|
+
|
|
442
|
+
def test_session_files_have_required_body_sections(session_files: List[Path]):
    """Every session body must contain each required '## Section' heading."""
    missing = []

    for path in session_files:
        sections = parse_session_file(path)["sections"]
        missing.extend(
            f"{path.name}: missing ## {section}"
            for section in REQUIRED_BODY_SECTIONS
            if section not in sections
        )

    if missing:
        # Cap the report at 20 entries to keep the failure output readable.
        pytest.fail(
            f"Missing required body sections ({len(missing)} violations):\n" +
            "\n".join(f" - {m}" for m in missing[:20]) +
            (f"\n ... and {len(missing) - 20} more" if len(missing) > 20 else "")
        )
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
def test_session_has_session_log_entry(active_session_files: List[Path]):
    """Active sessions must record at least one '### Session N' log entry."""
    log_entry = re.compile(r"###\s+Session\s+\d+")

    missing = []
    for path in active_session_files:
        if not log_entry.search(parse_session_file(path)["body"]):
            missing.append(f"{path.name}: no Session Log entries (### Session N)")

    if missing:
        pytest.fail(f"Missing Session Log entries:\n" + "\n".join(f" - {m}" for m in missing))
|
|
479
|
+
|
|
480
|
+
|
|
481
|
+
def test_session_has_gate_commands(active_session_files: List[Path]):
    """Active sessions must include runnable gate commands for validation."""
    # A fenced code block mentioning a test/build tool counts as a gate.
    gate_pattern = re.compile(r"```(?:bash|shell)?\n[^`]*(?:pytest|python|npm|supabase)")

    missing = []
    for path in active_session_files:
        if not gate_pattern.search(parse_session_file(path)["body"]):
            missing.append(f"{path.name}: no gate commands found")

    if missing:
        pytest.fail(f"Missing gate commands:\n" + "\n".join(f" - {m}" for m in missing))
|
|
499
|
+
|
|
500
|
+
|
|
501
|
+
# ============================================================================
|
|
502
|
+
# Pre-Implementation Gate
|
|
503
|
+
# ============================================================================
|
|
504
|
+
|
|
505
|
+
def test_planned_sessions_ready_for_implementation(session_files: List[Path]):
    """
    Test that PLANNED sessions have all required elements before implementation.

    This is the main gate to run before starting implementation.

    Checks:
    1. Status is PLANNED (not INIT)
    2. All required sections present
    3. Scope is defined
    4. Phases are defined
    5. Success criteria are defined
    6. Gate commands are present

    Each check accepts either a frontmatter field (hybrid format) or a
    body-text marker (legacy format), so both session formats can pass.
    """
    issues = []

    for f in session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]
        # Only the first word of the status counts (legacy files may append
        # extra words after the status keyword).
        status = str(fm.get("status", "")).upper().split()[0] if fm.get("status") else ""

        # Only check PLANNED sessions
        if status != "PLANNED":
            continue

        session_issues = []
        body = parsed["body"]

        # Check required body sections
        for section in REQUIRED_BODY_SECTIONS:
            if section not in parsed["sections"]:
                session_issues.append(f"missing body section: {section}")

        # Check scope definition (frontmatter or body)
        has_scope = False
        if "scope" in fm and fm["scope"]:
            has_scope = True
        elif "In Scope" in body or "In scope" in body:
            has_scope = True

        if not has_scope:
            session_issues.append("missing scope definition")

        # Check phases defined (frontmatter or body)
        has_phases = False
        if "progress" in fm and "phases" in fm.get("progress", {}):
            has_phases = True
        elif re.search(r"###\s+Phase\s+\d+", body):
            has_phases = True

        if not has_phases:
            session_issues.append("no phases defined")

        # Check success criteria (frontmatter or body)
        # Body fallback: any Markdown checkbox ("- [ ]" / "- [x]") counts.
        has_criteria = False
        if "success_criteria" in fm and fm["success_criteria"]:
            has_criteria = True
        elif re.search(r"- \[[ x]\]", body):
            has_criteria = True

        if not has_criteria:
            session_issues.append("no success criteria")

        # Check gate commands
        # NOTE(review): unlike test_session_has_gate_commands, "supabase" is
        # not accepted here — confirm whether that asymmetry is intentional.
        if not re.search(r"```(?:bash|shell)?\n[^`]*(?:pytest|python|npm)", body):
            session_issues.append("no gate commands")

        if session_issues:
            issues.append(f"{f.name}:\n" + "\n".join(f" - {i}" for i in session_issues))

    if issues:
        pytest.fail(
            f"PLANNED sessions not ready for implementation:\n\n" +
            "\n\n".join(issues)
        )
|
|
580
|
+
|
|
581
|
+
|
|
582
|
+
# ============================================================================
# Implementation Session Tests
# ============================================================================

def test_implementation_sessions_have_wmbt_tracking(session_files: List[Path]):
    """
    Test that implementation sessions have WMBT tracking.

    Checks frontmatter progress.wmbt (non-empty) or a WMBT Status section
    in the body. Sessions whose type is not "implementation" are skipped.
    """
    missing = []

    for f in session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]
        session_type = str(fm.get("type", "")).lower()

        if session_type != "implementation":
            continue

        # Check frontmatter. Guard against YAML null values (a bare
        # "progress:" or "wmbt:" key parses as None), which previously
        # made the `in` test / len() raise TypeError.
        progress = fm.get("progress")
        wmbt = progress.get("wmbt") if isinstance(progress, dict) else None
        has_wmbt_frontmatter = bool(wmbt)

        # Check body: explicit heading text or a parsed WMBT section.
        has_wmbt_body = "WMBT Status" in parsed["body"] or "WMBT" in parsed["sections"]

        if not has_wmbt_frontmatter and not has_wmbt_body:
            missing.append(f"{f.name}: implementation session missing WMBT tracking")

    if missing:
        pytest.fail(f"Missing WMBT tracking:\n" + "\n".join(f" - {m}" for m in missing))
def test_implementation_sessions_have_atdd_phases(session_files: List[Path]):
    """
    Test that implementation sessions track RED/GREEN/REFACTOR phases.
    """
    problems = []

    for path in session_files:
        parsed = parse_session_file(path)
        fm = parsed["frontmatter"]

        if str(fm.get("type", "")).lower() != "implementation":
            continue

        body = parsed["body"]

        # Frontmatter tracking: a progress.atdd key is sufficient.
        tracked_in_frontmatter = "progress" in fm and "atdd" in fm.get("progress", {})

        # Body tracking: all three phase markers must appear somewhere.
        absent = [phase for phase in ("RED", "GREEN", "REFACTOR") if phase not in body]
        tracked_in_body = not absent

        if not tracked_in_frontmatter and not tracked_in_body:
            problems.append(f"{path.name}: missing ATDD phases: {', '.join(absent)}")

    if problems:
        pytest.fail(f"Missing ATDD phases:\n" + "\n".join(f" - {m}" for m in problems))
# ============================================================================
# Gate Tests Validation
# ============================================================================

# Required ATDD validators per archetype (from session.convention.yaml).
# Keys are archetype names; values are validator paths relative to the
# repository root.
REQUIRED_VALIDATORS_BY_ARCHETYPE = {
    "db": ["atdd/tester/validators/test_migration_coverage.py"],
    "be": [
        "atdd/coder/validators/test_python_architecture.py",
        "atdd/coder/validators/test_import_boundaries.py",
    ],
    "fe": ["atdd/coder/validators/test_typescript_architecture.py"],
    "contracts": ["atdd/tester/validators/test_contract_schema_compliance.py"],
    "wmbt": ["atdd/planner/validators/test_wmbt_consistency.py"],
    "wagon": [
        "atdd/planner/validators/test_wagon_urn_chain.py",
        "atdd/coder/validators/test_wagon_boundaries.py",
    ],
    "train": ["atdd/planner/validators/test_train_validation.py"],
    "telemetry": ["atdd/tester/validators/test_telemetry_structure.py"],
    "migrations": ["atdd/tester/validators/test_migration_coverage.py"],
}

# Universal required validators (apply to all sessions).
UNIVERSAL_VALIDATORS = [
    "atdd/coach/validators/test_session_validation.py",
]

# Controlled vocabulary for a gate test's "phase" field.
VALID_GATE_PHASES = {"design", "implementation", "validation", "completion"}

# Controlled vocabulary for a gate test's "expected" field.
VALID_GATE_EXPECTED = {"PASS", "FAIL"}
def test_hybrid_sessions_have_gate_tests(hybrid_session_files: List[Path]):
    """
    Test that hybrid sessions have gate_tests defined in frontmatter.

    Gate tests are required to enforce conventions via ATDD validators.
    """
    problems = []

    for path in hybrid_session_files:
        fm = parse_session_file(path)["frontmatter"]
        gates = fm.get("gate_tests")

        # Report the most specific problem: absent key, wrong type, empty list.
        if "gate_tests" not in fm:
            problems.append(f"{path.name}: missing gate_tests in frontmatter")
        elif not isinstance(gates, list):
            problems.append(f"{path.name}: gate_tests must be a list")
        elif not gates:
            problems.append(f"{path.name}: gate_tests is empty")

    if problems:
        pytest.fail(f"Missing gate_tests:\n" + "\n".join(f" - {m}" for m in problems))
def test_gate_tests_have_valid_structure(hybrid_session_files: List[Path]):
    """
    Test that gate_tests have valid structure with required fields.

    Required fields: id, phase, archetype, command, expected, atdd_validator
    """
    invalid = []

    required_fields = ["id", "phase", "archetype", "command", "expected", "atdd_validator"]

    for f in hybrid_session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]

        # Absent or malformed gate_tests is checked elsewhere
        # (test_hybrid_sessions_have_gate_tests).
        if "gate_tests" not in fm or not isinstance(fm["gate_tests"], list):
            continue

        for idx, gate in enumerate(fm["gate_tests"]):
            if not isinstance(gate, dict):
                invalid.append(f"{f.name}: gate_tests[{idx}] is not a dict")
                continue

            # Every required field must be present.
            for field in required_fields:
                if field not in gate:
                    invalid.append(f"{f.name}: gate_tests[{idx}] missing '{field}'")

            # Validate phase value
            if "phase" in gate and gate["phase"] not in VALID_GATE_PHASES:
                invalid.append(f"{f.name}: gate_tests[{idx}] phase '{gate['phase']}' invalid")

            # Validate expected value
            if "expected" in gate and gate["expected"] not in VALID_GATE_EXPECTED:
                invalid.append(f"{f.name}: gate_tests[{idx}] expected '{gate['expected']}' invalid")

            # Validate archetype value. A non-string value (YAML null, list,
            # mapping) is reported as invalid rather than crashing on
            # .startswith / set membership with an unhashable type.
            if "archetype" in gate:
                arch = gate["archetype"]
                if not isinstance(arch, str):
                    invalid.append(f"{f.name}: gate_tests[{idx}] archetype '{arch}' invalid")
                elif arch != "all" and arch not in VALID_ARCHETYPES and not arch.startswith("{"):
                    # "{...}" template placeholders are tolerated.
                    invalid.append(f"{f.name}: gate_tests[{idx}] archetype '{arch}' invalid")

    if invalid:
        pytest.fail(f"Invalid gate_tests structure:\n" + "\n".join(f" - {i}" for i in invalid[:30]))
def test_gate_tests_reference_valid_atdd_validators(hybrid_session_files: List[Path]):
    """
    Test that gate_tests reference valid ATDD validator paths.

    Validator paths must:
    - Start with 'atdd/' or be 'manual' or 'feature-specific'
    - End with '.py' or be a directory path ending with '/'
    """
    invalid = []

    for f in hybrid_session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]

        if "gate_tests" not in fm or not isinstance(fm["gate_tests"], list):
            continue

        for idx, gate in enumerate(fm["gate_tests"]):
            if not isinstance(gate, dict):
                continue

            # A YAML null ("atdd_validator:") or non-string value would
            # crash .startswith/.endswith; normalize to "" so it is
            # reported as an invalid reference instead.
            raw = gate.get("atdd_validator", "")
            validator = raw if isinstance(raw, str) else ""

            # Skip special values
            if validator in ["manual", "feature-specific"]:
                continue

            # Skip template placeholders
            if validator.startswith("{"):
                continue

            # Must start with atdd/
            if not validator.startswith("atdd/"):
                invalid.append(f"{f.name}: gate_tests[{idx}] atdd_validator must start with 'atdd/'")
                continue

            # Must end with .py or /
            if not (validator.endswith(".py") or validator.endswith("/")):
                invalid.append(f"{f.name}: gate_tests[{idx}] atdd_validator must end with '.py' or '/'")

    if invalid:
        pytest.fail(f"Invalid ATDD validator references:\n" + "\n".join(f" - {i}" for i in invalid[:30]))
def test_gate_tests_cover_declared_archetypes(hybrid_session_files: List[Path]):
    """
    Test that gate_tests exist for all declared archetypes.

    Each archetype declared in session.archetypes must have at least one
    corresponding gate_test with matching archetype or archetype='all'.
    """
    uncovered = []

    for path in hybrid_session_files:
        fm = parse_session_file(path)["frontmatter"]

        # Normalize declared archetypes to a lowercase list; accept both a
        # comma-separated string and a YAML list, skip anything else.
        declared = fm.get("archetypes", [])
        if isinstance(declared, str):
            names = [part.strip().lower() for part in declared.split(",")]
        elif isinstance(declared, list):
            names = [str(item).lower() for item in declared]
        else:
            continue

        # Drop template placeholders such as "{archetype}".
        names = [n for n in names if not n.startswith("{")]
        if not names:
            continue

        gates = fm.get("gate_tests", [])
        if not isinstance(gates, list):
            continue

        # Collect the archetypes the gate tests cover.
        covered = {g.get("archetype", "") for g in gates if isinstance(g, dict)}
        wildcard = "all" in covered

        # Each declared archetype needs coverage (unless 'all' is present).
        for name in names:
            if not wildcard and name not in covered:
                uncovered.append(f"{path.name}: archetype '{name}' has no gate_test")

    if uncovered:
        pytest.fail(f"Archetypes without gate_tests:\n" + "\n".join(f" - {m}" for m in uncovered[:30]))
def test_planned_sessions_have_universal_gate_tests(session_files: List[Path]):
    """
    Test that PLANNED/ACTIVE sessions have required universal gate tests.

    Universal gates:
    - GT-001: Session validation (design phase)
    - GT-900: Full ATDD suite (completion phase)

    Satisfied by any gate_test in the design phase and any gate_test in
    the completion phase.
    """
    missing = []

    for f in session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]

        # First token of the status, uppercased. Guard against a
        # whitespace-only status, where split() yields no tokens.
        raw_status = fm.get("status")
        tokens = str(raw_status).upper().split() if raw_status else []
        status = tokens[0] if tokens else ""

        # Only check PLANNED and ACTIVE sessions
        if status not in {"PLANNED", "ACTIVE"}:
            continue

        # For hybrid format only
        if parsed["format"] != "hybrid":
            continue

        gate_tests = fm.get("gate_tests", [])
        if not isinstance(gate_tests, list):
            missing.append(f"{f.name}: gate_tests is not a list")
            continue

        # Check for universal gates (or any design/completion gates)
        has_design_gate = any(
            isinstance(g, dict) and g.get("phase") == "design"
            for g in gate_tests
        )
        has_completion_gate = any(
            isinstance(g, dict) and g.get("phase") == "completion"
            for g in gate_tests
        )

        if not has_design_gate:
            missing.append(f"{f.name}: missing design phase gate_test")
        if not has_completion_gate:
            missing.append(f"{f.name}: missing completion phase gate_test")

    if missing:
        pytest.fail(f"Missing universal gate_tests:\n" + "\n".join(f" - {m}" for m in missing))
# ============================================================================
# ATDD Workflow Sequence Validation
# ============================================================================

# Valid workflow phase statuses
VALID_WORKFLOW_STATUSES = {"TODO", "IN_PROGRESS", "DONE", "SKIPPED", "N/A"}

# Workflow phases and their dependencies.
# "order" is the position in the Plan -> Test -> Code sequence;
# "depends_on" lists the phases that must be DONE or SKIPPED first.
WORKFLOW_PHASES = {
    "planner": {"order": 1, "depends_on": []},
    "tester": {"order": 2, "depends_on": ["planner"]},
    "coder": {"order": 3, "depends_on": ["planner", "tester"]},
}

# Required workflow phases by session type.
# Types mapped to an empty list (analysis, tracking) have no required phases.
REQUIRED_WORKFLOW_PHASES_BY_TYPE = {
    "implementation": ["planner", "tester", "coder"],
    "migration": ["planner", "tester", "coder"],
    "refactor": ["tester", "coder"],
    "analysis": [],
    "planning": ["planner"],
    "cleanup": ["coder"],
    "tracking": [],
}
def test_implementation_sessions_have_workflow_phases(hybrid_session_files: List[Path]):
    """
    Test that implementation sessions have workflow_phases tracking.

    ATDD workflow: Planner → Tester → Coder
    This ensures the Plan → Test → Code sequence is followed.
    """
    missing = []

    # Source the required phase list from the module-level table so this
    # test cannot drift from REQUIRED_WORKFLOW_PHASES_BY_TYPE.
    required_phases = REQUIRED_WORKFLOW_PHASES_BY_TYPE["implementation"]

    for f in hybrid_session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]
        session_type = str(fm.get("type", "")).lower()

        # Only check implementation sessions
        if session_type != "implementation":
            continue

        if "workflow_phases" not in fm:
            missing.append(f"{f.name}: missing workflow_phases in frontmatter")
            continue

        wf = fm["workflow_phases"]

        # A YAML null (bare "workflow_phases:") or non-mapping value is not
        # usable; report it instead of crashing on the membership test.
        if not isinstance(wf, dict):
            missing.append(f"{f.name}: workflow_phases is not a mapping")
            continue

        # Check all required phases exist
        for phase in required_phases:
            if phase not in wf:
                missing.append(f"{f.name}: workflow_phases missing '{phase}' phase")

    if missing:
        pytest.fail(f"Missing workflow_phases:\n" + "\n".join(f" - {m}" for m in missing))
def test_workflow_phases_have_valid_structure(hybrid_session_files: List[Path]):
    """
    Test that workflow_phases have valid structure with required fields.

    Each phase must have: status, gate, gate_status
    """
    problems = []

    expected_fields = ("status", "gate", "gate_status")

    for path in hybrid_session_files:
        fm = parse_session_file(path)["frontmatter"]
        wf = fm.get("workflow_phases")

        if not isinstance(wf, dict):
            continue

        for phase_name, phase_data in wf.items():
            if not isinstance(phase_data, dict):
                problems.append(f"{path.name}: workflow_phases.{phase_name} is not a dict")
                continue

            # Every required field must be present.
            problems.extend(
                f"{path.name}: workflow_phases.{phase_name} missing '{field}'"
                for field in expected_fields
                if field not in phase_data
            )

            # Status fields must use the controlled vocabulary.
            for field in ("status", "gate_status"):
                if field in phase_data and phase_data[field] not in VALID_WORKFLOW_STATUSES:
                    problems.append(
                        f"{path.name}: workflow_phases.{phase_name}.{field} '{phase_data[field]}' invalid"
                    )

    if problems:
        pytest.fail(f"Invalid workflow_phases structure:\n" + "\n".join(f" - {i}" for i in problems[:30]))
def test_workflow_phase_dependencies_respected(hybrid_session_files: List[Path]):
    """
    Test that workflow phase dependencies are respected.

    ATDD workflow rules (from session.convention.yaml):
    - WF-001: MUST complete planner phase before tester phase
    - WF-002: MUST complete tester phase before coder phase
    - WF-003: MUST have RED test before writing implementation

    A phase can only be IN_PROGRESS or DONE if its dependencies are DONE or SKIPPED.
    """
    violations = []

    for f in hybrid_session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]

        if "workflow_phases" not in fm or not isinstance(fm["workflow_phases"], dict):
            continue

        wf = fm["workflow_phases"]

        for phase_name, phase_info in WORKFLOW_PHASES.items():
            if phase_name not in wf:
                continue

            phase_data = wf[phase_name]
            if not isinstance(phase_data, dict):
                continue

            phase_status = phase_data.get("status", "TODO")

            # A phase that has not started (or is explicitly skipped)
            # has no dependency obligations yet.
            if phase_status in {"TODO", "SKIPPED", "N/A"}:
                continue

            # If phase is IN_PROGRESS or DONE, check dependencies
            for dep in phase_info["depends_on"]:
                if dep not in wf:
                    violations.append(
                        f"{f.name}: {phase_name} is {phase_status} but dependency '{dep}' is missing"
                    )
                    continue

                dep_data = wf[dep]
                if not isinstance(dep_data, dict):
                    continue

                dep_status = dep_data.get("status", "TODO")

                # Dependency must be DONE or SKIPPED for phase to progress
                if dep_status not in {"DONE", "SKIPPED"}:
                    # The rule ID is keyed on the unmet *dependency*: an
                    # unmet planner prerequisite violates WF-001, an unmet
                    # tester prerequisite violates WF-002. (Keying on the
                    # dependent phase's order mislabeled tester-without-
                    # planner as WF-002 and coder-without-tester as WF-003.)
                    rule = f"WF-00{WORKFLOW_PHASES[dep]['order']}"
                    violations.append(
                        f"{f.name}: {phase_name} is {phase_status} but dependency '{dep}' is {dep_status} "
                        f"(violates {rule})"
                    )

    if violations:
        pytest.fail(
            f"Workflow phase dependency violations (Plan → Test → Code):\n" +
            "\n".join(f" - {v}" for v in violations[:20])
        )
def test_session_type_has_required_workflow_phases(hybrid_session_files: List[Path]):
    """
    Test that sessions have required workflow phases based on their type.

    Session type workflow mapping:
    - implementation: planner, tester, coder
    - migration: planner, tester, coder
    - refactor: tester, coder
    - planning: planner
    - cleanup: coder
    - analysis, tracking: none required
    """
    problems = []

    for path in hybrid_session_files:
        fm = parse_session_file(path)["frontmatter"]
        session_type = str(fm.get("type", "")).lower()

        # Unknown types and types with no required phases both skip.
        required = REQUIRED_WORKFLOW_PHASES_BY_TYPE.get(session_type)
        if not required:
            continue

        wf = fm.get("workflow_phases", {})
        if not isinstance(wf, dict):
            problems.append(f"{path.name}: type '{session_type}' requires workflow_phases")
            continue

        problems.extend(
            f"{path.name}: type '{session_type}' requires workflow_phases.{phase}"
            for phase in required
            if phase not in wf
        )

    if problems:
        pytest.fail(
            f"Sessions missing required workflow phases for their type:\n" +
            "\n".join(f" - {m}" for m in problems[:30])
        )
# ============================================================================
# Summary Test
# ============================================================================

def test_session_validation_summary(session_files: List[Path]):
    """
    Generate a summary of all session files and their validation status.

    This test always passes but prints a summary.
    """
    print("\n" + "=" * 70)
    print("SESSION VALIDATION SUMMARY")
    print("=" * 70)

    # Aggregate counters. by_format keys mirror the "format" values that
    # parse_session_file emits (hybrid / legacy).
    stats = {
        "total": len(session_files),
        "by_format": {"hybrid": 0, "legacy": 0},
        "by_status": {},
        "by_type": {},
    }

    for f in session_files:
        parsed = parse_session_file(f)
        fm = parsed["frontmatter"]

        # Format
        stats["by_format"][parsed["format"]] += 1

        # Status: first whitespace-separated token, uppercased.
        status = str(fm.get("status", "UNKNOWN")).upper().split()[0]
        stats["by_status"][status] = stats["by_status"].get(status, 0) + 1

        # Type
        session_type = str(fm.get("type", "unknown")).lower()
        stats["by_type"][session_type] = stats["by_type"].get(session_type, 0) + 1

    print(f"\nTotal sessions: {stats['total']}")

    print("\nBy Format:")
    for fmt, count in stats["by_format"].items():
        print(f" {fmt}: {count}")

    print("\nBy Status:")
    for status, count in sorted(stats["by_status"].items()):
        print(f" {status}: {count}")

    print("\nBy Type:")
    for t, count in sorted(stats["by_type"].items()):
        print(f" {t}: {count}")

    print("\n" + "=" * 70)