atdd 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atdd/__init__.py +6 -0
- atdd/__main__.py +4 -0
- atdd/cli.py +404 -0
- atdd/coach/__init__.py +0 -0
- atdd/coach/commands/__init__.py +0 -0
- atdd/coach/commands/add_persistence_metadata.py +215 -0
- atdd/coach/commands/analyze_migrations.py +188 -0
- atdd/coach/commands/consumers.py +720 -0
- atdd/coach/commands/infer_governance_status.py +149 -0
- atdd/coach/commands/initializer.py +177 -0
- atdd/coach/commands/interface.py +1078 -0
- atdd/coach/commands/inventory.py +565 -0
- atdd/coach/commands/migration.py +240 -0
- atdd/coach/commands/registry.py +1560 -0
- atdd/coach/commands/session.py +430 -0
- atdd/coach/commands/sync.py +405 -0
- atdd/coach/commands/test_interface.py +399 -0
- atdd/coach/commands/test_runner.py +141 -0
- atdd/coach/commands/tests/__init__.py +1 -0
- atdd/coach/commands/tests/test_telemetry_array_validation.py +235 -0
- atdd/coach/commands/traceability.py +4264 -0
- atdd/coach/conventions/session.convention.yaml +754 -0
- atdd/coach/overlays/__init__.py +2 -0
- atdd/coach/overlays/claude.md +2 -0
- atdd/coach/schemas/config.schema.json +34 -0
- atdd/coach/schemas/manifest.schema.json +101 -0
- atdd/coach/templates/ATDD.md +282 -0
- atdd/coach/templates/SESSION-TEMPLATE.md +327 -0
- atdd/coach/utils/__init__.py +0 -0
- atdd/coach/utils/graph/__init__.py +0 -0
- atdd/coach/utils/graph/urn.py +875 -0
- atdd/coach/validators/__init__.py +0 -0
- atdd/coach/validators/shared_fixtures.py +365 -0
- atdd/coach/validators/test_enrich_wagon_registry.py +167 -0
- atdd/coach/validators/test_registry.py +575 -0
- atdd/coach/validators/test_session_validation.py +1183 -0
- atdd/coach/validators/test_traceability.py +448 -0
- atdd/coach/validators/test_update_feature_paths.py +108 -0
- atdd/coach/validators/test_validate_contract_consumers.py +297 -0
- atdd/coder/__init__.py +1 -0
- atdd/coder/conventions/adapter.recipe.yaml +88 -0
- atdd/coder/conventions/backend.convention.yaml +460 -0
- atdd/coder/conventions/boundaries.convention.yaml +666 -0
- atdd/coder/conventions/commons.convention.yaml +460 -0
- atdd/coder/conventions/complexity.recipe.yaml +109 -0
- atdd/coder/conventions/component-naming.convention.yaml +178 -0
- atdd/coder/conventions/design.convention.yaml +327 -0
- atdd/coder/conventions/design.recipe.yaml +273 -0
- atdd/coder/conventions/dto.convention.yaml +660 -0
- atdd/coder/conventions/frontend.convention.yaml +542 -0
- atdd/coder/conventions/green.convention.yaml +1012 -0
- atdd/coder/conventions/presentation.convention.yaml +587 -0
- atdd/coder/conventions/refactor.convention.yaml +535 -0
- atdd/coder/conventions/technology.convention.yaml +206 -0
- atdd/coder/conventions/tests/__init__.py +0 -0
- atdd/coder/conventions/tests/test_adapter_recipe.py +302 -0
- atdd/coder/conventions/tests/test_complexity_recipe.py +289 -0
- atdd/coder/conventions/tests/test_component_taxonomy.py +278 -0
- atdd/coder/conventions/tests/test_component_urn_naming.py +165 -0
- atdd/coder/conventions/tests/test_thinness_recipe.py +286 -0
- atdd/coder/conventions/thinness.recipe.yaml +82 -0
- atdd/coder/conventions/train.convention.yaml +325 -0
- atdd/coder/conventions/verification.protocol.yaml +53 -0
- atdd/coder/schemas/design_system.schema.json +361 -0
- atdd/coder/validators/__init__.py +0 -0
- atdd/coder/validators/test_commons_structure.py +485 -0
- atdd/coder/validators/test_complexity.py +416 -0
- atdd/coder/validators/test_cross_language_consistency.py +431 -0
- atdd/coder/validators/test_design_system_compliance.py +413 -0
- atdd/coder/validators/test_dto_testing_patterns.py +268 -0
- atdd/coder/validators/test_green_cross_stack_layers.py +168 -0
- atdd/coder/validators/test_green_layer_dependencies.py +148 -0
- atdd/coder/validators/test_green_python_layer_structure.py +103 -0
- atdd/coder/validators/test_green_supabase_layer_structure.py +103 -0
- atdd/coder/validators/test_import_boundaries.py +396 -0
- atdd/coder/validators/test_init_file_urns.py +593 -0
- atdd/coder/validators/test_preact_layer_boundaries.py +221 -0
- atdd/coder/validators/test_presentation_convention.py +260 -0
- atdd/coder/validators/test_python_architecture.py +674 -0
- atdd/coder/validators/test_quality_metrics.py +420 -0
- atdd/coder/validators/test_station_master_pattern.py +244 -0
- atdd/coder/validators/test_train_infrastructure.py +454 -0
- atdd/coder/validators/test_train_urns.py +293 -0
- atdd/coder/validators/test_typescript_architecture.py +616 -0
- atdd/coder/validators/test_usecase_structure.py +421 -0
- atdd/coder/validators/test_wagon_boundaries.py +586 -0
- atdd/conftest.py +126 -0
- atdd/planner/__init__.py +1 -0
- atdd/planner/conventions/acceptance.convention.yaml +538 -0
- atdd/planner/conventions/appendix.convention.yaml +187 -0
- atdd/planner/conventions/artifact-naming.convention.yaml +852 -0
- atdd/planner/conventions/component.convention.yaml +670 -0
- atdd/planner/conventions/criteria.convention.yaml +141 -0
- atdd/planner/conventions/feature.convention.yaml +371 -0
- atdd/planner/conventions/interface.convention.yaml +382 -0
- atdd/planner/conventions/steps.convention.yaml +141 -0
- atdd/planner/conventions/train.convention.yaml +552 -0
- atdd/planner/conventions/wagon.convention.yaml +275 -0
- atdd/planner/conventions/wmbt.convention.yaml +258 -0
- atdd/planner/schemas/acceptance.schema.json +336 -0
- atdd/planner/schemas/appendix.schema.json +78 -0
- atdd/planner/schemas/component.schema.json +114 -0
- atdd/planner/schemas/feature.schema.json +197 -0
- atdd/planner/schemas/train.schema.json +192 -0
- atdd/planner/schemas/wagon.schema.json +281 -0
- atdd/planner/schemas/wmbt.schema.json +59 -0
- atdd/planner/validators/__init__.py +0 -0
- atdd/planner/validators/conftest.py +5 -0
- atdd/planner/validators/test_draft_wagon_registry.py +374 -0
- atdd/planner/validators/test_plan_cross_refs.py +240 -0
- atdd/planner/validators/test_plan_uniqueness.py +224 -0
- atdd/planner/validators/test_plan_urn_resolution.py +268 -0
- atdd/planner/validators/test_plan_wagons.py +174 -0
- atdd/planner/validators/test_train_validation.py +514 -0
- atdd/planner/validators/test_wagon_urn_chain.py +648 -0
- atdd/planner/validators/test_wmbt_consistency.py +327 -0
- atdd/planner/validators/test_wmbt_vocabulary.py +632 -0
- atdd/tester/__init__.py +1 -0
- atdd/tester/conventions/artifact.convention.yaml +257 -0
- atdd/tester/conventions/contract.convention.yaml +1009 -0
- atdd/tester/conventions/filename.convention.yaml +555 -0
- atdd/tester/conventions/migration.convention.yaml +509 -0
- atdd/tester/conventions/red.convention.yaml +797 -0
- atdd/tester/conventions/routing.convention.yaml +51 -0
- atdd/tester/conventions/telemetry.convention.yaml +458 -0
- atdd/tester/schemas/a11y.tmpl.json +17 -0
- atdd/tester/schemas/artifact.schema.json +189 -0
- atdd/tester/schemas/contract.schema.json +591 -0
- atdd/tester/schemas/contract.tmpl.json +95 -0
- atdd/tester/schemas/db.tmpl.json +20 -0
- atdd/tester/schemas/e2e.tmpl.json +17 -0
- atdd/tester/schemas/edge_function.tmpl.json +17 -0
- atdd/tester/schemas/event.tmpl.json +17 -0
- atdd/tester/schemas/http.tmpl.json +19 -0
- atdd/tester/schemas/job.tmpl.json +18 -0
- atdd/tester/schemas/load.tmpl.json +21 -0
- atdd/tester/schemas/metric.tmpl.json +19 -0
- atdd/tester/schemas/pack.schema.json +139 -0
- atdd/tester/schemas/realtime.tmpl.json +20 -0
- atdd/tester/schemas/rls.tmpl.json +18 -0
- atdd/tester/schemas/script.tmpl.json +16 -0
- atdd/tester/schemas/sec.tmpl.json +18 -0
- atdd/tester/schemas/storage.tmpl.json +18 -0
- atdd/tester/schemas/telemetry.schema.json +128 -0
- atdd/tester/schemas/telemetry_tracking_manifest.schema.json +143 -0
- atdd/tester/schemas/test_filename.schema.json +194 -0
- atdd/tester/schemas/test_intent.schema.json +179 -0
- atdd/tester/schemas/unit.tmpl.json +18 -0
- atdd/tester/schemas/visual.tmpl.json +18 -0
- atdd/tester/schemas/ws.tmpl.json +17 -0
- atdd/tester/utils/__init__.py +0 -0
- atdd/tester/utils/filename.py +300 -0
- atdd/tester/validators/__init__.py +0 -0
- atdd/tester/validators/cleanup_duplicate_headers.py +116 -0
- atdd/tester/validators/cleanup_duplicate_headers_v2.py +135 -0
- atdd/tester/validators/conftest.py +5 -0
- atdd/tester/validators/coverage_gap_report.py +321 -0
- atdd/tester/validators/fix_dual_ac_references.py +179 -0
- atdd/tester/validators/remove_duplicate_lines.py +93 -0
- atdd/tester/validators/test_acceptance_urn_filename_mapping.py +359 -0
- atdd/tester/validators/test_acceptance_urn_separator.py +166 -0
- atdd/tester/validators/test_artifact_naming_category.py +307 -0
- atdd/tester/validators/test_contract_schema_compliance.py +706 -0
- atdd/tester/validators/test_contracts_structure.py +200 -0
- atdd/tester/validators/test_coverage_adequacy.py +797 -0
- atdd/tester/validators/test_dual_ac_reference.py +225 -0
- atdd/tester/validators/test_fixture_validity.py +372 -0
- atdd/tester/validators/test_isolation.py +487 -0
- atdd/tester/validators/test_migration_coverage.py +204 -0
- atdd/tester/validators/test_migration_criteria.py +276 -0
- atdd/tester/validators/test_migration_generation.py +116 -0
- atdd/tester/validators/test_python_test_naming.py +410 -0
- atdd/tester/validators/test_red_layer_validation.py +95 -0
- atdd/tester/validators/test_red_python_layer_structure.py +87 -0
- atdd/tester/validators/test_red_supabase_layer_structure.py +90 -0
- atdd/tester/validators/test_telemetry_structure.py +634 -0
- atdd/tester/validators/test_typescript_test_naming.py +301 -0
- atdd/tester/validators/test_typescript_test_structure.py +84 -0
- atdd-0.2.1.dist-info/METADATA +221 -0
- atdd-0.2.1.dist-info/RECORD +184 -0
- atdd-0.2.1.dist-info/WHEEL +5 -0
- atdd-0.2.1.dist-info/entry_points.txt +2 -0
- atdd-0.2.1.dist-info/licenses/LICENSE +674 -0
- atdd-0.2.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,321 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Generate detailed coverage gap report with actionable details.
|
|
4
|
+
|
|
5
|
+
Provides:
|
|
6
|
+
- Full list of all missing ACs (not truncated)
|
|
7
|
+
- Grouped by wagon
|
|
8
|
+
- Grouped by test category (UNIT, HTTP, GOLDEN, etc.)
|
|
9
|
+
- Suggested test file locations
|
|
10
|
+
- Coverage statistics per wagon
|
|
11
|
+
- Prioritization guidance
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import yaml
|
|
15
|
+
import re
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from collections import defaultdict
|
|
18
|
+
from typing import Dict, List, Set, Tuple
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Path constants
# NOTE(review): assumes the repo root sits four directory levels above this
# file -- confirm against the deployment layout before relying on it.
REPO_ROOT = Path(__file__).resolve().parents[4]
PLAN_DIR = REPO_ROOT / "plan"      # acceptance-criteria YAML specs live here
PYTHON_DIR = REPO_ROOT / "python"  # python test implementations are scanned here
LIB_DIR = REPO_ROOT / "lib"        # NOTE(review): unused in this script -- verify before removing
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def find_acceptance_criteria() -> Dict[str, Dict]:
    """Scan plan/ YAML specs and index acceptance criteria by URN.

    Returns:
        Mapping of AC URN -> metadata dict with keys ``wagon``, ``wmbt``,
        ``wmbt_file``, ``purpose`` and ``file`` (repo-relative spec path).
        Empty when the plan directory does not exist.
    """
    if not PLAN_DIR.exists():
        return {}

    criteria: Dict[str, Dict] = {}

    for spec_path in PLAN_DIR.rglob("*.yaml"):
        # Underscore-prefixed files are treated as private/scratch specs.
        if spec_path.name.startswith('_'):
            continue

        try:
            with open(spec_path, 'r', encoding='utf-8') as handle:
                parsed = yaml.safe_load(handle)
        except Exception:
            # Unreadable or invalid YAML is skipped silently (best-effort scan).
            continue

        if not isinstance(parsed, dict) or 'acceptances' not in parsed:
            continue

        wmbt_urn = parsed.get('urn', 'unknown')
        wagon = spec_path.parent.name

        for entry in parsed.get('acceptances', []):
            identity = entry.get('identity', {})
            ac_urn = identity.get('urn')
            if not ac_urn:
                continue
            criteria[ac_urn] = {
                'wagon': wagon,
                'wmbt': wmbt_urn,
                'wmbt_file': spec_path.stem,
                'purpose': identity.get('purpose', ''),
                'file': str(spec_path.relative_to(REPO_ROOT)),
            }

    return criteria
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def find_python_tests() -> Dict[str, List[str]]:
    """Collect python test files and the test functions each defines.

    Returns:
        Mapping of repo-relative file path -> list of ``test_*`` function
        names. Files with no test functions, or unreadable files, are
        omitted; empty when the python directory does not exist.
    """
    if not PYTHON_DIR.exists():
        return {}

    collected: Dict[str, List[str]] = {}

    for path in PYTHON_DIR.rglob("test_*.py"):
        try:
            source = path.read_text(encoding='utf-8')
        except Exception:
            # Best-effort scan: unreadable files are simply skipped.
            continue

        names = re.findall(r'def\s+(test_\w+)\s*\(', source)
        if names:
            collected[str(path.relative_to(REPO_ROOT))] = names

    return collected
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def extract_ac_reference_from_test_name(test_name: str) -> str | None:
|
|
89
|
+
"""Extract AC URN reference from test name."""
|
|
90
|
+
match = re.search(r'AC[-_]([A-Z0-9]+)[-_](\d{3})', test_name.upper())
|
|
91
|
+
if match:
|
|
92
|
+
return f"AC-{match.group(1)}-{match.group(2)}"
|
|
93
|
+
|
|
94
|
+
match = re.search(r'(?:test_)?ac_(\d{3})', test_name.lower())
|
|
95
|
+
if match:
|
|
96
|
+
return f"AC-{match.group(1)}"
|
|
97
|
+
|
|
98
|
+
return None
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def extract_ac_reference_from_docstring(file_path: str, test_name: str) -> str | None:
    """Extract AC reference from test docstring or header comments (per RED convention).

    Per RED convention v1.0+, tests SHOULD have AC URN in BOTH:
    1. Header comment: # URN: acc:...
    2. Module docstring: RED Test for acc:...

    This function accepts EITHER format for backward compatibility,
    but convention enforcement should validate BOTH are present and match.

    Args:
        file_path: Path of the test file, relative to REPO_ROOT.
        test_name: Test function name, used for the per-function fallback.

    Returns:
        The AC URN found (header comment preferred), or None.
    """
    try:
        with open(REPO_ROOT / file_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception:
        return None

    ac_from_header = None
    ac_from_docstring = None

    # Python header comment (RED convention format: # URN: acc:...)
    if file_path.endswith('.py'):
        # Check header comment
        header_comment_match = re.search(
            r'^#\s*URN:\s*(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3}(?:-[a-z\-]+)?)',
            content,
            re.MULTILINE
        )
        if header_comment_match:
            ac_from_header = header_comment_match.group(1)

        # Check module docstring
        module_docstring_match = re.search(
            r'^\s*""".*?(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3}(?:-[a-z\-]+)?)',
            content,
            re.DOTALL | re.MULTILINE
        )
        if module_docstring_match:
            ac_from_docstring = module_docstring_match.group(1)

    # Function docstring (fallback).
    # Fix: re.escape guards against regex metacharacters in test_name, and
    # the \s*\( anchor ensures the exact function is matched -- previously
    # "def test_foo" could match the docstring of "def test_foo_bar".
    if not ac_from_docstring:
        pattern = rf'def\s+{re.escape(test_name)}\s*\(.*?"""(.*?)"""'
        match = re.search(pattern, content, re.DOTALL)
        if match:
            docstring = match.group(1)
            ac_match = re.search(r'acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3}(?:-[a-z\-]+)?', docstring)
            if ac_match:
                ac_from_docstring = ac_match.group(0)

    # Return whichever we found (prefer header for consistency)
    # Note: Ideally both should exist and match (per convention)
    return ac_from_header or ac_from_docstring
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def map_tests_to_acs(python_tests: Dict[str, List[str]]) -> Dict[str, List[Tuple[str, str]]]:
    """Associate discovered tests with the acceptance criterion each references.

    Args:
        python_tests: Mapping of file path -> test function names.

    Returns:
        Mapping of AC reference -> list of (file_path, test_name) pairs.
        Tests with no detectable AC reference are ignored.
    """
    ac_to_tests = defaultdict(list)

    for path, names in python_tests.items():
        for name in names:
            # Docstring extraction wins: it carries the full AC URN,
            # whereas the test name only encodes a partial reference.
            reference = (
                extract_ac_reference_from_docstring(path, name)
                or extract_ac_reference_from_test_name(name)
            )
            if reference:
                ac_to_tests[reference].append((path, name))

    # Hand back a plain dict so missing keys do not auto-materialise.
    return dict(ac_to_tests)
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def extract_test_category(ac_urn: str) -> str:
    """Pull the test-category token (UNIT, HTTP, GOLDEN, ...) out of an AC URN.

    Expects URNs shaped like ``acc:<wagon>:<TOKEN>-<CATEGORY>-<NNN>``.

    Returns:
        The category token, or "UNKNOWN" when the URN does not match.
    """
    parsed = re.search(r'acc:[a-z\-]+:([A-Z0-9]+)-([A-Z]+)-\d{3}', ac_urn)
    return parsed.group(2) if parsed else "UNKNOWN"
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def suggest_test_location(ac_data: Dict) -> str:
    """Build the conventional path for the test file covering this AC.

    Convention: ``python/<wagon>/test_<wmbt-file-stem>.py``.
    """
    return "python/{}/test_{}.py".format(ac_data['wagon'], ac_data['wmbt_file'])
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def generate_report() -> None:
    """Generate comprehensive coverage gap report.

    Prints, in order: overall coverage stats, per-wagon coverage,
    missing-test counts per category, the full list of uncovered ACs
    grouped by wagon, and prioritised recommendations. Output goes to
    stdout only; nothing is returned or written to disk.
    """
    print("=" * 70)
    print("COVERAGE GAP ANALYSIS - Full Detailed Report")
    print("=" * 70)
    print()

    # Find all ACs and tests
    acs = find_acceptance_criteria()
    python_tests = find_python_tests()
    ac_to_tests = map_tests_to_acs(python_tests)

    # Find missing tests: an AC is "missing" when no test references it.
    missing_acs = []
    for ac_urn, ac_data in acs.items():
        if ac_urn not in ac_to_tests:
            missing_acs.append((ac_urn, ac_data))

    # Calculate overall coverage (guarded against division by zero)
    total_acs = len(acs)
    covered_acs = total_acs - len(missing_acs)
    coverage_pct = (covered_acs / total_acs * 100) if total_acs > 0 else 0

    print(f"📊 OVERALL COVERAGE")
    print(f" Total ACs: {total_acs}")
    print(f" Covered: {covered_acs}")
    print(f" Missing: {len(missing_acs)}")
    print(f" Coverage: {coverage_pct:.1f}%")
    print()

    # Group by wagon
    print("=" * 70)
    print("📦 COVERAGE BY WAGON")
    print("=" * 70)
    print()

    # Per-wagon tallies; 'acs' collects only the *missing* ACs for that wagon.
    wagon_coverage = defaultdict(lambda: {'total': 0, 'missing': 0, 'acs': []})

    for ac_urn, ac_data in acs.items():
        wagon = ac_data['wagon']
        wagon_coverage[wagon]['total'] += 1
        if ac_urn not in ac_to_tests:
            wagon_coverage[wagon]['missing'] += 1
            wagon_coverage[wagon]['acs'].append((ac_urn, ac_data))

    # Sort by missing count (worst first)
    sorted_wagons = sorted(
        wagon_coverage.items(),
        key=lambda x: x[1]['missing'],
        reverse=True
    )

    # Fully covered wagons are omitted from the per-wagon listing.
    for wagon, stats in sorted_wagons:
        if stats['missing'] == 0:
            continue

        cov = ((stats['total'] - stats['missing']) / stats['total'] * 100) if stats['total'] > 0 else 0
        print(f"🚂 {wagon}")
        print(f" Coverage: {cov:.1f}% ({stats['total'] - stats['missing']}/{stats['total']})")
        print(f" Missing: {stats['missing']} ACs")
        print()

    # Group by test category
    print("=" * 70)
    print("🏷️ MISSING TESTS BY CATEGORY")
    print("=" * 70)
    print()

    category_breakdown = defaultdict(list)
    for ac_urn, ac_data in missing_acs:
        category = extract_test_category(ac_urn)
        category_breakdown[category].append((ac_urn, ac_data))

    for category in sorted(category_breakdown.keys()):
        print(f"📌 {category}: {len(category_breakdown[category])} missing")

    print()

    # Detailed breakdown
    print("=" * 70)
    print("📋 ALL MISSING ACCEPTANCE CRITERIA (Full List)")
    print("=" * 70)
    print()

    # Group by wagon for detailed output (reuses the worst-first ordering)
    for wagon, stats in sorted_wagons:
        if stats['missing'] == 0:
            continue

        print(f"\n{'=' * 70}")
        print(f"WAGON: {wagon} ({stats['missing']} missing tests)")
        print(f"{'=' * 70}\n")

        # Sort by AC URN for consistency
        sorted_acs = sorted(stats['acs'], key=lambda x: x[0])

        for ac_urn, ac_data in sorted_acs:
            category = extract_test_category(ac_urn)
            test_location = suggest_test_location(ac_data)

            print(f"URN: {ac_urn}")
            print(f" Category: {category}")
            print(f" WMBT: {ac_data['wmbt']}")
            print(f" Purpose: {ac_data['purpose']}")
            print(f" Spec File: {ac_data['file']}")
            print(f" Suggested Test: {test_location}")
            print()

    # Summary and recommendations
    print("=" * 70)
    print("💡 RECOMMENDATIONS")
    print("=" * 70)
    print()

    # Top five wagons by missing count; zero-missing wagons sort to the
    # end of sorted_wagons and are simply not printed here.
    print("Priority Order (by missing test count):")
    for i, (wagon, stats) in enumerate(sorted_wagons[:5], 1):
        if stats['missing'] == 0:
            continue
        print(f" {i}. {wagon}: {stats['missing']} missing tests")

    print()
    print("Next Steps:")
    print(" 1. Focus on high-priority wagons first")
    print(" 2. Group test creation by WMBT file (test file)")
    print(" 3. Use suggested test locations above")
    print(" 4. Reference spec files for AC details")
    print()
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
# Script entry point: allow running the report directly.
if __name__ == "__main__":
    generate_report()
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Script to automatically fix dual AC reference violations.
|
|
4
|
+
|
|
5
|
+
Fixes:
|
|
6
|
+
1. Missing header comments (adds # URN: acc:... at top)
|
|
7
|
+
2. Missing module docstrings (adds RED Test for acc:... after headers)
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# NOTE(review): assumes the repo root sits four directory levels above this
# file -- confirm against the deployment layout.
REPO_ROOT = Path(__file__).resolve().parents[4]
PYTHON_DIR = REPO_ROOT / "python"  # directory scanned for test_*.py files
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def extract_ac_from_header(content: str) -> str | None:
|
|
19
|
+
"""Extract AC URN from header comment."""
|
|
20
|
+
match = re.search(
|
|
21
|
+
r'^#\s*URN:\s*(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3}(?:-[a-z\-]+)?)',
|
|
22
|
+
content,
|
|
23
|
+
re.MULTILINE
|
|
24
|
+
)
|
|
25
|
+
return match.group(1) if match else None
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def extract_ac_from_docstring(content: str) -> str | None:
|
|
29
|
+
"""Extract AC URN from module docstring."""
|
|
30
|
+
match = re.search(
|
|
31
|
+
r'^\s*""".*?(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3}(?:-[a-z\-]+)?)',
|
|
32
|
+
content,
|
|
33
|
+
re.DOTALL | re.MULTILINE
|
|
34
|
+
)
|
|
35
|
+
return match.group(1) if match else None
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def add_header_comment(content: str, ac_urn: str) -> str:
    """Prepend the conventional RED header-comment block carrying *ac_urn*."""
    header_lines = (
        "# Runtime: python\n"
        "# Rationale: Test implementation for acceptance criteria\n"
        f"# URN: {ac_urn}\n"
        "# Phase: RED\n"
        "# Purpose: Verify acceptance criteria\n"
        "\n"
    )
    return header_lines + content
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def add_module_docstring(content: str, ac_urn: str) -> str:
    """Insert the RED module-docstring template (referencing *ac_urn*)
    immediately after the leading header-comment block."""
    lines = content.split('\n')

    # The docstring belongs right after the run of leading '#' comments
    # and blank lines at the top of the file.
    position = 0
    for index, line in enumerate(lines):
        if not (line.startswith('#') or line.strip() == ''):
            break
        position = index + 1

    # Curly-brace fields are deliberately literal placeholders for a human
    # to fill in later.
    template = (
        '"""\n'
        f'RED Test for {ac_urn}\n'
        'wagon: {wagon} | feature: {feature} | phase: RED\n'
        'WMBT: {wmbt URN}\n'
        'Purpose: {acceptance criteria purpose}\n'
        '"""'
    )

    lines.insert(position, template)
    return '\n'.join(lines)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def fix_file(file_path: Path) -> tuple[bool, str]:
    """
    Bring a single test file into dual-reference compliance.

    Adds whichever of the header comment / module docstring is missing,
    copying the AC URN from the one that is present.

    Returns:
        (changed, message) tuple
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            source = handle.read()
    except Exception as exc:
        return False, f"ERROR: Could not read: {exc}"

    header_urn = extract_ac_from_header(source)
    docstring_urn = extract_ac_from_docstring(source)

    # Nothing to do when the file already complies ...
    if header_urn and docstring_urn:
        return False, "SKIP: Already has both header and docstring"

    # ... or when it predates the convention entirely.
    if not header_urn and not docstring_urn:
        return False, "SKIP: Legacy test without AC URN"

    updated = source
    applied = []

    if docstring_urn and not header_urn:
        updated = add_header_comment(updated, docstring_urn)
        applied.append("Added header comment")

    if header_urn and not docstring_urn:
        updated = add_module_docstring(updated, header_urn)
        applied.append("Added module docstring")

    try:
        with open(file_path, 'w', encoding='utf-8') as handle:
            handle.write(updated)
    except Exception as exc:
        return False, f"ERROR: Could not write: {exc}"
    return True, f"FIXED: {', '.join(applied)}"
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def find_test_files() -> list:
    """Return every python test file, excluding caches and conftest modules."""
    if not PYTHON_DIR.exists():
        return []

    return [
        candidate
        for candidate in PYTHON_DIR.rglob("test_*.py")
        if '__pycache__' not in str(candidate) and 'conftest' not in candidate.name
    ]
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def main() -> None:
    """Fix all test files with dual AC reference violations.

    Walks every discovered test file, applies fix_file(), and prints a
    per-file status line plus a final summary. Files that need no change
    are counted but not printed.
    """
    test_files = find_test_files()

    if not test_files:
        print("No test files found")
        return

    print(f"Found {len(test_files)} test files")
    print("=" * 80)

    fixed_count = 0
    skipped_count = 0
    error_count = 0

    # Sorted for deterministic, repo-order output.
    for test_file in sorted(test_files):
        rel_path = test_file.relative_to(REPO_ROOT)
        changed, message = fix_file(test_file)

        if changed:
            print(f"✅ {rel_path}")
            print(f" {message}")
            fixed_count += 1
        elif "ERROR" in message:
            print(f"❌ {rel_path}")
            print(f" {message}")
            error_count += 1
        else:
            # Skip printing for files that don't need changes
            skipped_count += 1

    print("=" * 80)
    print(f"Summary:")
    print(f" Fixed: {fixed_count} files")
    print(f" Skipped: {skipped_count} files")
    print(f" Errors: {error_count} files")
    print()
    print(f"Total processed: {len(test_files)} files")
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
# Script entry point: allow running the fixer directly.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Simple script to remove consecutive duplicate lines in test files.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# NOTE(review): assumes the repo root sits four directory levels above this
# file -- confirm against the deployment layout.
REPO_ROOT = Path(__file__).resolve().parents[4]
PYTHON_DIR = REPO_ROOT / "python"  # directory scanned for test_*.py files
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def remove_consecutive_duplicates(content: str) -> tuple[str, bool]:
    """
    Collapse runs of identical adjacent lines down to a single occurrence.

    Note: blank lines are lines too, so consecutive empty lines are also
    collapsed.

    Returns:
        (cleaned_content, was_changed)
    """
    lines = content.split('\n')
    if not lines:
        return content, False

    # Keep a line only when it differs from its predecessor; the first
    # line always survives.
    kept = [lines[0]]
    kept.extend(
        current
        for previous, current in zip(lines, lines[1:])
        if current != previous
    )

    return '\n'.join(kept), len(kept) != len(lines)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def find_test_files() -> list:
    """Return every python test file, excluding caches and conftest modules."""
    if not PYTHON_DIR.exists():
        return []

    return [
        candidate
        for candidate in PYTHON_DIR.rglob("test_*.py")
        if '__pycache__' not in str(candidate) and 'conftest' not in candidate.name
    ]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def main() -> None:
    """Remove consecutive duplicate lines in all test files.

    Reads each discovered test file, collapses consecutive duplicate
    lines, and writes the file back only when something changed. Prints
    a per-file status line and a final count of cleaned files.
    """
    test_files = find_test_files()

    if not test_files:
        print("No test files found")
        return

    print(f"Checking {len(test_files)} test files for consecutive duplicates")
    print("=" * 80)

    cleaned_count = 0

    # Sorted for deterministic, repo-order output.
    for test_file in sorted(test_files):
        try:
            with open(test_file, 'r', encoding='utf-8') as f:
                original_content = f.read()
        except Exception as e:
            print(f"❌ {test_file.relative_to(REPO_ROOT)}")
            print(f" ERROR: Could not read: {e}")
            continue

        cleaned_content, was_changed = remove_consecutive_duplicates(original_content)

        # Only touch the file on disk when the content actually changed.
        if was_changed:
            try:
                with open(test_file, 'w', encoding='utf-8') as f:
                    f.write(cleaned_content)
                rel_path = test_file.relative_to(REPO_ROOT)
                print(f"✅ {rel_path}")
                print(f" Removed consecutive duplicate lines")
                cleaned_count += 1
            except Exception as e:
                print(f"❌ {test_file.relative_to(REPO_ROOT)}")
                print(f" ERROR: Could not write: {e}")

    print("=" * 80)
    print(f"Cleaned {cleaned_count} files")
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
# Script entry point: allow running the cleaner directly.
if __name__ == "__main__":
    main()
|