atdd-0.2.1-py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as published in their respective public registries.
- atdd/__init__.py +6 -0
- atdd/__main__.py +4 -0
- atdd/cli.py +404 -0
- atdd/coach/__init__.py +0 -0
- atdd/coach/commands/__init__.py +0 -0
- atdd/coach/commands/add_persistence_metadata.py +215 -0
- atdd/coach/commands/analyze_migrations.py +188 -0
- atdd/coach/commands/consumers.py +720 -0
- atdd/coach/commands/infer_governance_status.py +149 -0
- atdd/coach/commands/initializer.py +177 -0
- atdd/coach/commands/interface.py +1078 -0
- atdd/coach/commands/inventory.py +565 -0
- atdd/coach/commands/migration.py +240 -0
- atdd/coach/commands/registry.py +1560 -0
- atdd/coach/commands/session.py +430 -0
- atdd/coach/commands/sync.py +405 -0
- atdd/coach/commands/test_interface.py +399 -0
- atdd/coach/commands/test_runner.py +141 -0
- atdd/coach/commands/tests/__init__.py +1 -0
- atdd/coach/commands/tests/test_telemetry_array_validation.py +235 -0
- atdd/coach/commands/traceability.py +4264 -0
- atdd/coach/conventions/session.convention.yaml +754 -0
- atdd/coach/overlays/__init__.py +2 -0
- atdd/coach/overlays/claude.md +2 -0
- atdd/coach/schemas/config.schema.json +34 -0
- atdd/coach/schemas/manifest.schema.json +101 -0
- atdd/coach/templates/ATDD.md +282 -0
- atdd/coach/templates/SESSION-TEMPLATE.md +327 -0
- atdd/coach/utils/__init__.py +0 -0
- atdd/coach/utils/graph/__init__.py +0 -0
- atdd/coach/utils/graph/urn.py +875 -0
- atdd/coach/validators/__init__.py +0 -0
- atdd/coach/validators/shared_fixtures.py +365 -0
- atdd/coach/validators/test_enrich_wagon_registry.py +167 -0
- atdd/coach/validators/test_registry.py +575 -0
- atdd/coach/validators/test_session_validation.py +1183 -0
- atdd/coach/validators/test_traceability.py +448 -0
- atdd/coach/validators/test_update_feature_paths.py +108 -0
- atdd/coach/validators/test_validate_contract_consumers.py +297 -0
- atdd/coder/__init__.py +1 -0
- atdd/coder/conventions/adapter.recipe.yaml +88 -0
- atdd/coder/conventions/backend.convention.yaml +460 -0
- atdd/coder/conventions/boundaries.convention.yaml +666 -0
- atdd/coder/conventions/commons.convention.yaml +460 -0
- atdd/coder/conventions/complexity.recipe.yaml +109 -0
- atdd/coder/conventions/component-naming.convention.yaml +178 -0
- atdd/coder/conventions/design.convention.yaml +327 -0
- atdd/coder/conventions/design.recipe.yaml +273 -0
- atdd/coder/conventions/dto.convention.yaml +660 -0
- atdd/coder/conventions/frontend.convention.yaml +542 -0
- atdd/coder/conventions/green.convention.yaml +1012 -0
- atdd/coder/conventions/presentation.convention.yaml +587 -0
- atdd/coder/conventions/refactor.convention.yaml +535 -0
- atdd/coder/conventions/technology.convention.yaml +206 -0
- atdd/coder/conventions/tests/__init__.py +0 -0
- atdd/coder/conventions/tests/test_adapter_recipe.py +302 -0
- atdd/coder/conventions/tests/test_complexity_recipe.py +289 -0
- atdd/coder/conventions/tests/test_component_taxonomy.py +278 -0
- atdd/coder/conventions/tests/test_component_urn_naming.py +165 -0
- atdd/coder/conventions/tests/test_thinness_recipe.py +286 -0
- atdd/coder/conventions/thinness.recipe.yaml +82 -0
- atdd/coder/conventions/train.convention.yaml +325 -0
- atdd/coder/conventions/verification.protocol.yaml +53 -0
- atdd/coder/schemas/design_system.schema.json +361 -0
- atdd/coder/validators/__init__.py +0 -0
- atdd/coder/validators/test_commons_structure.py +485 -0
- atdd/coder/validators/test_complexity.py +416 -0
- atdd/coder/validators/test_cross_language_consistency.py +431 -0
- atdd/coder/validators/test_design_system_compliance.py +413 -0
- atdd/coder/validators/test_dto_testing_patterns.py +268 -0
- atdd/coder/validators/test_green_cross_stack_layers.py +168 -0
- atdd/coder/validators/test_green_layer_dependencies.py +148 -0
- atdd/coder/validators/test_green_python_layer_structure.py +103 -0
- atdd/coder/validators/test_green_supabase_layer_structure.py +103 -0
- atdd/coder/validators/test_import_boundaries.py +396 -0
- atdd/coder/validators/test_init_file_urns.py +593 -0
- atdd/coder/validators/test_preact_layer_boundaries.py +221 -0
- atdd/coder/validators/test_presentation_convention.py +260 -0
- atdd/coder/validators/test_python_architecture.py +674 -0
- atdd/coder/validators/test_quality_metrics.py +420 -0
- atdd/coder/validators/test_station_master_pattern.py +244 -0
- atdd/coder/validators/test_train_infrastructure.py +454 -0
- atdd/coder/validators/test_train_urns.py +293 -0
- atdd/coder/validators/test_typescript_architecture.py +616 -0
- atdd/coder/validators/test_usecase_structure.py +421 -0
- atdd/coder/validators/test_wagon_boundaries.py +586 -0
- atdd/conftest.py +126 -0
- atdd/planner/__init__.py +1 -0
- atdd/planner/conventions/acceptance.convention.yaml +538 -0
- atdd/planner/conventions/appendix.convention.yaml +187 -0
- atdd/planner/conventions/artifact-naming.convention.yaml +852 -0
- atdd/planner/conventions/component.convention.yaml +670 -0
- atdd/planner/conventions/criteria.convention.yaml +141 -0
- atdd/planner/conventions/feature.convention.yaml +371 -0
- atdd/planner/conventions/interface.convention.yaml +382 -0
- atdd/planner/conventions/steps.convention.yaml +141 -0
- atdd/planner/conventions/train.convention.yaml +552 -0
- atdd/planner/conventions/wagon.convention.yaml +275 -0
- atdd/planner/conventions/wmbt.convention.yaml +258 -0
- atdd/planner/schemas/acceptance.schema.json +336 -0
- atdd/planner/schemas/appendix.schema.json +78 -0
- atdd/planner/schemas/component.schema.json +114 -0
- atdd/planner/schemas/feature.schema.json +197 -0
- atdd/planner/schemas/train.schema.json +192 -0
- atdd/planner/schemas/wagon.schema.json +281 -0
- atdd/planner/schemas/wmbt.schema.json +59 -0
- atdd/planner/validators/__init__.py +0 -0
- atdd/planner/validators/conftest.py +5 -0
- atdd/planner/validators/test_draft_wagon_registry.py +374 -0
- atdd/planner/validators/test_plan_cross_refs.py +240 -0
- atdd/planner/validators/test_plan_uniqueness.py +224 -0
- atdd/planner/validators/test_plan_urn_resolution.py +268 -0
- atdd/planner/validators/test_plan_wagons.py +174 -0
- atdd/planner/validators/test_train_validation.py +514 -0
- atdd/planner/validators/test_wagon_urn_chain.py +648 -0
- atdd/planner/validators/test_wmbt_consistency.py +327 -0
- atdd/planner/validators/test_wmbt_vocabulary.py +632 -0
- atdd/tester/__init__.py +1 -0
- atdd/tester/conventions/artifact.convention.yaml +257 -0
- atdd/tester/conventions/contract.convention.yaml +1009 -0
- atdd/tester/conventions/filename.convention.yaml +555 -0
- atdd/tester/conventions/migration.convention.yaml +509 -0
- atdd/tester/conventions/red.convention.yaml +797 -0
- atdd/tester/conventions/routing.convention.yaml +51 -0
- atdd/tester/conventions/telemetry.convention.yaml +458 -0
- atdd/tester/schemas/a11y.tmpl.json +17 -0
- atdd/tester/schemas/artifact.schema.json +189 -0
- atdd/tester/schemas/contract.schema.json +591 -0
- atdd/tester/schemas/contract.tmpl.json +95 -0
- atdd/tester/schemas/db.tmpl.json +20 -0
- atdd/tester/schemas/e2e.tmpl.json +17 -0
- atdd/tester/schemas/edge_function.tmpl.json +17 -0
- atdd/tester/schemas/event.tmpl.json +17 -0
- atdd/tester/schemas/http.tmpl.json +19 -0
- atdd/tester/schemas/job.tmpl.json +18 -0
- atdd/tester/schemas/load.tmpl.json +21 -0
- atdd/tester/schemas/metric.tmpl.json +19 -0
- atdd/tester/schemas/pack.schema.json +139 -0
- atdd/tester/schemas/realtime.tmpl.json +20 -0
- atdd/tester/schemas/rls.tmpl.json +18 -0
- atdd/tester/schemas/script.tmpl.json +16 -0
- atdd/tester/schemas/sec.tmpl.json +18 -0
- atdd/tester/schemas/storage.tmpl.json +18 -0
- atdd/tester/schemas/telemetry.schema.json +128 -0
- atdd/tester/schemas/telemetry_tracking_manifest.schema.json +143 -0
- atdd/tester/schemas/test_filename.schema.json +194 -0
- atdd/tester/schemas/test_intent.schema.json +179 -0
- atdd/tester/schemas/unit.tmpl.json +18 -0
- atdd/tester/schemas/visual.tmpl.json +18 -0
- atdd/tester/schemas/ws.tmpl.json +17 -0
- atdd/tester/utils/__init__.py +0 -0
- atdd/tester/utils/filename.py +300 -0
- atdd/tester/validators/__init__.py +0 -0
- atdd/tester/validators/cleanup_duplicate_headers.py +116 -0
- atdd/tester/validators/cleanup_duplicate_headers_v2.py +135 -0
- atdd/tester/validators/conftest.py +5 -0
- atdd/tester/validators/coverage_gap_report.py +321 -0
- atdd/tester/validators/fix_dual_ac_references.py +179 -0
- atdd/tester/validators/remove_duplicate_lines.py +93 -0
- atdd/tester/validators/test_acceptance_urn_filename_mapping.py +359 -0
- atdd/tester/validators/test_acceptance_urn_separator.py +166 -0
- atdd/tester/validators/test_artifact_naming_category.py +307 -0
- atdd/tester/validators/test_contract_schema_compliance.py +706 -0
- atdd/tester/validators/test_contracts_structure.py +200 -0
- atdd/tester/validators/test_coverage_adequacy.py +797 -0
- atdd/tester/validators/test_dual_ac_reference.py +225 -0
- atdd/tester/validators/test_fixture_validity.py +372 -0
- atdd/tester/validators/test_isolation.py +487 -0
- atdd/tester/validators/test_migration_coverage.py +204 -0
- atdd/tester/validators/test_migration_criteria.py +276 -0
- atdd/tester/validators/test_migration_generation.py +116 -0
- atdd/tester/validators/test_python_test_naming.py +410 -0
- atdd/tester/validators/test_red_layer_validation.py +95 -0
- atdd/tester/validators/test_red_python_layer_structure.py +87 -0
- atdd/tester/validators/test_red_supabase_layer_structure.py +90 -0
- atdd/tester/validators/test_telemetry_structure.py +634 -0
- atdd/tester/validators/test_typescript_test_naming.py +301 -0
- atdd/tester/validators/test_typescript_test_structure.py +84 -0
- atdd-0.2.1.dist-info/METADATA +221 -0
- atdd-0.2.1.dist-info/RECORD +184 -0
- atdd-0.2.1.dist-info/WHEEL +5 -0
- atdd-0.2.1.dist-info/entry_points.txt +2 -0
- atdd-0.2.1.dist-info/licenses/LICENSE +674 -0
- atdd-0.2.1.dist-info/top_level.txt +1 -0
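The conventions, schemas, and validators above ship as ordinary pytest modules inside the wheel. A minimal sketch of one way to collect the bundled validators from an installed copy of the package, assuming pytest is available (the package also exposes its own CLI via atdd/cli.py and entry_points.txt, which may be the intended entry point):

    # Sketch only, not part of the wheel: collect the tester-scoped validators
    # from the installed atdd package. Assumes pytest is installed; whether this
    # is the intended workflow (versus the bundled CLI) is an assumption.
    import sys
    import pytest

    if __name__ == "__main__":
        # "-m tester" selects tests carrying the @pytest.mark.tester marker
        # used throughout atdd/tester/validators/.
        sys.exit(pytest.main(["--pyargs", "atdd.tester.validators", "-m", "tester"]))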
--- /dev/null
+++ atdd/tester/validators/test_dual_ac_reference.py
@@ -0,0 +1,225 @@
+"""
+Test that Python test files have AC URN in BOTH header comment and docstring.
+
+Per RED convention v1.0+, tests MUST have AC URN in:
+1. Header comment: # URN: acc:...
+2. Module docstring: RED Test for acc:...
+
+This ensures:
+- Machine parseability (header comment)
+- Human readability (docstring)
+- Redundancy for validation
+"""
+
+import pytest
+import re
+from pathlib import Path
+
+
+# Path constants
+REPO_ROOT = Path(__file__).resolve().parents[4]
+PYTHON_DIR = REPO_ROOT / "python"
+
+
+def find_test_files() -> list:
+    """Find all Python test files."""
+    if not PYTHON_DIR.exists():
+        return []
+
+    test_files = []
+    for py_file in PYTHON_DIR.rglob("test_*.py"):
+        # Skip __pycache__ and conftest
+        if '__pycache__' in str(py_file) or 'conftest' in py_file.name:
+            continue
+        test_files.append(py_file)
+
+    return test_files
+
+
+def extract_ac_from_header(content: str) -> str | None:
+    """Extract AC URN from header comment."""
+    match = re.search(
+        r'^#\s*URN:\s*(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3}(?:-[a-z\-]+)?)',
+        content,
+        re.MULTILINE
+    )
+    return match.group(1) if match else None
+
+
+def extract_ac_from_docstring(content: str) -> str | None:
+    """Extract AC URN from module docstring."""
+    match = re.search(
+        r'^\s*""".*?(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3}(?:-[a-z\-]+)?)',
+        content,
+        re.DOTALL | re.MULTILINE
+    )
+    return match.group(1) if match else None
+
+
+@pytest.mark.platform
+def test_all_tests_have_dual_ac_references():
+    """
+    SPEC-TESTER-CONVENTION-0001: Test files MUST have AC URN in both header and docstring
+
+    Given: All Python test files
+    When: Checking for AC URN references
+    Then: Files MUST have AC URN in BOTH header comment AND module docstring
+          AND both references MUST match exactly
+    """
+    test_files = find_test_files()
+
+    if not test_files:
+        pytest.skip("No test files found")
+
+    errors = []
+    warnings = []
+
+    for test_file in test_files:
+        try:
+            with open(test_file, 'r', encoding='utf-8') as f:
+                content = f.read()
+        except Exception as e:
+            warnings.append(f"Could not read {test_file.relative_to(REPO_ROOT)}: {e}")
+            continue
+
+        ac_from_header = extract_ac_from_header(content)
+        ac_from_docstring = extract_ac_from_docstring(content)
+
+        rel_path = test_file.relative_to(REPO_ROOT)
+
+        # Check for presence
+        if not ac_from_header and not ac_from_docstring:
+            # Legacy test without AC URN - skip for now (will be migrated)
+            continue
+
+        if ac_from_header and not ac_from_docstring:
+            errors.append(
+                f"{rel_path}\n"
+                f" ❌ MISSING: Module docstring with AC URN\n"
+                f" Current state:\n"
+                f" ✅ Header comment: {ac_from_header}\n"
+                f" ❌ Module docstring: MISSING\n"
+                f"\n"
+                f" ACTION REQUIRED: Add this module docstring after header comments:\n"
+                f' """\n'
+                f' RED Test for {ac_from_header}\n'
+                f' wagon: {{wagon}} | feature: {{feature}} | phase: {{RED|GREEN|REFACTOR}}\n'
+                f' WMBT: {{wmbt URN}}\n'
+                f' Purpose: {{acceptance criteria purpose}}\n'
+                f' """\n'
+            )
+
+        if ac_from_docstring and not ac_from_header:
+            errors.append(
+                f"{rel_path}\n"
+                f" ❌ MISSING: Header comment with AC URN\n"
+                f" Current state:\n"
+                f" ❌ Header comment: MISSING\n"
+                f" ✅ Module docstring: {ac_from_docstring}\n"
+                f"\n"
+                f" ACTION REQUIRED: Add these lines at the top of the file:\n"
+                f" # Runtime: {{python|supabase|dart}}\n"
+                f" # Rationale: {{brief explanation}}\n"
+                f" # URN: {ac_from_docstring}\n"
+                f" # Phase: {{RED|GREEN|REFACTOR}}\n"
+                f" # Purpose: {{acceptance criteria purpose}}\n"
+            )
+
+        # Check for match (allowing slugless to match slugged)
+        # Pattern: acc:wagon:WMBT-HARNESS-NNN[-optional-slug]
+        if ac_from_header and ac_from_docstring:
+            # Extract base URN (without slug) for comparison
+            # Pattern: acc:wagon:WMBT-HARNESS-NNN
+            base_pattern = r'(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3})'
+            header_base = re.match(base_pattern, ac_from_header)
+            docstring_base = re.match(base_pattern, ac_from_docstring)
+
+            if header_base and docstring_base:
+                # Compare base URNs (both URNs should have same wagon:WMBT-HARNESS-NNN)
+                if header_base.group(1) != docstring_base.group(1):
+                    errors.append(
+                        f"{rel_path}\n"
+                        f" ❌ MISMATCH: Header and docstring reference different AC URNs\n"
+                        f" Current state:\n"
+                        f" Header comment: {ac_from_header}\n"
+                        f" Module docstring: {ac_from_docstring}\n"
+                        f"\n"
+                        f" ACTION REQUIRED: Both MUST reference the same AC URN\n"
+                        f" Either:\n"
+                        f" 1. Update header to: # URN: {ac_from_docstring}\n"
+                        f" OR\n"
+                        f" 2. Update docstring to: RED Test for {ac_from_header}\n"
+                        f" (Choose the correct AC URN from plan/ acceptance criteria)"
+                    )
+
+    if warnings:
+        print("\n⚠️ WARNINGS:")
+        for warning in warnings:
+            print(f" {warning}")
+
+    if errors:
+        # Categorize errors
+        missing_docstring = sum(1 for e in errors if "MISSING: Module docstring" in e)
+        missing_header = sum(1 for e in errors if "MISSING: Header comment" in e)
+        mismatched = sum(1 for e in errors if "MISMATCH:" in e)
+
+        error_report = "\n\n".join(errors)
+        pytest.fail(
+            f"\n\n"
+            f"══════════════════════════════════════════════════════════════════════\n"
+            f"❌ AC URN VALIDATION FAILED: {len(errors)} test files need updates\n"
+            f"══════════════════════════════════════════════════════════════════════\n"
+            f"\n"
+            f"BREAKDOWN:\n"
+            f" • Missing docstring: {missing_docstring} files\n"
+            f" • Missing header: {missing_header} files\n"
+            f" • Mismatched URNs: {mismatched} files\n"
+            f"\n"
+            f"PER RED CONVENTION v1.0+, test files MUST have AC URN in BOTH:\n"
+            f" 1. Header comment: # URN: acc:...\n"
+            f" 2. Module docstring: RED Test for acc:...\n"
+            f" AND both references MUST match exactly.\n"
+            f"\n"
+            f"══════════════════════════════════════════════════════════════════════\n"
+            f"DETAILED ERRORS:\n"
+            f"══════════════════════════════════════════════════════════════════════\n"
+            f"\n{error_report}\n"
+        )
+
+
+@pytest.mark.platform
+def test_dual_ac_reference_format_examples():
+    """
+    SPEC-TESTER-CONVENTION-0002: Document correct dual AC reference format
+
+    This test documents the expected format for dual AC references.
+    """
+    # This test always passes - it's documentation
+    correct_format = '''
+# Runtime: python
+# Rationale: Game mechanics - stateful timebank depletion algorithm
+# URN: acc:burn-timebank:E001-UNIT-001
+# Phase: GREEN
+# Purpose: Verify timebank decrements during active decision
+"""
+RED Test for acc:burn-timebank:E001-UNIT-001
+wagon: burn-timebank | feature: burn-time | phase: GREEN
+WMBT: wmbt:burn-timebank:E001
+Purpose: Verify timebank decrements during active decision
+"""
+
+import pytest
+
+
+def test_e001_unit_001_timebank_decrements_during_decision():
+    """Test implementation..."""
+    pass
+'''
+
+    # Validate the format
+    ac_from_header = extract_ac_from_header(correct_format)
+    ac_from_docstring = extract_ac_from_docstring(correct_format)
+
+    assert ac_from_header == "acc:burn-timebank:E001-UNIT-001"
+    assert ac_from_docstring == "acc:burn-timebank:E001-UNIT-001"
+    assert ac_from_header == ac_from_docstring
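A note on the mismatch check above: only the base acc:wagon:WMBT-HARNESS-NNN portion of each URN is compared, so a slugged header URN still matches a slugless docstring URN. A small self-contained illustration of that slug-tolerant comparison, using made-up URNs:

    # Illustration only: slug-tolerant URN comparison as performed by
    # test_all_tests_have_dual_ac_references above. The URNs below are made up.
    import re

    BASE_PATTERN = r'(acc:[a-z\-]+:[A-Z0-9]+-[A-Z0-9]+-\d{3})'

    header_urn = "acc:burn-timebank:E001-UNIT-001-timebank-depletes"  # slugged (illustrative)
    docstring_urn = "acc:burn-timebank:E001-UNIT-001"                 # slugless (illustrative)

    header_base = re.match(BASE_PATTERN, header_urn)
    docstring_base = re.match(BASE_PATTERN, docstring_urn)

    assert header_base and docstring_base
    # Same base URN on both sides, so no MISMATCH error is reported.
    assert header_base.group(1) == docstring_base.group(1)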
--- /dev/null
+++ atdd/tester/validators/test_fixture_validity.py
@@ -0,0 +1,372 @@
+"""
+Test fixtures match contract schemas and are valid.
+
+Validates:
+- Test fixtures conform to contract schemas
+- Fixture data is realistic and valid
+- Fixtures cover edge cases
+- Fixtures are not hardcoded production data
+
+Inspired by: .claude/utils/tester/ (fixture utilities)
+But: Self-contained, no utility dependencies
+"""
+
+import pytest
+import json
+import yaml
+from pathlib import Path
+from typing import Dict, List, Any
+
+
+# Path constants
+REPO_ROOT = Path(__file__).resolve().parents[4]
+PYTHON_DIR = REPO_ROOT / "python"
+CONTRACTS_DIR = REPO_ROOT / "contracts"
+
+
+def find_contract_schemas() -> Dict[str, Dict]:
+    """
+    Find all contract schemas.
+
+    Returns:
+        Dict mapping contract ID to schema data
+    """
+    if not CONTRACTS_DIR.exists():
+        return {}
+
+    schemas = {}
+
+    for schema_file in CONTRACTS_DIR.rglob("*.schema.json"):
+        try:
+            with open(schema_file, 'r', encoding='utf-8') as f:
+                schema = json.load(f)
+
+            schema_id = schema.get('$id', str(schema_file.stem))
+            schemas[schema_id] = {
+                'schema': schema,
+                'file': str(schema_file.relative_to(REPO_ROOT))
+            }
+        except Exception:
+            continue
+
+    return schemas
+
+
+def find_test_fixtures() -> Dict[str, List[Any]]:
+    """
+    Find test fixtures in Python test files.
+
+    Returns:
+        Dict mapping fixture file to list of fixture data
+    """
+    if not PYTHON_DIR.exists():
+        return {}
+
+    fixtures = {}
+
+    # Look for fixture files (conftest.py, fixtures.py, etc.)
+    for test_dir in PYTHON_DIR.rglob("test"):
+        if not test_dir.is_dir():
+            continue
+
+        # Check conftest.py
+        conftest = test_dir / "conftest.py"
+        if conftest.exists():
+            fixture_data = extract_fixtures_from_file(conftest)
+            if fixture_data:
+                fixtures[str(conftest.relative_to(REPO_ROOT))] = fixture_data
+
+        # Check fixtures.py
+        fixtures_file = test_dir / "fixtures.py"
+        if fixtures_file.exists():
+            fixture_data = extract_fixtures_from_file(fixtures_file)
+            if fixture_data:
+                fixtures[str(fixtures_file.relative_to(REPO_ROOT))] = fixture_data
+
+        # Check fixtures/ directory
+        fixtures_dir = test_dir / "fixtures"
+        if fixtures_dir.exists() and fixtures_dir.is_dir():
+            for fixture_file in fixtures_dir.glob("*.json"):
+                try:
+                    with open(fixture_file, 'r', encoding='utf-8') as f:
+                        data = json.load(f)
+                    fixtures[str(fixture_file.relative_to(REPO_ROOT))] = [data]
+                except Exception:
+                    continue
+
+            for fixture_file in fixtures_dir.glob("*.yaml"):
+                try:
+                    with open(fixture_file, 'r', encoding='utf-8') as f:
+                        data = yaml.safe_load(f)
+                    fixtures[str(fixture_file.relative_to(REPO_ROOT))] = [data]
+                except Exception:
+                    continue
+
+    return fixtures
+
+
+def extract_fixtures_from_file(file_path: Path) -> List[Dict]:
+    """
+    Extract fixture data from Python file.
+
+    Returns:
+        List of fixture dictionaries found in file
+    """
+    # Simplified extraction - looks for dict literals
+    # In reality, would need AST parsing for complete extraction
+    fixtures = []
+
+    try:
+        with open(file_path, 'r', encoding='utf-8') as f:
+            content = f.read()
+    except Exception:
+        return []
+
+    # Look for pytest fixtures that return dictionaries
+    # This is a simplified heuristic
+    import re
+
+    # Find @pytest.fixture decorated functions
+    fixture_pattern = r'@pytest\.fixture[^\n]*\ndef\s+(\w+)\([^)]*\):'
+    fixture_matches = re.finditer(fixture_pattern, content)
+
+    for match in fixture_matches:
+        fixture_name = match.group(1)
+        # Very simplified - just noting that fixtures exist
+        # Real implementation would extract actual data
+        fixtures.append({'fixture_name': fixture_name, 'type': 'pytest_fixture'})
+
+    return fixtures
+
+
+def validate_against_schema(data: Dict, schema: Dict) -> List[str]:
+    """
+    Validate data against JSON schema.
+
+    Returns:
+        List of validation errors (empty if valid)
+    """
+    errors = []
+
+    # Check required fields
+    required = schema.get('required', [])
+    for field in required:
+        if field not in data:
+            errors.append(f"Missing required field: {field}")
+
+    # Check property types
+    properties = schema.get('properties', {})
+    for field, value in data.items():
+        if field in properties:
+            expected_type = properties[field].get('type')
+            actual_type = type(value).__name__
+
+            # Map Python types to JSON schema types
+            type_map = {
+                'str': 'string',
+                'int': 'integer',
+                'float': 'number',
+                'bool': 'boolean',
+                'list': 'array',
+                'dict': 'object',
+                'NoneType': 'null'
+            }
+
+            json_type = type_map.get(actual_type, actual_type)
+
+            if expected_type and json_type != expected_type:
+                errors.append(
+                    f"Field '{field}' has type '{json_type}', expected '{expected_type}'"
+                )
+
+    return errors
+
+
+def check_for_suspicious_data(data: Any) -> List[str]:
+    """
+    Check fixture data for suspicious patterns.
+
+    Returns:
+        List of warnings about suspicious data
+    """
+    warnings = []
+
+    if isinstance(data, dict):
+        for key, value in data.items():
+            # Check for potential production data patterns
+            if isinstance(value, str):
+                # Email addresses (might be real)
+                if '@' in value and '.' in value and 'example.com' not in value and 'test.com' not in value:
+                    warnings.append(f"Field '{key}' contains real-looking email: {value}")
+
+                # Phone numbers (might be real)
+                if len(value.replace('-', '').replace(' ', '').replace('(', '').replace(')', '')) == 10 and value.replace('-', '').replace(' ', '').replace('(', '').replace(')', '').isdigit():
+                    warnings.append(f"Field '{key}' contains real-looking phone number")
+
+                # Hardcoded IDs (should be generated)
+                if key.endswith('_id') and value == value and not value.startswith('test-') and not value.startswith('fixture-'):
+                    warnings.append(f"Field '{key}' has hardcoded ID (should be generated)")
+
+            # Recursive check for nested dicts
+            if isinstance(value, dict):
+                warnings.extend(check_for_suspicious_data(value))
+
+            if isinstance(value, list):
+                for item in value:
+                    if isinstance(item, dict):
+                        warnings.extend(check_for_suspicious_data(item))
+
+    return warnings
+
+
+@pytest.mark.tester
+def test_fixtures_match_contract_schemas():
+    """
+    SPEC-TESTER-FIXTURE-0001: Test fixtures conform to contract schemas.
+
+    Fixtures should match the structure defined in contracts.
+    This ensures tests use realistic, valid data.
+
+    Given: Test fixtures and contract schemas
+    When: Validating fixture data against schemas
+    Then: All fixtures conform to their schemas
+    """
+    schemas = find_contract_schemas()
+    fixtures = find_test_fixtures()
+
+    if not schemas:
+        pytest.skip("No contract schemas found")
+
+    if not fixtures:
+        pytest.skip("No test fixtures found")
+
+    violations = []
+
+    # For each fixture, try to find matching schema
+    for fixture_file, fixture_data_list in fixtures.items():
+        for fixture_data in fixture_data_list:
+            if not isinstance(fixture_data, dict):
+                continue
+
+            # Skip pytest fixture metadata entries - they're not actual data
+            if fixture_data.get('type') == 'pytest_fixture':
+                continue
+
+            # Only validate fixtures that have an explicit schema reference
+            # via $schema or schema_ref field
+            schema_ref = fixture_data.get('$schema') or fixture_data.get('schema_ref')
+            if not schema_ref:
+                continue
+
+            # Find matching schema
+            if schema_ref in schemas:
+                errors = validate_against_schema(fixture_data, schemas[schema_ref]['schema'])
+
+                if errors:
+                    violations.append(
+                        f"{fixture_file}\n"
+                        f" Schema: {schema_ref}\n"
+                        f" Errors: {', '.join(errors[:3])}"
+                    )
+
+    if violations:
+        pytest.fail(
+            f"\n\nFound {len(violations)} fixture validation errors:\n\n" +
+            "\n\n".join(violations[:10]) +
+            (f"\n\n... and {len(violations) - 10} more" if len(violations) > 10 else "")
+        )
+
+
+@pytest.mark.tester
+def test_fixtures_do_not_contain_production_data():
+    """
+    SPEC-TESTER-FIXTURE-0002: Fixtures don't contain production data.
+
+    Test fixtures should use fake/generated data, not real production data.
+
+    Patterns to avoid:
+    - Real email addresses
+    - Real phone numbers
+    - Production API keys
+    - Actual user names
+
+    Given: Test fixtures
+    When: Scanning for production data patterns
+    Then: No production data found
+    """
+    fixtures = find_test_fixtures()
+
+    if not fixtures:
+        pytest.skip("No test fixtures found")
+
+    violations = []
+
+    for fixture_file, fixture_data_list in fixtures.items():
+        for fixture_data in fixture_data_list:
+            warnings = check_for_suspicious_data(fixture_data)
+
+            if warnings:
+                violations.append(
+                    f"{fixture_file}\n" +
+                    "\n".join(f" - {w}" for w in warnings[:5])
+                )
+
+    if violations:
+        pytest.fail(
+            f"\n\nFound {len(violations)} fixtures with suspicious data:\n\n" +
+            "\n\n".join(violations[:10]) +
+            (f"\n\n... and {len(violations) - 10} more" if len(violations) > 10 else "") +
+            "\n\nFixtures should use clearly fake/test data (example.com, test-, etc.)"
+        )
+
+
+@pytest.mark.tester
+def test_fixtures_use_descriptive_names():
+    """
+    SPEC-TESTER-FIXTURE-0003: Fixtures have descriptive names.
+
+    Fixture names should clearly indicate what they provide.
+
+    Good: valid_user_fixture, invalid_email_fixture
+    Bad: data, test_data, fixture1
+
+    Given: Test fixtures
+    When: Checking fixture names
+    Then: Names are descriptive and follow conventions
+    """
+    fixtures = find_test_fixtures()
+
+    if not fixtures:
+        pytest.skip("No test fixtures found")
+
+    violations = []
+
+    for fixture_file, fixture_data_list in fixtures.items():
+        for fixture_data in fixture_data_list:
+            if isinstance(fixture_data, dict) and 'fixture_name' in fixture_data:
+                name = fixture_data['fixture_name']
+
+                # Check for bad names
+                bad_patterns = ['data', 'test', 'fixture1', 'fixture2', 'tmp', 'temp']
+
+                if name.lower() in bad_patterns:
+                    violations.append(
+                        f"{fixture_file}\n"
+                        f" Fixture: {name}\n"
+                        f" Issue: Name too generic, should be descriptive"
+                    )
+
+                # Check if name is too short
+                if len(name) < 5:
+                    violations.append(
+                        f"{fixture_file}\n"
+                        f" Fixture: {name}\n"
+                        f" Issue: Name too short (should be descriptive)"
+                    )
+
+    if violations:
+        pytest.fail(
+            f"\n\nFound {len(violations)} fixture naming violations:\n\n" +
+            "\n\n".join(violations[:10]) +
+            (f"\n\n... and {len(violations) - 10} more" if len(violations) > 10 else "")
+        )
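For context, validate_against_schema above checks only the schema's required list and primitive type keywords rather than full JSON Schema semantics. A hedged sketch of what it reports for a hypothetical fixture that declares a schema_ref (the schema, fixture, and import path below are illustrative assumptions, not shipped artifacts):

    # Illustration only: hypothetical schema and fixture, not part of the package.
    # Assumes the module above is importable under this inferred name.
    from atdd.tester.validators.test_fixture_validity import validate_against_schema

    schema = {
        "required": ["email", "timebank_ms"],
        "properties": {
            "email": {"type": "string"},
            "timebank_ms": {"type": "integer"},
        },
    }

    fixture = {
        "schema_ref": "contract:example",   # hypothetical schema reference
        "email": "player@example.com",      # clearly fake, so check_for_suspicious_data stays quiet
        "timebank_ms": "9000",              # wrong type on purpose
    }

    print(validate_against_schema(fixture, schema))
    # -> ["Field 'timebank_ms' has type 'string', expected 'integer'"]

A fuller check could delegate to a JSON Schema library such as jsonschema, but the shipped validator stays self-contained, in line with its module docstring.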