atdd 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atdd/__init__.py +0 -0
- atdd/cli.py +404 -0
- atdd/coach/__init__.py +0 -0
- atdd/coach/commands/__init__.py +0 -0
- atdd/coach/commands/add_persistence_metadata.py +215 -0
- atdd/coach/commands/analyze_migrations.py +188 -0
- atdd/coach/commands/consumers.py +720 -0
- atdd/coach/commands/infer_governance_status.py +149 -0
- atdd/coach/commands/initializer.py +177 -0
- atdd/coach/commands/interface.py +1078 -0
- atdd/coach/commands/inventory.py +565 -0
- atdd/coach/commands/migration.py +240 -0
- atdd/coach/commands/registry.py +1560 -0
- atdd/coach/commands/session.py +430 -0
- atdd/coach/commands/sync.py +405 -0
- atdd/coach/commands/test_interface.py +399 -0
- atdd/coach/commands/test_runner.py +141 -0
- atdd/coach/commands/tests/__init__.py +1 -0
- atdd/coach/commands/tests/test_telemetry_array_validation.py +235 -0
- atdd/coach/commands/traceability.py +4264 -0
- atdd/coach/conventions/session.convention.yaml +754 -0
- atdd/coach/overlays/__init__.py +2 -0
- atdd/coach/overlays/claude.md +2 -0
- atdd/coach/schemas/config.schema.json +34 -0
- atdd/coach/schemas/manifest.schema.json +101 -0
- atdd/coach/templates/ATDD.md +282 -0
- atdd/coach/templates/SESSION-TEMPLATE.md +327 -0
- atdd/coach/utils/__init__.py +0 -0
- atdd/coach/utils/graph/__init__.py +0 -0
- atdd/coach/utils/graph/urn.py +875 -0
- atdd/coach/validators/__init__.py +0 -0
- atdd/coach/validators/shared_fixtures.py +365 -0
- atdd/coach/validators/test_enrich_wagon_registry.py +167 -0
- atdd/coach/validators/test_registry.py +575 -0
- atdd/coach/validators/test_session_validation.py +1183 -0
- atdd/coach/validators/test_traceability.py +448 -0
- atdd/coach/validators/test_update_feature_paths.py +108 -0
- atdd/coach/validators/test_validate_contract_consumers.py +297 -0
- atdd/coder/__init__.py +1 -0
- atdd/coder/conventions/adapter.recipe.yaml +88 -0
- atdd/coder/conventions/backend.convention.yaml +460 -0
- atdd/coder/conventions/boundaries.convention.yaml +666 -0
- atdd/coder/conventions/commons.convention.yaml +460 -0
- atdd/coder/conventions/complexity.recipe.yaml +109 -0
- atdd/coder/conventions/component-naming.convention.yaml +178 -0
- atdd/coder/conventions/design.convention.yaml +327 -0
- atdd/coder/conventions/design.recipe.yaml +273 -0
- atdd/coder/conventions/dto.convention.yaml +660 -0
- atdd/coder/conventions/frontend.convention.yaml +542 -0
- atdd/coder/conventions/green.convention.yaml +1012 -0
- atdd/coder/conventions/presentation.convention.yaml +587 -0
- atdd/coder/conventions/refactor.convention.yaml +535 -0
- atdd/coder/conventions/technology.convention.yaml +206 -0
- atdd/coder/conventions/tests/__init__.py +0 -0
- atdd/coder/conventions/tests/test_adapter_recipe.py +302 -0
- atdd/coder/conventions/tests/test_complexity_recipe.py +289 -0
- atdd/coder/conventions/tests/test_component_taxonomy.py +278 -0
- atdd/coder/conventions/tests/test_component_urn_naming.py +165 -0
- atdd/coder/conventions/tests/test_thinness_recipe.py +286 -0
- atdd/coder/conventions/thinness.recipe.yaml +82 -0
- atdd/coder/conventions/train.convention.yaml +325 -0
- atdd/coder/conventions/verification.protocol.yaml +53 -0
- atdd/coder/schemas/design_system.schema.json +361 -0
- atdd/coder/validators/__init__.py +0 -0
- atdd/coder/validators/test_commons_structure.py +485 -0
- atdd/coder/validators/test_complexity.py +416 -0
- atdd/coder/validators/test_cross_language_consistency.py +431 -0
- atdd/coder/validators/test_design_system_compliance.py +413 -0
- atdd/coder/validators/test_dto_testing_patterns.py +268 -0
- atdd/coder/validators/test_green_cross_stack_layers.py +168 -0
- atdd/coder/validators/test_green_layer_dependencies.py +148 -0
- atdd/coder/validators/test_green_python_layer_structure.py +103 -0
- atdd/coder/validators/test_green_supabase_layer_structure.py +103 -0
- atdd/coder/validators/test_import_boundaries.py +396 -0
- atdd/coder/validators/test_init_file_urns.py +593 -0
- atdd/coder/validators/test_preact_layer_boundaries.py +221 -0
- atdd/coder/validators/test_presentation_convention.py +260 -0
- atdd/coder/validators/test_python_architecture.py +674 -0
- atdd/coder/validators/test_quality_metrics.py +420 -0
- atdd/coder/validators/test_station_master_pattern.py +244 -0
- atdd/coder/validators/test_train_infrastructure.py +454 -0
- atdd/coder/validators/test_train_urns.py +293 -0
- atdd/coder/validators/test_typescript_architecture.py +616 -0
- atdd/coder/validators/test_usecase_structure.py +421 -0
- atdd/coder/validators/test_wagon_boundaries.py +586 -0
- atdd/conftest.py +126 -0
- atdd/planner/__init__.py +1 -0
- atdd/planner/conventions/acceptance.convention.yaml +538 -0
- atdd/planner/conventions/appendix.convention.yaml +187 -0
- atdd/planner/conventions/artifact-naming.convention.yaml +852 -0
- atdd/planner/conventions/component.convention.yaml +670 -0
- atdd/planner/conventions/criteria.convention.yaml +141 -0
- atdd/planner/conventions/feature.convention.yaml +371 -0
- atdd/planner/conventions/interface.convention.yaml +382 -0
- atdd/planner/conventions/steps.convention.yaml +141 -0
- atdd/planner/conventions/train.convention.yaml +552 -0
- atdd/planner/conventions/wagon.convention.yaml +275 -0
- atdd/planner/conventions/wmbt.convention.yaml +258 -0
- atdd/planner/schemas/acceptance.schema.json +336 -0
- atdd/planner/schemas/appendix.schema.json +78 -0
- atdd/planner/schemas/component.schema.json +114 -0
- atdd/planner/schemas/feature.schema.json +197 -0
- atdd/planner/schemas/train.schema.json +192 -0
- atdd/planner/schemas/wagon.schema.json +281 -0
- atdd/planner/schemas/wmbt.schema.json +59 -0
- atdd/planner/validators/__init__.py +0 -0
- atdd/planner/validators/conftest.py +5 -0
- atdd/planner/validators/test_draft_wagon_registry.py +374 -0
- atdd/planner/validators/test_plan_cross_refs.py +240 -0
- atdd/planner/validators/test_plan_uniqueness.py +224 -0
- atdd/planner/validators/test_plan_urn_resolution.py +268 -0
- atdd/planner/validators/test_plan_wagons.py +174 -0
- atdd/planner/validators/test_train_validation.py +514 -0
- atdd/planner/validators/test_wagon_urn_chain.py +648 -0
- atdd/planner/validators/test_wmbt_consistency.py +327 -0
- atdd/planner/validators/test_wmbt_vocabulary.py +632 -0
- atdd/tester/__init__.py +1 -0
- atdd/tester/conventions/artifact.convention.yaml +257 -0
- atdd/tester/conventions/contract.convention.yaml +1009 -0
- atdd/tester/conventions/filename.convention.yaml +555 -0
- atdd/tester/conventions/migration.convention.yaml +509 -0
- atdd/tester/conventions/red.convention.yaml +797 -0
- atdd/tester/conventions/routing.convention.yaml +51 -0
- atdd/tester/conventions/telemetry.convention.yaml +458 -0
- atdd/tester/schemas/a11y.tmpl.json +17 -0
- atdd/tester/schemas/artifact.schema.json +189 -0
- atdd/tester/schemas/contract.schema.json +591 -0
- atdd/tester/schemas/contract.tmpl.json +95 -0
- atdd/tester/schemas/db.tmpl.json +20 -0
- atdd/tester/schemas/e2e.tmpl.json +17 -0
- atdd/tester/schemas/edge_function.tmpl.json +17 -0
- atdd/tester/schemas/event.tmpl.json +17 -0
- atdd/tester/schemas/http.tmpl.json +19 -0
- atdd/tester/schemas/job.tmpl.json +18 -0
- atdd/tester/schemas/load.tmpl.json +21 -0
- atdd/tester/schemas/metric.tmpl.json +19 -0
- atdd/tester/schemas/pack.schema.json +139 -0
- atdd/tester/schemas/realtime.tmpl.json +20 -0
- atdd/tester/schemas/rls.tmpl.json +18 -0
- atdd/tester/schemas/script.tmpl.json +16 -0
- atdd/tester/schemas/sec.tmpl.json +18 -0
- atdd/tester/schemas/storage.tmpl.json +18 -0
- atdd/tester/schemas/telemetry.schema.json +128 -0
- atdd/tester/schemas/telemetry_tracking_manifest.schema.json +143 -0
- atdd/tester/schemas/test_filename.schema.json +194 -0
- atdd/tester/schemas/test_intent.schema.json +179 -0
- atdd/tester/schemas/unit.tmpl.json +18 -0
- atdd/tester/schemas/visual.tmpl.json +18 -0
- atdd/tester/schemas/ws.tmpl.json +17 -0
- atdd/tester/utils/__init__.py +0 -0
- atdd/tester/utils/filename.py +300 -0
- atdd/tester/validators/__init__.py +0 -0
- atdd/tester/validators/cleanup_duplicate_headers.py +116 -0
- atdd/tester/validators/cleanup_duplicate_headers_v2.py +135 -0
- atdd/tester/validators/conftest.py +5 -0
- atdd/tester/validators/coverage_gap_report.py +321 -0
- atdd/tester/validators/fix_dual_ac_references.py +179 -0
- atdd/tester/validators/remove_duplicate_lines.py +93 -0
- atdd/tester/validators/test_acceptance_urn_filename_mapping.py +359 -0
- atdd/tester/validators/test_acceptance_urn_separator.py +166 -0
- atdd/tester/validators/test_artifact_naming_category.py +307 -0
- atdd/tester/validators/test_contract_schema_compliance.py +706 -0
- atdd/tester/validators/test_contracts_structure.py +200 -0
- atdd/tester/validators/test_coverage_adequacy.py +797 -0
- atdd/tester/validators/test_dual_ac_reference.py +225 -0
- atdd/tester/validators/test_fixture_validity.py +372 -0
- atdd/tester/validators/test_isolation.py +487 -0
- atdd/tester/validators/test_migration_coverage.py +204 -0
- atdd/tester/validators/test_migration_criteria.py +276 -0
- atdd/tester/validators/test_migration_generation.py +116 -0
- atdd/tester/validators/test_python_test_naming.py +410 -0
- atdd/tester/validators/test_red_layer_validation.py +95 -0
- atdd/tester/validators/test_red_python_layer_structure.py +87 -0
- atdd/tester/validators/test_red_supabase_layer_structure.py +90 -0
- atdd/tester/validators/test_telemetry_structure.py +634 -0
- atdd/tester/validators/test_typescript_test_naming.py +301 -0
- atdd/tester/validators/test_typescript_test_structure.py +84 -0
- atdd-0.1.0.dist-info/METADATA +191 -0
- atdd-0.1.0.dist-info/RECORD +183 -0
- atdd-0.1.0.dist-info/WHEEL +5 -0
- atdd-0.1.0.dist-info/entry_points.txt +2 -0
- atdd-0.1.0.dist-info/licenses/LICENSE +674 -0
- atdd-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,4264 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Contract and telemetry traceability reconciliation command.
|
|
3
|
+
|
|
4
|
+
SPEC: .claude/agents/coach/utils.spec.yaml::traceability
|
|
5
|
+
IDs: SPEC-COACH-UTILS-0283 through SPEC-COACH-UTILS-0291
|
|
6
|
+
|
|
7
|
+
Purpose:
|
|
8
|
+
- Reconciles wagon manifests with actual contract/telemetry files
|
|
9
|
+
- Detects missing references (contract: null / telemetry: null)
|
|
10
|
+
- Proposes and applies fixes pragmatically with user approval
|
|
11
|
+
- Validates bidirectional traceability
|
|
12
|
+
|
|
13
|
+
Architecture: Clean 4-layer architecture
|
|
14
|
+
- Layer 1: Entities (Domain models)
|
|
15
|
+
- Layer 2: Use Cases (Business logic)
|
|
16
|
+
- Layer 3: Adapters (I/O, formatting)
|
|
17
|
+
- Layer 4: Command (CLI orchestration)
|
|
18
|
+
"""
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import yaml
|
|
22
|
+
import json
|
|
23
|
+
import re
|
|
24
|
+
from pathlib import Path
|
|
25
|
+
from typing import Dict, List, Optional, Tuple, Union
|
|
26
|
+
from dataclasses import dataclass, field
|
|
27
|
+
from collections import defaultdict
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# Path constants — resolved relative to this module's location inside the package.
# NOTE(review): parents[4] assumes this file sits four directories below the repo
# root (atdd/coach/commands/traceability.py plus one extra level) — verify against
# the actual checkout layout.
REPO_ROOT = Path(__file__).resolve().parents[4]
PLAN_DIR = REPO_ROOT / "plan"            # wagon manifests (scanned by ManifestParser)
CONTRACTS_DIR = REPO_ROOT / "contracts"  # contract schema files (ContractFile entities)
TELEMETRY_DIR = REPO_ROOT / "telemetry"  # telemetry definition files (TelemetryFile entities)
FACTS_DIR = REPO_ROOT / "facts"
FEATURES_DIR = REPO_ROOT / "features"    # feature YAML files with ioSeeds (FeatureFile entities)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
# ============================================================================
|
|
40
|
+
# LAYER 1: ENTITIES (Domain Models)
|
|
41
|
+
# ============================================================================
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass
class ProduceItem:
    """
    A contract or telemetry item that a wagon produces.

    Domain entity for one entry of a wagon manifest's produce section.

    telemetry_ref accepts:
    - List[str]: array of telemetry URNs (multiple signals)
    - str: single URN (legacy form)
    - None: no telemetry for this produce item
    """
    name: str
    to: str
    contract_ref: Optional[str]
    telemetry_ref: Optional[Union[str, List[str]]]
    wagon: str
    urn: Optional[str] = None  # Explicit URN from manifest (overrides derived URN)

    @property
    def has_null_contract_ref(self) -> bool:
        """True when the contract reference is absent or the literal string 'None'."""
        ref = self.contract_ref
        return ref is None or str(ref) == 'None'

    @property
    def has_null_telemetry_ref(self) -> bool:
        """
        True when the telemetry reference is effectively missing.

        Covers every accepted shape:
        - list  -> missing only when empty
        - str   -> missing only for the literal 'None' (legacy)
        - None / anything else -> missing
        """
        ref = self.telemetry_ref
        if isinstance(ref, list):
            return not ref
        if isinstance(ref, str):
            return ref == 'None'
        return True  # None or an unexpected type

    @property
    def derived_contract_urn(self) -> str:
        """Explicit URN when present, otherwise contract:<name>."""
        # e.g. name match:dilemma.paired -> contract:match:dilemma.paired
        return self.urn if self.urn else f"contract:{self.name}"

    @property
    def derived_telemetry_urn(self) -> str:
        """
        Explicit telemetry URN when present, otherwise telemetry:<name>.

        Returns a complete artifact-level URN (including variant), not an
        aspect-level one; with array refs each variant carries its own URN.
        """
        explicit = self.urn
        if explicit and explicit.startswith('telemetry:'):
            return explicit
        # e.g. name match:dilemma.paired -> telemetry:match:dilemma.paired
        return f"telemetry:{self.name}"
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
@dataclass
class ContractFile:
    """
    A contract schema file in the contracts directory.

    Domain entity representing an actual contract file on disk, including
    the producer/consumer wagons and traceability metadata parsed from it.
    """
    file_path: str  # path to the schema file on disk
    contract_id: str  # contract identifier
    domain: str  # domain segment of the contract (e.g. "match") — inferred; confirm against schema
    resource: str  # resource segment (e.g. "dilemma.paired") — inferred; confirm against schema
    version: Optional[str]  # schema version, when declared
    producer: Optional[str]  # producing wagon, when declared
    consumers: List[str] = field(default_factory=list)  # consuming wagons
    traceability: Dict = field(default_factory=dict)  # traceability metadata block from the file
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
@dataclass
class TelemetryFile:
    """
    A telemetry definition file in the telemetry directory.

    Domain entity representing an actual telemetry file on disk, including
    its back-reference to the produced artifact and acceptance criteria.
    """
    file_path: str  # path to the telemetry file on disk
    telemetry_id: str  # telemetry identifier
    domain: str  # domain segment — inferred; confirm against telemetry schema
    resource: str  # resource segment — inferred; confirm against telemetry schema
    producer: Optional[str]  # producing wagon, when declared
    artifact_ref: Optional[str] = None  # reference back to the artifact this telemetry observes
    acceptance_criteria: List[str] = field(default_factory=list)  # acceptance criteria references (presumably URNs)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
@dataclass
class SignalDeclaration:
    """
    A signal (metric/event/log) declared in an acceptance criteria.

    Domain entity representing telemetry requirements sourced from acceptance.
    """
    wagon: str  # wagon that owns the declaring acceptance criteria
    acceptance_urn: str  # URN of the acceptance criteria that declares this signal
    signal_type: str  # metric, event, log
    signal_name: str  # name of the declared signal
    plane: Optional[str] = None  # NOTE(review): semantics not visible in this module — confirm
    measure: Optional[str] = None  # NOTE(review): semantics not visible in this module — confirm
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
@dataclass
class FeatureIOSeed:
    """
    An I/O seed (consume/produce) declared in a feature file.

    Domain entity representing feature-level artifact dependencies.
    """
    name: str  # artifact name
    contract: Optional[str]  # contract reference, or None when missing
    telemetry: Optional[str]  # telemetry reference, or None when missing
    derived: Optional[bool] = None  # presumably marks seeds derived rather than declared — TODO confirm
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
@dataclass
class FeatureFile:
    """
    A feature YAML file with ioSeeds declarations.

    Domain entity representing an actual feature file on disk, holding the
    consume/produce seed lists parsed from its ioSeeds section.
    """
    file_path: str  # path to the feature YAML on disk
    feature_urn: str  # URN identifying the feature
    wagon_urn: str  # URN of the owning wagon
    consume: List[FeatureIOSeed] = field(default_factory=list)  # artifacts the feature consumes
    produce: List[FeatureIOSeed] = field(default_factory=list)  # artifacts the feature produces
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
@dataclass
class ReconciliationResult:
    """
    Result of reconciling wagon manifests with actual files.

    Aggregates all reconciliation findings. Each list holds one dict per
    finding; by_wagon groups findings per wagon.
    """
    total_issues: int = 0  # running total across all finding categories
    missing_contract_refs: List[Dict] = field(default_factory=list)  # produce items with contract: null
    missing_telemetry_refs: List[Dict] = field(default_factory=list)  # produce items with telemetry: null
    missing_signal_telemetry: List[Dict] = field(default_factory=list)
    orphaned_telemetry: List[Dict] = field(default_factory=list)
    telemetry_without_artifact_ref: List[Dict] = field(default_factory=list)
    telemetry_invalid_artifact_ref: List[Dict] = field(default_factory=list)
    telemetry_naming_violations: List[Dict] = field(default_factory=list)
    mismatched_producers: List[Dict] = field(default_factory=list)
    feature_io_mismatches: List[Dict] = field(default_factory=list)
    by_wagon: Dict[str, Dict] = field(default_factory=dict)  # findings grouped by wagon
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
@dataclass
class ContractImplementation:
    """
    A contract implementation in a specific programming language.

    Domain entity representing a DTO/entity class generated from a
    contract schema, as discovered in the codebase.
    """
    file_path: str  # path to the implementation file on disk
    contract_urn: str  # e.g., "match:dilemma.current"
    language: str  # 'python', 'dart', 'typescript'
    class_name: Optional[str] = None  # e.g., "CurrentDilemmaDTO"
    schema_ref: Optional[str] = None  # Path to source schema
    fields: List[str] = field(default_factory=list)  # field names declared by the implementation
    urn_comment: Optional[str] = None  # Extracted from file header
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
@dataclass
class ImplementationCoverage:
    """
    Cross-language implementation status for a contract.

    Records which of the three target languages (Python, Dart, TypeScript)
    have an implementation of the given contract schema.
    """
    contract_urn: str
    schema_path: str
    python_impl: Optional[ContractImplementation] = None
    dart_impl: Optional[ContractImplementation] = None
    typescript_impl: Optional[ContractImplementation] = None

    @property
    def coverage_percentage(self) -> float:
        """Percent of the three target languages that have an implementation."""
        impls = (self.python_impl, self.dart_impl, self.typescript_impl)
        done = sum(1 for impl in impls if impl is not None)
        return (done / 3.0) * 100

    @property
    def is_fully_covered(self) -> bool:
        """True when Python, Dart and TypeScript all have implementations."""
        return self.coverage_percentage == 100.0
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
@dataclass
class ImplementationReconciliationResult:
    """
    Result of reconciling contract schemas with language implementations.

    Aggregates per-contract coverage plus lists of implementation issues
    (missing language implementations, orphaned DTOs, field mismatches).
    """
    total_contracts: int = 0
    coverage_by_contract: List[ImplementationCoverage] = field(default_factory=list)
    missing_python: List[Dict] = field(default_factory=list)
    missing_dart: List[Dict] = field(default_factory=list)
    missing_typescript: List[Dict] = field(default_factory=list)
    orphaned_dtos: List[Dict] = field(default_factory=list)
    field_mismatches: List[Dict] = field(default_factory=list)

    @property
    def avg_coverage(self) -> float:
        """Mean coverage percentage across all contracts (0.0 when there are none)."""
        percentages = [cov.coverage_percentage for cov in self.coverage_by_contract]
        if not percentages:
            return 0.0
        return sum(percentages) / len(percentages)

    @property
    def total_issues(self) -> int:
        """Total number of implementation issues across every category."""
        categories = (
            self.missing_python,
            self.missing_dart,
            self.missing_typescript,
            self.orphaned_dtos,
            self.field_mismatches,
        )
        return sum(len(issues) for issues in categories)
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
@dataclass
class FunnelStage:
    """
    One stage of the traceability funnel with success/failure counts.

    Tracks how many items entered the stage and how many made it through;
    items that failed are recorded as leaks.
    """
    stage_name: str  # e.g., "Wagon → Artifact"
    total_in: int  # items entering this stage
    total_out: int  # items that passed through
    leaks: List[Dict] = field(default_factory=list)  # items that failed at this stage

    @property
    def leak_rate(self) -> float:
        """Percent of items lost at this stage (0.0 when nothing entered)."""
        if not self.total_in:
            return 0.0
        lost = self.total_in - self.total_out
        return (lost / self.total_in) * 100

    @property
    def pass_rate(self) -> float:
        """Percent of items that made it through (0.0 when nothing entered)."""
        if not self.total_in:
            return 0.0
        return (self.total_out / self.total_in) * 100
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
@dataclass
class ThemeFunnel:
    """
    Traceability funnel analysis for a theme (domain).

    Tracks: Theme → Wagons → Artifacts → Contracts → Implementations.
    """
    theme: str  # e.g., "match", "scenario"
    wagon_count: int = 0
    artifact_count: int = 0
    contract_count: int = 0
    python_impl_count: int = 0
    dart_impl_count: int = 0
    typescript_impl_count: int = 0

    # Per-transition funnel stages (populated by the analyzer)
    stage_wagon_to_artifact: Optional[FunnelStage] = None
    stage_artifact_to_contract: Optional[FunnelStage] = None
    stage_contract_to_python: Optional[FunnelStage] = None
    stage_contract_to_dart: Optional[FunnelStage] = None
    stage_contract_to_typescript: Optional[FunnelStage] = None

    @property
    def overall_health(self) -> float:
        """
        Overall traceability health as a percent.

        Implementations achieved divided by the best case of three language
        implementations per artifact; 0.0 when there are no artifacts.
        """
        if self.artifact_count == 0:
            return 0.0
        achieved = (
            self.python_impl_count
            + self.dart_impl_count
            + self.typescript_impl_count
        )
        best_case = self.artifact_count * 3  # three languages per artifact
        return (achieved / best_case) * 100 if best_case > 0 else 0.0
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
@dataclass
class SmartThemeFunnel:
    """
    Smart traceability funnel with producer/consumer awareness.

    Counts only the implementations actually required by the tech stacks
    of the wagons involved, rather than all possible languages.
    """
    theme: str
    wagon_count: int = 0
    artifact_count: int = 0
    contract_count: int = 0

    # Required counts (based on producer/consumer stacks)
    python_required: int = 0
    dart_required: int = 0
    typescript_required: int = 0

    # Implemented counts
    python_impl_count: int = 0
    dart_impl_count: int = 0
    typescript_impl_count: int = 0

    # Per-contract requirement details
    contracts: List[ContractRequirements] = field(default_factory=list)

    # Funnel stages
    stage_artifact_to_contract: Optional[FunnelStage] = None
    stage_contract_to_python: Optional[FunnelStage] = None
    stage_contract_to_dart: Optional[FunnelStage] = None
    stage_contract_to_typescript: Optional[FunnelStage] = None

    @staticmethod
    def _missing_rate(required: int, implemented: int) -> float:
        """Percent of required implementations still missing (0.0 when none required)."""
        if required == 0:
            return 0.0
        return ((required - implemented) / required) * 100

    @property
    def overall_health(self) -> float:
        """Implemented-over-required as a percent; 100.0 when nothing is required."""
        required = self.python_required + self.dart_required + self.typescript_required
        if required == 0:
            return 100.0  # no requirements = perfect health
        implemented = (
            self.python_impl_count
            + self.dart_impl_count
            + self.typescript_impl_count
        )
        return (implemented / required) * 100

    @property
    def python_missing_rate(self) -> float:
        """Percent of required Python implementations not yet present."""
        return self._missing_rate(self.python_required, self.python_impl_count)

    @property
    def dart_missing_rate(self) -> float:
        """Percent of required Dart implementations not yet present."""
        return self._missing_rate(self.dart_required, self.dart_impl_count)

    @property
    def typescript_missing_rate(self) -> float:
        """Percent of required TypeScript implementations not yet present."""
        return self._missing_rate(self.typescript_required, self.typescript_impl_count)
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
@dataclass
class FunnelAnalysisResult:
    """
    Complete funnel analysis showing traceability breakdown by theme.

    Identifies where in the wagon→artifact→contract→implementation chain
    traceability breaks, and collects contracts with no producing wagon.
    """
    by_theme: Dict[str, ThemeFunnel] = field(default_factory=dict)
    orphaned_contracts: List[Dict] = field(default_factory=list)  # contracts with no producing wagon

    @property
    def total_themes(self) -> int:
        """Number of themes analyzed."""
        return len(self.by_theme)

    @property
    def healthiest_theme(self) -> Optional[str]:
        """Name of the theme with the best health, or None when nothing analyzed."""
        if not self.by_theme:
            return None
        return max(self.by_theme, key=lambda name: self.by_theme[name].overall_health)

    @property
    def sickest_theme(self) -> Optional[str]:
        """Name of the theme with the worst health, or None when nothing analyzed."""
        if not self.by_theme:
            return None
        return min(self.by_theme, key=lambda name: self.by_theme[name].overall_health)
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
@dataclass
class SmartFunnelAnalysisResult:
    """
    Smart funnel analysis with producer/consumer awareness.

    Reports only implementations actually required by the involved wagons,
    keyed by theme.
    """
    by_theme: Dict[str, SmartThemeFunnel] = field(default_factory=dict)

    def _extreme_theme(self, pick) -> Optional[str]:
        """Return the theme selected by *pick* (max or min) on overall health."""
        if not self.by_theme:
            return None
        name, _funnel = pick(self.by_theme.items(), key=lambda item: item[1].overall_health)
        return name

    @property
    def total_themes(self) -> int:
        """Number of themes analyzed."""
        return len(self.by_theme)

    @property
    def healthiest_theme(self) -> Optional[str]:
        """Name of the theme with the best health, or None when nothing analyzed."""
        return self._extreme_theme(max)

    @property
    def sickest_theme(self) -> Optional[str]:
        """Name of the theme with the worst health, or None when nothing analyzed."""
        return self._extreme_theme(min)
|
|
467
|
+
|
|
468
|
+
|
|
469
|
+
@dataclass
class WagonTechStack:
    """
    Technology stack information for a wagon.

    Records which languages the wagon uses, determined from the codebase
    structure, plus where each language's code lives.
    """
    wagon_urn: str  # e.g., "wagon:pace-dilemmas"
    wagon_slug: str  # e.g., "pace-dilemmas"
    has_python: bool = False
    has_dart: bool = False
    has_typescript: bool = False
    python_path: Optional[str] = None  # e.g., "python/pace_dilemmas/"
    dart_path: Optional[str] = None  # e.g., "lib/features/scenario/"
    typescript_path: Optional[str] = None  # e.g., "src/wagons/pace-dilemmas/"
|
|
484
|
+
|
|
485
|
+
|
|
486
|
+
@dataclass
|
|
487
|
+
class ContractRequirements:
|
|
488
|
+
"""
|
|
489
|
+
Required DTO implementations for a contract based on producer/consumers.
|
|
490
|
+
|
|
491
|
+
Only languages actually used by producer/consumers are marked as required.
|
|
492
|
+
"""
|
|
493
|
+
contract_urn: str
|
|
494
|
+
schema_path: str
|
|
495
|
+
producer: Optional[str] = None # wagon URN
|
|
496
|
+
consumers: List[str] = field(default_factory=list) # wagon URNs
|
|
497
|
+
|
|
498
|
+
# Required implementations (based on tech stacks)
|
|
499
|
+
requires_python: bool = False
|
|
500
|
+
requires_dart: bool = False
|
|
501
|
+
requires_typescript: bool = False
|
|
502
|
+
|
|
503
|
+
# Actual implementations
|
|
504
|
+
has_python: bool = False
|
|
505
|
+
has_dart: bool = False
|
|
506
|
+
has_typescript: bool = False
|
|
507
|
+
|
|
508
|
+
# Missing requirements
|
|
509
|
+
missing_python: bool = False
|
|
510
|
+
missing_dart: bool = False
|
|
511
|
+
missing_typescript: bool = False
|
|
512
|
+
|
|
513
|
+
# Suggested paths
|
|
514
|
+
python_path_suggestion: Optional[str] = None
|
|
515
|
+
dart_path_suggestion: Optional[str] = None
|
|
516
|
+
typescript_path_suggestion: Optional[str] = None
|
|
517
|
+
|
|
518
|
+
def calculate_requirements(self, wagon_stacks: Dict[str, WagonTechStack]):
|
|
519
|
+
"""Calculate which languages are required based on producer/consumer stacks."""
|
|
520
|
+
all_wagons = []
|
|
521
|
+
if self.producer:
|
|
522
|
+
all_wagons.append(self.producer)
|
|
523
|
+
all_wagons.extend(self.consumers)
|
|
524
|
+
|
|
525
|
+
for wagon_urn in all_wagons:
|
|
526
|
+
stack = wagon_stacks.get(wagon_urn)
|
|
527
|
+
if stack:
|
|
528
|
+
if stack.has_python:
|
|
529
|
+
self.requires_python = True
|
|
530
|
+
if stack.has_dart:
|
|
531
|
+
self.requires_dart = True
|
|
532
|
+
if stack.has_typescript:
|
|
533
|
+
self.requires_typescript = True
|
|
534
|
+
|
|
535
|
+
# Calculate missing
|
|
536
|
+
self.missing_python = self.requires_python and not self.has_python
|
|
537
|
+
self.missing_dart = self.requires_dart and not self.has_dart
|
|
538
|
+
self.missing_typescript = self.requires_typescript and not self.has_typescript
|
|
539
|
+
|
|
540
|
+
@property
def total_required(self) -> int:
    """Count how many languages are required."""
    flags = (self.requires_python, self.requires_dart, self.requires_typescript)
    return sum(1 for flag in flags if flag)
|
|
544
|
+
|
|
545
|
+
@property
def total_implemented(self) -> int:
    """Count how many required languages are implemented."""
    language_status = (
        (self.requires_python, self.has_python),
        (self.requires_dart, self.has_dart),
        (self.requires_typescript, self.has_typescript),
    )
    # A language counts only when it is both required and present.
    return sum(1 for required, present in language_status if required and present)
|
|
556
|
+
|
|
557
|
+
@property
def coverage_percentage(self) -> float:
    """Calculate coverage percentage (only counting required languages)."""
    required = self.total_required
    # No requirements means nothing can be missing -> full coverage.
    return 100.0 if required == 0 else self.total_implemented / required * 100
|
|
563
|
+
|
|
564
|
+
|
|
565
|
+
# ============================================================================
|
|
566
|
+
# LAYER 2: USE CASES (Business Logic)
|
|
567
|
+
# ============================================================================
|
|
568
|
+
|
|
569
|
+
|
|
570
|
+
class ManifestParser:
    """
    Use case: Parse wagon manifests to extract produce items.

    Scans plan directory for wagon manifests and extracts produce declarations.
    """

    def __init__(self, plan_dir: Path = PLAN_DIR):
        # Directory recursively scanned for `_*.yaml` manifest files.
        self.plan_dir = plan_dir

    def parse_manifest(self, manifest_path: Path) -> Optional[Dict]:
        """Load a single manifest file; best effort, None on any failure."""
        try:
            with open(manifest_path, 'r', encoding='utf-8') as handle:
                parsed = yaml.safe_load(handle)
        except Exception:
            return None
        return parsed

    def parse_produce_items(self, manifest_data: Dict) -> List[ProduceItem]:
        """Build one ProduceItem per entry in the manifest's `produce` list."""
        wagon_name = manifest_data.get('wagon', 'unknown')
        return [
            ProduceItem(
                name=entry.get('name', ''),
                to=entry.get('to', ''),
                contract_ref=entry.get('contract'),
                # Telemetry is normalized to list form for uniform processing.
                telemetry_ref=self._normalize_telemetry_ref(entry.get('telemetry')),
                wagon=wagon_name,
                urn=entry.get('urn')  # Explicit URN, when the manifest provides one
            )
            for entry in manifest_data.get('produce', [])
        ]

    def _normalize_telemetry_ref(self, telemetry_raw):
        """
        Normalize a telemetry reference to list form for uniform processing.

        Backward compatible:
        - string URN   -> single-item list
        - list of URNs -> unchanged
        - None / other -> None
        """
        if isinstance(telemetry_raw, str):
            return [telemetry_raw]
        if isinstance(telemetry_raw, list):
            return telemetry_raw
        return None

    def find_all_manifests(self) -> List[Tuple[Path, Dict]]:
        """Recursively locate and parse every `_*.yaml` manifest under plan_dir."""
        found: List[Tuple[Path, Dict]] = []

        if not self.plan_dir.exists():
            return found

        for manifest_path in self.plan_dir.rglob("_*.yaml"):
            parsed = self.parse_manifest(manifest_path)
            # Keep only non-empty mapping documents.
            if parsed and isinstance(parsed, dict):
                found.append((manifest_path, parsed))

        return found
|
|
644
|
+
|
|
645
|
+
|
|
646
|
+
class AcceptanceParser:
    """
    Use case: Parse acceptance criteria files to extract signal declarations.

    Finds and parses acceptance files to identify telemetry requirements.
    """

    def __init__(self, plan_dir: Path = PLAN_DIR):
        # Root directory containing one sub-directory per wagon.
        self.plan_dir = plan_dir

    def parse_acceptance_file(self, acceptance_file: Path) -> List[SignalDeclaration]:
        """Extract signal declarations (metrics/events/logs) from an acceptance file.

        Returns an empty list when the file is unreadable, not valid YAML, or
        does not parse to a mapping (an empty YAML file parses to None).
        """
        signals: List[SignalDeclaration] = []

        try:
            with open(acceptance_file, 'r', encoding='utf-8') as f:
                data = yaml.safe_load(f)
        except Exception:
            return signals

        # BUGFIX: yaml.safe_load returns None for empty documents (and may
        # return scalars/lists); previously `data.get` below raised
        # AttributeError outside the try block.
        if not isinstance(data, dict):
            return signals

        wagon = data.get('metadata', {}).get('wagon', 'unknown')

        for acceptance in data.get('acceptances', []):
            acc_urn = acceptance.get('identity', {}).get('urn', '')
            signal_block = acceptance.get('signal', {})

            # Extract metrics
            for metric in signal_block.get('metrics', []):
                signals.append(SignalDeclaration(
                    wagon=wagon,
                    acceptance_urn=acc_urn,
                    signal_type='metric',
                    signal_name=metric.get('name', ''),
                    plane=metric.get('plane'),
                    measure=metric.get('type')
                ))

            # Extract events
            for event in signal_block.get('events', []):
                signals.append(SignalDeclaration(
                    wagon=wagon,
                    acceptance_urn=acc_urn,
                    signal_type='event',
                    signal_name=event.get('name', '')
                ))

            # Extract logs
            for log in signal_block.get('logs', []):
                signals.append(SignalDeclaration(
                    wagon=wagon,
                    acceptance_urn=acc_urn,
                    signal_type='log',
                    # BUGFIX: an explicit `body: null` yields None (the ''
                    # default only applies when the key is absent); guard
                    # before slicing. First 50 chars serve as the name.
                    signal_name=(log.get('body') or '')[:50]
                ))

        return signals

    def find_all_acceptances(self) -> Dict[str, List[SignalDeclaration]]:
        """Find all acceptance files and extract signals grouped by wagon.

        Returns:
            Plain dict mapping wagon name (kebab-case) to its signal list.
        """
        signals_by_wagon = defaultdict(list)

        if not self.plan_dir.exists():
            # CONSISTENCY: always return a plain dict; previously the raw
            # defaultdict leaked out on this early path.
            return {}

        # Pattern: plan/{wagon_dir}/*.yaml (excluding _*.yaml manifests)
        for wagon_dir in self.plan_dir.iterdir():
            if not wagon_dir.is_dir() or wagon_dir.name.startswith('_'):
                continue

            for acceptance_file in wagon_dir.glob('[CLPE]*.yaml'):
                signals = self.parse_acceptance_file(acceptance_file)
                wagon_name = wagon_dir.name.replace('_', '-')
                signals_by_wagon[wagon_name].extend(signals)

        return dict(signals_by_wagon)
|
|
721
|
+
|
|
722
|
+
|
|
723
|
+
class ContractFinder:
    """
    Use case: Find and match contract files with URNs.

    Scans contracts directory and provides intelligent URN matching.
    """

    def __init__(self, contracts_dir: Path = CONTRACTS_DIR):
        self.contracts_dir = contracts_dir

    def find_all_contracts(self) -> List[ContractFile]:
        """Find all contract schema files.

        Unreadable, non-JSON, and non-mapping files are silently skipped
        (best-effort scan).
        """
        contracts: List[ContractFile] = []

        if not self.contracts_dir.exists():
            return contracts

        for contract_file in self.contracts_dir.rglob("*.schema.json"):
            try:
                with open(contract_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
            except Exception:
                continue

            # ROBUSTNESS: a top-level JSON array/scalar previously crashed on
            # .get below (outside the try); skip non-mapping documents.
            if not isinstance(data, dict):
                continue

            contract_id = data.get('$id', '')
            metadata = data.get('x-artifact-metadata', {})

            contracts.append(ContractFile(
                file_path=str(contract_file.relative_to(REPO_ROOT)),
                contract_id=contract_id,
                domain=metadata.get('domain', ''),
                resource=metadata.get('resource', ''),
                version=metadata.get('version'),
                producer=metadata.get('producer'),
                consumers=metadata.get('consumers', []),
                traceability=metadata.get('traceability', {})
            ))

        return contracts

    def find_by_urn(self, urn: str, contracts: List[ContractFile]) -> Optional[ContractFile]:
        """
        Find a contract file matching a URN using multiple strategies.

        Tries:
        1. Exact match on contract_id
        2. Normalized match (colon vs dot variations)
        3. Path-based match (with and without colon notation)

        Returns the first match, or None.
        """
        # BUGFIX: strip 'contract:' only as a leading prefix; str.replace
        # also deleted the token anywhere inside the URN.
        search_urn = urn[len('contract:'):] if urn.startswith('contract:') else urn

        # Hoisted out of the loop: invariant per call.
        normalized_urn = search_urn.replace('.', ':')
        urn_path = search_urn.replace(':', '/')

        for contract in contracts:
            # Strategy 1: Exact match
            if contract.contract_id == search_urn:
                return contract

            # Strategy 2: Normalized match (colon vs dot)
            if contract.contract_id.replace('.', ':') == normalized_urn:
                return contract

            # Strategy 3: Path-based match.
            # BUGFIX: strip 'contracts/' and '.schema.json' only at the ends
            # of the path; replace removed them anywhere in the string.
            contract_path = contract.file_path
            if contract_path.startswith('contracts/'):
                contract_path = contract_path[len('contracts/'):]
            if contract_path.endswith('.schema.json'):
                contract_path = contract_path[:-len('.schema.json')]

            if contract_path == urn_path:
                return contract

            # Also try with colon notation
            if contract_path.replace('/', ':') == normalized_urn:
                return contract

        return None
|
|
801
|
+
|
|
802
|
+
|
|
803
|
+
class FeatureFinder:
    """
    Use case: Find and parse feature files with ioSeeds.

    Scans plan/features and features/ directories for feature YAML files.
    """

    def __init__(self, plan_dir: Path = PLAN_DIR, features_dir: Path = FEATURES_DIR):
        self.plan_dir = plan_dir
        self.features_dir = features_dir

    def find_all_features(self) -> List[FeatureFile]:
        """Find all feature YAML files with ioSeeds.

        Scans plan/*/features/*.yaml plus features/**/*.yaml (excluding the
        _features.yaml index); files without usable ioSeeds are skipped.
        """
        features: List[FeatureFile] = []

        # Scan plan/*/features/*.yaml
        if self.plan_dir.exists():
            for feature_file in self.plan_dir.rglob("features/*.yaml"):
                feature = self.parse_feature_file(feature_file)
                if feature:
                    features.append(feature)

        # Scan features/*/*.yaml
        if self.features_dir.exists():
            for feature_file in self.features_dir.rglob("*.yaml"):
                if feature_file.name != '_features.yaml':
                    feature = self.parse_feature_file(feature_file)
                    if feature:
                        features.append(feature)

        return features

    def parse_feature_file(self, feature_path: Path) -> Optional[FeatureFile]:
        """Parse a feature YAML file and extract ioSeeds.

        Returns None for unreadable files, non-mapping documents, and
        features with a missing, empty, or malformed ioSeeds block.
        """
        try:
            with open(feature_path, 'r', encoding='utf-8') as f:
                data = yaml.safe_load(f)
        except Exception:
            return None

        if not isinstance(data, dict):
            return None

        # Extract basic info
        feature_urn = data.get('urn', '')
        wagon_urn = data.get('wagon', '')

        # Extract ioSeeds.
        # ROBUSTNESS: a non-mapping ioSeeds block previously crashed on .get;
        # treat it the same as "no ioSeeds" and skip the feature.
        io_seeds = data.get('ioSeeds', {})
        if not isinstance(io_seeds, dict) or not io_seeds:
            return None  # Skip features without ioSeeds

        return FeatureFile(
            file_path=str(feature_path.relative_to(REPO_ROOT)),
            feature_urn=feature_urn,
            wagon_urn=wagon_urn,
            consume=self._parse_seed_items(io_seeds.get('consume', [])),
            produce=self._parse_seed_items(io_seeds.get('produce', []))
        )

    def _parse_seed_items(self, raw_items) -> List[FeatureIOSeed]:
        """Convert raw consume/produce entries into FeatureIOSeed records.

        Extracted helper: the consume and produce branches previously
        duplicated this mapping verbatim.
        """
        return [
            FeatureIOSeed(
                name=item.get('name', ''),
                contract=item.get('contract'),
                telemetry=item.get('telemetry'),
                derived=item.get('derived')
            )
            for item in raw_items
        ]
|
|
880
|
+
|
|
881
|
+
|
|
882
|
+
class TelemetryFinder:
    """
    Use case: Find and match telemetry files with URNs.

    Scans telemetry directory and provides intelligent URN matching.
    """

    def __init__(self, telemetry_dir: Path = TELEMETRY_DIR):
        self.telemetry_dir = telemetry_dir

    def find_all_telemetry(self) -> List[TelemetryFile]:
        """Find all telemetry definition files (JSON and YAML).

        Skips files under tests/ directories, unreadable or unparsable files,
        and documents that are not mappings.
        """
        telemetry_files: List[TelemetryFile] = []

        if not self.telemetry_dir.exists():
            return telemetry_files

        # Scan for both .json (signal files) and .yaml (manifest files).
        # IDIOM: dropped the redundant local `import json` -- the module-level
        # json import is already relied on elsewhere in this file.
        for pattern in ["*.json", "*.yaml"]:
            for telemetry_file in self.telemetry_dir.rglob(pattern):
                # Skip test files (POSIX-style separator check; assumes
                # '/'-separated paths -- TODO confirm behavior on Windows).
                if "/tests/" in str(telemetry_file):
                    continue

                try:
                    with open(telemetry_file, 'r', encoding='utf-8') as f:
                        if telemetry_file.suffix == '.json':
                            data = json.load(f)
                        else:
                            data = yaml.safe_load(f)
                except Exception:
                    continue

                # BUGFIX: empty YAML parses to None and some documents may be
                # lists/scalars; .get on those raised AttributeError outside
                # the try block.
                if not isinstance(data, dict):
                    continue

                # Extract telemetry ID from $id field (falling back to 'id')
                telemetry_id = data.get('$id') or data.get('id', '')

                telemetry_files.append(TelemetryFile(
                    file_path=str(telemetry_file.relative_to(REPO_ROOT)),
                    telemetry_id=telemetry_id,
                    domain=data.get('domain', ''),
                    resource=data.get('resource', ''),
                    producer=data.get('producer'),
                    artifact_ref=data.get('artifact_ref'),
                    acceptance_criteria=data.get('acceptance_criteria', [])
                ))

        return telemetry_files
|
|
932
|
+
|
|
933
|
+
|
|
934
|
+
class WagonTechStackDetector:
    """
    Use case: Detect technology stack for each wagon.

    Scans codebase to determine which languages (Python/Dart/TS) each wagon uses.
    Directory conventions: python/<wagon>/ for Python, lib/features/<feature>/
    for Dart, src/wagons/<wagon>/ for TypeScript.
    """

    def __init__(self, repo_root: Path = REPO_ROOT):
        self.repo_root = repo_root
        # Per-language scan roots (fixed conventions).
        self.python_dir = repo_root / "python"
        self.dart_dir = repo_root / "lib"
        self.ts_dir = repo_root / "src"

    def detect_all_stacks(self) -> Dict[str, WagonTechStack]:
        """
        Detect tech stacks for all wagons.

        Directories whose names start with '_' or '.' are skipped in every
        scan. A wagon found in both python/ and src/wagons/ is merged into a
        single entry (TypeScript flags added onto the Python-created stack).

        Returns:
            Dict mapping wagon URN to WagonTechStack
        """
        stacks = {}

        # Detect Python wagons
        if self.python_dir.exists():
            for wagon_dir in self.python_dir.iterdir():
                if wagon_dir.is_dir() and not wagon_dir.name.startswith(('_', '.')):
                    wagon_slug = wagon_dir.name
                    # Python dir names use underscores; the URN uses dashes.
                    wagon_urn = f"wagon:{wagon_slug.replace('_', '-')}"

                    stack = WagonTechStack(
                        wagon_urn=wagon_urn,
                        wagon_slug=wagon_slug,
                        has_python=True,
                        python_path=str(wagon_dir.relative_to(self.repo_root))
                    )
                    stacks[wagon_urn] = stack

        # Detect Dart features (frontend)
        if self.dart_dir.exists():
            features_dir = self.dart_dir / "features"
            if features_dir.exists():
                for feature_dir in features_dir.iterdir():
                    if feature_dir.is_dir() and not feature_dir.name.startswith(('_', '.')):
                        feature_name = feature_dir.name
                        # Dart app acts as consumer, mapped by feature name.
                        # Note the distinct 'app:dart:' URN namespace, so Dart
                        # entries never merge with 'wagon:' entries.
                        wagon_urn = f"app:dart:{feature_name}"

                        stack = WagonTechStack(
                            wagon_urn=wagon_urn,
                            wagon_slug=feature_name,
                            has_dart=True,
                            dart_path=str(feature_dir.relative_to(self.repo_root))
                        )
                        stacks[wagon_urn] = stack

        # Detect TypeScript wagons/features
        if self.ts_dir.exists():
            wagons_dir = self.ts_dir / "wagons"
            if wagons_dir.exists():
                for wagon_dir in wagons_dir.iterdir():
                    if wagon_dir.is_dir() and not wagon_dir.name.startswith(('_', '.')):
                        wagon_slug = wagon_dir.name
                        # NOTE(review): unlike the Python branch, no '_'->'-'
                        # normalization happens here; a TS dir named with
                        # underscores would NOT merge with its Python twin.
                        # Looks intentional (TS dirs presumably use dashes)
                        # -- TODO confirm.
                        wagon_urn = f"wagon:{wagon_slug}"

                        # May already exist from Python detection
                        if wagon_urn in stacks:
                            stacks[wagon_urn].has_typescript = True
                            stacks[wagon_urn].typescript_path = str(wagon_dir.relative_to(self.repo_root))
                        else:
                            stack = WagonTechStack(
                                wagon_urn=wagon_urn,
                                wagon_slug=wagon_slug,
                                has_typescript=True,
                                typescript_path=str(wagon_dir.relative_to(self.repo_root))
                            )
                            stacks[wagon_urn] = stack

        return stacks
|
|
1012
|
+
|
|
1013
|
+
|
|
1014
|
+
class PythonDTOFinder:
    """
    Use case: Find Python DTO implementations of contracts.

    Scans python/contracts/**/*.py for DTO classes with URN annotations.
    Pattern:
        # urn: contract:match:dilemma.current.dto
        @dataclass
        class CurrentDilemmaDTO:
    """

    # Pattern: # urn: contract:domain:resource[.variant][.dto]
    URN_PATTERN = re.compile(r'#\s*urn:\s*contract:([^:\s]+:[^\s]+)')
    # Alternative pattern: Contract: contracts/domain/resource.schema.json
    CONTRACT_PATH_PATTERN = re.compile(r'Contract:\s*contracts/([^/]+)/([^.\s]+)\.schema\.json')
    # Pattern: @dataclass (with optional args) followed by class XxxDTO
    DTO_CLASS_PATTERN = re.compile(r'@dataclass[^\n]*\nclass\s+(\w+(?:DTO)?)\s*[:\(]', re.MULTILINE)
    # Pattern: field_name: Type (simplified field extraction)
    FIELD_PATTERN = re.compile(r'^\s+(\w+):\s+', re.MULTILINE)

    def __init__(self, repo_root: Path = REPO_ROOT):
        self.repo_root = repo_root
        self.contracts_dir = repo_root / "python" / "contracts"

    def find_all_dtos(self) -> List[ContractImplementation]:
        """Scan python/contracts/ for DTO classes (skipping __init__.py)."""
        implementations = []

        if not self.contracts_dir.exists():
            return implementations

        for py_file in self.contracts_dir.rglob("*.py"):
            if py_file.name == '__init__.py':
                continue

            impl = self._parse_dto_file(py_file)
            if impl:
                implementations.append(impl)

        return implementations

    def _parse_dto_file(self, file_path: Path) -> Optional[ContractImplementation]:
        """Extract contract URN and DTO class from a Python file.

        Returns None when the file is unreadable or carries neither a
        `# urn: contract:...` comment nor a `Contract: contracts/...` note.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception:
            return None

        # Try primary pattern: # urn: contract:...
        urn_match = self.URN_PATTERN.search(content)
        if urn_match:
            urn_full = urn_match.group(1)
            # BUGFIX: strip '.dto' only as a trailing suffix; str.replace
            # also deleted '.dto' occurring anywhere inside the URN.
            if urn_full.endswith('.dto'):
                contract_urn = urn_full[:-len('.dto')]
            else:
                contract_urn = urn_full
        else:
            # Try alternative pattern: Contract: contracts/domain/resource.schema.json
            contract_match = self.CONTRACT_PATH_PATTERN.search(content)
            if not contract_match:
                return None

            # Reconstruct URN from the referenced schema path
            domain = contract_match.group(1)
            resource = contract_match.group(2)
            contract_urn = f"{domain}:{resource}"
            urn_full = contract_urn

        # Extract DTO class name
        class_match = self.DTO_CLASS_PATTERN.search(content)
        class_name = class_match.group(1) if class_match else None

        # Extract field names (simplified)
        fields = self._extract_fields(content)

        # Derive schema path
        schema_ref = self._derive_schema_path(contract_urn)

        return ContractImplementation(
            file_path=str(file_path.relative_to(self.repo_root)),
            contract_urn=contract_urn,
            language='python',
            class_name=class_name,
            schema_ref=schema_ref,
            fields=fields,
            urn_comment=urn_full
        )

    def _extract_fields(self, content: str) -> List[str]:
        """Extract field names from the first class whose header mentions 'DTO'."""
        fields = []
        in_class = False

        for line in content.split('\n'):
            # Detect class start (only classes with 'DTO' in the header line)
            if line.strip().startswith('class ') and 'DTO' in line:
                in_class = True
                continue

            if in_class:
                # Stop at the next top-level statement (end of the class body)
                if line and not line[0].isspace():
                    break

                # Match field pattern: field_name: Type
                match = self.FIELD_PATTERN.match(line)
                if match:
                    field_name = match.group(1)
                    # Skip private/dunder names
                    if not field_name.startswith('_'):
                        fields.append(field_name)

        return fields

    def _derive_schema_path(self, contract_urn: str) -> str:
        """Derive schema path from contract URN.

        contract:match:dilemma.current -> contracts/match/dilemma/current.schema.json
        """
        parts = contract_urn.split(':')
        if len(parts) >= 2:
            domain_resource = ':'.join(parts)
            path = domain_resource.replace(':', '/').replace('.', '/')
            return f"contracts/{path}.schema.json"
        return ""
|
|
1136
|
+
|
|
1137
|
+
|
|
1138
|
+
class DartDTOFinder:
    """
    DEPRECATED: Dart/Flutter frontend was removed in SESSION-18.
    This class is kept for API compatibility but always returns empty results.
    """

    def __init__(self, repo_root: Path = REPO_ROOT):
        # Retained only for signature compatibility; never consulted.
        self.repo_root = repo_root

    def find_all_dtos(self) -> List[ContractImplementation]:
        """Always empty - Dart frontend deprecated."""
        return []
|
|
1150
|
+
|
|
1151
|
+
|
|
1152
|
+
class TypeScriptDTOFinder:
    """
    Use case: Find TypeScript interface/type definitions.

    Scans src/contracts/**/*.ts for contract interfaces.
    Pattern: export interface XxxDTO { ... }
    """

    INTERFACE_PATTERN = re.compile(r'export\s+(?:interface|type)\s+(\w+(?:DTO)?)\s*[{=]')
    # BUGFIX: the previous pattern r'^\s+(\w+)[\?:]:' required TWO characters
    # after the field name, so it only matched optional fields ('name?:') and
    # never plain required fields ('name:'). '\??:' accepts an optional '?'
    # before the single colon, matching both forms.
    FIELD_PATTERN = re.compile(r'^\s+(\w+)\??:', re.MULTILINE)

    def __init__(self, repo_root: Path = REPO_ROOT):
        self.repo_root = repo_root
        self.src_dir = repo_root / "src"
        self.contracts_dir = repo_root / "contracts"

    def find_all_dtos(self) -> List[ContractImplementation]:
        """Scan TypeScript contract interfaces in src/ and contracts/."""
        implementations = []

        # Check both src/ and contracts/ directories
        for base_dir in [self.src_dir, self.contracts_dir]:
            if not base_dir.exists():
                continue

            for ts_file in base_dir.rglob("*.ts"):
                # Skip test files ('.test.ts' is also a substring of '.test.tsx')
                if '.test.ts' in str(ts_file) or '.test.tsx' in str(ts_file):
                    continue

                impls = self._parse_ts_file(ts_file)
                implementations.extend(impls)

        return implementations

    def _parse_ts_file(self, file_path: Path) -> List[ContractImplementation]:
        """Extract interface/type definitions from one .ts file."""
        implementations = []

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception:
            return implementations

        # Find all exported interfaces/types
        for match in self.INTERFACE_PATTERN.finditer(content):
            interface_name = match.group(1)

            # Infer contract URN
            contract_urn = self._infer_contract_urn(file_path, interface_name)

            # Extract fields
            fields = self._extract_fields(content, interface_name, match.start())

            impl = ContractImplementation(
                file_path=str(file_path.relative_to(self.repo_root)),
                contract_urn=contract_urn,
                language='typescript',
                class_name=interface_name,
                schema_ref=self._derive_schema_path(contract_urn),
                fields=fields
            )
            implementations.append(impl)

        return implementations

    def _infer_contract_urn(self, file_path: Path, interface_name: str) -> str:
        """Infer contract URN from file path (preferred) or interface name."""
        # Remove DTO suffix for URN
        base_name = interface_name.replace('DTO', '').replace('Interface', '')

        # Convert to kebab-case
        urn = self._camel_to_kebab(base_name)

        # Try to extract domain from path
        parts = file_path.parts
        if 'contracts' in parts:
            idx = parts.index('contracts')
            if idx + 1 < len(parts):
                # Use directory structure as URN
                path_parts = parts[idx + 1:]
                urn = ':'.join(path_parts).replace('.ts', '')

        return urn

    def _camel_to_kebab(self, name: str) -> str:
        """Convert CamelCase to kebab-case."""
        result = re.sub('([a-z0-9])([A-Z])', r'\1-\2', name)
        return result.lower()

    def _extract_fields(self, content: str, interface_name: str, start_pos: int) -> List[str]:
        """Extract field names from interface/type.

        Only the first 2000 characters after the declaration are scanned,
        so very large interfaces may be truncated.
        """
        fields = []

        # Extract block after interface declaration
        snippet = content[start_pos:start_pos + 2000]

        # Find fields within braces
        brace_start = snippet.find('{')
        if brace_start == -1:
            return fields

        brace_count = 1
        brace_end = brace_start + 1

        # Find matching closing brace
        while brace_end < len(snippet) and brace_count > 0:
            if snippet[brace_end] == '{':
                brace_count += 1
            elif snippet[brace_end] == '}':
                brace_count -= 1
            brace_end += 1

        interface_body = snippet[brace_start:brace_end]

        for field_match in self.FIELD_PATTERN.finditer(interface_body):
            field_name = field_match.group(1)
            fields.append(field_name)

        return fields

    def _derive_schema_path(self, contract_urn: str) -> str:
        """Derive schema path from contract URN."""
        path = contract_urn.replace(':', '/').replace('.', '/')
        return f"contracts/{path}.schema.json"
|
|
1278
|
+
|
|
1279
|
+
|
|
1280
|
+
class TraceabilityReconciler:
|
|
1281
|
+
"""
|
|
1282
|
+
Use case: Reconcile wagon manifests with contracts/telemetry.
|
|
1283
|
+
|
|
1284
|
+
Core business logic for detecting missing references and mismatches.
|
|
1285
|
+
"""
|
|
1286
|
+
|
|
1287
|
+
def __init__(self):
    """Wire up the parser/finder collaborators with their default directories."""
    # Each collaborator falls back to the module-level directory constants
    # (PLAN_DIR, CONTRACTS_DIR, TELEMETRY_DIR, FEATURES_DIR) via its defaults.
    self.manifest_parser = ManifestParser()
    self.acceptance_parser = AcceptanceParser()
    self.contract_finder = ContractFinder()
    self.telemetry_finder = TelemetryFinder()
    self.feature_finder = FeatureFinder()
|
|
1293
|
+
|
|
1294
|
+
def parse_produce_items(self, manifest_data: Dict) -> List[ProduceItem]:
    """Parse produce items from manifest data.

    Convenience delegation to ManifestParser.parse_produce_items so callers
    only need the reconciler.
    """
    return self.manifest_parser.parse_produce_items(manifest_data)
|
|
1297
|
+
|
|
1298
|
+
def detect_missing_contract_refs(
    self,
    produce_items: List[ProduceItem],
    contracts: List[ContractFile]
) -> List[Dict]:
    """Detect produce items with null contract refs when contract files exist.

    For every produce item whose contract reference is null, resolve the
    item's derived URN against the scanned contracts; each hit becomes a
    finding proposing that contract's path as the fix.
    """
    findings: List[Dict] = []

    for item in produce_items:
        if not item.has_null_contract_ref:
            continue

        # Try to find a matching contract via the derived URN.
        resolved = self.contract_finder.find_by_urn(item.derived_contract_urn, contracts)
        if not resolved:
            continue

        findings.append({
            'wagon': item.wagon,
            'produce_name': item.name,
            'urn': item.derived_contract_urn,
            'proposed_fix': resolved.file_path
        })

    return findings
|
|
1320
|
+
|
|
1321
|
+
def detect_missing_telemetry_refs(
    self,
    produce_items: List[ProduceItem],
    telemetry_files: List[TelemetryFile]
) -> List[Dict]:
    """Detect produce items with null telemetry refs when telemetry files exist.

    Convention: telemetry URNs sit at aspect level (e.g. telemetry:match:dilemma),
    so a file matches when its id equals the derived URN or extends it as a
    prefix.
    """
    findings: List[Dict] = []

    for item in produce_items:
        if not item.has_null_telemetry_ref:
            continue

        target_urn = item.derived_telemetry_urn
        # First telemetry file whose id matches exactly or by prefix.
        resolved = next(
            (
                candidate
                for candidate in telemetry_files
                if candidate.telemetry_id == target_urn
                or candidate.telemetry_id.startswith(target_urn)
            ),
            None,
        )

        if resolved:
            findings.append({
                'wagon': item.wagon,
                'produce_name': item.name,
                'urn': target_urn,
                'proposed_fix': resolved.file_path
            })

    return findings
|
|
1351
|
+
|
|
1352
|
+
def detect_signal_telemetry_issues(
    self,
    signals_by_wagon: Dict[str, List[SignalDeclaration]],
    telemetry_files: List[TelemetryFile],
    produce_items_by_wagon: Dict[str, List[ProduceItem]]
) -> Tuple[List[Dict], List[Dict]]:
    """
    Detect issues with signal-driven telemetry.

    A wagon whose acceptances declare signals is expected to reference
    telemetry in its manifest; telemetry files for wagons with no signal
    declarations are reported as informational "orphans".

    Returns: (missing_signal_telemetry, orphaned_telemetry)
    """
    missing = []
    orphaned = []

    # Check wagons with signals - they should have telemetry refs
    for wagon, signals in signals_by_wagon.items():
        if not signals:
            continue

        wagon_produce_items = produce_items_by_wagon.get(wagon, [])
        if not wagon_produce_items:
            # Nothing to derive an expected telemetry URN from; original code
            # could not report anything for this wagon either.
            continue

        has_telemetry_ref = any(
            not item.has_null_telemetry_ref
            for item in wagon_produce_items
        )

        # Derive expected telemetry URN from first produce item
        expected_urn = wagon_produce_items[0].derived_telemetry_urn

        # Hoisted: this replace() is loop-invariant (was recomputed per file)
        urn_fragment = expected_urn.replace('telemetry:', '')
        has_telemetry_files = any(
            urn_fragment in telemetry.telemetry_id
            for telemetry in telemetry_files
        )

        if has_telemetry_files and not has_telemetry_ref:
            missing.append({
                'wagon': wagon,
                'signal_count': len(signals),
                'expected_urn': expected_urn,
                'current': 'telemetry: null',
                'proposed_fix': expected_urn,
                'reason': 'Acceptances declare signals, telemetry files exist, but wagon manifest has telemetry: null'
            })

    # Check for orphaned telemetry (files exist but no signals in acceptances)
    # This is informational, not necessarily an error
    wagons_with_signals = set(signals_by_wagon.keys())
    wagons_with_telemetry = set()
    for telemetry in telemetry_files:
        # Extract wagon from telemetry_id (e.g., "match:dilemma" → "match")
        parts = telemetry.telemetry_id.split(':')
        if len(parts) >= 2:
            # Two segments → theme alone; three or more → "theme-domain" slug
            wagon_candidate = parts[0] if len(parts) == 2 else f"{parts[0]}-{parts[1]}"
            wagons_with_telemetry.add(wagon_candidate)

    # Orphaned = has telemetry but no signals
    for wagon in wagons_with_telemetry - wagons_with_signals:
        orphaned.append({
            'wagon': wagon,
            'note': 'Telemetry files exist but no signal declarations found in acceptances'
        })

    return missing, orphaned
|
|
1417
|
+
|
|
1418
|
+
def validate_telemetry_artifact_refs(
    self,
    telemetry_files: List[TelemetryFile],
    contracts: List[ContractFile]
) -> Tuple[List[Dict], List[Dict]]:
    """
    Validate telemetry files reference valid artifacts/contracts.

    Manifest and pack files (_tracking.yaml, _signals.yaml, *.pack.*) are
    skipped: they are not individual signal files and carry no artifact_ref.

    Returns: (missing_artifact_refs, invalid_artifact_refs)
    """
    missing = []
    invalid = []

    # NOTE: the previous version also built an unused contract-URN set here;
    # removed as dead code.
    for telemetry in telemetry_files:
        # Skip manifest and pack files (not signal files)
        if (telemetry.file_path.endswith(('_tracking.yaml', '_signals.yaml')) or
                '.pack.' in telemetry.file_path):
            continue

        # artifact_ref is mandatory for signal files
        if not telemetry.artifact_ref:
            missing.append({
                'telemetry_file': telemetry.file_path,
                'telemetry_id': telemetry.telemetry_id,
                'reason': 'Missing artifact_ref field (required per telemetry convention)'
            })
            continue

        # Remove 'contract:' prefix for matching
        artifact_ref_normalized = telemetry.artifact_ref.replace('contract:', '')

        # Loose bidirectional substring match: either side may be the more
        # specific URN (aspect-level ref vs variant-level contract id).
        contract_exists = any(
            artifact_ref_normalized in contract.contract_id or
            contract.contract_id in artifact_ref_normalized
            for contract in contracts
        )

        if not contract_exists:
            invalid.append({
                'telemetry_file': telemetry.file_path,
                'telemetry_id': telemetry.telemetry_id,
                'artifact_ref': telemetry.artifact_ref,
                'reason': f'References non-existent contract: {telemetry.artifact_ref}'
            })

    return missing, invalid
|
|
1470
|
+
|
|
1471
|
+
def validate_telemetry_naming_convention(
    self,
    telemetry_files: List[TelemetryFile]
) -> List[Dict]:
    """
    Validate telemetry $id follows naming convention.

    Convention: {theme}:{domain}:{aspect}.{type}.{plane}[.{measure}]
    - Colons (:) for hierarchy (theme, domain, aspect)
    - Dots (.) for signal facets (type, plane, measure)

    Common violations:
    - Using colons for signal type: "match:pacing:exhausted:event" ❌
    - Should be dots: "match:pacing.exhausted.event.be" ✅

    Returns a list of violation dicts (file, current_id, violation, ...).
    """
    violations = []

    for telemetry in telemetry_files:
        if not telemetry.telemetry_id:
            continue

        telemetry_id = telemetry.telemetry_id

        # Skip manifest/pack files (_tracking.yaml, _signals.yaml, *.pack.*)
        if (telemetry.file_path.endswith(('_tracking.yaml', '_signals.yaml')) or
                '.pack.' in telemetry.file_path):
            continue

        # A conforming $id must contain dots separating type.plane[.measure]
        if '.' not in telemetry_id:
            violations.append({
                'file': telemetry.file_path,
                'current_id': telemetry_id,
                'violation': 'Missing dots for signal facets (should have .type.plane)',
                'example': 'Should be like: match:dilemma.current.metric.be.duration'
            })
            continue

        # Common violation: signal type joined with ':' instead of '.'
        # (e.g. ends with :event, :metric — should be .event, .metric)
        signal_types = ['event', 'metric', 'log', 'trace']
        for sig_type in signal_types:
            if f':{sig_type}' in telemetry_id and f'.{sig_type}' not in telemetry_id:
                # Suggest fix by converting the offending colon to a dot
                suggested = telemetry_id.replace(f':{sig_type}', f'.{sig_type}')
                violations.append({
                    'file': telemetry.file_path,
                    'current_id': telemetry_id,
                    'violation': f'Signal type ":{sig_type}" should use dot ".{sig_type}"',
                    'suggested_fix': suggested,
                    'reason': 'Colons are for hierarchy (theme:domain:aspect), dots are for signal facets (.type.plane.measure)'
                })
                break

        # Check artifact_ref matches $id hierarchy
        if telemetry.artifact_ref:
            # Hierarchy part = everything before the first dot
            artifact_hierarchy = telemetry_id.split('.')[0] if '.' in telemetry_id else telemetry_id
            artifact_ref_normalized = telemetry.artifact_ref.replace('contract:', '')

            # artifact_hierarchy is already dotless, so compare it directly
            # (previously re-split on '.' redundantly)
            if not artifact_ref_normalized.startswith(artifact_hierarchy):
                violations.append({
                    'file': telemetry.file_path,
                    'current_id': telemetry_id,
                    'artifact_ref': telemetry.artifact_ref,
                    'violation': 'artifact_ref hierarchy does not match $id hierarchy',
                    'reason': f'$id starts with "{artifact_hierarchy}" but artifact_ref is "{telemetry.artifact_ref}"'
                })

    return violations
|
|
1552
|
+
|
|
1553
|
+
def validate_feature_wagon_io_alignment(
    self,
    features: List[FeatureFile],
    manifests: List[Tuple[Path, Dict]]
) -> List[Dict]:
    """
    Validate that feature ioSeeds align with wagon produce/consume.

    Checks:
    - Feature consume items must exist in wagon consume OR wagon produce (internal dependency)
    - Feature produce items must exist in wagon produce
    - Contract/telemetry URNs must match between feature and wagon
    """
    mismatches = []

    # Index every wagon manifest by its URN for O(1) lookup per feature.
    wagon_by_urn = {}
    for manifest_path, manifest_data in manifests:
        slug = manifest_data.get('wagon', '')
        wagon_by_urn[f"wagon:{slug}"] = {
            'slug': slug,
            'manifest_path': str(manifest_path.relative_to(REPO_ROOT)),
            'produce': manifest_data.get('produce', []),
            'consume': manifest_data.get('consume', [])
        }

    def _report(feature, **details):
        # Every mismatch record shares the same feature/file/wagon envelope.
        record = {
            'feature': feature.feature_urn,
            'file': feature.file_path,
            'wagon': feature.wagon_urn,
        }
        record.update(details)
        mismatches.append(record)

    for feature in features:
        wagon_data = wagon_by_urn.get(feature.wagon_urn)
        if not wagon_data:
            _report(feature, issue=f"Wagon not found: {feature.wagon_urn}")
            continue

        # Name → {type, contract, telemetry}. Consume entries are indexed
        # after produce entries, so a name collision resolves to 'consume'.
        wagon_artifacts = {}
        for kind in ('produce', 'consume'):
            for entry in wagon_data[kind]:
                wagon_artifacts[entry['name']] = {
                    'type': kind,
                    'contract': entry.get('contract'),
                    'telemetry': entry.get('telemetry')
                }

        # Feature consume items must be known to the wagon and agree on URNs.
        for consumed in feature.consume:
            known = wagon_artifacts.get(consumed.name)
            if known is None:
                _report(
                    feature,
                    artifact_name=consumed.name,
                    issue=f"Feature consumes '{consumed.name}' but wagon does not produce or consume it",
                    feature_contract=consumed.contract,
                    feature_telemetry=consumed.telemetry
                )
                continue
            if consumed.contract != known['contract']:
                _report(
                    feature,
                    artifact_name=consumed.name,
                    issue=f"Contract mismatch for '{consumed.name}'",
                    feature_contract=consumed.contract,
                    wagon_contract=known['contract']
                )
            if consumed.telemetry != known['telemetry']:
                _report(
                    feature,
                    artifact_name=consumed.name,
                    issue=f"Telemetry mismatch for '{consumed.name}'",
                    feature_telemetry=consumed.telemetry,
                    wagon_telemetry=known['telemetry']
                )

        # Feature produce items must be wagon-produced and agree on URNs.
        for produced in feature.produce:
            known = wagon_artifacts.get(produced.name)
            if known is None:
                _report(
                    feature,
                    artifact_name=produced.name,
                    issue=f"Feature produces '{produced.name}' but wagon does not produce it",
                    feature_contract=produced.contract,
                    feature_telemetry=produced.telemetry
                )
                continue
            if known['type'] != 'produce':
                _report(
                    feature,
                    artifact_name=produced.name,
                    issue=f"Feature produces '{produced.name}' but wagon only consumes it"
                )
            if produced.contract != known['contract']:
                _report(
                    feature,
                    artifact_name=produced.name,
                    issue=f"Contract mismatch for '{produced.name}'",
                    feature_contract=produced.contract,
                    wagon_contract=known['contract']
                )
            if produced.telemetry != known['telemetry']:
                _report(
                    feature,
                    artifact_name=produced.name,
                    issue=f"Telemetry mismatch for '{produced.name}'",
                    feature_telemetry=produced.telemetry,
                    wagon_telemetry=known['telemetry']
                )

    return mismatches
|
|
1690
|
+
|
|
1691
|
+
def reconcile_all(self) -> ReconciliationResult:
    """
    Run full repository reconciliation.

    Scans all wagon manifests and contract/telemetry files,
    detects issues, and generates comprehensive report.

    Pipeline (order matters: per-wagon produce items feed signal validation):
    1. Load manifests, contracts, telemetry files, and acceptance signals.
    2. Per wagon: detect missing contract/telemetry refs and group by wagon.
    3. Cross-wagon: signal-driven telemetry issues, telemetry artifact refs,
       naming convention, and feature↔wagon I/O alignment.
    4. Aggregate a total issue count.

    Returns:
        ReconciliationResult populated with all detected issue lists.
    """
    result = ReconciliationResult()

    # Load all artifacts
    manifests = self.manifest_parser.find_all_manifests()
    contracts = self.contract_finder.find_all_contracts()
    telemetry_files = self.telemetry_finder.find_all_telemetry()
    signals_by_wagon = self.acceptance_parser.find_all_acceptances()

    # Build produce items by wagon for signal validation
    produce_items_by_wagon = {}

    # Process each wagon
    for manifest_path, manifest_data in manifests:
        wagon = manifest_data.get('wagon', 'unknown')
        produce_items = self.parse_produce_items(manifest_data)
        produce_items_by_wagon[wagon] = produce_items

        # Detect missing contract references
        missing_contracts = self.detect_missing_contract_refs(produce_items, contracts)
        result.missing_contract_refs.extend(missing_contracts)

        # Detect missing telemetry references
        missing_telemetry = self.detect_missing_telemetry_refs(produce_items, telemetry_files)
        result.missing_telemetry_refs.extend(missing_telemetry)

        # Group by wagon (created lazily: multiple manifests may share a
        # wagon slug, in which case their issues accumulate under one entry
        # and the first manifest's path wins)
        if wagon not in result.by_wagon:
            result.by_wagon[wagon] = {
                'missing_contracts': [],
                'missing_telemetry': [],
                'missing_signal_telemetry': [],
                'manifest_path': str(manifest_path.relative_to(REPO_ROOT))
            }

        result.by_wagon[wagon]['missing_contracts'].extend(missing_contracts)
        result.by_wagon[wagon]['missing_telemetry'].extend(missing_telemetry)

    # Detect signal-driven telemetry issues
    missing_signal_tel, orphaned_tel = self.detect_signal_telemetry_issues(
        signals_by_wagon, telemetry_files, produce_items_by_wagon
    )
    result.missing_signal_telemetry.extend(missing_signal_tel)
    result.orphaned_telemetry.extend(orphaned_tel)

    # Add signal issues to by_wagon grouping (only wagons already seen via
    # a manifest above get a by_wagon entry; others are kept in the flat list)
    for issue in missing_signal_tel:
        wagon = issue['wagon']
        if wagon in result.by_wagon:
            result.by_wagon[wagon]['missing_signal_telemetry'].append(issue)

    # Validate telemetry → artifact references
    missing_artifact_refs, invalid_artifact_refs = self.validate_telemetry_artifact_refs(
        telemetry_files, contracts
    )
    result.telemetry_without_artifact_ref.extend(missing_artifact_refs)
    result.telemetry_invalid_artifact_ref.extend(invalid_artifact_refs)

    # Validate telemetry naming convention
    naming_violations = self.validate_telemetry_naming_convention(telemetry_files)
    result.telemetry_naming_violations.extend(naming_violations)

    # Validate feature-wagon I/O alignment
    features = self.feature_finder.find_all_features()
    feature_mismatches = self.validate_feature_wagon_io_alignment(features, manifests)
    result.feature_io_mismatches.extend(feature_mismatches)

    # Calculate total issues. Orphaned telemetry is deliberately excluded:
    # it is informational (see detect_signal_telemetry_issues), not an error.
    result.total_issues = (
        len(result.missing_contract_refs) +
        len(result.missing_telemetry_refs) +
        len(result.missing_signal_telemetry) +
        len(result.telemetry_without_artifact_ref) +
        len(result.telemetry_invalid_artifact_ref) +
        len(result.telemetry_naming_violations) +
        len(result.feature_io_mismatches)
    )

    return result
|
|
1776
|
+
|
|
1777
|
+
|
|
1778
|
+
class ContractImplementationReconciler:
    """
    Use case: Reconcile contract schemas with multi-language implementations.

    Validates that each contract has implementations in target languages (Python, Dart, TS).
    Detects orphaned DTOs without schemas and missing implementations.
    """

    def __init__(self):
        self.contract_finder = ContractFinder()
        self.python_finder = PythonDTOFinder()
        self.dart_finder = DartDTOFinder()
        self.ts_finder = TypeScriptDTOFinder()

    def reconcile_all(self) -> ImplementationReconciliationResult:
        """
        Run full contract implementation reconciliation.

        Returns:
            ImplementationReconciliationResult with coverage analysis
        """
        result = ImplementationReconciliationResult()

        # Find all contracts and implementations
        contracts = self.contract_finder.find_all_contracts()
        py_dtos = self.python_finder.find_all_dtos()
        dart_dtos = self.dart_finder.find_all_dtos()
        ts_dtos = self.ts_finder.find_all_dtos()

        # Build lookup tables by contract URN (strip 'contract:' prefix if present)
        py_by_urn = {dto.contract_urn.replace('contract:', ''): dto for dto in py_dtos}
        dart_by_urn = {dto.contract_urn.replace('contract:', ''): dto for dto in dart_dtos}
        ts_by_urn = {dto.contract_urn.replace('contract:', ''): dto for dto in ts_dtos}

        # Build contract URN set for orphan detection
        contract_urns = {contract.contract_id.replace('contract:', '') for contract in contracts}

        # Check each contract for implementation coverage
        for contract in contracts:
            urn = contract.contract_id.replace('contract:', '')

            # Find implementations for this contract
            python_impl = py_by_urn.get(urn)
            dart_impl = dart_by_urn.get(urn)
            ts_impl = ts_by_urn.get(urn)

            # Create coverage record
            coverage = ImplementationCoverage(
                contract_urn=urn,
                schema_path=contract.file_path,
                python_impl=python_impl,
                dart_impl=dart_impl,
                typescript_impl=ts_impl
            )
            result.coverage_by_contract.append(coverage)

            # Track missing implementations
            if not python_impl:
                result.missing_python.append({
                    'contract': urn,
                    'schema': contract.file_path,
                    'expected_path': self._suggest_python_path(urn)
                })

            if not dart_impl:
                result.missing_dart.append({
                    'contract': urn,
                    'schema': contract.file_path,
                    'expected_path': self._suggest_dart_path(urn)
                })

            if not ts_impl:
                result.missing_typescript.append({
                    'contract': urn,
                    'schema': contract.file_path,
                    'expected_path': self._suggest_ts_path(urn)
                })

        # Find orphaned DTOs (implementations without schemas).
        # Normalize the schema URNs once and use O(1) set membership instead
        # of rescanning (and re-normalizing) all contracts per DTO.
        normalized_contract_urns = {u.replace('.', ':') for u in contract_urns}
        for dto in py_dtos + dart_dtos + ts_dtos:
            urn = dto.contract_urn.replace('contract:', '')

            # Normalize URN for matching (handle ./: variations)
            if urn.replace('.', ':') not in normalized_contract_urns:
                result.orphaned_dtos.append({
                    'file': dto.file_path,
                    'urn': urn,
                    'language': dto.language,
                    'class_name': dto.class_name,
                    'reason': 'No corresponding schema file found'
                })

        # Set total contracts count
        result.total_contracts = len(contracts)

        return result

    def _suggest_python_path(self, contract_urn: str) -> str:
        """Suggest Python DTO path for a contract URN."""
        # match:dilemma.current → python/contracts/match/dilemma/current.py
        path = contract_urn.replace(':', '/').replace('.', '/')
        return f"python/contracts/{path}.py"

    def _suggest_dart_path(self, contract_urn: str) -> str:
        """Suggest Dart entity path for a contract URN."""
        # match:dilemma.current → lib/features/match/domain/dilemma_current_entities.dart
        parts = contract_urn.split(':')
        if len(parts) >= 2:
            domain = parts[0]
            resource = '_'.join(parts[1:]).replace('.', '_')
            return f"lib/features/{domain}/domain/{resource}_entities.dart"
        return f"lib/contracts/{contract_urn.replace(':', '/')}.dart"

    def _suggest_ts_path(self, contract_urn: str) -> str:
        """Suggest TypeScript interface path for a contract URN."""
        # match:dilemma.current → src/contracts/match/dilemma/current.ts
        path = contract_urn.replace(':', '/').replace('.', '/')
        return f"src/contracts/{path}.ts"
|
|
1918
|
+
|
|
1919
|
+
|
|
1920
|
+
class SmartImplementationReconciler:
    """
    Use case: Smart reconciliation based on producer/consumer requirements.

    Only flags missing DTOs when producer/consumer wagons actually use that language.
    Maps wagon URN → feature → component → DTO path.
    """

    def __init__(self):
        self.contract_finder = ContractFinder()
        self.python_finder = PythonDTOFinder()
        self.dart_finder = DartDTOFinder()
        self.ts_finder = TypeScriptDTOFinder()
        self.stack_detector = WagonTechStackDetector()

    def reconcile_smart(self) -> List[ContractRequirements]:
        """
        Run smart reconciliation with producer/consumer awareness.

        Returns:
            List of ContractRequirements with smart coverage analysis
        """
        # Load all data
        contracts = self.contract_finder.find_all_contracts()
        py_dtos = self.python_finder.find_all_dtos()
        dart_dtos = self.dart_finder.find_all_dtos()
        ts_dtos = self.ts_finder.find_all_dtos()
        wagon_stacks = self.stack_detector.detect_all_stacks()

        # Build lookup tables (strip optional 'contract:' prefix)
        py_by_urn = {dto.contract_urn.replace('contract:', ''): dto for dto in py_dtos}
        dart_by_urn = {dto.contract_urn.replace('contract:', ''): dto for dto in dart_dtos}
        ts_by_urn = {dto.contract_urn.replace('contract:', ''): dto for dto in ts_dtos}

        results = []

        for contract in contracts:
            urn = contract.contract_id.replace('contract:', '')

            # Create requirement
            req = ContractRequirements(
                contract_urn=urn,
                schema_path=contract.file_path,
                producer=contract.producer,
                consumers=contract.consumers
            )

            # Check actual implementations
            req.has_python = urn in py_by_urn
            req.has_dart = urn in dart_by_urn
            req.has_typescript = urn in ts_by_urn

            # Calculate requirements based on producer/consumer stacks
            req.calculate_requirements(wagon_stacks)

            # Generate path suggestions only for the languages that are missing
            if req.missing_python:
                req.python_path_suggestion = self._suggest_python_path(urn, req.producer, wagon_stacks)

            if req.missing_dart:
                req.dart_path_suggestion = self._suggest_dart_path(urn, req.consumers, wagon_stacks)

            if req.missing_typescript:
                req.typescript_path_suggestion = self._suggest_ts_path(urn, req.producer, wagon_stacks)

            results.append(req)

        return results

    def _suggest_python_path(self, contract_urn: str, producer: Optional[str], wagon_stacks: Dict[str, WagonTechStack]) -> str:
        """Suggest Python DTO path based on producer wagon."""
        # Only ':' becomes '/', dots are kept:
        # match:dilemma.current → python/contracts/match/dilemma.current.py
        default_path = f"python/contracts/{contract_urn.replace(':', '/')}.py"

        if not producer:
            return default_path

        stack = wagon_stacks.get(producer)
        if stack and stack.python_path:
            # Suggest within wagon's directory
            return f"{stack.python_path}/contracts/{contract_urn.replace(':', '/')}.py"

        return default_path

    def _suggest_dart_path(self, contract_urn: str, consumers: List[str], wagon_stacks: Dict[str, WagonTechStack]) -> str:
        """Suggest Dart entity path based on consumer features."""
        # Prefer the first consumer that has a Dart stack configured
        for consumer_urn in consumers:
            stack = wagon_stacks.get(consumer_urn)
            if stack and stack.has_dart and stack.dart_path:
                entity_name = contract_urn.split(':')[-1].replace('.', '_')
                return f"{stack.dart_path}/domain/{entity_name}_entity.dart"

        # Default: try to match feature name from contract
        domain = contract_urn.split(':')[0] if ':' in contract_urn else contract_urn
        resource = contract_urn.split(':')[-1] if ':' in contract_urn else contract_urn
        return f"lib/features/{domain}/domain/{resource.replace('.', '_')}_entity.dart"

    def _suggest_ts_path(self, contract_urn: str, producer: Optional[str], wagon_stacks: Dict[str, WagonTechStack]) -> str:
        """Suggest TypeScript interface path based on producer wagon."""
        # Only ':' becomes '/', dots are kept:
        # match:dilemma.current → src/contracts/match/dilemma.current.ts
        default_path = f"src/contracts/{contract_urn.replace(':', '/')}.ts"

        if not producer:
            return default_path

        stack = wagon_stacks.get(producer)
        if stack and stack.typescript_path:
            # Suggest within wagon's directory
            return f"{stack.typescript_path}/contracts/{contract_urn.replace(':', '/')}.ts"

        return default_path
|
|
2034
|
+
|
|
2035
|
+
|
|
2036
|
+
class FunnelAnalyzer:
    """
    Use case: Analyze traceability funnel by theme.

    Identifies where traceability breaks: wagon → artifact → contract → impl.

    For every theme the funnel counts producing wagons, declared artifacts,
    contracts with schemas and per-language implementations, and records a
    "leak" entry for each item that fails to reach the next stage.
    """

    def __init__(self):
        self.manifest_parser = ManifestParser()
        self.contract_finder = ContractFinder()
        self.python_finder = PythonDTOFinder()
        self.dart_finder = DartDTOFinder()
        self.ts_finder = TypeScriptDTOFinder()

    @staticmethod
    def _urn(contract) -> str:
        """Contract id without the 'contract:' scheme prefix (no-op if absent)."""
        return contract.contract_id.replace('contract:', '')

    @classmethod
    def _impl_stage(cls, theme_contracts, impl_by_urn, stage_name, missing_reason):
        """
        Build one "Contract → <language>" funnel stage.

        Args:
            theme_contracts: contracts belonging to the current theme
            impl_by_urn: implementation lookup keyed by normalized contract URN
            stage_name: display name for the stage
            missing_reason: leak reason recorded for unimplemented contracts

        Returns:
            (implemented_count, FunnelStage) tuple
        """
        leaks = []
        impl_count = 0
        for contract in theme_contracts:
            urn = cls._urn(contract)
            if urn in impl_by_urn:
                impl_count += 1
            else:
                leaks.append({
                    'contract': urn,
                    'schema': contract.file_path,
                    'reason': missing_reason
                })
        stage = FunnelStage(
            stage_name=stage_name,
            total_in=len(theme_contracts),
            total_out=impl_count,
            leaks=leaks
        )
        return impl_count, stage

    def analyze_funnel(self) -> FunnelAnalysisResult:
        """
        Run full funnel analysis.

        Returns:
            FunnelAnalysisResult with breakdown by theme
        """
        result = FunnelAnalysisResult()

        # Load all data
        manifests = self.manifest_parser.find_all_manifests()
        contracts = self.contract_finder.find_all_contracts()
        py_dtos = self.python_finder.find_all_dtos()
        dart_dtos = self.dart_finder.find_all_dtos()
        ts_dtos = self.ts_finder.find_all_dtos()

        # Implementation lookups keyed by normalized contract URN.
        # (The previous revision also built an unused contracts-by-URN map.)
        py_by_urn = {
            dto.contract_urn.replace('contract:', ''): dto
            for dto in py_dtos
        }
        dart_by_urn = {
            dto.contract_urn.replace('contract:', ''): dto
            for dto in dart_dtos
        }
        ts_by_urn = {
            dto.contract_urn.replace('contract:', ''): dto
            for dto in ts_dtos
        }

        # Extract all artifacts from wagon manifests
        artifacts_by_theme = defaultdict(list)
        wagons_by_theme = defaultdict(set)

        for manifest_path, manifest_data in manifests:
            wagon = manifest_data.get('wagon', 'unknown')

            for produce in manifest_data.get('produce', []):
                artifact_name = produce.get('name', '')
                if not artifact_name:
                    continue

                # Extract theme from artifact name (e.g., "match:dilemma.current" → "match")
                theme = artifact_name.split(':')[0] if ':' in artifact_name else 'unknown'

                artifacts_by_theme[theme].append({
                    'name': artifact_name,
                    'wagon': wagon,
                    'contract_ref': produce.get('contract'),
                    # YAML may render an absent contract as the string 'null'.
                    'has_contract': produce.get('contract') is not None and produce.get('contract') != 'null'
                })
                wagons_by_theme[theme].add(wagon)

        # Build funnel for each theme
        for theme in sorted(artifacts_by_theme.keys()):
            artifacts = artifacts_by_theme[theme]
            funnel = ThemeFunnel(theme=theme)

            # Count wagons and artifacts for this theme
            funnel.wagon_count = len(wagons_by_theme[theme])
            funnel.artifact_count = len(artifacts)

            # Artifact → Contract stage
            artifacts_with_contracts = [a for a in artifacts if a['has_contract']]
            artifact_to_contract_leaks = [
                {
                    'artifact': artifact['name'],
                    'wagon': artifact['wagon'],
                    'reason': 'contract: null in wagon manifest'
                }
                for artifact in artifacts
                if not artifact['has_contract']
            ]

            funnel.stage_artifact_to_contract = FunnelStage(
                stage_name="Artifact → Contract",
                total_in=len(artifacts),
                total_out=len(artifacts_with_contracts),
                leaks=artifact_to_contract_leaks
            )

            # Contracts for this theme. Contract ids may carry a 'contract:'
            # scheme prefix (it is stripped everywhere else in this class),
            # so normalize before matching the theme prefix — matching the
            # raw id would miss every prefixed contract.
            theme_contracts = [
                c for c in contracts
                if self._urn(c).startswith((theme + ':', theme + '.'))
            ]
            funnel.contract_count = len(theme_contracts)

            # Contract → implementation stages, one per language.
            funnel.python_impl_count, funnel.stage_contract_to_python = self._impl_stage(
                theme_contracts, py_by_urn, "Contract → Python", 'No Python DTO found')
            funnel.dart_impl_count, funnel.stage_contract_to_dart = self._impl_stage(
                theme_contracts, dart_by_urn, "Contract → Dart", 'No Dart entity found')
            funnel.typescript_impl_count, funnel.stage_contract_to_typescript = self._impl_stage(
                theme_contracts, ts_by_urn, "Contract → TypeScript", 'No TypeScript interface found')

            result.by_theme[theme] = funnel

        # Find orphaned contracts (no producing wagon)
        all_artifact_names = {
            artifact['name']
            for artifacts in artifacts_by_theme.values()
            for artifact in artifacts
        }

        for contract in contracts:
            urn = self._urn(contract)
            # Check if any artifact produces this contract
            if urn not in all_artifact_names:
                result.orphaned_contracts.append({
                    'contract': urn,
                    'schema': contract.file_path,
                    'producer': contract.producer
                })

        return result
|
|
2232
|
+
|
|
2233
|
+
|
|
2234
|
+
class SmartFunnelAnalyzer:
    """
    Use case: Smart funnel analysis with producer/consumer awareness.

    Shows only required DTOs based on actual wagon tech stacks.

    Unlike FunnelAnalyzer, implementation stages count a contract only when
    some producer or consumer actually requires that language.
    """

    def __init__(self):
        self.manifest_parser = ManifestParser()
        self.smart_reconciler = SmartImplementationReconciler()

    @staticmethod
    def _required_stage(theme_requirements, requires_attr, has_attr,
                        stage_name, missing_reason, required_count, impl_count):
        """
        Build one "Contract → <language> (Required Only)" stage.

        Args:
            theme_requirements: requirement records for the current theme
            requires_attr: name of the boolean "requires_<lang>" attribute
            has_attr: name of the boolean "has_<lang>" attribute
            stage_name: display name for the stage
            missing_reason: leak reason for required-but-missing contracts
            required_count: number of contracts requiring this language
            impl_count: number of those contracts already implemented

        Returns:
            FunnelStage, or None when nothing requires this language
            (matching the original behavior of leaving the stage unset).
        """
        if required_count <= 0:
            return None
        leaks = [
            {
                'contract': req.contract_urn,
                'producer': req.producer,
                'consumers': req.consumers,
                'reason': missing_reason
            }
            for req in theme_requirements
            if getattr(req, requires_attr) and not getattr(req, has_attr)
        ]
        return FunnelStage(
            stage_name=stage_name,
            total_in=required_count,
            total_out=impl_count,
            leaks=leaks
        )

    def analyze_smart_funnel(self) -> SmartFunnelAnalysisResult:
        """
        Run smart funnel analysis.

        Returns:
            SmartFunnelAnalysisResult with producer/consumer aware breakdown
        """
        result = SmartFunnelAnalysisResult()

        # Get smart requirements (already has producer/consumer awareness)
        requirements = self.smart_reconciler.reconcile_smart()

        # Load manifests to get artifact info
        manifests = self.manifest_parser.find_all_manifests()

        # Build artifacts by theme
        artifacts_by_theme = defaultdict(list)
        wagons_by_theme = defaultdict(set)

        for manifest_path, manifest_data in manifests:
            wagon = manifest_data.get('wagon', 'unknown')

            for produce in manifest_data.get('produce', []):
                artifact_name = produce.get('name', '')
                if not artifact_name:
                    continue

                # Theme is the first colon-delimited segment of the name.
                theme = artifact_name.split(':')[0] if ':' in artifact_name else 'unknown'

                artifacts_by_theme[theme].append({
                    'name': artifact_name,
                    'wagon': wagon,
                    'contract_ref': produce.get('contract'),
                    # YAML may render an absent contract as the string 'null'.
                    'has_contract': produce.get('contract') is not None and produce.get('contract') != 'null'
                })
                wagons_by_theme[theme].add(wagon)

        # Group requirements by theme
        requirements_by_theme = defaultdict(list)
        for req in requirements:
            theme = req.contract_urn.split(':')[0] if ':' in req.contract_urn else 'unknown'
            requirements_by_theme[theme].append(req)

        # Build smart funnel for each theme; a theme may appear on either side.
        all_themes = set(artifacts_by_theme) | set(requirements_by_theme)

        for theme in sorted(all_themes):
            artifacts = artifacts_by_theme.get(theme, [])
            theme_requirements = requirements_by_theme.get(theme, [])

            funnel = SmartThemeFunnel(theme=theme)

            # Head-of-funnel counts and contract details
            funnel.wagon_count = len(wagons_by_theme.get(theme, set()))
            funnel.artifact_count = len(artifacts)
            funnel.contract_count = len(theme_requirements)
            funnel.contracts = theme_requirements

            # Per-language requirement and implementation tallies
            funnel.python_required = sum(1 for r in theme_requirements if r.requires_python)
            funnel.dart_required = sum(1 for r in theme_requirements if r.requires_dart)
            funnel.typescript_required = sum(1 for r in theme_requirements if r.requires_typescript)

            funnel.python_impl_count = sum(1 for r in theme_requirements if r.requires_python and r.has_python)
            funnel.dart_impl_count = sum(1 for r in theme_requirements if r.requires_dart and r.has_dart)
            funnel.typescript_impl_count = sum(1 for r in theme_requirements if r.requires_typescript and r.has_typescript)

            # Artifact → Contract stage
            artifacts_with_contracts = [a for a in artifacts if a['has_contract']]
            artifact_to_contract_leaks = [
                {
                    'artifact': artifact['name'],
                    'wagon': artifact['wagon'],
                    'reason': 'contract: null in wagon manifest'
                }
                for artifact in artifacts
                if not artifact['has_contract']
            ]

            funnel.stage_artifact_to_contract = FunnelStage(
                stage_name="Artifact → Contract",
                total_in=len(artifacts),
                total_out=len(artifacts_with_contracts),
                leaks=artifact_to_contract_leaks
            )

            # Contract → language stages (smart: only count if required)
            stage = self._required_stage(
                theme_requirements, 'requires_python', 'has_python',
                "Contract → Python (Required Only)",
                'Required by producer/consumer but not implemented',
                funnel.python_required, funnel.python_impl_count)
            if stage is not None:
                funnel.stage_contract_to_python = stage

            stage = self._required_stage(
                theme_requirements, 'requires_dart', 'has_dart',
                "Contract → Dart (Required Only)",
                'Required by Dart consumer but not implemented',
                funnel.dart_required, funnel.dart_impl_count)
            if stage is not None:
                funnel.stage_contract_to_dart = stage

            stage = self._required_stage(
                theme_requirements, 'requires_typescript', 'has_typescript',
                "Contract → TypeScript (Required Only)",
                'Required by TypeScript consumer but not implemented',
                funnel.typescript_required, funnel.typescript_impl_count)
            if stage is not None:
                funnel.stage_contract_to_typescript = stage

            result.by_theme[theme] = funnel

        return result
|
|
2397
|
+
|
|
2398
|
+
|
|
2399
|
+
class TraceabilityValidator:
    """
    Use case: Validate bidirectional traceability.

    Ensures wagon->contract and contract->wagon references are consistent.
    """

    def validate_bidirectional(self, produce_item: Dict, contract: Dict) -> bool:
        """
        Validate bidirectional traceability.

        Checks:
        - Wagon declares producing the contract
        - Contract declares the wagon as producer
        """
        # The contract's 'producer' must name exactly this wagon,
        # using the 'wagon:' scheme prefix.
        return contract.get('producer') == f"wagon:{produce_item['wagon']}"

    def check_producer_match(self, produce_item: Dict, contract: Dict) -> Optional[Dict]:
        """
        Check if producer in contract matches wagon declaration.

        Returns mismatch details if inconsistent, None if consistent.
        """
        expected = f"wagon:{produce_item['wagon']}"
        actual = contract.get('producer')

        if expected == actual:
            return None

        # Report the discrepancy with enough context to locate both sides.
        return {
            'wagon': produce_item['wagon'],
            'urn': produce_item['urn'],
            'expected': expected,
            'actual': actual,
            'contract_path': contract.get('file_path')
        }
|
|
2438
|
+
|
|
2439
|
+
|
|
2440
|
+
class ContractMatcher:
    """
    Use case: Match contracts using multiple strategies.

    Wrapper around ContractFinder for backward compatibility.
    """

    def __init__(self):
        self.finder = ContractFinder()

    def find_by_urn(self, urn: str, contracts: List[Dict]) -> Optional[Dict]:
        """Find contract by URN using multiple matching strategies."""
        # Adapt the legacy dict records into ContractFile objects so the
        # finder's matching strategies can operate on them.
        candidates = []
        for entry in contracts:
            candidates.append(
                ContractFile(
                    file_path=entry['file_path'],
                    contract_id=entry.get('contract_id', ''),
                    domain=entry.get('domain', ''),
                    resource=entry.get('resource', ''),
                    version=entry.get('version'),
                    producer=entry.get('producer')
                )
            )

        match = self.finder.find_by_urn(urn, candidates)
        if match is None:
            return None

        # Project the match back into the legacy dict shape callers expect.
        return {
            'file_path': match.file_path,
            'contract_id': match.contract_id
        }
|
|
2474
|
+
|
|
2475
|
+
|
|
2476
|
+
# ============================================================================
|
|
2477
|
+
# LAYER 3: ADAPTERS (I/O, Formatting)
|
|
2478
|
+
# ============================================================================
|
|
2479
|
+
|
|
2480
|
+
|
|
2481
|
+
class ReportFormatter:
    """
    Adapter: Format reconciliation reports for display.

    Converts reconciliation results into human-readable text.
    Pure formatting: reads the result object, performs no I/O itself.
    """

    @staticmethod
    def format_report(result: ReconciliationResult) -> str:
        """Format comprehensive reconciliation report.

        Builds a sectioned plain-text report: a summary of issue counts,
        then one section per issue category (each rendered only when the
        category is non-empty), and finally a per-wagon rollup.

        Args:
            result: reconciliation outcome to render.

        Returns:
            The full report as a single newline-joined string.
        """
        lines = []

        lines.append("=" * 70)
        lines.append("CONTRACT/TELEMETRY TRACEABILITY RECONCILIATION")
        lines.append("=" * 70)
        lines.append("")

        # Summary of counts across every issue category.
        lines.append(f"📊 SUMMARY")
        lines.append(f" Total Issues: {result.total_issues}")
        lines.append(f" Missing Contract Refs: {len(result.missing_contract_refs)}")
        lines.append(f" Missing Telemetry Refs: {len(result.missing_telemetry_refs)}")
        lines.append(f" Signal-Driven Telemetry Issues: {len(result.missing_signal_telemetry)}")
        lines.append(f" Telemetry Without artifact_ref: {len(result.telemetry_without_artifact_ref)}")
        lines.append(f" Telemetry With Invalid artifact_ref: {len(result.telemetry_invalid_artifact_ref)}")
        lines.append(f" Telemetry Naming Convention Violations: {len(result.telemetry_naming_violations)}")
        lines.append(f" Feature-Wagon I/O Mismatches: {len(result.feature_io_mismatches)}")
        # Orphaned telemetry is informational, so it only appears when present.
        if result.orphaned_telemetry:
            lines.append(f" ℹ️ Orphaned Telemetry (info): {len(result.orphaned_telemetry)}")
        lines.append("")

        # Missing contract references
        if result.missing_contract_refs:
            lines.append("=" * 70)
            lines.append("🔴 MISSING CONTRACT REFERENCES")
            lines.append("=" * 70)
            lines.append("")

            for ref in result.missing_contract_refs:
                lines.append(f"Wagon: {ref['wagon']}")
                lines.append(f" Produce: {ref['produce_name']}")
                lines.append(f" URN: {ref['urn']}")
                lines.append(f" Current: contract: null")
                lines.append(f" 💡 PROPOSED FIX: contract: {ref['proposed_fix']}")
                lines.append("")

        # Missing telemetry references
        if result.missing_telemetry_refs:
            lines.append("=" * 70)
            lines.append("🔴 MISSING TELEMETRY REFERENCES")
            lines.append("=" * 70)
            lines.append("")

            for ref in result.missing_telemetry_refs:
                lines.append(f"Wagon: {ref['wagon']}")
                lines.append(f" Produce: {ref['produce_name']}")
                lines.append(f" URN: {ref['urn']}")
                lines.append(f" Current: telemetry: null")
                lines.append(f" 💡 PROPOSED FIX: telemetry: {ref['proposed_fix']}")
                lines.append("")

        # Signal-driven telemetry issues
        if result.missing_signal_telemetry:
            lines.append("=" * 70)
            lines.append("🔴 SIGNAL-DRIVEN TELEMETRY ISSUES")
            lines.append("=" * 70)
            lines.append("")

            for issue in result.missing_signal_telemetry:
                lines.append(f"Wagon: {issue['wagon']}")
                lines.append(f" Signals Declared: {issue['signal_count']} (in acceptance criteria)")
                lines.append(f" Expected URN: {issue['expected_urn']}")
                lines.append(f" Current: {issue['current']}")
                lines.append(f" 💡 PROPOSED FIX: telemetry: {issue['proposed_fix']}")
                lines.append(f" Reason: {issue['reason']}")
                lines.append("")

        # Telemetry without artifact_ref
        if result.telemetry_without_artifact_ref:
            lines.append("=" * 70)
            lines.append("🔴 TELEMETRY FILES WITHOUT artifact_ref")
            lines.append("=" * 70)
            lines.append("")

            for issue in result.telemetry_without_artifact_ref:
                lines.append(f"File: {issue['telemetry_file']}")
                lines.append(f" ID: {issue['telemetry_id']}")
                lines.append(f" Issue: {issue['reason']}")
                lines.append(f" 💡 FIX: Add artifact_ref field pointing to contract URN")
                lines.append("")

        # Telemetry with invalid artifact_ref
        if result.telemetry_invalid_artifact_ref:
            lines.append("=" * 70)
            lines.append("🔴 TELEMETRY FILES WITH INVALID artifact_ref")
            lines.append("=" * 70)
            lines.append("")

            for issue in result.telemetry_invalid_artifact_ref:
                lines.append(f"File: {issue['telemetry_file']}")
                lines.append(f" ID: {issue['telemetry_id']}")
                lines.append(f" artifact_ref: {issue['artifact_ref']}")
                lines.append(f" Issue: {issue['reason']}")
                lines.append(f" 💡 FIX: Update artifact_ref to point to existing contract")
                lines.append("")

        # Telemetry naming convention violations
        if result.telemetry_naming_violations:
            lines.append("=" * 70)
            lines.append("🔴 TELEMETRY NAMING CONVENTION VIOLATIONS")
            lines.append("=" * 70)
            lines.append("")
            # Plain string, not an f-string: the braces are literal placeholders
            # describing the naming convention.
            lines.append("Convention: {theme}:{domain}:{aspect}.{type}.{plane}[.{measure}]")
            lines.append(" - Colons (:) for hierarchy (theme, domain, aspect)")
            lines.append(" - Dots (.) for signal facets (type, plane, measure)")
            lines.append("")

            for violation in result.telemetry_naming_violations:
                lines.append(f"File: {violation['file']}")
                lines.append(f" Current $id: {violation['current_id']}")
                lines.append(f" Violation: {violation['violation']}")
                # Optional detail fields: rendered only when the violation
                # record carries them.
                if 'suggested_fix' in violation:
                    lines.append(f" 💡 SUGGESTED FIX: $id = \"{violation['suggested_fix']}\"")
                if 'reason' in violation:
                    lines.append(f" Reason: {violation['reason']}")
                if 'example' in violation:
                    lines.append(f" Example: {violation['example']}")
                if 'artifact_ref' in violation:
                    lines.append(f" artifact_ref: {violation['artifact_ref']}")
                lines.append("")

        # Feature-Wagon I/O Mismatches
        if result.feature_io_mismatches:
            lines.append("=" * 70)
            lines.append("🔴 FEATURE-WAGON I/O MISMATCHES")
            lines.append("=" * 70)
            lines.append("")
            lines.append("Features must align with their parent wagon's produce/consume declarations.")
            lines.append("")

            for mismatch in result.feature_io_mismatches:
                lines.append(f"Feature: {mismatch['feature']}")
                lines.append(f" File: {mismatch['file']}")
                lines.append(f" Wagon: {mismatch['wagon']}")
                if 'artifact_name' in mismatch:
                    lines.append(f" Artifact: {mismatch['artifact_name']}")
                lines.append(f" Issue: {mismatch['issue']}")

                # Show detailed mismatch information: both sides when available,
                # otherwise just the feature-side declaration.
                if 'feature_contract' in mismatch and 'wagon_contract' in mismatch:
                    lines.append(f" Feature declares: contract: {mismatch['feature_contract']}")
                    lines.append(f" Wagon declares: contract: {mismatch['wagon_contract']}")
                elif 'feature_contract' in mismatch:
                    lines.append(f" Feature contract: {mismatch['feature_contract']}")

                if 'feature_telemetry' in mismatch and 'wagon_telemetry' in mismatch:
                    lines.append(f" Feature declares: telemetry: {mismatch['feature_telemetry']}")
                    lines.append(f" Wagon declares: telemetry: {mismatch['wagon_telemetry']}")
                elif 'feature_telemetry' in mismatch:
                    lines.append(f" Feature telemetry: {mismatch['feature_telemetry']}")

                lines.append(f" 💡 FIX: Update feature ioSeeds to match wagon manifest declarations")
                lines.append("")

        # Orphaned telemetry (informational)
        if result.orphaned_telemetry:
            lines.append("=" * 70)
            lines.append("ℹ️ ORPHANED TELEMETRY (INFORMATIONAL)")
            lines.append("=" * 70)
            lines.append("")

            for orphan in result.orphaned_telemetry:
                lines.append(f"Wagon: {orphan['wagon']}")
                lines.append(f" Note: {orphan['note']}")
                lines.append("")

        # By wagon summary
        if result.by_wagon:
            lines.append("=" * 70)
            lines.append("📦 BY WAGON")
            lines.append("=" * 70)
            lines.append("")

            for wagon, issues in result.by_wagon.items():
                # Aggregate this wagon's issue count; 'missing_signal_telemetry'
                # may be absent from older records, hence the .get default.
                total = (
                    len(issues['missing_contracts']) +
                    len(issues['missing_telemetry']) +
                    len(issues.get('missing_signal_telemetry', []))
                )
                # Only wagons that actually have issues are listed.
                if total > 0:
                    lines.append(f"🚂 {wagon}: {total} issues")
                    lines.append(f" Manifest: {issues['manifest_path']}")
                    lines.append("")

        return "\n".join(lines)
|
|
2676
|
+
|
|
2677
|
+
|
|
2678
|
+
class ImplementationReportFormatter:
    """
    Adapter: Format contract implementation coverage reports.

    Converts implementation reconciliation results into human-readable text.
    Pure formatting: reads the result object, performs no I/O itself.
    """

    @staticmethod
    def format_report(result: ImplementationReconciliationResult) -> str:
        """Format comprehensive implementation coverage report.

        Sections: summary counts, per-contract coverage (sorted so the least
        covered contracts appear first), per-language missing-implementation
        lists, orphaned DTOs, and aggregate statistics. Sections for empty
        categories are omitted.

        Args:
            result: implementation reconciliation outcome to render.

        Returns:
            The full report as a single newline-joined string.
        """
        lines = []

        lines.append("=" * 80)
        lines.append("CONTRACT IMPLEMENTATION TRACEABILITY")
        lines.append("=" * 80)
        lines.append("")

        # Summary
        lines.append("📊 SUMMARY")
        lines.append(f" Total Contracts: {result.total_contracts}")
        lines.append(f" Average Coverage: {result.avg_coverage:.1f}%")
        lines.append(f" Total Issues: {result.total_issues}")
        lines.append(f" Missing Python DTOs: {len(result.missing_python)}")
        lines.append(f" Missing Dart Entities: {len(result.missing_dart)}")
        lines.append(f" Missing TypeScript Interfaces: {len(result.missing_typescript)}")
        lines.append(f" Orphaned DTOs: {len(result.orphaned_dtos)}")
        lines.append("")

        # Coverage by contract
        if result.coverage_by_contract:
            lines.append("=" * 80)
            lines.append("📦 COVERAGE BY CONTRACT")
            lines.append("=" * 80)
            lines.append("")

            # Sort by coverage percentage (show incomplete first)
            sorted_coverage = sorted(result.coverage_by_contract, key=lambda c: c.coverage_percentage)

            for cov in sorted_coverage:
                # Status indicators, one per target language.
                status_py = "✅" if cov.python_impl else "❌"
                status_dart = "✅" if cov.dart_impl else "❌"
                status_ts = "✅" if cov.typescript_impl else "❌"

                # NOTE(review): the trailing space in "⚠️ " is in the original;
                # presumably it pads the narrow glyph for alignment — confirm.
                coverage_emoji = "✅" if cov.is_fully_covered else "⚠️ "

                lines.append(f"{coverage_emoji} {cov.contract_urn} ({cov.coverage_percentage:.0f}%)")
                lines.append(f" Schema: {cov.schema_path}")

                # Python implementation
                if cov.python_impl:
                    lines.append(f" {status_py} Python: {cov.python_impl.file_path}")
                    lines.append(f" Class: {cov.python_impl.class_name} ({len(cov.python_impl.fields)} fields)")
                else:
                    lines.append(f" {status_py} Python: MISSING")

                # Dart implementation
                if cov.dart_impl:
                    lines.append(f" {status_dart} Dart: {cov.dart_impl.file_path}")
                    lines.append(f" Class: {cov.dart_impl.class_name} ({len(cov.dart_impl.fields)} fields)")
                else:
                    lines.append(f" {status_dart} Dart: MISSING")

                # TypeScript implementation
                if cov.typescript_impl:
                    lines.append(f" {status_ts} TypeScript: {cov.typescript_impl.file_path}")
                    lines.append(f" Interface: {cov.typescript_impl.class_name} ({len(cov.typescript_impl.fields)} fields)")
                else:
                    lines.append(f" {status_ts} TypeScript: MISSING")

                lines.append("")

        # Missing Python DTOs
        if result.missing_python:
            lines.append("=" * 80)
            lines.append("🔴 MISSING PYTHON DTOs")
            lines.append("=" * 80)
            lines.append("")

            for missing in result.missing_python:
                lines.append(f"Contract: {missing['contract']}")
                lines.append(f" Schema: {missing['schema']}")
                lines.append(f" 💡 SUGGESTED: Create {missing['expected_path']}")
                lines.append("")

        # Missing Dart entities
        if result.missing_dart:
            lines.append("=" * 80)
            lines.append("🔴 MISSING DART ENTITIES")
            lines.append("=" * 80)
            lines.append("")

            for missing in result.missing_dart:
                lines.append(f"Contract: {missing['contract']}")
                lines.append(f" Schema: {missing['schema']}")
                lines.append(f" 💡 SUGGESTED: Create {missing['expected_path']}")
                lines.append("")

        # Missing TypeScript interfaces
        if result.missing_typescript:
            lines.append("=" * 80)
            lines.append("🔴 MISSING TYPESCRIPT INTERFACES")
            lines.append("=" * 80)
            lines.append("")

            for missing in result.missing_typescript:
                lines.append(f"Contract: {missing['contract']}")
                lines.append(f" Schema: {missing['schema']}")
                lines.append(f" 💡 SUGGESTED: Create {missing['expected_path']}")
                lines.append("")

        # Orphaned DTOs: implementations that have no backing schema.
        if result.orphaned_dtos:
            lines.append("=" * 80)
            lines.append("⚠️ ORPHANED DTOs (No Schema)")
            lines.append("=" * 80)
            lines.append("")

            for orphan in result.orphaned_dtos:
                lines.append(f"File: {orphan['file']}")
                lines.append(f" URN: {orphan['urn']}")
                lines.append(f" Language: {orphan['language'].capitalize()}")
                lines.append(f" Class: {orphan['class_name']}")
                lines.append(f" Issue: {orphan['reason']}")
                # Suggested schema path mirrors the URN with ':' → '/'.
                lines.append(f" 💡 FIX: Create schema at contracts/{orphan['urn'].replace(':', '/')}.schema.json")
                lines.append(f" or remove orphaned DTO")
                lines.append("")

        # Statistics summary
        if result.coverage_by_contract:
            lines.append("=" * 80)
            lines.append("📈 STATISTICS")
            lines.append("=" * 80)
            lines.append("")

            # Count by coverage level
            full_coverage = sum(1 for c in result.coverage_by_contract if c.coverage_percentage == 100)
            partial_coverage = sum(1 for c in result.coverage_by_contract if 0 < c.coverage_percentage < 100)
            no_coverage = sum(1 for c in result.coverage_by_contract if c.coverage_percentage == 0)

            lines.append(f"Full Coverage (100%): {full_coverage} contracts")
            lines.append(f"Partial Coverage: {partial_coverage} contracts")
            lines.append(f"No Coverage (0%): {no_coverage} contracts")
            lines.append("")

            # Language-specific stats. The enclosing truthiness check
            # guarantees total >= 1, so the divisions below cannot raise
            # ZeroDivisionError.
            total = len(result.coverage_by_contract)
            py_count = sum(1 for c in result.coverage_by_contract if c.python_impl)
            dart_count = sum(1 for c in result.coverage_by_contract if c.dart_impl)
            ts_count = sum(1 for c in result.coverage_by_contract if c.typescript_impl)

            lines.append(f"Python Implementation: {py_count}/{total} ({py_count/total*100:.1f}%)")
            lines.append(f"Dart Implementation: {dart_count}/{total} ({dart_count/total*100:.1f}%)")
            lines.append(f"TypeScript Implementation: {ts_count}/{total} ({ts_count/total*100:.1f}%)")
            lines.append("")

        return "\n".join(lines)
|
|
2835
|
+
|
|
2836
|
+
|
|
2837
|
+
class FunnelReportFormatter:
    """
    Adapter: Format funnel analysis reports.

    Shows traceability breakdown by theme with leak identification.
    Output is a plain-text report: executive summary, a per-theme funnel
    (wagons -> artifacts -> contracts -> per-language implementations),
    leak listings, orphaned contracts, and key insights.
    """

    @staticmethod
    def format_report(result: FunnelAnalysisResult) -> str:
        """Format comprehensive funnel analysis report.

        Args:
            result: Aggregated funnel analysis (per-theme funnels,
                orphaned contracts, healthiest/sickest theme names).

        Returns:
            The full report as a single newline-joined string.
        """
        lines = []

        lines.append("=" * 80)
        lines.append("TRACEABILITY FUNNEL ANALYSIS")
        lines.append("=" * 80)
        lines.append("")

        # Executive summary
        lines.append("📊 EXECUTIVE SUMMARY")
        lines.append(f" Total Themes: {result.total_themes}")
        if result.healthiest_theme:
            lines.append(f" 🏆 Healthiest Theme: {result.healthiest_theme}")
        if result.sickest_theme:
            lines.append(f" ⚠️ Sickest Theme: {result.sickest_theme}")
        lines.append("")

        # Funnel by theme — healthiest first (reverse sort on overall_health).
        for theme, funnel in sorted(result.by_theme.items(),
                                    key=lambda x: x[1].overall_health,
                                    reverse=True):
            lines.append("=" * 80)
            # Health bands: >=75 green, >=25 warning, otherwise red.
            health_emoji = "✅" if funnel.overall_health >= 75 else "⚠️ " if funnel.overall_health >= 25 else "🔴"
            lines.append(f"{health_emoji} THEME: {theme.upper()} (Health: {funnel.overall_health:.1f}%)")
            lines.append("=" * 80)
            lines.append("")

            # Funnel visualization (box-drawing characters form the funnel)
            lines.append("FUNNEL STAGES:")
            lines.append("")
            lines.append(f" ┌─ Wagons: {funnel.wagon_count}")
            lines.append(f" │")
            lines.append(f" ├─ Artifacts: {funnel.artifact_count}")

            if funnel.stage_artifact_to_contract:
                stage = funnel.stage_artifact_to_contract
                leak_indicator = "💧" if stage.leak_rate > 0 else " "
                lines.append(f" │ {leak_indicator} ({stage.pass_rate:.0f}% pass, {stage.leak_rate:.0f}% leak)")

            lines.append(f" │")
            lines.append(f" ├─ Contracts: {funnel.contract_count}")
            lines.append(f" │")

            # Python branch
            if funnel.stage_contract_to_python:
                stage = funnel.stage_contract_to_python
                leak_indicator = "💧" if stage.leak_rate > 0 else " "
                lines.append(f" ├──┬─ Python DTOs: {funnel.python_impl_count}")
                lines.append(f" │ {leak_indicator} ({stage.pass_rate:.0f}% pass, {stage.leak_rate:.0f}% leak)")

            # Dart branch
            if funnel.stage_contract_to_dart:
                stage = funnel.stage_contract_to_dart
                leak_indicator = "💧" if stage.leak_rate > 0 else " "
                lines.append(f" ├──┬─ Dart Entities: {funnel.dart_impl_count}")
                lines.append(f" │ {leak_indicator} ({stage.pass_rate:.0f}% pass, {stage.leak_rate:.0f}% leak)")

            # TypeScript branch
            if funnel.stage_contract_to_typescript:
                stage = funnel.stage_contract_to_typescript
                leak_indicator = "💧" if stage.leak_rate > 0 else " "
                lines.append(f" └──┬─ TypeScript Interfaces: {funnel.typescript_impl_count}")
                lines.append(f" {leak_indicator} ({stage.pass_rate:.0f}% pass, {stage.leak_rate:.0f}% leak)")

            lines.append("")

            # Show leaks for this theme (capped at 5 per stage to keep output short)
            if funnel.stage_artifact_to_contract and funnel.stage_artifact_to_contract.leaks:
                lines.append(" 💧 LEAKS AT ARTIFACT → CONTRACT:")
                for leak in funnel.stage_artifact_to_contract.leaks[:5]:  # Show first 5
                    lines.append(f" - {leak['artifact']} (wagon: {leak['wagon']})")
                if len(funnel.stage_artifact_to_contract.leaks) > 5:
                    lines.append(f" ... and {len(funnel.stage_artifact_to_contract.leaks) - 5} more")
                lines.append("")

            if funnel.stage_contract_to_python and funnel.stage_contract_to_python.leaks:
                lines.append(" 💧 LEAKS AT CONTRACT → PYTHON:")
                for leak in funnel.stage_contract_to_python.leaks[:5]:
                    lines.append(f" - {leak['contract']}")
                if len(funnel.stage_contract_to_python.leaks) > 5:
                    lines.append(f" ... and {len(funnel.stage_contract_to_python.leaks) - 5} more")
                lines.append("")

            if funnel.stage_contract_to_dart and funnel.stage_contract_to_dart.leaks:
                lines.append(" 💧 LEAKS AT CONTRACT → DART:")
                for leak in funnel.stage_contract_to_dart.leaks[:5]:
                    lines.append(f" - {leak['contract']}")
                if len(funnel.stage_contract_to_dart.leaks) > 5:
                    lines.append(f" ... and {len(funnel.stage_contract_to_dart.leaks) - 5} more")
                lines.append("")

        # Orphaned contracts (result-level, outside the per-theme loop)
        if result.orphaned_contracts:
            lines.append("=" * 80)
            lines.append("⚠️ ORPHANED CONTRACTS (No Producing Wagon)")
            lines.append("=" * 80)
            lines.append("")

            for orphan in result.orphaned_contracts[:10]:
                lines.append(f" - {orphan['contract']}")
                lines.append(f" Schema: {orphan['schema']}")
                lines.append(f" Producer: {orphan.get('producer', 'unknown')}")
                lines.append("")

            if len(result.orphaned_contracts) > 10:
                lines.append(f" ... and {len(result.orphaned_contracts) - 10} more orphaned contracts")
                lines.append("")

        # Key insights
        lines.append("=" * 80)
        lines.append("💡 KEY INSIGHTS")
        lines.append("=" * 80)
        lines.append("")

        # Find worst leaks: each is a (theme, leak_rate) tuple or None.
        worst_artifact_leak = None
        worst_python_leak = None
        worst_dart_leak = None

        for theme, funnel in result.by_theme.items():
            if funnel.stage_artifact_to_contract and funnel.stage_artifact_to_contract.leak_rate > 0:
                if not worst_artifact_leak or funnel.stage_artifact_to_contract.leak_rate > worst_artifact_leak[1]:
                    worst_artifact_leak = (theme, funnel.stage_artifact_to_contract.leak_rate)

            if funnel.stage_contract_to_python and funnel.stage_contract_to_python.leak_rate > 0:
                if not worst_python_leak or funnel.stage_contract_to_python.leak_rate > worst_python_leak[1]:
                    worst_python_leak = (theme, funnel.stage_contract_to_python.leak_rate)

            if funnel.stage_contract_to_dart and funnel.stage_contract_to_dart.leak_rate > 0:
                if not worst_dart_leak or funnel.stage_contract_to_dart.leak_rate > worst_dart_leak[1]:
                    worst_dart_leak = (theme, funnel.stage_contract_to_dart.leak_rate)

        if worst_artifact_leak:
            lines.append(f"⚠️ Biggest Artifact→Contract leak: '{worst_artifact_leak[0]}' ({worst_artifact_leak[1]:.0f}% leak)")

        if worst_python_leak:
            lines.append(f"⚠️ Biggest Contract→Python leak: '{worst_python_leak[0]}' ({worst_python_leak[1]:.0f}% leak)")

        if worst_dart_leak:
            lines.append(f"⚠️ Biggest Contract→Dart leak: '{worst_dart_leak[0]}' ({worst_dart_leak[1]:.0f}% leak)")

        lines.append("")
        lines.append("💡 Focus on fixing the biggest leaks first for maximum impact!")
        lines.append("")

        return "\n".join(lines)
|
|
2992
|
+
|
|
2993
|
+
|
|
2994
|
+
class SmartFunnelReportFormatter:
    """
    Adapter: Format smart funnel reports with producer/consumer awareness.

    Shows visual funnel with only required DTOs: a language branch is
    rendered as "Not required" when no producer/consumer wagon needs it,
    instead of counting it against the theme's health.
    """

    @staticmethod
    def format_report(result: SmartFunnelAnalysisResult) -> str:
        """Format smart funnel analysis report.

        Args:
            result: Smart funnel analysis with per-theme required/implemented
                counts per language plus healthiest/sickest theme names.

        Returns:
            The full report as a single newline-joined string.
        """
        lines = []

        lines.append("=" * 80)
        lines.append("SMART TRACEABILITY FUNNEL (Producer/Consumer Aware)")
        lines.append("=" * 80)
        lines.append("")

        # Executive summary
        lines.append("📊 EXECUTIVE SUMMARY")
        lines.append(f" Total Themes: {result.total_themes}")
        if result.healthiest_theme:
            lines.append(f" 🏆 Healthiest Theme: {result.healthiest_theme}")
        if result.sickest_theme:
            lines.append(f" ⚠️ Sickest Theme: {result.sickest_theme}")
        lines.append("")

        # Funnel by theme (sorted by health, worst first)
        for theme, funnel in sorted(result.by_theme.items(),
                                    key=lambda x: x[1].overall_health):
            lines.append("=" * 80)
            # Health bands: >=75 green, >=25 warning, otherwise red.
            health_emoji = "✅" if funnel.overall_health >= 75 else "⚠️ " if funnel.overall_health >= 25 else "🔴"
            lines.append(f"{health_emoji} THEME: {theme.upper()} (Health: {funnel.overall_health:.1f}%)")
            lines.append("=" * 80)
            lines.append("")

            # Funnel visualization
            lines.append("FUNNEL STAGES:")
            lines.append("")
            lines.append(f" ┌─ Wagons: {funnel.wagon_count}")
            lines.append(f" │")
            lines.append(f" ├─ Artifacts: {funnel.artifact_count}")

            if funnel.stage_artifact_to_contract:
                stage = funnel.stage_artifact_to_contract
                leak_indicator = "💧" if stage.leak_rate > 0 else " "
                lines.append(f" │ {leak_indicator} ({stage.pass_rate:.0f}% pass, {stage.leak_rate:.0f}% leak)")

            lines.append(f" │")
            lines.append(f" ├─ Contracts: {funnel.contract_count}")
            lines.append(f" │")

            # Python branch (only if required): leak when implemented < required.
            if funnel.python_required > 0:
                leak_indicator = "💧" if funnel.python_impl_count < funnel.python_required else " "
                lines.append(f" ├──┬─ Python DTOs: {funnel.python_impl_count}/{funnel.python_required} required")
                if funnel.stage_contract_to_python:
                    lines.append(f" │ {leak_indicator} ({funnel.stage_contract_to_python.pass_rate:.0f}% pass, {funnel.stage_contract_to_python.leak_rate:.0f}% leak)")
            else:
                lines.append(f" ├──┬─ Python DTOs: Not required")

            # Dart branch (only if required)
            if funnel.dart_required > 0:
                leak_indicator = "💧" if funnel.dart_impl_count < funnel.dart_required else " "
                lines.append(f" ├──┬─ Dart Entities: {funnel.dart_impl_count}/{funnel.dart_required} required")
                if funnel.stage_contract_to_dart:
                    lines.append(f" │ {leak_indicator} ({funnel.stage_contract_to_dart.pass_rate:.0f}% pass, {funnel.stage_contract_to_dart.leak_rate:.0f}% leak)")
            else:
                lines.append(f" ├──┬─ Dart Entities: Not required")

            # TypeScript branch (only if required)
            if funnel.typescript_required > 0:
                leak_indicator = "💧" if funnel.typescript_impl_count < funnel.typescript_required else " "
                lines.append(f" └──┬─ TypeScript Interfaces: {funnel.typescript_impl_count}/{funnel.typescript_required} required")
                if funnel.stage_contract_to_typescript:
                    lines.append(f" {leak_indicator} ({funnel.stage_contract_to_typescript.pass_rate:.0f}% pass, {funnel.stage_contract_to_typescript.leak_rate:.0f}% leak)")
            else:
                lines.append(f" └──┬─ TypeScript Interfaces: Not required")

            lines.append("")

            # Show leaks (artifact stage capped at 5, Python stage at 3)
            if funnel.stage_artifact_to_contract and funnel.stage_artifact_to_contract.leaks:
                lines.append(" 💧 LEAKS AT ARTIFACT → CONTRACT:")
                for leak in funnel.stage_artifact_to_contract.leaks[:5]:
                    lines.append(f" - {leak['artifact']} (wagon: {leak['wagon']})")
                if len(funnel.stage_artifact_to_contract.leaks) > 5:
                    lines.append(f" ... and {len(funnel.stage_artifact_to_contract.leaks) - 5} more")
                lines.append("")

            if funnel.stage_contract_to_python and funnel.stage_contract_to_python.leaks:
                lines.append(" 💧 LEAKS AT CONTRACT → PYTHON (Required Only):")
                for leak in funnel.stage_contract_to_python.leaks[:3]:
                    lines.append(f" - {leak['contract']}")
                    lines.append(f" Producer: {leak.get('producer', 'unknown')}")
                    if leak.get('consumers'):
                        lines.append(f" Consumers: {len(leak['consumers'])} wagon(s)")
                if len(funnel.stage_contract_to_python.leaks) > 3:
                    lines.append(f" ... and {len(funnel.stage_contract_to_python.leaks) - 3} more")
                lines.append("")

        # Key insights
        lines.append("=" * 80)
        lines.append("💡 KEY INSIGHTS")
        lines.append("=" * 80)
        lines.append("")

        # Find worst leaks: (theme, missing_rate, missing_count) tuple or None.
        worst_python_leak = None
        for theme, funnel in result.by_theme.items():
            if funnel.python_missing_rate > 0:
                if not worst_python_leak or funnel.python_missing_rate > worst_python_leak[1]:
                    worst_python_leak = (theme, funnel.python_missing_rate, funnel.python_required - funnel.python_impl_count)

        if worst_python_leak:
            lines.append(f"⚠️ Biggest Python leak: '{worst_python_leak[0]}' ({worst_python_leak[2]} DTOs missing, {worst_python_leak[1]:.0f}% leak)")

        lines.append("")
        lines.append("💡 This funnel only shows DTOs required by actual producer/consumer wagons!")
        lines.append("")

        return "\n".join(lines)
|
|
3115
|
+
|
|
3116
|
+
|
|
3117
|
+
class SmartImplementationReportFormatter:
    """
    Adapter: Format smart implementation reports with producer/consumer awareness.

    Shows only required DTOs based on actual producer/consumer tech stacks:
    summary counts, incomplete contracts (with per-language MISSING/Implemented
    status and path suggestions), fully covered contracts, and key insights.
    """

    @staticmethod
    def format_report(requirements: List[ContractRequirements]) -> str:
        """Format smart implementation report.

        Args:
            requirements: Per-contract requirement records (requires_*/has_*
                flags, coverage_percentage, producer, consumers, suggestions).

        Returns:
            The full report as a single newline-joined string.
        """
        lines = []

        lines.append("=" * 80)
        lines.append("SMART IMPLEMENTATION TRACEABILITY (Producer/Consumer Aware)")
        lines.append("=" * 80)
        lines.append("")

        # Calculate statistics
        total = len(requirements)
        fully_covered = sum(1 for r in requirements if r.coverage_percentage == 100)
        partially_covered = sum(1 for r in requirements if 0 < r.coverage_percentage < 100)
        not_covered = sum(1 for r in requirements if r.coverage_percentage == 0)

        requires_python = sum(1 for r in requirements if r.requires_python)
        requires_dart = sum(1 for r in requirements if r.requires_dart)
        requires_ts = sum(1 for r in requirements if r.requires_typescript)

        # "has" counts only contracts that both require AND implement a language.
        has_python = sum(1 for r in requirements if r.has_python and r.requires_python)
        has_dart = sum(1 for r in requirements if r.has_dart and r.requires_dart)
        has_ts = sum(1 for r in requirements if r.has_typescript and r.requires_typescript)

        # Summary
        lines.append("📊 SUMMARY")
        lines.append(f" Total Contracts: {total}")
        lines.append(f" Fully Covered (100%): {fully_covered}")
        lines.append(f" Partially Covered: {partially_covered}")
        lines.append(f" Not Covered (0%): {not_covered}")
        lines.append("")
        lines.append(" REQUIREMENTS (Based on Producer/Consumer Tech Stacks):")
        lines.append(f" Python Required: {requires_python} contracts ({has_python} implemented, {requires_python - has_python} missing)")
        lines.append(f" Dart Required: {requires_dart} contracts ({has_dart} implemented, {requires_dart - has_dart} missing)")
        lines.append(f" TypeScript Required: {requires_ts} contracts ({has_ts} implemented, {requires_ts - has_ts} missing)")
        lines.append("")

        # Group by coverage — worst coverage first.
        incomplete = [r for r in requirements if r.coverage_percentage < 100]
        incomplete.sort(key=lambda r: r.coverage_percentage)

        if incomplete:
            lines.append("=" * 80)
            lines.append("⚠️ INCOMPLETE CONTRACTS (Missing Required DTOs)")
            lines.append("=" * 80)
            lines.append("")

            for req in incomplete[:20]:  # Show first 20
                coverage_emoji = "🔴" if req.coverage_percentage == 0 else "⚠️ "
                lines.append(f"{coverage_emoji} {req.contract_urn} ({req.coverage_percentage:.0f}% coverage)")
                lines.append(f" Schema: {req.schema_path}")
                lines.append(f" Producer: {req.producer or 'unknown'}")
                if req.consumers:
                    lines.append(f" Consumers: {', '.join(req.consumers)}")
                lines.append("")

                # Python requirements
                if req.requires_python:
                    if req.has_python:
                        lines.append(f" ✅ Python: Implemented")
                    else:
                        lines.append(f" ❌ Python: MISSING")
                        if req.python_path_suggestion:
                            lines.append(f" 💡 Create: {req.python_path_suggestion}")

                # Dart requirements
                if req.requires_dart:
                    if req.has_dart:
                        lines.append(f" ✅ Dart: Implemented")
                    else:
                        lines.append(f" ❌ Dart: MISSING")
                        if req.dart_path_suggestion:
                            lines.append(f" 💡 Create: {req.dart_path_suggestion}")

                # TypeScript requirements
                if req.requires_typescript:
                    if req.has_typescript:
                        lines.append(f" ✅ TypeScript: Implemented")
                    else:
                        lines.append(f" ❌ TypeScript: MISSING")
                        if req.typescript_path_suggestion:
                            lines.append(f" 💡 Create: {req.typescript_path_suggestion}")

                lines.append("")

            if len(incomplete) > 20:
                lines.append(f"... and {len(incomplete) - 20} more incomplete contracts")
                lines.append("")

        # Fully covered contracts
        complete = [r for r in requirements if r.coverage_percentage == 100]
        if complete:
            lines.append("=" * 80)
            lines.append(f"✅ FULLY COVERED CONTRACTS ({len(complete)})")
            lines.append("=" * 80)
            lines.append("")

            for req in complete[:10]:  # Show first 10
                lines.append(f"✅ {req.contract_urn}")
                lines.append(f" Producer: {req.producer or 'unknown'}")
                # Contract is fully covered, so required languages are implemented.
                impl_langs = []
                if req.requires_python:
                    impl_langs.append("Python")
                if req.requires_dart:
                    impl_langs.append("Dart")
                if req.requires_typescript:
                    impl_langs.append("TypeScript")
                lines.append(f" Implemented: {', '.join(impl_langs)}")
                lines.append("")

            if len(complete) > 10:
                lines.append(f"... and {len(complete) - 10} more fully covered contracts")
                lines.append("")

        # Key insights
        lines.append("=" * 80)
        lines.append("💡 KEY INSIGHTS")
        lines.append("=" * 80)
        lines.append("")

        # Guard each rate against division by zero when nothing is required.
        python_missing_rate = ((requires_python - has_python) / requires_python * 100) if requires_python > 0 else 0
        dart_missing_rate = ((requires_dart - has_dart) / requires_dart * 100) if requires_dart > 0 else 0
        ts_missing_rate = ((requires_ts - has_ts) / requires_ts * 100) if requires_ts > 0 else 0

        lines.append(f"⚠️ Python DTOs: {python_missing_rate:.1f}% of required DTOs are missing")
        lines.append(f"⚠️ Dart Entities: {dart_missing_rate:.1f}% of required entities are missing")
        lines.append(f"⚠️ TypeScript Interfaces: {ts_missing_rate:.1f}% of required interfaces are missing")
        lines.append("")
        lines.append("💡 This report only shows DTOs required by actual producer/consumer wagons!")
        lines.append("")

        return "\n".join(lines)
|
|
3256
|
+
|
|
3257
|
+
|
|
3258
|
+
class YAMLUpdater:
    """
    Adapter: Update YAML files pragmatically.

    Handles YAML file updates while preserving formatting by doing targeted
    text replacement instead of a parse/dump round trip.
    """

    def update_yaml_field(
        self,
        file_path: str,
        section: str,
        field_name: str,
        field_value: str,
        old_value: str = "null"
    ) -> bool:
        """
        Update a field in a YAML file.

        Replaces the first occurrence of ``{field_name}: {old_value}`` with
        ``{field_name}: {field_value}`` using plain string substitution,
        which preserves the file's original formatting.

        NOTE(review): ``section`` is currently not used to scope the
        replacement — the first matching ``field_name: old_value`` anywhere
        in the file is replaced, even if it lives under a different section.

        Args:
            file_path: Path to YAML file
            section: Section name (e.g., 'produce') — currently informational only
            field_name: Field to update (e.g., 'contract')
            field_value: New value
            old_value: Old value to replace (default: "null")

        Returns:
            True if the file exists and a replacement was actually made,
            False otherwise (missing file, pattern not found, or I/O error).
        """
        try:
            path = Path(file_path)

            if not path.exists():
                return False

            # Read file
            with open(path, 'r', encoding='utf-8') as f:
                content = f.read()

            # Replace using string manipulation (preserves formatting better than yaml.dump)
            # Pattern: "field_name: null" -> "field_name: value"
            pattern = f'{field_name}: {old_value}'
            replacement = f'{field_name}: {field_value}'

            # Bug fix: previously this returned True even when the pattern
            # was absent and nothing changed, masking failed fixes.
            if pattern not in content:
                return False

            updated_content = content.replace(pattern, replacement, 1)

            # Write back
            with open(path, 'w', encoding='utf-8') as f:
                f.write(updated_content)

            return True

        except Exception:
            # Best-effort adapter: any I/O or encoding failure reports False
            # rather than propagating to the CLI layer.
            return False
|
|
3311
|
+
|
|
3312
|
+
|
|
3313
|
+
class TraceabilityFixer:
    """
    Adapter: Apply fixes to wagon manifests.

    Pragmatically updates manifest files with user approval. Both public
    fix methods set a null field in the manifest's `produce` section via
    the shared YAMLUpdater.
    """

    def __init__(self):
        # Single updater instance shared by all fix operations.
        self.updater = YAMLUpdater()

    def apply_contract_fix(
        self,
        manifest_path: str,
        produce_name: str,
        contract_path: str
    ) -> bool:
        """
        Apply fix for missing contract reference.

        Updates wagon manifest YAML file to set contract field.
        """
        return self._set_produce_field(manifest_path, 'contract', contract_path)

    def apply_telemetry_fix(
        self,
        manifest_path: str,
        produce_name: str,
        telemetry_path: str
    ) -> bool:
        """
        Apply fix for missing telemetry reference.

        Updates wagon manifest YAML file to set telemetry field.
        """
        return self._set_produce_field(manifest_path, 'telemetry', telemetry_path)

    def _set_produce_field(self, manifest_path: str, field: str, value: str) -> bool:
        """Replace a null `produce` field in the manifest; return success."""
        return self.updater.update_yaml_field(
            file_path=manifest_path,
            section='produce',
            field_name=field,
            field_value=value,
            old_value='null',
        )
|
|
3360
|
+
|
|
3361
|
+
|
|
3362
|
+
# ============================================================================
|
|
3363
|
+
# LAYER 4: COMMAND (CLI Orchestration)
|
|
3364
|
+
# ============================================================================
|
|
3365
|
+
|
|
3366
|
+
|
|
3367
|
+
def _apply_reference_fixes(refs, by_wagon, apply_fix):
    """Apply one fixer callback per missing-reference record, printing per-item status.

    Args:
        refs: Missing-reference dicts with 'wagon', 'produce_name',
            'proposed_fix', and 'urn' keys.
        by_wagon: Mapping of wagon name -> wagon info dict containing
            'manifest_path'.
        apply_fix: Callable (manifest_path, produce_name, proposed_fix) -> bool.
    """
    for ref in refs:
        wagon = ref['wagon']
        manifest_path = by_wagon[wagon]['manifest_path']

        success = apply_fix(manifest_path, ref['produce_name'], ref['proposed_fix'])

        if success:
            print(f"✅ Fixed {wagon} -> {ref['urn']}")
        else:
            print(f"❌ Failed to fix {wagon} -> {ref['urn']}")


def run_reconciliation(report_only: bool = True):
    """
    Run traceability reconciliation command.

    Reconciles all wagons, prints the formatted report, and — unless in
    report-only mode — interactively offers to apply contract and
    telemetry reference fixes (prompts via input()).

    Args:
        report_only: If True, only show report. If False, prompt for fixes.
    """
    reconciler = TraceabilityReconciler()
    formatter = ReportFormatter()
    fixer = TraceabilityFixer()

    # Run reconciliation
    result = reconciler.reconcile_all()

    # Format and display report
    report = formatter.format_report(result)
    print(report)

    # If report_only mode (or nothing to fix), stop here
    if report_only or result.total_issues == 0:
        return

    # Interactive fix mode
    print("\n" + "=" * 70)
    print("🔧 FIX MODE")
    print("=" * 70)
    print()

    # Offer to fix contract references
    if result.missing_contract_refs:
        response = input(f"Fix {len(result.missing_contract_refs)} missing contract references? [y/N]: ")

        if response.lower() == 'y':
            _apply_reference_fixes(
                result.missing_contract_refs,
                result.by_wagon,
                lambda manifest, produce, fix: fixer.apply_contract_fix(
                    manifest_path=manifest,
                    produce_name=produce,
                    contract_path=fix,
                ),
            )

    # Offer to fix telemetry references
    if result.missing_telemetry_refs:
        response = input(f"Fix {len(result.missing_telemetry_refs)} missing telemetry references? [y/N]: ")

        if response.lower() == 'y':
            _apply_reference_fixes(
                result.missing_telemetry_refs,
                result.by_wagon,
                lambda manifest, produce, fix: fixer.apply_telemetry_fix(
                    manifest_path=manifest,
                    produce_name=produce,
                    telemetry_path=fix,
                ),
            )
|
|
3434
|
+
|
|
3435
|
+
|
|
3436
|
+
# ============================================================================
|
|
3437
|
+
# WMBT TEST TRACEABILITY VALIDATION
|
|
3438
|
+
# ============================================================================
|
|
3439
|
+
|
|
3440
|
+
|
|
3441
|
+
@dataclass
class WMBTTestFile:
    """
    A test file with WMBT code in its filename.

    Domain entity representing a test that traces to acceptance criteria.
    Instances are produced by WMBTTestFinder from parsed test filenames.
    """
    file_path: str  # Path to the test file, relative to the repo root
    wmbt_code: str  # e.g., "C004", "E001"
    harness: str  # e.g., "UNIT", "HTTP", "E2E"
    sequence: str  # e.g., "001", "019"
    slug: Optional[str] = None  # Optional human-readable suffix from the filename
    wagon: Optional[str] = None  # Owning wagon inferred from the file's path, if any
|
|
3455
|
+
|
|
3456
|
+
class WMBTTestFinder:
    """
    Use case: Find and parse test files using WMBT naming conventions.

    Test Naming Convention (from tester):
    Python: test_{wmbt_lower}_{harness_lower}_{nnn}[_{slug_snake}].py
    Dart: {WMBT}_{HARNESS}_{NNN}[_{slug_snake}]_test.dart
    TypeScript (backend): {wmbt}-{harness}-{nnn}[-{slug-kebab}].test.ts
    TypeScript (preact): {WMBT}_{HARNESS}_{NNN}[_{slug_snake}].test.ts[x]
    Go: {wmbt}_{harness}_{nnn}[_{slug_snake}]_test.go

    WMBT Format: {STEP}{NNN} where:
    - STEP: D|L|P|C|E|M|Y|R|K
    - NNN: 3-digit sequence (001-999)
    """

    # Test filename patterns by language.
    # NOTE(review): every pattern is compiled with re.IGNORECASE, so the
    # upper/lower-case distinctions written into the character classes
    # (e.g. [dlpcemyrk] vs [DLPCEMYRK]) are not actually enforced;
    # disambiguation relies on separators and suffixes — confirm intended.
    PYTHON_TEST_PATTERN = re.compile(
        r'^test_([dlpcemyrk]\d{3})_([a-z0-9]+)_(\d{3})(?:_([a-z0-9_]+))?\.py$',
        re.IGNORECASE
    )

    DART_TEST_PATTERN = re.compile(
        r'^([DLPCEMYRK]\d{3})_([A-Z0-9]+)_(\d{3})(?:_([a-z0-9_]+))?_test\.dart$',
        re.IGNORECASE
    )

    TS_TEST_PATTERN = re.compile(
        r'^([dlpcemyrk]\d{3})-([a-z0-9]+)-(\d{3})(?:-([a-z0-9-]+))?\.test\.ts$',
        re.IGNORECASE
    )

    TS_PREACT_TEST_PATTERN = re.compile(
        r'^([DLPCEMYRK]\d{3})_([A-Z0-9]+)_(\d{3})(?:_([a-z0-9_]+))?\.test\.ts(?:x)?$',
        re.IGNORECASE
    )

    GO_TEST_PATTERN = re.compile(
        r'^([dlpcemyrk]\d{3})_([a-z0-9]+)_(\d{3})(?:_([a-z0-9_]+))?_test\.go$',
        re.IGNORECASE
    )

    # URN pattern for Dart tests (in comments), e.g. "// urn: acc:theme:C004-UNIT-001"
    DART_URN_PATTERN = re.compile(
        r'//\s*urn:\s*acc:([a-z][a-z0-9-]*):([DLPCEMYRK]\d{3})-([A-Z0-9]+)-(\d{3})',
        re.IGNORECASE
    )
|
|
3503
|
+
|
|
3504
|
+
    def __init__(self, repo_root: Path = REPO_ROOT):
        """Create a finder rooted at repo_root (defaults to module-level REPO_ROOT)."""
        # All scan directories and relative paths are computed from this root.
        self.repo_root = repo_root
|
|
3506
|
+
|
|
3507
|
+
def find_all_test_files(self, languages: List[str] = None) -> List[WMBTTestFile]:
|
|
3508
|
+
"""
|
|
3509
|
+
Find all test files following WMBT naming convention.
|
|
3510
|
+
|
|
3511
|
+
Args:
|
|
3512
|
+
languages: List of languages to scan (default: ['python', 'dart', 'typescript', 'go'])
|
|
3513
|
+
|
|
3514
|
+
Returns:
|
|
3515
|
+
List of WMBTTestFile objects
|
|
3516
|
+
"""
|
|
3517
|
+
if languages is None:
|
|
3518
|
+
languages = ['python', 'dart', 'typescript', 'go']
|
|
3519
|
+
|
|
3520
|
+
test_files = []
|
|
3521
|
+
|
|
3522
|
+
for language in languages:
|
|
3523
|
+
if language == 'python':
|
|
3524
|
+
test_files.extend(self._find_python_tests())
|
|
3525
|
+
elif language == 'dart':
|
|
3526
|
+
test_files.extend(self._find_dart_tests())
|
|
3527
|
+
elif language in ['typescript', 'ts']:
|
|
3528
|
+
test_files.extend(self._find_typescript_tests())
|
|
3529
|
+
elif language == 'go':
|
|
3530
|
+
test_files.extend(self._find_go_tests())
|
|
3531
|
+
|
|
3532
|
+
return test_files
|
|
3533
|
+
|
|
3534
|
+
def _find_python_tests(self) -> List[WMBTTestFile]:
|
|
3535
|
+
"""Find Python test files."""
|
|
3536
|
+
test_files = []
|
|
3537
|
+
test_dirs = [
|
|
3538
|
+
self.repo_root / 'python' / 'tests',
|
|
3539
|
+
self.repo_root / 'python',
|
|
3540
|
+
self.repo_root / 'tests',
|
|
3541
|
+
]
|
|
3542
|
+
|
|
3543
|
+
for test_dir in test_dirs:
|
|
3544
|
+
if not test_dir.exists():
|
|
3545
|
+
continue
|
|
3546
|
+
|
|
3547
|
+
for test_file in test_dir.rglob('test_*.py'):
|
|
3548
|
+
parsed = self._parse_test_filename(test_file.name, 'python')
|
|
3549
|
+
if parsed:
|
|
3550
|
+
wagon = self._infer_wagon_from_path(test_file)
|
|
3551
|
+
test_files.append(WMBTTestFile(
|
|
3552
|
+
file_path=str(test_file.relative_to(self.repo_root)),
|
|
3553
|
+
wmbt_code=parsed['wmbt'],
|
|
3554
|
+
harness=parsed['harness'],
|
|
3555
|
+
sequence=parsed['nnn'],
|
|
3556
|
+
slug=parsed['slug'],
|
|
3557
|
+
wagon=wagon
|
|
3558
|
+
))
|
|
3559
|
+
|
|
3560
|
+
return test_files
|
|
3561
|
+
|
|
3562
|
+
def _find_dart_tests(self) -> List[WMBTTestFile]:
|
|
3563
|
+
"""DEPRECATED: Dart/Flutter frontend removed in SESSION-18."""
|
|
3564
|
+
return []
|
|
3565
|
+
|
|
3566
|
+
def _find_typescript_tests(self) -> List[WMBTTestFile]:
    """Find TypeScript test files (*.test.ts and *.test.tsx) under the usual roots."""
    found = []
    roots = (
        self.repo_root / 'src',
        self.repo_root / 'test',
        self.repo_root / 'tests',
        self.repo_root / 'web' / 'tests',
    )

    for root in roots:
        if not root.exists():
            continue

        # Plain .ts tests first, then .tsx (Preact) tests, per root.
        candidates = list(root.rglob('*.test.ts')) + list(root.rglob('*.test.tsx'))
        for candidate in candidates:
            parsed = self._parse_test_filename(candidate.name, 'typescript')
            if not parsed:
                continue
            found.append(WMBTTestFile(
                file_path=str(candidate.relative_to(self.repo_root)),
                wmbt_code=parsed['wmbt'],
                harness=parsed['harness'],
                sequence=parsed['nnn'],
                slug=parsed['slug'],
                wagon=self._infer_wagon_from_path(candidate)
            ))

    return found
def _find_go_tests(self) -> List[WMBTTestFile]:
    """Find Go test files (*_test.go) under pkg/, internal/, and test/."""
    found = []
    roots = (
        self.repo_root / 'pkg',
        self.repo_root / 'internal',
        self.repo_root / 'test',
    )

    for root in roots:
        if not root.exists():
            continue

        for candidate in root.rglob('*_test.go'):
            parsed = self._parse_test_filename(candidate.name, 'go')
            if not parsed:
                continue
            found.append(WMBTTestFile(
                file_path=str(candidate.relative_to(self.repo_root)),
                wmbt_code=parsed['wmbt'],
                harness=parsed['harness'],
                sequence=parsed['nnn'],
                slug=parsed['slug'],
                wagon=self._infer_wagon_from_path(candidate)
            ))

    return found
def _parse_test_filename(self, filename: str, language: str) -> Optional[Dict[str, str]]:
|
|
3625
|
+
"""
|
|
3626
|
+
Parse test filename to extract WMBT code, harness, and sequence.
|
|
3627
|
+
|
|
3628
|
+
Args:
|
|
3629
|
+
filename: Test filename
|
|
3630
|
+
language: Language (python, dart, typescript, go)
|
|
3631
|
+
|
|
3632
|
+
Returns:
|
|
3633
|
+
Dict with keys: wmbt, harness, nnn, slug (or None if invalid)
|
|
3634
|
+
"""
|
|
3635
|
+
patterns = {
|
|
3636
|
+
'python': self.PYTHON_TEST_PATTERN,
|
|
3637
|
+
'dart': self.DART_TEST_PATTERN,
|
|
3638
|
+
'typescript': [self.TS_TEST_PATTERN, self.TS_PREACT_TEST_PATTERN],
|
|
3639
|
+
'ts': [self.TS_TEST_PATTERN, self.TS_PREACT_TEST_PATTERN],
|
|
3640
|
+
'go': self.GO_TEST_PATTERN,
|
|
3641
|
+
}
|
|
3642
|
+
|
|
3643
|
+
pattern = patterns.get(language)
|
|
3644
|
+
if not pattern:
|
|
3645
|
+
return None
|
|
3646
|
+
|
|
3647
|
+
if isinstance(pattern, list):
|
|
3648
|
+
match = None
|
|
3649
|
+
for candidate in pattern:
|
|
3650
|
+
match = candidate.match(filename)
|
|
3651
|
+
if match:
|
|
3652
|
+
break
|
|
3653
|
+
if not match:
|
|
3654
|
+
return None
|
|
3655
|
+
else:
|
|
3656
|
+
match = pattern.match(filename)
|
|
3657
|
+
if not match:
|
|
3658
|
+
return None
|
|
3659
|
+
|
|
3660
|
+
wmbt, harness, nnn, slug = match.groups()
|
|
3661
|
+
|
|
3662
|
+
# Normalize to uppercase
|
|
3663
|
+
return {
|
|
3664
|
+
'wmbt': wmbt.upper(),
|
|
3665
|
+
'harness': harness.upper(),
|
|
3666
|
+
'nnn': nnn,
|
|
3667
|
+
'slug': slug
|
|
3668
|
+
}
|
|
3669
|
+
|
|
3670
|
+
def _parse_dart_urn_comment(self, test_file: Path) -> Optional[Dict[str, str]]:
|
|
3671
|
+
"""DEPRECATED: Dart/Flutter frontend removed in SESSION-18."""
|
|
3672
|
+
return None
|
|
3673
|
+
|
|
3674
|
+
def _infer_wagon_from_path(self, test_file: Path) -> Optional[str]:
|
|
3675
|
+
"""
|
|
3676
|
+
Infer wagon name from test file path.
|
|
3677
|
+
|
|
3678
|
+
Common patterns:
|
|
3679
|
+
- python/tests/{wagon}/test_*.py
|
|
3680
|
+
- python/{wagon}/tests/test_*.py
|
|
3681
|
+
"""
|
|
3682
|
+
parts = test_file.parts
|
|
3683
|
+
|
|
3684
|
+
# Look for wagon directory names
|
|
3685
|
+
for i, part in enumerate(parts):
|
|
3686
|
+
if part in ['tests', 'test']:
|
|
3687
|
+
# Check previous part
|
|
3688
|
+
if i > 0:
|
|
3689
|
+
candidate = parts[i - 1]
|
|
3690
|
+
# Convert to wagon slug format
|
|
3691
|
+
return candidate.replace('_', '-')
|
|
3692
|
+
# Check next part
|
|
3693
|
+
elif i < len(parts) - 1:
|
|
3694
|
+
candidate = parts[i + 1]
|
|
3695
|
+
if candidate not in ['test', 'tests'] and not candidate.endswith('.py'):
|
|
3696
|
+
return candidate.replace('_', '-')
|
|
3697
|
+
|
|
3698
|
+
return None
|
|
3699
|
+
|
|
3700
|
+
|
|
3701
|
+
class WMBTAcceptanceParser:
    """
    Use case: Parse acceptance criteria files to extract WMBT codes.

    Finds WMBT codes declared in wagon acceptance YAML files.
    """

    # A WMBT code is one category letter followed by three digits, e.g. C004.
    WMBT_PATTERN = re.compile(r'^([DLPCEMYRK])(\d{3})$')

    def __init__(self, plan_dir: Path = PLAN_DIR):
        self.plan_dir = plan_dir

    def extract_wmbt_codes_from_wagons_yaml(self) -> Dict[str, List[str]]:
        """
        Extract WMBT codes from the main _wagons.yaml manifest.

        Returns:
            Dict mapping wagon names to their sorted, de-duplicated WMBT
            codes. Empty when the manifest is absent; best-effort (possibly
            partial) when the manifest is malformed.
        """
        wagon_to_codes: Dict[str, List[str]] = {}
        manifest_path = self.plan_dir / '_wagons.yaml'

        if not manifest_path.exists():
            return wagon_to_codes

        try:
            with open(manifest_path, 'r') as handle:
                manifest = yaml.safe_load(handle)

            for entry in manifest.get('wagons', []):
                wagon_name = entry.get('wagon')
                if not wagon_name:
                    continue

                codes = []
                wmbt_section = entry.get('wmbt', {})
                # Some wagons list WMBT codes directly as mapping keys;
                # metadata keys like 'total' or 'coverage' won't match.
                if isinstance(wmbt_section, dict):
                    codes = [key for key in wmbt_section if self.WMBT_PATTERN.match(key)]

                wagon_to_codes[wagon_name] = sorted(set(codes))

        except Exception:
            # Best-effort: a malformed manifest yields whatever was parsed.
            pass

        return wagon_to_codes

    def extract_wmbt_codes_from_wagon_dir(self, wagon_slug: str) -> List[str]:
        """
        Extract WMBT codes from a wagon's own YAML files.

        Args:
            wagon_slug: Wagon slug (e.g., 'maintain-ux')

        Returns:
            Sorted, de-duplicated list of WMBT codes found; empty when the
            wagon directory does not exist.
        """
        collected: List[str] = []
        wagon_dir = self.plan_dir / wagon_slug

        if not (wagon_dir.exists() and wagon_dir.is_dir()):
            return collected

        # Every *.yaml except _*.yaml manifests.
        spec_files = [p for p in wagon_dir.glob('*.yaml') if not p.name.startswith('_')]

        for spec_file in spec_files:
            try:
                with open(spec_file, 'r') as handle:
                    doc = yaml.safe_load(handle)

                if not isinstance(doc, dict):
                    continue

                # WMBT codes may appear as top-level keys (e.g. C004, E001)...
                collected.extend(key for key in doc if self.WMBT_PATTERN.match(key))

                # ...or nested under a known acceptance-style section.
                for section_name in ('acceptance', 'wmbt', 'criteria', 'acceptance_criteria'):
                    section = doc.get(section_name, {})
                    if isinstance(section, dict):
                        collected.extend(key for key in section if self.WMBT_PATTERN.match(key))

            except Exception:
                # Best-effort per file: skip unreadable/invalid YAML.
                pass

        return sorted(set(collected))
class WMBTTraceabilityValidator:
    """
    Use case: Validate WMBT test traceability.

    Ensures each test traces to an acceptance criterion via WMBT code.
    """

    def __init__(self):
        # Collaborators: discover WMBT-named tests / declared WMBT codes.
        self.test_finder = WMBTTestFinder()
        self.acceptance_parser = WMBTAcceptanceParser()

    def validate_all(self) -> Dict[str, any]:
        """
        Run full WMBT traceability validation.

        Cross-references every WMBT-named test file against the WMBT codes
        declared in wagon acceptance criteria (manifest plus wagon dirs).

        Returns:
            Dict with keys: total_tests, valid_tests, orphaned_tests,
            missing_tests, coverage, tests_by_wmbt, wagon_wmbts, by_language.
        """
        # Find all test files
        test_files = self.test_finder.find_all_test_files()

        # Track counts by language, keyed off the file extension.
        by_language = defaultdict(int)
        for test in test_files:
            if test.file_path.endswith('.py'):
                by_language['python'] += 1
            elif test.file_path.endswith('.dart'):
                by_language['dart'] += 1
            elif test.file_path.endswith(('.ts', '.tsx')):
                # BUGFIX: include .tsx — *.test.tsx files are discovered by
                # the TypeScript finder but previously went uncounted here.
                by_language['typescript'] += 1
            elif test.file_path.endswith('.go'):
                by_language['go'] += 1

        # WMBT codes declared in the wagons manifest...
        wagon_wmbts_manifest = self.acceptance_parser.extract_wmbt_codes_from_wagons_yaml()

        # ...merged with codes found in each wagon's own directory.
        wagon_wmbts = {}
        for wagon_name in wagon_wmbts_manifest.keys():
            wagon_slug = wagon_name.replace('_', '-')
            wmbts_from_dir = self.acceptance_parser.extract_wmbt_codes_from_wagon_dir(wagon_slug)

            combined = set(wagon_wmbts_manifest.get(wagon_name, []))
            combined.update(wmbts_from_dir)
            wagon_wmbts[wagon_name] = sorted(combined)

        # Flatten all declared WMBT codes for membership tests.
        all_wmbts = set()
        for wmbts in wagon_wmbts.values():
            all_wmbts.update(wmbts)

        # Classify each test as valid (declared WMBT) or orphaned.
        orphaned_tests = []
        valid_tests = []
        tests_by_wmbt = defaultdict(list)

        for test in test_files:
            wmbt = test.wmbt_code
            tests_by_wmbt[wmbt].append(test.file_path)

            if wmbt not in all_wmbts:
                orphaned_tests.append({
                    'file': test.file_path,
                    'wmbt': wmbt,
                    'harness': test.harness,
                    'wagon': test.wagon,
                    'reason': 'Test file has WMBT code not found in any acceptance criteria'
                })
            else:
                valid_tests.append(test.file_path)

        # Declared WMBT codes with no test file at all.
        missing_tests = []
        for wmbt in sorted(all_wmbts):
            if wmbt not in tests_by_wmbt:
                # Find which wagon(s) own this WMBT
                owner_wagons = [w for w, wmbts in wagon_wmbts.items() if wmbt in wmbts]
                missing_tests.append({
                    'wmbt': wmbt,
                    'wagons': owner_wagons,
                    'reason': 'Acceptance criterion exists but no test file found'
                })

        # Coverage = share of tests whose WMBT code is declared somewhere.
        total_tests = len(test_files)
        valid_count = len(valid_tests)
        coverage = valid_count / total_tests if total_tests > 0 else 1.0

        return {
            'total_tests': total_tests,
            'valid_tests': valid_count,
            'orphaned_tests': orphaned_tests,
            'missing_tests': missing_tests,
            'coverage': coverage,
            'tests_by_wmbt': dict(tests_by_wmbt),
            'wagon_wmbts': wagon_wmbts,
            'by_language': dict(by_language)
        }
def format_wmbt_traceability_report(result: Dict[str, any]) -> str:
    """
    Render a WMBT traceability validation result as a printable report.

    Args:
        result: Validation result from WMBTTraceabilityValidator.validate_all()

    Returns:
        Multi-line report string.
    """
    bar = "=" * 70
    lines: List[str] = []
    add = lines.append

    lines.extend([bar, "WMBT TEST TRACEABILITY VALIDATION", bar, ""])

    # Headline numbers
    add("📊 SUMMARY")
    add(f"  Total Test Files: {result['total_tests']}")
    add(f"  Valid Tests: {result['valid_tests']}")
    add(f"  Orphaned Tests: {len(result['orphaned_tests'])}")
    add(f"  Missing Tests: {len(result['missing_tests'])}")
    add(f"  Coverage: {result['coverage']:.1%}")
    add("")

    # Per-language test counts (key is optional in older results)
    if 'by_language' in result:
        add("📚 BY LANGUAGE")
        for lang, count in sorted(result['by_language'].items()):
            add(f"  {lang.capitalize()}: {count} test(s)")
        add("")

    # Tests whose WMBT code has no acceptance criterion
    if result['orphaned_tests']:
        lines.extend([bar, "🔴 ORPHANED TESTS (No Acceptance Criteria)", bar, ""])
        add("These test files follow WMBT naming convention but their WMBT code")
        add("is not found in any wagon's acceptance criteria.")
        add("")

        for orphan in result['orphaned_tests']:
            add(f"File: {orphan['file']}")
            add(f"  WMBT: {orphan['wmbt']}")
            add(f"  Harness: {orphan['harness']}")
            if orphan['wagon']:
                add(f"  Inferred Wagon: {orphan['wagon']}")
            add(f"  Issue: {orphan['reason']}")
            add(f"  💡 FIX: Either add {orphan['wmbt']} to wagon acceptance criteria")
            add(f"         or rename test file to use valid WMBT code")
            add("")

    # Acceptance criteria with no matching test file
    if result['missing_tests']:
        lines.extend([bar, "⚠️  MISSING TESTS (Acceptance Criteria Without Tests)", bar, ""])
        add("These WMBT codes are declared in acceptance criteria but no")
        add("test files were found following the naming convention.")
        add("")

        for missing in result['missing_tests']:
            add(f"WMBT: {missing['wmbt']}")
            add(f"  Wagons: {', '.join(missing['wagons'])}")
            add(f"  Issue: {missing['reason']}")
            add(f"  💡 FIX: Create test file following convention:")
            add(f"     test_{missing['wmbt'].lower()}_{{harness}}_{{nnn}}.py")
            add(f"     Example: test_{missing['wmbt'].lower()}_unit_001.py")
            add("")

    # Inventory of test files grouped by WMBT code
    if result['tests_by_wmbt']:
        lines.extend([bar, "📋 TEST COVERAGE BY WMBT", bar, ""])

        for wmbt in sorted(result['tests_by_wmbt'].keys()):
            tests = result['tests_by_wmbt'][wmbt]
            add(f"{wmbt}: {len(tests)} test(s)")
            for test in tests:
                add(f"  - {test}")
            add("")

    return "\n".join(lines)
def run_wmbt_traceability_validation(verbose: bool = True):
    """
    Run the WMBT test traceability validation command.

    Args:
        verbose: Accepted for CLI symmetry; the report is always printed.

    Returns:
        1 when orphaned or missing tests exist, else 0.
    """
    validator = WMBTTraceabilityValidator()
    result = validator.validate_all()

    print(format_wmbt_traceability_report(result))

    # Any orphan or gap fails the check.
    return 1 if (result['orphaned_tests'] or result['missing_tests']) else 0
def run_implementation_reconciliation(verbose: bool = True):
    """
    Run the contract implementation reconciliation command.

    Args:
        verbose: Show detailed report (default: True)

    Returns:
        1 when the reconciler reported any issue, else 0.
    """
    reconciler = ContractImplementationReconciler()
    formatter = ImplementationReportFormatter()

    outcome = reconciler.reconcile_all()
    print(formatter.format_report(outcome))

    return 1 if outcome.total_issues > 0 else 0
def run_funnel_analysis(verbose: bool = True):
    """
    Run the traceability funnel analysis command.

    Args:
        verbose: Show detailed report (default: True)

    Returns:
        Always 0 — funnel output is informational, not pass/fail.
    """
    analyzer = FunnelAnalyzer()
    formatter = FunnelReportFormatter()

    funnel = analyzer.analyze_funnel()
    print(formatter.format_report(funnel))

    return 0
def run_smart_reconciliation(verbose: bool = True):
    """
    Run smart implementation reconciliation (producer/consumer aware).

    Args:
        verbose: Show detailed report (default: True)

    Returns:
        1 when any requirement is not fully covered, else 0.
    """
    reconciler = SmartImplementationReconciler()
    formatter = SmartImplementationReportFormatter()

    requirements = reconciler.reconcile_smart()
    print(formatter.format_report(requirements))

    incomplete = sum(1 for req in requirements if req.coverage_percentage < 100)
    return 1 if incomplete > 0 else 0
def run_smart_funnel(verbose: bool = True):
    """
    Run smart funnel analysis (producer/consumer aware).

    Args:
        verbose: Show detailed report (default: True)

    Returns:
        Always 0 — funnel output is informational, not pass/fail.
    """
    analyzer = SmartFunnelAnalyzer()
    formatter = SmartFunnelReportFormatter()

    funnel = analyzer.analyze_smart_funnel()
    print(formatter.format_report(funnel))

    return 0
if __name__ == "__main__":
    import sys

    argv = sys.argv
    report_only = '--report' in argv or '-r' in argv

    # Mode flags are checked in fixed precedence; first match wins.
    if '--smart-funnel' in argv:
        # Smart funnel analysis (producer/consumer aware, with visualization)
        sys.exit(run_smart_funnel(verbose=True))
    elif '--smart' in argv:
        # Smart implementation reconciliation (producer/consumer aware)
        sys.exit(run_smart_reconciliation(verbose=True))
    elif '--funnel' in argv:
        # Traceability funnel analysis
        sys.exit(run_funnel_analysis(verbose=True))
    elif '--impl' in argv or '--implementations' in argv:
        # Contract implementation reconciliation
        sys.exit(run_implementation_reconciliation(verbose=True))
    elif '--wmbt' in argv or '--test-traceability' in argv:
        # WMBT test traceability validation
        sys.exit(run_wmbt_traceability_validation(verbose=True))
    else:
        # Default: contract/telemetry reconciliation
        run_reconciliation(report_only=report_only)
|
|
4129
|
+
# ============================================================================
|
|
4130
|
+
# TRAIN URN VALIDATION
|
|
4131
|
+
# ============================================================================
|
|
4132
|
+
|
|
4133
|
+
def validate_train_urns(verbose: bool = False) -> Dict[str, any]:
    """
    Validate train URNs in theme orchestrators.

    Checks:
    - Theme orchestrators in python/shared/ have train URNs
    - URN format: train:{theme}:{train_id}
    - Referenced train specs exist in plan/_trains/

    URNs are read from each orchestrator's leading comment header;
    scanning stops at the first non-comment line once at least one URN
    has been found.

    Args:
        verbose: When True, print a human-readable summary to stdout.

    Returns:
        Dict with validation results: orchestrators_found, missing_urns,
        invalid_format, missing_specs, valid_urns.
    """
    # NOTE(review): `re` is re-imported locally although the module-level
    # code already uses it (WMBT_PATTERN); redundant but harmless.
    import re

    shared_dir = REPO_ROOT / "python" / "shared"
    trains_dir = REPO_ROOT / "plan" / "_trains"

    results = {
        'orchestrators_found': [],
        'missing_urns': [],
        'invalid_format': [],
        'missing_specs': [],
        'valid_urns': []
    }

    # No orchestrator directory -> nothing to validate.
    if not shared_dir.exists():
        return results

    # Find all train specs (spec id == YAML filename stem)
    train_specs = set()
    if trains_dir.exists():
        for yaml_file in trains_dir.glob("*.yaml"):
            train_specs.add(yaml_file.stem)

    # Check each theme orchestrator
    for py_file in shared_dir.glob("*.py"):
        # Package plumbing files are not orchestrators.
        if py_file.name in ["__init__.py", "conftest.py"]:
            continue

        results['orchestrators_found'].append(py_file.name)

        # Extract train URNs from the file's comment header.
        urns = []
        with open(py_file, 'r', encoding='utf-8') as f:
            for line in f:
                stripped = line.strip()
                if not stripped or not stripped.startswith('#'):
                    if urns:  # Stop after header section
                        break
                    continue

                # Expected comment form: "# urn: train:{theme}:{train_id}"
                match = re.match(r'#\s*urn:\s*train:([^:]+):(.+)', stripped)
                if match:
                    theme = match.group(1)
                    train_id = match.group(2).strip()
                    urns.append((theme, train_id, f"train:{theme}:{train_id}"))

        if not urns:
            results['missing_urns'].append({
                'file': py_file.name,
                'message': f"No train URNs found in {py_file.name}"
            })
            continue

        # Validate each URN. Note the checks below are independent: a URN
        # can be flagged for format AND still be classified valid/missing.
        theme_from_filename = py_file.stem
        for theme, train_id, full_urn in urns:
            # Check theme matches filename (shared/{theme}.py owns train:{theme}:*)
            if theme != theme_from_filename:
                results['invalid_format'].append({
                    'file': py_file.name,
                    'urn': full_urn,
                    'issue': f"Theme '{theme}' doesn't match filename '{theme_from_filename}.py'"
                })

            # Check train_id format (e.g. "0003-build-registry")
            if not re.match(r'^\d{4}-[a-z][a-z0-9-]*$', train_id):
                results['invalid_format'].append({
                    'file': py_file.name,
                    'urn': full_urn,
                    'issue': f"Train ID '{train_id}' doesn't match pattern DDDD-kebab-case"
                })

            # Check train spec exists
            if train_id not in train_specs:
                results['missing_specs'].append({
                    'file': py_file.name,
                    'urn': full_urn,
                    'train_id': train_id,
                    'expected_path': f"plan/_trains/{train_id}.yaml"
                })
            else:
                results['valid_urns'].append({
                    'file': py_file.name,
                    'urn': full_urn,
                    'spec': f"plan/_trains/{train_id}.yaml"
                })

    # Print summary if verbose
    if verbose:
        print("\n" + "=" * 80)
        print("TRAIN URN VALIDATION")
        print("=" * 80)
        print(f"\nOrchestrators found: {len(results['orchestrators_found'])}")
        print(f"Valid URNs: {len(results['valid_urns'])}")
        print(f"Missing URNs: {len(results['missing_urns'])}")
        print(f"Invalid format: {len(results['invalid_format'])}")
        print(f"Missing specs: {len(results['missing_specs'])}")

        if results['valid_urns']:
            print("\n✅ Valid train URNs:")
            for item in results['valid_urns']:
                print(f"  {item['file']}: {item['urn']}")

        if results['missing_urns']:
            print("\n❌ Missing train URNs:")
            for item in results['missing_urns']:
                print(f"  {item['file']}: {item['message']}")

        if results['invalid_format']:
            print("\n❌ Invalid format:")
            for item in results['invalid_format']:
                print(f"  {item['file']}: {item['urn']}")
                print(f"    Issue: {item['issue']}")

        if results['missing_specs']:
            print("\n❌ Missing train specs:")
            for item in results['missing_specs']:
                print(f"  {item['file']}: {item['urn']}")
                print(f"    Expected: {item['expected_path']}")

    return results