atdd 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atdd/__init__.py +6 -0
- atdd/__main__.py +4 -0
- atdd/cli.py +404 -0
- atdd/coach/__init__.py +0 -0
- atdd/coach/commands/__init__.py +0 -0
- atdd/coach/commands/add_persistence_metadata.py +215 -0
- atdd/coach/commands/analyze_migrations.py +188 -0
- atdd/coach/commands/consumers.py +720 -0
- atdd/coach/commands/infer_governance_status.py +149 -0
- atdd/coach/commands/initializer.py +177 -0
- atdd/coach/commands/interface.py +1078 -0
- atdd/coach/commands/inventory.py +565 -0
- atdd/coach/commands/migration.py +240 -0
- atdd/coach/commands/registry.py +1560 -0
- atdd/coach/commands/session.py +430 -0
- atdd/coach/commands/sync.py +405 -0
- atdd/coach/commands/test_interface.py +399 -0
- atdd/coach/commands/test_runner.py +141 -0
- atdd/coach/commands/tests/__init__.py +1 -0
- atdd/coach/commands/tests/test_telemetry_array_validation.py +235 -0
- atdd/coach/commands/traceability.py +4264 -0
- atdd/coach/conventions/session.convention.yaml +754 -0
- atdd/coach/overlays/__init__.py +2 -0
- atdd/coach/overlays/claude.md +2 -0
- atdd/coach/schemas/config.schema.json +34 -0
- atdd/coach/schemas/manifest.schema.json +101 -0
- atdd/coach/templates/ATDD.md +282 -0
- atdd/coach/templates/SESSION-TEMPLATE.md +327 -0
- atdd/coach/utils/__init__.py +0 -0
- atdd/coach/utils/graph/__init__.py +0 -0
- atdd/coach/utils/graph/urn.py +875 -0
- atdd/coach/validators/__init__.py +0 -0
- atdd/coach/validators/shared_fixtures.py +365 -0
- atdd/coach/validators/test_enrich_wagon_registry.py +167 -0
- atdd/coach/validators/test_registry.py +575 -0
- atdd/coach/validators/test_session_validation.py +1183 -0
- atdd/coach/validators/test_traceability.py +448 -0
- atdd/coach/validators/test_update_feature_paths.py +108 -0
- atdd/coach/validators/test_validate_contract_consumers.py +297 -0
- atdd/coder/__init__.py +1 -0
- atdd/coder/conventions/adapter.recipe.yaml +88 -0
- atdd/coder/conventions/backend.convention.yaml +460 -0
- atdd/coder/conventions/boundaries.convention.yaml +666 -0
- atdd/coder/conventions/commons.convention.yaml +460 -0
- atdd/coder/conventions/complexity.recipe.yaml +109 -0
- atdd/coder/conventions/component-naming.convention.yaml +178 -0
- atdd/coder/conventions/design.convention.yaml +327 -0
- atdd/coder/conventions/design.recipe.yaml +273 -0
- atdd/coder/conventions/dto.convention.yaml +660 -0
- atdd/coder/conventions/frontend.convention.yaml +542 -0
- atdd/coder/conventions/green.convention.yaml +1012 -0
- atdd/coder/conventions/presentation.convention.yaml +587 -0
- atdd/coder/conventions/refactor.convention.yaml +535 -0
- atdd/coder/conventions/technology.convention.yaml +206 -0
- atdd/coder/conventions/tests/__init__.py +0 -0
- atdd/coder/conventions/tests/test_adapter_recipe.py +302 -0
- atdd/coder/conventions/tests/test_complexity_recipe.py +289 -0
- atdd/coder/conventions/tests/test_component_taxonomy.py +278 -0
- atdd/coder/conventions/tests/test_component_urn_naming.py +165 -0
- atdd/coder/conventions/tests/test_thinness_recipe.py +286 -0
- atdd/coder/conventions/thinness.recipe.yaml +82 -0
- atdd/coder/conventions/train.convention.yaml +325 -0
- atdd/coder/conventions/verification.protocol.yaml +53 -0
- atdd/coder/schemas/design_system.schema.json +361 -0
- atdd/coder/validators/__init__.py +0 -0
- atdd/coder/validators/test_commons_structure.py +485 -0
- atdd/coder/validators/test_complexity.py +416 -0
- atdd/coder/validators/test_cross_language_consistency.py +431 -0
- atdd/coder/validators/test_design_system_compliance.py +413 -0
- atdd/coder/validators/test_dto_testing_patterns.py +268 -0
- atdd/coder/validators/test_green_cross_stack_layers.py +168 -0
- atdd/coder/validators/test_green_layer_dependencies.py +148 -0
- atdd/coder/validators/test_green_python_layer_structure.py +103 -0
- atdd/coder/validators/test_green_supabase_layer_structure.py +103 -0
- atdd/coder/validators/test_import_boundaries.py +396 -0
- atdd/coder/validators/test_init_file_urns.py +593 -0
- atdd/coder/validators/test_preact_layer_boundaries.py +221 -0
- atdd/coder/validators/test_presentation_convention.py +260 -0
- atdd/coder/validators/test_python_architecture.py +674 -0
- atdd/coder/validators/test_quality_metrics.py +420 -0
- atdd/coder/validators/test_station_master_pattern.py +244 -0
- atdd/coder/validators/test_train_infrastructure.py +454 -0
- atdd/coder/validators/test_train_urns.py +293 -0
- atdd/coder/validators/test_typescript_architecture.py +616 -0
- atdd/coder/validators/test_usecase_structure.py +421 -0
- atdd/coder/validators/test_wagon_boundaries.py +586 -0
- atdd/conftest.py +126 -0
- atdd/planner/__init__.py +1 -0
- atdd/planner/conventions/acceptance.convention.yaml +538 -0
- atdd/planner/conventions/appendix.convention.yaml +187 -0
- atdd/planner/conventions/artifact-naming.convention.yaml +852 -0
- atdd/planner/conventions/component.convention.yaml +670 -0
- atdd/planner/conventions/criteria.convention.yaml +141 -0
- atdd/planner/conventions/feature.convention.yaml +371 -0
- atdd/planner/conventions/interface.convention.yaml +382 -0
- atdd/planner/conventions/steps.convention.yaml +141 -0
- atdd/planner/conventions/train.convention.yaml +552 -0
- atdd/planner/conventions/wagon.convention.yaml +275 -0
- atdd/planner/conventions/wmbt.convention.yaml +258 -0
- atdd/planner/schemas/acceptance.schema.json +336 -0
- atdd/planner/schemas/appendix.schema.json +78 -0
- atdd/planner/schemas/component.schema.json +114 -0
- atdd/planner/schemas/feature.schema.json +197 -0
- atdd/planner/schemas/train.schema.json +192 -0
- atdd/planner/schemas/wagon.schema.json +281 -0
- atdd/planner/schemas/wmbt.schema.json +59 -0
- atdd/planner/validators/__init__.py +0 -0
- atdd/planner/validators/conftest.py +5 -0
- atdd/planner/validators/test_draft_wagon_registry.py +374 -0
- atdd/planner/validators/test_plan_cross_refs.py +240 -0
- atdd/planner/validators/test_plan_uniqueness.py +224 -0
- atdd/planner/validators/test_plan_urn_resolution.py +268 -0
- atdd/planner/validators/test_plan_wagons.py +174 -0
- atdd/planner/validators/test_train_validation.py +514 -0
- atdd/planner/validators/test_wagon_urn_chain.py +648 -0
- atdd/planner/validators/test_wmbt_consistency.py +327 -0
- atdd/planner/validators/test_wmbt_vocabulary.py +632 -0
- atdd/tester/__init__.py +1 -0
- atdd/tester/conventions/artifact.convention.yaml +257 -0
- atdd/tester/conventions/contract.convention.yaml +1009 -0
- atdd/tester/conventions/filename.convention.yaml +555 -0
- atdd/tester/conventions/migration.convention.yaml +509 -0
- atdd/tester/conventions/red.convention.yaml +797 -0
- atdd/tester/conventions/routing.convention.yaml +51 -0
- atdd/tester/conventions/telemetry.convention.yaml +458 -0
- atdd/tester/schemas/a11y.tmpl.json +17 -0
- atdd/tester/schemas/artifact.schema.json +189 -0
- atdd/tester/schemas/contract.schema.json +591 -0
- atdd/tester/schemas/contract.tmpl.json +95 -0
- atdd/tester/schemas/db.tmpl.json +20 -0
- atdd/tester/schemas/e2e.tmpl.json +17 -0
- atdd/tester/schemas/edge_function.tmpl.json +17 -0
- atdd/tester/schemas/event.tmpl.json +17 -0
- atdd/tester/schemas/http.tmpl.json +19 -0
- atdd/tester/schemas/job.tmpl.json +18 -0
- atdd/tester/schemas/load.tmpl.json +21 -0
- atdd/tester/schemas/metric.tmpl.json +19 -0
- atdd/tester/schemas/pack.schema.json +139 -0
- atdd/tester/schemas/realtime.tmpl.json +20 -0
- atdd/tester/schemas/rls.tmpl.json +18 -0
- atdd/tester/schemas/script.tmpl.json +16 -0
- atdd/tester/schemas/sec.tmpl.json +18 -0
- atdd/tester/schemas/storage.tmpl.json +18 -0
- atdd/tester/schemas/telemetry.schema.json +128 -0
- atdd/tester/schemas/telemetry_tracking_manifest.schema.json +143 -0
- atdd/tester/schemas/test_filename.schema.json +194 -0
- atdd/tester/schemas/test_intent.schema.json +179 -0
- atdd/tester/schemas/unit.tmpl.json +18 -0
- atdd/tester/schemas/visual.tmpl.json +18 -0
- atdd/tester/schemas/ws.tmpl.json +17 -0
- atdd/tester/utils/__init__.py +0 -0
- atdd/tester/utils/filename.py +300 -0
- atdd/tester/validators/__init__.py +0 -0
- atdd/tester/validators/cleanup_duplicate_headers.py +116 -0
- atdd/tester/validators/cleanup_duplicate_headers_v2.py +135 -0
- atdd/tester/validators/conftest.py +5 -0
- atdd/tester/validators/coverage_gap_report.py +321 -0
- atdd/tester/validators/fix_dual_ac_references.py +179 -0
- atdd/tester/validators/remove_duplicate_lines.py +93 -0
- atdd/tester/validators/test_acceptance_urn_filename_mapping.py +359 -0
- atdd/tester/validators/test_acceptance_urn_separator.py +166 -0
- atdd/tester/validators/test_artifact_naming_category.py +307 -0
- atdd/tester/validators/test_contract_schema_compliance.py +706 -0
- atdd/tester/validators/test_contracts_structure.py +200 -0
- atdd/tester/validators/test_coverage_adequacy.py +797 -0
- atdd/tester/validators/test_dual_ac_reference.py +225 -0
- atdd/tester/validators/test_fixture_validity.py +372 -0
- atdd/tester/validators/test_isolation.py +487 -0
- atdd/tester/validators/test_migration_coverage.py +204 -0
- atdd/tester/validators/test_migration_criteria.py +276 -0
- atdd/tester/validators/test_migration_generation.py +116 -0
- atdd/tester/validators/test_python_test_naming.py +410 -0
- atdd/tester/validators/test_red_layer_validation.py +95 -0
- atdd/tester/validators/test_red_python_layer_structure.py +87 -0
- atdd/tester/validators/test_red_supabase_layer_structure.py +90 -0
- atdd/tester/validators/test_telemetry_structure.py +634 -0
- atdd/tester/validators/test_typescript_test_naming.py +301 -0
- atdd/tester/validators/test_typescript_test_structure.py +84 -0
- atdd-0.2.1.dist-info/METADATA +221 -0
- atdd-0.2.1.dist-info/RECORD +184 -0
- atdd-0.2.1.dist-info/WHEEL +5 -0
- atdd-0.2.1.dist-info/entry_points.txt +2 -0
- atdd-0.2.1.dist-info/licenses/LICENSE +674 -0
- atdd-0.2.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,300 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Test filename generation from acceptance URNs.
|
|
3
|
+
|
|
4
|
+
Provides utilities to generate test filenames following language-specific
|
|
5
|
+
conventions from acceptance URN format.
|
|
6
|
+
|
|
7
|
+
URN Format: acc:{wagon}:{WMBT}-{HARNESS}-{NNN}[-{slug}]
|
|
8
|
+
Example: acc:maintain-ux:C004-E2E-019-user-connection
|
|
9
|
+
|
|
10
|
+
Spec: SPEC-TESTER-CONV-0068 through SPEC-TESTER-CONV-0076
|
|
11
|
+
URN: utils:tester:filename
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import re
|
|
15
|
+
from typing import Dict, Optional
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# URN pattern for acceptance criteria
|
|
19
|
+
# Format: acc:{wagon}:{WMBT}-{HARNESS}-{NNN}[-{slug}]
|
|
20
|
+
# - wagon: lowercase with hyphens (a-z0-9-)
|
|
21
|
+
# - WMBT: Step code + 3-digit sequence (e.g., C004, E019)
|
|
22
|
+
# - HARNESS: Uppercase code (UNIT, HTTP, E2E, etc.)
|
|
23
|
+
# - NNN: 3-digit zero-padded sequence (001-999)
|
|
24
|
+
# - slug: Optional kebab-case descriptor
|
|
25
|
+
# Pattern for acceptance URNs.
# Format: acc:{wagon}:{WMBT}-{HARNESS}-{NNN}[-{slug}]
#   wagon   - lowercase with hyphens (a-z0-9-)
#   WMBT    - step code letter + 3-digit sequence (e.g. C004, E019)
#   HARNESS - uppercase harness code (UNIT, HTTP, E2E, ...)
#   NNN     - 3-digit zero-padded sequence (001-999)
#   slug    - optional kebab-case descriptor
URN_PATTERN = r'^acc:([a-z][a-z0-9-]*):([DLPCEMYRK][0-9]{3})-([A-Z0-9]+)-([0-9]{3})(?:-([a-z0-9-]+))?$'


def parse_acceptance_urn(urn: str) -> Dict[str, Optional[str]]:
    """
    Split an acceptance URN into its named components.

    Args:
        urn: URN of the form acc:{wagon}:{WMBT}-{HARNESS}-{NNN}[-{slug}]

    Returns:
        Mapping with keys 'wagon', 'WMBT', 'HARNESS', 'NNN' and 'slug';
        the 'slug' value is None when the URN carries no slug segment.

    Raises:
        ValueError: If the URN does not match URN_PATTERN.

    Example:
        >>> parse_acceptance_urn("acc:maintain-ux:C004-E2E-019-user-connection")['HARNESS']
        'E2E'
    """
    matched = re.match(URN_PATTERN, urn)
    if matched is None:
        raise ValueError(f"Invalid acceptance URN: {urn}")
    # Group order in URN_PATTERN mirrors this key order exactly.
    component_names = ('wagon', 'WMBT', 'HARNESS', 'NNN', 'slug')
    return dict(zip(component_names, matched.groups()))
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def kebab_to_snake(slug: Optional[str]) -> str:
    """
    Translate a kebab-case string into snake_case.

    Args:
        slug: Kebab-case string (e.g. "user-connection"); may be None or "".

    Returns:
        The snake_case equivalent, or "" for a falsy input.

    Example:
        >>> kebab_to_snake("user-connection")
        'user_connection'
    """
    # Falsy covers both None (no slug) and the empty string.
    return slug.replace('-', '_') if slug else ""
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def kebab_to_pascal(slug: Optional[str]) -> str:
    """
    Translate a kebab-case string into PascalCase.

    Args:
        slug: Kebab-case string (e.g. "user-connection"); may be None or "".

    Returns:
        The PascalCase equivalent, or "" for a falsy input.

    Example:
        >>> kebab_to_pascal("user-connection")
        'UserConnection'
    """
    if not slug:
        return ""
    # Capitalize each hyphen-separated piece and glue them together.
    return ''.join(map(str.capitalize, slug.split('-')))
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def dart_filename(urn: str) -> str:
    """
    Generate a Dart test filename from an acceptance URN.

    Pattern: {WMBT}_{HARNESS}_{NNN}[_{slug_snake}]_test.dart
    WMBT and HARNESS stay uppercase, pieces are underscore-joined, and the
    slug (when present) is converted to snake_case.

    Args:
        urn: Acceptance URN.

    Returns:
        Dart test filename, e.g. 'C004_E2E_019_user_connection_test.dart'.

    Raises:
        ValueError: If the URN is invalid.
    """
    parts = parse_acceptance_urn(urn)
    pieces = [parts['WMBT'], parts['HARNESS'], parts['NNN']]
    if parts['slug']:
        pieces.append(kebab_to_snake(parts['slug']))
    pieces.append('test')
    return '_'.join(pieces) + '.dart'
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def typescript_filename(urn: str) -> str:
    """
    Generate a TypeScript test filename from an acceptance URN.

    Pattern: {wmbt_lower}-{harness_lower}-{nnn}[-{slug-kebab}].test.ts
    Every component is lowercased, pieces are hyphen-joined, and the slug
    (when present) is kept in its original kebab-case.

    Args:
        urn: Acceptance URN.

    Returns:
        TypeScript test filename, e.g.
        'c004-e2e-019-user-connection.test.ts'.

    Raises:
        ValueError: If the URN is invalid.
    """
    parts = parse_acceptance_urn(urn)
    pieces = [parts['WMBT'].lower(), parts['HARNESS'].lower(), parts['NNN']]
    if parts['slug']:
        pieces.append(parts['slug'])
    return '-'.join(pieces) + '.test.ts'
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def typescript_preact_filename(urn: str, tsx: Optional[bool] = None) -> str:
    """
    Generate a Preact TypeScript test filename from an acceptance URN.

    Pattern: {WMBT}_{HARNESS}_{NNN}[_{slug_snake}].test.ts[x]
    WMBT and HARNESS stay uppercase, pieces are underscore-joined, and the
    slug (when present) becomes snake_case. The .test.tsx suffix is used
    for widget/component tests.

    Args:
        urn: Acceptance URN.
        tsx: Force .test.tsx when True and .test.ts when False. When None
            (the default) the suffix is chosen by whether HARNESS is
            "WIDGET".

    Returns:
        Preact TypeScript test filename.

    Raises:
        ValueError: If the URN is invalid.
    """
    parts = parse_acceptance_urn(urn)
    pieces = [parts['WMBT'], parts['HARNESS'], parts['NNN']]
    if parts['slug']:
        pieces.append(kebab_to_snake(parts['slug']))
    if tsx is None:
        # Caller did not force an extension: widgets get .tsx.
        tsx = parts['HARNESS'] == "WIDGET"
    extension = ".test.tsx" if tsx else ".test.ts"
    return '_'.join(pieces) + extension
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def python_filename(urn: str) -> str:
    """
    Generate a Python (pytest) test filename from an acceptance URN.

    Pattern: test_{wmbt_lower}_{harness_lower}_{nnn}[_{slug_snake}].py
    The "test_" prefix is required for pytest discovery; all pieces are
    lowercased and underscore-joined, with the slug in snake_case.

    Args:
        urn: Acceptance URN.

    Returns:
        Python test filename, e.g. 'test_c004_e2e_019_user_connection.py'.

    Raises:
        ValueError: If the URN is invalid.
    """
    parts = parse_acceptance_urn(urn)
    pieces = ['test', parts['WMBT'].lower(), parts['HARNESS'].lower(), parts['NNN']]
    if parts['slug']:
        pieces.append(kebab_to_snake(parts['slug']))
    return '_'.join(pieces) + '.py'
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def go_filename(urn: str) -> str:
    """
    Generate a Go test filename from an acceptance URN.

    Pattern: {wmbt_lower}_{harness_lower}_{nnn}[_{slug_snake}]_test.go
    All components are lowercased and underscore-joined, with the slug in
    snake_case and the Go-mandated "_test.go" suffix.

    Args:
        urn: Acceptance URN.

    Returns:
        Go test filename, e.g. 'c004_e2e_019_user_connection_test.go'.

    Raises:
        ValueError: If the URN is invalid.
    """
    parts = parse_acceptance_urn(urn)
    pieces = [parts['WMBT'].lower(), parts['HARNESS'].lower(), parts['NNN']]
    if parts['slug']:
        pieces.append(kebab_to_snake(parts['slug']))
    pieces.append('test')
    return '_'.join(pieces) + '.go'
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def java_classname(urn: str) -> str:
    """
    Generate a Java/Kotlin test classname from an acceptance URN.

    Pattern: {WMBT}{HARNESS}{NNN}{SlugPascal}Test
    WMBT and HARNESS stay uppercase, the slug (when present) becomes
    PascalCase, nothing is separator-joined, and "Test" is appended.

    Args:
        urn: Acceptance URN.

    Returns:
        Java/Kotlin test classname, e.g. 'C004E2E019UserConnectionTest'.

    Raises:
        ValueError: If the URN is invalid.
    """
    parts = parse_acceptance_urn(urn)
    pascal_slug = kebab_to_pascal(parts['slug']) if parts['slug'] else ""
    return ''.join((parts['WMBT'], parts['HARNESS'], parts['NNN'], pascal_slug, "Test"))
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def generate_test_filename(urn: str, language: str) -> str:
    """
    Generate a test filename for the given language from an acceptance URN.

    Unified entry point that routes to the language-specific generators.

    Args:
        urn: Acceptance URN.
        language: Target language: dart, typescript, typescript_preact,
            python, go, java or kotlin.

    Returns:
        Test filename for the requested language.

    Raises:
        ValueError: If the language is not supported (or the URN is
            invalid).

    Example:
        >>> generate_test_filename("acc:maintain-ux:C004-E2E-019", "python")
        'test_c004_e2e_019.py'
    """
    dispatch = {
        'dart': dart_filename,
        'typescript': typescript_filename,
        'typescript_preact': typescript_preact_filename,
        'python': python_filename,
        'go': go_filename,
    }
    if language in dispatch:
        return dispatch[language](urn)
    # JVM targets share the classname generator; only the extension differs.
    if language == 'java':
        return java_classname(urn) + ".java"
    if language == 'kotlin':
        return java_classname(urn) + ".kt"
    raise ValueError(f"Unsupported language: {language}")
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
# Export all public functions
# (declares the public API for `from atdd.tester.utils.filename import *`)
__all__ = [
    'URN_PATTERN',
    'parse_acceptance_urn',
    'kebab_to_snake',
    'kebab_to_pascal',
    'dart_filename',
    'typescript_filename',
    'typescript_preact_filename',
    'python_filename',
    'go_filename',
    'java_classname',
    'generate_test_filename',
]
|
|
File without changes
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Script to clean up duplicate header comments.
|
|
4
|
+
|
|
5
|
+
Some files have duplicate headers after the automated fix - this removes the old headers
|
|
6
|
+
and keeps only the new standardized ones.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import re
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Repository root; assumes this file sits 4 directories below it
# (e.g. <root>/.../atdd/tester/validators/<this file>) -- TODO confirm
# against the actual checkout layout.
REPO_ROOT = Path(__file__).resolve().parents[4]
# Directory scanned for Python test files.
PYTHON_DIR = REPO_ROOT / "python"
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def clean_duplicate_headers(content: str) -> tuple[str, bool]:
    """
    Remove duplicate header comments, keeping only the first URN header.

    The first ``# URN:`` line (matched case-insensitively) is kept. Every
    later ``# URN:`` line starts a duplicate header block: that line and
    all immediately following comment lines are dropped, until the first
    non-comment line ends the block.

    Args:
        content: Full text of a Python source file.

    Returns:
        (cleaned_content, was_changed): the cleaned text and whether any
        duplicate header lines were removed.
    """
    lines = content.split('\n')
    new_lines = []
    found_first_urn = False
    skip_mode = False
    skip_count = 0

    for line in lines:
        # Check if this is a URN header
        if re.match(r'^#\s*URN:', line, re.IGNORECASE):
            if not found_first_urn:
                # First URN header - keep it exactly once.
                found_first_urn = True
                new_lines.append(line)
                # BUG FIX: without this continue, the line fell through to
                # the unconditional append below and was written twice.
                continue
            # Duplicate URN header - drop it and start skipping its block.
            skip_mode = True
            skip_count += 1
            continue

        if skip_mode:
            if line.startswith('#'):
                # Still inside the duplicate header comment block.
                skip_count += 1
                continue
            # First non-comment line ends the duplicate header block.
            skip_mode = False

        new_lines.append(line)

    return '\n'.join(new_lines), skip_count > 0
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def find_test_files() -> list:
    """Find all Python test files under PYTHON_DIR.

    Skips __pycache__ artifacts and anything with "conftest" in its name.
    Returns an empty list when PYTHON_DIR does not exist.
    """
    if not PYTHON_DIR.exists():
        return []

    return [
        path
        for path in PYTHON_DIR.rglob("test_*.py")
        if '__pycache__' not in str(path) and 'conftest' not in path.name
    ]
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def main():
    """Clean up duplicate headers in all test files."""
    test_files = find_test_files()
    if not test_files:
        print("No test files found")
        return

    print(f"Checking {len(test_files)} test files for duplicate headers")
    print("=" * 80)

    cleaned_count = 0
    for test_file in sorted(test_files):
        try:
            original_content = test_file.read_text(encoding='utf-8')
        except Exception as e:
            print(f"❌ {test_file.relative_to(REPO_ROOT)}")
            print(f" ERROR: Could not read: {e}")
            continue

        cleaned_content, was_changed = clean_duplicate_headers(original_content)
        if not was_changed:
            # Nothing to rewrite for this file.
            continue

        try:
            test_file.write_text(cleaned_content, encoding='utf-8')
        except Exception as e:
            print(f"❌ {test_file.relative_to(REPO_ROOT)}")
            print(f" ERROR: Could not write: {e}")
            continue

        print(f"✅ {test_file.relative_to(REPO_ROOT)}")
        print(" Removed duplicate header comments")
        cleaned_count += 1

    print("=" * 80)
    print(f"Cleaned {cleaned_count} files")
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Script to clean up duplicate header comments - Version 2.
|
|
4
|
+
|
|
5
|
+
This version removes ALL old header-style comments after the first URN,
|
|
6
|
+
keeping only the new standardized header at the top.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import re
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Repository root; assumes this file sits 4 directories below it
# (e.g. <root>/.../atdd/tester/validators/<this file>) -- TODO confirm
# against the actual checkout layout.
REPO_ROOT = Path(__file__).resolve().parents[4]
# Directory scanned for Python test files.
PYTHON_DIR = REPO_ROOT / "python"
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def clean_file_content(content: str) -> tuple[str, bool]:
    """
    Clean up a file by removing duplicate/old header lines.

    Strategy:
    1. Locate the first ``# URN:`` line (case-insensitive) - the start of
       the canonical header.
    2. Extend the canonical header through consecutive comment or blank
       lines.
    3. After the header, drop any stray ``# URN:`` / ``# Runtime:`` /
       ``# Rationale:`` comment lines; keep everything else.

    Args:
        content: Full text of a Python source file.

    Returns:
        (cleaned_content, was_changed)
    """
    lines = content.split('\n')

    # Locate the canonical URN header line, if any.
    first_urn_idx = next(
        (i for i, line in enumerate(lines)
         if re.match(r'^#\s*URN:', line, re.IGNORECASE)),
        None,
    )
    if first_urn_idx is None:
        # No URN header at all: nothing to clean.
        return content, False

    # Extend through the comment/blank lines that form the canonical header.
    header_end_idx = first_urn_idx
    cursor = first_urn_idx + 1
    while cursor < len(lines) and (lines[cursor].startswith('#') or lines[cursor].strip() == ''):
        header_end_idx = cursor
        cursor += 1

    # Drop stale header-style comment lines after the canonical header.
    stale_patterns = (r'^#\s*[uU][rR][nN]:', r'^#\s*Runtime:', r'^#\s*Rationale:')
    kept = lines[:header_end_idx + 1]
    was_changed = False
    for line in lines[header_end_idx + 1:]:
        if any(re.match(pattern, line) for pattern in stale_patterns):
            was_changed = True
        else:
            kept.append(line)

    return '\n'.join(kept), was_changed
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def find_test_files() -> list:
    """Find all Python test files.

    Scans PYTHON_DIR recursively for ``test_*.py``, excluding cache
    artifacts and pytest conftest modules.
    """
    if not PYTHON_DIR.exists():
        return []

    def _wanted(path):
        # Exclude __pycache__ artifacts and conftest-named files.
        return '__pycache__' not in str(path) and 'conftest' not in path.name

    return [candidate for candidate in PYTHON_DIR.rglob("test_*.py") if _wanted(candidate)]
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def main():
    """Clean up duplicate headers in all test files."""
    files = find_test_files()
    if not files:
        print("No test files found")
        return

    print(f"Checking {len(files)} test files for duplicate headers")
    print("=" * 80)

    cleaned_count = 0
    for path in sorted(files):
        try:
            with open(path, 'r', encoding='utf-8') as handle:
                before = handle.read()
        except Exception as e:
            print(f"❌ {path.relative_to(REPO_ROOT)}")
            print(f" ERROR: Could not read: {e}")
            continue

        after, changed = clean_file_content(before)
        if not changed:
            # File already clean; leave it untouched.
            continue

        try:
            with open(path, 'w', encoding='utf-8') as handle:
                handle.write(after)
        except Exception as e:
            print(f"❌ {path.relative_to(REPO_ROOT)}")
            print(f" ERROR: Could not write: {e}")
            continue

        print(f"✅ {path.relative_to(REPO_ROOT)}")
        print(" Removed duplicate header comments")
        cleaned_count += 1

    print("=" * 80)
    print(f"Cleaned {cleaned_count} files")
|