atdd 0.2.11__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atdd/cli.py +14 -4
- atdd/coach/commands/inventory.py +2 -4
- atdd/coach/commands/test_runner.py +3 -3
- atdd/coach/templates/ATDD.md +5 -0
- atdd/coach/templates/SESSION-TEMPLATE.md +9 -0
- atdd/coach/utils/__init__.py +5 -0
- atdd/coach/utils/repo.py +97 -0
- atdd/coach/validators/shared_fixtures.py +2 -0
- atdd/tester/conventions/security.convention.yaml +165 -0
- atdd/tester/validators/test_contract_security.py +569 -0
- atdd/tester/validators/test_contracts_structure.py +81 -0
- {atdd-0.2.11.dist-info → atdd-0.3.1.dist-info}/METADATA +1 -1
- {atdd-0.2.11.dist-info → atdd-0.3.1.dist-info}/RECORD +17 -14
- {atdd-0.2.11.dist-info → atdd-0.3.1.dist-info}/WHEEL +0 -0
- {atdd-0.2.11.dist-info → atdd-0.3.1.dist-info}/entry_points.txt +0 -0
- {atdd-0.2.11.dist-info → atdd-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {atdd-0.2.11.dist-info → atdd-0.3.1.dist-info}/top_level.txt +0 -0
atdd/cli.py
CHANGED
|
@@ -44,6 +44,7 @@ from atdd.coach.commands.initializer import ProjectInitializer
|
|
|
44
44
|
from atdd.coach.commands.session import SessionManager
|
|
45
45
|
from atdd.coach.commands.sync import AgentConfigSync
|
|
46
46
|
from atdd.coach.commands.gate import ATDDGate
|
|
47
|
+
from atdd.coach.utils.repo import find_repo_root
|
|
47
48
|
from atdd.version_check import print_update_notice
|
|
48
49
|
|
|
49
50
|
|
|
@@ -57,8 +58,8 @@ class ATDDCoach:
|
|
|
57
58
|
- Coder: Implementation phase validation
|
|
58
59
|
"""
|
|
59
60
|
|
|
60
|
-
def __init__(self):
|
|
61
|
-
self.repo_root =
|
|
61
|
+
def __init__(self, repo_root: Path = None):
|
|
62
|
+
self.repo_root = repo_root or find_repo_root()
|
|
62
63
|
self.inventory = RepositoryInventory(self.repo_root)
|
|
63
64
|
self.test_runner = TestRunner(self.repo_root)
|
|
64
65
|
self.registry_updater = RegistryUpdater(self.repo_root)
|
|
@@ -290,6 +291,14 @@ Phase descriptions:
|
|
|
290
291
|
|
|
291
292
|
# ----- Existing flag-based arguments (backwards compatible) -----
|
|
292
293
|
|
|
294
|
+
# Repository root override
|
|
295
|
+
parser.add_argument(
|
|
296
|
+
"--repo",
|
|
297
|
+
type=str,
|
|
298
|
+
metavar="PATH",
|
|
299
|
+
help="Target repository root (default: auto-detect from .atdd/)"
|
|
300
|
+
)
|
|
301
|
+
|
|
293
302
|
# Main command groups
|
|
294
303
|
parser.add_argument(
|
|
295
304
|
"--inventory",
|
|
@@ -394,8 +403,9 @@ Phase descriptions:
|
|
|
394
403
|
|
|
395
404
|
# ----- Handle flag-based commands (backwards compatible) -----
|
|
396
405
|
|
|
397
|
-
# Create coach instance
|
|
398
|
-
|
|
406
|
+
# Create coach instance with optional repo override
|
|
407
|
+
repo_path = Path(args.repo) if args.repo else None
|
|
408
|
+
coach = ATDDCoach(repo_root=repo_path)
|
|
399
409
|
|
|
400
410
|
# Handle commands
|
|
401
411
|
if args.inventory:
|
atdd/coach/commands/inventory.py
CHANGED
|
@@ -26,7 +26,7 @@ class RepositoryInventory:
|
|
|
26
26
|
"""Generate comprehensive repository inventory."""
|
|
27
27
|
|
|
28
28
|
def __init__(self, repo_root: Path = None):
|
|
29
|
-
self.repo_root = repo_root or Path(
|
|
29
|
+
self.repo_root = repo_root or Path.cwd()
|
|
30
30
|
self.inventory = {
|
|
31
31
|
"inventory": {
|
|
32
32
|
"generated_at": datetime.now().isoformat(),
|
|
@@ -288,7 +288,7 @@ class RepositoryInventory:
|
|
|
288
288
|
feature_files = len(python_tests) + len(ts_tests)
|
|
289
289
|
|
|
290
290
|
meta_cases = planner_cases + tester_cases + coder_cases + platform_cases
|
|
291
|
-
feature_cases = python_cases #
|
|
291
|
+
feature_cases = python_cases # TS case counting would require parsing those languages
|
|
292
292
|
|
|
293
293
|
return {
|
|
294
294
|
"total_files": meta_files + feature_files,
|
|
@@ -312,13 +312,11 @@ class RepositoryInventory:
|
|
|
312
312
|
"feature_tests": {
|
|
313
313
|
"files": {
|
|
314
314
|
"python": len(python_tests),
|
|
315
|
-
"dart": len(dart_tests),
|
|
316
315
|
"typescript": len(ts_tests),
|
|
317
316
|
"total": feature_files
|
|
318
317
|
},
|
|
319
318
|
"cases": {
|
|
320
319
|
"python": python_cases,
|
|
321
|
-
"dart": "not_counted",
|
|
322
320
|
"typescript": "not_counted",
|
|
323
321
|
"total": feature_cases
|
|
324
322
|
}
|
|
@@ -15,7 +15,7 @@ class TestRunner:
|
|
|
15
15
|
"""Run ATDD meta-tests with various configurations."""
|
|
16
16
|
|
|
17
17
|
def __init__(self, repo_root: Path = None):
|
|
18
|
-
self.repo_root = repo_root or Path(
|
|
18
|
+
self.repo_root = repo_root or Path.cwd()
|
|
19
19
|
self.atdd_dir = self.repo_root / "atdd"
|
|
20
20
|
|
|
21
21
|
def run_tests(
|
|
@@ -88,11 +88,11 @@ class TestRunner:
|
|
|
88
88
|
# Show collected tests summary
|
|
89
89
|
cmd.append("--tb=short")
|
|
90
90
|
|
|
91
|
-
# Run pytest
|
|
91
|
+
# Run pytest from current directory (consumer repo)
|
|
92
92
|
print(f"🧪 Running: {' '.join(cmd)}")
|
|
93
93
|
print("=" * 60)
|
|
94
94
|
|
|
95
|
-
result = subprocess.run(cmd
|
|
95
|
+
result = subprocess.run(cmd)
|
|
96
96
|
return result.returncode
|
|
97
97
|
|
|
98
98
|
def run_phase(self, phase: str, **kwargs) -> int:
|
atdd/coach/templates/ATDD.md
CHANGED
|
@@ -13,6 +13,11 @@ missions:
|
|
|
13
13
|
#
|
|
14
14
|
# PROTOCOL:
|
|
15
15
|
# ---------
|
|
16
|
+
# 0. TOOL GATE (MANDATORY)
|
|
17
|
+
# If your agent supports plan mode (Claude Code, etc.), enable it NOW.
|
|
18
|
+
# This is a tool capability gate, NOT the ATDD Planner phase.
|
|
19
|
+
# If unavailable, state: "Plan mode unavailable" and proceed.
|
|
20
|
+
#
|
|
16
21
|
# 1. Run this command FIRST:
|
|
17
22
|
#
|
|
18
23
|
# atdd gate
|
|
@@ -1,3 +1,12 @@
|
|
|
1
|
+
<!--
|
|
2
|
+
# =============================================================================
|
|
3
|
+
# TOOL GATE (MANDATORY BEFORE FILLING THIS FILE)
|
|
4
|
+
# =============================================================================
|
|
5
|
+
# If your agent supports plan mode (Claude Code, etc.), enable it NOW.
|
|
6
|
+
# This is a tool capability gate, NOT the ATDD Planner phase.
|
|
7
|
+
# If unavailable, state: "Plan mode unavailable" and proceed.
|
|
8
|
+
# =============================================================================
|
|
9
|
+
-->
|
|
1
10
|
---
|
|
2
11
|
# SESSION METADATA (YAML frontmatter - machine-parseable)
|
|
3
12
|
#
|
atdd/coach/utils/__init__.py
CHANGED
atdd/coach/utils/repo.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Repository root detection utility.
|
|
3
|
+
|
|
4
|
+
Finds the consumer repository root using multiple detection strategies:
|
|
5
|
+
1. .atdd/manifest.yaml (preferred - explicit ATDD project marker)
|
|
6
|
+
2. plan/ AND contracts/ both exist (ATDD project structure)
|
|
7
|
+
3. .git/ directory (fallback - any git repo)
|
|
8
|
+
4. cwd (last resort - allows commands to work on uninitialized repos)
|
|
9
|
+
|
|
10
|
+
This ensures ATDD commands operate on the user's repo, not the package root.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from functools import lru_cache
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Optional
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@lru_cache(maxsize=None)
def _cached_search(start: Path) -> Path:
    """Walk upward from *start* (already resolved) looking for ATDD markers.

    Returns the first ancestor (or *start* itself) containing a marker, or
    *start* when no marker exists anywhere up the tree.
    """
    current = start
    while current != current.parent:
        # Strategy 1: .atdd/manifest.yaml (preferred, explicit ATDD marker)
        if (current / ".atdd" / "manifest.yaml").is_file():
            return current
        # Strategy 2: plan/ AND contracts/ both exist (ATDD project structure)
        if (current / "plan").is_dir() and (current / "contracts").is_dir():
            return current
        # Strategy 3: .git/ directory (fallback for any git repository)
        if (current / ".git").is_dir():
            return current
        current = current.parent
    # Strategy 4: no markers anywhere -- fall back to the starting directory
    # so commands can still operate on uninitialized repos.
    return start


def find_repo_root(start: Optional[Path] = None) -> Path:
    """
    Find repo root by searching upward for ATDD project markers.

    Detection order (first match wins):
    1. .atdd/manifest.yaml - explicit ATDD project marker
    2. plan/ AND contracts/ both exist - ATDD project structure
    3. .git/ directory - fallback for any git repository
    4. starting directory - last resort if no markers found

    Args:
        start: Starting directory (default: cwd)

    Returns:
        Path to repo root (falls back to the starting directory if no
        markers are found)

    Note:
        Results are cached per *resolved* starting directory.  The previous
        implementation cached on the raw argument (usually ``None``) with
        ``maxsize=1``, which silently returned a stale root if the process
        changed cwd between calls, and thrashed whenever callers alternated
        arguments.  Resolving before the cache lookup makes the key
        unambiguous while keeping repeated lookups cheap.
    """
    return _cached_search((start or Path.cwd()).resolve())
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def require_repo_root(start: Optional[Path] = None) -> Path:
    """
    Find repo root, raising RuntimeError if no markers found.

    This is a stricter version of find_repo_root() for commands that
    require a valid ATDD project structure.

    Args:
        start: Starting directory (default: cwd)

    Returns:
        Path to repo root

    Raises:
        RuntimeError: If no ATDD project markers (.atdd/manifest.yaml,
            plan/ + contracts/, or .git/) are found
    """
    origin = (start or Path.cwd()).resolve()

    candidate = origin
    while candidate != candidate.parent:
        # Any one marker is sufficient: explicit manifest, the
        # plan/ + contracts/ pair, or a git repository root.
        marked = (
            (candidate / ".atdd" / "manifest.yaml").is_file()
            or ((candidate / "plan").is_dir() and (candidate / "contracts").is_dir())
            or (candidate / ".git").is_dir()
        )
        if marked:
            return candidate
        candidate = candidate.parent

    raise RuntimeError(
        f"No ATDD project markers found searching from {origin}. "
        "Expected one of: .atdd/manifest.yaml, plan/ + contracts/, or .git/"
    )
|
|
@@ -118,6 +118,8 @@ def wagon_manifests() -> List[Tuple[Path, Dict[str, Any]]]:
|
|
|
118
118
|
manifests.append((manifest_path, manifest_data))
|
|
119
119
|
|
|
120
120
|
# Also discover individual wagon manifests (pattern: plan/*/_{wagon}.yaml)
|
|
121
|
+
if not PLAN_DIR.exists():
|
|
122
|
+
return manifests
|
|
121
123
|
for wagon_dir in PLAN_DIR.iterdir():
|
|
122
124
|
if wagon_dir.is_dir() and not wagon_dir.name.startswith("_"):
|
|
123
125
|
for manifest_file in wagon_dir.glob("_*.yaml"):
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
version: "1.0"
|
|
2
|
+
name: "Security Convention"
|
|
3
|
+
description: "Security validation rules for contract schemas"
|
|
4
|
+
|
|
5
|
+
# Validator Specifications
|
|
6
|
+
validators:
|
|
7
|
+
- id: "SPEC-TESTER-SEC-0001"
|
|
8
|
+
name: "Secured operations must declare auth headers"
|
|
9
|
+
test: "test_secured_operations_have_required_headers"
|
|
10
|
+
mode: "hard"
|
|
11
|
+
description: |
|
|
12
|
+
Secured operations must include appropriate authentication headers
|
|
13
|
+
based on their declared security scheme. This ensures that all
|
|
14
|
+
protected endpoints have proper header declarations for auth tokens.
|
|
15
|
+
|
|
16
|
+
- id: "SPEC-TESTER-SEC-0002"
|
|
17
|
+
name: "Operations must have explicit security field"
|
|
18
|
+
test: "test_operations_have_explicit_security"
|
|
19
|
+
mode: "soft (xfail)"
|
|
20
|
+
description: |
|
|
21
|
+
All API operations should declare their security requirements explicitly.
|
|
22
|
+
Use security: [] for public endpoints and security: [{...}] for protected.
|
|
23
|
+
This ensures security posture is intentional, not accidental.
|
|
24
|
+
|
|
25
|
+
- id: "SPEC-TESTER-SEC-0003"
|
|
26
|
+
name: "Secured operations need SEC/RLS acceptance coverage"
|
|
27
|
+
test: "test_secured_operations_have_security_acceptance"
|
|
28
|
+
mode: "soft (xfail)"
|
|
29
|
+
description: |
|
|
30
|
+
Secured operations must have at least one acceptance criteria using
|
|
31
|
+
the SEC (Security) or RLS (Row-Level Security) harness. This ensures
|
|
32
|
+
security requirements are tested.
|
|
33
|
+
|
|
34
|
+
- id: "SPEC-TESTER-SEC-0004"
|
|
35
|
+
name: "Error responses should not expose sensitive data"
|
|
36
|
+
test: "test_error_responses_have_no_sensitive_fields"
|
|
37
|
+
mode: "warning only"
|
|
38
|
+
description: |
|
|
39
|
+
Error responses (4xx/5xx) should not contain fields with sensitive
|
|
40
|
+
names like password, secret, credential, ssn, api_key, private_key.
|
|
41
|
+
This prevents accidental exposure of sensitive data in error messages.
|
|
42
|
+
|
|
43
|
+
# Security Scheme to Header Mapping
|
|
44
|
+
scheme_header_mapping:
|
|
45
|
+
jwt:
|
|
46
|
+
headers: ["authorization"]
|
|
47
|
+
format: "Bearer {token}"
|
|
48
|
+
description: "JSON Web Token authentication"
|
|
49
|
+
|
|
50
|
+
bearer:
|
|
51
|
+
headers: ["authorization"]
|
|
52
|
+
format: "Bearer {token}"
|
|
53
|
+
description: "Bearer token authentication"
|
|
54
|
+
|
|
55
|
+
oauth2:
|
|
56
|
+
headers: ["authorization"]
|
|
57
|
+
format: "Bearer {access_token}"
|
|
58
|
+
description: "OAuth 2.0 authentication"
|
|
59
|
+
|
|
60
|
+
http:
|
|
61
|
+
headers: ["authorization"]
|
|
62
|
+
format: "Varies by scheme (basic, bearer, digest)"
|
|
63
|
+
description: "HTTP authentication"
|
|
64
|
+
|
|
65
|
+
apiKey:
|
|
66
|
+
headers: "dynamic"
|
|
67
|
+
source: "security[].name (default: x-api-key)"
|
|
68
|
+
location: "security[].in (header, query, or cookie)"
|
|
69
|
+
description: "API Key authentication"
|
|
70
|
+
|
|
71
|
+
# Enforcement Modes
|
|
72
|
+
enforcement:
|
|
73
|
+
environment_variable: "ATDD_SECURITY_ENFORCE"
|
|
74
|
+
default: "0"
|
|
75
|
+
modes:
|
|
76
|
+
soft:
|
|
77
|
+
value: "0"
|
|
78
|
+
behavior: "pytest.xfail - test marked as expected failure"
|
|
79
|
+
visibility: "Visible in test output as XFAIL"
|
|
80
|
+
use_case: "Development, incremental adoption"
|
|
81
|
+
|
|
82
|
+
hard:
|
|
83
|
+
value: "1"
|
|
84
|
+
behavior: "pytest.fail - test fails the suite"
|
|
85
|
+
visibility: "Visible as FAILED"
|
|
86
|
+
use_case: "CI/CD, production readiness"
|
|
87
|
+
|
|
88
|
+
# Sensitive Data Detection
|
|
89
|
+
sensitive_data_detection:
|
|
90
|
+
description: "Rules for detecting potentially sensitive fields in error responses"
|
|
91
|
+
|
|
92
|
+
sensitive_fields:
|
|
93
|
+
- "password"
|
|
94
|
+
- "secret"
|
|
95
|
+
- "credential"
|
|
96
|
+
- "ssn"
|
|
97
|
+
- "api_key"
|
|
98
|
+
- "private_key"
|
|
99
|
+
- "token"
|
|
100
|
+
|
|
101
|
+
allowed_exceptions:
|
|
102
|
+
- "error_key"
|
|
103
|
+
- "key_id"
|
|
104
|
+
- "token_type"
|
|
105
|
+
|
|
106
|
+
detection_method: "Substring match (case-insensitive)"
|
|
107
|
+
note: "Review flagged fields manually - not all matches are security issues"
|
|
108
|
+
|
|
109
|
+
# Threat Modeling Integration
|
|
110
|
+
threat_modeling:
|
|
111
|
+
canonical_location: "feature.yaml: security.abuse_cases[]"
|
|
112
|
+
description: |
|
|
113
|
+
Threat models and abuse cases should be documented in the feature YAML
|
|
114
|
+
files under the security.abuse_cases array. This ensures security
|
|
115
|
+
considerations are part of the feature specification.
|
|
116
|
+
|
|
117
|
+
schema:
|
|
118
|
+
abuse_case:
|
|
119
|
+
required:
|
|
120
|
+
- id # Unique identifier (e.g., THREAT-001)
|
|
121
|
+
- name # Short name
|
|
122
|
+
- threat # Description of the threat
|
|
123
|
+
- mitigation # How the threat is addressed
|
|
124
|
+
optional:
|
|
125
|
+
- severity # low, medium, high, critical
|
|
126
|
+
- likelihood # unlikely, possible, likely
|
|
127
|
+
- acceptance_ref # URN to acceptance test covering this threat
|
|
128
|
+
|
|
129
|
+
example:
|
|
130
|
+
security:
|
|
131
|
+
abuse_cases:
|
|
132
|
+
- id: "THREAT-001"
|
|
133
|
+
name: "Session Hijacking"
|
|
134
|
+
threat: "Attacker steals session token via XSS"
|
|
135
|
+
mitigation: "HttpOnly cookies, CSP headers"
|
|
136
|
+
severity: "high"
|
|
137
|
+
acceptance_ref: "acc:auth:D001-SEC-001-session-protection"
|
|
138
|
+
|
|
139
|
+
# Acceptance Coverage Requirements
|
|
140
|
+
acceptance_coverage:
|
|
141
|
+
description: "Requirements for security acceptance test coverage"
|
|
142
|
+
|
|
143
|
+
valid_harnesses:
|
|
144
|
+
- "SEC" # Security-focused tests
|
|
145
|
+
- "RLS" # Row-Level Security tests
|
|
146
|
+
|
|
147
|
+
urn_format: "acc:{wagon}:{WMBT}-{HARNESS}-{NNN}[-{slug}]"
|
|
148
|
+
full_format_required: true
|
|
149
|
+
note: |
|
|
150
|
+
Short refs (without harness) cannot be validated for SEC/RLS coverage.
|
|
151
|
+
Always use the full URN format with the harness identifier.
|
|
152
|
+
|
|
153
|
+
examples:
|
|
154
|
+
valid:
|
|
155
|
+
- "acc:auth:D001-SEC-001-token-validation"
|
|
156
|
+
- "acc:player:P002-RLS-001-own-data-only"
|
|
157
|
+
invalid:
|
|
158
|
+
- "SEC-001" # Missing wagon and WMBT
|
|
159
|
+
- "D001-001" # Missing harness identifier
|
|
160
|
+
|
|
161
|
+
# Cross-References
|
|
162
|
+
references:
|
|
163
|
+
test_file: "atdd/tester/validators/test_contract_security.py"
|
|
164
|
+
contract_convention: "atdd/tester/conventions/contract.convention.yaml"
|
|
165
|
+
api_structure: "contract.convention.yaml#api_structure.authentication"
|
|
@@ -0,0 +1,569 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Platform tests: Contract security validation.
|
|
3
|
+
|
|
4
|
+
Validates that contract schemas properly declare security requirements:
|
|
5
|
+
- Secured operations have required auth headers
|
|
6
|
+
- Operations have explicit security field
|
|
7
|
+
- Secured operations have SEC/RLS acceptance coverage
|
|
8
|
+
- Error responses do not expose sensitive data
|
|
9
|
+
|
|
10
|
+
Spec: SPEC-TESTER-SEC-0001 through SPEC-TESTER-SEC-0004
|
|
11
|
+
URN: tester:validators:contract-security
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import json
|
|
15
|
+
import os
|
|
16
|
+
import re
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Dict, List, Optional, Set
|
|
19
|
+
|
|
20
|
+
import pytest
|
|
21
|
+
|
|
22
|
+
# Import find_repo_root with fallback
|
|
23
|
+
try:
|
|
24
|
+
from atdd.coach.utils.repo import find_repo_root
|
|
25
|
+
except ImportError:
|
|
26
|
+
def find_repo_root() -> Path:
|
|
27
|
+
"""Fallback: search upward for .git directory."""
|
|
28
|
+
current = Path.cwd().resolve()
|
|
29
|
+
while current != current.parent:
|
|
30
|
+
if (current / ".git").is_dir():
|
|
31
|
+
return current
|
|
32
|
+
current = current.parent
|
|
33
|
+
return Path.cwd().resolve()
|
|
34
|
+
|
|
35
|
+
# Import parse_acceptance_urn with fallback
|
|
36
|
+
try:
|
|
37
|
+
from atdd.tester.utils.filename import parse_acceptance_urn
|
|
38
|
+
except ImportError:
|
|
39
|
+
URN_PATTERN = r'^acc:([a-z][a-z0-9-]*):([DLPCEMYRK][0-9]{3})-([A-Z0-9]+)-([0-9]{3})(?:-([a-z0-9-]+))?$'
|
|
40
|
+
|
|
41
|
+
def parse_acceptance_urn(urn: str) -> Dict[str, Optional[str]]:
|
|
42
|
+
"""Fallback URN parser."""
|
|
43
|
+
match = re.match(URN_PATTERN, urn)
|
|
44
|
+
if not match:
|
|
45
|
+
raise ValueError(f"Invalid acceptance URN: {urn}")
|
|
46
|
+
wagon, WMBT, HARNESS, NNN, slug = match.groups()
|
|
47
|
+
return {
|
|
48
|
+
'wagon': wagon,
|
|
49
|
+
'WMBT': WMBT,
|
|
50
|
+
'HARNESS': HARNESS,
|
|
51
|
+
'NNN': NNN,
|
|
52
|
+
'slug': slug
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
# Path constants
|
|
57
|
+
REPO_ROOT = find_repo_root()
|
|
58
|
+
CONTRACTS_DIR = REPO_ROOT / "contracts"
|
|
59
|
+
|
|
60
|
+
# Security enforcement mode
|
|
61
|
+
ENFORCE_SECURITY = os.environ.get("ATDD_SECURITY_ENFORCE", "0") == "1"
|
|
62
|
+
|
|
63
|
+
# Scheme to required headers mapping
|
|
64
|
+
SCHEME_HEADERS = {
|
|
65
|
+
"jwt": {"authorization"},
|
|
66
|
+
"bearer": {"authorization"},
|
|
67
|
+
"oauth2": {"authorization"},
|
|
68
|
+
"http": {"authorization"},
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
# Sensitive field names that should not appear in error responses
|
|
72
|
+
SENSITIVE_FIELDS = {"password", "secret", "credential", "ssn", "api_key", "private_key", "token"}
|
|
73
|
+
# Fields that are allowed even if they contain "key" or similar
|
|
74
|
+
ALLOWED_IN_ERRORS = {"error_key", "key_id", "token_type"}
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def find_all_contract_schemas() -> List[Path]:
    """Return every *.schema.json file under the contracts directory.

    Yields an empty list when the contracts directory does not exist,
    so callers can skip gracefully on uninitialized repos.
    """
    if not CONTRACTS_DIR.exists():
        return []
    return [schema for schema in CONTRACTS_DIR.glob("**/*.schema.json")]
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def load_contract(path: Path) -> Optional[Dict]:
    """Load and parse a contract schema file.

    Args:
        path: Path to a *.schema.json file.

    Returns:
        The parsed JSON object, or None when the file is missing,
        unreadable, not valid UTF-8, or not valid JSON.
    """
    try:
        # Contracts are JSON, which is UTF-8 by specification; the previous
        # bare open() used the platform default encoding and could fail (or
        # mis-decode) on Windows.
        with open(path, encoding="utf-8") as f:
            return json.load(f)
    except (json.JSONDecodeError, OSError, UnicodeDecodeError):
        return None
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def get_secured_operations(contract: Dict) -> List[Dict]:
    """Extract the operations that carry a non-empty security declaration."""
    api_meta = contract.get("x-artifact-metadata", {}).get("api", {})
    # An operation counts as secured only when it is a dict with a truthy
    # (non-empty) "security" entry.
    return [
        operation
        for operation in api_meta.get("operations", [])
        if isinstance(operation, dict) and operation.get("security", [])
    ]
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def is_secured_operation(contract: Dict) -> bool:
    """Check if contract represents a secured operation.

    Preferred signal is an operation with a non-empty ``security`` array;
    legacy metadata flags are consulted as a fallback.
    """
    metadata = contract.get("x-artifact-metadata", {})
    api = metadata.get("api", {})

    # Preferred method: any operation with non-empty security.
    operations = api.get("operations", [])
    if any(isinstance(op, dict) and op.get("security", []) for op in operations):
        return True

    # Legacy fallbacks: explicit metadata flags, scheme declarations,
    # or API-level security indicators.
    security = metadata.get("security", {})
    return (
        security.get("requires_auth") is True
        or bool(security.get("authentication"))
        or "securitySchemes" in contract
        or bool(api.get("security"))
    )
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def get_declared_headers(contract: Dict) -> Set[str]:
    """Collect every declared header name (lower-cased) from a contract.

    Sources scanned:
      1. root-level ``parameters`` entries with ``in: header``
      2. ``properties.headers.properties`` keys
      3. ``x-artifact-metadata.api.headers``
      4. each operation's ``headers`` in ``x-artifact-metadata.api.operations``
    """
    found: Set[str] = set()

    # 1. Root-level parameter declarations.
    found.update(
        param.get("name", "").lower()
        for param in contract.get("parameters", [])
        if param.get("in") == "header"
    )

    # 2. Schema properties modeling a "headers" object.
    props = contract.get("properties", {})
    if "headers" in props and isinstance(props["headers"], dict):
        found.update(key.lower() for key in props["headers"].get("properties", {}))

    # 3 & 4. Metadata-level headers plus per-operation headers; entries may
    # be plain strings or {"name": ...} dicts, anything else is ignored.
    api = contract.get("x-artifact-metadata", {}).get("api", {})
    header_lists = [api.get("headers", [])]
    header_lists += [
        op.get("headers", [])
        for op in api.get("operations", [])
        if isinstance(op, dict)
    ]
    for entries in header_lists:
        for entry in entries:
            if isinstance(entry, str):
                found.add(entry.lower())
            elif isinstance(entry, dict):
                found.add(entry.get("name", "").lower())

    return found
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def get_operation_headers(operation: Dict) -> Set[str]:
    """Extract declared header names (lower-cased) from a single operation.

    Header entries may be plain strings or {"name": ...} dicts; any other
    entry type is ignored.
    """
    entries = operation.get("headers", [])
    from_strings = {entry.lower() for entry in entries if isinstance(entry, str)}
    from_dicts = {
        entry.get("name", "").lower() for entry in entries if isinstance(entry, dict)
    }
    return from_strings | from_dicts
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def get_required_headers_for_security(security_schemes: List[Dict]) -> Set[str]:
    """Determine which headers the given security schemes require.

    Known scheme types map via SCHEME_HEADERS; apiKey schemes name their
    own header (default x-api-key) and only count when delivered in a
    header; unknown schemes fall back to requiring ``authorization``.
    """
    needed: Set[str] = set()
    for scheme in security_schemes:
        if not isinstance(scheme, dict):
            continue

        kind = scheme.get("type", "").lower()

        if kind in SCHEME_HEADERS:
            needed |= SCHEME_HEADERS[kind]
        elif kind == "apikey":
            # apiKey: header name comes from the scheme itself.
            if scheme.get("in", "header") == "header":
                needed.add(scheme.get("name", "x-api-key").lower())
        else:
            # Unknown scheme: require an authorization header as a fallback.
            needed.add("authorization")

    return needed
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def get_acceptance_refs(contract: Dict) -> List[str]:
    """Pull traceability acceptance references out of the artifact metadata."""
    return (
        contract.get("x-artifact-metadata", {})
        .get("traceability", {})
        .get("acceptance_refs", [])
    )
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def _resolve_schema_ref(contract: Dict, schema_ref: str) -> Optional[Dict]:
|
|
218
|
+
"""Resolve $ref to actual schema definition."""
|
|
219
|
+
if not isinstance(schema_ref, str):
|
|
220
|
+
return None
|
|
221
|
+
if not schema_ref.startswith("#/definitions/"):
|
|
222
|
+
return None
|
|
223
|
+
definition_name = schema_ref.split("/")[-1]
|
|
224
|
+
return contract.get("definitions", {}).get(definition_name)
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def _extract_field_names(schema: Dict, contract: Dict, visited: Optional[Set[str]] = None) -> Set[str]:
    """Recursively gather every property name reachable from *schema*.

    Follows local $refs (cycle-safe via *visited*), array ``items``, and
    the allOf/anyOf/oneOf combinators.
    """
    visited = set() if visited is None else visited

    if not isinstance(schema, dict):
        return set()

    # A $ref node delegates entirely to the referenced definition;
    # already-visited refs are skipped to break cycles.
    ref = schema.get("$ref")
    if ref:
        if ref in visited:
            return set()
        visited.add(ref)
        target = _resolve_schema_ref(contract, ref)
        return _extract_field_names(target, contract, visited) if target else set()

    names: Set[str] = set()

    # Direct property names, plus whatever their sub-schemas contribute.
    props = schema.get("properties", {})
    if isinstance(props, dict):
        names.update(props.keys())
        for sub in props.values():
            if isinstance(sub, dict):
                names |= _extract_field_names(sub, contract, visited)

    # Array element schemas.
    items = schema.get("items")
    if isinstance(items, dict):
        names |= _extract_field_names(items, contract, visited)

    # Schema combinators.
    for combinator in ("allOf", "anyOf", "oneOf"):
        branches = schema.get(combinator, [])
        if isinstance(branches, list):
            for sub in branches:
                if isinstance(sub, dict):
                    names |= _extract_field_names(sub, contract, visited)

    return names
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def soft_fail_or_fail(message: str, issues: List[str]):
    """Fail the test or xfail it based on the ATDD_SECURITY_ENFORCE env var.

    Only the first 10 issues are listed; the remainder is summarized.
    """
    shown = "\n".join(f"  {issue}" for issue in issues[:10])
    overflow = len(issues) - 10
    suffix = f"\n  ... and {overflow} more" if overflow > 0 else ""
    full_message = f"{message}:\n{shown}{suffix}"

    if ENFORCE_SECURITY:
        pytest.fail(full_message)
    else:
        pytest.xfail(
            f"[SOFT-FAIL] {full_message}\n\n"
            "Set ATDD_SECURITY_ENFORCE=1 to enforce."
        )
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
@pytest.mark.tester
@pytest.mark.security
def test_secured_operations_have_required_headers():
    """
    SPEC-TESTER-SEC-0001: Secured operations must declare auth headers

    Given: Contract schemas with security requirements
    When: Checking for header declarations
    Then: Secured operations must include appropriate auth header based on scheme

    Security scheme to header mapping:
    - jwt/bearer/oauth2/http: authorization
    - apiKey: dynamic (from security[].name, default x-api-key)
    """
    contract_files = find_all_contract_schemas()
    if not contract_files:
        pytest.skip("No contract schema files found")

    missing_headers = []

    for contract_path in contract_files:
        contract = load_contract(contract_path)
        if not contract:
            continue

        api = contract.get("x-artifact-metadata", {}).get("api", {})

        for op in api.get("operations", []):
            if not isinstance(op, dict):
                continue

            security = op.get("security", [])
            if not security:
                # Not a secured operation.
                continue

            required_headers = get_required_headers_for_security(security)
            # Operation-level headers, augmented with contract-level ones.
            declared_headers = get_operation_headers(op) | get_declared_headers(contract)

            if declared_headers & required_headers:
                continue

            op_desc = f"{op.get('method', '?')} {op.get('path', '?')}"
            missing_headers.append(
                f"{contract_path.relative_to(REPO_ROOT)} [{op_desc}]: "
                f"Secured operation missing auth header. "
                f"Declared: {sorted(declared_headers) or 'none'}. "
                f"Required one of: {sorted(required_headers)}"
            )

    if missing_headers:
        soft_fail_or_fail(
            f"Found {len(missing_headers)} secured operations without auth headers",
            missing_headers
        )
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
@pytest.mark.tester
|
|
351
|
+
@pytest.mark.security
|
|
352
|
+
def test_operations_have_explicit_security():
|
|
353
|
+
"""
|
|
354
|
+
SPEC-TESTER-SEC-0002: All operations should have explicit security field
|
|
355
|
+
|
|
356
|
+
Given: Contract schemas
|
|
357
|
+
When: Checking for security metadata
|
|
358
|
+
Then: Operations should declare security requirements explicitly
|
|
359
|
+
(either security: [] for public or security: [{...}] for protected)
|
|
360
|
+
|
|
361
|
+
This ensures security posture is intentional, not accidental.
|
|
362
|
+
"""
|
|
363
|
+
contract_files = find_all_contract_schemas()
|
|
364
|
+
|
|
365
|
+
if not contract_files:
|
|
366
|
+
pytest.skip("No contract schema files found")
|
|
367
|
+
|
|
368
|
+
missing_security = []
|
|
369
|
+
|
|
370
|
+
for contract_path in contract_files:
|
|
371
|
+
contract = load_contract(contract_path)
|
|
372
|
+
if not contract:
|
|
373
|
+
continue
|
|
374
|
+
|
|
375
|
+
metadata = contract.get("x-artifact-metadata", {})
|
|
376
|
+
|
|
377
|
+
# Skip non-API contracts (e.g., shared schemas, types)
|
|
378
|
+
if not metadata.get("api"):
|
|
379
|
+
continue
|
|
380
|
+
|
|
381
|
+
api = metadata.get("api", {})
|
|
382
|
+
operations = api.get("operations", [])
|
|
383
|
+
|
|
384
|
+
# Check each operation for explicit security
|
|
385
|
+
for op in operations:
|
|
386
|
+
if not isinstance(op, dict):
|
|
387
|
+
continue
|
|
388
|
+
|
|
389
|
+
# security field must be present (even if empty array for public)
|
|
390
|
+
if "security" not in op:
|
|
391
|
+
op_desc = f"{op.get('method', '?')} {op.get('path', '?')}"
|
|
392
|
+
missing_security.append(
|
|
393
|
+
f"{contract_path.relative_to(REPO_ROOT)} [{op_desc}]: "
|
|
394
|
+
f"Operation missing explicit security field. "
|
|
395
|
+
f"Add security: [] for public or security: [{{...}}] for protected"
|
|
396
|
+
)
|
|
397
|
+
|
|
398
|
+
# Also check for legacy x-artifact-metadata.security pattern
|
|
399
|
+
if not operations:
|
|
400
|
+
security = metadata.get("security", {})
|
|
401
|
+
has_explicit_security = (
|
|
402
|
+
"requires_auth" in security or
|
|
403
|
+
"authentication" in security or
|
|
404
|
+
"securitySchemes" in contract or
|
|
405
|
+
api.get("security")
|
|
406
|
+
)
|
|
407
|
+
|
|
408
|
+
if not has_explicit_security:
|
|
409
|
+
missing_security.append(
|
|
410
|
+
f"{contract_path.relative_to(REPO_ROOT)}: "
|
|
411
|
+
f"API contract missing explicit security declaration. "
|
|
412
|
+
f"Add x-artifact-metadata.api.operations[].security"
|
|
413
|
+
)
|
|
414
|
+
|
|
415
|
+
if missing_security:
|
|
416
|
+
soft_fail_or_fail(
|
|
417
|
+
f"Found {len(missing_security)} operations without explicit security",
|
|
418
|
+
missing_security
|
|
419
|
+
)
|
|
420
|
+
|
|
421
|
+
|
|
422
|
+
@pytest.mark.tester
|
|
423
|
+
@pytest.mark.security
|
|
424
|
+
def test_secured_operations_have_security_acceptance():
|
|
425
|
+
"""
|
|
426
|
+
SPEC-TESTER-SEC-0003: Secured operations must have SEC/RLS acceptance coverage
|
|
427
|
+
|
|
428
|
+
Given: Contract schemas with security requirements
|
|
429
|
+
When: Checking acceptance_refs
|
|
430
|
+
Then: At least one acceptance criteria must use SEC or RLS harness
|
|
431
|
+
|
|
432
|
+
SEC harness: Security-focused acceptance tests
|
|
433
|
+
RLS harness: Row-Level Security acceptance tests
|
|
434
|
+
"""
|
|
435
|
+
contract_files = find_all_contract_schemas()
|
|
436
|
+
|
|
437
|
+
if not contract_files:
|
|
438
|
+
pytest.skip("No contract schema files found")
|
|
439
|
+
|
|
440
|
+
SECURITY_HARNESSES = {"SEC", "RLS"}
|
|
441
|
+
missing_coverage = []
|
|
442
|
+
|
|
443
|
+
for contract_path in contract_files:
|
|
444
|
+
contract = load_contract(contract_path)
|
|
445
|
+
if not contract:
|
|
446
|
+
continue
|
|
447
|
+
|
|
448
|
+
if not is_secured_operation(contract):
|
|
449
|
+
continue
|
|
450
|
+
|
|
451
|
+
acceptance_refs = get_acceptance_refs(contract)
|
|
452
|
+
|
|
453
|
+
# Check if any acceptance ref uses SEC or RLS harness
|
|
454
|
+
has_security_coverage = False
|
|
455
|
+
for ref in acceptance_refs:
|
|
456
|
+
try:
|
|
457
|
+
parsed = parse_acceptance_urn(ref)
|
|
458
|
+
if parsed.get("HARNESS") in SECURITY_HARNESSES:
|
|
459
|
+
has_security_coverage = True
|
|
460
|
+
break
|
|
461
|
+
except ValueError:
|
|
462
|
+
# Invalid URN format, skip
|
|
463
|
+
continue
|
|
464
|
+
|
|
465
|
+
if not has_security_coverage:
|
|
466
|
+
missing_coverage.append(
|
|
467
|
+
f"{contract_path.relative_to(REPO_ROOT)}: "
|
|
468
|
+
f"Secured operation missing SEC/RLS acceptance coverage. "
|
|
469
|
+
f"Current refs: {acceptance_refs or 'none'}"
|
|
470
|
+
)
|
|
471
|
+
|
|
472
|
+
if missing_coverage:
|
|
473
|
+
soft_fail_or_fail(
|
|
474
|
+
f"Found {len(missing_coverage)} secured operations without SEC/RLS acceptance",
|
|
475
|
+
missing_coverage
|
|
476
|
+
)
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
@pytest.mark.tester
|
|
480
|
+
@pytest.mark.security
|
|
481
|
+
def test_error_responses_have_no_sensitive_fields():
|
|
482
|
+
"""
|
|
483
|
+
SPEC-TESTER-SEC-0004: Error responses should not expose sensitive data.
|
|
484
|
+
|
|
485
|
+
Given: Contract schemas with error response definitions
|
|
486
|
+
When: Checking 4xx/5xx response schemas
|
|
487
|
+
Then: Response schemas should not contain sensitive field names
|
|
488
|
+
|
|
489
|
+
Mode: warning only (pytest.xfail, not hard fail)
|
|
490
|
+
|
|
491
|
+
Sensitive fields: password, secret, credential, ssn, api_key, private_key, token
|
|
492
|
+
Allowed exceptions: error_key, key_id, token_type
|
|
493
|
+
"""
|
|
494
|
+
contract_files = find_all_contract_schemas()
|
|
495
|
+
|
|
496
|
+
if not contract_files:
|
|
497
|
+
pytest.skip("No contract schema files found")
|
|
498
|
+
|
|
499
|
+
sensitive_exposures = []
|
|
500
|
+
|
|
501
|
+
for contract_path in contract_files:
|
|
502
|
+
contract = load_contract(contract_path)
|
|
503
|
+
if not contract:
|
|
504
|
+
continue
|
|
505
|
+
|
|
506
|
+
metadata = contract.get("x-artifact-metadata", {})
|
|
507
|
+
api = metadata.get("api", {})
|
|
508
|
+
operations = api.get("operations", [])
|
|
509
|
+
|
|
510
|
+
for op in operations:
|
|
511
|
+
if not isinstance(op, dict):
|
|
512
|
+
continue
|
|
513
|
+
|
|
514
|
+
responses = op.get("responses", {})
|
|
515
|
+
if not isinstance(responses, dict):
|
|
516
|
+
continue
|
|
517
|
+
|
|
518
|
+
# Check 4xx and 5xx responses
|
|
519
|
+
for status_code, response in responses.items():
|
|
520
|
+
if not isinstance(status_code, str):
|
|
521
|
+
continue
|
|
522
|
+
|
|
523
|
+
# Only check error responses (4xx, 5xx)
|
|
524
|
+
if not (status_code.startswith("4") or status_code.startswith("5")):
|
|
525
|
+
continue
|
|
526
|
+
|
|
527
|
+
if not isinstance(response, dict):
|
|
528
|
+
continue
|
|
529
|
+
|
|
530
|
+
schema = response.get("schema", {})
|
|
531
|
+
if not schema:
|
|
532
|
+
continue
|
|
533
|
+
|
|
534
|
+
# Handle $ref - can be string or dict with $ref key
|
|
535
|
+
if isinstance(schema, str):
|
|
536
|
+
resolved = _resolve_schema_ref(contract, schema)
|
|
537
|
+
if resolved:
|
|
538
|
+
schema = resolved
|
|
539
|
+
else:
|
|
540
|
+
continue # Can't resolve, skip
|
|
541
|
+
elif isinstance(schema, dict) and "$ref" in schema:
|
|
542
|
+
resolved = _resolve_schema_ref(contract, schema["$ref"])
|
|
543
|
+
if resolved:
|
|
544
|
+
schema = resolved
|
|
545
|
+
|
|
546
|
+
# Extract all field names from the schema
|
|
547
|
+
field_names = _extract_field_names(schema, contract)
|
|
548
|
+
|
|
549
|
+
# Check for sensitive fields
|
|
550
|
+
for field in field_names:
|
|
551
|
+
field_lower = field.lower()
|
|
552
|
+
# Check if field matches sensitive patterns
|
|
553
|
+
for sensitive in SENSITIVE_FIELDS:
|
|
554
|
+
if sensitive in field_lower and field_lower not in ALLOWED_IN_ERRORS:
|
|
555
|
+
op_desc = f"{op.get('method', '?')} {op.get('path', '?')}"
|
|
556
|
+
sensitive_exposures.append(
|
|
557
|
+
f"{contract_path.relative_to(REPO_ROOT)} [{op_desc}] {status_code}: "
|
|
558
|
+
f"Error response contains potentially sensitive field '{field}'"
|
|
559
|
+
)
|
|
560
|
+
break
|
|
561
|
+
|
|
562
|
+
if sensitive_exposures:
|
|
563
|
+
# This is warning-only mode, always xfail
|
|
564
|
+
pytest.xfail(
|
|
565
|
+
f"[WARNING] Found {len(sensitive_exposures)} error responses with potentially sensitive fields:\n" +
|
|
566
|
+
"\n".join(f" {exp}" for exp in sensitive_exposures[:10]) +
|
|
567
|
+
(f"\n ... and {len(sensitive_exposures) - 10} more" if len(sensitive_exposures) > 10 else "") +
|
|
568
|
+
"\n\nReview these fields to ensure no sensitive data is exposed in error responses."
|
|
569
|
+
)
|
|
@@ -7,6 +7,7 @@ Tests ensure contract directories align with URN conventions.
|
|
|
7
7
|
import pytest
|
|
8
8
|
import re
|
|
9
9
|
from pathlib import Path
|
|
10
|
+
from typing import Optional
|
|
10
11
|
|
|
11
12
|
# Path constants
|
|
12
13
|
REPO_ROOT = Path(__file__).resolve().parents[4]
|
|
@@ -198,3 +199,83 @@ def test_contract_files_are_valid_formats():
|
|
|
198
199
|
(f"\n ... and {len(invalid_files) - 10} more" if len(invalid_files) > 10 else "") +
|
|
199
200
|
f"\n Allowed: {', '.join(sorted(allowed_extensions))}"
|
|
200
201
|
)
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def contract_urn_to_path(contract_urn: str) -> Optional[Path]:
|
|
205
|
+
"""
|
|
206
|
+
Convert contract URN to expected file path.
|
|
207
|
+
|
|
208
|
+
Pattern: contract:{theme}:{domain}.{facet} → contracts/{theme}/{domain}/{facet}.schema.json
|
|
209
|
+
|
|
210
|
+
Examples:
|
|
211
|
+
contract:commons:player.identity → contracts/commons/player/identity.schema.json
|
|
212
|
+
contract:mechanic:decision.choice → contracts/mechanic/decision/choice.schema.json
|
|
213
|
+
contract:match:dilemma:current → contracts/match/dilemma/current.schema.json
|
|
214
|
+
"""
|
|
215
|
+
if not contract_urn or contract_urn == "null":
|
|
216
|
+
return None
|
|
217
|
+
if not contract_urn.startswith("contract:"):
|
|
218
|
+
return None
|
|
219
|
+
|
|
220
|
+
# Remove "contract:" prefix
|
|
221
|
+
urn_without_prefix = contract_urn[9:]
|
|
222
|
+
|
|
223
|
+
# Split by colon
|
|
224
|
+
parts = urn_without_prefix.split(":")
|
|
225
|
+
if len(parts) < 2:
|
|
226
|
+
return None
|
|
227
|
+
|
|
228
|
+
# First part is theme
|
|
229
|
+
theme = parts[0]
|
|
230
|
+
|
|
231
|
+
# Remaining parts form domain.facet (join with : if multiple)
|
|
232
|
+
domain_facet = ":".join(parts[1:])
|
|
233
|
+
|
|
234
|
+
# Convert domain.facet to domain/facet path (dots become slashes)
|
|
235
|
+
path_parts = domain_facet.replace(".", "/")
|
|
236
|
+
|
|
237
|
+
# Also convert colons to slashes for multi-level URNs
|
|
238
|
+
path_parts = path_parts.replace(":", "/")
|
|
239
|
+
|
|
240
|
+
return CONTRACTS_DIR / theme / f"{path_parts}.schema.json"
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
@pytest.mark.platform
|
|
244
|
+
def test_wagon_produce_contracts_exist(wagon_manifests):
|
|
245
|
+
"""
|
|
246
|
+
SPEC-PLATFORM-CONTRACTS-0006: All wagon produce contracts have schema files
|
|
247
|
+
|
|
248
|
+
Given: Wagon manifests with produce[] declarations
|
|
249
|
+
When: Checking for declared contract URNs
|
|
250
|
+
Then: Each contract:* URN resolves to an existing .schema.json file
|
|
251
|
+
|
|
252
|
+
This ensures the planner's intent (wagon declares contract) matches
|
|
253
|
+
the tester's reality (contract file exists).
|
|
254
|
+
"""
|
|
255
|
+
missing_contracts = []
|
|
256
|
+
|
|
257
|
+
for manifest_path, manifest in wagon_manifests:
|
|
258
|
+
wagon_slug = manifest.get("wagon", "unknown")
|
|
259
|
+
|
|
260
|
+
for produce_item in manifest.get("produce", []):
|
|
261
|
+
contract_urn = produce_item.get("contract")
|
|
262
|
+
|
|
263
|
+
if not contract_urn or contract_urn == "null":
|
|
264
|
+
continue
|
|
265
|
+
|
|
266
|
+
expected_path = contract_urn_to_path(contract_urn)
|
|
267
|
+
|
|
268
|
+
if expected_path and not expected_path.exists():
|
|
269
|
+
artifact_name = produce_item.get("name", "?")
|
|
270
|
+
missing_contracts.append(
|
|
271
|
+
f"wagon:{wagon_slug} → {contract_urn}\n"
|
|
272
|
+
f" Artifact: {artifact_name}\n"
|
|
273
|
+
f" Expected: {expected_path.relative_to(REPO_ROOT)}"
|
|
274
|
+
)
|
|
275
|
+
|
|
276
|
+
if missing_contracts:
|
|
277
|
+
pytest.fail(
|
|
278
|
+
f"Found {len(missing_contracts)} wagon produce declarations without contract files:\n\n" +
|
|
279
|
+
"\n\n".join(missing_contracts[:10]) +
|
|
280
|
+
(f"\n\n... and {len(missing_contracts) - 10} more" if len(missing_contracts) > 10 else "")
|
|
281
|
+
)
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
atdd/__init__.py,sha256=-S8i9OahH-t9FJkPn6nprxipnjVum3rLeVsCS74T6eY,156
|
|
2
2
|
atdd/__main__.py,sha256=B0sXDQLjFN9GowTlXo4NMWwPZPjDsrT8Frq7DnbdOD8,77
|
|
3
|
-
atdd/cli.py,sha256=
|
|
3
|
+
atdd/cli.py,sha256=osZ_WFMbp3BFGgH-K7vApnoeulgeSU7-z6LB86-htnI,14534
|
|
4
4
|
atdd/conftest.py,sha256=Fj3kIhCETbj2QBCIjySBgdS3stKNRZcZzKTJr7A4LaQ,5300
|
|
5
5
|
atdd/version_check.py,sha256=B9MbbxO_sJrEC3fxFJhTlOIkLLTRQCDO1_8ec1KvuWY,3540
|
|
6
6
|
atdd/coach/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -12,13 +12,13 @@ atdd/coach/commands/gate.py,sha256=_V2GypqoGixTs_kLWxFF3HgEt-Wi2r6Iv0YL75yWrWo,5
|
|
|
12
12
|
atdd/coach/commands/infer_governance_status.py,sha256=91-VnI64mOzijc1Cgkmr7cnNCir2-z2ITA2-SGwk3TU,4473
|
|
13
13
|
atdd/coach/commands/initializer.py,sha256=Hli3hlL_aHnuDIzK0lHzE6KjG2QGvl2E2TvjmYoPPNE,6069
|
|
14
14
|
atdd/coach/commands/interface.py,sha256=PPCwICFNN4ddPqucUATIiBrfEkDO66MZbYQkwNu6lm4,40459
|
|
15
|
-
atdd/coach/commands/inventory.py,sha256=
|
|
15
|
+
atdd/coach/commands/inventory.py,sha256=Xakb6U3SfnEcR7rhZ0Wg8SK5frg-oPE7C0fCvE-VoH4,20923
|
|
16
16
|
atdd/coach/commands/migration.py,sha256=KlRXXM1O-ZqfwGOWxN5C_gxfTttbXUG_yq9pOLYsGnQ,8119
|
|
17
17
|
atdd/coach/commands/registry.py,sha256=r1QWg841eK6bS4vEbYEviylDCpFkInUVMTsf5h4ArrQ,58344
|
|
18
18
|
atdd/coach/commands/session.py,sha256=MhuWXd5TR6bB3w0t8vANeZx3L476qwLT6EUQMwg-wQA,14268
|
|
19
19
|
atdd/coach/commands/sync.py,sha256=dA6BYsebTT1zOMXgea6DqawkvjU_N-JJJMFj9GP2onk,12259
|
|
20
20
|
atdd/coach/commands/test_interface.py,sha256=a7ut2Hhk0PnQ5LfJZkoQwfkfkVuB5OHA4QBwOS0-jcg,16870
|
|
21
|
-
atdd/coach/commands/test_runner.py,sha256=
|
|
21
|
+
atdd/coach/commands/test_runner.py,sha256=Lutclc8Qr8BmLBvAYshXAjf8ynUM4eJgHF8GLPa1Usw,3929
|
|
22
22
|
atdd/coach/commands/traceability.py,sha256=SYl0dRB_nQZJKe1IOCHxX193vaxOChVjnh6hcqcSOB0,163581
|
|
23
23
|
atdd/coach/commands/tests/__init__.py,sha256=svBEnRruMuXuaYi3lFxJlzHNWdZ5vlBTaBpjXupaxDA,33
|
|
24
24
|
atdd/coach/commands/tests/test_telemetry_array_validation.py,sha256=WK5ZXvR1avlzX7mSX84dmxxLFnw7eQB4jtjo7bHG7aE,8464
|
|
@@ -27,13 +27,14 @@ atdd/coach/overlays/__init__.py,sha256=2lMiMSgfLJ3YHLpbzNI5B88AdQxiMEwjIfsWWb8t3
|
|
|
27
27
|
atdd/coach/overlays/claude.md,sha256=33mhpqhmsRhCtdWlU7cMXAJDsaVra9uBBK8URV8OtQA,101
|
|
28
28
|
atdd/coach/schemas/config.schema.json,sha256=xzct7gBoPTIGh3NFPSGtfW0zIiyFdHDZkvjuy1qgAqA,951
|
|
29
29
|
atdd/coach/schemas/manifest.schema.json,sha256=WO13-YF_FgH1awh96khCtk-112b6XSC24anlY3B7GjY,2885
|
|
30
|
-
atdd/coach/templates/ATDD.md,sha256=
|
|
31
|
-
atdd/coach/templates/SESSION-TEMPLATE.md,sha256=
|
|
32
|
-
atdd/coach/utils/__init__.py,sha256=
|
|
30
|
+
atdd/coach/templates/ATDD.md,sha256=DnJ1jzdoUWATEwvnweh2DCfoCepLMC_f35wlSvKF1FI,12153
|
|
31
|
+
atdd/coach/templates/SESSION-TEMPLATE.md,sha256=tZ4Bgf4MZ6uHXwhy1C43SGpTiYm-_AvmFS6QxA2ISng,9140
|
|
32
|
+
atdd/coach/utils/__init__.py,sha256=7Jbo-heJEKSAn6I0s35z_2S4R8qGZ48PL6a2IntcNYg,148
|
|
33
|
+
atdd/coach/utils/repo.py,sha256=9rZeAf1b7-modoTiPZAwtjnoiI8YyHWIZhpqXvkJ7Qo,3104
|
|
33
34
|
atdd/coach/utils/graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
34
35
|
atdd/coach/utils/graph/urn.py,sha256=O2AHIB_CmmMUvXzyejc_oFReNW_rOcw7m4qaqSYcnNQ,33558
|
|
35
36
|
atdd/coach/validators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
36
|
-
atdd/coach/validators/shared_fixtures.py,sha256=
|
|
37
|
+
atdd/coach/validators/shared_fixtures.py,sha256=ZxpTYM1ERL9sJXNl_IbtBl8MHnGBx1IhKT091Jh_gH4,11920
|
|
37
38
|
atdd/coach/validators/test_enrich_wagon_registry.py,sha256=WeTwYJqoNY6mEYc-QAvQo7YVagSOjaNKxB6Q6dpWqIM,6561
|
|
38
39
|
atdd/coach/validators/test_registry.py,sha256=ffN70yA_1xxL3R8gdpGbY2M8dQXyuajIZhBZ-ylNiNs,17845
|
|
39
40
|
atdd/coach/validators/test_session_validation.py,sha256=Tx5HfQ0mZfVLypLe4wbKTracMc9nVRCoxDobOko6xwU,39104
|
|
@@ -124,6 +125,7 @@ atdd/tester/conventions/filename.convention.yaml,sha256=WywcPhdxIZSoY6F6OSx5v3_A
|
|
|
124
125
|
atdd/tester/conventions/migration.convention.yaml,sha256=xqZZrMKJVlBQdGab5mJF2enChLebGGPQkd-ioDWFQtw,20075
|
|
125
126
|
atdd/tester/conventions/red.convention.yaml,sha256=S0mc4JpvNuOSYhqmt6bm0tCuDTCFYFRsSBru3XMwXgs,28901
|
|
126
127
|
atdd/tester/conventions/routing.convention.yaml,sha256=gRJrnQd-cXCUGby0CeaJdRmrFXZx70kl-FxXCoXiu5Y,2751
|
|
128
|
+
atdd/tester/conventions/security.convention.yaml,sha256=3QjQZqw1Iw8g6jUmuKWkOqcZ1n1sc1OJDvKD0eTk070,5445
|
|
127
129
|
atdd/tester/conventions/telemetry.convention.yaml,sha256=n8Q8x7BtiDnDf920EWiq3xBzaf8tYev7WIi1pQ27O6o,16119
|
|
128
130
|
atdd/tester/schemas/a11y.tmpl.json,sha256=o-i8-fQeLxqTPgeDysOQDvyoHdlcCDOlUGtPG1-_Wro,1088
|
|
129
131
|
atdd/tester/schemas/artifact.schema.json,sha256=CVmhmz9dsSsZW4HYS_wohn-kzq_mx4YPYunwrBHtbQg,5755
|
|
@@ -163,7 +165,8 @@ atdd/tester/validators/test_acceptance_urn_filename_mapping.py,sha256=sLopDoF78h
|
|
|
163
165
|
atdd/tester/validators/test_acceptance_urn_separator.py,sha256=PnXaISoOVgr21cGgbcFnm8V9re1nv0ctatJaIqJk6KE,6847
|
|
164
166
|
atdd/tester/validators/test_artifact_naming_category.py,sha256=V7AOamSUIbpl6bO_Qj3h_46vc2hsh6OVPCMco1H7zTc,12541
|
|
165
167
|
atdd/tester/validators/test_contract_schema_compliance.py,sha256=8LnZotoND2UNdZy9XdoNnTcBdMjyDsHtgjKLZ4bFeX8,26103
|
|
166
|
-
atdd/tester/validators/
|
|
168
|
+
atdd/tester/validators/test_contract_security.py,sha256=fKNaevmbSAoGyOKBhPnHw5xSGXptG0ciS0__qtO9Qac,19541
|
|
169
|
+
atdd/tester/validators/test_contracts_structure.py,sha256=b930uB96iXK9xF080pATkzBWtP4mvRenkOeA1RdhJOE,9784
|
|
167
170
|
atdd/tester/validators/test_coverage_adequacy.py,sha256=1UCJ0-7xnkvcdAagfvB7dT4ZzPEyZC5mMxA3z2g4yGA,27026
|
|
168
171
|
atdd/tester/validators/test_dual_ac_reference.py,sha256=3yksy4xWta7kC1QMJQvx-a-cBXNSpY4FNpi0K8H3CYU,8888
|
|
169
172
|
atdd/tester/validators/test_fixture_validity.py,sha256=nmkPOWXQFieyGC-hXtIhMm_j1wAW-kIRJsGC3QDB1hM,12071
|
|
@@ -178,9 +181,9 @@ atdd/tester/validators/test_red_supabase_layer_structure.py,sha256=26cnzPZAwSFy0
|
|
|
178
181
|
atdd/tester/validators/test_telemetry_structure.py,sha256=hIUnU2WU-8PNIg9EVHe2fnUdIQKIOUm5AWEtCBUXLVk,22467
|
|
179
182
|
atdd/tester/validators/test_typescript_test_naming.py,sha256=E-TyGv_GVlTfsbyuxrtv9sOWSZS_QcpH6rrJFbWoeeU,11280
|
|
180
183
|
atdd/tester/validators/test_typescript_test_structure.py,sha256=eV89SD1RaKtchBZupqhnJmaruoROosf3LwB4Fwe4UJI,2612
|
|
181
|
-
atdd-0.
|
|
182
|
-
atdd-0.
|
|
183
|
-
atdd-0.
|
|
184
|
-
atdd-0.
|
|
185
|
-
atdd-0.
|
|
186
|
-
atdd-0.
|
|
184
|
+
atdd-0.3.1.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
|
|
185
|
+
atdd-0.3.1.dist-info/METADATA,sha256=xAWoy8Xg4lpkMoYfxoNn6gFJxUuX8lHE8fhT5Ns3DOw,7081
|
|
186
|
+
atdd-0.3.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
187
|
+
atdd-0.3.1.dist-info/entry_points.txt,sha256=-C3yrA1WQQfN3iuGmSzPapA5cKVBEYU5Q1HUffSJTbY,38
|
|
188
|
+
atdd-0.3.1.dist-info/top_level.txt,sha256=VKkf6Uiyrm4RS6ULCGM-v8AzYN8K2yg8SMqwJLoO-xs,5
|
|
189
|
+
atdd-0.3.1.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|