delimit-cli 2.3.2 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.dockerignore +7 -0
- package/.github/workflows/ci.yml +22 -0
- package/CHANGELOG.md +33 -0
- package/CODE_OF_CONDUCT.md +48 -0
- package/CONTRIBUTING.md +67 -0
- package/Dockerfile +9 -0
- package/LICENSE +21 -0
- package/README.md +51 -130
- package/SECURITY.md +42 -0
- package/adapters/codex-forge.js +107 -0
- package/adapters/codex-jamsons.js +142 -0
- package/adapters/codex-security.js +94 -0
- package/adapters/gemini-forge.js +120 -0
- package/adapters/gemini-jamsons.js +152 -0
- package/bin/delimit-cli.js +52 -2
- package/bin/delimit-setup.js +258 -0
- package/gateway/ai/backends/__init__.py +0 -0
- package/gateway/ai/backends/async_utils.py +21 -0
- package/gateway/ai/backends/deploy_bridge.py +150 -0
- package/gateway/ai/backends/gateway_core.py +261 -0
- package/gateway/ai/backends/generate_bridge.py +38 -0
- package/gateway/ai/backends/governance_bridge.py +196 -0
- package/gateway/ai/backends/intel_bridge.py +59 -0
- package/gateway/ai/backends/memory_bridge.py +93 -0
- package/gateway/ai/backends/ops_bridge.py +137 -0
- package/gateway/ai/backends/os_bridge.py +82 -0
- package/gateway/ai/backends/repo_bridge.py +117 -0
- package/gateway/ai/backends/ui_bridge.py +118 -0
- package/gateway/ai/backends/vault_bridge.py +129 -0
- package/gateway/ai/server.py +1182 -0
- package/gateway/core/__init__.py +3 -0
- package/gateway/core/__pycache__/__init__.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/auto_baseline.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/ci_formatter.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/contract_ledger.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/dependency_graph.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/dependency_manifest.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/diff_engine_v2.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/event_backbone.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/event_schema.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/explainer.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/gateway.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/gateway_v2.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/gateway_v3.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/impact_analyzer.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/policy_engine.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/registry.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/registry_v2.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/registry_v3.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/semver_classifier.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/spec_detector.cpython-310.pyc +0 -0
- package/gateway/core/__pycache__/surface_bridge.cpython-310.pyc +0 -0
- package/gateway/core/auto_baseline.py +304 -0
- package/gateway/core/ci_formatter.py +283 -0
- package/gateway/core/complexity_analyzer.py +386 -0
- package/gateway/core/contract_ledger.py +345 -0
- package/gateway/core/dependency_graph.py +218 -0
- package/gateway/core/dependency_manifest.py +223 -0
- package/gateway/core/diff_engine_v2.py +477 -0
- package/gateway/core/diff_engine_v2.py.bak +426 -0
- package/gateway/core/event_backbone.py +268 -0
- package/gateway/core/event_schema.py +258 -0
- package/gateway/core/explainer.py +438 -0
- package/gateway/core/gateway.py +128 -0
- package/gateway/core/gateway_v2.py +154 -0
- package/gateway/core/gateway_v3.py +224 -0
- package/gateway/core/impact_analyzer.py +163 -0
- package/gateway/core/policies/default.yml +13 -0
- package/gateway/core/policies/relaxed.yml +48 -0
- package/gateway/core/policies/strict.yml +55 -0
- package/gateway/core/policy_engine.py +464 -0
- package/gateway/core/registry.py +52 -0
- package/gateway/core/registry_v2.py +132 -0
- package/gateway/core/registry_v3.py +134 -0
- package/gateway/core/semver_classifier.py +152 -0
- package/gateway/core/spec_detector.py +130 -0
- package/gateway/core/surface_bridge.py +307 -0
- package/gateway/core/zero_spec/__init__.py +4 -0
- package/gateway/core/zero_spec/__pycache__/__init__.cpython-310.pyc +0 -0
- package/gateway/core/zero_spec/__pycache__/detector.cpython-310.pyc +0 -0
- package/gateway/core/zero_spec/__pycache__/express_extractor.cpython-310.pyc +0 -0
- package/gateway/core/zero_spec/__pycache__/fastapi_extractor.cpython-310.pyc +0 -0
- package/gateway/core/zero_spec/__pycache__/nestjs_extractor.cpython-310.pyc +0 -0
- package/gateway/core/zero_spec/detector.py +353 -0
- package/gateway/core/zero_spec/express_extractor.py +483 -0
- package/gateway/core/zero_spec/fastapi_extractor.py +254 -0
- package/gateway/core/zero_spec/nestjs_extractor.py +369 -0
- package/gateway/tasks/__init__.py +1 -0
- package/gateway/tasks/__pycache__/__init__.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/check_policy.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/check_policy_v2.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/check_policy_v3.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/explain_diff.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/explain_diff_v2.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/validate_api.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/validate_api_v2.cpython-310.pyc +0 -0
- package/gateway/tasks/__pycache__/validate_api_v3.cpython-310.pyc +0 -0
- package/gateway/tasks/check_policy.py +177 -0
- package/gateway/tasks/check_policy_v2.py +255 -0
- package/gateway/tasks/check_policy_v3.py +255 -0
- package/gateway/tasks/explain_diff.py +305 -0
- package/gateway/tasks/explain_diff_v2.py +267 -0
- package/gateway/tasks/validate_api.py +131 -0
- package/gateway/tasks/validate_api_v2.py +208 -0
- package/gateway/tasks/validate_api_v3.py +163 -0
- package/package.json +3 -3
- package/adapters/codex-skill.js +0 -87
- package/adapters/cursor-extension.js +0 -190
- package/adapters/gemini-action.js +0 -93
- package/adapters/openai-function.js +0 -112
- package/adapters/xai-plugin.js +0 -151
- package/test-decision-engine.js +0 -181
- package/test-hook.js +0 -27
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Any, Dict, List, Optional
|
|
4
|
+
from schemas.base import TaskRequest, TaskResponse, ErrorResponse, TaskStatus, ErrorDetails
|
|
5
|
+
from .registry import task_registry
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger(__name__)
|
|
8
|
+
|
|
9
|
+
class Gateway:
    """Main gateway implementing V10 architecture with advisor recommendations.

    Single entry point that builds a typed TaskRequest, resolves a handler
    from the task registry, enforces per-file size limits, executes the
    handler, and always returns a JSON-serializable response dict.
    """

    def __init__(self, max_file_size: int = 10 * 1024 * 1024, timeout: int = 30):
        """Create a gateway bound to the global task registry.

        Args:
            max_file_size: Per-file size cap in bytes (default 10 MiB).
            timeout: Execution budget in seconds (stored; not enforced in
                this class — presumably consumed by handlers. TODO confirm).
        """
        self.registry = task_registry
        self.max_file_size = max_file_size
        self.timeout = timeout
        self._load_tasks()

    def _load_tasks(self):
        """Import task modules so their handlers self-register with the registry."""
        try:
            import tasks.validate_api
            import tasks.check_policy
            import tasks.explain_diff
        except ImportError as e:
            # Missing task modules are tolerated: the registry simply will
            # not advertise them, and run() reports "unknown_task" instead.
            logger.warning(f"Could not load all tasks: {e}")

    def run(self, task: str, files: List[str], **kwargs) -> Dict[str, Any]:
        """Main entry point - the single gateway function.

        Args:
            task: Registered task name.
            files: Paths handed to the task handler; each must exist and be
                within the configured size limit.
            **kwargs: Extra task configuration; "correlation_id" and
                "version" are also extracted into request metadata.

        Returns:
            A TaskResponse dict on success or handler failure, or an
            ErrorResponse dict for request/validation problems. Never raises.
        """
        start_time = time.time()

        # Build request with Codex's recommendation for strict typing
        try:
            request = TaskRequest(
                task=task,
                files=files,
                config=kwargs,
                correlation_id=kwargs.get("correlation_id"),
                version=kwargs.get("version", "v1")
            )
        except Exception as e:
            return self._error_response("invalid_request", str(e))

        # Check if task exists
        if not self.registry.has_task(task):
            return self._error_response(
                "unknown_task",
                f"Task '{task}' not recognized",
                available_tasks=self.registry.list_tasks()
            )

        # Get handler - use None for default version if v1 requested
        version_to_use = None if request.version == "v1" else request.version
        handler = self.registry.get_handler(task, version_to_use)
        if not handler:
            return self._error_response(
                "version_not_found",
                f"Version {request.version} not found for task '{task}'"
            )

        # Execute with timeout and error handling per Codex's guardrails
        try:
            # Validate file constraints before invoking the handler.
            for file_path in files:
                if not self._validate_file(file_path):
                    return self._error_response(
                        "file_validation_failed",
                        f"File validation failed for: {file_path}"
                    )

            # Execute task
            result = handler(request)

            # Build response with observability (Codex requirement #5)
            duration_ms = int((time.time() - start_time) * 1000)

            response = TaskResponse(
                status=TaskStatus.SUCCESS,
                task=task,
                result=result,
                duration_ms=duration_ms,
                correlation_id=request.correlation_id
            )

            logger.info(f"Task {task} completed in {duration_ms}ms")
            return response.model_dump(mode='json')

        except Exception as e:
            duration_ms = int((time.time() - start_time) * 1000)
            logger.error(f"Task {task} failed after {duration_ms}ms: {e}")

            return TaskResponse(
                status=TaskStatus.ERROR,
                task=task,
                errors=[ErrorDetails(
                    code="execution_failed",
                    message=str(e),
                    retryable=True
                )],
                duration_ms=duration_ms,
                correlation_id=request.correlation_id
            ).model_dump(mode='json')

    def _validate_file(self, file_path: str) -> bool:
        """Return True iff *file_path* exists and is within the size limit."""
        import os
        try:
            if not os.path.exists(file_path):
                return False
            return os.path.getsize(file_path) <= self.max_file_size
        except OSError:
            # Stat/permission errors count as validation failures. (Fixed:
            # a bare `except:` here also swallowed SystemExit and
            # KeyboardInterrupt.)
            return False

    def _error_response(self, code: str, message: str, **kwargs) -> Dict[str, Any]:
        """Build standardized error response (Codex requirement #2)."""
        return ErrorResponse(
            code=code,
            message=message,
            details=kwargs.get("details"),
            available_tasks=kwargs.get("available_tasks")
        ).model_dump(mode='json')
|
|
122
|
+
|
|
123
|
+
# Global gateway instance shared by the module-level entry point below.
gateway = Gateway()


def delimit_run(task: str, files: List[str], **kwargs) -> Dict[str, Any]:
    """The main gateway function - V10 architecture entry point.

    Thin module-level wrapper that forwards to the shared Gateway singleton.
    """
    return gateway.run(task, files, **kwargs)
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Hardened Gateway with Evidence Contract
|
|
3
|
+
V12 Core Hardening
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import time
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Optional, Dict, Any
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
from schemas.evidence import TaskEvidence, Decision
|
|
12
|
+
from schemas.requests import ValidateAPIRequest, CheckPolicyRequest, ExplainDiffRequest
|
|
13
|
+
from .registry_v2 import task_registry
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class HardenedGateway:
    """
    V12 Hardened Gateway with strict evidence contract.

    Typed run_* methods return TaskEvidence; run() keeps the legacy
    kwargs-based calling convention and returns a plain dict.
    """

    def __init__(self):
        self.registry = task_registry
        self._load_tasks()

    def _load_tasks(self):
        """Load all task modules to register handlers"""
        try:
            # Import refactored tasks with evidence contract
            import tasks.validate_api_v2
            import tasks.check_policy_v2
            import tasks.explain_diff_v2
        except ImportError as e:
            # Best effort: an unavailable task module surfaces later as a
            # missing handler, not as an import-time crash.
            logger.warning(f"Could not load all tasks: {e}")

    def run_validate_api(self, request: "ValidateAPIRequest") -> "TaskEvidence":
        """Execute validate-api task with typed request/response.

        Raises:
            ValueError: If no handler is registered for the requested version.
        """
        handler = self.registry.get_handler("validate-api", request.version)
        if not handler:
            raise ValueError(f"Task handler not found: validate-api:{request.version or 'latest'}")

        return handler(request)

    def run_check_policy(self, request: "CheckPolicyRequest") -> "TaskEvidence":
        """Execute check-policy task with typed request/response.

        Raises:
            ValueError: If no handler is registered for the requested version.
        """
        handler = self.registry.get_handler("check-policy", request.version)
        if not handler:
            raise ValueError(f"Task handler not found: check-policy:{request.version or 'latest'}")

        return handler(request)

    def run_explain_diff(self, request: "ExplainDiffRequest") -> "TaskEvidence":
        """Execute explain-diff task with typed request/response.

        Raises:
            ValueError: If no handler is registered for the requested version.
        """
        handler = self.registry.get_handler("explain-diff", request.version)
        if not handler:
            raise ValueError(f"Task handler not found: explain-diff:{request.version or 'latest'}")

        return handler(request)

    def run(self, task: str, **kwargs) -> Dict[str, Any]:
        """
        Main gateway entry point - maintains backward compatibility.
        Returns Evidence Contract as dict. Never raises: any failure is
        converted into a fail-decision evidence dict.
        """
        start_time = time.time()

        try:
            # Route to typed handlers based on task
            if task == "validate-api":
                files = kwargs.get("files", [])
                old_spec = kwargs.get("old_spec") or (files[0] if len(files) > 0 else None)
                new_spec = kwargs.get("new_spec") or (files[1] if len(files) > 1 else None)
                if not old_spec or not new_spec:
                    # Fixed: the unguarded files[0]/files[1] indexing leaked
                    # "list index out of range" into the error summary.
                    raise ValueError("validate-api requires two files: old_spec and new_spec")
                request = ValidateAPIRequest(
                    task=task,
                    old_spec=old_spec,
                    new_spec=new_spec,
                    version=kwargs.get("version"),
                    correlation_id=kwargs.get("correlation_id")
                )
                evidence = self.run_validate_api(request)

            elif task == "check-policy":
                request = CheckPolicyRequest(
                    task=task,
                    spec_files=kwargs.get("spec_files") or kwargs.get("files", []),
                    policy_file=kwargs.get("policy_file"),
                    policy_inline=kwargs.get("policy_inline"),
                    version=kwargs.get("version"),
                    correlation_id=kwargs.get("correlation_id")
                )
                evidence = self.run_check_policy(request)

            elif task == "explain-diff":
                files = kwargs.get("files", [])
                old_spec = kwargs.get("old_spec") or (files[0] if len(files) > 0 else None)
                new_spec = kwargs.get("new_spec") or (files[1] if len(files) > 1 else None)
                if not old_spec or not new_spec:
                    raise ValueError("explain-diff requires two files: old_spec and new_spec")
                request = ExplainDiffRequest(
                    task=task,
                    old_spec=old_spec,
                    new_spec=new_spec,
                    detail_level=kwargs.get("detail_level", "medium"),
                    version=kwargs.get("version"),
                    correlation_id=kwargs.get("correlation_id")
                )
                evidence = self.run_explain_diff(request)

            else:
                # Unknown task - return error evidence
                return {
                    "task": task,
                    "task_version": "unknown",
                    "decision": "fail",
                    "exit_code": 1,
                    "summary": f"Unknown task: {task}",
                    "violations": [{
                        "rule": "task_exists",
                        "severity": "high",
                        "message": f"Task '{task}' not found"
                    }]
                }

            # Add timing
            duration_ms = int((time.time() - start_time) * 1000)
            evidence_dict = evidence.model_dump(mode='json')
            evidence_dict["duration_ms"] = duration_ms

            return evidence_dict

        except Exception as e:
            logger.error(f"Task execution failed: {e}")

            # Return error evidence
            return {
                "task": task,
                "task_version": "error",
                "decision": "fail",
                "exit_code": 1,
                "summary": f"Execution failed: {str(e)}",
                "violations": [{
                    "rule": "execution",
                    "severity": "high",
                    "message": str(e)
                }],
                "duration_ms": int((time.time() - start_time) * 1000)
            }
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
# Shared module-level gateway used by the compatibility wrapper below.
gateway = HardenedGateway()


def delimit_run(task: str, files: list = None, **kwargs) -> Dict[str, Any]:
    """
    Main entry point maintaining backward compatibility.

    Forwards to the module gateway and returns the Evidence Contract as a
    plain dictionary; a non-empty *files* list is folded into the kwargs.
    """
    if files:
        kwargs["files"] = files
    return gateway.run(task, **kwargs)
|
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Hardened Gateway with Evidence Contract - Final Version
|
|
3
|
+
V12 Complete Implementation
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import time
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Optional, Dict
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
from schemas.evidence import TaskEvidence, Decision, Violation, ViolationSeverity, Remediation
|
|
12
|
+
from schemas.requests_v2 import ValidateAPIRequest, CheckPolicyRequest, ExplainDiffRequest
|
|
13
|
+
from .registry_v3 import task_registry
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class HardenedGateway:
    """
    V12 Hardened Gateway with strict evidence contract.
    All paths - success, handler failure, bad input, unknown task -
    return TaskEvidence.
    """

    def __init__(self):
        self.registry = task_registry
        self._load_tasks()

    def _load_tasks(self):
        """Load all task modules to register handlers"""
        try:
            # Import refactored tasks with evidence contract
            import tasks.validate_api_v3
            import tasks.check_policy_v3
            import tasks.explain_diff_v2
        except ImportError as e:
            # Missing modules surface later as "handler not found" evidence.
            logger.warning(f"Could not load all tasks: {e}")

    def run_validate_api(self, request: "ValidateAPIRequest") -> "TaskEvidence":
        """Execute validate-api task with typed request/response"""
        handler = self.registry.get_handler("validate-api", request.version)
        if not handler:
            # Return error evidence instead of raising
            return self._create_error_evidence(
                "validate-api",
                f"Task handler not found: validate-api:{request.version or 'latest'}",
                request.correlation_id
            )

        try:
            return handler(request)
        except Exception as e:
            return self._create_error_evidence("validate-api", str(e), request.correlation_id)

    def run_check_policy(self, request: "CheckPolicyRequest") -> "TaskEvidence":
        """Execute check-policy task with typed request/response"""
        handler = self.registry.get_handler("check-policy", request.version)
        if not handler:
            return self._create_error_evidence(
                "check-policy",
                f"Task handler not found: check-policy:{request.version or 'latest'}",
                request.correlation_id
            )

        try:
            return handler(request)
        except Exception as e:
            return self._create_error_evidence("check-policy", str(e), request.correlation_id)

    def run_explain_diff(self, request: "ExplainDiffRequest") -> "TaskEvidence":
        """Execute explain-diff task with typed request/response"""
        handler = self.registry.get_handler("explain-diff", request.version)
        if not handler:
            return self._create_error_evidence(
                "explain-diff",
                f"Task handler not found: explain-diff:{request.version or 'latest'}",
                request.correlation_id
            )

        try:
            return handler(request)
        except Exception as e:
            return self._create_error_evidence("explain-diff", str(e), request.correlation_id)

    def _create_error_evidence(self, task: str, error_message: str, correlation_id: Optional[str] = None) -> "TaskEvidence":
        """Create proper TaskEvidence for errors - never return raw dicts"""
        return TaskEvidence(
            task=task,
            task_version="error",
            decision=Decision.FAIL,
            exit_code=1,
            violations=[
                Violation(
                    rule="execution_error",
                    severity=ViolationSeverity.HIGH,
                    message=error_message,
                    details={"error_type": "execution_failure"}
                )
            ],
            evidence=[],
            remediation=Remediation(
                summary="Task execution failed",
                steps=["Check input parameters", "Verify file paths exist", "Review error message"],
                documentation="https://docs.delimit.ai/troubleshooting"
            ),
            summary=f"Task execution failed: {error_message}",
            correlation_id=correlation_id,
            metrics={}
        )

    def run(self, task: str, **kwargs) -> dict:
        """
        Main gateway entry point - maintains backward compatibility.
        Returns Evidence Contract as dict.
        ALL PATHS RETURN TaskEvidence.

        Note: the return annotation was corrected from Dict[str, str] —
        the payload values include ints and lists, not only strings.
        """
        start_time = time.time()
        correlation_id = kwargs.get("correlation_id")

        try:
            # Route to typed handlers based on task
            if task == "validate-api":
                # Handle both old and new parameter styles
                files = kwargs.get("files", [])
                old_spec = kwargs.get("old_spec") or (files[0] if len(files) > 0 else None)
                new_spec = kwargs.get("new_spec") or (files[1] if len(files) > 1 else None)

                if not old_spec or not new_spec:
                    evidence = self._create_error_evidence(
                        task,
                        "validate-api requires two files: old_spec and new_spec",
                        correlation_id
                    )
                else:
                    request = ValidateAPIRequest(
                        task=task,
                        old_spec=old_spec,
                        new_spec=new_spec,
                        version=kwargs.get("version"),
                        correlation_id=correlation_id
                    )
                    evidence = self.run_validate_api(request)

            elif task == "check-policy":
                files = kwargs.get("spec_files") or kwargs.get("files", [])

                if not files:
                    evidence = self._create_error_evidence(
                        task,
                        "check-policy requires at least one spec file",
                        correlation_id
                    )
                else:
                    request = CheckPolicyRequest(
                        task=task,
                        spec_files=files,
                        policy_file=kwargs.get("policy_file"),
                        policy_inline=kwargs.get("policy_inline"),
                        version=kwargs.get("version"),
                        correlation_id=correlation_id
                    )
                    evidence = self.run_check_policy(request)

            elif task == "explain-diff":
                files = kwargs.get("files", [])
                old_spec = kwargs.get("old_spec") or (files[0] if len(files) > 0 else None)
                new_spec = kwargs.get("new_spec") or (files[1] if len(files) > 1 else None)

                if not old_spec or not new_spec:
                    evidence = self._create_error_evidence(
                        task,
                        "explain-diff requires two files: old_spec and new_spec",
                        correlation_id
                    )
                else:
                    request = ExplainDiffRequest(
                        task=task,
                        old_spec=old_spec,
                        new_spec=new_spec,
                        detail_level=kwargs.get("detail_level", "medium"),
                        version=kwargs.get("version"),
                        correlation_id=correlation_id
                    )
                    evidence = self.run_explain_diff(request)

            else:
                # Unknown task - return error evidence
                evidence = self._create_error_evidence(
                    task,
                    f"Unknown task: {task}. Available tasks: validate-api, check-policy, explain-diff",
                    correlation_id
                )

            # Add timing
            duration_ms = int((time.time() - start_time) * 1000)
            evidence_dict = evidence.model_dump(mode='json')
            evidence_dict["duration_ms"] = duration_ms

            return evidence_dict

        except Exception as e:
            logger.error(f"Task execution failed: {e}")

            # Always return TaskEvidence, never raw dict
            error_evidence = self._create_error_evidence(task, str(e), correlation_id)
            duration_ms = int((time.time() - start_time) * 1000)
            evidence_dict = error_evidence.model_dump(mode='json')
            evidence_dict["duration_ms"] = duration_ms

            return evidence_dict
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
# Global instance
gateway = HardenedGateway()


def delimit_run(task: str, files: list = None, **kwargs) -> dict:
    """
    Main entry point maintaining backward compatibility.
    Returns Evidence Contract as dictionary.
    ALL PATHS RETURN VALID TaskEvidence.

    Note: return annotation corrected from Dict[str, str] — the evidence
    dict carries ints and lists as values, not only strings.
    """
    if files:
        kwargs["files"] = files
    return gateway.run(task, **kwargs)
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Delimit Impact Analyzer
|
|
3
|
+
Determines downstream consumers affected by an API change
|
|
4
|
+
and produces informational impact summaries for CI output.
|
|
5
|
+
|
|
6
|
+
Per Jamsons Doctrine:
|
|
7
|
+
- Impact analysis is INFORMATIONAL ONLY
|
|
8
|
+
- NEVER affects CI pass/fail outcome
|
|
9
|
+
- Deterministic outputs
|
|
10
|
+
- Graceful degradation when no dependency data exists
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import logging
|
|
14
|
+
from typing import Any, Dict, List, Optional
|
|
15
|
+
|
|
16
|
+
from .dependency_graph import DependencyGraph
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger("delimit.impact_analyzer")
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ImpactAnalyzer:
    """Analyze the downstream impact of API changes.

    Purely informational per the Jamsons Doctrine: results never gate CI.
    """

    def __init__(self, graph: DependencyGraph):
        """Initialize with a dependency graph.

        Args:
            graph: Populated DependencyGraph instance.
        """
        self._graph = graph

    def analyze(self, api_name: str) -> Dict[str, Any]:
        """Build a deterministic impact summary for one changed API.

        Args:
            api_name: The API that changed.

        Returns:
            Dict with the API name, its downstream consumers, the consumer
            count, and whether any dependency data was available at all.
        """
        consumers = self._graph.get_consumers(api_name)
        return {
            "api": api_name,
            "downstream_services": consumers,
            "impact_count": len(consumers),
            "graph_available": not self._graph.is_empty(),
        }

    def analyze_multiple(self, api_names: List[str]) -> List[Dict[str, Any]]:
        """Analyze impact for several APIs, sorted by API name.

        Args:
            api_names: List of API names that changed.

        Returns:
            Impact summaries in deterministic (name-sorted) order.
        """
        return sorted(
            (self.analyze(name) for name in api_names),
            key=lambda summary: summary["api"],
        )

    def get_blast_radius(self, api_name: str) -> int:
        """Count the downstream services affected by a change.

        Args:
            api_name: The API that changed.

        Returns:
            Number of affected downstream services.
        """
        return len(self._graph.get_consumers(api_name))

    def format_ci_output(self, api_name: str) -> str:
        """Render the impact analysis as a banner for CI logs.

        Informational only — this text NEVER affects the CI outcome.

        Args:
            api_name: The API that changed.

        Returns:
            Multi-line string ready for CI log output.
        """
        summary = self.analyze(api_name)

        out = [
            "",
            "--------------------------------------",
            "DELIMIT IMPACT ANALYSIS",
            "--------------------------------------",
            "",
            f"API changed: {api_name}",
            "",
        ]

        if not summary["graph_available"]:
            out.append("No dependency manifests found.")
            out.append("Add .delimit/dependencies.yaml to enable impact analysis.")
        elif summary["impact_count"] == 0:
            out.append("No known downstream consumers.")
        else:
            out.append("Potential downstream consumers:")
            out.append("")
            out.extend(f"  * {service}" for service in summary["downstream_services"])
            out.append("")
            out.append(f"Blast radius: {summary['impact_count']} service(s)")

        out.extend(["", "--------------------------------------", ""])
        return "\n".join(out)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def analyze_impact(
    graph: DependencyGraph,
    api_name: str,
) -> Dict[str, Any]:
    """Convenience wrapper for CI pipeline integration.

    Runs after event_backbone in the pipeline:
    diff_engine → policy_engine → complexity_analyzer
    → event_backbone → dependency_graph → impact_analyzer

    CRITICAL: never raises — impact analysis is informational only. On any
    failure a degraded summary (empty consumers, error string) is returned.

    Args:
        graph: Dependency graph (may be empty).
        api_name: The API that changed.

    Returns:
        Impact summary dictionary.
    """
    try:
        return ImpactAnalyzer(graph).analyze(api_name)
    except Exception as e:
        logger.warning("Impact analysis failed: %s — continuing", e)
        return {
            "api": api_name,
            "downstream_services": [],
            "impact_count": 0,
            "graph_available": False,
            "error": str(e),
        }
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def format_impact_for_ci(
    graph: DependencyGraph,
    api_name: str,
) -> str:
    """Convenience wrapper that formats impact analysis for CI output.

    NEVER raises; an empty string is returned on any failure so the CI
    pipeline is never blocked by formatting problems.
    """
    try:
        return ImpactAnalyzer(graph).format_ci_output(api_name)
    except Exception as e:
        logger.warning("Impact formatting failed: %s", e)
        return ""
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# Delimit Policy Preset: default
|
|
2
|
+
# Balanced rules for most teams. Blocks destructive changes, warns on risky ones.
|
|
3
|
+
# Good starting point — tighten as your API matures.
|
|
4
|
+
|
|
5
|
+
override_defaults: false
|
|
6
|
+
|
|
7
|
+
# Uses built-in defaults:
|
|
8
|
+
# ERROR: endpoint_removed, method_removed, required_param_added, field_removed (2xx)
|
|
9
|
+
# WARN: type_changed
|
|
10
|
+
# ALLOW: enum_value_added
|
|
11
|
+
#
|
|
12
|
+
# No custom rules needed — the built-in defaults are the "default" preset.
|
|
13
|
+
rules: []
|