delimit-cli 2.4.0 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
  Files changed (112)
  1. package/.dockerignore +7 -0
  2. package/.github/workflows/ci.yml +22 -0
  3. package/CODE_OF_CONDUCT.md +48 -0
  4. package/CONTRIBUTING.md +67 -0
  5. package/Dockerfile +9 -0
  6. package/LICENSE +21 -0
  7. package/README.md +18 -69
  8. package/SECURITY.md +42 -0
  9. package/adapters/gemini-forge.js +11 -0
  10. package/adapters/gemini-jamsons.js +152 -0
  11. package/bin/delimit-cli.js +8 -0
  12. package/bin/delimit-setup.js +258 -0
  13. package/gateway/ai/backends/__init__.py +0 -0
  14. package/gateway/ai/backends/async_utils.py +21 -0
  15. package/gateway/ai/backends/deploy_bridge.py +150 -0
  16. package/gateway/ai/backends/gateway_core.py +261 -0
  17. package/gateway/ai/backends/generate_bridge.py +38 -0
  18. package/gateway/ai/backends/governance_bridge.py +196 -0
  19. package/gateway/ai/backends/intel_bridge.py +59 -0
  20. package/gateway/ai/backends/memory_bridge.py +93 -0
  21. package/gateway/ai/backends/ops_bridge.py +137 -0
  22. package/gateway/ai/backends/os_bridge.py +82 -0
  23. package/gateway/ai/backends/repo_bridge.py +117 -0
  24. package/gateway/ai/backends/ui_bridge.py +118 -0
  25. package/gateway/ai/backends/vault_bridge.py +129 -0
  26. package/gateway/ai/server.py +1182 -0
  27. package/gateway/core/__init__.py +3 -0
  28. package/gateway/core/__pycache__/__init__.cpython-310.pyc +0 -0
  29. package/gateway/core/__pycache__/auto_baseline.cpython-310.pyc +0 -0
  30. package/gateway/core/__pycache__/ci_formatter.cpython-310.pyc +0 -0
  31. package/gateway/core/__pycache__/contract_ledger.cpython-310.pyc +0 -0
  32. package/gateway/core/__pycache__/dependency_graph.cpython-310.pyc +0 -0
  33. package/gateway/core/__pycache__/dependency_manifest.cpython-310.pyc +0 -0
  34. package/gateway/core/__pycache__/diff_engine_v2.cpython-310.pyc +0 -0
  35. package/gateway/core/__pycache__/event_backbone.cpython-310.pyc +0 -0
  36. package/gateway/core/__pycache__/event_schema.cpython-310.pyc +0 -0
  37. package/gateway/core/__pycache__/explainer.cpython-310.pyc +0 -0
  38. package/gateway/core/__pycache__/gateway.cpython-310.pyc +0 -0
  39. package/gateway/core/__pycache__/gateway_v2.cpython-310.pyc +0 -0
  40. package/gateway/core/__pycache__/gateway_v3.cpython-310.pyc +0 -0
  41. package/gateway/core/__pycache__/impact_analyzer.cpython-310.pyc +0 -0
  42. package/gateway/core/__pycache__/policy_engine.cpython-310.pyc +0 -0
  43. package/gateway/core/__pycache__/registry.cpython-310.pyc +0 -0
  44. package/gateway/core/__pycache__/registry_v2.cpython-310.pyc +0 -0
  45. package/gateway/core/__pycache__/registry_v3.cpython-310.pyc +0 -0
  46. package/gateway/core/__pycache__/semver_classifier.cpython-310.pyc +0 -0
  47. package/gateway/core/__pycache__/spec_detector.cpython-310.pyc +0 -0
  48. package/gateway/core/__pycache__/surface_bridge.cpython-310.pyc +0 -0
  49. package/gateway/core/auto_baseline.py +304 -0
  50. package/gateway/core/ci_formatter.py +283 -0
  51. package/gateway/core/complexity_analyzer.py +386 -0
  52. package/gateway/core/contract_ledger.py +345 -0
  53. package/gateway/core/dependency_graph.py +218 -0
  54. package/gateway/core/dependency_manifest.py +223 -0
  55. package/gateway/core/diff_engine_v2.py +477 -0
  56. package/gateway/core/diff_engine_v2.py.bak +426 -0
  57. package/gateway/core/event_backbone.py +268 -0
  58. package/gateway/core/event_schema.py +258 -0
  59. package/gateway/core/explainer.py +438 -0
  60. package/gateway/core/gateway.py +128 -0
  61. package/gateway/core/gateway_v2.py +154 -0
  62. package/gateway/core/gateway_v3.py +224 -0
  63. package/gateway/core/impact_analyzer.py +163 -0
  64. package/gateway/core/policies/default.yml +13 -0
  65. package/gateway/core/policies/relaxed.yml +48 -0
  66. package/gateway/core/policies/strict.yml +55 -0
  67. package/gateway/core/policy_engine.py +464 -0
  68. package/gateway/core/registry.py +52 -0
  69. package/gateway/core/registry_v2.py +132 -0
  70. package/gateway/core/registry_v3.py +134 -0
  71. package/gateway/core/semver_classifier.py +152 -0
  72. package/gateway/core/spec_detector.py +130 -0
  73. package/gateway/core/surface_bridge.py +307 -0
  74. package/gateway/core/zero_spec/__init__.py +4 -0
  75. package/gateway/core/zero_spec/__pycache__/__init__.cpython-310.pyc +0 -0
  76. package/gateway/core/zero_spec/__pycache__/detector.cpython-310.pyc +0 -0
  77. package/gateway/core/zero_spec/__pycache__/express_extractor.cpython-310.pyc +0 -0
  78. package/gateway/core/zero_spec/__pycache__/fastapi_extractor.cpython-310.pyc +0 -0
  79. package/gateway/core/zero_spec/__pycache__/nestjs_extractor.cpython-310.pyc +0 -0
  80. package/gateway/core/zero_spec/detector.py +353 -0
  81. package/gateway/core/zero_spec/express_extractor.py +483 -0
  82. package/gateway/core/zero_spec/fastapi_extractor.py +254 -0
  83. package/gateway/core/zero_spec/nestjs_extractor.py +369 -0
  84. package/gateway/tasks/__init__.py +1 -0
  85. package/gateway/tasks/__pycache__/__init__.cpython-310.pyc +0 -0
  86. package/gateway/tasks/__pycache__/check_policy.cpython-310.pyc +0 -0
  87. package/gateway/tasks/__pycache__/check_policy_v2.cpython-310.pyc +0 -0
  88. package/gateway/tasks/__pycache__/check_policy_v3.cpython-310.pyc +0 -0
  89. package/gateway/tasks/__pycache__/explain_diff.cpython-310.pyc +0 -0
  90. package/gateway/tasks/__pycache__/explain_diff_v2.cpython-310.pyc +0 -0
  91. package/gateway/tasks/__pycache__/validate_api.cpython-310.pyc +0 -0
  92. package/gateway/tasks/__pycache__/validate_api_v2.cpython-310.pyc +0 -0
  93. package/gateway/tasks/__pycache__/validate_api_v3.cpython-310.pyc +0 -0
  94. package/gateway/tasks/check_policy.py +177 -0
  95. package/gateway/tasks/check_policy_v2.py +255 -0
  96. package/gateway/tasks/check_policy_v3.py +255 -0
  97. package/gateway/tasks/explain_diff.py +305 -0
  98. package/gateway/tasks/explain_diff_v2.py +267 -0
  99. package/gateway/tasks/validate_api.py +131 -0
  100. package/gateway/tasks/validate_api_v2.py +208 -0
  101. package/gateway/tasks/validate_api_v3.py +163 -0
  102. package/package.json +4 -3
  103. package/adapters/codex-skill.js +0 -87
  104. package/adapters/cursor-extension.js +0 -190
  105. package/adapters/gemini-action.js +0 -93
  106. package/adapters/openai-function.js +0 -112
  107. package/adapters/xai-plugin.js +0 -151
  108. package/test-decision-engine.js +0 -181
  109. package/test-hook.js +0 -27
  110. package/tests/cli.test.js +0 -359
  111. package/tests/fixtures/openapi-changed.yaml +0 -56
  112. package/tests/fixtures/openapi.yaml +0 -87
@@ -0,0 +1,3 @@
1
+ from .registry import TaskRegistry, task_registry
2
+
3
+ __all__ = ["TaskRegistry", "task_registry"]
@@ -0,0 +1,304 @@
1
+ """
2
+ Auto-Baseline Mode for Gradual Adoption
3
+ Enables teams to start governance without failing on existing issues
4
+ """
5
+
6
+ import json
7
+ import hashlib
8
+ from pathlib import Path
9
+ from datetime import datetime
10
+ from typing import Dict, Any, Optional, List
11
+
12
+ from schemas.evidence import TaskEvidence, Decision, Violation
13
+
14
+
15
class AutoBaseline:
    """Manage automatic baseline generation and comparison.

    Enables gradual adoption of governance checks: the first run records the
    current violations as a baseline, and later runs flag only NEW violations
    that are not already in that baseline.
    """

    def __init__(self, baseline_dir: Optional[Path] = None):
        """Initialize the auto-baseline manager.

        Args:
            baseline_dir: Directory to store baselines
                (default: ~/.delimit/baselines). Created if missing.
        """
        self.baseline_dir = baseline_dir or (Path.home() / ".delimit" / "baselines")
        self.baseline_dir.mkdir(parents=True, exist_ok=True)

    def get_baseline_path(self, file_path: str, task: str) -> Path:
        """Return the baseline file path for a given source file and task.

        Args:
            file_path: Path to the source file
            task: Task name (validate-api, check-policy, etc)

        Returns:
            Path to the baseline file. The name combines the file stem, the
            task, and a short hash of the full path so same-named files in
            different directories do not collide.
        """
        # md5 is used only as a short, stable path fingerprint (not security).
        file_hash = hashlib.md5(file_path.encode()).hexdigest()[:8]
        file_name = Path(file_path).stem
        baseline_name = f"{file_name}_{task}_{file_hash}.baseline.json"
        return self.baseline_dir / baseline_name

    def load_baseline(self, file_path: str, task: str) -> Optional[Dict[str, Any]]:
        """Load an existing baseline for (file_path, task), or None if absent."""
        baseline_path = self.get_baseline_path(file_path, task)
        if baseline_path.exists():
            with baseline_path.open('r') as f:
                return json.load(f)
        return None

    def save_baseline(self, file_path: str, task: str, evidence: "TaskEvidence") -> Path:
        """Save current results as the baseline.

        Args:
            file_path: Path to the source file
            task: Task name
            evidence: Task evidence to baseline

        Returns:
            Path to the saved baseline file.
        """
        baseline_path = self.get_baseline_path(file_path, task)
        baseline_data = {
            "timestamp": datetime.now().isoformat(),
            "file": file_path,
            "task": task,
            "violations": [v.model_dump(mode='json') for v in evidence.violations],
            "metrics": evidence.metrics,
            # Source-file checksum so staleness can be detected later.
            "checksum": self._calculate_file_checksum(file_path)
        }
        with baseline_path.open('w') as f:
            json.dump(baseline_data, f, indent=2)
        return baseline_path

    def filter_new_violations(self,
                              current_evidence: "TaskEvidence",
                              baseline_data: Dict[str, Any]) -> "TaskEvidence":
        """Reduce violations to only those NOT present in the baseline.

        Mutates *current_evidence* in place (violations, decision, exit code,
        summary, metrics) and returns it.

        Args:
            current_evidence: Current task evidence
            baseline_data: Baseline data to compare against

        Returns:
            The modified evidence with only new violations.
        """
        # Signatures are rule:path:message; a violation matching a baseline
        # signature is considered pre-existing.
        baseline_sigs = {
            f"{v.get('rule')}:{v.get('path', '')}:{v.get('message', '')}"
            for v in baseline_data.get("violations", [])
        }

        new_violations = []
        baselined_count = 0
        for violation in current_evidence.violations:
            sig = f"{violation.rule}:{violation.path or ''}:{violation.message}"
            if sig in baseline_sigs:
                baselined_count += 1
            else:
                new_violations.append(violation)

        current_evidence.violations = new_violations

        if not new_violations:
            current_evidence.decision = Decision.PASS
            current_evidence.exit_code = 0
            current_evidence.summary = f"No new violations found ({baselined_count} baselined)"
        else:
            # Keep the original decision for new violations; only annotate.
            current_evidence.summary = f"{current_evidence.summary} ({baselined_count} baselined)"

        current_evidence.metrics["baselined_violations"] = baselined_count
        current_evidence.metrics["new_violations"] = len(new_violations)
        current_evidence.metrics["baseline_applied"] = True
        return current_evidence

    def apply_auto_baseline(self,
                            file_path: str,
                            task: str,
                            evidence: "TaskEvidence",
                            create_if_missing: bool = True) -> "TaskEvidence":
        """Apply auto-baseline logic to task evidence.

        On the first run with violations a baseline is created and the run is
        downgraded to a warning; on later runs, baselined violations are
        filtered out.

        Args:
            file_path: Path to the source file
            task: Task name
            evidence: Task evidence to process (mutated in place)
            create_if_missing: Create baseline if it doesn't exist

        Returns:
            The (possibly modified) evidence.
        """
        baseline = self.load_baseline(file_path, task)

        if baseline is not None:
            return self.filter_new_violations(evidence, baseline)

        if create_if_missing and evidence.violations:
            # First run with findings: record them and pass with a warning so
            # adoption does not fail CI on pre-existing issues.
            baseline_path = self.save_baseline(file_path, task, evidence)
            evidence.decision = Decision.WARN
            evidence.exit_code = 0
            evidence.summary = f"Baseline created with {len(evidence.violations)} violations"
            evidence.metrics["baseline_created"] = True
            evidence.metrics["baseline_path"] = str(baseline_path)
        else:
            # No baseline and nothing to record - pass through unchanged.
            evidence.metrics["baseline_applied"] = False

        return evidence

    def _calculate_file_checksum(self, file_path: str) -> str:
        """Return the SHA-256 hex digest of the file, or "" if unreadable."""
        try:
            with open(file_path, 'rb') as f:
                return hashlib.sha256(f.read()).hexdigest()
        except OSError:
            # Missing/unreadable file: empty checksum signals "unknown".
            return ""

    def update_baseline(self,
                        file_path: str,
                        task: str,
                        evidence: "TaskEvidence",
                        threshold: float = 0.8) -> bool:
        """Update the baseline if the improvement threshold is met.

        Args:
            file_path: Path to the source file
            task: Task name
            evidence: Current task evidence
            threshold: Keep-ratio required to rewrite the baseline
                (0.8 = a 20% reduction in violations is required)

        Returns:
            True if the baseline was (re)written.
        """
        baseline = self.load_baseline(file_path, task)
        if baseline is None:
            # No existing baseline - record the current state.
            self.save_baseline(file_path, task, evidence)
            return True

        baseline_count = len(baseline.get("violations", []))
        if len(evidence.violations) <= baseline_count * threshold:
            # Significant improvement - ratchet the baseline down.
            self.save_baseline(file_path, task, evidence)
            return True
        return False

    def get_baseline_status(self) -> Dict[str, Any]:
        """Return status information about all stored baselines.

        Unreadable or corrupt baseline files are skipped (they no longer
        abort the whole status listing).
        """
        baselines = list(self.baseline_dir.glob("*.baseline.json"))

        status = {
            "baseline_dir": str(self.baseline_dir),
            "total_baselines": len(baselines),
            "baselines": []
        }

        for baseline_file in baselines:
            try:
                with baseline_file.open('r') as f:
                    data = json.load(f)
            except (OSError, json.JSONDecodeError):
                # Skip corrupt/unreadable entries rather than failing status.
                continue
            status["baselines"].append({
                "file": data.get("file"),
                "task": data.get("task"),
                "timestamp": data.get("timestamp"),
                "violations_count": len(data.get("violations", [])),
                "path": str(baseline_file)
            })

        return status

    def clear_baseline(self, file_path: Optional[str] = None, task: Optional[str] = None) -> int:
        """Delete baselines and return how many were removed.

        With both *file_path* and *task*, removes that single baseline.
        With only one of them, removes just the matching subset (previously,
        supplying only one filter deleted EVERY baseline). With neither,
        removes all baselines.

        Args:
            file_path: Specific file to clear baseline for (optional)
            task: Specific task to clear baseline for (optional)

        Returns:
            Number of baselines cleared.
        """
        if file_path and task:
            baseline_path = self.get_baseline_path(file_path, task)
            if baseline_path.exists():
                baseline_path.unlink()
                return 1
            return 0

        if file_path:
            # Match by stem + path hash (same scheme as get_baseline_path).
            file_hash = hashlib.md5(file_path.encode()).hexdigest()[:8]
            pattern = f"{Path(file_path).stem}_*_{file_hash}.baseline.json"
        elif task:
            pattern = f"*_{task}_*.baseline.json"
        else:
            pattern = "*.baseline.json"

        count = 0
        for baseline_file in self.baseline_dir.glob(pattern):
            baseline_file.unlink()
            count += 1
        return count
281
+
282
+
283
# Convenience functions
def apply_auto_baseline(evidence: TaskEvidence,
                        file_path: str,
                        task: str,
                        enabled: bool = False) -> TaskEvidence:
    """Run the auto-baseline pass over *evidence* when *enabled* is True.

    Args:
        evidence: Task evidence
        file_path: Source file path
        task: Task name
        enabled: Whether auto-baseline is enabled

    Returns:
        Potentially modified evidence (unchanged when disabled).
    """
    if enabled:
        # Delegate to a manager rooted at the default baseline directory.
        return AutoBaseline().apply_auto_baseline(file_path, task, evidence)
    return evidence
@@ -0,0 +1,283 @@
1
+ """
2
+ CI Output Formatter - Creates clear, actionable output for developers.
3
+ Supports GitHub Actions annotations and PR comments.
4
+ """
5
+
6
+ from typing import Dict, List, Any, Optional
7
+ from enum import Enum
8
+ import json
9
+
10
class OutputFormat(Enum):
    """Supported output targets for formatted results."""
    TEXT = "text"
    MARKDOWN = "markdown"
    GITHUB_ANNOTATION = "github_annotation"
    JSON = "json"


class CIFormatter:
    """Format Delimit output for different CI environments."""

    def __init__(self, format_type: OutputFormat = OutputFormat.TEXT):
        """
        Args:
            format_type: Target output format (default: plain text).
        """
        self.format_type = format_type

    def format_result(self, result: Dict[str, Any]) -> str:
        """Format the complete result based on output type."""
        if self.format_type == OutputFormat.JSON:
            return json.dumps(result, indent=2)
        elif self.format_type == OutputFormat.MARKDOWN:
            return self._format_markdown(result)
        elif self.format_type == OutputFormat.GITHUB_ANNOTATION:
            return self._format_github_annotations(result)
        else:
            return self._format_text(result)

    def _format_text(self, result: Dict[str, Any]) -> str:
        """Format as plain text for terminal output."""
        lines = []

        decision = result.get("decision", "unknown")
        violations = result.get("violations", [])
        summary = result.get("summary", {})

        # Header
        if decision == "fail":
            lines.append("❌ API Governance Check Failed")
        elif decision == "warn":
            lines.append("⚠️ API Governance Check Passed with Warnings")
        else:
            lines.append("✅ API Governance Check Passed")

        lines.append("=" * 50)

        # Summary
        if summary:
            lines.append(f"Total Changes: {summary.get('total_changes', 0)}")
            lines.append(f"Breaking Changes: {summary.get('breaking_changes', 0)}")
            lines.append(f"Policy Violations: {summary.get('violations', 0)}")
            lines.append("")

        # Violations, grouped by severity so errors surface first
        if violations:
            lines.append("Violations Found:")
            lines.append("-" * 40)

            errors = [v for v in violations if v.get("severity") == "error"]
            warnings = [v for v in violations if v.get("severity") == "warning"]

            if errors:
                lines.append("\n🔴 ERRORS (Must Fix):")
                for v in errors:
                    lines.append(f" • {v.get('message', 'Unknown violation')}")
                    if v.get("path"):
                        lines.append(f" Location: {v['path']}")

            if warnings:
                lines.append("\n🟡 WARNINGS:")
                for v in warnings:
                    lines.append(f" • {v.get('message', 'Unknown warning')}")
                    if v.get("path"):
                        lines.append(f" Location: {v['path']}")

        # Remediation hints only when the run actually failed
        if violations and decision == "fail":
            lines.append("\n" + "=" * 50)
            lines.append("Suggested Fixes:")
            lines.append("1. Restore removed endpoints/fields")
            lines.append("2. Make new parameters optional")
            lines.append("3. Use API versioning (e.g., /v2/)")
            lines.append("4. Add deprecation notices before removing")

        return "\n".join(lines)

    def _format_markdown(self, result: Dict[str, Any]) -> str:
        """Format as Markdown for PR comments.

        Includes semver classification badge and migration guidance when
        the result carries semver/explainer data.
        """
        lines = []

        decision = result.get("decision", "unknown")
        violations = result.get("violations", [])
        summary = result.get("summary", {})
        semver = result.get("semver")  # optional dict from semver_classifier

        # Header — include semver badge when available
        bump_badge = ""
        if semver:
            bump = semver.get("bump", "unknown")
            bump_badge = {"major": " `MAJOR`", "minor": " `MINOR`", "patch": " `PATCH`", "none": ""}.get(bump, "")

        if decision == "fail":
            lines.append(f"## 🚨 Delimit: Breaking Changes{bump_badge}\n")
        elif decision == "warn":
            lines.append(f"## ⚠️ Delimit: Potential Issues{bump_badge}\n")
        else:
            lines.append(f"## ✅ API Changes Look Good{bump_badge}\n")

        # Semver + summary table
        lines.append("| Metric | Value |")
        lines.append("|--------|-------|")
        if semver:
            lines.append(f"| Semver bump | `{semver.get('bump', 'unknown')}` |")
            if semver.get("next_version"):
                lines.append(f"| Next version | `{semver['next_version']}` |")
        lines.append(f"| Total changes | {summary.get('total_changes', 0)} |")
        lines.append(f"| Breaking | {summary.get('breaking_changes', 0)} |")
        if summary.get("violations", 0) > 0:
            lines.append(f"| Policy violations | {summary['violations']} |")
        lines.append("")

        # Violations table
        if violations:
            errors = [v for v in violations if v.get("severity") == "error"]
            warnings = [v for v in violations if v.get("severity") == "warning"]

            if errors or warnings:
                lines.append("### Violations\n")
                lines.append("| Severity | Rule | Description | Location |")
                lines.append("|----------|------|-------------|----------|")

                for v in errors:
                    rule = v.get("name", v.get("rule", "Unknown"))
                    desc = v.get("message", "Unknown violation")
                    location = v.get("path", "-")
                    lines.append(f"| 🔴 **Error** | {rule} | {desc} | `{location}` |")

                for v in warnings:
                    rule = v.get("name", v.get("rule", "Unknown"))
                    desc = v.get("message", "Unknown warning")
                    location = v.get("path", "-")
                    lines.append(f"| 🟡 Warning | {rule} | {desc} | `{location}` |")

                lines.append("")

        # Detailed changes — collapsed, and only when short enough to read
        all_changes = result.get("all_changes", [])
        if all_changes and len(all_changes) <= 10:
            lines.append("<details>")
            lines.append("<summary>All changes</summary>\n")
            lines.append("```")
            for change in all_changes:
                breaking = "BREAKING" if change.get("is_breaking") else "safe"
                lines.append(f"[{breaking}] {change.get('message', 'Unknown change')}")
            lines.append("```")
            lines.append("</details>\n")

        # Migration guidance (from explainer) when available
        migration = result.get("migration")
        if migration and decision == "fail":
            lines.append("<details>")
            lines.append("<summary>Migration guide</summary>\n")
            lines.append(migration)
            lines.append("\n</details>\n")

        # Generic remediation only when no tailored migration guide exists
        if violations and decision == "fail" and not migration:
            lines.append("### 💡 How to Fix\n")
            lines.append("1. **Restore removed endpoints** — deprecate before removing")
            lines.append("2. **Make parameters optional** — don't add required params")
            lines.append("3. **Use versioning** — create `/v2/` for breaking changes")
            lines.append("4. **Gradual migration** — provide guides and time")
            lines.append("")

        lines.append("---")
        lines.append("*Generated by [Delimit](https://github.com/delimit-ai/delimit) — ESLint for API contracts*")

        return "\n".join(lines)

    def _format_github_annotations(self, result: Dict[str, Any]) -> str:
        """Format as GitHub Actions annotations.

        Messages are percent-escaped per the workflow-command syntax; without
        this, a multi-line message is truncated at its first newline and a
        literal '%' can corrupt the command.
        """
        annotations = []

        violations = result.get("violations", [])

        for v in violations:
            severity = v.get("severity", "warning")
            message = self._escape_annotation_data(v.get("message", "Unknown violation"))

            # Map Delimit severities onto GitHub annotation levels.
            if severity == "error":
                level = "error"
            elif severity == "warning":
                level = "warning"
            else:
                level = "notice"

            # Extract file and line if possible
            file = "openapi.yaml"  # Default, would need to map from path

            # GitHub annotation syntax
            annotation = f"::{level} file={file},title=API Governance::{message}"
            annotations.append(annotation)

        # Also output summary
        decision = result.get("decision", "unknown")
        summary = result.get("summary", {})

        if decision == "fail":
            annotations.append(f"::error::Delimit found {summary.get('violations', 0)} policy violations")
        elif decision == "warn":
            annotations.append(f"::warning::Delimit found {summary.get('violations', 0)} warnings")

        return "\n".join(annotations)

    @staticmethod
    def _escape_annotation_data(text: str) -> str:
        """Percent-encode '%', CR and LF for GitHub workflow-command data."""
        # '%' must be escaped first so the other escapes are not re-escaped.
        return text.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")
225
+
226
+
227
class PRCommentGenerator:
    """Generate PR comments for GitHub."""

    @staticmethod
    def generate_comment(result: Dict[str, Any], pr_number: Optional[int] = None) -> str:
        """Build the full Markdown comment body for a pull request."""
        body = CIFormatter(OutputFormat.MARKDOWN).format_result(result)
        # Prefix a PR-specific header when a PR number is supplied.
        if pr_number:
            return f"### Delimit Report for PR #{pr_number}\n\n" + body
        return body

    @staticmethod
    def generate_inline_comment(violation: Dict[str, Any]) -> str:
        """Build a one-line inline comment for a specific violation."""
        marker = "🔴" if violation.get("severity", "warning") == "error" else "⚠️"
        return f"{marker} **Delimit**: {violation.get('message', 'Unknown issue')}"
252
+
253
+
254
def format_for_ci(result: Dict[str, Any], ci_environment: str = "github") -> str:
    """
    Main entry point for CI formatting.

    Args:
        result: The Delimit check result
        ci_environment: The CI platform (github, gitlab, jenkins, etc.)

    Returns:
        Formatted output string
    """
    if ci_environment == "pr_comment":
        # Markdown body suitable for posting as a PR comment.
        return PRCommentGenerator.generate_comment(result)

    if ci_environment == "github":
        # Inline annotations first, then a human-readable summary.
        annotations = CIFormatter(OutputFormat.GITHUB_ANNOTATION).format_result(result)
        summary = CIFormatter(OutputFormat.TEXT).format_result(result)
        return annotations + "\n\n" + summary

    # Any other environment falls back to plain text output.
    return CIFormatter(OutputFormat.TEXT).format_result(result)