delimit-cli 3.4.0 → 3.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/delimit-setup.js +23 -0
- package/gateway/ai/backends/tools_data.py +830 -0
- package/gateway/ai/backends/tools_design.py +921 -0
- package/gateway/ai/backends/tools_infra.py +866 -0
- package/gateway/ai/backends/tools_real.py +766 -0
- package/gateway/ai/backends/ui_bridge.py +26 -49
- package/gateway/ai/deliberation.py +387 -0
- package/gateway/ai/ledger_manager.py +207 -0
- package/gateway/ai/server.py +630 -216
- package/package.json +1 -1
|
@@ -0,0 +1,866 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Real implementations for infrastructure tools (replacing suite stubs).
|
|
3
|
+
|
|
4
|
+
Tools:
|
|
5
|
+
- security_audit: dep audit + anti-pattern scan + secret detection
|
|
6
|
+
- obs_status: system health (disk, memory, services, uptime)
|
|
7
|
+
- obs_metrics: live system metrics from /proc
|
|
8
|
+
- obs_logs: search system and application logs
|
|
9
|
+
- release_plan: git-based release planning
|
|
10
|
+
- release_status: file-based deploy tracker
|
|
11
|
+
|
|
12
|
+
All tools work WITHOUT external integrations by default.
|
|
13
|
+
Optional upgrades noted in each function's docstring.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
import os
|
|
19
|
+
import re
|
|
20
|
+
import shutil
|
|
21
|
+
import subprocess
|
|
22
|
+
import time
|
|
23
|
+
from datetime import datetime, timezone
|
|
24
|
+
from pathlib import Path
|
|
25
|
+
from typing import Any, Dict, List, Optional
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger("delimit.ai.tools_infra")
|
|
28
|
+
|
|
29
|
+
# ─── Helpers ──────────────────────────────────────────────────────────────

# Deploy-record directory; overridable via DELIMIT_DEPLOYS_DIR (useful for tests/CI).
DEPLOYS_DIR = Path(os.environ.get("DELIMIT_DEPLOYS_DIR", os.path.expanduser("~/.delimit/deploys")))

# Secret patterns: name -> regex.  Matches are reported as critical findings.
SECRET_PATTERNS = {
    "aws_access_key": r"(?:AKIA[0-9A-Z]{16})",
    "aws_secret_key": r"(?:aws_secret_access_key|AWS_SECRET_ACCESS_KEY)\s*[=:]\s*['\"]?[A-Za-z0-9/+=]{40}",
    "generic_api_key": r"(?:api[_-]?key|apikey)\s*[=:]\s*['\"]?[A-Za-z0-9_\-]{20,}",
    "generic_secret": r"(?:secret|password|passwd|token)\s*[=:]\s*['\"]?[^\s'\"]{8,}",
    "private_key_header": r"-----BEGIN (?:RSA |EC |DSA )?PRIVATE KEY-----",
    "github_token": r"gh[pousr]_[A-Za-z0-9_]{36,}",
    "slack_token": r"xox[baprs]-[0-9A-Za-z\-]{10,}",
    "jwt_token": r"eyJ[A-Za-z0-9_-]{10,}\.eyJ[A-Za-z0-9_-]{10,}\.[A-Za-z0-9_-]{10,}",
}

# Dangerous code patterns: name -> (regex, description, severity)
ANTI_PATTERNS = {
    "eval_usage": (r"\beval\s*\(", "Use of eval() — potential code injection", "high"),
    "exec_usage": (r"\bexec\s*\(", "Use of exec() — potential code injection", "high"),
    "sql_concat": (r"""(?:execute|cursor\.execute|query)\s*\(\s*(?:f['\"]|['\"].*%s|.*\+\s*['\"])""", "SQL string concatenation — potential SQL injection", "critical"),
    "dangerous_innerHTML": (r"dangerouslySetInnerHTML", "dangerouslySetInnerHTML — potential XSS", "high"),
    "subprocess_shell": (r"subprocess\.\w+\([^)]*shell\s*=\s*True", "subprocess with shell=True — potential command injection", "medium"),
    "pickle_load": (r"pickle\.loads?\(", "pickle.load — potential arbitrary code execution", "high"),
    # Fixed regex: the negative lookahead must sit right after the "(".
    # The previous form r"yaml\.load\([^)]*(?!Loader)" matched even safe calls,
    # because the greedy [^)]* could always backtrack to a position that is not
    # immediately followed by "Loader".  Now we only match when no "Loader"
    # appears anywhere before the closing paren.
    "yaml_unsafe_load": (r"yaml\.load\s*\((?![^)]*Loader)", "yaml.load without safe Loader", "medium"),
    "hardcoded_ip": (r"\b(?:192\.168\.\d+\.\d+|10\.\d+\.\d+\.\d+|172\.(?:1[6-9]|2\d|3[01])\.\d+\.\d+)\b", "Hardcoded internal IP address", "low"),
}

# File extensions to scan (".env" is matched by file name for bare dotfiles)
SCAN_EXTENSIONS = {".py", ".js", ".ts", ".jsx", ".tsx", ".go", ".rb", ".java", ".rs", ".yaml", ".yml", ".json", ".env", ".sh", ".bash"}

# Skip directories (vendored / generated / cache trees)
SKIP_DIRS = {"node_modules", ".git", "__pycache__", ".venv", "venv", ".tox", "dist", "build", ".next", ".nuxt", "vendor"}
|
|
64
|
+
def _run_cmd(cmd: List[str], timeout: int = 30, cwd: Optional[str] = None) -> Dict[str, Any]:
|
|
65
|
+
"""Run a command and return stdout, stderr, returncode."""
|
|
66
|
+
try:
|
|
67
|
+
result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout, cwd=cwd)
|
|
68
|
+
return {"stdout": result.stdout, "stderr": result.stderr, "returncode": result.returncode}
|
|
69
|
+
except FileNotFoundError:
|
|
70
|
+
return {"stdout": "", "stderr": f"Command not found: {cmd[0]}", "returncode": -1}
|
|
71
|
+
except subprocess.TimeoutExpired:
|
|
72
|
+
return {"stdout": "", "stderr": f"Command timed out after {timeout}s", "returncode": -2}
|
|
73
|
+
except Exception as e:
|
|
74
|
+
return {"stdout": "", "stderr": str(e), "returncode": -3}
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _scan_files(target: str) -> List[Path]:
    """Collect scannable source files under *target*.

    Returns [target] when it is a regular file, [] when the path does not
    exist.  Skips vendored/generated trees (SKIP_DIRS) and caps the result
    at 5000 files so huge repositories do not stall the audit.

    Fix: for bare dotfiles such as ".env", Path.suffix is "" (the whole name
    is the stem), so the old suffix-only check silently skipped exactly the
    files SCAN_EXTENSIONS' ".env" entry was meant to catch.  We now also
    match on the full file name.
    """
    root = Path(target).resolve()
    if root.is_file():
        return [root]
    if not root.is_dir():
        return []
    files: List[Path] = []
    for p in root.rglob("*"):
        if any(skip in p.parts for skip in SKIP_DIRS):
            continue
        # suffix check misses bare dotfiles (".env"), hence the name check too
        if p.is_file() and (p.suffix in SCAN_EXTENSIONS or p.name in SCAN_EXTENSIONS):
            files.append(p)
            # Cap to avoid scanning massive repos
            if len(files) >= 5000:
                break
    return files
|
94
|
+
|
|
95
|
+
|
|
96
|
+
# ─── 5. security_audit ──────────────────────────────────────────────────
|
|
97
|
+
|
|
98
|
+
def security_audit(target: str = ".") -> Dict[str, Any]:
    """Audit security: dependency vulnerabilities + anti-patterns + secret detection.

    Default: runs pip-audit/npm-audit, regex scans for secrets and dangerous patterns.
    Optional upgrade: set SNYK_TOKEN or TRIVY_PATH for enhanced scanning.

    Returns a report dict (findings, severity summary, tools that actually
    ran) or {"error": "target_not_found", ...} when `target` does not exist.

    Cleanups vs the previous version: removed a dead `sev = vuln.get(
    "fix_versions", ...)` assignment in the pip-audit branch (it was never
    used and misleadingly named), and `target_path` is reused instead of
    re-resolving Path(target) in three places.
    """
    target_path = Path(target).resolve()
    if not target_path.exists():
        return {"error": "target_not_found", "message": f"Path does not exist: {target}"}

    vulnerabilities: List[Dict[str, Any]] = []
    anti_patterns_found: List[Dict[str, Any]] = []
    secrets_found: List[Dict[str, Any]] = []
    tools_used: List[str] = []
    severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}

    # --- 1. Dependency audit ---
    # Run the auditors from the project directory (or the file's parent).
    cwd = str(target_path) if target_path.is_dir() else str(target_path.parent)

    # Python: pip-audit
    if (target_path / "requirements.txt").exists() or (target_path / "pyproject.toml").exists() or (target_path / "setup.py").exists():
        pip_audit = shutil.which("pip-audit")
        if pip_audit:
            r = _run_cmd([pip_audit, "--format", "json", "--desc"], timeout=60, cwd=cwd)
            tools_used.append("pip-audit")
            # pip-audit exits non-zero when vulns are found, so accept any stdout too
            if r["returncode"] == 0 or r["stdout"].strip():
                try:
                    entries = json.loads(r["stdout"]) if r["stdout"].strip() else []
                    # pip-audit returns {"dependencies": [...]} (some versions: a bare list)
                    deps = entries if isinstance(entries, list) else entries.get("dependencies", [])
                    for dep in deps:
                        for vuln in dep.get("vulns", []):
                            vulnerabilities.append({
                                "source": "pip-audit",
                                "package": dep.get("name", "unknown"),
                                "installed": dep.get("version", "unknown"),
                                "id": vuln.get("id", "unknown"),
                                "description": vuln.get("description", "")[:200],
                                "fix_versions": vuln.get("fix_versions", []),
                                # pip-audit JSON carries no severity field; default to high
                                "severity": "high",
                            })
                            severity_counts["high"] += 1
                except (json.JSONDecodeError, KeyError):
                    pass
        else:
            tools_used.append("pip-audit (not installed)")

    # Node: npm audit
    if (target_path / "package.json").exists():
        npm = shutil.which("npm")
        if npm:
            r = _run_cmd([npm, "audit", "--json"], timeout=60, cwd=cwd)
            tools_used.append("npm-audit")
            try:
                data = json.loads(r["stdout"]) if r["stdout"].strip() else {}
                # newer npm emits "vulnerabilities"; older output used "advisories"
                advisories = data.get("vulnerabilities", data.get("advisories", {}))
                if isinstance(advisories, dict):
                    for name, info in advisories.items():
                        sev = info.get("severity", "high") if isinstance(info, dict) else "high"
                        vulnerabilities.append({
                            "source": "npm-audit",
                            "package": name,
                            "severity": sev,
                            "title": info.get("title", "") if isinstance(info, dict) else "",
                            "via": str(info.get("via", ""))[:200] if isinstance(info, dict) else "",
                        })
                        # unknown severities are counted as high
                        sev_key = sev if sev in severity_counts else "high"
                        severity_counts[sev_key] += 1
            except (json.JSONDecodeError, KeyError):
                pass
        else:
            tools_used.append("npm (not installed)")

    # Optional: Snyk (requires both SNYK_TOKEN and the snyk CLI on PATH)
    snyk_token = os.environ.get("SNYK_TOKEN")
    if snyk_token and shutil.which("snyk"):
        r = _run_cmd(["snyk", "test", "--json"], timeout=120, cwd=cwd)
        tools_used.append("snyk")
        try:
            data = json.loads(r["stdout"]) if r["stdout"].strip() else {}
            for vuln in data.get("vulnerabilities", []):
                vulnerabilities.append({
                    "source": "snyk",
                    "package": vuln.get("packageName", "unknown"),
                    "severity": vuln.get("severity", "high"),
                    "id": vuln.get("id", ""),
                    "title": vuln.get("title", ""),
                })
                sev = vuln.get("severity", "high")
                sev_key = sev if sev in severity_counts else "high"
                severity_counts[sev_key] += 1
        except (json.JSONDecodeError, KeyError):
            pass

    # Optional: Trivy (TRIVY_PATH env var or trivy on PATH)
    trivy_path = os.environ.get("TRIVY_PATH") or shutil.which("trivy")
    if trivy_path and os.path.isfile(trivy_path):
        r = _run_cmd([trivy_path, "fs", "--format", "json", str(target_path)], timeout=120)
        tools_used.append("trivy")
        try:
            data = json.loads(r["stdout"]) if r["stdout"].strip() else {}
            for result_entry in data.get("Results", []):
                for vuln in result_entry.get("Vulnerabilities", []):
                    vulnerabilities.append({
                        "source": "trivy",
                        "package": vuln.get("PkgName", "unknown"),
                        # Trivy reports severities upper-case; normalize
                        "severity": vuln.get("Severity", "UNKNOWN").lower(),
                        "id": vuln.get("VulnerabilityID", ""),
                        "title": vuln.get("Title", ""),
                    })
                    sev = vuln.get("Severity", "high").lower()
                    sev_key = sev if sev in severity_counts else "high"
                    severity_counts[sev_key] += 1
        except (json.JSONDecodeError, KeyError):
            pass

    # --- 2. Anti-pattern scan ---
    files = _scan_files(target)
    tools_used.append(f"pattern-scanner ({len(files)} files)")

    for fpath in files:
        try:
            content = fpath.read_text(errors="ignore")
        except (OSError, PermissionError):
            continue

        # Report paths relative to the audited root where possible
        rel = str(fpath.relative_to(target_path)) if target_path in fpath.parents or fpath == target_path else str(fpath)

        # Secret detection (every hit is critical)
        for secret_name, pattern in SECRET_PATTERNS.items():
            for match in re.finditer(pattern, content):
                line_num = content[:match.start()].count("\n") + 1
                secrets_found.append({
                    "file": rel,
                    "line": line_num,
                    "type": secret_name,
                    "severity": "critical",
                    # Short context window around the match; never the full secret line
                    "snippet": content[max(0, match.start() - 10):match.end() + 10].strip()[:80],
                })
                severity_counts["critical"] += 1

        # Anti-pattern detection
        for ap_name, (pattern, desc, sev) in ANTI_PATTERNS.items():
            for match in re.finditer(pattern, content):
                line_num = content[:match.start()].count("\n") + 1
                anti_patterns_found.append({
                    "file": rel,
                    "line": line_num,
                    "pattern": ap_name,
                    "description": desc,
                    "severity": sev,
                })
                severity_counts[sev] += 1

    # --- 3. Check for .env in git ---
    env_in_git = False
    if (target_path / ".git").is_dir():
        r = _run_cmd(["git", "ls-files", "--cached", ".env"], cwd=cwd)
        if r["stdout"].strip():
            env_in_git = True
            anti_patterns_found.append({
                "file": ".env",
                "line": 0,
                "pattern": "env_in_git",
                "description": ".env file is tracked in git — secrets may be exposed in history",
                "severity": "critical",
            })
            severity_counts["critical"] += 1

    return {
        "target": str(target_path),
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "vulnerabilities": vulnerabilities,
        "anti_patterns": anti_patterns_found,
        "secrets_detected": len(secrets_found),
        "secrets": secrets_found[:20],  # Cap output to avoid huge responses
        "env_in_git": env_in_git,
        "severity_summary": severity_counts,
        "tools_used": tools_used,
        "files_scanned": len(files),
        "total_findings": len(vulnerabilities) + len(anti_patterns_found) + len(secrets_found),
    }
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
# ─── 6. obs_status ──────────────────────────────────────────────────────
|
|
284
|
+
|
|
285
|
+
# Common service ports to probe
# Used by obs_status(): each port is probed over HTTP on localhost via curl,
# or checked for a listener in /proc/net/tcp when curl is unavailable.
# Values are display labels only — they do not affect the probe.
KNOWN_PORTS = {
    3000: "Node/Next.js",
    3001: "Dev server",
    4000: "GraphQL",
    5000: "Flask/FastAPI",
    5173: "Vite",
    5432: "PostgreSQL",
    6379: "Redis",
    8000: "Django/FastAPI",
    8080: "HTTP alt",
    8443: "HTTPS alt",
    9090: "Prometheus",
    9200: "Elasticsearch",
    27017: "MongoDB",
}
|
301
|
+
|
|
302
|
+
|
|
303
|
+
def obs_status() -> Dict[str, Any]:
    """System health: disk, memory, services, uptime. Uses system commands only.

    Gathers disk usage (df), memory/swap (free -m), uptime (uptime -p with a
    /proc/uptime fallback), process count (ps aux), load average
    (/proc/loadavg) and probes KNOWN_PORTS for local services.  Every probe
    degrades gracefully: a missing command simply leaves its section at its
    empty/default value.
    """
    result = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "disk_usage": {},          # replaced by a list of per-mount dicts below
        "memory_usage": {},
        "services_detected": [],
        "uptime": "",
        "process_count": 0,
        "load_average": [],
    }

    # Disk space — exclude pseudo/ephemeral filesystems (tmpfs, devtmpfs, overlay)
    r = _run_cmd(["df", "-h", "--output=target,size,used,avail,pcent", "-x", "tmpfs", "-x", "devtmpfs", "-x", "overlay"])
    if r["returncode"] == 0:
        lines = r["stdout"].strip().split("\n")
        disks = []
        for line in lines[1:]:  # skip the header row
            parts = line.split()
            if len(parts) >= 5:
                disks.append({
                    "mount": parts[0],
                    "size": parts[1],
                    "used": parts[2],
                    "available": parts[3],
                    "percent": parts[4],
                })
        result["disk_usage"] = disks

    # Memory — parse `free -m` rows (values already in MiB)
    r = _run_cmd(["free", "-m"])
    if r["returncode"] == 0:
        lines = r["stdout"].strip().split("\n")
        for line in lines:
            if line.startswith("Mem:"):
                parts = line.split()
                if len(parts) >= 7:
                    total = int(parts[1])
                    used = int(parts[2])
                    result["memory_usage"] = {
                        "total_mb": total,
                        "used_mb": used,
                        "free_mb": int(parts[3]),
                        # parts[6] is the "available" column when present
                        "available_mb": int(parts[6]) if len(parts) > 6 else total - used,
                        "percent_used": round(used / total * 100, 1) if total > 0 else 0,
                    }
            elif line.startswith("Swap:"):
                parts = line.split()
                if len(parts) >= 3:
                    result["swap_usage"] = {
                        "total_mb": int(parts[1]),
                        "used_mb": int(parts[2]),
                        "free_mb": int(parts[3]) if len(parts) > 3 else 0,
                    }

    # Uptime — prefer human-readable `uptime -p`
    r = _run_cmd(["uptime", "-p"])
    if r["returncode"] == 0:
        result["uptime"] = r["stdout"].strip()
    else:
        # Fallback: read from /proc/uptime (first field = seconds since boot)
        try:
            raw = Path("/proc/uptime").read_text().split()[0]
            secs = float(raw)
            days = int(secs // 86400)
            hours = int((secs % 86400) // 3600)
            mins = int((secs % 3600) // 60)
            result["uptime"] = f"up {days} days, {hours} hours, {mins} minutes"
        except Exception:
            result["uptime"] = "unknown"

    # Process count — one line per process without the header
    r = _run_cmd(["ps", "aux", "--no-headers"])
    if r["returncode"] == 0:
        result["process_count"] = len(r["stdout"].strip().split("\n"))

    # Load average — first three fields of /proc/loadavg (1/5/15 min)
    try:
        loadavg = Path("/proc/loadavg").read_text().split()[:3]
        result["load_average"] = [float(x) for x in loadavg]
    except Exception:
        pass

    # Service detection via port probing
    # NOTE(review): when curl is present but a port speaks no HTTP (e.g.
    # PostgreSQL/Redis), curl reports "000" and the service goes undetected —
    # the /proc/net/tcp fallback only runs when curl is absent entirely.
    services = []
    curl = shutil.which("curl")
    for port, name in KNOWN_PORTS.items():
        if curl:
            r = _run_cmd([curl, "-s", "-o", "/dev/null", "-w", "%{http_code}", "--connect-timeout", "1", f"http://localhost:{port}/"])
            # "000" means connection failed / no HTTP response
            if r["returncode"] == 0 and r["stdout"].strip() not in ("000", ""):
                services.append({"port": port, "name": name, "status": "up", "http_code": r["stdout"].strip()})
        else:
            # Fallback: check if port is listening via /proc/net/tcp
            # NOTE(review): plain substring match over the whole table can
            # false-positive — the hex port may appear inside addresses or
            # remote-endpoint fields, not just local listening ports.
            try:
                hex_port = f"{port:04X}"
                tcp_data = Path("/proc/net/tcp").read_text()
                if hex_port in tcp_data:
                    services.append({"port": port, "name": name, "status": "listening"})
            except Exception:
                pass
    result["services_detected"] = services

    return result
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
# ─── 7. obs_metrics ─────────────────────────────────────────────────────
|
|
409
|
+
|
|
410
|
+
def obs_metrics(query: str = "system", time_range: str = "1h", source: Optional[str] = None) -> Dict[str, Any]:
    """Live system metrics from /proc. Query: cpu|memory|disk|io|all.

    Optional upgrade: set PROMETHEUS_URL or GRAFANA_URL for remote metrics.

    Args:
        query: metric family — "cpu", "memory"/"mem", "disk", "io",
            "network"/"net", "system" (common set) or "all".  When
            PROMETHEUS_URL is set it is forwarded verbatim as the PromQL query.
        time_range: echoed back in the result; the local /proc sampling is
            instantaneous and does not use it.
        source: "prometheus" (or None) allows the Prometheus path; any other
            value forces local sampling.

    Each subsection catches its own exceptions and records a "*_error" entry
    instead of failing the whole call.
    """
    result = {
        "query": query,
        "time_range": time_range,
        "source": source or "local",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "metrics": {},
    }

    # Check for Prometheus/Grafana integration
    prometheus_url = os.environ.get("PROMETHEUS_URL")
    if prometheus_url and source in ("prometheus", None):
        try:
            import urllib.request
            # NOTE(review): query is interpolated unescaped — assumes a valid
            # PromQL expression; consider urllib.parse.quote for safety.
            url = f"{prometheus_url}/api/v1/query?query={query}"
            with urllib.request.urlopen(url, timeout=5) as resp:
                data = json.loads(resp.read())
            result["metrics"]["prometheus"] = data.get("data", {}).get("result", [])
            result["source"] = "prometheus"
            return result
        except Exception as e:
            # Fall through to local sampling but surface the failure
            result["metrics"]["prometheus_error"] = str(e)

    q = query.lower()

    # CPU metrics — sample the aggregate "cpu" line of /proc/stat twice,
    # 200ms apart, and compute the busy fraction of the delta.  delta[3] is
    # the idle column, so iowait is counted as busy time here.
    if q in ("cpu", "system", "all"):
        try:
            stat1 = Path("/proc/stat").read_text().split("\n")[0].split()[1:]
            time.sleep(0.2)
            stat2 = Path("/proc/stat").read_text().split("\n")[0].split()[1:]
            vals1 = [int(x) for x in stat1[:7]]
            vals2 = [int(x) for x in stat2[:7]]
            delta = [b - a for a, b in zip(vals1, vals2)]
            total = sum(delta)
            idle = delta[3]
            cpu_pct = round((total - idle) / total * 100, 1) if total > 0 else 0.0
            result["metrics"]["cpu_percent"] = cpu_pct
            result["metrics"]["cpu_cores"] = os.cpu_count()
        except Exception as e:
            result["metrics"]["cpu_error"] = str(e)

    # Memory metrics — /proc/meminfo values are in kB; "used" is derived as
    # MemTotal - MemAvailable
    if q in ("memory", "mem", "system", "all"):
        try:
            meminfo = {}
            for line in Path("/proc/meminfo").read_text().split("\n"):
                if ":" in line:
                    key, val = line.split(":", 1)
                    meminfo[key.strip()] = int(val.strip().split()[0])  # kB
            total = meminfo.get("MemTotal", 0)
            available = meminfo.get("MemAvailable", 0)
            used = total - available
            result["metrics"]["memory_total_mb"] = round(total / 1024, 1)
            result["metrics"]["memory_used_mb"] = round(used / 1024, 1)
            result["metrics"]["memory_available_mb"] = round(available / 1024, 1)
            result["metrics"]["memory_percent"] = round(used / total * 100, 1) if total > 0 else 0
        except Exception as e:
            result["metrics"]["memory_error"] = str(e)

    # Disk I/O — cumulative per-device counters from /proc/diskstats
    if q in ("disk", "io", "system", "all"):
        try:
            diskstats = Path("/proc/diskstats").read_text().split("\n")
            disks = []
            for line in diskstats:
                parts = line.split()
                if len(parts) >= 14:
                    dev = parts[2]
                    # Filter to real block devices (sda, nvme, vda, etc.)
                    # — excludes partitions and loop/ram devices
                    if re.match(r'^(sd[a-z]+|nvme\d+n\d+|vd[a-z]+|xvd[a-z]+)$', dev):
                        disks.append({
                            "device": dev,
                            "reads_completed": int(parts[3]),
                            "writes_completed": int(parts[7]),
                            "read_sectors": int(parts[5]),
                            "write_sectors": int(parts[9]),
                            "io_in_progress": int(parts[11]),
                        })
            result["metrics"]["disk_io"] = disks
        except Exception as e:
            result["metrics"]["disk_io_error"] = str(e)

        # Disk space — df in bytes, excluding pseudo filesystems
        r = _run_cmd(["df", "-B1", "--output=target,size,used,avail", "-x", "tmpfs", "-x", "devtmpfs", "-x", "overlay"])
        if r["returncode"] == 0:
            lines = r["stdout"].strip().split("\n")[1:]  # skip header
            disk_space = []
            for line in lines:
                parts = line.split()
                if len(parts) >= 4:
                    try:
                        total_b = int(parts[1])
                        used_b = int(parts[2])
                        disk_space.append({
                            "mount": parts[0],
                            "total_gb": round(total_b / (1024**3), 1),
                            "used_gb": round(used_b / (1024**3), 1),
                            "percent": round(used_b / total_b * 100, 1) if total_b > 0 else 0,
                        })
                    except ValueError:
                        # non-numeric df row; skip it
                        pass
            result["metrics"]["disk_space"] = disk_space

    # Network (brief) — cumulative rx/tx counters from /proc/net/dev,
    # skipping the loopback interface
    if q in ("network", "net", "all"):
        try:
            net_lines = Path("/proc/net/dev").read_text().split("\n")[2:]  # skip 2 header rows
            interfaces = []
            for line in net_lines:
                if ":" in line:
                    parts = line.split(":")
                    iface = parts[0].strip()
                    if iface in ("lo",):
                        continue
                    vals = parts[1].split()
                    if len(vals) >= 10:
                        interfaces.append({
                            "interface": iface,
                            "rx_bytes": int(vals[0]),
                            "tx_bytes": int(vals[8]),
                            "rx_packets": int(vals[1]),
                            "tx_packets": int(vals[9]),
                        })
            result["metrics"]["network"] = interfaces
        except Exception as e:
            result["metrics"]["network_error"] = str(e)

    return result
|
|
543
|
+
|
|
544
|
+
|
|
545
|
+
# ─── 8. obs_logs ─────────────────────────────────────────────────────────
|
|
546
|
+
|
|
547
|
+
# Default log paths to search
# Consulted by obs_logs() when no centralized backend (Elasticsearch/Loki)
# is configured; paths that do not exist are silently skipped.
DEFAULT_LOG_PATHS = [
    "/var/log/syslog",
    "/var/log/messages",
    "/var/log/auth.log",
    "/var/log/kern.log",
    "/var/log/nginx/access.log",
    "/var/log/nginx/error.log",
    "/var/log/caddy/access.log",
]
|
557
|
+
|
|
558
|
+
|
|
559
|
+
def obs_logs(query: str, time_range: str = "1h", source: Optional[str] = None) -> Dict[str, Any]:
    """Search system and application logs.

    Search order: Elasticsearch (when ELASTICSEARCH_URL is set), journalctl,
    the well-known files in DEFAULT_LOG_PATHS, then recent application logs
    (~/.pm2/logs, /var/log/app, /var/log/delimit).  `source` may name a
    backend ("elasticsearch", "es", "journalctl", "local", "loki") or be a
    path to a custom log file, which then replaces DEFAULT_LOG_PATHS.

    Optional upgrade: set ELASTICSEARCH_URL or LOKI_URL for centralized log search.

    Cleanup vs the previous version: `shutil.which("grep")` is resolved once
    instead of per file / per directory inside the search loops.
    """
    result = {
        "query": query,
        "time_range": time_range,
        "source": source or "local",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "matches": [],
        "total_lines_searched": 0,
        "sources_checked": [],
    }

    # Check for Elasticsearch integration
    es_url = os.environ.get("ELASTICSEARCH_URL")
    if es_url and source in ("elasticsearch", "es", None):
        try:
            import urllib.request
            url = f"{es_url}/_search"
            payload = json.dumps({"query": {"match": {"message": query}}, "size": 50}).encode()
            req = urllib.request.Request(url, data=payload, headers={"Content-Type": "application/json"})
            with urllib.request.urlopen(req, timeout=10) as resp:
                data = json.loads(resp.read())
            hits = data.get("hits", {}).get("hits", [])
            result["matches"] = [{"source": "elasticsearch", "message": h["_source"].get("message", "")} for h in hits[:50]]
            result["source"] = "elasticsearch"
            return result
        except Exception as e:
            # Fall back to local search, but record the failure
            result["sources_checked"].append({"source": "elasticsearch", "error": str(e)})

    # Parse time_range to seconds for journalctl
    time_map = {"5m": 300, "15m": 900, "30m": 1800, "1h": 3600, "2h": 7200, "6h": 21600, "12h": 43200, "24h": 86400, "1d": 86400, "7d": 604800}
    since_secs = time_map.get(time_range, 3600)
    since_arg = f"--since=-{since_secs}s"

    # 1. journalctl (best on systemd systems)
    journalctl = shutil.which("journalctl")
    if journalctl:
        r = _run_cmd([journalctl, "--no-pager", "-g", query, since_arg, "--lines=100"], timeout=15)
        result["sources_checked"].append({"source": "journalctl", "available": True})
        if r["returncode"] in (0, 1):  # 1 = no matches
            lines = r["stdout"].strip().split("\n") if r["stdout"].strip() else []
            for line in lines[-50:]:  # Last 50 matches
                if line.strip():
                    result["matches"].append({"source": "journalctl", "line": line.strip()})
            result["total_lines_searched"] += len(lines)
    else:
        result["sources_checked"].append({"source": "journalctl", "available": False})

    # grep is used by both file-search passes below; resolve it once
    grep = shutil.which("grep")

    # 2. Log file search
    log_paths = DEFAULT_LOG_PATHS[:]
    if source and source not in ("local", "journalctl", "elasticsearch", "es", "loki"):
        # Treat source as a custom log path
        log_paths = [source]

    for log_path in log_paths:
        p = Path(log_path)
        if not p.exists() or not p.is_file():
            continue
        result["sources_checked"].append({"source": log_path, "available": True})
        try:
            if grep:
                r = _run_cmd([grep, "-i", "-n", "--text", query, log_path], timeout=10)
                if r["returncode"] == 0 and r["stdout"].strip():
                    lines = r["stdout"].strip().split("\n")
                    result["total_lines_searched"] += len(lines)
                    for line in lines[-30:]:  # Last 30 matches per file
                        result["matches"].append({"source": log_path, "line": line.strip()[:500]})
        except Exception:
            pass

    # 3. Application logs (common locations) — only the 5 most recently
    # modified *.log files per directory are searched
    app_log_dirs = [
        Path.home() / ".pm2" / "logs",
        Path("/var/log/app"),
        Path("/var/log/delimit"),
    ]
    for log_dir in app_log_dirs:
        if log_dir.is_dir():
            for logfile in sorted(log_dir.glob("*.log"), key=lambda p: p.stat().st_mtime, reverse=True)[:5]:
                result["sources_checked"].append({"source": str(logfile), "available": True})
                if grep:
                    r = _run_cmd([grep, "-i", "-n", "--text", query, str(logfile)], timeout=10)
                    if r["returncode"] == 0 and r["stdout"].strip():
                        lines = r["stdout"].strip().split("\n")
                        result["total_lines_searched"] += len(lines)
                        for line in lines[-20:]:
                            result["matches"].append({"source": str(logfile), "line": line.strip()[:500]})

    # Cap total matches
    result["matches"] = result["matches"][:100]
    result["total_matches"] = len(result["matches"])

    return result
|
|
657
|
+
|
|
658
|
+
|
|
659
|
+
# ─── 9. release_plan ────────────────────────────────────────────────────
|
|
660
|
+
|
|
661
|
+
def release_plan(environment: str = "production", version: str = "", repository: str = ".", services: Optional[List[str]] = None) -> Dict[str, Any]:
    """Generate a release plan from git history. Uses git only, no external integrations.

    Args:
        environment: Target environment name; embedded in the plan filename.
        version: Explicit version to release. When empty, a semver bump is
            suggested from the last tag and commit-message keywords.
        repository: Path to the git repository (default: current directory).
        services: Optional list of service names to record in the plan.

    Returns:
        Dict with commits/changed files/authors since the last tag, a
        suggested version, a release checklist, and the path of the plan
        file written under DEPLOYS_DIR. Returns
        {"error": "not_a_git_repo", ...} when `repository` has no .git dir.
    """
    repo_path = Path(repository).resolve()
    if not (repo_path / ".git").is_dir():
        return {"error": "not_a_git_repo", "message": f"No .git directory found at {repo_path}"}

    cwd = str(repo_path)
    result = {
        "environment": environment,
        "repository": str(repo_path),
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "services": services or [],
    }

    # Get last tag (None when the repo has no tags yet)
    r = _run_cmd(["git", "describe", "--tags", "--abbrev=0"], cwd=cwd)
    last_tag = r["stdout"].strip() if r["returncode"] == 0 else None
    result["last_tag"] = last_tag

    # Commits since last tag (or last 50 commits when no tag exists)
    if last_tag:
        r = _run_cmd(["git", "log", f"{last_tag}..HEAD", "--oneline", "--no-decorate"], cwd=cwd)
    else:
        r = _run_cmd(["git", "log", "--oneline", "--no-decorate", "-50"], cwd=cwd)
    commits = [line.strip() for line in r["stdout"].strip().split("\n") if line.strip()] if r["stdout"].strip() else []
    result["commits_since_last_tag"] = len(commits)
    result["commits"] = commits[:30]  # Cap

    # Changed files since last tag (fallback: last 10 commits)
    if last_tag:
        r = _run_cmd(["git", "diff", "--name-only", last_tag, "HEAD"], cwd=cwd)
    else:
        r = _run_cmd(["git", "diff", "--name-only", "HEAD~10", "HEAD"], cwd=cwd)
    changed = [f for f in r["stdout"].strip().split("\n") if f.strip()] if r["stdout"].strip() else []
    result["changed_files"] = changed
    result["changed_files_count"] = len(changed)

    # Distinct commit authors in the same range
    if last_tag:
        r = _run_cmd(["git", "log", f"{last_tag}..HEAD", "--format=%an"], cwd=cwd)
    else:
        r = _run_cmd(["git", "log", "--format=%an", "-50"], cwd=cwd)
    authors = list(set(line.strip() for line in r["stdout"].strip().split("\n") if line.strip())) if r["stdout"].strip() else []
    result["authors"] = authors

    # Suggest version: explicit > bumped-from-last-tag > initial 0.1.0
    if version:
        result["suggested_version"] = version
    elif last_tag:
        # Simple semver bump heuristic driven by commit-message keywords.
        tag = last_tag.lstrip("v")
        parts = tag.split(".")
        if len(parts) == 3:
            # FIX: non-numeric components (e.g. "1.2.3-rc1") previously
            # raised an uncaught ValueError from int(); fall back to
            # "unknown" like the malformed-tag path below.
            try:
                major, minor, patch = (int(p) for p in parts)
            except ValueError:
                result["suggested_version"] = "unknown"
            else:
                commit_text = " ".join(commits).lower()
                if any(kw in commit_text for kw in ["breaking", "!:", "major"]):
                    result["suggested_version"] = f"{major + 1}.0.0"
                elif any(kw in commit_text for kw in ["feat", "feature", "add"]):
                    result["suggested_version"] = f"{major}.{minor + 1}.0"
                else:
                    result["suggested_version"] = f"{major}.{minor}.{patch + 1}"
        else:
            result["suggested_version"] = "unknown"
    else:
        result["suggested_version"] = "0.1.0"

    # Release checklist
    checklist = []

    # Tests passing? (presence of any common test/build config)
    has_tests = any(
        (repo_path / f).exists()
        for f in ["pytest.ini", "pyproject.toml", "jest.config.js", "jest.config.ts", "vitest.config.ts", "package.json"]
    )
    checklist.append({"item": "Tests passing", "status": "check_required" if has_tests else "no_test_config", "required": True})

    # Changelog updated? Only the first 500 chars are scanned, and only
    # an explicitly supplied `version` is looked for.
    changelog = repo_path / "CHANGELOG.md"
    if changelog.exists():
        content = changelog.read_text(errors="ignore")[:500]
        has_version = version and version in content
        checklist.append({"item": "CHANGELOG.md updated", "status": "done" if has_version else "pending", "required": True})
    else:
        checklist.append({"item": "CHANGELOG.md exists", "status": "missing", "required": False})

    # Version bumped in config? (first matching manifest only)
    version_files = ["package.json", "pyproject.toml", "setup.py", "version.py", "Cargo.toml"]
    for vf in version_files:
        if (repo_path / vf).exists():
            checklist.append({"item": f"Version in {vf} updated", "status": "check_required", "required": True})
            break

    # Clean working tree?
    r = _run_cmd(["git", "status", "--porcelain"], cwd=cwd)
    clean = not r["stdout"].strip()
    checklist.append({"item": "Clean working tree", "status": "clean" if clean else "dirty", "required": True})

    # No uncommitted changes
    checklist.append({"item": "All changes committed", "status": "yes" if clean else "no", "required": True})

    # CI/CD config present?
    ci_files = [".github/workflows", ".gitlab-ci.yml", "Jenkinsfile", ".circleci/config.yml"]
    has_ci = any((repo_path / f).exists() for f in ci_files)
    checklist.append({"item": "CI/CD pipeline configured", "status": "present" if has_ci else "not_found", "required": False})

    result["checklist"] = checklist

    # Persist a compact plan record to the deploy tracker directory
    DEPLOYS_DIR.mkdir(parents=True, exist_ok=True)
    plan_file = DEPLOYS_DIR / f"plan_{environment}_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}.json"
    plan_data = {
        "environment": environment,
        "version": result.get("suggested_version", version),
        "repository": str(repo_path),
        "status": "planned",
        "timestamp": result["timestamp"],
        "commits": len(commits),
        "changed_files": len(changed),
    }
    try:
        plan_file.write_text(json.dumps(plan_data, indent=2))
        result["plan_file"] = str(plan_file)
    except OSError as e:
        # Best-effort: a read-only deploys dir should not fail the plan itself.
        result["plan_file_error"] = str(e)

    return result
|
|
791
|
+
|
|
792
|
+
|
|
793
|
+
# ─── 10. release_status ─────────────────────────────────────────────────
|
|
794
|
+
|
|
795
|
+
def release_status(environment: str = "production") -> Dict[str, Any]:
    """Report release/deploy status from the file-based tracker plus git state.

    Reads the most recent plan files for `environment` from DEPLOYS_DIR,
    then inspects the associated git repository (or the cwd as a fallback)
    for the current tag, branch, HEAD, and how many commits HEAD is ahead
    of the latest tag.
    """
    result: Dict[str, Any] = {
        "environment": environment,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "latest_deploy": None,
        "current_tag": None,
        "ahead_by_commits": 0,
        "status": "unknown",
        "deploy_history": [],
    }

    # Load up to the 10 most recent plan files for this environment.
    if DEPLOYS_DIR.is_dir():
        recent = sorted(
            DEPLOYS_DIR.glob(f"plan_{environment}_*.json"),
            key=lambda p: p.stat().st_mtime,
            reverse=True,
        )[:10]
        history = result["deploy_history"]
        for plan_path in recent:
            try:
                history.append(json.loads(plan_path.read_text()))
            except (json.JSONDecodeError, OSError):
                # Skip unreadable or corrupt plan files.
                continue
        if history:
            result["latest_deploy"] = history[0]
            result["status"] = history[0].get("status", "unknown")

    # Choose a repository: the one recorded in the latest deploy, else cwd.
    repo_dir = None
    latest = result["latest_deploy"]
    if latest and latest.get("repository"):
        candidate = Path(latest["repository"])
        if (candidate / ".git").is_dir():
            repo_dir = str(candidate)
    if repo_dir is None and Path(".git").is_dir():
        repo_dir = "."

    if repo_dir:
        # Latest reachable tag and how far HEAD has moved past it.
        describe = _run_cmd(["git", "describe", "--tags", "--abbrev=0"], cwd=repo_dir)
        if describe["returncode"] == 0:
            tag = describe["stdout"].strip()
            result["current_tag"] = tag

            count = _run_cmd(["git", "rev-list", f"{tag}..HEAD", "--count"], cwd=repo_dir)
            if count["returncode"] == 0:
                try:
                    result["ahead_by_commits"] = int(count["stdout"].strip())
                except ValueError:
                    pass

            # Git state takes precedence over the tracker's recorded status.
            result["status"] = "up_to_date" if result["ahead_by_commits"] == 0 else "ahead_of_tag"
        else:
            result["current_tag"] = None
            result["status"] = "no_tags"

        # Current branch and short HEAD SHA for context.
        branch = _run_cmd(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=repo_dir)
        if branch["returncode"] == 0:
            result["current_branch"] = branch["stdout"].strip()
        head = _run_cmd(["git", "rev-parse", "--short", "HEAD"], cwd=repo_dir)
        if head["returncode"] == 0:
            result["head_sha"] = head["stdout"].strip()

    return result
|