@raghulm/aegis-mcp 1.0.4 → 1.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +286 -290
- package/audit/audit_logger.py +62 -62
- package/package.json +5 -6
- package/policies/roles.yaml +34 -34
- package/policies/scope_rules.yaml +16 -16
- package/requirements.txt +8 -7
- package/run_stdio.py +22 -22
- package/server/auth.py +69 -69
- package/server/config.py +82 -82
- package/server/health.py +19 -19
- package/server/logging.py +33 -33
- package/server/main.py +212 -144
- package/server/stdio.py +7 -7
- package/tools/aws/ec2.py +26 -26
- package/tools/aws/s3.py +54 -54
- package/tools/cicd/jenkins.py +256 -0
- package/tools/cicd/pipeline.py +33 -33
- package/tools/git/repo.py +22 -22
- package/tools/kubernetes/audit.py +108 -108
- package/tools/kubernetes/pods.py +27 -27
- package/tools/network/headers.py +99 -99
- package/tools/network/port_scanner.py +66 -66
- package/tools/network/ssl_checker.py +65 -65
- package/tools/security/deps.py +103 -103
- package/tools/security/secrets.py +91 -91
- package/tools/security/semgrep.py +261 -261
- package/tools/security/trivy.py +19 -19
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
"""Jenkins CI/CD integration tools for Aegis MCP.
|
|
2
|
+
|
|
3
|
+
Provides functions to manage Jenkins jobs and builds via the Jenkins REST API
|
|
4
|
+
using the ``python-jenkins`` library. Credentials (URL, username, API token)
|
|
5
|
+
are passed per-call so no global state or environment variables are required.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations

import json
from typing import Any
from urllib.parse import quote

import jenkins
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# ---------------------------------------------------------------------------
|
|
17
|
+
# Helpers
|
|
18
|
+
# ---------------------------------------------------------------------------
|
|
19
|
+
|
|
20
|
+
def _client(url: str, username: str, api_token: str) -> jenkins.Jenkins:
    """Build an authenticated Jenkins client.

    Credentials are validated eagerly with a ``get_whoami`` call so that a
    bad URL or token surfaces here rather than inside the first tool call.

    Raises:
        RuntimeError: If the server is unreachable or authentication fails.
    """
    try:
        conn = jenkins.Jenkins(url, username=username, password=api_token)
        conn.get_whoami()  # eager connectivity/credential check
    except jenkins.JenkinsException as exc:
        raise RuntimeError(
            f"Cannot connect to Jenkins at '{url}': {exc}"
        ) from exc
    except Exception as exc:  # lower-level network/HTTP errors
        raise RuntimeError(
            f"Jenkins connection error: {exc}"
        ) from exc
    return conn
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
# ---------------------------------------------------------------------------
|
|
38
|
+
# Tool functions
|
|
39
|
+
# ---------------------------------------------------------------------------
|
|
40
|
+
|
|
41
|
+
def jenkins_list_jobs(url: str, username: str, api_token: str) -> list[dict]:
    """List all jobs on a Jenkins server.

    Returns a list of dicts with keys: name, url, color (build status indicator).
    """
    server = _client(url, username, api_token)
    try:
        raw_jobs = server.get_all_jobs()
    except jenkins.JenkinsException as exc:
        raise RuntimeError(f"Failed to list Jenkins jobs: {exc}") from exc

    # Project each raw job entry down to the three summary fields.
    summaries: list[dict] = []
    for job in raw_jobs:
        summaries.append(
            {
                "name": job.get("name", ""),
                "url": job.get("url", ""),
                "color": job.get("color", ""),
            }
        )
    return summaries
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def jenkins_get_job_info(
    url: str, username: str, api_token: str, job_name: str
) -> dict:
    """Get detailed information about a Jenkins job.

    Returns build history, health reports, and configuration details.
    """
    server = _client(url, username, api_token)
    try:
        raw = server.get_job_info(job_name)
    except jenkins.NotFoundException:
        raise RuntimeError(f"Jenkins job '{job_name}' not found")
    except jenkins.JenkinsException as exc:
        raise RuntimeError(
            f"Failed to get info for job '{job_name}': {exc}"
        ) from exc

    # (output key, raw Jenkins key, default) — a stable snake_case subset
    # of the much larger raw payload.
    fields = (
        ("name", "name", ""),
        ("url", "url", ""),
        ("description", "description", ""),
        ("buildable", "buildable", False),
        ("color", "color", ""),
        ("last_build", "lastBuild", None),
        ("last_successful_build", "lastSuccessfulBuild", None),
        ("last_failed_build", "lastFailedBuild", None),
        ("health_report", "healthReport", []),
        ("in_queue", "inQueue", False),
    )
    return {out: raw.get(src, default) for out, src, default in fields}
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def jenkins_create_job(
    url: str,
    username: str,
    api_token: str,
    job_name: str,
    config_xml: str = "",
) -> dict:
    """Create a new Jenkins job.

    Args:
        url: Jenkins server URL.
        username: Jenkins username.
        api_token: Jenkins API token.
        job_name: Name for the new job.
        config_xml: Jenkins job configuration XML. If empty, a minimal
            freestyle project config is used.

    Returns:
        Dict with the created job name and URL.

    Raises:
        RuntimeError: If the job cannot be created (e.g. it already exists
            or the server rejects the configuration XML).
    """
    if not config_xml:
        # Minimal freestyle-project skeleton shipped with python-jenkins.
        config_xml = jenkins.EMPTY_CONFIG_XML

    server = _client(url, username, api_token)
    try:
        server.create_job(job_name, config_xml)
        return {
            "status": "created",
            "job_name": job_name,
            # Percent-encode the job name: Jenkins allows names with spaces
            # and other special characters, which would otherwise produce an
            # invalid URL here.
            "url": f"{url.rstrip('/')}/job/{quote(job_name)}/",
        }
    except jenkins.JenkinsException as exc:
        raise RuntimeError(
            f"Failed to create job '{job_name}': {exc}"
        ) from exc
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def jenkins_trigger_build(
    url: str,
    username: str,
    api_token: str,
    job_name: str,
    parameters: str = "",
) -> dict:
    """Trigger a build for a Jenkins job.

    Args:
        url: Jenkins server URL.
        username: Jenkins username.
        api_token: Jenkins API token.
        job_name: Name of the job to build.
        parameters: Optional JSON string of build parameters,
            e.g. '{"BRANCH": "main"}'.

    Returns:
        Dict with the queue item number.

    Raises:
        RuntimeError: If the parameters are not a valid JSON object, or the
            build cannot be triggered.
    """
    # Validate the parameters payload BEFORE opening a connection so that
    # malformed input fails fast without a round-trip to the server.
    params: dict[str, Any] | None = None
    if parameters:
        try:
            params = json.loads(parameters)
        except json.JSONDecodeError as exc:
            raise RuntimeError(
                f"Invalid parameters JSON: {exc}"
            ) from exc
        if not isinstance(params, dict):
            # build_job expects a mapping of parameter names to values.
            raise RuntimeError(
                "Invalid parameters JSON: expected an object mapping "
                "parameter names to values"
            )

    server = _client(url, username, api_token)
    try:
        queue_item = server.build_job(job_name, parameters=params)
        return {
            "status": "triggered",
            "job_name": job_name,
            "queue_item": queue_item,
        }
    except jenkins.JenkinsException as exc:
        raise RuntimeError(
            f"Failed to trigger build for '{job_name}': {exc}"
        ) from exc
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def jenkins_get_build_info(
    url: str,
    username: str,
    api_token: str,
    job_name: str,
    build_number: int,
) -> dict:
    """Get information about a specific build.

    Returns build result, duration, timestamp, and other metadata.
    """
    server = _client(url, username, api_token)
    try:
        raw = server.get_build_info(job_name, build_number)
    except jenkins.NotFoundException:
        raise RuntimeError(
            f"Build #{build_number} not found for job '{job_name}'"
        )
    except jenkins.JenkinsException as exc:
        raise RuntimeError(
            f"Failed to get build info for '{job_name}' #{build_number}: {exc}"
        ) from exc

    # Flatten the raw payload into a small snake_case summary.
    summary = {"job_name": job_name}
    summary["build_number"] = raw.get("number")
    summary["result"] = raw.get("result")
    summary["duration_ms"] = raw.get("duration")
    summary["timestamp"] = raw.get("timestamp")
    summary["building"] = raw.get("building", False)
    summary["url"] = raw.get("url", "")
    summary["display_name"] = raw.get("displayName", "")
    return summary
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def jenkins_get_build_log(
    url: str,
    username: str,
    api_token: str,
    job_name: str,
    build_number: int,
) -> dict:
    """Fetch the console output of a Jenkins build.

    Returns the full console log as a string (truncated to 50 000 chars to
    keep MCP responses manageable).
    """
    max_chars = 50_000  # cap so MCP responses stay a manageable size
    server = _client(url, username, api_token)
    try:
        console = server.get_build_console_output(job_name, build_number)
    except jenkins.NotFoundException:
        raise RuntimeError(
            f"Build #{build_number} not found for job '{job_name}'"
        )
    except jenkins.JenkinsException as exc:
        raise RuntimeError(
            f"Failed to get build log for '{job_name}' #{build_number}: {exc}"
        ) from exc

    return {
        "job_name": job_name,
        "build_number": build_number,
        "log": console[:max_chars],
        "truncated": len(console) > max_chars,
    }
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def jenkins_delete_job(
    url: str, username: str, api_token: str, job_name: str
) -> dict:
    """Delete a Jenkins job.

    Returns confirmation dict on success.
    """
    server = _client(url, username, api_token)
    try:
        server.delete_job(job_name)
    except jenkins.NotFoundException:
        raise RuntimeError(f"Jenkins job '{job_name}' not found")
    except jenkins.JenkinsException as exc:
        raise RuntimeError(
            f"Failed to delete job '{job_name}': {exc}"
        ) from exc
    return {"status": "deleted", "job_name": job_name}
|
package/tools/cicd/pipeline.py
CHANGED
|
@@ -1,33 +1,33 @@
|
|
|
1
|
-
from __future__ import annotations
|
|
2
|
-
|
|
3
|
-
import requests
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
def pipeline_status(base_url: str, pipeline_id: str, api_token: str) -> dict:
|
|
7
|
-
"""Fetch the status of a CI/CD pipeline by its ID.
|
|
8
|
-
|
|
9
|
-
Args:
|
|
10
|
-
base_url: Base URL of the CI/CD service API.
|
|
11
|
-
pipeline_id: Unique identifier of the pipeline.
|
|
12
|
-
api_token: API token for authenticating with the CI/CD service.
|
|
13
|
-
|
|
14
|
-
Returns:
|
|
15
|
-
Pipeline status as a JSON-compatible dict.
|
|
16
|
-
"""
|
|
17
|
-
try:
|
|
18
|
-
response = requests.get(
|
|
19
|
-
f"{base_url.rstrip('/')}/pipelines/{pipeline_id}",
|
|
20
|
-
headers={"Authorization": f"Bearer {api_token}"},
|
|
21
|
-
timeout=15,
|
|
22
|
-
)
|
|
23
|
-
response.raise_for_status()
|
|
24
|
-
except requests.ConnectionError as exc:
|
|
25
|
-
raise RuntimeError(f"Cannot connect to CI/CD service at '{base_url}': {exc}") from exc
|
|
26
|
-
except requests.HTTPError as exc:
|
|
27
|
-
raise RuntimeError(
|
|
28
|
-
f"CI/CD API error for pipeline '{pipeline_id}': {exc.response.status_code}"
|
|
29
|
-
) from exc
|
|
30
|
-
except requests.Timeout as exc:
|
|
31
|
-
raise RuntimeError(f"CI/CD API request timed out for pipeline '{pipeline_id}'") from exc
|
|
32
|
-
|
|
33
|
-
return response.json()
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import requests
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def pipeline_status(base_url: str, pipeline_id: str, api_token: str) -> dict:
    """Fetch the status of a CI/CD pipeline by its ID.

    Args:
        base_url: Base URL of the CI/CD service API.
        pipeline_id: Unique identifier of the pipeline.
        api_token: API token for authenticating with the CI/CD service.

    Returns:
        Pipeline status as a JSON-compatible dict.

    Raises:
        RuntimeError: On connection failure, timeout, HTTP error status,
            or a non-JSON response body.
    """
    try:
        response = requests.get(
            f"{base_url.rstrip('/')}/pipelines/{pipeline_id}",
            headers={"Authorization": f"Bearer {api_token}"},
            timeout=15,
        )
        response.raise_for_status()
    except requests.Timeout as exc:
        # Checked before ConnectionError: ConnectTimeout subclasses both,
        # and should be reported as a timeout, not a connection failure.
        raise RuntimeError(f"CI/CD API request timed out for pipeline '{pipeline_id}'") from exc
    except requests.ConnectionError as exc:
        raise RuntimeError(f"Cannot connect to CI/CD service at '{base_url}': {exc}") from exc
    except requests.HTTPError as exc:
        raise RuntimeError(
            f"CI/CD API error for pipeline '{pipeline_id}': {exc.response.status_code}"
        ) from exc

    try:
        return response.json()
    except ValueError as exc:
        # 2xx status but the body is not valid JSON — surface it as the
        # same error type callers already handle.
        raise RuntimeError(
            f"CI/CD API returned a non-JSON response for pipeline '{pipeline_id}'"
        ) from exc
|
package/tools/git/repo.py
CHANGED
|
@@ -1,22 +1,22 @@
|
|
|
1
|
-
from __future__ import annotations
|
|
2
|
-
|
|
3
|
-
import subprocess
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
def get_recent_commits(limit: int = 10) -> list[dict[str, str]]:
|
|
7
|
-
try:
|
|
8
|
-
raw = subprocess.check_output(
|
|
9
|
-
["git", "log", f"-{limit}", "--pretty=format:%H|%an|%s"],
|
|
10
|
-
text=True,
|
|
11
|
-
)
|
|
12
|
-
except FileNotFoundError as exc:
|
|
13
|
-
raise RuntimeError("git is not installed or not on PATH") from exc
|
|
14
|
-
except subprocess.CalledProcessError as exc:
|
|
15
|
-
raise RuntimeError(f"git log failed: {exc.output}") from exc
|
|
16
|
-
|
|
17
|
-
commits = []
|
|
18
|
-
for line in raw.splitlines():
|
|
19
|
-
parts = line.split("|", maxsplit=2)
|
|
20
|
-
if len(parts) == 3:
|
|
21
|
-
commits.append({"hash": parts[0], "author": parts[1], "subject": parts[2]})
|
|
22
|
-
return commits
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import subprocess
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def get_recent_commits(limit: int = 10) -> list[dict[str, str]]:
    """Return metadata for the most recent commits in the current repository.

    Args:
        limit: Maximum number of commits to return (most recent first).

    Returns:
        A list of dicts with keys ``hash``, ``author`` and ``subject``.

    Raises:
        RuntimeError: If git is not installed or ``git log`` fails
            (e.g. the working directory is not a git repository).
    """
    try:
        raw = subprocess.check_output(
            ["git", "log", f"-{limit}", "--pretty=format:%H|%an|%s"],
            # Capture git's diagnostic text; without this exc.output is None
            # and the error below would read "git log failed: None".
            stderr=subprocess.STDOUT,
            text=True,
        )
    except FileNotFoundError as exc:
        raise RuntimeError("git is not installed or not on PATH") from exc
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(f"git log failed: {exc.output}") from exc

    commits: list[dict[str, str]] = []
    for line in raw.splitlines():
        # %H|%an|%s — maxsplit keeps '|' characters in the subject intact.
        parts = line.split("|", maxsplit=2)
        if len(parts) == 3:
            commits.append({"hash": parts[0], "author": parts[1], "subject": parts[2]})
    return commits
|
|
@@ -1,108 +1,108 @@
|
|
|
1
|
-
from __future__ import annotations
|
|
2
|
-
|
|
3
|
-
import json
|
|
4
|
-
import subprocess
|
|
5
|
-
from typing import Any
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
def k8s_security_audit(namespace: str = "") -> list[dict[str, Any]]:
|
|
9
|
-
findings = []
|
|
10
|
-
|
|
11
|
-
cmd_base = ["kubectl", "get"]
|
|
12
|
-
if namespace:
|
|
13
|
-
ns_args = ["-n", namespace]
|
|
14
|
-
else:
|
|
15
|
-
ns_args = ["-A"]
|
|
16
|
-
|
|
17
|
-
try:
|
|
18
|
-
pods_result = subprocess.check_output(
|
|
19
|
-
cmd_base + ["pods"] + ns_args + ["-o", "json"],
|
|
20
|
-
stderr=subprocess.STDOUT,
|
|
21
|
-
text=True,
|
|
22
|
-
)
|
|
23
|
-
pods_payload = json.loads(pods_result)
|
|
24
|
-
except Exception as exc:
|
|
25
|
-
print(f"Warning: Failed to get pods: {exc}")
|
|
26
|
-
pods_payload = {"items": []}
|
|
27
|
-
|
|
28
|
-
try:
|
|
29
|
-
services_result = subprocess.check_output(
|
|
30
|
-
cmd_base + ["svc"] + ns_args + ["-o", "json"],
|
|
31
|
-
stderr=subprocess.STDOUT,
|
|
32
|
-
text=True,
|
|
33
|
-
)
|
|
34
|
-
svcs_payload = json.loads(services_result)
|
|
35
|
-
except Exception as exc:
|
|
36
|
-
print(f"Warning: Failed to get services: {exc}")
|
|
37
|
-
svcs_payload = {"items": []}
|
|
38
|
-
|
|
39
|
-
try:
|
|
40
|
-
roles_result = subprocess.check_output(
|
|
41
|
-
cmd_base + ["clusterrolebindings", "-o", "json"],
|
|
42
|
-
stderr=subprocess.STDOUT,
|
|
43
|
-
text=True,
|
|
44
|
-
)
|
|
45
|
-
crb_payload = json.loads(roles_result)
|
|
46
|
-
except Exception as exc:
|
|
47
|
-
print(f"Warning: Failed to get clusterrolebindings: {exc}")
|
|
48
|
-
crb_payload = {"items": []}
|
|
49
|
-
|
|
50
|
-
# Parse pods
|
|
51
|
-
for pod in pods_payload.get("items", []):
|
|
52
|
-
metadata = pod.get("metadata", {})
|
|
53
|
-
pod_name = metadata.get("name", "unknown")
|
|
54
|
-
pod_ns = metadata.get("namespace", "unknown")
|
|
55
|
-
spec = pod.get("spec", {})
|
|
56
|
-
|
|
57
|
-
# Check hostNetwork
|
|
58
|
-
if spec.get("hostNetwork") is True:
|
|
59
|
-
findings.append({
|
|
60
|
-
"type": "hostNetwork",
|
|
61
|
-
"severity": "HIGH",
|
|
62
|
-
"resource": f"Pod/{pod_ns}/{pod_name}",
|
|
63
|
-
"message": "Pod is using host network."
|
|
64
|
-
})
|
|
65
|
-
|
|
66
|
-
# Check privileged containers
|
|
67
|
-
for container in spec.get("containers", []):
|
|
68
|
-
sec_ctx = container.get("securityContext", {})
|
|
69
|
-
if sec_ctx.get("privileged") is True:
|
|
70
|
-
findings.append({
|
|
71
|
-
"type": "privileged_container",
|
|
72
|
-
"severity": "CRITICAL",
|
|
73
|
-
"resource": f"Pod/{pod_ns}/{pod_name}",
|
|
74
|
-
"message": f"Container '{container.get('name')}' is running as privileged.",
|
|
75
|
-
})
|
|
76
|
-
|
|
77
|
-
# Parse services
|
|
78
|
-
for svc in svcs_payload.get("items", []):
|
|
79
|
-
metadata = svc.get("metadata", {})
|
|
80
|
-
svc_name = metadata.get("name", "unknown")
|
|
81
|
-
svc_ns = metadata.get("namespace", "unknown")
|
|
82
|
-
spec = svc.get("spec", {})
|
|
83
|
-
|
|
84
|
-
if spec.get("type") == "NodePort":
|
|
85
|
-
findings.append({
|
|
86
|
-
"type": "exposed_nodeport",
|
|
87
|
-
"severity": "MEDIUM",
|
|
88
|
-
"resource": f"Service/{svc_ns}/{svc_name}",
|
|
89
|
-
"message": "Service is exposed via NodePort."
|
|
90
|
-
})
|
|
91
|
-
|
|
92
|
-
# Parse cluster role bindings
|
|
93
|
-
for crb in crb_payload.get("items", []):
|
|
94
|
-
metadata = crb.get("metadata", {})
|
|
95
|
-
crb_name = metadata.get("name", "unknown")
|
|
96
|
-
role_ref = crb.get("roleRef", {})
|
|
97
|
-
|
|
98
|
-
if role_ref.get("name") == "cluster-admin":
|
|
99
|
-
for subj in crb.get("subjects", []):
|
|
100
|
-
if subj.get("kind") == "ServiceAccount":
|
|
101
|
-
findings.append({
|
|
102
|
-
"type": "cluster_admin_sa",
|
|
103
|
-
"severity": "CRITICAL",
|
|
104
|
-
"resource": f"ClusterRoleBinding/{crb_name}",
|
|
105
|
-
"message": f"ServiceAccount '{subj.get('namespace')}/{subj.get('name')}' is bound to cluster-admin."
|
|
106
|
-
})
|
|
107
|
-
|
|
108
|
-
return findings
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import subprocess
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def k8s_security_audit(namespace: str = "") -> list[dict[str, Any]]:
    """Scan pods, services and RBAC bindings for common security issues.

    Args:
        namespace: Restrict the scan to one namespace; empty scans all
            namespaces.

    Returns:
        A list of finding dicts with keys: type, severity, resource, message.
    """
    scope = ["-n", namespace] if namespace else ["-A"]

    def fetch(label: str, args: list[str]) -> dict[str, Any]:
        # Best-effort kubectl fetch: any failure is downgraded to a warning
        # plus an empty item list so the remaining checks still run.
        try:
            out = subprocess.check_output(
                ["kubectl", "get"] + args,
                stderr=subprocess.STDOUT,
                text=True,
            )
            return json.loads(out)
        except Exception as exc:
            print(f"Warning: Failed to get {label}: {exc}")
            return {"items": []}

    pods_payload = fetch("pods", ["pods"] + scope + ["-o", "json"])
    svcs_payload = fetch("services", ["svc"] + scope + ["-o", "json"])
    crb_payload = fetch("clusterrolebindings", ["clusterrolebindings", "-o", "json"])

    findings: list[dict[str, Any]] = []

    # Pod-level checks: host networking and privileged containers.
    for pod in pods_payload.get("items", []):
        meta = pod.get("metadata", {})
        ref = f"Pod/{meta.get('namespace', 'unknown')}/{meta.get('name', 'unknown')}"
        spec = pod.get("spec", {})

        if spec.get("hostNetwork") is True:
            findings.append({
                "type": "hostNetwork",
                "severity": "HIGH",
                "resource": ref,
                "message": "Pod is using host network."
            })

        for container in spec.get("containers", []):
            if container.get("securityContext", {}).get("privileged") is True:
                findings.append({
                    "type": "privileged_container",
                    "severity": "CRITICAL",
                    "resource": ref,
                    "message": f"Container '{container.get('name')}' is running as privileged.",
                })

    # Service-level check: NodePort exposure.
    for svc in svcs_payload.get("items", []):
        meta = svc.get("metadata", {})
        if svc.get("spec", {}).get("type") == "NodePort":
            findings.append({
                "type": "exposed_nodeport",
                "severity": "MEDIUM",
                "resource": f"Service/{meta.get('namespace', 'unknown')}/{meta.get('name', 'unknown')}",
                "message": "Service is exposed via NodePort."
            })

    # RBAC check: service accounts bound to cluster-admin.
    for crb in crb_payload.get("items", []):
        if crb.get("roleRef", {}).get("name") != "cluster-admin":
            continue
        crb_name = crb.get("metadata", {}).get("name", "unknown")
        for subj in crb.get("subjects", []):
            if subj.get("kind") == "ServiceAccount":
                findings.append({
                    "type": "cluster_admin_sa",
                    "severity": "CRITICAL",
                    "resource": f"ClusterRoleBinding/{crb_name}",
                    "message": f"ServiceAccount '{subj.get('namespace')}/{subj.get('name')}' is bound to cluster-admin."
                })

    return findings
|
package/tools/kubernetes/pods.py
CHANGED
|
@@ -1,27 +1,27 @@
|
|
|
1
|
-
from __future__ import annotations
|
|
2
|
-
|
|
3
|
-
import json
|
|
4
|
-
import subprocess
|
|
5
|
-
from typing import Any
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
def list_pods(namespace: str = "default") -> list[dict[str, Any]]:
|
|
9
|
-
try:
|
|
10
|
-
result = subprocess.check_output(
|
|
11
|
-
["kubectl", "get", "pods", "-n", namespace, "-o", "json"],
|
|
12
|
-
stderr=subprocess.STDOUT,
|
|
13
|
-
text=True,
|
|
14
|
-
)
|
|
15
|
-
except FileNotFoundError as exc:
|
|
16
|
-
raise RuntimeError("kubectl is not installed or not on PATH") from exc
|
|
17
|
-
except subprocess.CalledProcessError as exc:
|
|
18
|
-
raise RuntimeError(f"kubectl error (namespace '{namespace}'): {exc.output}") from exc
|
|
19
|
-
|
|
20
|
-
payload = json.loads(result)
|
|
21
|
-
return [
|
|
22
|
-
{
|
|
23
|
-
"name": item["metadata"]["name"],
|
|
24
|
-
"phase": item["status"].get("phase"),
|
|
25
|
-
}
|
|
26
|
-
for item in payload.get("items", [])
|
|
27
|
-
]
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import subprocess
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def list_pods(namespace: str = "default") -> list[dict[str, Any]]:
    """List pods in *namespace* with their name and lifecycle phase.

    Raises:
        RuntimeError: If kubectl is not installed or the command fails.
    """
    cmd = ["kubectl", "get", "pods", "-n", namespace, "-o", "json"]
    try:
        raw = subprocess.check_output(cmd, stderr=subprocess.STDOUT, text=True)
    except FileNotFoundError as exc:
        raise RuntimeError("kubectl is not installed or not on PATH") from exc
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(f"kubectl error (namespace '{namespace}'): {exc.output}") from exc

    pods = []
    for item in json.loads(raw).get("items", []):
        pods.append(
            {
                "name": item["metadata"]["name"],
                "phase": item["status"].get("phase"),
            }
        )
    return pods
|