qualys-mcp 2.1.0 (qualys_mcp-2.1.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

qualys_mcp-2.1.0.dist-info/METADATA
ADDED
@@ -0,0 +1,96 @@
Metadata-Version: 2.4
Name: qualys-mcp
Version: 2.1.0
Summary: MCP server for Qualys security APIs - natural language interaction with vulnerability, asset, and cloud security data
Project-URL: Homepage, https://github.com/nelssec/qualys-mcp
Project-URL: Repository, https://github.com/nelssec/qualys-mcp
Project-URL: Issues, https://github.com/nelssec/qualys-mcp/issues
Author-email: Andrew Nelson <andrew@nelssec.com>
License-Expression: MIT
License-File: LICENSE
Keywords: ai,claude,mcp,qualys,security,vmdr,vulnerability
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Information Technology
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Security
Requires-Python: >=3.9
Requires-Dist: fastmcp>=0.1.0
Description-Content-Type: text/markdown

# Qualys MCP Server

A lightweight MCP server for Qualys security data - **12 tools** that answer your security questions.

## Claude Desktop Config

Add to `~/Library/Application Support/Claude/claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "qualys": {
      "command": "uvx",
      "args": ["qualys-mcp"],
      "env": {
        "QUALYS_USERNAME": "your-username",
        "QUALYS_PASSWORD": "your-password",
        "QUALYS_BASE_URL": "https://qualysapi.qualys.com",
        "QUALYS_GATEWAY_URL": "https://gateway.qg1.apps.qualys.com"
      }
    }
  }
}
```

Requires [uv](https://docs.astral.sh/uv/): `brew install uv` or `curl -LsSf https://astral.sh/uv/install.sh | sh`

## Alternative Installation

```bash
# Install globally
pip install qualys-mcp

# Run directly
export QUALYS_USERNAME="your-username"
export QUALYS_PASSWORD="your-password"
export QUALYS_BASE_URL="https://qualysapi.qualys.com"
export QUALYS_GATEWAY_URL="https://gateway.qg1.apps.qualys.com"
qualys-mcp
```

## Tools

| Tool | Question it answers |
|------|---------------------|
| `get_weekly_priorities` | What should my team fix this week? |
| `investigate_cve` | Are we affected by CVE-XXXX? |
| `get_security_posture` | How secure are we overall? |
| `get_patch_status` | What's our patching coverage? |
| `get_compliance_gaps` | What will fail our audit? |
| `get_cloud_risk` | What's our cloud security posture? |
| `get_asset_risk` | Why is this asset risky? |
| `get_tech_debt` | How do we reduce EOL software? |
| `get_image_vulns` | What vulns are in this container image? |
| `get_expiring_certs` | What certificates expire soon? |
| `get_threats` | What threats have we detected? |
| `get_webapp_vulns` | What web app vulns exist? |
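
Outside Claude, the tools can also be exercised directly over stdio. The sketch below is illustrative only: it assumes a FastMCP release that ships `fastmcp.Client` (this package only pins `fastmcp>=0.1.0`) and that the `QUALYS_*` variables are already exported.

```python
# Hypothetical smoke test - not shipped with qualys-mcp.
import asyncio
from fastmcp import Client  # assumes a FastMCP version that provides Client

async def main():
    # Spawns qualys_mcp.py as a stdio MCP server in the current environment.
    async with Client("qualys_mcp.py") as client:
        tools = await client.list_tools()
        print(sorted(t.name for t in tools))  # should include the 12 tools above
        report = await client.call_tool("investigate_cve", {"cve": "CVE-2021-44228"})
        print(report)

asyncio.run(main())
```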

## Qualys PODs

| POD | BASE_URL | GATEWAY_URL |
|-----|----------|-------------|
| US1 | qualysapi.qualys.com | gateway.qg1.apps.qualys.com |
| US2 | qualysapi.qg2.apps.qualys.com | gateway.qg2.apps.qualys.com |
| US3 | qualysapi.qg3.apps.qualys.com | gateway.qg3.apps.qualys.com |
| EU1 | qualysapi.qualys.eu | gateway.qg1.apps.qualys.eu |
| EU2 | qualysapi.qg2.apps.qualys.eu | gateway.qg2.apps.qualys.eu |
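
The table maps directly onto `QUALYS_BASE_URL` and `QUALYS_GATEWAY_URL`. As a hypothetical convenience (not part of the package), a launcher script could pick a POD by name; the values below are copied from the table, and they must be set before `qualys_mcp` is imported because the module reads its environment at import time.

```python
# Hypothetical helper: choose Qualys POD URLs by name (values from the table above).
import os

PODS = {
    "US1": ("https://qualysapi.qualys.com", "https://gateway.qg1.apps.qualys.com"),
    "US2": ("https://qualysapi.qg2.apps.qualys.com", "https://gateway.qg2.apps.qualys.com"),
    "US3": ("https://qualysapi.qg3.apps.qualys.com", "https://gateway.qg3.apps.qualys.com"),
    "EU1": ("https://qualysapi.qualys.eu", "https://gateway.qg1.apps.qualys.eu"),
    "EU2": ("https://qualysapi.qg2.apps.qualys.eu", "https://gateway.qg2.apps.qualys.eu"),
}

base_url, gateway_url = PODS["EU1"]
os.environ["QUALYS_BASE_URL"] = base_url
os.environ["QUALYS_GATEWAY_URL"] = gateway_url  # set before importing qualys_mcp
```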

## License

MIT - Copyright (c) 2025 Andrew Nelson

qualys_mcp-2.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,6 @@
qualys_mcp.py,sha256=6V8ulDBKwLD7elgDT3Ow4GWUv1HEMHUrT-Xeer4yPrY,27677
qualys_mcp-2.1.0.dist-info/METADATA,sha256=-QohxatoOIOGnUwLISk4GJ7-QGp3xIyCVVUY3gAUwV4,3295
qualys_mcp-2.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
qualys_mcp-2.1.0.dist-info/entry_points.txt,sha256=Dc8X0AhJDjGaZOJ0SNpWDWjEX4sYzrYa9FZEbggX0Rs,47
qualys_mcp-2.1.0.dist-info/licenses/LICENSE,sha256=dW3nC4AX_VbxPAgneSDR-miZPiHgAYw5JhPtdbUEt_E,1091
qualys_mcp-2.1.0.dist-info/RECORD,,

qualys_mcp-2.1.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Andrew Nelson <andrew@nelssec.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

qualys_mcp.py
ADDED
@@ -0,0 +1,753 @@
#!/usr/bin/env python3
"""Qualys MCP Server - Pure Python implementation using FastMCP"""

import os
import json
import base64
from urllib.request import Request, urlopen
from urllib.parse import urlencode
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
from fastmcp import FastMCP

mcp = FastMCP("qualys-mcp")

USERNAME = os.environ.get('QUALYS_USERNAME', '')
PASSWORD = os.environ.get('QUALYS_PASSWORD', '')
BASE_URL = os.environ.get('QUALYS_BASE_URL', '').rstrip('/')
GATEWAY_URL = os.environ.get('QUALYS_GATEWAY_URL', '').rstrip('/')
BASIC_AUTH = base64.b64encode(f"{USERNAME}:{PASSWORD}".encode()).decode()
BEARER_TOKEN = None


def get_bearer_token():
    global BEARER_TOKEN
    if BEARER_TOKEN:
        return BEARER_TOKEN
    try:
        req = Request(f"{GATEWAY_URL}/auth", method='POST')
        req.add_header('Authorization', f'Basic {BASIC_AUTH}')
        req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        with urlopen(req, data=b'', timeout=30) as resp:
            BEARER_TOKEN = resp.read().decode().strip()
            return BEARER_TOKEN
    except:
        return None


def api_get(url, gateway=False):
    req = Request(url)
    if gateway:
        token = get_bearer_token()
        req.add_header('Authorization', f'Bearer {token}' if token else f'Basic {BASIC_AUTH}')
    else:
        req.add_header('Authorization', f'Basic {BASIC_AUTH}')
    req.add_header('X-Requested-With', 'qualys-mcp')
    try:
        with urlopen(req, timeout=60) as resp:
            return resp.read()
    except:
        return None


def get_detections(severity=5, limit=500):
    data = api_get(f"{BASE_URL}/api/2.0/fo/asset/host/vm/detection/?action=list&severities={severity}&truncation_limit={limit}&status=Active")
    if not data:
        return []
    dets = []
    try:
        root = ET.fromstring(data)
        for host in root.findall('.//HOST'):
            hid, ip = host.findtext('ID', ''), host.findtext('IP', '')
            for d in host.findall('.//DETECTION'):
                dets.append({'host_id': hid, 'ip': ip, 'qid': int(d.findtext('QID', '0')),
                             'severity': int(d.findtext('SEVERITY', '0')), 'status': d.findtext('STATUS', '')})
    except:
        pass
    return dets


def get_kb(qid):
    data = api_get(f"{BASE_URL}/api/2.0/fo/knowledge_base/vuln/?action=list&ids={qid}")
    if not data:
        return None
    try:
        root = ET.fromstring(data)
        v = root.find('.//VULN')
        if not v:
            return None
        return {'qid': qid, 'title': v.findtext('TITLE', ''), 'severity': int(v.findtext('SEVERITY_LEVEL', '0')),
                'cves': [c.findtext('ID', '') for c in v.findall('.//CVE_LIST/CVE')],
                'solution': v.findtext('SOLUTION', ''), 'patch_available': v.findtext('PATCHABLE', '0') == '1'}
    except:
        return None


def get_cve_qids(cve):
    data = api_get(f"{BASE_URL}/api/2.0/fo/knowledge_base/vuln/?action=list&details=Basic&cve_id={cve}")
    if not data:
        return []
    try:
        return [int(v.findtext('QID')) for v in ET.fromstring(data).findall('.//VULN') if v.findtext('QID')]
    except:
        return []


def get_assets(limit=100, qql=None):
    url = f"{GATEWAY_URL}/am/v1/assets?pageSize={limit}"
    if qql:
        from urllib.parse import quote
        url += f"&filter={quote(qql)}"
    data = api_get(url, gateway=True)
    try:
        return json.loads(data).get('assetListData', {}).get('asset', []) if data else []
    except:
        return []


def get_images(limit=100, severity=None):
    url = f"{GATEWAY_URL}/csapi/v1.3/images?pageSize={limit}"
    if severity:
        url += f"&filter=vulnerabilities.severity:{severity}"
    data = api_get(url, gateway=True)
    try:
        return json.loads(data).get('data', []) if data else []
    except:
        return []


def get_containers(limit=100):
    data = api_get(f"{GATEWAY_URL}/csapi/v1.3/containers?pageSize={limit}&filter=state:RUNNING", gateway=True)
    try:
        return json.loads(data).get('data', []) if data else []
    except:
        return []


def get_connectors(provider='aws', limit=50):
    data = api_get(f"{GATEWAY_URL}/cloudview-api/rest/v1/{provider}/connectors?pageSize={limit}", gateway=True)
    try:
        return json.loads(data).get('content', []) if data else []
    except:
        return []


def get_evaluations(account_id, provider='aws', limit=500):
    data = api_get(f"{GATEWAY_URL}/cloudview-api/rest/v1/{provider}/evaluations/{account_id}?pageSize={limit}", gateway=True)
    try:
        return json.loads(data).get('content', []) if data else []
    except:
        return []


def get_cdr(days=7, limit=100):
    end = datetime.utcnow()
    start = end - timedelta(days=days)
    data = api_get(f"{GATEWAY_URL}/cdr-api/rest/v1/findings/?startAt={start.isoformat()}Z&endAt={end.isoformat()}Z&limit={limit}", gateway=True)
    try:
        return json.loads(data).get('content', []) if data else []
    except:
        return []


def get_image_details(image_id):
    data = api_get(f"{GATEWAY_URL}/csapi/v1.3/images/{image_id}", gateway=True)
    try:
        return json.loads(data) if data else None
    except:
        return None


def get_image_vulns_api(image_id):
    data = api_get(f"{GATEWAY_URL}/csapi/v1.3/images/{image_id}/vuln", gateway=True)
    try:
        return json.loads(data).get('data', []) if data else []
    except:
        return []


def get_certificates(limit=100, days_expiring=None):
    url = f"{GATEWAY_URL}/certview/v1/certificates?pageSize={limit}"
    if days_expiring:
        future = (datetime.utcnow() + timedelta(days=days_expiring)).strftime('%Y-%m-%d')
        url += f"&filter=validTo:<{future}"
    data = api_get(url, gateway=True)
    try:
        return json.loads(data).get('data', []) if data else []
    except:
        return []


def get_fim_events(limit=100, days=7):
    end = datetime.utcnow()
    start = end - timedelta(days=days)
    data = api_get(f"{BASE_URL}/fim/v2/events?filter=dateTime:[{start.strftime('%Y-%m-%dT%H:%M:%SZ')}...{end.strftime('%Y-%m-%dT%H:%M:%SZ')}]&pageSize={limit}")
    try:
        return json.loads(data).get('data', []) if data else []
    except:
        return []


def get_edr_events(limit=100, severity=None):
    url = f"{GATEWAY_URL}/edr/v1/events?pageSize={limit}"
    if severity:
        url += f"&filter=severity:{severity}"
    data = api_get(url, gateway=True)
    try:
        return json.loads(data).get('data', []) if data else []
    except:
        return []


def get_was_findings(limit=100, severity=None):
    url = f"{BASE_URL}/qps/rest/3.0/search/was/finding"
    criteria = "<ServiceRequest><filters><Criteria field=\"status\" operator=\"EQUALS\">ACTIVE</Criteria>"
    if severity:
        criteria += f"<Criteria field=\"severity\" operator=\"EQUALS\">{severity}</Criteria>"
    criteria += f"</filters><preferences><limitResults>{limit}</limitResults></preferences></ServiceRequest>"

    from urllib.request import Request
    req = Request(url, data=criteria.encode(), method='POST')
    req.add_header('Authorization', f'Basic {BASIC_AUTH}')
    req.add_header('Content-Type', 'text/xml')
    req.add_header('X-Requested-With', 'qualys-mcp')
    try:
        with urlopen(req, timeout=60) as resp:
            root = ET.fromstring(resp.read())
            findings = []
            for f in root.findall('.//Finding'):
                findings.append({
                    'id': f.findtext('id', ''),
                    'qid': f.findtext('qid', ''),
                    'name': f.findtext('name', ''),
                    'severity': int(f.findtext('severity', '0')),
                    'url': f.findtext('url', ''),
                    'webAppId': f.findtext('webApp/id', ''),
                    'webAppName': f.findtext('webApp/name', '')
                })
            return findings
    except:
        return []


def get_was_webapps(limit=100):
    data = api_get(f"{BASE_URL}/qps/rest/3.0/count/was/webapp")
    webapps = []
    url = f"{BASE_URL}/qps/rest/3.0/search/was/webapp"
    criteria = f"<ServiceRequest><preferences><limitResults>{limit}</limitResults></preferences></ServiceRequest>"

    from urllib.request import Request
    req = Request(url, data=criteria.encode(), method='POST')
    req.add_header('Authorization', f'Basic {BASIC_AUTH}')
    req.add_header('Content-Type', 'text/xml')
    req.add_header('X-Requested-With', 'qualys-mcp')
    try:
        with urlopen(req, timeout=60) as resp:
            root = ET.fromstring(resp.read())
            for wa in root.findall('.//WebApp'):
                webapps.append({
                    'id': wa.findtext('id', ''),
                    'name': wa.findtext('name', ''),
                    'url': wa.findtext('url', '')
                })
    except:
        pass
    return webapps


@mcp.tool()
def get_weekly_priorities(limit: int = 10) -> dict:
    """Get prioritized security actions for the week. Returns top critical vulns and container risks ranked by severity and impact."""
    result = {'summary': {'totalCritical': 0, 'assetsAffected': 0, 'containersAtRisk': 0, 'patchable': 0},
              'priorities': [], 'byEffort': {'patch': 0, 'config': 0, 'upgrade': 0}}

    dets = get_detections(5, 500)
    qids = {}
    hosts = set()
    for d in dets:
        qid = d['qid']
        if qid not in qids:
            qids[qid] = {'count': 0, 'hosts': set(), 'sev': d['severity']}
        qids[qid]['count'] += 1
        qids[qid]['hosts'].add(d['host_id'])
        hosts.add(d['host_id'])

    for i, (qid, data) in enumerate(sorted(qids.items(), key=lambda x: (x[1]['sev'], len(x[1]['hosts'])), reverse=True)[:limit]):
        kb = get_kb(qid)
        patch = kb.get('patch_available', False) if kb else False
        result['byEffort']['patch' if patch else 'config'] += 1
        if patch:
            result['summary']['patchable'] += 1
        result['priorities'].append({
            'rank': i + 1, 'qid': qid, 'title': kb['title'] if kb else f"QID {qid}",
            'cves': kb.get('cves', [])[:3] if kb else [], 'hosts': len(data['hosts']),
            'effort': 'patch' if patch else 'config', 'fix': (kb.get('solution', '') if kb else '')[:100]
        })

    vuln_imgs = {img.get('imageId') for img in get_images(100, 5)}
    at_risk = [c for c in get_containers(500) if c.get('imageId') in vuln_imgs]
    if at_risk:
        result['priorities'].append({'rank': len(result['priorities']) + 1, 'title': 'Vulnerable containers',
                                     'containers': len(at_risk), 'effort': 'upgrade'})
        result['byEffort']['upgrade'] = len(at_risk)
        result['summary']['containersAtRisk'] = len(at_risk)

    result['summary']['totalCritical'] = len(qids)
    result['summary']['assetsAffected'] = len(hosts)
    return result


@mcp.tool()
def investigate_cve(cve: str) -> dict:
    """Investigate if your environment is affected by a specific CVE. Returns affected hosts, images, and remediation."""
    result = {'cve': cve, 'qids': [], 'affectedHosts': [], 'affectedImages': [], 'patchAvailable': False, 'fix': ''}

    qids = get_cve_qids(cve)
    result['qids'] = qids

    if qids:
        kb = get_kb(qids[0])
        if kb:
            result['patchAvailable'] = kb.get('patch_available', False)
            result['fix'] = kb.get('solution', '')[:500]

        for qid in qids[:2]:
            for d in get_detections(1, 300):
                if d['qid'] == qid:
                    result['affectedHosts'].append({'id': d['host_id'], 'ip': d['ip']})

    for img in get_images(200):
        if any(cve in str(v) for v in img.get('vulnerabilities', [])):
            result['affectedImages'].append({'id': img.get('imageId'), 'repo': img.get('repo')})

    result['totalHosts'] = len(result['affectedHosts'])
    result['totalImages'] = len(result['affectedImages'])
    return result


@mcp.tool()
def get_security_posture() -> dict:
    """Get overall security health score and stats across assets, vulns, containers, and cloud."""
    health = 100
    result = {'healthScore': 0, 'assets': {'total': 0, 'highRisk': 0},
              'vulns': {'critical': 0, 'high': 0}, 'containers': {'total': 0, 'atRisk': 0},
              'cloud': {'accounts': 0, 'failedControls': 0}}

    assets = get_assets(500)
    result['assets']['total'] = len(assets)
    result['assets']['highRisk'] = len([a for a in assets if a.get('assetRiskScore', 0) >= 700])
    if assets:
        health -= int(result['assets']['highRisk'] / len(assets) * 50)

    result['vulns']['critical'] = len(get_detections(5, 200))
    result['vulns']['high'] = len(get_detections(4, 200))
    if result['vulns']['critical'] > 50:
        health -= 20
    elif result['vulns']['critical'] > 10:
        health -= 10

    imgs = get_images(500)
    result['containers']['total'] = len(imgs)
    vuln_ids = {i.get('imageId') for i in get_images(100, 5)}
    result['containers']['atRisk'] = len([c for c in get_containers(500) if c.get('imageId') in vuln_ids])

    for p in ['aws', 'azure', 'gcp']:
        conns = get_connectors(p, 20)
        result['cloud']['accounts'] += len(conns)
        if conns:
            acc = conns[0].get('awsAccountId') or conns[0].get('azureSubscriptionId') or conns[0].get('gcpProjectId')
            if acc:
                result['cloud']['failedControls'] += len([e for e in get_evaluations(acc, p, 500) if e.get('result') in ['FAIL', 'FAILED']])

    result['healthScore'] = max(0, health)
    return result


@mcp.tool()
def get_patch_status(limit: int = 20) -> dict:
    """Get patching coverage - how many assets need patches and which patches are most common."""
    result = {'coverage': 0, 'assetsTotal': 0, 'assetsNeedPatches': 0, 'topMissing': []}

    assets = get_assets(500)
    result['assetsTotal'] = len(assets)

    dets = get_detections(5, 500)
    qids = {}
    for d in dets:
        kb = get_kb(d['qid'])
        if kb and kb.get('patch_available'):
            qids[d['qid']] = qids.get(d['qid'], 0) + 1

    result['topMissing'] = [{'qid': q, 'count': c, 'title': (get_kb(q) or {}).get('title', '')}
                            for q, c in sorted(qids.items(), key=lambda x: x[1], reverse=True)[:limit]]

    hosts_need = set(d['host_id'] for d in dets if d['qid'] in qids)
    result['assetsNeedPatches'] = len(hosts_need)
    if result['assetsTotal']:
        result['coverage'] = round((result['assetsTotal'] - len(hosts_need)) / result['assetsTotal'] * 100, 1)

    return result


@mcp.tool()
def get_compliance_gaps(limit: int = 20) -> dict:
    """Get top failing compliance controls that could fail audits."""
    result = {'passRate': 0, 'failingControls': 0, 'topFailing': []}

    fails = {}
    passes = 0
    for p in ['aws', 'azure', 'gcp']:
        conns = get_connectors(p, 10)
        if conns:
            acc = conns[0].get('awsAccountId') or conns[0].get('azureSubscriptionId') or conns[0].get('gcpProjectId')
            if acc:
                for e in get_evaluations(acc, p, 500):
                    if e.get('result') in ['FAIL', 'FAILED']:
                        cid = e.get('controlId', '')
                        fails[cid] = fails.get(cid, 0) + 1
                    elif e.get('result') in ['PASS', 'PASSED']:
                        passes += 1

    result['failingControls'] = len(fails)
    result['topFailing'] = [{'controlId': c, 'failCount': n} for c, n in sorted(fails.items(), key=lambda x: x[1], reverse=True)[:limit]]

    total = sum(fails.values()) + passes
    result['passRate'] = round(passes / total * 100, 1) if total else 0
    return result


@mcp.tool()
def get_cloud_risk(limit: int = 20) -> dict:
    """Get cloud security posture across AWS, Azure, GCP - accounts, failed controls, and threats."""
    result = {'accounts': [], 'failedControls': [], 'threats': [], 'stats': {'total': 0, 'critical': 0}}

    for p in ['aws', 'azure', 'gcp']:
        for c in get_connectors(p, 50):
            acc = c.get('awsAccountId') or c.get('azureSubscriptionId') or c.get('gcpProjectId', '')
            result['accounts'].append({'id': acc, 'provider': p.upper(), 'name': c.get('name', '')})

    result['stats']['total'] = len(result['accounts'])

    if result['accounts']:
        acc = result['accounts'][0]
        fails = {}
        for e in get_evaluations(acc['id'], acc['provider'].lower(), 500):
            if e.get('result') in ['FAIL', 'FAILED']:
                cid = e.get('controlId', '')
                fails[cid] = fails.get(cid, 0) + 1
        result['failedControls'] = [{'id': c, 'count': n} for c, n in sorted(fails.items(), key=lambda x: x[1], reverse=True)[:limit]]

    for f in get_cdr(7, limit):
        sev = str(f.get('severity', ''))
        if sev in ['CRITICAL', '5']:
            result['stats']['critical'] += 1
        result['threats'].append({'severity': sev, 'category': f.get('category', ''), 'resource': f.get('resourceId', '')})

    return result


@mcp.tool()
def get_asset_risk(asset_id: str) -> dict:
    """Get risk summary for a specific asset - risk score, top vulnerabilities, and remediation."""
    result = {'assetId': asset_id, 'riskScore': 0, 'vulns': []}

    for a in get_assets(500):
        if str(a.get('assetId')) == str(asset_id):
            result['ip'] = a.get('address', '')
            result['hostname'] = a.get('dnsName', '')
            result['riskScore'] = int(a.get('assetRiskScore', 0))
            break

    for d in get_detections(4, 500):
        if d['host_id'] == asset_id and len(result['vulns']) < 10:
            kb = get_kb(d['qid'])
            result['vulns'].append({'qid': d['qid'], 'title': kb['title'] if kb else '', 'severity': d['severity']})

    return result


@mcp.tool()
def get_tech_debt(days_until_eol: int = 0) -> dict:
    """Get EOL/EOS software across your environment. Use days_until_eol to find software approaching end-of-life (e.g., 90 for next 90 days). Returns current EOL/EOS plus upcoming."""
    result = {
        'stats': {'total': 0, 'currentEOL': 0, 'currentEOS': 0, 'approachingEOL': 0},
        'currentEOL': [],
        'currentEOS': [],
        'approachingEOL': [],
        'byOS': []
    }

    assets = get_assets(500)
    result['stats']['total'] = len(assets)

    today = datetime.utcnow().date()
    cutoff = today + timedelta(days=days_until_eol) if days_until_eol > 0 else None

    os_data = {}

    for a in assets:
        os_info = a.get('operatingSystem', {})
        if not isinstance(os_info, dict):
            continue

        os_name = os_info.get('osName', 'Unknown')
        lc = os_info.get('lifecycle', {})
        if not isinstance(lc, dict):
            continue

        stage = lc.get('stage', '')
        eol_date = lc.get('eolDate', '')
        eos_date = lc.get('eosDate', '')

        asset_info = {
            'assetId': a.get('assetId'),
            'ip': a.get('address', ''),
            'hostname': a.get('dnsName', ''),
            'os': os_name,
            'eolDate': eol_date,
            'eosDate': eos_date
        }

        if os_name not in os_data:
            os_data[os_name] = {'eol': 0, 'eos': 0, 'approaching': 0, 'eolDate': eol_date, 'eosDate': eos_date}

        if stage == 'EOL':
            result['stats']['currentEOL'] += 1
            os_data[os_name]['eol'] += 1
            if len(result['currentEOL']) < 20:
                result['currentEOL'].append(asset_info)
        elif stage == 'EOS':
            result['stats']['currentEOS'] += 1
            os_data[os_name]['eos'] += 1
            if len(result['currentEOS']) < 20:
                result['currentEOS'].append(asset_info)
        elif cutoff and eol_date:
            try:
                eol = datetime.strptime(eol_date[:10], '%Y-%m-%d').date()
                if today < eol <= cutoff:
                    result['stats']['approachingEOL'] += 1
                    os_data[os_name]['approaching'] += 1
                    days_left = (eol - today).days
                    asset_info['daysUntilEOL'] = days_left
                    if len(result['approachingEOL']) < 20:
                        result['approachingEOL'].append(asset_info)
            except:
                pass

    result['byOS'] = [
        {'os': k, 'eolCount': v['eol'], 'eosCount': v['eos'], 'approachingCount': v['approaching'],
         'eolDate': v['eolDate'], 'eosDate': v['eosDate']}
        for k, v in sorted(os_data.items(), key=lambda x: x[1]['eol'] + x[1]['eos'] + x[1]['approaching'], reverse=True)[:15]
        if v['eol'] + v['eos'] + v['approaching'] > 0
    ]

    return result


@mcp.tool()
def get_image_vulns(image_id: str, limit: int = 50) -> dict:
    """Get vulnerabilities for a specific container image. Returns severity breakdown and top vulns."""
    result = {
        'imageId': image_id,
        'repo': '',
        'tag': '',
        'stats': {'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'total': 0},
        'vulns': []
    }

    img = get_image_details(image_id)
    if img:
        result['repo'] = img.get('repo', '')
        result['tag'] = img.get('tag', '')
        result['created'] = img.get('created', '')

    vulns = get_image_vulns_api(image_id)
    for v in vulns[:limit]:
        sev = v.get('severity', 0)
        if sev == 5:
            result['stats']['critical'] += 1
        elif sev == 4:
            result['stats']['high'] += 1
        elif sev == 3:
            result['stats']['medium'] += 1
        else:
            result['stats']['low'] += 1

        result['vulns'].append({
            'qid': v.get('qid'),
            'cve': v.get('cveId', ''),
            'severity': sev,
            'title': v.get('title', ''),
            'fixVersion': v.get('fixedVersion', '')
        })

    result['stats']['total'] = len(vulns)
    result['vulns'] = sorted(result['vulns'], key=lambda x: x['severity'], reverse=True)[:limit]
    return result


@mcp.tool()
def get_expiring_certs(days: int = 30, limit: int = 50) -> dict:
    """Get SSL/TLS certificates expiring within specified days. Default 30 days."""
    result = {
        'days': days,
        'stats': {'expiring': 0, 'expired': 0, 'valid': 0},
        'expiring': [],
        'expired': []
    }

    today = datetime.utcnow()
    cutoff = today + timedelta(days=days)

    certs = get_certificates(limit * 2, days)
    for c in certs:
        cert_info = {
            'id': c.get('id', ''),
            'subject': c.get('subject', {}).get('commonName', ''),
            'issuer': c.get('issuer', {}).get('commonName', ''),
            'validTo': c.get('validTo', ''),
            'hosts': [h.get('hostname', '') for h in c.get('hosts', [])[:5]]
        }

        valid_to = c.get('validTo', '')
        if valid_to:
            try:
                exp_date = datetime.strptime(valid_to[:10], '%Y-%m-%d')
                days_left = (exp_date - today).days
                cert_info['daysUntilExpiry'] = days_left

                if days_left < 0:
                    result['stats']['expired'] += 1
                    if len(result['expired']) < limit:
                        result['expired'].append(cert_info)
                elif days_left <= days:
                    result['stats']['expiring'] += 1
                    if len(result['expiring']) < limit:
                        result['expiring'].append(cert_info)
                else:
                    result['stats']['valid'] += 1
            except:
                pass

    result['expiring'] = sorted(result['expiring'], key=lambda x: x.get('daysUntilExpiry', 999))
    result['expired'] = sorted(result['expired'], key=lambda x: x.get('daysUntilExpiry', 0))
    return result


@mcp.tool()
def get_threats(days: int = 7, limit: int = 50) -> dict:
    """Get combined threat view from FIM (file integrity), EDR (endpoint), and CDR (cloud detection). Returns recent security events."""
    result = {
        'days': days,
        'stats': {'fim': 0, 'edr': 0, 'cdr': 0, 'critical': 0, 'high': 0},
        'fim': [],
        'edr': [],
        'cdr': []
    }

    fim_events = get_fim_events(limit, days)
    for e in fim_events:
        sev = e.get('severity', '')
        if sev in ['CRITICAL', '5']:
            result['stats']['critical'] += 1
        elif sev in ['HIGH', '4']:
            result['stats']['high'] += 1
        result['fim'].append({
            'action': e.get('action', ''),
            'path': e.get('filePath', ''),
            'hostname': e.get('hostname', ''),
            'dateTime': e.get('dateTime', ''),
            'severity': sev
        })
    result['stats']['fim'] = len(fim_events)

    edr_events = get_edr_events(limit, 'Critical')
    edr_events += get_edr_events(limit, 'High')
    for e in edr_events[:limit]:
        sev = e.get('severity', '')
        if sev == 'Critical':
            result['stats']['critical'] += 1
        elif sev == 'High':
            result['stats']['high'] += 1
        result['edr'].append({
            'type': e.get('eventType', ''),
            'process': e.get('processName', ''),
            'hostname': e.get('hostname', ''),
            'dateTime': e.get('dateTime', ''),
            'severity': sev
        })
    result['stats']['edr'] = len(edr_events)

    cdr_findings = get_cdr(days, limit)
    for f in cdr_findings:
        sev = str(f.get('severity', ''))
        if sev in ['CRITICAL', '5']:
            result['stats']['critical'] += 1
        elif sev in ['HIGH', '4']:
            result['stats']['high'] += 1
        result['cdr'].append({
            'category': f.get('category', ''),
            'resource': f.get('resourceId', ''),
            'provider': f.get('cloudProvider', ''),
            'dateTime': f.get('createdAt', ''),
            'severity': sev
        })
    result['stats']['cdr'] = len(cdr_findings)

    return result


@mcp.tool()
def get_webapp_vulns(severity: int = 4, limit: int = 50) -> dict:
    """Get web application vulnerabilities from WAS scans. Default severity 4+ (high/critical)."""
    result = {
        'minSeverity': severity,
        'stats': {'critical': 0, 'high': 0, 'medium': 0, 'total': 0, 'webApps': 0},
        'vulns': [],
        'byWebApp': []
    }

    findings = get_was_findings(limit * 2, severity)
    webapp_vulns = {}

    for f in findings:
        sev = f.get('severity', 0)
        if sev >= 5:
            result['stats']['critical'] += 1
        elif sev >= 4:
            result['stats']['high'] += 1
        elif sev >= 3:
            result['stats']['medium'] += 1

        webapp_id = f.get('webAppId', '')
        webapp_name = f.get('webAppName', '')
        if webapp_id:
            if webapp_id not in webapp_vulns:
                webapp_vulns[webapp_id] = {'id': webapp_id, 'name': webapp_name, 'critical': 0, 'high': 0, 'total': 0}
            webapp_vulns[webapp_id]['total'] += 1
            if sev >= 5:
                webapp_vulns[webapp_id]['critical'] += 1
            elif sev >= 4:
                webapp_vulns[webapp_id]['high'] += 1

        result['vulns'].append({
            'qid': f.get('qid'),
            'name': f.get('name', ''),
            'severity': sev,
            'url': f.get('url', ''),
            'webApp': webapp_name
        })

    result['stats']['total'] = len(findings)
    result['stats']['webApps'] = len(webapp_vulns)
    result['vulns'] = sorted(result['vulns'], key=lambda x: x['severity'], reverse=True)[:limit]
    result['byWebApp'] = sorted(webapp_vulns.values(), key=lambda x: (x['critical'], x['high'], x['total']), reverse=True)[:20]
    return result


def main():
    mcp.run()


if __name__ == "__main__":
    main()