kailash 0.2.2__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/nodes/api/__init__.py +5 -0
- kailash/nodes/api/monitoring.py +463 -0
- kailash/nodes/api/security.py +822 -0
- kailash/nodes/base.py +3 -3
- kailash/nodes/data/__init__.py +6 -0
- kailash/nodes/data/event_generation.py +297 -0
- kailash/nodes/data/file_discovery.py +601 -0
- kailash/nodes/transform/processors.py +1 -1
- kailash/runtime/async_local.py +1 -1
- kailash/runtime/docker.py +4 -4
- kailash/runtime/local.py +39 -15
- kailash/runtime/parallel.py +2 -2
- kailash/runtime/parallel_cyclic.py +2 -2
- kailash/runtime/testing.py +2 -2
- kailash/utils/templates.py +6 -6
- kailash/visualization/performance.py +16 -3
- kailash/visualization/reports.py +5 -1
- kailash/workflow/cycle_analyzer.py +8 -1
- kailash/workflow/cyclic_runner.py +1 -1
- kailash/workflow/graph.py +18 -6
- kailash/workflow/visualization.py +10 -2
- kailash-0.3.0.dist-info/METADATA +428 -0
- {kailash-0.2.2.dist-info → kailash-0.3.0.dist-info}/RECORD +28 -24
- kailash-0.2.2.dist-info/METADATA +0 -121
- {kailash-0.2.2.dist-info → kailash-0.3.0.dist-info}/WHEEL +0 -0
- {kailash-0.2.2.dist-info → kailash-0.3.0.dist-info}/entry_points.txt +0 -0
- {kailash-0.2.2.dist-info → kailash-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.2.2.dist-info → kailash-0.3.0.dist-info}/top_level.txt +0 -0
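The four new node modules in this release (monitoring, security, event generation, file discovery) follow the base-node pattern that is visible in the security.py hunk below: subclass Node, declare inputs in get_parameters(), implement run(), and register the class with @register_node(). The following is a minimal sketch of that pattern inferred from the diff; the EchoNode class and its "message" parameter are hypothetical and not part of this release.

from typing import Any, Dict

from kailash.nodes.base import Node, NodeParameter, register_node


@register_node()
class EchoNode(Node):
    """Hypothetical example node illustrating the pattern the new 0.3.0 nodes follow."""

    def get_parameters(self) -> Dict[str, NodeParameter]:
        # Declare the node's inputs, mirroring SecurityScannerNode.get_parameters() below.
        return {
            "message": NodeParameter(
                name="message",
                type=str,
                required=True,
                description="Text to echo back",
            ),
        }

    def run(self, **kwargs) -> Dict[str, Any]:
        # run() receives the validated parameters and returns a plain dict of outputs.
        return {"echoed": kwargs["message"]}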
kailash/nodes/api/security.py (new file)
@@ -0,0 +1,822 @@
"""Security scanning and audit nodes for security assessment."""

import socket
import time
from datetime import datetime, timezone
from typing import Any, Dict, List
from urllib.parse import urlparse

import requests

from kailash.nodes.base import Node, NodeParameter, register_node


@register_node()
class SecurityScannerNode(Node):
    """
    Performs security scans and vulnerability assessments.

    This node provides comprehensive security scanning capabilities for web
    applications, networks, and systems. It replaces DataTransformer with
    embedded Python code for security audit tasks, offering standardized
    security assessment patterns.

    Design Philosophy:
        Security assessment requires consistent, thorough scanning with proper
        reporting. This node eliminates the need for custom security scanning
        code in DataTransformer nodes by providing dedicated, configurable
        security assessment capabilities with industry-standard checks.

    Upstream Dependencies:
        - Target discovery nodes
        - Configuration nodes with scan parameters
        - Authentication credential nodes
        - Scope definition nodes

    Downstream Consumers:
        - Vulnerability reporting nodes
        - Risk assessment nodes
        - Compliance checking nodes
        - Alert generation nodes
        - Remediation workflow nodes

    Configuration:
        - Scan types and targets
        - Vulnerability databases
        - Scan intensity and depth
        - Authentication parameters
        - Exclusion patterns

    Implementation Details:
        - Multiple scan types (port, web, SSL, etc.)
        - Vulnerability classification
        - Risk scoring and prioritization
        - Compliance framework mapping
        - Detailed finding documentation

    Error Handling:
        - Network timeout management
        - Permission error handling
        - Invalid target handling
        - Partial scan completion

    Side Effects:
        - Network requests to target systems
        - File system access for local scans
        - Process execution for external tools
        - Logging of scan activities

    Examples:
        >>> # Web application security scan
        >>> scanner = SecurityScannerNode(
        ...     scan_types=['web_security', 'ssl_check'],
        ...     targets=['https://example.com', 'https://app.example.com'],
        ...     scan_depth='basic'
        ... )
        >>> result = scanner.execute()
        >>> assert 'security_findings' in result
        >>> assert result['scan_summary']['total_targets'] == 2
        >>>
        >>> # Network security scan
        >>> scanner = SecurityScannerNode(
        ...     scan_types=['port_scan', 'service_detection'],
        ...     targets=['192.168.1.0/24'],
        ...     ports='1-1024'
        ... )
        >>> result = scanner.execute()
        >>> assert 'security_findings' in result
    """

    def get_parameters(self) -> Dict[str, NodeParameter]:
        return {
            "scan_types": NodeParameter(
                name="scan_types",
                type=list,
                required=True,
                description="Types of security scans to perform",
            ),
            "targets": NodeParameter(
                name="targets",
                type=list,
                required=True,
                description="List of targets to scan (URLs, IPs, domains)",
            ),
            "scan_depth": NodeParameter(
                name="scan_depth",
                type=str,
                required=False,
                default="basic",
                description="Scan depth: basic, standard, or comprehensive",
            ),
            "ports": NodeParameter(
                name="ports",
                type=str,
                required=False,
                default="common",
                description="Port range to scan (e.g., '1-1024', 'common', 'all')",
            ),
            "timeout": NodeParameter(
                name="timeout",
                type=int,
                required=False,
                default=60,
                description="Timeout in seconds for each scan",
            ),
            "include_compliance": NodeParameter(
                name="include_compliance",
                type=bool,
                required=False,
                default=True,
                description="Include compliance framework mapping",
            ),
            "risk_scoring": NodeParameter(
                name="risk_scoring",
                type=bool,
                required=False,
                default=True,
                description="Calculate risk scores for findings",
            ),
        }

    def run(self, **kwargs) -> Dict[str, Any]:
        scan_types = kwargs["scan_types"]
        targets = kwargs["targets"]
        scan_depth = kwargs.get("scan_depth", "basic")
        ports = kwargs.get("ports", "common")
        timeout = kwargs.get("timeout", 60)
        include_compliance = kwargs.get("include_compliance", True)
        risk_scoring = kwargs.get("risk_scoring", True)

        start_time = time.time()
        all_findings = []
        scan_results = {}

        for target in targets:
            target_findings = []

            for scan_type in scan_types:
                try:
                    findings = self._perform_scan(
                        scan_type, target, scan_depth, ports, timeout
                    )
                    target_findings.extend(findings)
                except Exception as e:
                    # Log scan error but continue with other scans
                    error_finding = {
                        "type": "scan_error",
                        "target": target,
                        "scan_type": scan_type,
                        "severity": "info",
                        "title": f"Scan Error: {scan_type}",
                        "description": f"Failed to complete {scan_type} scan: {str(e)}",
                        "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                    }
                    target_findings.append(error_finding)

            # Post-process findings
            if risk_scoring:
                target_findings = self._calculate_risk_scores(target_findings)

            if include_compliance:
                target_findings = self._add_compliance_mapping(target_findings)

            all_findings.extend(target_findings)
            scan_results[target] = target_findings

        execution_time = time.time() - start_time

        # Generate summary
        scan_summary = self._generate_scan_summary(
            all_findings, targets, scan_types, execution_time
        )

        return {
            "security_findings": all_findings,
            "scan_results": scan_results,
            "scan_summary": scan_summary,
            "total_findings": len(all_findings),
            "high_risk_findings": len(
                [f for f in all_findings if f.get("risk_score", 0) >= 8]
            ),
            "medium_risk_findings": len(
                [f for f in all_findings if 4 <= f.get("risk_score", 0) < 8]
            ),
            "low_risk_findings": len(
                [f for f in all_findings if 1 <= f.get("risk_score", 0) < 4]
            ),
            "execution_time": execution_time,
            "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
        }

    def _perform_scan(
        self, scan_type: str, target: str, scan_depth: str, ports: str, timeout: int
    ) -> List[Dict[str, Any]]:
        """Perform a specific type of security scan."""

        if scan_type == "web_security":
            return self._scan_web_security(target, scan_depth, timeout)
        elif scan_type == "ssl_check":
            return self._scan_ssl(target, timeout)
        elif scan_type == "port_scan":
            return self._scan_ports(target, ports, timeout)
        elif scan_type == "service_detection":
            return self._scan_services(target, ports, timeout)
        elif scan_type == "vulnerability_check":
            return self._scan_vulnerabilities(target, scan_depth, timeout)
        elif scan_type == "header_analysis":
            return self._scan_headers(target, timeout)
        else:
            return [
                {
                    "type": "unsupported_scan",
                    "target": target,
                    "severity": "info",
                    "title": f"Unsupported Scan Type: {scan_type}",
                    "description": f"Scan type '{scan_type}' is not supported",
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            ]

    def _scan_web_security(
        self, target: str, scan_depth: str, timeout: int
    ) -> List[Dict[str, Any]]:
        """Perform web application security scan."""
        findings = []

        try:
            response = requests.get(target, timeout=timeout, allow_redirects=True)

            # Check for common security issues
            findings.extend(self._check_security_headers(target, response))
            findings.extend(self._check_ssl_redirect(target, response))
            findings.extend(self._check_directory_listing(target, response))

            if scan_depth in ["standard", "comprehensive"]:
                findings.extend(self._check_common_files(target, timeout))
                findings.extend(self._check_injection_points(target, timeout))

            if scan_depth == "comprehensive":
                findings.extend(self._check_authentication(target, timeout))

        except requests.RequestException as e:
            findings.append(
                {
                    "type": "connection_error",
                    "target": target,
                    "severity": "medium",
                    "title": "Connection Error",
                    "description": f"Failed to connect to target: {str(e)}",
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            )

        return findings

    def _scan_ssl(self, target: str, timeout: int) -> List[Dict[str, Any]]:
        """Perform SSL/TLS security scan."""
        findings = []

        try:
            parsed_url = urlparse(target)
            hostname = parsed_url.hostname
            port = parsed_url.port or (443 if parsed_url.scheme == "https" else 80)

            if parsed_url.scheme != "https":
                findings.append(
                    {
                        "type": "ssl_not_used",
                        "target": target,
                        "severity": "medium",
                        "title": "SSL/TLS Not Used",
                        "description": "Target does not use HTTPS encryption",
                        "recommendation": "Implement SSL/TLS encryption",
                        "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                    }
                )
                return findings

            # Check SSL certificate using OpenSSL (if available)
            try:
                import ssl

                context = ssl.create_default_context()
                with socket.create_connection(
                    (hostname, port), timeout=timeout
                ) as sock:
                    with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                        cert = ssock.getpeercert()

                        # Check certificate expiration
                        not_after = datetime.strptime(
                            cert["notAfter"], "%b %d %H:%M:%S %Y %Z"
                        )
                        days_until_expiry = (not_after - datetime.now()).days

                        if days_until_expiry < 30:
                            findings.append(
                                {
                                    "type": "ssl_expiring",
                                    "target": target,
                                    "severity": (
                                        "high" if days_until_expiry < 7 else "medium"
                                    ),
                                    "title": "SSL Certificate Expiring",
                                    "description": f"SSL certificate expires in {days_until_expiry} days",
                                    "details": {"expiry_date": cert["notAfter"]},
                                    "timestamp": datetime.now(timezone.utc).isoformat()
                                    + "Z",
                                }
                            )

            except Exception as e:
                findings.append(
                    {
                        "type": "ssl_check_error",
                        "target": target,
                        "severity": "low",
                        "title": "SSL Check Error",
                        "description": f"Failed to perform detailed SSL check: {str(e)}",
                        "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                    }
                )

        except Exception as e:
            findings.append(
                {
                    "type": "ssl_scan_error",
                    "target": target,
                    "severity": "low",
                    "title": "SSL Scan Error",
                    "description": f"SSL scan failed: {str(e)}",
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            )

        return findings

    def _scan_ports(
        self, target: str, ports: str, timeout: int
    ) -> List[Dict[str, Any]]:
        """Perform port scan."""
        findings = []

        # Parse target to get hostname/IP
        if target.startswith(("http://", "https://")):
            hostname = urlparse(target).hostname
        else:
            hostname = target

        # Define port ranges
        if ports == "common":
            port_list = [
                21,
                22,
                23,
                25,
                53,
                80,
                110,
                143,
                443,
                993,
                995,
                1433,
                3306,
                3389,
                5432,
                6379,
            ]
        elif ports == "all":
            port_list = range(1, 65536)
        elif "-" in ports:
            start, end = map(int, ports.split("-"))
            port_list = range(start, end + 1)
        else:
            port_list = [
                int(p.strip()) for p in ports.split(",") if p.strip().isdigit()
            ]

        open_ports = []
        for port in port_list:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(
                    min(timeout / len(port_list), 1)
                )  # Adjust timeout per port
                result = sock.connect_ex((hostname, port))
                sock.close()

                if result == 0:
                    open_ports.append(port)

                    # Check for potentially risky open ports
                    risky_ports = {
                        21: ("FTP", "medium"),
                        23: ("Telnet", "high"),
                        25: ("SMTP", "low"),
                        1433: ("SQL Server", "medium"),
                        3306: ("MySQL", "medium"),
                        3389: ("RDP", "high"),
                        5432: ("PostgreSQL", "medium"),
                        6379: ("Redis", "medium"),
                    }

                    if port in risky_ports:
                        service, severity = risky_ports[port]
                        findings.append(
                            {
                                "type": "open_port",
                                "target": target,
                                "severity": severity,
                                "title": f"Potentially Risky Open Port: {port}",
                                "description": f"Port {port} ({service}) is open and accessible",
                                "details": {"port": port, "service": service},
                                "recommendation": f"Ensure {service} service is properly secured",
                                "timestamp": datetime.now(timezone.utc).isoformat()
                                + "Z",
                            }
                        )

            except Exception:
                continue  # Port closed or filtered

        # Add summary finding
        if open_ports:
            findings.append(
                {
                    "type": "port_scan_summary",
                    "target": target,
                    "severity": "info",
                    "title": "Port Scan Results",
                    "description": f"Found {len(open_ports)} open ports",
                    "details": {
                        "open_ports": open_ports,
                        "total_scanned": len(port_list),
                    },
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            )

        return findings

    def _scan_services(
        self, target: str, ports: str, timeout: int
    ) -> List[Dict[str, Any]]:
        """Perform service detection scan."""
        # For now, this is a simplified version
        # In a full implementation, you would use nmap or similar tools
        return self._scan_ports(target, ports, timeout)

    def _scan_vulnerabilities(
        self, target: str, scan_depth: str, timeout: int
    ) -> List[Dict[str, Any]]:
        """Perform vulnerability scan using known CVE patterns."""
        findings = []

        try:
            response = requests.get(target, timeout=timeout)

            # Check for common vulnerability indicators
            server_header = response.headers.get("Server", "").lower()

            # Check for outdated software versions (simplified)
            vulnerable_patterns = {
                "apache/2.2": {"cve": "CVE-2012-0053", "severity": "medium"},
                "nginx/1.0": {"cve": "CVE-2013-2028", "severity": "medium"},
                "iis/7.0": {"cve": "CVE-2010-1256", "severity": "high"},
            }

            for pattern, vuln_info in vulnerable_patterns.items():
                if pattern in server_header:
                    findings.append(
                        {
                            "type": "potential_vulnerability",
                            "target": target,
                            "severity": vuln_info["severity"],
                            "title": "Potentially Vulnerable Server Version",
                            "description": f"Server header indicates potentially vulnerable version: {server_header}",
                            "details": {
                                "server_header": server_header,
                                "potential_cve": vuln_info["cve"],
                            },
                            "recommendation": "Update server software to latest version",
                            "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                        }
                    )

        except Exception as e:
            findings.append(
                {
                    "type": "vulnerability_scan_error",
                    "target": target,
                    "severity": "low",
                    "title": "Vulnerability Scan Error",
                    "description": f"Failed to perform vulnerability scan: {str(e)}",
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            )

        return findings

    def _scan_headers(self, target: str, timeout: int) -> List[Dict[str, Any]]:
        """Perform HTTP security headers analysis."""
        findings = []

        try:
            response = requests.get(target, timeout=timeout)
            headers = response.headers

            # Check for missing security headers
            security_headers = {
                "X-Frame-Options": "Clickjacking protection",
                "X-Content-Type-Options": "MIME type sniffing protection",
                "X-XSS-Protection": "XSS protection",
                "Strict-Transport-Security": "HTTPS enforcement",
                "Content-Security-Policy": "Content injection protection",
                "Referrer-Policy": "Referrer information control",
            }

            for header, description in security_headers.items():
                if header not in headers:
                    findings.append(
                        {
                            "type": "missing_security_header",
                            "target": target,
                            "severity": "medium",
                            "title": f"Missing Security Header: {header}",
                            "description": f"Missing {header} header for {description}",
                            "recommendation": f"Implement {header} header",
                            "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                        }
                    )

        except Exception as e:
            findings.append(
                {
                    "type": "header_scan_error",
                    "target": target,
                    "severity": "low",
                    "title": "Header Scan Error",
                    "description": f"Failed to analyze headers: {str(e)}",
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            )

        return findings

    def _check_security_headers(
        self, target: str, response: requests.Response
    ) -> List[Dict[str, Any]]:
        """Check for security headers in response."""
        return self._scan_headers(target, 30)  # Reuse header scan logic

    def _check_ssl_redirect(
        self, target: str, response: requests.Response
    ) -> List[Dict[str, Any]]:
        """Check if HTTP redirects to HTTPS."""
        findings = []

        if target.startswith("http://") and not response.url.startswith("https://"):
            findings.append(
                {
                    "type": "no_ssl_redirect",
                    "target": target,
                    "severity": "medium",
                    "title": "No HTTPS Redirect",
                    "description": "HTTP requests are not redirected to HTTPS",
                    "recommendation": "Implement automatic HTTPS redirect",
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            )

        return findings

    def _check_directory_listing(
        self, target: str, response: requests.Response
    ) -> List[Dict[str, Any]]:
        """Check for directory listing vulnerabilities."""
        findings = []

        if "Index of /" in response.text or "Directory listing for" in response.text:
            findings.append(
                {
                    "type": "directory_listing",
                    "target": target,
                    "severity": "medium",
                    "title": "Directory Listing Enabled",
                    "description": "Directory listing is enabled, potentially exposing sensitive files",
                    "recommendation": "Disable directory listing",
                    "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                }
            )

        return findings

    def _check_common_files(self, target: str, timeout: int) -> List[Dict[str, Any]]:
        """Check for common sensitive files."""
        findings = []

        common_files = [
            ".env",
            "config.php",
            "wp-config.php",
            ".htaccess",
            "robots.txt",
            "sitemap.xml",
            "admin/",
            "backup/",
        ]

        base_url = target.rstrip("/")
        for file_path in common_files:
            try:
                url = f"{base_url}/{file_path}"
                response = requests.get(url, timeout=timeout)
                if response.status_code == 200:
                    findings.append(
                        {
                            "type": "sensitive_file_exposed",
                            "target": target,
                            "severity": (
                                "medium"
                                if file_path in [".env", "config.php", "wp-config.php"]
                                else "low"
                            ),
                            "title": f"Sensitive File Accessible: {file_path}",
                            "description": f"Sensitive file {file_path} is publicly accessible",
                            "details": {"file_path": file_path, "url": url},
                            "recommendation": "Restrict access to sensitive files",
                            "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                        }
                    )
            except:
                continue

        return findings

    def _check_injection_points(
        self, target: str, timeout: int
    ) -> List[Dict[str, Any]]:
        """Check for basic injection vulnerabilities."""
        # This is a simplified check - real implementations would be much more thorough
        findings = []

        test_payloads = [
            "' OR '1'='1",
            "<script>alert('xss')</script>",
            "../../../etc/passwd",
        ]

        for payload in test_payloads:
            try:
                response = requests.get(
                    target, params={"test": payload}, timeout=timeout
                )

                # Very basic detection - real scanners would be much more sophisticated
                if payload in response.text:
                    findings.append(
                        {
                            "type": "potential_injection",
                            "target": target,
                            "severity": "high",
                            "title": "Potential Injection Vulnerability",
                            "description": f"Test payload reflected in response: {payload[:50]}...",
                            "recommendation": "Implement proper input validation and sanitization",
                            "timestamp": datetime.now(timezone.utc).isoformat() + "Z",
                        }
                    )
                    break  # Don't continue testing if one is found
            except:
                continue

        return findings

    def _check_authentication(self, target: str, timeout: int) -> List[Dict[str, Any]]:
        """Check for authentication-related issues."""
        findings = []

        # Check for login pages without HTTPS
        if target.startswith("http://"):
            login_paths = ["/login", "/admin", "/wp-admin", "/signin"]
            for path in login_paths:
                try:
                    url = f"{target.rstrip('/')}{path}"
                    response = requests.get(url, timeout=timeout)
                    if (
                        response.status_code == 200
                        and "password" in response.text.lower()
                    ):
                        findings.append(
                            {
                                "type": "insecure_login",
                                "target": target,
                                "severity": "high",
                                "title": "Insecure Login Page",
                                "description": f"Login page at {path} is not using HTTPS",
                                "details": {"login_path": path},
                                "recommendation": "Use HTTPS for all authentication pages",
                                "timestamp": datetime.now(timezone.utc).isoformat()
                                + "Z",
                            }
                        )
                except:
                    continue

        return findings

    def _calculate_risk_scores(
        self, findings: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Calculate risk scores for findings."""
        severity_scores = {
            "critical": 10,
            "high": 8,
            "medium": 5,
            "low": 2,
            "info": 1,
        }

        for finding in findings:
            severity = finding.get("severity", "info")
            base_score = severity_scores.get(severity, 1)

            # Adjust score based on finding type
            type_modifiers = {
                "ssl_expiring": 1.2,
                "potential_vulnerability": 1.5,
                "insecure_login": 1.3,
                "directory_listing": 1.1,
            }

            finding_type = finding.get("type", "")
            modifier = type_modifiers.get(finding_type, 1.0)

            finding["risk_score"] = min(10, base_score * modifier)

        return findings

    def _add_compliance_mapping(
        self, findings: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Add compliance framework mapping to findings."""
        compliance_mapping = {
            "missing_security_header": ["OWASP Top 10", "PCI DSS"],
            "ssl_not_used": ["PCI DSS", "HIPAA", "SOX"],
            "insecure_login": ["PCI DSS", "HIPAA", "GDPR"],
            "potential_vulnerability": ["OWASP Top 10", "NIST"],
            "directory_listing": ["OWASP Top 10"],
        }

        for finding in findings:
            finding_type = finding.get("type", "")
            if finding_type in compliance_mapping:
                finding["compliance_frameworks"] = compliance_mapping[finding_type]

        return findings

    def _generate_scan_summary(
        self,
        findings: List[Dict],
        targets: List[str],
        scan_types: List[str],
        execution_time: float,
    ) -> Dict[str, Any]:
        """Generate summary of security scan results."""

        # Count findings by severity
        severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}

        for finding in findings:
            severity = finding.get("severity", "info")
            if severity in severity_counts:
                severity_counts[severity] += 1

        # Count findings by type
        type_counts = {}
        for finding in findings:
            finding_type = finding.get("type", "unknown")
            type_counts[finding_type] = type_counts.get(finding_type, 0) + 1

        # Calculate overall risk level
        if severity_counts["critical"] > 0:
            overall_risk = "critical"
        elif severity_counts["high"] > 0:
            overall_risk = "high"
        elif severity_counts["medium"] > 0:
            overall_risk = "medium"
        elif severity_counts["low"] > 0:
            overall_risk = "low"
        else:
            overall_risk = "minimal"

        return {
            "total_targets": len(targets),
            "scan_types": scan_types,
            "total_findings": len(findings),
            "severity_breakdown": severity_counts,
            "finding_types": type_counts,
            "overall_risk_level": overall_risk,
            "execution_time": execution_time,
            "scan_completed": True,
        }
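Based only on the docstring examples and the run() return keys shown in this hunk, a downstream consumer of the new node might look roughly like the following sketch; the target URL and the chosen scan types are placeholders, and execute() is assumed to delegate to run() as the docstring examples suggest.

# Usage sketch consuming the result contract shown above.
from kailash.nodes.api.security import SecurityScannerNode

scanner = SecurityScannerNode(
    scan_types=["web_security", "ssl_check", "header_analysis"],
    targets=["https://example.com"],
    scan_depth="standard",
)
result = scanner.execute()

# run() returns: security_findings, scan_results, scan_summary, total_findings,
# high/medium/low_risk_findings, execution_time, and timestamp.
summary = result["scan_summary"]
print(summary["overall_risk_level"], summary["severity_breakdown"])

# Findings with risk_score >= 8 are what the node counts as high_risk_findings.
for finding in result["security_findings"]:
    if finding.get("risk_score", 0) >= 8:
        print(finding["title"], finding.get("recommendation", ""))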