kailash 0.3.1__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +33 -1
- kailash/access_control/__init__.py +129 -0
- kailash/access_control/managers.py +461 -0
- kailash/access_control/rule_evaluators.py +467 -0
- kailash/access_control_abac.py +825 -0
- kailash/config/__init__.py +27 -0
- kailash/config/database_config.py +359 -0
- kailash/database/__init__.py +28 -0
- kailash/database/execution_pipeline.py +499 -0
- kailash/middleware/__init__.py +306 -0
- kailash/middleware/auth/__init__.py +33 -0
- kailash/middleware/auth/access_control.py +436 -0
- kailash/middleware/auth/auth_manager.py +422 -0
- kailash/middleware/auth/jwt_auth.py +477 -0
- kailash/middleware/auth/kailash_jwt_auth.py +616 -0
- kailash/middleware/communication/__init__.py +37 -0
- kailash/middleware/communication/ai_chat.py +989 -0
- kailash/middleware/communication/api_gateway.py +802 -0
- kailash/middleware/communication/events.py +470 -0
- kailash/middleware/communication/realtime.py +710 -0
- kailash/middleware/core/__init__.py +21 -0
- kailash/middleware/core/agent_ui.py +890 -0
- kailash/middleware/core/schema.py +643 -0
- kailash/middleware/core/workflows.py +396 -0
- kailash/middleware/database/__init__.py +63 -0
- kailash/middleware/database/base.py +113 -0
- kailash/middleware/database/base_models.py +525 -0
- kailash/middleware/database/enums.py +106 -0
- kailash/middleware/database/migrations.py +12 -0
- kailash/{api/database.py → middleware/database/models.py} +183 -291
- kailash/middleware/database/repositories.py +685 -0
- kailash/middleware/database/session_manager.py +19 -0
- kailash/middleware/mcp/__init__.py +38 -0
- kailash/middleware/mcp/client_integration.py +585 -0
- kailash/middleware/mcp/enhanced_server.py +576 -0
- kailash/nodes/__init__.py +25 -3
- kailash/nodes/admin/__init__.py +35 -0
- kailash/nodes/admin/audit_log.py +794 -0
- kailash/nodes/admin/permission_check.py +864 -0
- kailash/nodes/admin/role_management.py +823 -0
- kailash/nodes/admin/security_event.py +1519 -0
- kailash/nodes/admin/user_management.py +944 -0
- kailash/nodes/ai/a2a.py +24 -7
- kailash/nodes/ai/ai_providers.py +1 -0
- kailash/nodes/ai/embedding_generator.py +11 -11
- kailash/nodes/ai/intelligent_agent_orchestrator.py +99 -11
- kailash/nodes/ai/llm_agent.py +407 -2
- kailash/nodes/ai/self_organizing.py +85 -10
- kailash/nodes/api/auth.py +287 -6
- kailash/nodes/api/rest.py +151 -0
- kailash/nodes/auth/__init__.py +17 -0
- kailash/nodes/auth/directory_integration.py +1228 -0
- kailash/nodes/auth/enterprise_auth_provider.py +1328 -0
- kailash/nodes/auth/mfa.py +2338 -0
- kailash/nodes/auth/risk_assessment.py +872 -0
- kailash/nodes/auth/session_management.py +1093 -0
- kailash/nodes/auth/sso.py +1040 -0
- kailash/nodes/base.py +344 -13
- kailash/nodes/base_cycle_aware.py +4 -2
- kailash/nodes/base_with_acl.py +1 -1
- kailash/nodes/code/python.py +293 -12
- kailash/nodes/compliance/__init__.py +9 -0
- kailash/nodes/compliance/data_retention.py +1888 -0
- kailash/nodes/compliance/gdpr.py +2004 -0
- kailash/nodes/data/__init__.py +22 -2
- kailash/nodes/data/async_connection.py +469 -0
- kailash/nodes/data/async_sql.py +757 -0
- kailash/nodes/data/async_vector.py +598 -0
- kailash/nodes/data/readers.py +767 -0
- kailash/nodes/data/retrieval.py +360 -1
- kailash/nodes/data/sharepoint_graph.py +397 -21
- kailash/nodes/data/sql.py +94 -5
- kailash/nodes/data/streaming.py +68 -8
- kailash/nodes/data/vector_db.py +54 -4
- kailash/nodes/enterprise/__init__.py +13 -0
- kailash/nodes/enterprise/batch_processor.py +741 -0
- kailash/nodes/enterprise/data_lineage.py +497 -0
- kailash/nodes/logic/convergence.py +31 -9
- kailash/nodes/logic/operations.py +14 -3
- kailash/nodes/mixins/__init__.py +8 -0
- kailash/nodes/mixins/event_emitter.py +201 -0
- kailash/nodes/mixins/mcp.py +9 -4
- kailash/nodes/mixins/security.py +165 -0
- kailash/nodes/monitoring/__init__.py +7 -0
- kailash/nodes/monitoring/performance_benchmark.py +2497 -0
- kailash/nodes/rag/__init__.py +284 -0
- kailash/nodes/rag/advanced.py +1615 -0
- kailash/nodes/rag/agentic.py +773 -0
- kailash/nodes/rag/conversational.py +999 -0
- kailash/nodes/rag/evaluation.py +875 -0
- kailash/nodes/rag/federated.py +1188 -0
- kailash/nodes/rag/graph.py +721 -0
- kailash/nodes/rag/multimodal.py +671 -0
- kailash/nodes/rag/optimized.py +933 -0
- kailash/nodes/rag/privacy.py +1059 -0
- kailash/nodes/rag/query_processing.py +1335 -0
- kailash/nodes/rag/realtime.py +764 -0
- kailash/nodes/rag/registry.py +547 -0
- kailash/nodes/rag/router.py +837 -0
- kailash/nodes/rag/similarity.py +1854 -0
- kailash/nodes/rag/strategies.py +566 -0
- kailash/nodes/rag/workflows.py +575 -0
- kailash/nodes/security/__init__.py +19 -0
- kailash/nodes/security/abac_evaluator.py +1411 -0
- kailash/nodes/security/audit_log.py +91 -0
- kailash/nodes/security/behavior_analysis.py +1893 -0
- kailash/nodes/security/credential_manager.py +401 -0
- kailash/nodes/security/rotating_credentials.py +760 -0
- kailash/nodes/security/security_event.py +132 -0
- kailash/nodes/security/threat_detection.py +1103 -0
- kailash/nodes/testing/__init__.py +9 -0
- kailash/nodes/testing/credential_testing.py +499 -0
- kailash/nodes/transform/__init__.py +10 -2
- kailash/nodes/transform/chunkers.py +592 -1
- kailash/nodes/transform/processors.py +484 -14
- kailash/nodes/validation.py +321 -0
- kailash/runtime/access_controlled.py +1 -1
- kailash/runtime/async_local.py +41 -7
- kailash/runtime/docker.py +1 -1
- kailash/runtime/local.py +474 -55
- kailash/runtime/parallel.py +1 -1
- kailash/runtime/parallel_cyclic.py +1 -1
- kailash/runtime/testing.py +210 -2
- kailash/utils/migrations/__init__.py +25 -0
- kailash/utils/migrations/generator.py +433 -0
- kailash/utils/migrations/models.py +231 -0
- kailash/utils/migrations/runner.py +489 -0
- kailash/utils/secure_logging.py +342 -0
- kailash/workflow/__init__.py +16 -0
- kailash/workflow/cyclic_runner.py +3 -4
- kailash/workflow/graph.py +70 -2
- kailash/workflow/resilience.py +249 -0
- kailash/workflow/templates.py +726 -0
- {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/METADATA +253 -20
- kailash-0.4.0.dist-info/RECORD +223 -0
- kailash/api/__init__.py +0 -17
- kailash/api/__main__.py +0 -6
- kailash/api/studio_secure.py +0 -893
- kailash/mcp/__main__.py +0 -13
- kailash/mcp/server_new.py +0 -336
- kailash/mcp/servers/__init__.py +0 -12
- kailash-0.3.1.dist-info/RECORD +0 -136
- {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/WHEEL +0 -0
- {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/entry_points.txt +0 -0
- {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.3.1.dist-info → kailash-0.4.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1103 @@
|
|
1
|
+
"""
|
2
|
+
AI-powered threat detection and analysis node.
|
3
|
+
|
4
|
+
This module provides enterprise-grade threat detection capabilities using AI/LLM
|
5
|
+
for advanced threat analysis, real-time event processing, and automated response.
|
6
|
+
"""
|
7
|
+
|
8
|
+
import asyncio
|
9
|
+
import json
|
10
|
+
import logging
|
11
|
+
from datetime import UTC, datetime, timedelta
|
12
|
+
from typing import Any, Callable, Dict, List, Optional
|
13
|
+
|
14
|
+
from kailash.nodes.ai.llm_agent import LLMAgentNode
|
15
|
+
from kailash.nodes.base import Node, NodeParameter, register_node
|
16
|
+
from kailash.nodes.mixins import LoggingMixin, PerformanceMixin, SecurityMixin
|
17
|
+
from kailash.nodes.security.audit_log import AuditLogNode
|
18
|
+
from kailash.nodes.security.security_event import SecurityEvent, SecurityEventNode
|
19
|
+
|
20
|
+
logger = logging.getLogger(__name__)
|
21
|
+
|
22
|
+
|
23
|
+
@register_node()
|
24
|
+
class ThreatDetectionNode(SecurityMixin, PerformanceMixin, LoggingMixin, Node):
|
25
|
+
"""AI-powered threat detection and analysis.
|
26
|
+
|
27
|
+
This node provides comprehensive threat detection capabilities including:
|
28
|
+
- Real-time event analysis with <100ms response time
|
29
|
+
- AI-powered threat pattern recognition
|
30
|
+
- Automated response actions
|
31
|
+
- Threat intelligence correlation
|
32
|
+
- Integration with security event logging
|
33
|
+
|
34
|
+
Example:
|
35
|
+
>>> threat_detector = ThreatDetectionNode(
|
36
|
+
... detection_rules=["brute_force", "privilege_escalation"],
|
37
|
+
... ai_model="ollama:llama3.2:3b",
|
38
|
+
... response_actions=["alert", "block_ip"],
|
39
|
+
... real_time=True
|
40
|
+
... )
|
41
|
+
>>>
|
42
|
+
>>> events = [
|
43
|
+
... {"type": "login", "user": "admin", "ip": "192.168.1.100", "failed": True},
|
44
|
+
... {"type": "login", "user": "admin", "ip": "192.168.1.100", "failed": True},
|
45
|
+
... {"type": "login", "user": "admin", "ip": "192.168.1.100", "failed": True}
|
46
|
+
... ]
|
47
|
+
>>>
|
48
|
+
>>> threats = threat_detector.run(events=events)
|
49
|
+
>>> print(f"Detected {len(threats['threats'])} threats")
|
50
|
+
"""
|
51
|
+
|
52
|
+
    def __init__(
        self,
        name: str = "threat_detection",
        detection_rules: Optional[List[str]] = None,
        ai_model: str = "ollama:llama3.2:3b",
        response_actions: Optional[List[str]] = None,
        real_time: bool = True,
        severity_threshold: str = "medium",
        response_time_target_ms: int = 100,
        **kwargs,
    ) -> None:
        """Initialize threat detection node.

        Args:
            name: Node name, also used to derive names of the helper nodes.
            detection_rules: List of detection rule names to apply; defaults to
                all five built-in rules when None or empty.
            ai_model: AI model identifier for threat analysis (the "ollama:"
                prefix, if present, is stripped before use).
            response_actions: Automated response actions; defaults to
                ["alert", "log"] when None or empty.
            real_time: Enable real-time threat detection.
            severity_threshold: Minimum severity to trigger response.
            response_time_target_ms: Target response time in milliseconds.
            **kwargs: Additional node parameters forwarded to the base classes.
        """
        # Set attributes before calling super().__init__() — presumably the
        # mixin/base initializers read them during setup; TODO confirm.
        self.detection_rules = detection_rules or [
            "brute_force",
            "privilege_escalation",
            "data_exfiltration",
            "insider_threat",
            "anomalous_behavior",
        ]
        self.ai_model = ai_model
        self.response_actions = response_actions or ["alert", "log"]
        self.real_time = real_time
        self.severity_threshold = severity_threshold
        self.response_time_target_ms = response_time_target_ms

        # Initialize parent classes (SecurityMixin, PerformanceMixin,
        # LoggingMixin, Node) via cooperative MRO.
        super().__init__(name=name, **kwargs)

        # Initialize AI agent for threat analysis
        self.ai_agent = LLMAgentNode(
            name=f"{name}_ai_agent",
            provider="ollama",
            model=ai_model.replace("ollama:", ""),
            temperature=0.1,  # Low temperature for consistent analysis
        )

        # Initialize security event and audit logging helper nodes
        self.security_event_node = SecurityEventNode(name=f"{name}_security_events")
        self.audit_log_node = AuditLogNode(name=f"{name}_audit_log")

        # Threat detection patterns and rules, keyed by rule name; each rule's
        # per-field thresholds are read by the matching _detect_* method.
        self.threat_patterns = {
            "brute_force": {
                "pattern": "multiple_failed_logins",
                "threshold": 5,
                "time_window": 300,  # 5 minutes
                "severity": "high",
            },
            "privilege_escalation": {
                "pattern": "unauthorized_admin_access",
                "keywords": ["sudo", "admin", "root", "privilege"],
                "severity": "critical",
            },
            "data_exfiltration": {
                "pattern": "large_data_transfer",
                "size_threshold_mb": 100,
                "unusual_hours": True,
                "severity": "critical",
            },
            "insider_threat": {
                "pattern": "abnormal_user_behavior",
                "deviation_threshold": 0.8,
                "severity": "high",
            },
            "anomalous_behavior": {
                "pattern": "statistical_anomaly",
                "confidence_threshold": 0.9,
                "severity": "medium",
            },
        }

        # Response action mappings: action name -> bound handler method
        # (handler methods are defined elsewhere in this class).
        self.response_handlers = {
            "alert": self._send_alert,
            "block_ip": self._block_ip,
            "lock_account": self._lock_account,
            "quarantine": self._quarantine_resource,
            "log": self._log_threat,
        }

        # Performance tracking counters, surfaced in run() results as "stats"
        self.detection_stats = {
            "total_events_processed": 0,
            "threats_detected": 0,
            "false_positives": 0,
            "avg_detection_time_ms": 0,
            "last_detection": None,
        }
|
152
|
+
|
153
|
+
def get_parameters(self) -> Dict[str, NodeParameter]:
|
154
|
+
"""Get node parameters for validation and documentation.
|
155
|
+
|
156
|
+
Returns:
|
157
|
+
Dictionary mapping parameter names to NodeParameter objects
|
158
|
+
"""
|
159
|
+
return {
|
160
|
+
"events": NodeParameter(
|
161
|
+
name="events",
|
162
|
+
type=list,
|
163
|
+
description="List of security events to analyze for threats",
|
164
|
+
required=True,
|
165
|
+
),
|
166
|
+
"time_window": NodeParameter(
|
167
|
+
name="time_window",
|
168
|
+
type=int,
|
169
|
+
description="Time window in seconds for threat correlation",
|
170
|
+
required=False,
|
171
|
+
default=3600,
|
172
|
+
),
|
173
|
+
"context": NodeParameter(
|
174
|
+
name="context",
|
175
|
+
type=dict,
|
176
|
+
description="Additional context for threat analysis",
|
177
|
+
required=False,
|
178
|
+
default={},
|
179
|
+
),
|
180
|
+
}
|
181
|
+
|
182
|
+
def run(
|
183
|
+
self,
|
184
|
+
events: List[Dict[str, Any]],
|
185
|
+
time_window: int = 3600,
|
186
|
+
context: Optional[Dict[str, Any]] = None,
|
187
|
+
**kwargs,
|
188
|
+
) -> Dict[str, Any]:
|
189
|
+
"""Run threat detection analysis.
|
190
|
+
|
191
|
+
Args:
|
192
|
+
events: List of security events to analyze
|
193
|
+
time_window: Time window in seconds for threat correlation
|
194
|
+
context: Additional context for analysis
|
195
|
+
**kwargs: Additional parameters
|
196
|
+
|
197
|
+
Returns:
|
198
|
+
Dictionary containing detected threats and analysis results
|
199
|
+
"""
|
200
|
+
start_time = datetime.now(UTC)
|
201
|
+
context = context or {}
|
202
|
+
|
203
|
+
try:
|
204
|
+
# Validate and sanitize inputs
|
205
|
+
safe_params = self.validate_and_sanitize_inputs(
|
206
|
+
{"events": events, "time_window": time_window, "context": context}
|
207
|
+
)
|
208
|
+
|
209
|
+
events = safe_params["events"]
|
210
|
+
time_window = safe_params["time_window"]
|
211
|
+
context = safe_params["context"]
|
212
|
+
|
213
|
+
self.log_node_execution("threat_detection_start", event_count=len(events))
|
214
|
+
|
215
|
+
# Run threat detection pipeline
|
216
|
+
results = self._analyze_threats(events, time_window, context)
|
217
|
+
|
218
|
+
# Update performance stats
|
219
|
+
processing_time = (datetime.now(UTC) - start_time).total_seconds() * 1000
|
220
|
+
self._update_stats(len(events), len(results["threats"]), processing_time)
|
221
|
+
|
222
|
+
# Log successful detection
|
223
|
+
self.log_node_execution(
|
224
|
+
"threat_detection_complete",
|
225
|
+
threats_found=len(results["threats"]),
|
226
|
+
processing_time_ms=processing_time,
|
227
|
+
)
|
228
|
+
|
229
|
+
return results
|
230
|
+
|
231
|
+
except Exception as e:
|
232
|
+
self.log_error_with_traceback(e, "threat_detection")
|
233
|
+
raise
|
234
|
+
|
235
|
+
def _analyze_threats(
|
236
|
+
self, events: List[Dict[str, Any]], time_window: int, context: Dict[str, Any]
|
237
|
+
) -> Dict[str, Any]:
|
238
|
+
"""Analyze events for threats using rule-based and AI detection.
|
239
|
+
|
240
|
+
Args:
|
241
|
+
events: Security events to analyze
|
242
|
+
time_window: Time window for correlation
|
243
|
+
context: Additional context
|
244
|
+
|
245
|
+
Returns:
|
246
|
+
Dictionary with detected threats and analysis
|
247
|
+
"""
|
248
|
+
detected_threats = []
|
249
|
+
analysis_results = {
|
250
|
+
"rule_based_detections": 0,
|
251
|
+
"ai_detections": 0,
|
252
|
+
"correlation_matches": 0,
|
253
|
+
"response_actions_taken": [],
|
254
|
+
}
|
255
|
+
|
256
|
+
# Phase 1: Rule-based detection
|
257
|
+
rule_threats = self._detect_rule_based_threats(events, time_window)
|
258
|
+
detected_threats.extend(rule_threats)
|
259
|
+
analysis_results["rule_based_detections"] = len(rule_threats)
|
260
|
+
|
261
|
+
# Phase 2: AI-powered detection for complex patterns
|
262
|
+
if len(events) > 0:
|
263
|
+
ai_threats = self._detect_ai_threats(events, context)
|
264
|
+
detected_threats.extend(ai_threats)
|
265
|
+
analysis_results["ai_detections"] = len(ai_threats)
|
266
|
+
|
267
|
+
# Phase 3: Cross-correlation analysis
|
268
|
+
correlated_threats = self._correlate_threats(detected_threats, events)
|
269
|
+
analysis_results["correlation_matches"] = len(correlated_threats)
|
270
|
+
|
271
|
+
# Phase 4: Response actions for high-severity threats
|
272
|
+
for threat in detected_threats:
|
273
|
+
if self._should_trigger_response(threat):
|
274
|
+
actions_taken = self._execute_response_actions(threat)
|
275
|
+
analysis_results["response_actions_taken"].extend(actions_taken)
|
276
|
+
|
277
|
+
return {
|
278
|
+
"success": True,
|
279
|
+
"threats": detected_threats,
|
280
|
+
"analysis": analysis_results,
|
281
|
+
"stats": self.detection_stats,
|
282
|
+
"timestamp": datetime.now(UTC).isoformat(),
|
283
|
+
}
|
284
|
+
|
285
|
+
def _detect_rule_based_threats(
|
286
|
+
self, events: List[Dict[str, Any]], time_window: int
|
287
|
+
) -> List[Dict[str, Any]]:
|
288
|
+
"""Detect threats using predefined rules.
|
289
|
+
|
290
|
+
Args:
|
291
|
+
events: Events to analyze
|
292
|
+
time_window: Time window in seconds
|
293
|
+
|
294
|
+
Returns:
|
295
|
+
List of detected threats
|
296
|
+
"""
|
297
|
+
threats = []
|
298
|
+
|
299
|
+
for rule_name in self.detection_rules:
|
300
|
+
if rule_name not in self.threat_patterns:
|
301
|
+
continue
|
302
|
+
|
303
|
+
pattern = self.threat_patterns[rule_name]
|
304
|
+
rule_threats = self._apply_detection_rule(
|
305
|
+
events, rule_name, pattern, time_window
|
306
|
+
)
|
307
|
+
threats.extend(rule_threats)
|
308
|
+
|
309
|
+
return threats
|
310
|
+
|
311
|
+
def _apply_detection_rule(
|
312
|
+
self,
|
313
|
+
events: List[Dict[str, Any]],
|
314
|
+
rule_name: str,
|
315
|
+
pattern: Dict[str, Any],
|
316
|
+
time_window: int,
|
317
|
+
) -> List[Dict[str, Any]]:
|
318
|
+
"""Apply a specific detection rule to events.
|
319
|
+
|
320
|
+
Args:
|
321
|
+
events: Events to analyze
|
322
|
+
rule_name: Name of the detection rule
|
323
|
+
pattern: Rule pattern configuration
|
324
|
+
time_window: Time window in seconds
|
325
|
+
|
326
|
+
Returns:
|
327
|
+
List of threats detected by this rule
|
328
|
+
"""
|
329
|
+
threats = []
|
330
|
+
|
331
|
+
if rule_name == "brute_force":
|
332
|
+
threats.extend(self._detect_brute_force(events, pattern, time_window))
|
333
|
+
elif rule_name == "privilege_escalation":
|
334
|
+
threats.extend(self._detect_privilege_escalation(events, pattern))
|
335
|
+
elif rule_name == "data_exfiltration":
|
336
|
+
threats.extend(self._detect_data_exfiltration(events, pattern))
|
337
|
+
elif rule_name == "insider_threat":
|
338
|
+
threats.extend(self._detect_insider_threat(events, pattern))
|
339
|
+
elif rule_name == "anomalous_behavior":
|
340
|
+
threats.extend(self._detect_anomalous_behavior(events, pattern))
|
341
|
+
|
342
|
+
return threats
|
343
|
+
|
344
|
+
def _detect_brute_force(
|
345
|
+
self, events: List[Dict[str, Any]], pattern: Dict[str, Any], time_window: int
|
346
|
+
) -> List[Dict[str, Any]]:
|
347
|
+
"""Detect brute force attacks.
|
348
|
+
|
349
|
+
Args:
|
350
|
+
events: Events to analyze
|
351
|
+
pattern: Brute force pattern configuration
|
352
|
+
time_window: Time window in seconds
|
353
|
+
|
354
|
+
Returns:
|
355
|
+
List of brute force threats
|
356
|
+
"""
|
357
|
+
threats = []
|
358
|
+
login_failures = {}
|
359
|
+
|
360
|
+
current_time = datetime.now(UTC)
|
361
|
+
cutoff_time = current_time - timedelta(seconds=time_window)
|
362
|
+
|
363
|
+
# Group failed login attempts by user/IP
|
364
|
+
for event in events:
|
365
|
+
if (
|
366
|
+
event.get("type") == "login"
|
367
|
+
and event.get("failed", False)
|
368
|
+
and event.get("timestamp")
|
369
|
+
):
|
370
|
+
|
371
|
+
event_time = datetime.fromisoformat(
|
372
|
+
event["timestamp"].replace("Z", "+00:00")
|
373
|
+
)
|
374
|
+
if event_time > cutoff_time:
|
375
|
+
key = f"{event.get('user', 'unknown')}:{event.get('ip', 'unknown')}"
|
376
|
+
if key not in login_failures:
|
377
|
+
login_failures[key] = []
|
378
|
+
login_failures[key].append(event)
|
379
|
+
|
380
|
+
# Check for brute force patterns
|
381
|
+
threshold = pattern.get("threshold", 5)
|
382
|
+
for key, failed_attempts in login_failures.items():
|
383
|
+
if len(failed_attempts) >= threshold:
|
384
|
+
user, ip = key.split(":", 1)
|
385
|
+
threats.append(
|
386
|
+
{
|
387
|
+
"id": f"brute_force_{user}_{ip}_{int(current_time.timestamp())}",
|
388
|
+
"type": "brute_force",
|
389
|
+
"severity": pattern["severity"],
|
390
|
+
"user": user,
|
391
|
+
"source_ip": ip,
|
392
|
+
"failed_attempts": len(failed_attempts),
|
393
|
+
"time_window": time_window,
|
394
|
+
"detection_time": current_time.isoformat(),
|
395
|
+
"confidence": min(1.0, len(failed_attempts) / (threshold * 2)),
|
396
|
+
"evidence": failed_attempts[
|
397
|
+
:5
|
398
|
+
], # Include first 5 attempts as evidence
|
399
|
+
}
|
400
|
+
)
|
401
|
+
|
402
|
+
return threats
|
403
|
+
|
404
|
+
def _detect_privilege_escalation(
|
405
|
+
self, events: List[Dict[str, Any]], pattern: Dict[str, Any]
|
406
|
+
) -> List[Dict[str, Any]]:
|
407
|
+
"""Detect privilege escalation attempts.
|
408
|
+
|
409
|
+
Args:
|
410
|
+
events: Events to analyze
|
411
|
+
pattern: Privilege escalation pattern configuration
|
412
|
+
|
413
|
+
Returns:
|
414
|
+
List of privilege escalation threats
|
415
|
+
"""
|
416
|
+
threats = []
|
417
|
+
keywords = pattern.get("keywords", ["sudo", "admin", "root", "privilege"])
|
418
|
+
|
419
|
+
for event in events:
|
420
|
+
# Check for privilege escalation indicators
|
421
|
+
if event.get("type") in ["command", "access", "authentication"]:
|
422
|
+
event_text = json.dumps(event).lower()
|
423
|
+
|
424
|
+
matched_keywords = [kw for kw in keywords if kw in event_text]
|
425
|
+
if matched_keywords and event.get("unauthorized", False):
|
426
|
+
threats.append(
|
427
|
+
{
|
428
|
+
"id": f"priv_esc_{event.get('user', 'unknown')}_{int(datetime.now(UTC).timestamp())}",
|
429
|
+
"type": "privilege_escalation",
|
430
|
+
"severity": pattern["severity"],
|
431
|
+
"user": event.get("user", "unknown"),
|
432
|
+
"source_ip": event.get("ip", "unknown"),
|
433
|
+
"matched_keywords": matched_keywords,
|
434
|
+
"detection_time": datetime.now(UTC).isoformat(),
|
435
|
+
"confidence": min(
|
436
|
+
1.0, len(matched_keywords) / len(keywords)
|
437
|
+
),
|
438
|
+
"evidence": event,
|
439
|
+
}
|
440
|
+
)
|
441
|
+
|
442
|
+
return threats
|
443
|
+
|
444
|
+
def _detect_data_exfiltration(
|
445
|
+
self, events: List[Dict[str, Any]], pattern: Dict[str, Any]
|
446
|
+
) -> List[Dict[str, Any]]:
|
447
|
+
"""Detect data exfiltration attempts.
|
448
|
+
|
449
|
+
Args:
|
450
|
+
events: Events to analyze
|
451
|
+
pattern: Data exfiltration pattern configuration
|
452
|
+
|
453
|
+
Returns:
|
454
|
+
List of data exfiltration threats
|
455
|
+
"""
|
456
|
+
threats = []
|
457
|
+
size_threshold = (
|
458
|
+
pattern.get("size_threshold_mb", 100) * 1024 * 1024
|
459
|
+
) # Convert to bytes
|
460
|
+
|
461
|
+
for event in events:
|
462
|
+
if event.get("type") == "data_transfer":
|
463
|
+
size = event.get("size_bytes", 0)
|
464
|
+
if size > size_threshold:
|
465
|
+
# Check for unusual hours if configured
|
466
|
+
unusual_time = False
|
467
|
+
if pattern.get("unusual_hours", False):
|
468
|
+
event_time = datetime.fromisoformat(
|
469
|
+
event.get(
|
470
|
+
"timestamp", datetime.now(UTC).isoformat()
|
471
|
+
).replace("Z", "+00:00")
|
472
|
+
)
|
473
|
+
hour = event_time.hour
|
474
|
+
unusual_time = hour < 6 or hour > 22 # Outside business hours
|
475
|
+
|
476
|
+
if unusual_time or size > size_threshold * 2: # Very large transfer
|
477
|
+
threats.append(
|
478
|
+
{
|
479
|
+
"id": f"data_exfil_{event.get('user', 'unknown')}_{int(datetime.now(UTC).timestamp())}",
|
480
|
+
"type": "data_exfiltration",
|
481
|
+
"severity": pattern["severity"],
|
482
|
+
"user": event.get("user", "unknown"),
|
483
|
+
"source_ip": event.get("ip", "unknown"),
|
484
|
+
"transfer_size_mb": size / (1024 * 1024),
|
485
|
+
"unusual_hours": unusual_time,
|
486
|
+
"detection_time": datetime.now(UTC).isoformat(),
|
487
|
+
"confidence": min(1.0, size / (size_threshold * 3)),
|
488
|
+
"evidence": event,
|
489
|
+
}
|
490
|
+
)
|
491
|
+
|
492
|
+
return threats
|
493
|
+
|
494
|
+
def _detect_insider_threat(
|
495
|
+
self, events: List[Dict[str, Any]], pattern: Dict[str, Any]
|
496
|
+
) -> List[Dict[str, Any]]:
|
497
|
+
"""Detect insider threat indicators.
|
498
|
+
|
499
|
+
Args:
|
500
|
+
events: Events to analyze
|
501
|
+
pattern: Insider threat pattern configuration
|
502
|
+
|
503
|
+
Returns:
|
504
|
+
List of insider threat detections
|
505
|
+
"""
|
506
|
+
threats = []
|
507
|
+
|
508
|
+
# Simple heuristic-based insider threat detection
|
509
|
+
user_behaviors = {}
|
510
|
+
|
511
|
+
for event in events:
|
512
|
+
user = event.get("user", "unknown")
|
513
|
+
if user not in user_behaviors:
|
514
|
+
user_behaviors[user] = {
|
515
|
+
"access_patterns": [],
|
516
|
+
"data_access": [],
|
517
|
+
"time_patterns": [],
|
518
|
+
"unusual_activities": 0,
|
519
|
+
}
|
520
|
+
|
521
|
+
# Track unusual activities
|
522
|
+
if event.get("unusual", False) or event.get("anomalous", False):
|
523
|
+
user_behaviors[user]["unusual_activities"] += 1
|
524
|
+
|
525
|
+
# Track access patterns
|
526
|
+
if event.get("type") == "access":
|
527
|
+
user_behaviors[user]["access_patterns"].append(event.get("resource"))
|
528
|
+
|
529
|
+
# Track data access
|
530
|
+
if event.get("type") == "data_access":
|
531
|
+
user_behaviors[user]["data_access"].append(event.get("data_type"))
|
532
|
+
|
533
|
+
# Analyze for insider threat indicators
|
534
|
+
for user, behavior in user_behaviors.items():
|
535
|
+
risk_score = 0
|
536
|
+
indicators = []
|
537
|
+
|
538
|
+
# High unusual activity count
|
539
|
+
if behavior["unusual_activities"] > 5:
|
540
|
+
risk_score += 0.4
|
541
|
+
indicators.append("high_unusual_activity")
|
542
|
+
|
543
|
+
# Diverse data access (potential data gathering)
|
544
|
+
if len(set(behavior["data_access"])) > 10:
|
545
|
+
risk_score += 0.3
|
546
|
+
indicators.append("diverse_data_access")
|
547
|
+
|
548
|
+
# Unusual resource access patterns
|
549
|
+
if len(set(behavior["access_patterns"])) > 20:
|
550
|
+
risk_score += 0.3
|
551
|
+
indicators.append("broad_resource_access")
|
552
|
+
|
553
|
+
if risk_score > pattern.get("deviation_threshold", 0.8):
|
554
|
+
threats.append(
|
555
|
+
{
|
556
|
+
"id": f"insider_threat_{user}_{int(datetime.now(UTC).timestamp())}",
|
557
|
+
"type": "insider_threat",
|
558
|
+
"severity": pattern["severity"],
|
559
|
+
"user": user,
|
560
|
+
"risk_score": risk_score,
|
561
|
+
"indicators": indicators,
|
562
|
+
"detection_time": datetime.now(UTC).isoformat(),
|
563
|
+
"confidence": min(1.0, risk_score),
|
564
|
+
"evidence": {
|
565
|
+
"unusual_activities": behavior["unusual_activities"],
|
566
|
+
"unique_data_types": len(set(behavior["data_access"])),
|
567
|
+
"unique_resources": len(set(behavior["access_patterns"])),
|
568
|
+
},
|
569
|
+
}
|
570
|
+
)
|
571
|
+
|
572
|
+
return threats
|
573
|
+
|
574
|
+
def _detect_anomalous_behavior(
|
575
|
+
self, events: List[Dict[str, Any]], pattern: Dict[str, Any]
|
576
|
+
) -> List[Dict[str, Any]]:
|
577
|
+
"""Detect statistical anomalies in behavior.
|
578
|
+
|
579
|
+
Args:
|
580
|
+
events: Events to analyze
|
581
|
+
pattern: Anomalous behavior pattern configuration
|
582
|
+
|
583
|
+
Returns:
|
584
|
+
List of anomalous behavior detections
|
585
|
+
"""
|
586
|
+
threats = []
|
587
|
+
|
588
|
+
# Simple statistical anomaly detection
|
589
|
+
event_counts = {}
|
590
|
+
time_patterns = {}
|
591
|
+
|
592
|
+
for event in events:
|
593
|
+
event_type = event.get("type", "unknown")
|
594
|
+
user = event.get("user", "unknown")
|
595
|
+
|
596
|
+
# Count events by type
|
597
|
+
if event_type not in event_counts:
|
598
|
+
event_counts[event_type] = 0
|
599
|
+
event_counts[event_type] += 1
|
600
|
+
|
601
|
+
# Track time patterns
|
602
|
+
if event.get("timestamp"):
|
603
|
+
try:
|
604
|
+
event_time = datetime.fromisoformat(
|
605
|
+
event["timestamp"].replace("Z", "+00:00")
|
606
|
+
)
|
607
|
+
hour = event_time.hour
|
608
|
+
time_key = f"{user}:{hour}"
|
609
|
+
if time_key not in time_patterns:
|
610
|
+
time_patterns[time_key] = 0
|
611
|
+
time_patterns[time_key] += 1
|
612
|
+
except:
|
613
|
+
pass
|
614
|
+
|
615
|
+
# Detect anomalies
|
616
|
+
confidence_threshold = pattern.get("confidence_threshold", 0.9)
|
617
|
+
|
618
|
+
# Check for unusual event frequency
|
619
|
+
if event_counts:
|
620
|
+
avg_count = sum(event_counts.values()) / len(event_counts)
|
621
|
+
for event_type, count in event_counts.items():
|
622
|
+
if count > avg_count * 3: # 3x above average
|
623
|
+
confidence = min(1.0, count / (avg_count * 5))
|
624
|
+
if confidence >= confidence_threshold:
|
625
|
+
threats.append(
|
626
|
+
{
|
627
|
+
"id": f"anomaly_{event_type}_{int(datetime.now(UTC).timestamp())}",
|
628
|
+
"type": "anomalous_behavior",
|
629
|
+
"subtype": "unusual_frequency",
|
630
|
+
"severity": pattern["severity"],
|
631
|
+
"event_type": event_type,
|
632
|
+
"frequency": count,
|
633
|
+
"average_frequency": avg_count,
|
634
|
+
"detection_time": datetime.now(UTC).isoformat(),
|
635
|
+
"confidence": confidence,
|
636
|
+
"evidence": {"event_counts": event_counts},
|
637
|
+
}
|
638
|
+
)
|
639
|
+
|
640
|
+
return threats
|
641
|
+
|
642
|
+
def _detect_ai_threats(
|
643
|
+
self, events: List[Dict[str, Any]], context: Dict[str, Any]
|
644
|
+
) -> List[Dict[str, Any]]:
|
645
|
+
"""Use AI to detect complex threat patterns.
|
646
|
+
|
647
|
+
Args:
|
648
|
+
events: Events to analyze
|
649
|
+
context: Additional context for analysis
|
650
|
+
|
651
|
+
Returns:
|
652
|
+
List of AI-detected threats
|
653
|
+
"""
|
654
|
+
threats = []
|
655
|
+
|
656
|
+
try:
|
657
|
+
# Prepare events for AI analysis
|
658
|
+
event_summary = self._prepare_events_for_ai(events)
|
659
|
+
|
660
|
+
# Create AI analysis prompt
|
661
|
+
prompt = self._create_ai_analysis_prompt(event_summary, context)
|
662
|
+
|
663
|
+
# Run AI analysis
|
664
|
+
ai_response = self.ai_agent.run(
|
665
|
+
provider="ollama",
|
666
|
+
model=self.ai_model.replace("ollama:", ""),
|
667
|
+
messages=[{"role": "user", "content": prompt}],
|
668
|
+
)
|
669
|
+
|
670
|
+
# Parse AI response for threats
|
671
|
+
ai_threats = self._parse_ai_response(ai_response)
|
672
|
+
threats.extend(ai_threats)
|
673
|
+
|
674
|
+
except Exception as e:
|
675
|
+
self.log_with_context("WARNING", f"AI threat detection failed: {e}")
|
676
|
+
|
677
|
+
return threats
|
678
|
+
|
679
|
+
def _prepare_events_for_ai(self, events: List[Dict[str, Any]]) -> str:
|
680
|
+
"""Prepare events for AI analysis.
|
681
|
+
|
682
|
+
Args:
|
683
|
+
events: Raw events
|
684
|
+
|
685
|
+
Returns:
|
686
|
+
Formatted event summary for AI analysis
|
687
|
+
"""
|
688
|
+
# Limit events for AI analysis (performance)
|
689
|
+
sample_events = events[:50] if len(events) > 50 else events
|
690
|
+
|
691
|
+
# Create summary
|
692
|
+
summary = {
|
693
|
+
"total_events": len(events),
|
694
|
+
"event_types": list(set(event.get("type", "unknown") for event in events)),
|
695
|
+
"unique_users": list(set(event.get("user", "unknown") for event in events)),
|
696
|
+
"unique_ips": list(set(event.get("ip", "unknown") for event in events)),
|
697
|
+
"time_range": {
|
698
|
+
"start": min(
|
699
|
+
event.get("timestamp", "")
|
700
|
+
for event in events
|
701
|
+
if event.get("timestamp")
|
702
|
+
),
|
703
|
+
"end": max(
|
704
|
+
event.get("timestamp", "")
|
705
|
+
for event in events
|
706
|
+
if event.get("timestamp")
|
707
|
+
),
|
708
|
+
},
|
709
|
+
"sample_events": sample_events,
|
710
|
+
}
|
711
|
+
|
712
|
+
return json.dumps(summary, indent=2)
|
713
|
+
|
714
|
+
    def _create_ai_analysis_prompt(
        self, event_summary: str, context: Dict[str, Any]
    ) -> str:
        """Create prompt for AI threat analysis.

        Args:
            event_summary: Formatted event summary (JSON string)
            context: Additional context

        Returns:
            AI analysis prompt
        """
        # NOTE: doubled braces ({{ }}) inside the template render as literal
        # braces in the f-string — they show the model the expected JSON shape.
        prompt = f"""
You are a cybersecurity expert analyzing security events for potential threats.

CONTEXT:
{json.dumps(context, indent=2) if context else "No additional context provided"}

EVENTS TO ANALYZE:
{event_summary}

TASK:
Analyze these events for security threats that may not be caught by simple rules.
Look for:
1. Complex attack patterns
2. Coordinated activities
3. Subtle indicators of compromise
4. Advanced persistent threats
5. Social engineering attempts

RESPONSE FORMAT:
Return a JSON array of threat objects with this structure:
[
    {{
        "id": "unique_threat_id",
        "type": "threat_type",
        "severity": "low|medium|high|critical",
        "description": "detailed threat description",
        "confidence": 0.0-1.0,
        "indicators": ["indicator1", "indicator2"],
        "evidence": {{"key": "value"}},
        "recommended_actions": ["action1", "action2"]
    }}
]

If no threats are detected, return an empty array: []
"""
        return prompt
|
762
|
+
|
763
|
+
def _parse_ai_response(self, ai_response: Dict[str, Any]) -> List[Dict[str, Any]]:
|
764
|
+
"""Parse AI response for detected threats.
|
765
|
+
|
766
|
+
Args:
|
767
|
+
ai_response: Response from AI agent
|
768
|
+
|
769
|
+
Returns:
|
770
|
+
List of parsed threats
|
771
|
+
"""
|
772
|
+
threats = []
|
773
|
+
|
774
|
+
try:
|
775
|
+
# Extract content from AI response
|
776
|
+
content = ai_response.get("result", {}).get("content", "")
|
777
|
+
if not content:
|
778
|
+
return threats
|
779
|
+
|
780
|
+
# Try to parse JSON response
|
781
|
+
import re
|
782
|
+
|
783
|
+
json_match = re.search(r"\[.*\]", content, re.DOTALL)
|
784
|
+
if json_match:
|
785
|
+
threats_data = json.loads(json_match.group())
|
786
|
+
|
787
|
+
for threat_data in threats_data:
|
788
|
+
# Add AI detection metadata
|
789
|
+
threat_data["detection_method"] = "ai_analysis"
|
790
|
+
threat_data["detection_time"] = datetime.now(UTC).isoformat()
|
791
|
+
|
792
|
+
# Ensure required fields
|
793
|
+
if not threat_data.get("id"):
|
794
|
+
threat_data["id"] = (
|
795
|
+
f"ai_threat_{int(datetime.now(UTC).timestamp())}"
|
796
|
+
)
|
797
|
+
|
798
|
+
threats.append(threat_data)
|
799
|
+
|
800
|
+
except Exception as e:
|
801
|
+
self.log_with_context("WARNING", f"Failed to parse AI response: {e}")
|
802
|
+
|
803
|
+
return threats
|
804
|
+
|
805
|
+
def _correlate_threats(
|
806
|
+
self, threats: List[Dict[str, Any]], events: List[Dict[str, Any]]
|
807
|
+
) -> List[Dict[str, Any]]:
|
808
|
+
"""Correlate threats across different detection methods.
|
809
|
+
|
810
|
+
Args:
|
811
|
+
threats: Detected threats
|
812
|
+
events: Original events
|
813
|
+
|
814
|
+
Returns:
|
815
|
+
List of correlated threat patterns
|
816
|
+
"""
|
817
|
+
correlated = []
|
818
|
+
|
819
|
+
# Group threats by user/IP for correlation
|
820
|
+
threat_groups = {}
|
821
|
+
|
822
|
+
for threat in threats:
|
823
|
+
key_parts = []
|
824
|
+
if threat.get("user"):
|
825
|
+
key_parts.append(f"user:{threat['user']}")
|
826
|
+
if threat.get("source_ip"):
|
827
|
+
key_parts.append(f"ip:{threat['source_ip']}")
|
828
|
+
|
829
|
+
if key_parts:
|
830
|
+
key = "|".join(key_parts)
|
831
|
+
if key not in threat_groups:
|
832
|
+
threat_groups[key] = []
|
833
|
+
threat_groups[key].append(threat)
|
834
|
+
|
835
|
+
# Look for correlated patterns
|
836
|
+
for key, group_threats in threat_groups.items():
|
837
|
+
if len(group_threats) > 1:
|
838
|
+
# Multiple threats from same user/IP - potential coordinated attack
|
839
|
+
correlated.append(
|
840
|
+
{
|
841
|
+
"id": f"correlated_{key.replace(':', '_').replace('|', '_')}_{int(datetime.now(UTC).timestamp())}",
|
842
|
+
"type": "correlated_attack",
|
843
|
+
"severity": "high",
|
844
|
+
"description": f"Multiple threat types detected from {key}",
|
845
|
+
"related_threats": [t["id"] for t in group_threats],
|
846
|
+
"threat_types": list(set(t["type"] for t in group_threats)),
|
847
|
+
"correlation_score": min(1.0, len(group_threats) / 3),
|
848
|
+
"detection_time": datetime.now(UTC).isoformat(),
|
849
|
+
}
|
850
|
+
)
|
851
|
+
|
852
|
+
return correlated
|
853
|
+
|
854
|
+
def _should_trigger_response(self, threat: Dict[str, Any]) -> bool:
|
855
|
+
"""Determine if threat should trigger automated response.
|
856
|
+
|
857
|
+
Args:
|
858
|
+
threat: Threat to evaluate
|
859
|
+
|
860
|
+
Returns:
|
861
|
+
True if response should be triggered
|
862
|
+
"""
|
863
|
+
severity = threat.get("severity", "low")
|
864
|
+
confidence = threat.get("confidence", 0.0)
|
865
|
+
|
866
|
+
# Response thresholds
|
867
|
+
thresholds = {"critical": 0.7, "high": 0.8, "medium": 0.9, "low": 0.95}
|
868
|
+
|
869
|
+
required_confidence = thresholds.get(severity, 0.95)
|
870
|
+
|
871
|
+
# Check if severity meets minimum threshold
|
872
|
+
severity_levels = {"low": 1, "medium": 2, "high": 3, "critical": 4}
|
873
|
+
min_severity = severity_levels.get(self.severity_threshold, 2)
|
874
|
+
threat_severity = severity_levels.get(severity, 1)
|
875
|
+
|
876
|
+
return threat_severity >= min_severity and confidence >= required_confidence
|
877
|
+
|
878
|
+
def _execute_response_actions(self, threat: Dict[str, Any]) -> List[str]:
|
879
|
+
"""Execute automated response actions for a threat.
|
880
|
+
|
881
|
+
Args:
|
882
|
+
threat: Threat that triggered response
|
883
|
+
|
884
|
+
Returns:
|
885
|
+
List of actions taken
|
886
|
+
"""
|
887
|
+
actions_taken = []
|
888
|
+
|
889
|
+
for action in self.response_actions:
|
890
|
+
try:
|
891
|
+
if action in self.response_handlers:
|
892
|
+
self.response_handlers[action](threat)
|
893
|
+
actions_taken.append(action)
|
894
|
+
self.log_with_context(
|
895
|
+
"INFO",
|
896
|
+
f"Executed response action: {action}",
|
897
|
+
threat_id=threat["id"],
|
898
|
+
)
|
899
|
+
else:
|
900
|
+
self.log_with_context(
|
901
|
+
"WARNING", f"Unknown response action: {action}"
|
902
|
+
)
|
903
|
+
except Exception as e:
|
904
|
+
self.log_with_context(
|
905
|
+
"ERROR", f"Failed to execute response action {action}: {e}"
|
906
|
+
)
|
907
|
+
|
908
|
+
return actions_taken
|
909
|
+
|
910
|
+
def _send_alert(self, threat: Dict[str, Any]) -> None:
|
911
|
+
"""Send threat alert.
|
912
|
+
|
913
|
+
Args:
|
914
|
+
threat: Threat information
|
915
|
+
"""
|
916
|
+
# Create security event for the alert
|
917
|
+
alert_event = {
|
918
|
+
"event_type": "threat_alert",
|
919
|
+
"severity": threat.get("severity", "medium"),
|
920
|
+
"description": f"Threat detected: {threat.get('type', 'unknown')}",
|
921
|
+
"metadata": threat,
|
922
|
+
"user_id": threat.get("user", "system"),
|
923
|
+
"source_ip": threat.get("source_ip", "unknown"),
|
924
|
+
}
|
925
|
+
|
926
|
+
self.security_event_node.run(**alert_event)
|
927
|
+
|
928
|
+
def _block_ip(self, threat: Dict[str, Any]) -> None:
|
929
|
+
"""Block IP address associated with threat.
|
930
|
+
|
931
|
+
Args:
|
932
|
+
threat: Threat information
|
933
|
+
"""
|
934
|
+
ip = threat.get("source_ip")
|
935
|
+
if ip and ip != "unknown":
|
936
|
+
# Log the IP blocking action
|
937
|
+
self.log_with_context(
|
938
|
+
"INFO", f"Would block IP: {ip} (threat: {threat['id']})"
|
939
|
+
)
|
940
|
+
|
941
|
+
# In a real implementation, this would interface with firewall/network controls
|
942
|
+
# For now, just log the action
|
943
|
+
block_event = {
|
944
|
+
"event_type": "ip_blocked",
|
945
|
+
"severity": "high",
|
946
|
+
"description": f"IP {ip} blocked due to threat {threat['id']}",
|
947
|
+
"metadata": {"blocked_ip": ip, "threat_id": threat["id"]},
|
948
|
+
"user_id": "system",
|
949
|
+
"source_ip": ip,
|
950
|
+
}
|
951
|
+
|
952
|
+
self.security_event_node.run(**block_event)
|
953
|
+
|
954
|
+
def _lock_account(self, threat: Dict[str, Any]) -> None:
|
955
|
+
"""Lock user account associated with threat.
|
956
|
+
|
957
|
+
Args:
|
958
|
+
threat: Threat information
|
959
|
+
"""
|
960
|
+
user = threat.get("user")
|
961
|
+
if user and user != "unknown":
|
962
|
+
# Log the account locking action
|
963
|
+
self.log_with_context(
|
964
|
+
"INFO", f"Would lock account: {user} (threat: {threat['id']})"
|
965
|
+
)
|
966
|
+
|
967
|
+
# In a real implementation, this would interface with user management system
|
968
|
+
lock_event = {
|
969
|
+
"event_type": "account_locked",
|
970
|
+
"severity": "high",
|
971
|
+
"description": f"Account {user} locked due to threat {threat['id']}",
|
972
|
+
"metadata": {"locked_user": user, "threat_id": threat["id"]},
|
973
|
+
"user_id": user,
|
974
|
+
"source_ip": threat.get("source_ip", "unknown"),
|
975
|
+
}
|
976
|
+
|
977
|
+
self.security_event_node.run(**lock_event)
|
978
|
+
|
979
|
+
def _quarantine_resource(self, threat: Dict[str, Any]) -> None:
|
980
|
+
"""Quarantine resource associated with threat.
|
981
|
+
|
982
|
+
Args:
|
983
|
+
threat: Threat information
|
984
|
+
"""
|
985
|
+
# Log the quarantine action
|
986
|
+
self.log_with_context(
|
987
|
+
"INFO", f"Would quarantine resource for threat: {threat['id']}"
|
988
|
+
)
|
989
|
+
|
990
|
+
quarantine_event = {
|
991
|
+
"event_type": "resource_quarantined",
|
992
|
+
"severity": "medium",
|
993
|
+
"description": f"Resource quarantined due to threat {threat['id']}",
|
994
|
+
"metadata": {"threat_id": threat["id"]},
|
995
|
+
"user_id": threat.get("user", "system"),
|
996
|
+
"source_ip": threat.get("source_ip", "unknown"),
|
997
|
+
}
|
998
|
+
|
999
|
+
self.security_event_node.run(**quarantine_event)
|
1000
|
+
|
1001
|
+
def _log_threat(self, threat: Dict[str, Any]) -> None:
|
1002
|
+
"""Log threat to audit trail.
|
1003
|
+
|
1004
|
+
Args:
|
1005
|
+
threat: Threat information
|
1006
|
+
"""
|
1007
|
+
# Create audit log entry
|
1008
|
+
log_entry = {
|
1009
|
+
"action": "threat_detected",
|
1010
|
+
"user_id": threat.get("user", "system"),
|
1011
|
+
"resource_type": "security_event",
|
1012
|
+
"resource_id": threat["id"],
|
1013
|
+
"metadata": threat,
|
1014
|
+
"ip_address": threat.get("source_ip", "unknown"),
|
1015
|
+
}
|
1016
|
+
|
1017
|
+
self.audit_log_node.run(**log_entry)
|
1018
|
+
|
1019
|
+
def _update_stats(
|
1020
|
+
self, events_processed: int, threats_detected: int, processing_time_ms: float
|
1021
|
+
) -> None:
|
1022
|
+
"""Update detection statistics.
|
1023
|
+
|
1024
|
+
Args:
|
1025
|
+
events_processed: Number of events processed
|
1026
|
+
threats_detected: Number of threats detected
|
1027
|
+
processing_time_ms: Processing time in milliseconds
|
1028
|
+
"""
|
1029
|
+
self.detection_stats["total_events_processed"] += events_processed
|
1030
|
+
self.detection_stats["threats_detected"] += threats_detected
|
1031
|
+
self.detection_stats["last_detection"] = datetime.now(UTC).isoformat()
|
1032
|
+
|
1033
|
+
# Update average detection time
|
1034
|
+
if self.detection_stats["avg_detection_time_ms"] == 0:
|
1035
|
+
self.detection_stats["avg_detection_time_ms"] = processing_time_ms
|
1036
|
+
else:
|
1037
|
+
# Simple moving average
|
1038
|
+
self.detection_stats["avg_detection_time_ms"] = (
|
1039
|
+
self.detection_stats["avg_detection_time_ms"] * 0.9
|
1040
|
+
+ processing_time_ms * 0.1
|
1041
|
+
)
|
1042
|
+
|
1043
|
+
def analyze_patterns(
|
1044
|
+
self, time_window: timedelta = timedelta(hours=24)
|
1045
|
+
) -> Dict[str, Any]:
|
1046
|
+
"""Analyze historical patterns for threat intelligence.
|
1047
|
+
|
1048
|
+
Args:
|
1049
|
+
time_window: Time window for pattern analysis
|
1050
|
+
|
1051
|
+
Returns:
|
1052
|
+
Dictionary with pattern analysis results
|
1053
|
+
"""
|
1054
|
+
return {
|
1055
|
+
"analysis_type": "historical_patterns",
|
1056
|
+
"time_window": str(time_window),
|
1057
|
+
"pattern_summary": "Historical pattern analysis would be implemented here",
|
1058
|
+
"stats": self.detection_stats,
|
1059
|
+
"recommendations": [
|
1060
|
+
"Implement pattern baseline learning",
|
1061
|
+
"Add threat feed integration",
|
1062
|
+
"Enable machine learning models",
|
1063
|
+
],
|
1064
|
+
}
|
1065
|
+
|
1066
|
+
def auto_respond(self, threat: SecurityEvent) -> List[str]:
|
1067
|
+
"""Execute automated response actions for a threat.
|
1068
|
+
|
1069
|
+
Args:
|
1070
|
+
threat: Security event representing the threat
|
1071
|
+
|
1072
|
+
Returns:
|
1073
|
+
List of response actions taken
|
1074
|
+
"""
|
1075
|
+
threat_dict = {
|
1076
|
+
"id": getattr(threat, "correlation_id", None)
|
1077
|
+
or f"event_{int(datetime.now(UTC).timestamp())}",
|
1078
|
+
"type": threat.event_type,
|
1079
|
+
"severity": threat.severity,
|
1080
|
+
"user": threat.user_id,
|
1081
|
+
"source_ip": threat.ip_address,
|
1082
|
+
}
|
1083
|
+
|
1084
|
+
return self._execute_response_actions(threat_dict)
|
1085
|
+
|
1086
|
+
def get_detection_stats(self) -> Dict[str, Any]:
|
1087
|
+
"""Get current detection statistics.
|
1088
|
+
|
1089
|
+
Returns:
|
1090
|
+
Dictionary with detection statistics
|
1091
|
+
"""
|
1092
|
+
return {
|
1093
|
+
**self.detection_stats,
|
1094
|
+
"rules_enabled": self.detection_rules,
|
1095
|
+
"response_actions": self.response_actions,
|
1096
|
+
"ai_model": self.ai_model,
|
1097
|
+
"real_time_enabled": self.real_time,
|
1098
|
+
"performance_target_ms": self.response_time_target_ms,
|
1099
|
+
}
|
1100
|
+
|
1101
|
+
    async def async_run(self, **kwargs) -> Dict[str, Any]:
        """Async execution method for enterprise integration.

        Thin async wrapper that delegates directly to ``run``; the underlying
        work executes synchronously on the current task (nothing is awaited
        or offloaded here).
        """
        return self.run(**kwargs)
|