kailash 0.3.2__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151) hide show
  1. kailash/__init__.py +33 -1
  2. kailash/access_control/__init__.py +129 -0
  3. kailash/access_control/managers.py +461 -0
  4. kailash/access_control/rule_evaluators.py +467 -0
  5. kailash/access_control_abac.py +825 -0
  6. kailash/config/__init__.py +27 -0
  7. kailash/config/database_config.py +359 -0
  8. kailash/database/__init__.py +28 -0
  9. kailash/database/execution_pipeline.py +499 -0
  10. kailash/middleware/__init__.py +306 -0
  11. kailash/middleware/auth/__init__.py +33 -0
  12. kailash/middleware/auth/access_control.py +436 -0
  13. kailash/middleware/auth/auth_manager.py +422 -0
  14. kailash/middleware/auth/jwt_auth.py +477 -0
  15. kailash/middleware/auth/kailash_jwt_auth.py +616 -0
  16. kailash/middleware/communication/__init__.py +37 -0
  17. kailash/middleware/communication/ai_chat.py +989 -0
  18. kailash/middleware/communication/api_gateway.py +802 -0
  19. kailash/middleware/communication/events.py +470 -0
  20. kailash/middleware/communication/realtime.py +710 -0
  21. kailash/middleware/core/__init__.py +21 -0
  22. kailash/middleware/core/agent_ui.py +890 -0
  23. kailash/middleware/core/schema.py +643 -0
  24. kailash/middleware/core/workflows.py +396 -0
  25. kailash/middleware/database/__init__.py +63 -0
  26. kailash/middleware/database/base.py +113 -0
  27. kailash/middleware/database/base_models.py +525 -0
  28. kailash/middleware/database/enums.py +106 -0
  29. kailash/middleware/database/migrations.py +12 -0
  30. kailash/{api/database.py → middleware/database/models.py} +183 -291
  31. kailash/middleware/database/repositories.py +685 -0
  32. kailash/middleware/database/session_manager.py +19 -0
  33. kailash/middleware/mcp/__init__.py +38 -0
  34. kailash/middleware/mcp/client_integration.py +585 -0
  35. kailash/middleware/mcp/enhanced_server.py +576 -0
  36. kailash/nodes/__init__.py +27 -3
  37. kailash/nodes/admin/__init__.py +42 -0
  38. kailash/nodes/admin/audit_log.py +794 -0
  39. kailash/nodes/admin/permission_check.py +864 -0
  40. kailash/nodes/admin/role_management.py +823 -0
  41. kailash/nodes/admin/security_event.py +1523 -0
  42. kailash/nodes/admin/user_management.py +944 -0
  43. kailash/nodes/ai/a2a.py +24 -7
  44. kailash/nodes/ai/ai_providers.py +248 -40
  45. kailash/nodes/ai/embedding_generator.py +11 -11
  46. kailash/nodes/ai/intelligent_agent_orchestrator.py +99 -11
  47. kailash/nodes/ai/llm_agent.py +436 -5
  48. kailash/nodes/ai/self_organizing.py +85 -10
  49. kailash/nodes/ai/vision_utils.py +148 -0
  50. kailash/nodes/alerts/__init__.py +26 -0
  51. kailash/nodes/alerts/base.py +234 -0
  52. kailash/nodes/alerts/discord.py +499 -0
  53. kailash/nodes/api/auth.py +287 -6
  54. kailash/nodes/api/rest.py +151 -0
  55. kailash/nodes/auth/__init__.py +17 -0
  56. kailash/nodes/auth/directory_integration.py +1228 -0
  57. kailash/nodes/auth/enterprise_auth_provider.py +1328 -0
  58. kailash/nodes/auth/mfa.py +2338 -0
  59. kailash/nodes/auth/risk_assessment.py +872 -0
  60. kailash/nodes/auth/session_management.py +1093 -0
  61. kailash/nodes/auth/sso.py +1040 -0
  62. kailash/nodes/base.py +344 -13
  63. kailash/nodes/base_cycle_aware.py +4 -2
  64. kailash/nodes/base_with_acl.py +1 -1
  65. kailash/nodes/code/python.py +283 -10
  66. kailash/nodes/compliance/__init__.py +9 -0
  67. kailash/nodes/compliance/data_retention.py +1888 -0
  68. kailash/nodes/compliance/gdpr.py +2004 -0
  69. kailash/nodes/data/__init__.py +22 -2
  70. kailash/nodes/data/async_connection.py +469 -0
  71. kailash/nodes/data/async_sql.py +757 -0
  72. kailash/nodes/data/async_vector.py +598 -0
  73. kailash/nodes/data/readers.py +767 -0
  74. kailash/nodes/data/retrieval.py +360 -1
  75. kailash/nodes/data/sharepoint_graph.py +397 -21
  76. kailash/nodes/data/sql.py +94 -5
  77. kailash/nodes/data/streaming.py +68 -8
  78. kailash/nodes/data/vector_db.py +54 -4
  79. kailash/nodes/enterprise/__init__.py +13 -0
  80. kailash/nodes/enterprise/batch_processor.py +741 -0
  81. kailash/nodes/enterprise/data_lineage.py +497 -0
  82. kailash/nodes/logic/convergence.py +31 -9
  83. kailash/nodes/logic/operations.py +14 -3
  84. kailash/nodes/mixins/__init__.py +8 -0
  85. kailash/nodes/mixins/event_emitter.py +201 -0
  86. kailash/nodes/mixins/mcp.py +9 -4
  87. kailash/nodes/mixins/security.py +165 -0
  88. kailash/nodes/monitoring/__init__.py +7 -0
  89. kailash/nodes/monitoring/performance_benchmark.py +2497 -0
  90. kailash/nodes/rag/__init__.py +284 -0
  91. kailash/nodes/rag/advanced.py +1615 -0
  92. kailash/nodes/rag/agentic.py +773 -0
  93. kailash/nodes/rag/conversational.py +999 -0
  94. kailash/nodes/rag/evaluation.py +875 -0
  95. kailash/nodes/rag/federated.py +1188 -0
  96. kailash/nodes/rag/graph.py +721 -0
  97. kailash/nodes/rag/multimodal.py +671 -0
  98. kailash/nodes/rag/optimized.py +933 -0
  99. kailash/nodes/rag/privacy.py +1059 -0
  100. kailash/nodes/rag/query_processing.py +1335 -0
  101. kailash/nodes/rag/realtime.py +764 -0
  102. kailash/nodes/rag/registry.py +547 -0
  103. kailash/nodes/rag/router.py +837 -0
  104. kailash/nodes/rag/similarity.py +1854 -0
  105. kailash/nodes/rag/strategies.py +566 -0
  106. kailash/nodes/rag/workflows.py +575 -0
  107. kailash/nodes/security/__init__.py +19 -0
  108. kailash/nodes/security/abac_evaluator.py +1411 -0
  109. kailash/nodes/security/audit_log.py +103 -0
  110. kailash/nodes/security/behavior_analysis.py +1893 -0
  111. kailash/nodes/security/credential_manager.py +401 -0
  112. kailash/nodes/security/rotating_credentials.py +760 -0
  113. kailash/nodes/security/security_event.py +133 -0
  114. kailash/nodes/security/threat_detection.py +1103 -0
  115. kailash/nodes/testing/__init__.py +9 -0
  116. kailash/nodes/testing/credential_testing.py +499 -0
  117. kailash/nodes/transform/__init__.py +10 -2
  118. kailash/nodes/transform/chunkers.py +592 -1
  119. kailash/nodes/transform/processors.py +484 -14
  120. kailash/nodes/validation.py +321 -0
  121. kailash/runtime/access_controlled.py +1 -1
  122. kailash/runtime/async_local.py +41 -7
  123. kailash/runtime/docker.py +1 -1
  124. kailash/runtime/local.py +474 -55
  125. kailash/runtime/parallel.py +1 -1
  126. kailash/runtime/parallel_cyclic.py +1 -1
  127. kailash/runtime/testing.py +210 -2
  128. kailash/security.py +1 -1
  129. kailash/utils/migrations/__init__.py +25 -0
  130. kailash/utils/migrations/generator.py +433 -0
  131. kailash/utils/migrations/models.py +231 -0
  132. kailash/utils/migrations/runner.py +489 -0
  133. kailash/utils/secure_logging.py +342 -0
  134. kailash/workflow/__init__.py +16 -0
  135. kailash/workflow/cyclic_runner.py +3 -4
  136. kailash/workflow/graph.py +70 -2
  137. kailash/workflow/resilience.py +249 -0
  138. kailash/workflow/templates.py +726 -0
  139. {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/METADATA +256 -20
  140. kailash-0.4.1.dist-info/RECORD +227 -0
  141. kailash/api/__init__.py +0 -17
  142. kailash/api/__main__.py +0 -6
  143. kailash/api/studio_secure.py +0 -893
  144. kailash/mcp/__main__.py +0 -13
  145. kailash/mcp/server_new.py +0 -336
  146. kailash/mcp/servers/__init__.py +0 -12
  147. kailash-0.3.2.dist-info/RECORD +0 -136
  148. {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/WHEEL +0 -0
  149. {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/entry_points.txt +0 -0
  150. {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/licenses/LICENSE +0 -0
  151. {kailash-0.3.2.dist-info → kailash-0.4.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1523 @@
1
+ """Enterprise security event monitoring node for threat detection and response.
2
+
3
+ This node provides specialized security event processing, threat detection,
4
+ and automated response capabilities. Built for enterprise security operations
5
+ centers (SOCs) with real-time monitoring, alerting, and integration with
6
+ external security systems.
7
+
8
+ Features:
9
+ - Real-time security event processing
10
+ - Threat detection with ML-based analytics
11
+ - Automated incident response workflows
12
+ - Integration with SIEM and SOAR systems
13
+ - Risk scoring and escalation
14
+ - Security metrics and dashboards
15
+ - Compliance violation detection
16
+ - Forensic data collection
17
+ """
18
+
19
+ import hashlib
20
+ import json
21
+ from dataclasses import dataclass
22
+ from datetime import UTC, datetime, timedelta
23
+ from enum import Enum
24
+ from typing import Any, Dict, List, Optional, Tuple
25
+
26
+ from kailash.access_control import UserContext
27
+ from kailash.nodes.admin.audit_log import (
28
+ AuditEventType,
29
+ AuditSeverity,
30
+ EnterpriseAuditLogNode,
31
+ )
32
+ from kailash.nodes.base import Node, NodeParameter, register_node
33
+ from kailash.nodes.data import AsyncSQLDatabaseNode
34
+ from kailash.sdk_exceptions import NodeExecutionError, NodeValidationError
35
+
36
+
37
class SecurityEventType(Enum):
    """Types of security events.

    Each value is the canonical string accepted in
    ``event_data["event_type"]`` and stored with the event.
    """

    SUSPICIOUS_LOGIN = "suspicious_login"
    MULTIPLE_FAILED_LOGINS = "multiple_failed_logins"
    PRIVILEGE_ESCALATION = "privilege_escalation"
    UNAUTHORIZED_ACCESS_ATTEMPT = "unauthorized_access_attempt"
    DATA_EXFILTRATION = "data_exfiltration"
    UNUSUAL_DATA_ACCESS = "unusual_data_access"
    BRUTE_FORCE_ATTACK = "brute_force_attack"
    ACCOUNT_TAKEOVER = "account_takeover"
    INSIDER_THREAT = "insider_threat"
    MALWARE_DETECTION = "malware_detection"
    PHISHING_ATTEMPT = "phishing_attempt"
    POLICY_VIOLATION = "policy_violation"
    COMPLIANCE_BREACH = "compliance_breach"
    ANOMALOUS_BEHAVIOR = "anomalous_behavior"
    SYSTEM_COMPROMISE = "system_compromise"
    # Catch-all for events that do not fit a predefined category.
    CUSTOM_THREAT = "custom_threat"
56
+
57
+
58
class ThreatLevel(Enum):
    """Threat severity levels, ordered from least to most severe.

    Used both for event severity and (in _calculate_event_risk_score) as a
    multiplier on the base risk score.
    """

    INFO = "info"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
66
+
67
+
68
class SecurityOperation(Enum):
    """Supported security operations.

    The string values are the accepted choices for the node's ``operation``
    parameter; ``run()`` dispatches on them.
    """

    CREATE_EVENT = "create_event"
    ANALYZE_THREATS = "analyze_threats"
    DETECT_ANOMALIES = "detect_anomalies"
    GENERATE_ALERTS = "generate_alerts"
    GET_INCIDENTS = "get_incidents"
    CREATE_INCIDENT = "create_incident"
    UPDATE_INCIDENT = "update_incident"
    GET_THREAT_INTELLIGENCE = "get_threat_intelligence"
    CALCULATE_RISK_SCORE = "calculate_risk_score"
    MONITOR_USER_BEHAVIOR = "monitor_user_behavior"
    COMPLIANCE_CHECK = "compliance_check"
    FORENSIC_ANALYSIS = "forensic_analysis"
    AUTOMATED_RESPONSE = "automated_response"
84
+
85
+
86
class IncidentStatus(Enum):
    """Security incident status.

    Lifecycle states for a SecurityIncident; NEW is the initial state and
    CLOSED the terminal one.
    """

    NEW = "new"
    INVESTIGATING = "investigating"
    CONTAINMENT = "containment"
    ERADICATION = "eradication"
    RECOVERY = "recovery"
    CLOSED = "closed"
95
+
96
+
97
@dataclass
class SecurityEvent:
    """Security event structure.

    In-memory representation of one security event; the fields mirror the
    columns written to the ``security_events`` table in _create_event.
    ``to_dict`` flattens enums and the timestamp for JSON serialization.
    """

    event_id: str
    event_type: SecurityEventType
    threat_level: ThreatLevel
    # None when the event is not attributable to a specific user.
    user_id: Optional[str]
    tenant_id: str
    source_ip: str
    target_resource: Optional[str]
    description: str
    # Free-form detection signals (e.g. location, device, time-of-day).
    indicators: Dict[str, Any]
    # 0-10 scale, computed by _calculate_event_risk_score.
    risk_score: float
    timestamp: datetime
    detection_method: str
    false_positive_probability: float = 0.0
    mitigation_applied: bool = False
    # Set once the event is attached to a SecurityIncident.
    incident_id: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            "event_id": self.event_id,
            # Enums serialize as their string values.
            "event_type": self.event_type.value,
            "threat_level": self.threat_level.value,
            "user_id": self.user_id,
            "tenant_id": self.tenant_id,
            "source_ip": self.source_ip,
            "target_resource": self.target_resource,
            "description": self.description,
            "indicators": self.indicators,
            "risk_score": self.risk_score,
            # ISO-8601 string so the dict is json.dumps-able.
            "timestamp": self.timestamp.isoformat(),
            "detection_method": self.detection_method,
            "false_positive_probability": self.false_positive_probability,
            "mitigation_applied": self.mitigation_applied,
            "incident_id": self.incident_id,
        }
136
+
137
+
138
@dataclass
class SecurityIncident:
    """Security incident structure.

    Groups one or more security events (by event ID) under a managed
    incident with status lifecycle, severity, and response bookkeeping.
    """

    incident_id: str
    title: str
    description: str
    status: IncidentStatus
    severity: ThreatLevel
    # None until the incident is assigned to a responder.
    assignee: Optional[str]
    created_at: datetime
    updated_at: datetime
    # None while the incident is still open.
    closed_at: Optional[datetime]
    events: List[str]  # List of security event IDs
    actions_taken: List[Dict[str, Any]]
    impact_assessment: Dict[str, Any]
    tenant_id: str

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            "incident_id": self.incident_id,
            "title": self.title,
            "description": self.description,
            # Enums serialize as their string values.
            "status": self.status.value,
            "severity": self.severity.value,
            "assignee": self.assignee,
            "created_at": self.created_at.isoformat(),
            "updated_at": self.updated_at.isoformat(),
            # closed_at stays None (not "None") for open incidents.
            "closed_at": self.closed_at.isoformat() if self.closed_at else None,
            "events": self.events,
            "actions_taken": self.actions_taken,
            "impact_assessment": self.impact_assessment,
            "tenant_id": self.tenant_id,
        }
173
+
174
+
175
+ @register_node()
176
+ class EnterpriseSecurityEventNode(Node):
177
+ """Enterprise security event monitoring and incident response node.
178
+
179
+ This node provides comprehensive security event processing including:
180
+ - Real-time threat detection and analysis
181
+ - Security incident management
182
+ - Risk scoring and escalation
183
+ - Automated response workflows
184
+ - Compliance monitoring
185
+ - Forensic analysis capabilities
186
+
187
+ Parameters:
188
+ operation: Type of security operation to perform
189
+ event_data: Security event data
190
+ incident_data: Security incident data
191
+ analysis_config: Configuration for threat analysis
192
+ user_id: User ID for behavior monitoring
193
+ risk_threshold: Risk score threshold for alerts
194
+ time_window: Time window for analysis
195
+ detection_rules: Custom detection rules
196
+ response_actions: Automated response configuration
197
+ tenant_id: Tenant isolation
198
+
199
+ Example:
200
+ >>> # Create security event for suspicious login
201
+ >>> node = SecurityEventNode(
202
+ ... operation="create_event",
203
+ ... event_data={
204
+ ... "event_type": "suspicious_login",
205
+ ... "threat_level": "medium",
206
+ ... "user_id": "user123",
207
+ ... "source_ip": "192.168.1.100",
208
+ ... "description": "Login from unusual location",
209
+ ... "indicators": {
210
+ ... "location": "Unknown Country",
211
+ ... "device": "New Device",
212
+ ... "time": "Outside business hours"
213
+ ... },
214
+ ... "detection_method": "geolocation_analysis"
215
+ ... }
216
+ ... )
217
+ >>> result = node.run()
218
+ >>> event_id = result["security_event"]["event_id"]
219
+
220
+ >>> # Analyze threats in time window
221
+ >>> node = SecurityEventNode(
222
+ ... operation="analyze_threats",
223
+ ... analysis_config={
224
+ ... "time_window": 3600, # 1 hour
225
+ ... "threat_types": ["brute_force_attack", "suspicious_login"],
226
+ ... "risk_threshold": 7.0
227
+ ... }
228
+ ... )
229
+ >>> result = node.run()
230
+ >>> threats = result["threat_analysis"]["high_risk_events"]
231
+
232
+ >>> # Monitor user behavior for anomalies
233
+ >>> node = SecurityEventNode(
234
+ ... operation="monitor_user_behavior",
235
+ ... user_id="user123",
236
+ ... analysis_config={
237
+ ... "lookback_days": 30,
238
+ ... "anomaly_threshold": 0.8
239
+ ... }
240
+ ... )
241
+ >>> result = node.run()
242
+ >>> anomalies = result["behavior_analysis"]["anomalies"]
243
+ """
244
+
245
    def __init__(self, **config):
        """Initialize the node; config is forwarded verbatim to the base Node.

        The database and audit collaborators are created lazily per run in
        _init_dependencies, not here.
        """
        super().__init__(**config)
        # Lazily-initialized collaborators (set in _init_dependencies).
        self._db_node = None
        self._audit_node = None
249
+
250
+ def get_parameters(self) -> Dict[str, NodeParameter]:
251
+ """Define parameters for security operations."""
252
+ return {
253
+ param.name: param
254
+ for param in [
255
+ # Operation type
256
+ NodeParameter(
257
+ name="operation",
258
+ type=str,
259
+ required=True,
260
+ description="Security operation to perform",
261
+ choices=[op.value for op in SecurityOperation],
262
+ ),
263
+ # Event data
264
+ NodeParameter(
265
+ name="event_data",
266
+ type=dict,
267
+ required=False,
268
+ description="Security event data",
269
+ ),
270
+ # Incident data
271
+ NodeParameter(
272
+ name="incident_data",
273
+ type=dict,
274
+ required=False,
275
+ description="Security incident data",
276
+ ),
277
+ # Analysis configuration
278
+ NodeParameter(
279
+ name="analysis_config",
280
+ type=dict,
281
+ required=False,
282
+ description="Configuration for threat analysis",
283
+ ),
284
+ # User monitoring
285
+ NodeParameter(
286
+ name="user_id",
287
+ type=str,
288
+ required=False,
289
+ description="User ID for behavior monitoring",
290
+ ),
291
+ # Risk configuration
292
+ NodeParameter(
293
+ name="risk_threshold",
294
+ type=float,
295
+ required=False,
296
+ default=7.0,
297
+ description="Risk score threshold for alerts",
298
+ ),
299
+ # Time windows
300
+ NodeParameter(
301
+ name="time_window",
302
+ type=int,
303
+ required=False,
304
+ default=3600,
305
+ description="Time window in seconds for analysis",
306
+ ),
307
+ # Detection configuration
308
+ NodeParameter(
309
+ name="detection_rules",
310
+ type=list,
311
+ required=False,
312
+ description="Custom detection rules",
313
+ ),
314
+ # Response configuration
315
+ NodeParameter(
316
+ name="response_actions",
317
+ type=dict,
318
+ required=False,
319
+ description="Automated response configuration",
320
+ ),
321
+ # Multi-tenancy
322
+ NodeParameter(
323
+ name="tenant_id",
324
+ type=str,
325
+ required=False,
326
+ description="Tenant ID for multi-tenant isolation",
327
+ ),
328
+ # Database configuration
329
+ NodeParameter(
330
+ name="database_config",
331
+ type=dict,
332
+ required=False,
333
+ description="Database connection configuration",
334
+ ),
335
+ # Incident management
336
+ NodeParameter(
337
+ name="incident_id",
338
+ type=str,
339
+ required=False,
340
+ description="Incident ID for incident operations",
341
+ ),
342
+ # Filtering
343
+ NodeParameter(
344
+ name="filters",
345
+ type=dict,
346
+ required=False,
347
+ description="Filters for event/incident queries",
348
+ ),
349
+ # Pagination
350
+ NodeParameter(
351
+ name="pagination",
352
+ type=dict,
353
+ required=False,
354
+ description="Pagination parameters",
355
+ ),
356
+ ]
357
+ }
358
+
359
+ def run(self, **inputs) -> Dict[str, Any]:
360
+ """Execute security operation."""
361
+ try:
362
+ operation = SecurityOperation(inputs["operation"])
363
+
364
+ # Initialize dependencies
365
+ self._init_dependencies(inputs)
366
+
367
+ # Route to appropriate operation
368
+ if operation == SecurityOperation.CREATE_EVENT:
369
+ return self._create_event(inputs)
370
+ elif operation == SecurityOperation.ANALYZE_THREATS:
371
+ return self._analyze_threats(inputs)
372
+ elif operation == SecurityOperation.DETECT_ANOMALIES:
373
+ return self._detect_anomalies(inputs)
374
+ elif operation == SecurityOperation.GENERATE_ALERTS:
375
+ return self._generate_alerts(inputs)
376
+ elif operation == SecurityOperation.GET_INCIDENTS:
377
+ return self._get_incidents(inputs)
378
+ elif operation == SecurityOperation.CREATE_INCIDENT:
379
+ return self._create_incident(inputs)
380
+ elif operation == SecurityOperation.UPDATE_INCIDENT:
381
+ return self._update_incident(inputs)
382
+ elif operation == SecurityOperation.GET_THREAT_INTELLIGENCE:
383
+ return self._get_threat_intelligence(inputs)
384
+ elif operation == SecurityOperation.CALCULATE_RISK_SCORE:
385
+ return self._calculate_risk_score(inputs)
386
+ elif operation == SecurityOperation.MONITOR_USER_BEHAVIOR:
387
+ return self._monitor_user_behavior(inputs)
388
+ elif operation == SecurityOperation.COMPLIANCE_CHECK:
389
+ return self._compliance_check(inputs)
390
+ elif operation == SecurityOperation.FORENSIC_ANALYSIS:
391
+ return self._forensic_analysis(inputs)
392
+ elif operation == SecurityOperation.AUTOMATED_RESPONSE:
393
+ return self._automated_response(inputs)
394
+ else:
395
+ raise NodeExecutionError(f"Unknown operation: {operation}")
396
+
397
+ except Exception as e:
398
+ raise NodeExecutionError(f"Security operation failed: {str(e)}")
399
+
400
    def _init_dependencies(self, inputs: Dict[str, Any]):
        """Initialize database and audit dependencies.

        Called at the start of every run(); builds a fresh AsyncSQLDatabaseNode
        and EnterpriseAuditLogNode from ``inputs["database_config"]``.
        """
        # Get database config.
        # NOTE(review): the fallback hard-codes "admin"/"admin" credentials —
        # acceptable only for local development; production callers must pass
        # an explicit database_config.
        db_config = inputs.get(
            "database_config",
            {
                "database_type": "postgresql",
                "host": "localhost",
                "port": 5432,
                "database": "kailash_admin",
                "user": "admin",
                "password": "admin",
            },
        )

        # Initialize async database node
        self._db_node = AsyncSQLDatabaseNode(name="security_event_db", **db_config)

        # Initialize audit logging node (shares the same connection settings).
        self._audit_node = EnterpriseAuditLogNode(database_config=db_config)
420
+
421
    def _create_event(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Create a new security event with risk scoring.

        Validates the payload, scores it, persists it to the
        ``security_events`` table, mirrors it into the audit trail, and
        auto-opens an incident when the risk score reaches the threshold.

        Args:
            inputs: Must contain "event_data"; may contain "tenant_id"
                (default "default") and "risk_threshold" (default 7.0).

        Returns:
            Dict under "result" with the serialized event, its risk score,
            and whether/which incident was created.

        Raises:
            NodeValidationError: If a required event_data field is missing.
        """
        event_data = inputs["event_data"]
        tenant_id = inputs.get("tenant_id", "default")

        # Validate required fields before doing any I/O.
        required_fields = ["event_type", "threat_level", "source_ip", "description"]
        for field in required_fields:
            if field not in event_data:
                raise NodeValidationError(f"Missing required field: {field}")

        # Calculate risk score
        risk_score = self._calculate_event_risk_score(event_data)

        # Create security event
        event_id = self._generate_event_id()
        now = datetime.now(UTC)

        security_event = SecurityEvent(
            event_id=event_id,
            # Invalid enum strings raise ValueError here (wrapped by run()).
            event_type=SecurityEventType(event_data["event_type"]),
            threat_level=ThreatLevel(event_data["threat_level"]),
            user_id=event_data.get("user_id"),
            tenant_id=tenant_id,
            source_ip=event_data["source_ip"],
            target_resource=event_data.get("target_resource"),
            description=event_data["description"],
            indicators=event_data.get("indicators", {}),
            risk_score=risk_score,
            timestamp=now,
            detection_method=event_data.get("detection_method", "manual"),
            false_positive_probability=event_data.get(
                "false_positive_probability", 0.0
            ),
        )

        # Insert into database ($n placeholders — PostgreSQL-style driver).
        insert_query = """
        INSERT INTO security_events (
            event_id, event_type, threat_level, user_id, tenant_id, source_ip,
            target_resource, description, indicators, risk_score, timestamp,
            detection_method, false_positive_probability, mitigation_applied
        ) VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14
        )
        """

        self._db_node.config.update(
            {
                "query": insert_query,
                "params": [
                    security_event.event_id,
                    security_event.event_type.value,
                    security_event.threat_level.value,
                    security_event.user_id,
                    security_event.tenant_id,
                    security_event.source_ip,
                    security_event.target_resource,
                    security_event.description,
                    # NOTE(review): indicators is a raw dict bind parameter —
                    # assumes AsyncSQLDatabaseNode serializes it to JSON/JSONB;
                    # confirm against the driver.
                    security_event.indicators,
                    security_event.risk_score,
                    security_event.timestamp,
                    security_event.detection_method,
                    security_event.false_positive_probability,
                    security_event.mitigation_applied,
                ],
            }
        )

        # NOTE(review): db_result is never inspected, so insert failures that
        # do not raise would go unnoticed.
        db_result = self._db_node.run()

        # Mirror the event into the audit trail as a security_violation entry.
        audit_event_data = {
            "event_type": "security_violation",
            "severity": security_event.threat_level.value,
            "user_id": security_event.user_id,
            "action": "security_event_created",
            "description": f"Security event created: {security_event.description}",
            "metadata": {
                "security_event_id": security_event.event_id,
                "event_type": security_event.event_type.value,
                "risk_score": security_event.risk_score,
                "source_ip": security_event.source_ip,
            },
        }

        self._audit_node.run(
            operation="log_event", event_data=audit_event_data, tenant_id=tenant_id
        )

        # Auto-open an incident when the score reaches the alert threshold.
        incident_id = None
        if risk_score >= inputs.get("risk_threshold", 7.0):
            incident_id = self._auto_create_incident(security_event)

        return {
            "result": {
                "security_event": security_event.to_dict(),
                "risk_score": risk_score,
                "incident_created": incident_id is not None,
                "incident_id": incident_id,
                "operation": "create_event",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
526
+
527
    def _analyze_threats(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze security threats in a time window.

        Queries this tenant's events for the configured window and runs
        _perform_threat_analysis over them.

        Args:
            inputs: May contain "analysis_config" (time_window seconds,
                risk_threshold) and "tenant_id" (default "default").

        Returns:
            Dict under "result" with the analysis, the window bounds, and
            the number of events examined.
        """
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        time_window = analysis_config.get("time_window", 3600)  # 1 hour default
        # NOTE(review): this local is unused — _perform_threat_analysis reads
        # risk_threshold from analysis_config itself.
        risk_threshold = analysis_config.get("risk_threshold", 7.0)

        # Calculate time range ending now (UTC).
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(seconds=time_window)

        # Query security events in time window, highest risk first.
        query = """
        SELECT event_id, event_type, threat_level, user_id, source_ip,
               target_resource, description, risk_score, timestamp, indicators
        FROM security_events
        WHERE tenant_id = $1 AND timestamp >= $2 AND timestamp <= $3
        ORDER BY risk_score DESC, timestamp DESC
        """

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, start_time, end_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        # Defensive unwrap: missing keys yield an empty event list.
        events = result.get("result", {}).get("data", [])

        # Analyze threats
        analysis = self._perform_threat_analysis(events, analysis_config)

        return {
            "result": {
                "threat_analysis": analysis,
                "time_window": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "duration_seconds": time_window,
                },
                "total_events": len(events),
                "operation": "analyze_threats",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
574
+
575
    def _monitor_user_behavior(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Monitor user behavior for anomalies.

        Pulls the user's security events over a lookback window and runs
        _analyze_user_behavior on them.

        Args:
            inputs: Must contain "user_id" (KeyError otherwise, wrapped by
                run()); may contain "analysis_config" (lookback_days,
                anomaly_threshold) and "tenant_id" (default "default").

        Returns:
            Dict under "result" with the behavior analysis, the analysis
            period, and the number of events examined.
        """
        user_id = inputs["user_id"]
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        lookback_days = analysis_config.get("lookback_days", 30)
        # NOTE(review): this local is unused — _analyze_user_behavior does
        # not currently consume anomaly_threshold.
        anomaly_threshold = analysis_config.get("anomaly_threshold", 0.8)

        # Get user's historical behavior window ending now (UTC).
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(days=lookback_days)

        # Query user's security events, newest first.
        query = """
        SELECT event_type, threat_level, source_ip, timestamp, risk_score, indicators
        FROM security_events
        WHERE tenant_id = $1 AND user_id = $2 AND timestamp >= $3 AND timestamp <= $4
        ORDER BY timestamp DESC
        """

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, user_id, start_time, end_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        # Defensive unwrap: missing keys yield an empty event list.
        events = result.get("result", {}).get("data", [])

        # Analyze behavior patterns
        behavior_analysis = self._analyze_user_behavior(events, analysis_config)

        return {
            "result": {
                "behavior_analysis": behavior_analysis,
                "user_id": user_id,
                "analysis_period": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "days": lookback_days,
                },
                "events_analyzed": len(events),
                "operation": "monitor_user_behavior",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
623
+
624
+ def _calculate_event_risk_score(self, event_data: Dict[str, Any]) -> float:
625
+ """Calculate risk score for a security event."""
626
+ base_scores = {
627
+ SecurityEventType.CRITICAL.value: 9.0,
628
+ SecurityEventType.SYSTEM_COMPROMISE.value: 9.5,
629
+ SecurityEventType.DATA_EXFILTRATION.value: 9.0,
630
+ SecurityEventType.ACCOUNT_TAKEOVER.value: 8.5,
631
+ SecurityEventType.PRIVILEGE_ESCALATION.value: 8.0,
632
+ SecurityEventType.BRUTE_FORCE_ATTACK.value: 7.5,
633
+ SecurityEventType.INSIDER_THREAT.value: 8.0,
634
+ SecurityEventType.SUSPICIOUS_LOGIN.value: 6.0,
635
+ SecurityEventType.UNAUTHORIZED_ACCESS_ATTEMPT.value: 7.0,
636
+ SecurityEventType.UNUSUAL_DATA_ACCESS.value: 6.5,
637
+ SecurityEventType.POLICY_VIOLATION.value: 5.0,
638
+ SecurityEventType.ANOMALOUS_BEHAVIOR.value: 5.5,
639
+ }
640
+
641
+ event_type = event_data.get("event_type", "custom_threat")
642
+ base_score = base_scores.get(event_type, 5.0)
643
+
644
+ # Adjust based on threat level
645
+ threat_multipliers = {
646
+ "info": 0.5,
647
+ "low": 0.7,
648
+ "medium": 1.0,
649
+ "high": 1.3,
650
+ "critical": 1.5,
651
+ }
652
+
653
+ threat_level = event_data.get("threat_level", "medium")
654
+ multiplier = threat_multipliers.get(threat_level, 1.0)
655
+
656
+ # Adjust based on indicators
657
+ indicators = event_data.get("indicators", {})
658
+ indicator_boost = 0.0
659
+
660
+ if "repeated_attempts" in indicators:
661
+ indicator_boost += 1.0
662
+ if "unusual_location" in indicators:
663
+ indicator_boost += 0.5
664
+ if "off_hours_access" in indicators:
665
+ indicator_boost += 0.3
666
+ if "new_device" in indicators:
667
+ indicator_boost += 0.2
668
+
669
+ # Calculate final score (0-10 scale)
670
+ final_score = min(10.0, (base_score * multiplier) + indicator_boost)
671
+
672
+ return round(final_score, 2)
673
+
674
+ def _perform_threat_analysis(
675
+ self, events: List[Dict[str, Any]], config: Dict[str, Any]
676
+ ) -> Dict[str, Any]:
677
+ """Perform comprehensive threat analysis on events."""
678
+ risk_threshold = config.get("risk_threshold", 7.0)
679
+
680
+ analysis = {
681
+ "high_risk_events": [],
682
+ "threat_patterns": {},
683
+ "ip_analysis": {},
684
+ "user_analysis": {},
685
+ "recommendations": [],
686
+ }
687
+
688
+ # Categorize events by risk
689
+ for event in events:
690
+ if event["risk_score"] >= risk_threshold:
691
+ analysis["high_risk_events"].append(event)
692
+
693
+ # Analyze threat patterns
694
+ threat_types = {}
695
+ for event in events:
696
+ event_type = event["event_type"]
697
+ threat_types[event_type] = threat_types.get(event_type, 0) + 1
698
+
699
+ analysis["threat_patterns"] = threat_types
700
+
701
+ # Analyze IP addresses
702
+ ip_counts = {}
703
+ for event in events:
704
+ ip = event["source_ip"]
705
+ ip_counts[ip] = ip_counts.get(ip, 0) + 1
706
+
707
+ # Flag suspicious IPs (multiple events)
708
+ suspicious_ips = {ip: count for ip, count in ip_counts.items() if count > 3}
709
+ analysis["ip_analysis"] = {
710
+ "total_unique_ips": len(ip_counts),
711
+ "suspicious_ips": suspicious_ips,
712
+ }
713
+
714
+ # Generate recommendations
715
+ if len(analysis["high_risk_events"]) > 5:
716
+ analysis["recommendations"].append(
717
+ "High volume of security events detected - investigate immediately"
718
+ )
719
+
720
+ if suspicious_ips:
721
+ analysis["recommendations"].append(
722
+ f"Consider blocking suspicious IPs: {list(suspicious_ips.keys())}"
723
+ )
724
+
725
+ return analysis
726
+
727
+ def _analyze_user_behavior(
728
+ self, events: List[Dict[str, Any]], config: Dict[str, Any]
729
+ ) -> Dict[str, Any]:
730
+ """Analyze user behavior patterns for anomalies."""
731
+ analysis = {
732
+ "baseline_established": len(events) >= 10,
733
+ "anomalies": [],
734
+ "patterns": {},
735
+ "risk_factors": [],
736
+ }
737
+
738
+ if not analysis["baseline_established"]:
739
+ analysis["anomalies"].append(
740
+ "Insufficient data for baseline - new user or limited activity"
741
+ )
742
+ return analysis
743
+
744
+ # Analyze login patterns
745
+ login_hours = []
746
+ login_ips = {}
747
+
748
+ for event in events:
749
+ if event["event_type"] in ["suspicious_login", "user_login"]:
750
+ hour = datetime.fromisoformat(event["timestamp"]).hour
751
+ login_hours.append(hour)
752
+
753
+ ip = event["source_ip"]
754
+ login_ips[ip] = login_ips.get(ip, 0) + 1
755
+
756
+ # Detect anomalies
757
+ if len(set(login_ips.keys())) > 10:
758
+ analysis["anomalies"].append(
759
+ "Logins from unusually high number of IP addresses"
760
+ )
761
+
762
+ # Check for off-hours activity
763
+ off_hours_count = sum(1 for hour in login_hours if hour < 6 or hour > 22)
764
+ if off_hours_count > len(login_hours) * 0.3:
765
+ analysis["anomalies"].append("High percentage of off-hours activity")
766
+
767
+ analysis["patterns"] = {
768
+ "unique_ips": len(login_ips),
769
+ "off_hours_percentage": (
770
+ (off_hours_count / len(login_hours) * 100) if login_hours else 0
771
+ ),
772
+ "most_common_hour": (
773
+ max(set(login_hours), key=login_hours.count) if login_hours else None
774
+ ),
775
+ }
776
+
777
+ return analysis
778
+
779
    def _auto_create_incident(self, security_event: SecurityEvent) -> str:
        """Automatically create an incident for high-risk security events.

        Builds a ``SecurityIncident`` seeded from the triggering event
        (severity copied from the event's threat level, the event ID linked
        in ``events``) and persists it via the shared DB node.

        Args:
            security_event: The high-risk event that triggered escalation.

        Returns:
            The newly generated incident ID.
        """
        incident_id = self._generate_event_id()
        now = datetime.now(UTC)

        incident = SecurityIncident(
            incident_id=incident_id,
            title=f"High-Risk Security Event: {security_event.event_type.value}",
            description=f"Automated incident created for security event {security_event.event_id}. {security_event.description}",
            status=IncidentStatus.NEW,
            # Incident severity mirrors the triggering event's threat level.
            severity=security_event.threat_level,
            assignee=None,  # auto-created incidents start unassigned
            created_at=now,
            updated_at=now,
            closed_at=None,
            events=[security_event.event_id],
            actions_taken=[],
            impact_assessment={"risk_score": security_event.risk_score},
            tenant_id=security_event.tenant_id,
        )

        # Insert incident into database
        insert_query = """
        INSERT INTO security_incidents (
            incident_id, title, description, status, severity, assignee,
            created_at, updated_at, closed_at, events, actions_taken,
            impact_assessment, tenant_id
        ) VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13
        )
        """

        # NOTE(review): the DB node is driven by mutating its shared config
        # then calling run(); presumably not safe under concurrent use — confirm.
        self._db_node.config.update(
            {
                "query": insert_query,
                "params": [
                    incident.incident_id,
                    incident.title,
                    incident.description,
                    incident.status.value,
                    incident.severity.value,
                    incident.assignee,
                    incident.created_at,
                    incident.updated_at,
                    incident.closed_at,
                    incident.events,
                    incident.actions_taken,
                    incident.impact_assessment,
                    incident.tenant_id,
                ],
            }
        )

        self._db_node.run()

        return incident_id
836
+ def _generate_event_id(self) -> str:
837
+ """Generate unique event/incident ID."""
838
+ import uuid
839
+
840
+ return str(uuid.uuid4())
841
+
842
    def _detect_anomalies(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Detect anomalies using ML-based analysis.

        Loads the tenant's events for the configured time window
        (optionally restricted to one user) and runs the behavioral
        anomaly heuristics over them.

        Args:
            inputs: Dict with optional ``analysis_config`` (``time_window``
                in seconds, ``anomaly_threshold``), ``tenant_id`` and
                ``user_id`` keys.

        Returns:
            Node-style result dict with the anomaly list, the analysis
            period and the number of events examined.
        """
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        user_id = inputs.get("user_id")
        time_window = analysis_config.get("time_window", 86400)  # 24 hours default
        # NOTE(review): anomaly_threshold is read here but not referenced
        # below — presumably intended for the downstream heuristics; confirm.
        anomaly_threshold = analysis_config.get("anomaly_threshold", 0.8)

        # Calculate time range
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(seconds=time_window)

        # Query recent events for pattern analysis
        query = """
        SELECT event_type, user_id, source_ip, timestamp, risk_score, indicators
        FROM security_events
        WHERE tenant_id = $1 AND timestamp >= $2 AND timestamp <= $3
        """
        params = [tenant_id, start_time, end_time]

        # Optional per-user restriction appended as the next placeholder.
        if user_id:
            query += " AND user_id = $4"
            params.append(user_id)

        query += " ORDER BY timestamp DESC"

        self._db_node.config.update(
            {"query": query, "params": params, "fetch_mode": "all"}
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Perform anomaly detection
        anomalies = self._detect_behavioral_anomalies(events, analysis_config)

        return {
            "result": {
                "anomalies": anomalies,
                "analysis_period": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "duration_seconds": time_window,
                },
                "events_analyzed": len(events),
                "operation": "detect_anomalies",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
892
    def _generate_alerts(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Generate security alerts based on events.

        Pulls up to 50 events from the last hour whose risk score meets
        the configured threshold and wraps each one in an alert record
        with a sequential ``ALT-``-prefixed ID.

        Args:
            inputs: Dict with optional ``analysis_config``
                (``risk_threshold``, ``alert_types``) and ``tenant_id``.

        Returns:
            Node-style result dict with the alert list and count.
        """
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        risk_threshold = analysis_config.get("risk_threshold", 7.0)
        # NOTE(review): alert_types is read from the config but not used
        # below — every alert is emitted as "high_risk_event"; confirm intent.
        alert_types = analysis_config.get(
            "alert_types", ["high_risk", "pattern_detected", "anomaly"]
        )

        # Get recent high-risk events
        query = """
        SELECT event_id, event_type, threat_level, user_id, source_ip, risk_score, timestamp
        FROM security_events
        WHERE tenant_id = $1 AND risk_score >= $2 AND timestamp >= $3
        ORDER BY risk_score DESC, timestamp DESC
        LIMIT 50
        """

        # Only events from the past hour are considered alert-worthy.
        lookback_time = datetime.now(UTC) - timedelta(hours=1)

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, risk_threshold, lookback_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        high_risk_events = result.get("result", {}).get("data", [])

        # Generate alerts
        alerts = []
        alert_id = 1  # sequential counter; IDs are only unique within this call

        for event in high_risk_events:
            alert = {
                "alert_id": f"ALT-{alert_id:06d}",
                "alert_type": "high_risk_event",
                # Risk score >= 8.0 escalates severity from medium to high.
                "severity": "high" if event["risk_score"] >= 8.0 else "medium",
                "title": f"High-Risk Security Event: {event['event_type']}",
                "description": f"Security event with risk score {event['risk_score']} detected",
                "event_id": event["event_id"],
                "user_id": event["user_id"],
                "source_ip": event["source_ip"],
                "created_at": datetime.now(UTC).isoformat(),
                "status": "active",
            }
            alerts.append(alert)
            alert_id += 1

        return {
            "result": {
                "alerts": alerts,
                "alert_count": len(alerts),
                "risk_threshold": risk_threshold,
                "operation": "generate_alerts",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
953
+ def _get_incidents(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
954
+ """Get security incidents with filtering."""
955
+ tenant_id = inputs.get("tenant_id", "default")
956
+ filters = inputs.get("filters", {})
957
+ pagination = inputs.get("pagination", {"page": 1, "size": 20})
958
+
959
+ # Build WHERE clause
960
+ where_conditions = ["tenant_id = $1"]
961
+ params = [tenant_id]
962
+ param_count = 1
963
+
964
+ if "status" in filters:
965
+ param_count += 1
966
+ where_conditions.append(f"status = ${param_count}")
967
+ params.append(filters["status"])
968
+
969
+ if "severity" in filters:
970
+ param_count += 1
971
+ where_conditions.append(f"severity = ${param_count}")
972
+ params.append(filters["severity"])
973
+
974
+ if "assignee" in filters:
975
+ param_count += 1
976
+ where_conditions.append(f"assignee = ${param_count}")
977
+ params.append(filters["assignee"])
978
+
979
+ # Pagination
980
+ page = pagination.get("page", 1)
981
+ size = pagination.get("size", 20)
982
+ offset = (page - 1) * size
983
+
984
+ # Query incidents
985
+ query = f"""
986
+ SELECT incident_id, title, description, status, severity, assignee,
987
+ created_at, updated_at, closed_at, events, actions_taken
988
+ FROM security_incidents
989
+ WHERE {' AND '.join(where_conditions)}
990
+ ORDER BY created_at DESC
991
+ LIMIT {size} OFFSET {offset}
992
+ """
993
+
994
+ self._db_node.config.update(
995
+ {"query": query, "params": params, "fetch_mode": "all"}
996
+ )
997
+
998
+ result = self._db_node.run()
999
+ incidents = result.get("result", {}).get("data", [])
1000
+
1001
+ return {
1002
+ "result": {
1003
+ "incidents": incidents,
1004
+ "pagination": {"page": page, "size": size, "total": len(incidents)},
1005
+ "filters_applied": filters,
1006
+ "operation": "get_incidents",
1007
+ "timestamp": datetime.now(UTC).isoformat(),
1008
+ }
1009
+ }
1010
+
1011
    def _create_incident(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Create security incident manually.

        Validates the minimal required fields, constructs a
        ``SecurityIncident`` (generating a fresh ID and timestamps) and
        persists it via the shared DB node.

        Args:
            inputs: Dict with required ``incident_data`` (must contain
                ``title``, ``description`` and ``severity``) and optional
                ``tenant_id``.

        Returns:
            Node-style result dict containing the created incident.

        Raises:
            NodeValidationError: If a required field is missing.
        """
        incident_data = inputs["incident_data"]
        tenant_id = inputs.get("tenant_id", "default")

        # Validate required fields
        required_fields = ["title", "description", "severity"]
        for field in required_fields:
            if field not in incident_data:
                raise NodeValidationError(f"Missing required field: {field}")

        # Create incident
        incident_id = self._generate_event_id()
        now = datetime.now(UTC)

        incident = SecurityIncident(
            incident_id=incident_id,
            title=incident_data["title"],
            description=incident_data["description"],
            # Enum construction raises on unknown status/severity values.
            status=IncidentStatus(incident_data.get("status", "new")),
            severity=ThreatLevel(incident_data["severity"]),
            assignee=incident_data.get("assignee"),
            created_at=now,
            updated_at=now,
            closed_at=None,
            events=incident_data.get("events", []),
            actions_taken=[],
            impact_assessment=incident_data.get("impact_assessment", {}),
            tenant_id=tenant_id,
        )

        # Insert into database
        insert_query = """
        INSERT INTO security_incidents (
            incident_id, title, description, status, severity, assignee,
            created_at, updated_at, closed_at, events, actions_taken,
            impact_assessment, tenant_id
        ) VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13
        )
        """

        self._db_node.config.update(
            {
                "query": insert_query,
                "params": [
                    incident.incident_id,
                    incident.title,
                    incident.description,
                    incident.status.value,
                    incident.severity.value,
                    incident.assignee,
                    incident.created_at,
                    incident.updated_at,
                    incident.closed_at,
                    incident.events,
                    incident.actions_taken,
                    incident.impact_assessment,
                    incident.tenant_id,
                ],
            }
        )

        self._db_node.run()

        return {
            "result": {
                "incident": incident.to_dict(),
                "operation": "create_incident",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1084
    def _update_incident(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Update security incident status and details.

        Builds a dynamic UPDATE statement containing only the supplied
        fields (``status``, ``assignee``, ``actions_taken``);
        ``updated_at`` is always refreshed, and ``closed_at`` is stamped
        when the status transitions to "closed".

        Args:
            inputs: Dict with required ``incident_id`` and
                ``incident_data`` keys plus optional ``tenant_id``.

        Returns:
            Node-style result dict confirming the update.
        """
        incident_id = inputs["incident_id"]
        incident_data = inputs["incident_data"]
        tenant_id = inputs.get("tenant_id", "default")

        # Build update fields; param_count tracks the next positional
        # placeholder so the SET clauses and params list stay aligned.
        update_fields = ["updated_at = $1"]
        params = [datetime.now(UTC)]
        param_count = 1

        if "status" in incident_data:
            param_count += 1
            update_fields.append(f"status = ${param_count}")
            params.append(incident_data["status"])

            # Set closed_at if status is closed
            if incident_data["status"] == "closed":
                param_count += 1
                update_fields.append(f"closed_at = ${param_count}")
                params.append(datetime.now(UTC))

        if "assignee" in incident_data:
            param_count += 1
            update_fields.append(f"assignee = ${param_count}")
            params.append(incident_data["assignee"])

        if "actions_taken" in incident_data:
            param_count += 1
            update_fields.append(f"actions_taken = ${param_count}")
            params.append(incident_data["actions_taken"])

        # Add where conditions: the final two placeholders are reserved
        # for incident_id and tenant_id, hence ${param_count-1} and
        # ${param_count} in the WHERE clause below.
        param_count += 1
        params.append(incident_id)
        param_count += 1
        params.append(tenant_id)

        query = f"""
        UPDATE security_incidents
        SET {', '.join(update_fields)}
        WHERE incident_id = ${param_count-1} AND tenant_id = ${param_count}
        """

        self._db_node.config.update({"query": query, "params": params})

        self._db_node.run()

        return {
            "result": {
                "incident_id": incident_id,
                "updated": True,
                "operation": "update_incident",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1141
+ def _get_threat_intelligence(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
1142
+ """Get threat intelligence from external sources."""
1143
+ analysis_config = inputs.get("analysis_config", {})
1144
+ threat_types = analysis_config.get("threat_types", [])
1145
+ lookback_days = analysis_config.get("lookback_days", 30)
1146
+
1147
+ # Mock threat intelligence data (in real implementation, would integrate with external feeds)
1148
+ threat_intelligence = {
1149
+ "indicators": {
1150
+ "malicious_ips": ["192.168.1.100", "10.0.0.50"],
1151
+ "suspicious_domains": ["malicious-site.com", "phishing-domain.net"],
1152
+ "known_attack_patterns": ["brute_force", "sql_injection", "xss"],
1153
+ },
1154
+ "threat_feeds": [
1155
+ {
1156
+ "source": "Internal Analysis",
1157
+ "last_updated": datetime.now(UTC).isoformat(),
1158
+ "confidence": "high",
1159
+ "indicators_count": 25,
1160
+ }
1161
+ ],
1162
+ "risk_assessment": {
1163
+ "current_threat_level": "medium",
1164
+ "trending_threats": ["phishing_attempt", "insider_threat"],
1165
+ "recommendations": [
1166
+ "Monitor for unusual login patterns",
1167
+ "Review email security policies",
1168
+ "Enhance endpoint detection",
1169
+ ],
1170
+ },
1171
+ }
1172
+
1173
+ return {
1174
+ "result": {
1175
+ "threat_intelligence": threat_intelligence,
1176
+ "generated_at": datetime.now(UTC).isoformat(),
1177
+ "operation": "get_threat_intelligence",
1178
+ "timestamp": datetime.now(UTC).isoformat(),
1179
+ }
1180
+ }
1181
+
1182
    def _calculate_risk_score(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Calculate comprehensive risk score for entity.

        Loads the entity's events over the lookback window and computes a
        recency-weighted average of their risk scores (newer events count
        more; the weight floors at 0.1), then maps the score to a category.

        Args:
            inputs: Dict with required ``entity_id``, optional
                ``entity_type`` ("user" or "ip", default "user"),
                ``analysis_config`` (``lookback_days``) and ``tenant_id``.

        Returns:
            Node-style result dict with the score, category and analysis
            period.

        Raises:
            NodeValidationError: If ``entity_type`` is unsupported.
        """
        entity_type = inputs.get("entity_type", "user")  # user, ip, domain
        entity_id = inputs["entity_id"]
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        lookback_days = analysis_config.get("lookback_days", 30)

        # Calculate time range
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(days=lookback_days)

        # Query events for entity; the queries differ only in the column
        # matched against entity_id (user_id vs source_ip).
        if entity_type == "user":
            query = """
            SELECT event_type, risk_score, timestamp
            FROM security_events
            WHERE tenant_id = $1 AND user_id = $2 AND timestamp >= $3 AND timestamp <= $4
            ORDER BY timestamp DESC
            """
        elif entity_type == "ip":
            query = """
            SELECT event_type, risk_score, timestamp
            FROM security_events
            WHERE tenant_id = $1 AND source_ip = $2 AND timestamp >= $3 AND timestamp <= $4
            ORDER BY timestamp DESC
            """
        else:
            raise NodeValidationError(f"Unsupported entity type: {entity_type}")

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, entity_id, start_time, end_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Calculate risk metrics
        if not events:
            risk_score = 0.0
        else:
            # Calculate weighted average with recency bias
            total_weighted_score = 0.0
            total_weight = 0.0

            for event in events:
                # NOTE(review): assumes timestamp is an ISO-8601 string
                # (possibly "Z"-suffixed), not a datetime — confirm what the
                # DB driver returns.
                age_days = (
                    end_time
                    - datetime.fromisoformat(event["timestamp"].replace("Z", "+00:00"))
                ).days
                # Linear decay over the lookback window, floored at 0.1 so
                # old events never vanish entirely.
                recency_weight = max(0.1, 1.0 - (age_days / lookback_days))
                weight = recency_weight

                total_weighted_score += event["risk_score"] * weight
                total_weight += weight

            risk_score = (
                total_weighted_score / total_weight if total_weight > 0 else 0.0
            )

        # Calculate risk category (fixed 2-point bands on the 0-10 scale)
        if risk_score >= 8.0:
            risk_category = "critical"
        elif risk_score >= 6.0:
            risk_category = "high"
        elif risk_score >= 4.0:
            risk_category = "medium"
        elif risk_score >= 2.0:
            risk_category = "low"
        else:
            risk_category = "minimal"

        return {
            "result": {
                "entity_type": entity_type,
                "entity_id": entity_id,
                "risk_score": round(risk_score, 2),
                "risk_category": risk_category,
                "events_analyzed": len(events),
                "analysis_period": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "days": lookback_days,
                },
                "operation": "calculate_risk_score",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1275
+ def _compliance_check(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
1276
+ """Check compliance violations and requirements."""
1277
+ compliance_framework = inputs.get(
1278
+ "compliance_framework", "general"
1279
+ ) # gdpr, hipaa, sox, etc.
1280
+ tenant_id = inputs.get("tenant_id", "default")
1281
+ check_type = inputs.get("check_type", "full") # full, incremental
1282
+
1283
+ # Mock compliance checking (real implementation would have detailed rules)
1284
+ compliance_results = {
1285
+ "framework": compliance_framework,
1286
+ "overall_score": 85.5,
1287
+ "status": "compliant",
1288
+ "violations": [
1289
+ {
1290
+ "rule_id": "LOG_RETENTION_001",
1291
+ "severity": "medium",
1292
+ "description": "Log retention period below recommended 2 years",
1293
+ "current_value": "18 months",
1294
+ "required_value": "24 months",
1295
+ "remediation": "Update log retention policy",
1296
+ }
1297
+ ],
1298
+ "recommendations": [
1299
+ "Implement automated log archiving",
1300
+ "Review access control policies quarterly",
1301
+ "Enhance incident response procedures",
1302
+ ],
1303
+ "next_review_date": (datetime.now(UTC) + timedelta(days=90)).isoformat(),
1304
+ }
1305
+
1306
+ return {
1307
+ "result": {
1308
+ "compliance_check": compliance_results,
1309
+ "check_performed_at": datetime.now(UTC).isoformat(),
1310
+ "operation": "compliance_check",
1311
+ "timestamp": datetime.now(UTC).isoformat(),
1312
+ }
1313
+ }
1314
+
1315
    def _forensic_analysis(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Perform forensic analysis on security events.

        Loads the events to examine — either an explicit event-ID list or
        all events linked to an incident — and returns a forensic report.
        The timeline comes from the DB; the patterns/artifacts sections
        below are currently static placeholder data.

        Args:
            inputs: Dict with ``event_ids`` (list) or ``incident_id`` (one
                of the two is required), plus optional ``analysis_config``
                and ``tenant_id``.

        Returns:
            Node-style result dict with the forensic report.

        Raises:
            NodeValidationError: If neither event_ids nor incident_id is given.
        """
        analysis_config = inputs.get("analysis_config", {})
        event_ids = inputs.get("event_ids", [])
        incident_id = inputs.get("incident_id")
        tenant_id = inputs.get("tenant_id", "default")

        # Query events for forensic analysis
        if event_ids:
            # Placeholders start at $2 because $1 is tenant_id.
            placeholders = ",".join(["$" + str(i + 2) for i in range(len(event_ids))])
            query = f"""
            SELECT event_id, event_type, user_id, source_ip, timestamp, indicators, description
            FROM security_events
            WHERE tenant_id = $1 AND event_id IN ({placeholders})
            ORDER BY timestamp ASC
            """
            params = [tenant_id] + event_ids
        elif incident_id:
            # Join via the incident's events array to pull linked events.
            query = """
            SELECT se.event_id, se.event_type, se.user_id, se.source_ip, se.timestamp, se.indicators, se.description
            FROM security_events se
            JOIN security_incidents si ON se.event_id = ANY(si.events)
            WHERE si.tenant_id = $1 AND si.incident_id = $2
            ORDER BY se.timestamp ASC
            """
            params = [tenant_id, incident_id]
        else:
            raise NodeValidationError(
                "Either event_ids or incident_id must be provided"
            )

        self._db_node.config.update(
            {"query": query, "params": params, "fetch_mode": "all"}
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Perform forensic analysis
        # NOTE(review): everything below except "timeline" is hard-coded
        # sample output, not derived from the events — placeholder pending
        # a real analysis implementation.
        forensic_results = {
            "timeline": events,
            "patterns": {
                "attack_vector": "credential_compromise",
                "techniques_used": ["brute_force", "privilege_escalation"],
                "affected_systems": ["web_server", "database"],
                "data_accessed": ["customer_records", "financial_data"],
            },
            "artifacts": {
                "log_files": ["/var/log/auth.log", "/var/log/apache2/access.log"],
                "network_captures": ["capture_20250612.pcap"],
                "file_hashes": ["sha256:abc123..."],
            },
            "recommendations": [
                "Reset all potentially compromised credentials",
                "Review system access logs for unauthorized activity",
                "Implement additional monitoring on affected systems",
            ],
        }

        return {
            "result": {
                "forensic_analysis": forensic_results,
                "events_analyzed": len(events),
                "analysis_completed_at": datetime.now(UTC).isoformat(),
                "operation": "forensic_analysis",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1384
+ def _automated_response(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
1385
+ """Execute automated security response actions."""
1386
+ response_actions = inputs["response_actions"]
1387
+ event_id = inputs.get("event_id")
1388
+ incident_id = inputs.get("incident_id")
1389
+ tenant_id = inputs.get("tenant_id", "default")
1390
+
1391
+ # Execute response actions
1392
+ executed_actions = []
1393
+ failed_actions = []
1394
+
1395
+ for action in response_actions:
1396
+ action_type = action.get("type")
1397
+ action_params = action.get("parameters", {})
1398
+
1399
+ try:
1400
+ if action_type == "block_ip":
1401
+ # Mock IP blocking
1402
+ result = {
1403
+ "action": "block_ip",
1404
+ "ip_address": action_params.get("ip"),
1405
+ "status": "blocked",
1406
+ "duration": action_params.get("duration", "24h"),
1407
+ }
1408
+ elif action_type == "disable_user":
1409
+ # Mock user disabling
1410
+ result = {
1411
+ "action": "disable_user",
1412
+ "user_id": action_params.get("user_id"),
1413
+ "status": "disabled",
1414
+ }
1415
+ elif action_type == "quarantine_file":
1416
+ # Mock file quarantine
1417
+ result = {
1418
+ "action": "quarantine_file",
1419
+ "file_path": action_params.get("file_path"),
1420
+ "status": "quarantined",
1421
+ }
1422
+ else:
1423
+ result = {"action": action_type, "status": "unknown_action"}
1424
+
1425
+ result["executed_at"] = datetime.now(UTC).isoformat()
1426
+ executed_actions.append(result)
1427
+
1428
+ except Exception as e:
1429
+ failed_actions.append(
1430
+ {
1431
+ "action": action_type,
1432
+ "error": str(e),
1433
+ "parameters": action_params,
1434
+ }
1435
+ )
1436
+
1437
+ return {
1438
+ "result": {
1439
+ "executed_actions": executed_actions,
1440
+ "failed_actions": failed_actions,
1441
+ "total_actions": len(response_actions),
1442
+ "success_rate": (
1443
+ len(executed_actions) / len(response_actions) * 100
1444
+ if response_actions
1445
+ else 0
1446
+ ),
1447
+ "operation": "automated_response",
1448
+ "timestamp": datetime.now(UTC).isoformat(),
1449
+ }
1450
+ }
1451
+
1452
+ def _detect_behavioral_anomalies(
1453
+ self, events: List[Dict[str, Any]], config: Dict[str, Any]
1454
+ ) -> List[Dict[str, Any]]:
1455
+ """Detect behavioral anomalies in security events."""
1456
+ anomalies = []
1457
+
1458
+ if not events:
1459
+ return anomalies
1460
+
1461
+ # Group events by user
1462
+ user_events = {}
1463
+ for event in events:
1464
+ user_id = event.get("user_id")
1465
+ if user_id:
1466
+ if user_id not in user_events:
1467
+ user_events[user_id] = []
1468
+ user_events[user_id].append(event)
1469
+
1470
+ # Detect anomalies for each user
1471
+ for user_id, user_event_list in user_events.items():
1472
+ # Check for unusual login times
1473
+ login_hours = []
1474
+ for event in user_event_list:
1475
+ if event["event_type"] in ["user_login", "suspicious_login"]:
1476
+ hour = datetime.fromisoformat(
1477
+ event["timestamp"].replace("Z", "+00:00")
1478
+ ).hour
1479
+ login_hours.append(hour)
1480
+
1481
+ if login_hours:
1482
+ # Detect off-hours activity (before 6 AM or after 10 PM)
1483
+ off_hours_count = sum(
1484
+ 1 for hour in login_hours if hour < 6 or hour > 22
1485
+ )
1486
+ if off_hours_count > len(login_hours) * 0.5: # More than 50% off-hours
1487
+ anomalies.append(
1488
+ {
1489
+ "type": "unusual_login_times",
1490
+ "user_id": user_id,
1491
+ "description": f"High percentage of off-hours logins: {off_hours_count}/{len(login_hours)}",
1492
+ "severity": "medium",
1493
+ "confidence": 0.8,
1494
+ }
1495
+ )
1496
+
1497
+ # Check for rapid successive events
1498
+ if len(user_event_list) > 10:
1499
+ timestamps = [
1500
+ datetime.fromisoformat(e["timestamp"].replace("Z", "+00:00"))
1501
+ for e in user_event_list
1502
+ ]
1503
+ timestamps.sort()
1504
+
1505
+ rapid_events = 0
1506
+ for i in range(1, len(timestamps)):
1507
+ if (
1508
+ timestamps[i] - timestamps[i - 1]
1509
+ ).total_seconds() < 60: # Less than 1 minute apart
1510
+ rapid_events += 1
1511
+
1512
+ if rapid_events > 5:
1513
+ anomalies.append(
1514
+ {
1515
+ "type": "rapid_successive_events",
1516
+ "user_id": user_id,
1517
+ "description": f"Unusually rapid event sequence: {rapid_events} events within 1 minute",
1518
+ "severity": "high",
1519
+ "confidence": 0.9,
1520
+ }
1521
+ )
1522
+
1523
+ return anomalies