kailash 0.3.2__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146) hide show
  1. kailash/__init__.py +33 -1
  2. kailash/access_control/__init__.py +129 -0
  3. kailash/access_control/managers.py +461 -0
  4. kailash/access_control/rule_evaluators.py +467 -0
  5. kailash/access_control_abac.py +825 -0
  6. kailash/config/__init__.py +27 -0
  7. kailash/config/database_config.py +359 -0
  8. kailash/database/__init__.py +28 -0
  9. kailash/database/execution_pipeline.py +499 -0
  10. kailash/middleware/__init__.py +306 -0
  11. kailash/middleware/auth/__init__.py +33 -0
  12. kailash/middleware/auth/access_control.py +436 -0
  13. kailash/middleware/auth/auth_manager.py +422 -0
  14. kailash/middleware/auth/jwt_auth.py +477 -0
  15. kailash/middleware/auth/kailash_jwt_auth.py +616 -0
  16. kailash/middleware/communication/__init__.py +37 -0
  17. kailash/middleware/communication/ai_chat.py +989 -0
  18. kailash/middleware/communication/api_gateway.py +802 -0
  19. kailash/middleware/communication/events.py +470 -0
  20. kailash/middleware/communication/realtime.py +710 -0
  21. kailash/middleware/core/__init__.py +21 -0
  22. kailash/middleware/core/agent_ui.py +890 -0
  23. kailash/middleware/core/schema.py +643 -0
  24. kailash/middleware/core/workflows.py +396 -0
  25. kailash/middleware/database/__init__.py +63 -0
  26. kailash/middleware/database/base.py +113 -0
  27. kailash/middleware/database/base_models.py +525 -0
  28. kailash/middleware/database/enums.py +106 -0
  29. kailash/middleware/database/migrations.py +12 -0
  30. kailash/{api/database.py → middleware/database/models.py} +183 -291
  31. kailash/middleware/database/repositories.py +685 -0
  32. kailash/middleware/database/session_manager.py +19 -0
  33. kailash/middleware/mcp/__init__.py +38 -0
  34. kailash/middleware/mcp/client_integration.py +585 -0
  35. kailash/middleware/mcp/enhanced_server.py +576 -0
  36. kailash/nodes/__init__.py +25 -3
  37. kailash/nodes/admin/__init__.py +35 -0
  38. kailash/nodes/admin/audit_log.py +794 -0
  39. kailash/nodes/admin/permission_check.py +864 -0
  40. kailash/nodes/admin/role_management.py +823 -0
  41. kailash/nodes/admin/security_event.py +1519 -0
  42. kailash/nodes/admin/user_management.py +944 -0
  43. kailash/nodes/ai/a2a.py +24 -7
  44. kailash/nodes/ai/ai_providers.py +1 -0
  45. kailash/nodes/ai/embedding_generator.py +11 -11
  46. kailash/nodes/ai/intelligent_agent_orchestrator.py +99 -11
  47. kailash/nodes/ai/llm_agent.py +407 -2
  48. kailash/nodes/ai/self_organizing.py +85 -10
  49. kailash/nodes/api/auth.py +287 -6
  50. kailash/nodes/api/rest.py +151 -0
  51. kailash/nodes/auth/__init__.py +17 -0
  52. kailash/nodes/auth/directory_integration.py +1228 -0
  53. kailash/nodes/auth/enterprise_auth_provider.py +1328 -0
  54. kailash/nodes/auth/mfa.py +2338 -0
  55. kailash/nodes/auth/risk_assessment.py +872 -0
  56. kailash/nodes/auth/session_management.py +1093 -0
  57. kailash/nodes/auth/sso.py +1040 -0
  58. kailash/nodes/base.py +344 -13
  59. kailash/nodes/base_cycle_aware.py +4 -2
  60. kailash/nodes/base_with_acl.py +1 -1
  61. kailash/nodes/code/python.py +283 -10
  62. kailash/nodes/compliance/__init__.py +9 -0
  63. kailash/nodes/compliance/data_retention.py +1888 -0
  64. kailash/nodes/compliance/gdpr.py +2004 -0
  65. kailash/nodes/data/__init__.py +22 -2
  66. kailash/nodes/data/async_connection.py +469 -0
  67. kailash/nodes/data/async_sql.py +757 -0
  68. kailash/nodes/data/async_vector.py +598 -0
  69. kailash/nodes/data/readers.py +767 -0
  70. kailash/nodes/data/retrieval.py +360 -1
  71. kailash/nodes/data/sharepoint_graph.py +397 -21
  72. kailash/nodes/data/sql.py +94 -5
  73. kailash/nodes/data/streaming.py +68 -8
  74. kailash/nodes/data/vector_db.py +54 -4
  75. kailash/nodes/enterprise/__init__.py +13 -0
  76. kailash/nodes/enterprise/batch_processor.py +741 -0
  77. kailash/nodes/enterprise/data_lineage.py +497 -0
  78. kailash/nodes/logic/convergence.py +31 -9
  79. kailash/nodes/logic/operations.py +14 -3
  80. kailash/nodes/mixins/__init__.py +8 -0
  81. kailash/nodes/mixins/event_emitter.py +201 -0
  82. kailash/nodes/mixins/mcp.py +9 -4
  83. kailash/nodes/mixins/security.py +165 -0
  84. kailash/nodes/monitoring/__init__.py +7 -0
  85. kailash/nodes/monitoring/performance_benchmark.py +2497 -0
  86. kailash/nodes/rag/__init__.py +284 -0
  87. kailash/nodes/rag/advanced.py +1615 -0
  88. kailash/nodes/rag/agentic.py +773 -0
  89. kailash/nodes/rag/conversational.py +999 -0
  90. kailash/nodes/rag/evaluation.py +875 -0
  91. kailash/nodes/rag/federated.py +1188 -0
  92. kailash/nodes/rag/graph.py +721 -0
  93. kailash/nodes/rag/multimodal.py +671 -0
  94. kailash/nodes/rag/optimized.py +933 -0
  95. kailash/nodes/rag/privacy.py +1059 -0
  96. kailash/nodes/rag/query_processing.py +1335 -0
  97. kailash/nodes/rag/realtime.py +764 -0
  98. kailash/nodes/rag/registry.py +547 -0
  99. kailash/nodes/rag/router.py +837 -0
  100. kailash/nodes/rag/similarity.py +1854 -0
  101. kailash/nodes/rag/strategies.py +566 -0
  102. kailash/nodes/rag/workflows.py +575 -0
  103. kailash/nodes/security/__init__.py +19 -0
  104. kailash/nodes/security/abac_evaluator.py +1411 -0
  105. kailash/nodes/security/audit_log.py +91 -0
  106. kailash/nodes/security/behavior_analysis.py +1893 -0
  107. kailash/nodes/security/credential_manager.py +401 -0
  108. kailash/nodes/security/rotating_credentials.py +760 -0
  109. kailash/nodes/security/security_event.py +132 -0
  110. kailash/nodes/security/threat_detection.py +1103 -0
  111. kailash/nodes/testing/__init__.py +9 -0
  112. kailash/nodes/testing/credential_testing.py +499 -0
  113. kailash/nodes/transform/__init__.py +10 -2
  114. kailash/nodes/transform/chunkers.py +592 -1
  115. kailash/nodes/transform/processors.py +484 -14
  116. kailash/nodes/validation.py +321 -0
  117. kailash/runtime/access_controlled.py +1 -1
  118. kailash/runtime/async_local.py +41 -7
  119. kailash/runtime/docker.py +1 -1
  120. kailash/runtime/local.py +474 -55
  121. kailash/runtime/parallel.py +1 -1
  122. kailash/runtime/parallel_cyclic.py +1 -1
  123. kailash/runtime/testing.py +210 -2
  124. kailash/utils/migrations/__init__.py +25 -0
  125. kailash/utils/migrations/generator.py +433 -0
  126. kailash/utils/migrations/models.py +231 -0
  127. kailash/utils/migrations/runner.py +489 -0
  128. kailash/utils/secure_logging.py +342 -0
  129. kailash/workflow/__init__.py +16 -0
  130. kailash/workflow/cyclic_runner.py +3 -4
  131. kailash/workflow/graph.py +70 -2
  132. kailash/workflow/resilience.py +249 -0
  133. kailash/workflow/templates.py +726 -0
  134. {kailash-0.3.2.dist-info → kailash-0.4.0.dist-info}/METADATA +253 -20
  135. kailash-0.4.0.dist-info/RECORD +223 -0
  136. kailash/api/__init__.py +0 -17
  137. kailash/api/__main__.py +0 -6
  138. kailash/api/studio_secure.py +0 -893
  139. kailash/mcp/__main__.py +0 -13
  140. kailash/mcp/server_new.py +0 -336
  141. kailash/mcp/servers/__init__.py +0 -12
  142. kailash-0.3.2.dist-info/RECORD +0 -136
  143. {kailash-0.3.2.dist-info → kailash-0.4.0.dist-info}/WHEEL +0 -0
  144. {kailash-0.3.2.dist-info → kailash-0.4.0.dist-info}/entry_points.txt +0 -0
  145. {kailash-0.3.2.dist-info → kailash-0.4.0.dist-info}/licenses/LICENSE +0 -0
  146. {kailash-0.3.2.dist-info → kailash-0.4.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1519 @@
1
+ """Enterprise security event monitoring node for threat detection and response.
2
+
3
+ This node provides specialized security event processing, threat detection,
4
+ and automated response capabilities. Built for enterprise security operations
5
+ centers (SOCs) with real-time monitoring, alerting, and integration with
6
+ external security systems.
7
+
8
+ Features:
9
+ - Real-time security event processing
10
+ - Threat detection with ML-based analytics
11
+ - Automated incident response workflows
12
+ - Integration with SIEM and SOAR systems
13
+ - Risk scoring and escalation
14
+ - Security metrics and dashboards
15
+ - Compliance violation detection
16
+ - Forensic data collection
17
+ """
18
+
19
+ import hashlib
20
+ import json
21
+ from dataclasses import dataclass
22
+ from datetime import UTC, datetime, timedelta
23
+ from enum import Enum
24
+ from typing import Any, Dict, List, Optional, Tuple
25
+
26
+ from kailash.access_control import UserContext
27
+ from kailash.nodes.admin.audit_log import AuditEventType, AuditLogNode, AuditSeverity
28
+ from kailash.nodes.base import Node, NodeParameter, register_node
29
+ from kailash.nodes.data import AsyncSQLDatabaseNode
30
+ from kailash.sdk_exceptions import NodeExecutionError, NodeValidationError
31
+
32
+
33
class SecurityEventType(Enum):
    """Types of security events.

    The string values are persisted in the ``security_events`` table and
    used as lookup keys during risk scoring, so they should be treated as a
    stable contract.
    """

    # Authentication / account-access events
    SUSPICIOUS_LOGIN = "suspicious_login"
    MULTIPLE_FAILED_LOGINS = "multiple_failed_logins"
    PRIVILEGE_ESCALATION = "privilege_escalation"
    UNAUTHORIZED_ACCESS_ATTEMPT = "unauthorized_access_attempt"
    # Data-centric events
    DATA_EXFILTRATION = "data_exfiltration"
    UNUSUAL_DATA_ACCESS = "unusual_data_access"
    # Attack patterns
    BRUTE_FORCE_ATTACK = "brute_force_attack"
    ACCOUNT_TAKEOVER = "account_takeover"
    INSIDER_THREAT = "insider_threat"
    MALWARE_DETECTION = "malware_detection"
    PHISHING_ATTEMPT = "phishing_attempt"
    # Policy / compliance events
    POLICY_VIOLATION = "policy_violation"
    COMPLIANCE_BREACH = "compliance_breach"
    # Behavioral and catch-all events
    ANOMALOUS_BEHAVIOR = "anomalous_behavior"
    SYSTEM_COMPROMISE = "system_compromise"
    CUSTOM_THREAT = "custom_threat"
52
+
53
+
54
class ThreatLevel(Enum):
    """Threat severity levels, ordered from least to most severe.

    The values feed the multiplier table in risk scoring
    (info=0.5x ... critical=1.5x) and are stored with each event.
    """

    INFO = "info"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
62
+
63
+
64
class SecurityOperation(Enum):
    """Supported security operations.

    Each value selects the matching ``_<value>`` handler method inside
    ``SecurityEventNode.run``.
    """

    CREATE_EVENT = "create_event"
    ANALYZE_THREATS = "analyze_threats"
    DETECT_ANOMALIES = "detect_anomalies"
    GENERATE_ALERTS = "generate_alerts"
    GET_INCIDENTS = "get_incidents"
    CREATE_INCIDENT = "create_incident"
    UPDATE_INCIDENT = "update_incident"
    GET_THREAT_INTELLIGENCE = "get_threat_intelligence"
    CALCULATE_RISK_SCORE = "calculate_risk_score"
    MONITOR_USER_BEHAVIOR = "monitor_user_behavior"
    COMPLIANCE_CHECK = "compliance_check"
    FORENSIC_ANALYSIS = "forensic_analysis"
    AUTOMATED_RESPONSE = "automated_response"
80
+
81
+
82
class IncidentStatus(Enum):
    """Security incident status.

    The stages loosely follow a standard incident-response lifecycle,
    from initial detection through recovery and closure.
    """

    NEW = "new"
    INVESTIGATING = "investigating"
    CONTAINMENT = "containment"
    ERADICATION = "eradication"
    RECOVERY = "recovery"
    CLOSED = "closed"
91
+
92
+
93
@dataclass
class SecurityEvent:
    """A single detected security event.

    Captures what was detected (type, description, indicators), where it
    came from (user, tenant, source IP, target resource), and how it was
    assessed (threat level, risk score, detection method).
    """

    event_id: str
    event_type: SecurityEventType
    threat_level: ThreatLevel
    user_id: Optional[str]
    tenant_id: str
    source_ip: str
    target_resource: Optional[str]
    description: str
    indicators: Dict[str, Any]
    risk_score: float
    timestamp: datetime
    detection_method: str
    false_positive_probability: float = 0.0
    mitigation_applied: bool = False
    incident_id: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization.

        Enum members are flattened to their string values and the timestamp
        is rendered as an ISO-8601 string.
        """
        return dict(
            event_id=self.event_id,
            event_type=self.event_type.value,
            threat_level=self.threat_level.value,
            user_id=self.user_id,
            tenant_id=self.tenant_id,
            source_ip=self.source_ip,
            target_resource=self.target_resource,
            description=self.description,
            indicators=self.indicators,
            risk_score=self.risk_score,
            timestamp=self.timestamp.isoformat(),
            detection_method=self.detection_method,
            false_positive_probability=self.false_positive_probability,
            mitigation_applied=self.mitigation_applied,
            incident_id=self.incident_id,
        )
132
+
133
+
134
@dataclass
class SecurityIncident:
    """A tracked security incident aggregating one or more security events."""

    incident_id: str
    title: str
    description: str
    status: IncidentStatus
    severity: ThreatLevel
    assignee: Optional[str]
    created_at: datetime
    updated_at: datetime
    closed_at: Optional[datetime]
    events: List[str]  # List of security event IDs
    actions_taken: List[Dict[str, Any]]
    impact_assessment: Dict[str, Any]
    tenant_id: str

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization.

        Enums become their string values; datetimes become ISO-8601 strings
        (``closed_at`` stays ``None`` while the incident is open).
        """
        if self.closed_at:
            closed = self.closed_at.isoformat()
        else:
            closed = None
        return dict(
            incident_id=self.incident_id,
            title=self.title,
            description=self.description,
            status=self.status.value,
            severity=self.severity.value,
            assignee=self.assignee,
            created_at=self.created_at.isoformat(),
            updated_at=self.updated_at.isoformat(),
            closed_at=closed,
            events=self.events,
            actions_taken=self.actions_taken,
            impact_assessment=self.impact_assessment,
            tenant_id=self.tenant_id,
        )
169
+
170
+
171
+ @register_node()
172
+ class SecurityEventNode(Node):
173
+ """Enterprise security event monitoring and incident response node.
174
+
175
+ This node provides comprehensive security event processing including:
176
+ - Real-time threat detection and analysis
177
+ - Security incident management
178
+ - Risk scoring and escalation
179
+ - Automated response workflows
180
+ - Compliance monitoring
181
+ - Forensic analysis capabilities
182
+
183
+ Parameters:
184
+ operation: Type of security operation to perform
185
+ event_data: Security event data
186
+ incident_data: Security incident data
187
+ analysis_config: Configuration for threat analysis
188
+ user_id: User ID for behavior monitoring
189
+ risk_threshold: Risk score threshold for alerts
190
+ time_window: Time window for analysis
191
+ detection_rules: Custom detection rules
192
+ response_actions: Automated response configuration
193
+ tenant_id: Tenant isolation
194
+
195
+ Example:
196
+ >>> # Create security event for suspicious login
197
+ >>> node = SecurityEventNode(
198
+ ... operation="create_event",
199
+ ... event_data={
200
+ ... "event_type": "suspicious_login",
201
+ ... "threat_level": "medium",
202
+ ... "user_id": "user123",
203
+ ... "source_ip": "192.168.1.100",
204
+ ... "description": "Login from unusual location",
205
+ ... "indicators": {
206
+ ... "location": "Unknown Country",
207
+ ... "device": "New Device",
208
+ ... "time": "Outside business hours"
209
+ ... },
210
+ ... "detection_method": "geolocation_analysis"
211
+ ... }
212
+ ... )
213
+ >>> result = node.run()
214
+ >>> event_id = result["security_event"]["event_id"]
215
+
216
+ >>> # Analyze threats in time window
217
+ >>> node = SecurityEventNode(
218
+ ... operation="analyze_threats",
219
+ ... analysis_config={
220
+ ... "time_window": 3600, # 1 hour
221
+ ... "threat_types": ["brute_force_attack", "suspicious_login"],
222
+ ... "risk_threshold": 7.0
223
+ ... }
224
+ ... )
225
+ >>> result = node.run()
226
+ >>> threats = result["threat_analysis"]["high_risk_events"]
227
+
228
+ >>> # Monitor user behavior for anomalies
229
+ >>> node = SecurityEventNode(
230
+ ... operation="monitor_user_behavior",
231
+ ... user_id="user123",
232
+ ... analysis_config={
233
+ ... "lookback_days": 30,
234
+ ... "anomaly_threshold": 0.8
235
+ ... }
236
+ ... )
237
+ >>> result = node.run()
238
+ >>> anomalies = result["behavior_analysis"]["anomalies"]
239
+ """
240
+
241
    def __init__(self, **config):
        """Initialize the node.

        The database and audit helper nodes are not created here; they are
        (re)built by ``_init_dependencies`` on each ``run`` call.
        """
        super().__init__(**config)
        # Populated by _init_dependencies() at run time.
        self._db_node = None
        self._audit_node = None
245
+
246
+ def get_parameters(self) -> Dict[str, NodeParameter]:
247
+ """Define parameters for security operations."""
248
+ return {
249
+ param.name: param
250
+ for param in [
251
+ # Operation type
252
+ NodeParameter(
253
+ name="operation",
254
+ type=str,
255
+ required=True,
256
+ description="Security operation to perform",
257
+ choices=[op.value for op in SecurityOperation],
258
+ ),
259
+ # Event data
260
+ NodeParameter(
261
+ name="event_data",
262
+ type=dict,
263
+ required=False,
264
+ description="Security event data",
265
+ ),
266
+ # Incident data
267
+ NodeParameter(
268
+ name="incident_data",
269
+ type=dict,
270
+ required=False,
271
+ description="Security incident data",
272
+ ),
273
+ # Analysis configuration
274
+ NodeParameter(
275
+ name="analysis_config",
276
+ type=dict,
277
+ required=False,
278
+ description="Configuration for threat analysis",
279
+ ),
280
+ # User monitoring
281
+ NodeParameter(
282
+ name="user_id",
283
+ type=str,
284
+ required=False,
285
+ description="User ID for behavior monitoring",
286
+ ),
287
+ # Risk configuration
288
+ NodeParameter(
289
+ name="risk_threshold",
290
+ type=float,
291
+ required=False,
292
+ default=7.0,
293
+ description="Risk score threshold for alerts",
294
+ ),
295
+ # Time windows
296
+ NodeParameter(
297
+ name="time_window",
298
+ type=int,
299
+ required=False,
300
+ default=3600,
301
+ description="Time window in seconds for analysis",
302
+ ),
303
+ # Detection configuration
304
+ NodeParameter(
305
+ name="detection_rules",
306
+ type=list,
307
+ required=False,
308
+ description="Custom detection rules",
309
+ ),
310
+ # Response configuration
311
+ NodeParameter(
312
+ name="response_actions",
313
+ type=dict,
314
+ required=False,
315
+ description="Automated response configuration",
316
+ ),
317
+ # Multi-tenancy
318
+ NodeParameter(
319
+ name="tenant_id",
320
+ type=str,
321
+ required=False,
322
+ description="Tenant ID for multi-tenant isolation",
323
+ ),
324
+ # Database configuration
325
+ NodeParameter(
326
+ name="database_config",
327
+ type=dict,
328
+ required=False,
329
+ description="Database connection configuration",
330
+ ),
331
+ # Incident management
332
+ NodeParameter(
333
+ name="incident_id",
334
+ type=str,
335
+ required=False,
336
+ description="Incident ID for incident operations",
337
+ ),
338
+ # Filtering
339
+ NodeParameter(
340
+ name="filters",
341
+ type=dict,
342
+ required=False,
343
+ description="Filters for event/incident queries",
344
+ ),
345
+ # Pagination
346
+ NodeParameter(
347
+ name="pagination",
348
+ type=dict,
349
+ required=False,
350
+ description="Pagination parameters",
351
+ ),
352
+ ]
353
+ }
354
+
355
+ def run(self, **inputs) -> Dict[str, Any]:
356
+ """Execute security operation."""
357
+ try:
358
+ operation = SecurityOperation(inputs["operation"])
359
+
360
+ # Initialize dependencies
361
+ self._init_dependencies(inputs)
362
+
363
+ # Route to appropriate operation
364
+ if operation == SecurityOperation.CREATE_EVENT:
365
+ return self._create_event(inputs)
366
+ elif operation == SecurityOperation.ANALYZE_THREATS:
367
+ return self._analyze_threats(inputs)
368
+ elif operation == SecurityOperation.DETECT_ANOMALIES:
369
+ return self._detect_anomalies(inputs)
370
+ elif operation == SecurityOperation.GENERATE_ALERTS:
371
+ return self._generate_alerts(inputs)
372
+ elif operation == SecurityOperation.GET_INCIDENTS:
373
+ return self._get_incidents(inputs)
374
+ elif operation == SecurityOperation.CREATE_INCIDENT:
375
+ return self._create_incident(inputs)
376
+ elif operation == SecurityOperation.UPDATE_INCIDENT:
377
+ return self._update_incident(inputs)
378
+ elif operation == SecurityOperation.GET_THREAT_INTELLIGENCE:
379
+ return self._get_threat_intelligence(inputs)
380
+ elif operation == SecurityOperation.CALCULATE_RISK_SCORE:
381
+ return self._calculate_risk_score(inputs)
382
+ elif operation == SecurityOperation.MONITOR_USER_BEHAVIOR:
383
+ return self._monitor_user_behavior(inputs)
384
+ elif operation == SecurityOperation.COMPLIANCE_CHECK:
385
+ return self._compliance_check(inputs)
386
+ elif operation == SecurityOperation.FORENSIC_ANALYSIS:
387
+ return self._forensic_analysis(inputs)
388
+ elif operation == SecurityOperation.AUTOMATED_RESPONSE:
389
+ return self._automated_response(inputs)
390
+ else:
391
+ raise NodeExecutionError(f"Unknown operation: {operation}")
392
+
393
+ except Exception as e:
394
+ raise NodeExecutionError(f"Security operation failed: {str(e)}")
395
+
396
    def _init_dependencies(self, inputs: Dict[str, Any]):
        """Initialize database and audit dependencies.

        Builds fresh ``AsyncSQLDatabaseNode`` and ``AuditLogNode`` instances
        on every call (i.e. once per ``run``), configured from
        ``inputs["database_config"]`` when provided.
        """
        # Get database config
        # NOTE(review): the fallback ships admin/admin credentials against a
        # local PostgreSQL — fine for development only; production callers
        # must always pass database_config explicitly.
        db_config = inputs.get(
            "database_config",
            {
                "database_type": "postgresql",
                "host": "localhost",
                "port": 5432,
                "database": "kailash_admin",
                "user": "admin",
                "password": "admin",
            },
        )

        # Initialize async database node
        self._db_node = AsyncSQLDatabaseNode(name="security_event_db", **db_config)

        # Initialize audit logging node; shares the same DB configuration.
        self._audit_node = AuditLogNode(database_config=db_config)
416
+
417
    def _create_event(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Create a new security event with risk scoring.

        Validates the payload, scores it, persists it to the
        ``security_events`` table, writes an audit-trail entry, and — when
        the score reaches ``risk_threshold`` — auto-creates an incident.

        Raises:
            NodeValidationError: If a required event field is missing.
        """
        event_data = inputs["event_data"]
        tenant_id = inputs.get("tenant_id", "default")

        # Validate required fields
        required_fields = ["event_type", "threat_level", "source_ip", "description"]
        for field in required_fields:
            if field not in event_data:
                raise NodeValidationError(f"Missing required field: {field}")

        # Calculate risk score
        risk_score = self._calculate_event_risk_score(event_data)

        # Create security event
        event_id = self._generate_event_id()
        now = datetime.now(UTC)

        security_event = SecurityEvent(
            event_id=event_id,
            # Enum conversion also validates the incoming strings; invalid
            # values raise ValueError (wrapped by run()).
            event_type=SecurityEventType(event_data["event_type"]),
            threat_level=ThreatLevel(event_data["threat_level"]),
            user_id=event_data.get("user_id"),
            tenant_id=tenant_id,
            source_ip=event_data["source_ip"],
            target_resource=event_data.get("target_resource"),
            description=event_data["description"],
            indicators=event_data.get("indicators", {}),
            risk_score=risk_score,
            timestamp=now,
            detection_method=event_data.get("detection_method", "manual"),
            false_positive_probability=event_data.get(
                "false_positive_probability", 0.0
            ),
        )

        # Insert into database
        insert_query = """
        INSERT INTO security_events (
            event_id, event_type, threat_level, user_id, tenant_id, source_ip,
            target_resource, description, indicators, risk_score, timestamp,
            detection_method, false_positive_probability, mitigation_applied
        ) VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14
        )
        """

        # The params list order must match the $1..$14 placeholders above.
        self._db_node.config.update(
            {
                "query": insert_query,
                "params": [
                    security_event.event_id,
                    security_event.event_type.value,
                    security_event.threat_level.value,
                    security_event.user_id,
                    security_event.tenant_id,
                    security_event.source_ip,
                    security_event.target_resource,
                    security_event.description,
                    security_event.indicators,
                    security_event.risk_score,
                    security_event.timestamp,
                    security_event.detection_method,
                    security_event.false_positive_probability,
                    security_event.mitigation_applied,
                ],
            }
        )

        # NOTE(review): db_result is not inspected — insert failures surface
        # only as exceptions raised by run().
        db_result = self._db_node.run()

        # Log to audit trail
        audit_event_data = {
            "event_type": "security_violation",
            "severity": security_event.threat_level.value,
            "user_id": security_event.user_id,
            "action": "security_event_created",
            "description": f"Security event created: {security_event.description}",
            "metadata": {
                "security_event_id": security_event.event_id,
                "event_type": security_event.event_type.value,
                "risk_score": security_event.risk_score,
                "source_ip": security_event.source_ip,
            },
        }

        self._audit_node.run(
            operation="log_event", event_data=audit_event_data, tenant_id=tenant_id
        )

        # Check if automatic incident creation is needed
        incident_id = None
        if risk_score >= inputs.get("risk_threshold", 7.0):
            incident_id = self._auto_create_incident(security_event)

        return {
            "result": {
                "security_event": security_event.to_dict(),
                "risk_score": risk_score,
                "incident_created": incident_id is not None,
                "incident_id": incident_id,
                "operation": "create_event",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
522
+
523
    def _analyze_threats(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze security threats in a time window.

        Fetches this tenant's events from the trailing ``time_window``
        seconds and delegates the actual analysis to
        ``_perform_threat_analysis``.
        """
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        time_window = analysis_config.get("time_window", 3600)  # 1 hour default
        # NOTE(review): unused here — _perform_threat_analysis re-reads the
        # threshold from analysis_config itself.
        risk_threshold = analysis_config.get("risk_threshold", 7.0)

        # Calculate time range
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(seconds=time_window)

        # Query security events in time window
        query = """
        SELECT event_id, event_type, threat_level, user_id, source_ip,
               target_resource, description, risk_score, timestamp, indicators
        FROM security_events
        WHERE tenant_id = $1 AND timestamp >= $2 AND timestamp <= $3
        ORDER BY risk_score DESC, timestamp DESC
        """

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, start_time, end_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Analyze threats
        analysis = self._perform_threat_analysis(events, analysis_config)

        return {
            "result": {
                "threat_analysis": analysis,
                "time_window": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "duration_seconds": time_window,
                },
                "total_events": len(events),
                "operation": "analyze_threats",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
570
+
571
    def _monitor_user_behavior(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Monitor user behavior for anomalies.

        Loads the user's security events for the trailing ``lookback_days``
        window and delegates the pattern analysis to
        ``_analyze_user_behavior``.
        """
        user_id = inputs["user_id"]
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        lookback_days = analysis_config.get("lookback_days", 30)
        # NOTE(review): unused in this method; _analyze_user_behavior does
        # not read it either — verify whether thresholding was intended.
        anomaly_threshold = analysis_config.get("anomaly_threshold", 0.8)

        # Get user's historical behavior
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(days=lookback_days)

        # Query user's security events
        query = """
        SELECT event_type, threat_level, source_ip, timestamp, risk_score, indicators
        FROM security_events
        WHERE tenant_id = $1 AND user_id = $2 AND timestamp >= $3 AND timestamp <= $4
        ORDER BY timestamp DESC
        """

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, user_id, start_time, end_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Analyze behavior patterns
        behavior_analysis = self._analyze_user_behavior(events, analysis_config)

        return {
            "result": {
                "behavior_analysis": behavior_analysis,
                "user_id": user_id,
                "analysis_period": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "days": lookback_days,
                },
                "events_analyzed": len(events),
                "operation": "monitor_user_behavior",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
619
+
620
+ def _calculate_event_risk_score(self, event_data: Dict[str, Any]) -> float:
621
+ """Calculate risk score for a security event."""
622
+ base_scores = {
623
+ SecurityEventType.CRITICAL.value: 9.0,
624
+ SecurityEventType.SYSTEM_COMPROMISE.value: 9.5,
625
+ SecurityEventType.DATA_EXFILTRATION.value: 9.0,
626
+ SecurityEventType.ACCOUNT_TAKEOVER.value: 8.5,
627
+ SecurityEventType.PRIVILEGE_ESCALATION.value: 8.0,
628
+ SecurityEventType.BRUTE_FORCE_ATTACK.value: 7.5,
629
+ SecurityEventType.INSIDER_THREAT.value: 8.0,
630
+ SecurityEventType.SUSPICIOUS_LOGIN.value: 6.0,
631
+ SecurityEventType.UNAUTHORIZED_ACCESS_ATTEMPT.value: 7.0,
632
+ SecurityEventType.UNUSUAL_DATA_ACCESS.value: 6.5,
633
+ SecurityEventType.POLICY_VIOLATION.value: 5.0,
634
+ SecurityEventType.ANOMALOUS_BEHAVIOR.value: 5.5,
635
+ }
636
+
637
+ event_type = event_data.get("event_type", "custom_threat")
638
+ base_score = base_scores.get(event_type, 5.0)
639
+
640
+ # Adjust based on threat level
641
+ threat_multipliers = {
642
+ "info": 0.5,
643
+ "low": 0.7,
644
+ "medium": 1.0,
645
+ "high": 1.3,
646
+ "critical": 1.5,
647
+ }
648
+
649
+ threat_level = event_data.get("threat_level", "medium")
650
+ multiplier = threat_multipliers.get(threat_level, 1.0)
651
+
652
+ # Adjust based on indicators
653
+ indicators = event_data.get("indicators", {})
654
+ indicator_boost = 0.0
655
+
656
+ if "repeated_attempts" in indicators:
657
+ indicator_boost += 1.0
658
+ if "unusual_location" in indicators:
659
+ indicator_boost += 0.5
660
+ if "off_hours_access" in indicators:
661
+ indicator_boost += 0.3
662
+ if "new_device" in indicators:
663
+ indicator_boost += 0.2
664
+
665
+ # Calculate final score (0-10 scale)
666
+ final_score = min(10.0, (base_score * multiplier) + indicator_boost)
667
+
668
+ return round(final_score, 2)
669
+
670
+ def _perform_threat_analysis(
671
+ self, events: List[Dict[str, Any]], config: Dict[str, Any]
672
+ ) -> Dict[str, Any]:
673
+ """Perform comprehensive threat analysis on events."""
674
+ risk_threshold = config.get("risk_threshold", 7.0)
675
+
676
+ analysis = {
677
+ "high_risk_events": [],
678
+ "threat_patterns": {},
679
+ "ip_analysis": {},
680
+ "user_analysis": {},
681
+ "recommendations": [],
682
+ }
683
+
684
+ # Categorize events by risk
685
+ for event in events:
686
+ if event["risk_score"] >= risk_threshold:
687
+ analysis["high_risk_events"].append(event)
688
+
689
+ # Analyze threat patterns
690
+ threat_types = {}
691
+ for event in events:
692
+ event_type = event["event_type"]
693
+ threat_types[event_type] = threat_types.get(event_type, 0) + 1
694
+
695
+ analysis["threat_patterns"] = threat_types
696
+
697
+ # Analyze IP addresses
698
+ ip_counts = {}
699
+ for event in events:
700
+ ip = event["source_ip"]
701
+ ip_counts[ip] = ip_counts.get(ip, 0) + 1
702
+
703
+ # Flag suspicious IPs (multiple events)
704
+ suspicious_ips = {ip: count for ip, count in ip_counts.items() if count > 3}
705
+ analysis["ip_analysis"] = {
706
+ "total_unique_ips": len(ip_counts),
707
+ "suspicious_ips": suspicious_ips,
708
+ }
709
+
710
+ # Generate recommendations
711
+ if len(analysis["high_risk_events"]) > 5:
712
+ analysis["recommendations"].append(
713
+ "High volume of security events detected - investigate immediately"
714
+ )
715
+
716
+ if suspicious_ips:
717
+ analysis["recommendations"].append(
718
+ f"Consider blocking suspicious IPs: {list(suspicious_ips.keys())}"
719
+ )
720
+
721
+ return analysis
722
+
723
+ def _analyze_user_behavior(
724
+ self, events: List[Dict[str, Any]], config: Dict[str, Any]
725
+ ) -> Dict[str, Any]:
726
+ """Analyze user behavior patterns for anomalies."""
727
+ analysis = {
728
+ "baseline_established": len(events) >= 10,
729
+ "anomalies": [],
730
+ "patterns": {},
731
+ "risk_factors": [],
732
+ }
733
+
734
+ if not analysis["baseline_established"]:
735
+ analysis["anomalies"].append(
736
+ "Insufficient data for baseline - new user or limited activity"
737
+ )
738
+ return analysis
739
+
740
+ # Analyze login patterns
741
+ login_hours = []
742
+ login_ips = {}
743
+
744
+ for event in events:
745
+ if event["event_type"] in ["suspicious_login", "user_login"]:
746
+ hour = datetime.fromisoformat(event["timestamp"]).hour
747
+ login_hours.append(hour)
748
+
749
+ ip = event["source_ip"]
750
+ login_ips[ip] = login_ips.get(ip, 0) + 1
751
+
752
+ # Detect anomalies
753
+ if len(set(login_ips.keys())) > 10:
754
+ analysis["anomalies"].append(
755
+ "Logins from unusually high number of IP addresses"
756
+ )
757
+
758
+ # Check for off-hours activity
759
+ off_hours_count = sum(1 for hour in login_hours if hour < 6 or hour > 22)
760
+ if off_hours_count > len(login_hours) * 0.3:
761
+ analysis["anomalies"].append("High percentage of off-hours activity")
762
+
763
+ analysis["patterns"] = {
764
+ "unique_ips": len(login_ips),
765
+ "off_hours_percentage": (
766
+ (off_hours_count / len(login_hours) * 100) if login_hours else 0
767
+ ),
768
+ "most_common_hour": (
769
+ max(set(login_hours), key=login_hours.count) if login_hours else None
770
+ ),
771
+ }
772
+
773
+ return analysis
774
+
775
    def _auto_create_incident(self, security_event: SecurityEvent) -> str:
        """Automatically create an incident for high-risk security events.

        Builds a SecurityIncident seeded from the triggering event (the
        incident inherits the event's threat level and tenant) and persists
        it through the shared database node.

        Args:
            security_event: The high-risk event that triggered escalation.

        Returns:
            The generated incident ID (a UUID string).
        """
        incident_id = self._generate_event_id()
        now = datetime.now(UTC)

        incident = SecurityIncident(
            incident_id=incident_id,
            title=f"High-Risk Security Event: {security_event.event_type.value}",
            description=f"Automated incident created for security event {security_event.event_id}. {security_event.description}",
            status=IncidentStatus.NEW,
            severity=security_event.threat_level,  # severity mirrors the event's threat level
            assignee=None,  # auto-created incidents start unassigned
            created_at=now,
            updated_at=now,
            closed_at=None,
            events=[security_event.event_id],
            actions_taken=[],
            impact_assessment={"risk_score": security_event.risk_score},
            tenant_id=security_event.tenant_id,
        )

        # Insert incident into database (positional $n placeholders).
        insert_query = """
        INSERT INTO security_incidents (
            incident_id, title, description, status, severity, assignee,
            created_at, updated_at, closed_at, events, actions_taken,
            impact_assessment, tenant_id
        ) VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13
        )
        """

        # NOTE(review): the params list must stay aligned with the column
        # order above - the $n placeholders are purely positional.
        self._db_node.config.update(
            {
                "query": insert_query,
                "params": [
                    incident.incident_id,
                    incident.title,
                    incident.description,
                    incident.status.value,
                    incident.severity.value,
                    incident.assignee,
                    incident.created_at,
                    incident.updated_at,
                    incident.closed_at,
                    incident.events,
                    incident.actions_taken,
                    incident.impact_assessment,
                    incident.tenant_id,
                ],
            }
        )

        self._db_node.run()

        return incident_id
831
+
832
+ def _generate_event_id(self) -> str:
833
+ """Generate unique event/incident ID."""
834
+ import uuid
835
+
836
+ return str(uuid.uuid4())
837
+
838
    def _detect_anomalies(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Detect anomalies using ML-based analysis.

        Pulls recent events for the tenant (optionally scoped to one user)
        from the security_events table and runs the behavioral anomaly
        heuristics over them.

        Args:
            inputs: Supports ``analysis_config`` (``time_window`` seconds,
                ``anomaly_threshold``), ``tenant_id`` and optional ``user_id``.

        Returns:
            Result envelope with detected ``anomalies``, the analysis window
            and the number of events examined.
        """
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        user_id = inputs.get("user_id")
        time_window = analysis_config.get("time_window", 86400)  # 24 hours default
        # NOTE(review): anomaly_threshold is read but never applied by
        # _detect_behavioral_anomalies - confirm whether it should gate results.
        anomaly_threshold = analysis_config.get("anomaly_threshold", 0.8)

        # Calculate time range
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(seconds=time_window)

        # Query recent events for pattern analysis
        query = """
        SELECT event_type, user_id, source_ip, timestamp, risk_score, indicators
        FROM security_events
        WHERE tenant_id = $1 AND timestamp >= $2 AND timestamp <= $3
        """
        params = [tenant_id, start_time, end_time]

        # Optional narrowing to a single user; $4 follows the three params above.
        if user_id:
            query += " AND user_id = $4"
            params.append(user_id)

        query += " ORDER BY timestamp DESC"

        self._db_node.config.update(
            {"query": query, "params": params, "fetch_mode": "all"}
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Perform anomaly detection over the fetched window
        anomalies = self._detect_behavioral_anomalies(events, analysis_config)

        return {
            "result": {
                "anomalies": anomalies,
                "analysis_period": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "duration_seconds": time_window,
                },
                "events_analyzed": len(events),
                "operation": "detect_anomalies",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
887
+
888
    def _generate_alerts(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Generate security alerts based on events.

        Scans the last hour of events at or above the risk threshold
        (capped at 50) and emits one alert per qualifying event.

        Args:
            inputs: Supports ``analysis_config`` (``risk_threshold``,
                ``alert_types``) and ``tenant_id``.

        Returns:
            Result envelope with the generated ``alerts`` list and count.
        """
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        risk_threshold = analysis_config.get("risk_threshold", 7.0)
        # NOTE(review): alert_types is read but never applied below - every
        # alert is emitted as "high_risk_event"; confirm intended filtering.
        alert_types = analysis_config.get(
            "alert_types", ["high_risk", "pattern_detected", "anomaly"]
        )

        # Get recent high-risk events
        query = """
        SELECT event_id, event_type, threat_level, user_id, source_ip, risk_score, timestamp
        FROM security_events
        WHERE tenant_id = $1 AND risk_score >= $2 AND timestamp >= $3
        ORDER BY risk_score DESC, timestamp DESC
        LIMIT 50
        """

        # Alerting only considers the most recent hour of events.
        lookback_time = datetime.now(UTC) - timedelta(hours=1)

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, risk_threshold, lookback_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        high_risk_events = result.get("result", {}).get("data", [])

        # Generate alerts
        alerts = []
        alert_id = 1  # sequential per-call counter, zero-padded in the alert ID

        for event in high_risk_events:
            alert = {
                "alert_id": f"ALT-{alert_id:06d}",
                "alert_type": "high_risk_event",
                # 8.0+ is escalated to "high"; everything at/above the
                # threshold but below 8.0 is "medium".
                "severity": "high" if event["risk_score"] >= 8.0 else "medium",
                "title": f"High-Risk Security Event: {event['event_type']}",
                "description": f"Security event with risk score {event['risk_score']} detected",
                "event_id": event["event_id"],
                "user_id": event["user_id"],
                "source_ip": event["source_ip"],
                "created_at": datetime.now(UTC).isoformat(),
                "status": "active",
            }
            alerts.append(alert)
            alert_id += 1

        return {
            "result": {
                "alerts": alerts,
                "alert_count": len(alerts),
                "risk_threshold": risk_threshold,
                "operation": "generate_alerts",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
948
+
949
+ def _get_incidents(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
950
+ """Get security incidents with filtering."""
951
+ tenant_id = inputs.get("tenant_id", "default")
952
+ filters = inputs.get("filters", {})
953
+ pagination = inputs.get("pagination", {"page": 1, "size": 20})
954
+
955
+ # Build WHERE clause
956
+ where_conditions = ["tenant_id = $1"]
957
+ params = [tenant_id]
958
+ param_count = 1
959
+
960
+ if "status" in filters:
961
+ param_count += 1
962
+ where_conditions.append(f"status = ${param_count}")
963
+ params.append(filters["status"])
964
+
965
+ if "severity" in filters:
966
+ param_count += 1
967
+ where_conditions.append(f"severity = ${param_count}")
968
+ params.append(filters["severity"])
969
+
970
+ if "assignee" in filters:
971
+ param_count += 1
972
+ where_conditions.append(f"assignee = ${param_count}")
973
+ params.append(filters["assignee"])
974
+
975
+ # Pagination
976
+ page = pagination.get("page", 1)
977
+ size = pagination.get("size", 20)
978
+ offset = (page - 1) * size
979
+
980
+ # Query incidents
981
+ query = f"""
982
+ SELECT incident_id, title, description, status, severity, assignee,
983
+ created_at, updated_at, closed_at, events, actions_taken
984
+ FROM security_incidents
985
+ WHERE {' AND '.join(where_conditions)}
986
+ ORDER BY created_at DESC
987
+ LIMIT {size} OFFSET {offset}
988
+ """
989
+
990
+ self._db_node.config.update(
991
+ {"query": query, "params": params, "fetch_mode": "all"}
992
+ )
993
+
994
+ result = self._db_node.run()
995
+ incidents = result.get("result", {}).get("data", [])
996
+
997
+ return {
998
+ "result": {
999
+ "incidents": incidents,
1000
+ "pagination": {"page": page, "size": size, "total": len(incidents)},
1001
+ "filters_applied": filters,
1002
+ "operation": "get_incidents",
1003
+ "timestamp": datetime.now(UTC).isoformat(),
1004
+ }
1005
+ }
1006
+
1007
    def _create_incident(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Create security incident manually.

        Args:
            inputs: Must contain ``incident_data`` with at least ``title``,
                ``description`` and ``severity``; optional ``status``
                (default "new"), ``assignee``, ``events`` and
                ``impact_assessment``. ``tenant_id`` defaults to "default".

        Returns:
            Result envelope containing the created incident as a dict.

        Raises:
            NodeValidationError: If a required field is missing.
        """
        incident_data = inputs["incident_data"]
        tenant_id = inputs.get("tenant_id", "default")

        # Validate required fields up front so we fail before touching the DB.
        required_fields = ["title", "description", "severity"]
        for field in required_fields:
            if field not in incident_data:
                raise NodeValidationError(f"Missing required field: {field}")

        # Create incident
        incident_id = self._generate_event_id()
        now = datetime.now(UTC)

        incident = SecurityIncident(
            incident_id=incident_id,
            title=incident_data["title"],
            description=incident_data["description"],
            # Enum constructors raise ValueError on unknown status/severity values.
            status=IncidentStatus(incident_data.get("status", "new")),
            severity=ThreatLevel(incident_data["severity"]),
            assignee=incident_data.get("assignee"),
            created_at=now,
            updated_at=now,
            closed_at=None,
            events=incident_data.get("events", []),
            actions_taken=[],
            impact_assessment=incident_data.get("impact_assessment", {}),
            tenant_id=tenant_id,
        )

        # Insert into database (positional $n placeholders; the params list
        # below must stay aligned with this column order).
        insert_query = """
        INSERT INTO security_incidents (
            incident_id, title, description, status, severity, assignee,
            created_at, updated_at, closed_at, events, actions_taken,
            impact_assessment, tenant_id
        ) VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13
        )
        """

        self._db_node.config.update(
            {
                "query": insert_query,
                "params": [
                    incident.incident_id,
                    incident.title,
                    incident.description,
                    incident.status.value,
                    incident.severity.value,
                    incident.assignee,
                    incident.created_at,
                    incident.updated_at,
                    incident.closed_at,
                    incident.events,
                    incident.actions_taken,
                    incident.impact_assessment,
                    incident.tenant_id,
                ],
            }
        )

        self._db_node.run()

        return {
            "result": {
                "incident": incident.to_dict(),
                "operation": "create_incident",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1079
+
1080
    def _update_incident(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Update security incident status and details.

        Builds a dynamic UPDATE containing only the fields present in
        ``incident_data`` (status, assignee, actions_taken). ``updated_at``
        is always refreshed, and ``closed_at`` is stamped when the status
        transitions to "closed".

        Args:
            inputs: Must contain ``incident_id`` and ``incident_data``;
                ``tenant_id`` defaults to "default".

        Returns:
            Result envelope confirming the update.
        """
        incident_id = inputs["incident_id"]
        incident_data = inputs["incident_data"]
        tenant_id = inputs.get("tenant_id", "default")

        # Build update fields; $1 is always the refreshed updated_at.
        update_fields = ["updated_at = $1"]
        params = [datetime.now(UTC)]
        param_count = 1

        if "status" in incident_data:
            param_count += 1
            update_fields.append(f"status = ${param_count}")
            params.append(incident_data["status"])

            # Set closed_at if status is closed
            if incident_data["status"] == "closed":
                param_count += 1
                update_fields.append(f"closed_at = ${param_count}")
                params.append(datetime.now(UTC))

        if "assignee" in incident_data:
            param_count += 1
            update_fields.append(f"assignee = ${param_count}")
            params.append(incident_data["assignee"])

        if "actions_taken" in incident_data:
            param_count += 1
            update_fields.append(f"actions_taken = ${param_count}")
            params.append(incident_data["actions_taken"])

        # Add where conditions: the last two positional params are always
        # incident_id then tenant_id, hence ${param_count-1}/${param_count}
        # in the WHERE clause below.
        param_count += 1
        params.append(incident_id)
        param_count += 1
        params.append(tenant_id)

        query = f"""
        UPDATE security_incidents
        SET {', '.join(update_fields)}
        WHERE incident_id = ${param_count-1} AND tenant_id = ${param_count}
        """

        self._db_node.config.update({"query": query, "params": params})

        self._db_node.run()

        return {
            "result": {
                "incident_id": incident_id,
                "updated": True,
                "operation": "update_incident",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1136
+
1137
+ def _get_threat_intelligence(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
1138
+ """Get threat intelligence from external sources."""
1139
+ analysis_config = inputs.get("analysis_config", {})
1140
+ threat_types = analysis_config.get("threat_types", [])
1141
+ lookback_days = analysis_config.get("lookback_days", 30)
1142
+
1143
+ # Mock threat intelligence data (in real implementation, would integrate with external feeds)
1144
+ threat_intelligence = {
1145
+ "indicators": {
1146
+ "malicious_ips": ["192.168.1.100", "10.0.0.50"],
1147
+ "suspicious_domains": ["malicious-site.com", "phishing-domain.net"],
1148
+ "known_attack_patterns": ["brute_force", "sql_injection", "xss"],
1149
+ },
1150
+ "threat_feeds": [
1151
+ {
1152
+ "source": "Internal Analysis",
1153
+ "last_updated": datetime.now(UTC).isoformat(),
1154
+ "confidence": "high",
1155
+ "indicators_count": 25,
1156
+ }
1157
+ ],
1158
+ "risk_assessment": {
1159
+ "current_threat_level": "medium",
1160
+ "trending_threats": ["phishing_attempt", "insider_threat"],
1161
+ "recommendations": [
1162
+ "Monitor for unusual login patterns",
1163
+ "Review email security policies",
1164
+ "Enhance endpoint detection",
1165
+ ],
1166
+ },
1167
+ }
1168
+
1169
+ return {
1170
+ "result": {
1171
+ "threat_intelligence": threat_intelligence,
1172
+ "generated_at": datetime.now(UTC).isoformat(),
1173
+ "operation": "get_threat_intelligence",
1174
+ "timestamp": datetime.now(UTC).isoformat(),
1175
+ }
1176
+ }
1177
+
1178
    def _calculate_risk_score(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Calculate comprehensive risk score for entity.

        Averages the entity's event risk scores over the lookback window,
        weighting recent events more heavily, then maps the score onto a
        coarse risk category.

        Args:
            inputs: Must contain ``entity_id``; ``entity_type`` is "user"
                (default) or "ip". ``analysis_config.lookback_days`` sets the
                window (default 30).

        Returns:
            Result envelope with ``risk_score``, ``risk_category`` and
            analysis metadata.

        Raises:
            NodeValidationError: If ``entity_type`` is not "user" or "ip".
        """
        entity_type = inputs.get("entity_type", "user")  # user, ip, domain
        entity_id = inputs["entity_id"]
        analysis_config = inputs.get("analysis_config", {})
        tenant_id = inputs.get("tenant_id", "default")
        lookback_days = analysis_config.get("lookback_days", 30)

        # Calculate time range
        end_time = datetime.now(UTC)
        start_time = end_time - timedelta(days=lookback_days)

        # Query events for entity (column filtered depends on entity type)
        if entity_type == "user":
            query = """
            SELECT event_type, risk_score, timestamp
            FROM security_events
            WHERE tenant_id = $1 AND user_id = $2 AND timestamp >= $3 AND timestamp <= $4
            ORDER BY timestamp DESC
            """
        elif entity_type == "ip":
            query = """
            SELECT event_type, risk_score, timestamp
            FROM security_events
            WHERE tenant_id = $1 AND source_ip = $2 AND timestamp >= $3 AND timestamp <= $4
            ORDER BY timestamp DESC
            """
        else:
            raise NodeValidationError(f"Unsupported entity type: {entity_type}")

        self._db_node.config.update(
            {
                "query": query,
                "params": [tenant_id, entity_id, start_time, end_time],
                "fetch_mode": "all",
            }
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Calculate risk metrics
        if not events:
            risk_score = 0.0
        else:
            # Calculate weighted average with recency bias: newer events
            # count more, with a 0.1 floor weight for the oldest events.
            total_weighted_score = 0.0
            total_weight = 0.0

            for event in events:
                # NOTE(review): assumes event["timestamp"] is an ISO string
                # (optionally "Z"-suffixed); if the DB driver returns datetime
                # objects instead, .replace() here would fail - confirm.
                age_days = (
                    end_time
                    - datetime.fromisoformat(event["timestamp"].replace("Z", "+00:00"))
                ).days
                recency_weight = max(0.1, 1.0 - (age_days / lookback_days))
                weight = recency_weight

                total_weighted_score += event["risk_score"] * weight
                total_weight += weight

            risk_score = (
                total_weighted_score / total_weight if total_weight > 0 else 0.0
            )

        # Calculate risk category (fixed thresholds on the weighted average)
        if risk_score >= 8.0:
            risk_category = "critical"
        elif risk_score >= 6.0:
            risk_category = "high"
        elif risk_score >= 4.0:
            risk_category = "medium"
        elif risk_score >= 2.0:
            risk_category = "low"
        else:
            risk_category = "minimal"

        return {
            "result": {
                "entity_type": entity_type,
                "entity_id": entity_id,
                "risk_score": round(risk_score, 2),
                "risk_category": risk_category,
                "events_analyzed": len(events),
                "analysis_period": {
                    "start": start_time.isoformat(),
                    "end": end_time.isoformat(),
                    "days": lookback_days,
                },
                "operation": "calculate_risk_score",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1270
+
1271
+ def _compliance_check(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
1272
+ """Check compliance violations and requirements."""
1273
+ compliance_framework = inputs.get(
1274
+ "compliance_framework", "general"
1275
+ ) # gdpr, hipaa, sox, etc.
1276
+ tenant_id = inputs.get("tenant_id", "default")
1277
+ check_type = inputs.get("check_type", "full") # full, incremental
1278
+
1279
+ # Mock compliance checking (real implementation would have detailed rules)
1280
+ compliance_results = {
1281
+ "framework": compliance_framework,
1282
+ "overall_score": 85.5,
1283
+ "status": "compliant",
1284
+ "violations": [
1285
+ {
1286
+ "rule_id": "LOG_RETENTION_001",
1287
+ "severity": "medium",
1288
+ "description": "Log retention period below recommended 2 years",
1289
+ "current_value": "18 months",
1290
+ "required_value": "24 months",
1291
+ "remediation": "Update log retention policy",
1292
+ }
1293
+ ],
1294
+ "recommendations": [
1295
+ "Implement automated log archiving",
1296
+ "Review access control policies quarterly",
1297
+ "Enhance incident response procedures",
1298
+ ],
1299
+ "next_review_date": (datetime.now(UTC) + timedelta(days=90)).isoformat(),
1300
+ }
1301
+
1302
+ return {
1303
+ "result": {
1304
+ "compliance_check": compliance_results,
1305
+ "check_performed_at": datetime.now(UTC).isoformat(),
1306
+ "operation": "compliance_check",
1307
+ "timestamp": datetime.now(UTC).isoformat(),
1308
+ }
1309
+ }
1310
+
1311
    def _forensic_analysis(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Perform forensic analysis on security events.

        Loads the event timeline either from an explicit list of event IDs
        or from all events attached to an incident, then attaches a
        (currently mock) pattern/artifact analysis.

        Args:
            inputs: One of ``event_ids`` or ``incident_id`` is required;
                ``tenant_id`` defaults to "default".

        Returns:
            Result envelope with the ``forensic_analysis`` payload.

        Raises:
            NodeValidationError: If neither event_ids nor incident_id is given.
        """
        analysis_config = inputs.get("analysis_config", {})
        event_ids = inputs.get("event_ids", [])
        incident_id = inputs.get("incident_id")
        tenant_id = inputs.get("tenant_id", "default")

        # Query events for forensic analysis
        if event_ids:
            # Placeholders start at $2 because $1 is the tenant_id.
            placeholders = ",".join(["$" + str(i + 2) for i in range(len(event_ids))])
            query = f"""
            SELECT event_id, event_type, user_id, source_ip, timestamp, indicators, description
            FROM security_events
            WHERE tenant_id = $1 AND event_id IN ({placeholders})
            ORDER BY timestamp ASC
            """
            params = [tenant_id] + event_ids
        elif incident_id:
            # ANY(si.events) joins events through the incident's event-ID array.
            query = """
            SELECT se.event_id, se.event_type, se.user_id, se.source_ip, se.timestamp, se.indicators, se.description
            FROM security_events se
            JOIN security_incidents si ON se.event_id = ANY(si.events)
            WHERE si.tenant_id = $1 AND si.incident_id = $2
            ORDER BY se.timestamp ASC
            """
            params = [tenant_id, incident_id]
        else:
            raise NodeValidationError(
                "Either event_ids or incident_id must be provided"
            )

        self._db_node.config.update(
            {"query": query, "params": params, "fetch_mode": "all"}
        )

        result = self._db_node.run()
        events = result.get("result", {}).get("data", [])

        # Perform forensic analysis.
        # NOTE(review): patterns/artifacts/recommendations below are static
        # mock data - only "timeline" reflects the queried events.
        forensic_results = {
            "timeline": events,
            "patterns": {
                "attack_vector": "credential_compromise",
                "techniques_used": ["brute_force", "privilege_escalation"],
                "affected_systems": ["web_server", "database"],
                "data_accessed": ["customer_records", "financial_data"],
            },
            "artifacts": {
                "log_files": ["/var/log/auth.log", "/var/log/apache2/access.log"],
                "network_captures": ["capture_20250612.pcap"],
                "file_hashes": ["sha256:abc123..."],
            },
            "recommendations": [
                "Reset all potentially compromised credentials",
                "Review system access logs for unauthorized activity",
                "Implement additional monitoring on affected systems",
            ],
        }

        return {
            "result": {
                "forensic_analysis": forensic_results,
                "events_analyzed": len(events),
                "analysis_completed_at": datetime.now(UTC).isoformat(),
                "operation": "forensic_analysis",
                "timestamp": datetime.now(UTC).isoformat(),
            }
        }
1379
+
1380
+ def _automated_response(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
1381
+ """Execute automated security response actions."""
1382
+ response_actions = inputs["response_actions"]
1383
+ event_id = inputs.get("event_id")
1384
+ incident_id = inputs.get("incident_id")
1385
+ tenant_id = inputs.get("tenant_id", "default")
1386
+
1387
+ # Execute response actions
1388
+ executed_actions = []
1389
+ failed_actions = []
1390
+
1391
+ for action in response_actions:
1392
+ action_type = action.get("type")
1393
+ action_params = action.get("parameters", {})
1394
+
1395
+ try:
1396
+ if action_type == "block_ip":
1397
+ # Mock IP blocking
1398
+ result = {
1399
+ "action": "block_ip",
1400
+ "ip_address": action_params.get("ip"),
1401
+ "status": "blocked",
1402
+ "duration": action_params.get("duration", "24h"),
1403
+ }
1404
+ elif action_type == "disable_user":
1405
+ # Mock user disabling
1406
+ result = {
1407
+ "action": "disable_user",
1408
+ "user_id": action_params.get("user_id"),
1409
+ "status": "disabled",
1410
+ }
1411
+ elif action_type == "quarantine_file":
1412
+ # Mock file quarantine
1413
+ result = {
1414
+ "action": "quarantine_file",
1415
+ "file_path": action_params.get("file_path"),
1416
+ "status": "quarantined",
1417
+ }
1418
+ else:
1419
+ result = {"action": action_type, "status": "unknown_action"}
1420
+
1421
+ result["executed_at"] = datetime.now(UTC).isoformat()
1422
+ executed_actions.append(result)
1423
+
1424
+ except Exception as e:
1425
+ failed_actions.append(
1426
+ {
1427
+ "action": action_type,
1428
+ "error": str(e),
1429
+ "parameters": action_params,
1430
+ }
1431
+ )
1432
+
1433
+ return {
1434
+ "result": {
1435
+ "executed_actions": executed_actions,
1436
+ "failed_actions": failed_actions,
1437
+ "total_actions": len(response_actions),
1438
+ "success_rate": (
1439
+ len(executed_actions) / len(response_actions) * 100
1440
+ if response_actions
1441
+ else 0
1442
+ ),
1443
+ "operation": "automated_response",
1444
+ "timestamp": datetime.now(UTC).isoformat(),
1445
+ }
1446
+ }
1447
+
1448
+ def _detect_behavioral_anomalies(
1449
+ self, events: List[Dict[str, Any]], config: Dict[str, Any]
1450
+ ) -> List[Dict[str, Any]]:
1451
+ """Detect behavioral anomalies in security events."""
1452
+ anomalies = []
1453
+
1454
+ if not events:
1455
+ return anomalies
1456
+
1457
+ # Group events by user
1458
+ user_events = {}
1459
+ for event in events:
1460
+ user_id = event.get("user_id")
1461
+ if user_id:
1462
+ if user_id not in user_events:
1463
+ user_events[user_id] = []
1464
+ user_events[user_id].append(event)
1465
+
1466
+ # Detect anomalies for each user
1467
+ for user_id, user_event_list in user_events.items():
1468
+ # Check for unusual login times
1469
+ login_hours = []
1470
+ for event in user_event_list:
1471
+ if event["event_type"] in ["user_login", "suspicious_login"]:
1472
+ hour = datetime.fromisoformat(
1473
+ event["timestamp"].replace("Z", "+00:00")
1474
+ ).hour
1475
+ login_hours.append(hour)
1476
+
1477
+ if login_hours:
1478
+ # Detect off-hours activity (before 6 AM or after 10 PM)
1479
+ off_hours_count = sum(
1480
+ 1 for hour in login_hours if hour < 6 or hour > 22
1481
+ )
1482
+ if off_hours_count > len(login_hours) * 0.5: # More than 50% off-hours
1483
+ anomalies.append(
1484
+ {
1485
+ "type": "unusual_login_times",
1486
+ "user_id": user_id,
1487
+ "description": f"High percentage of off-hours logins: {off_hours_count}/{len(login_hours)}",
1488
+ "severity": "medium",
1489
+ "confidence": 0.8,
1490
+ }
1491
+ )
1492
+
1493
+ # Check for rapid successive events
1494
+ if len(user_event_list) > 10:
1495
+ timestamps = [
1496
+ datetime.fromisoformat(e["timestamp"].replace("Z", "+00:00"))
1497
+ for e in user_event_list
1498
+ ]
1499
+ timestamps.sort()
1500
+
1501
+ rapid_events = 0
1502
+ for i in range(1, len(timestamps)):
1503
+ if (
1504
+ timestamps[i] - timestamps[i - 1]
1505
+ ).total_seconds() < 60: # Less than 1 minute apart
1506
+ rapid_events += 1
1507
+
1508
+ if rapid_events > 5:
1509
+ anomalies.append(
1510
+ {
1511
+ "type": "rapid_successive_events",
1512
+ "user_id": user_id,
1513
+ "description": f"Unusually rapid event sequence: {rapid_events} events within 1 minute",
1514
+ "severity": "high",
1515
+ "confidence": 0.9,
1516
+ }
1517
+ )
1518
+
1519
+ return anomalies