iflow-mcp-m507_ai-soc-agent 1.0.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/METADATA +410 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/RECORD +85 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/top_level.txt +1 -0
- src/__init__.py +8 -0
- src/ai_controller/README.md +139 -0
- src/ai_controller/__init__.py +12 -0
- src/ai_controller/agent_executor.py +596 -0
- src/ai_controller/cli/__init__.py +2 -0
- src/ai_controller/cli/main.py +243 -0
- src/ai_controller/session_manager.py +409 -0
- src/ai_controller/web/__init__.py +2 -0
- src/ai_controller/web/server.py +1181 -0
- src/ai_controller/web/static/css/README.md +102 -0
- src/api/__init__.py +13 -0
- src/api/case_management.py +271 -0
- src/api/edr.py +187 -0
- src/api/kb.py +136 -0
- src/api/siem.py +308 -0
- src/core/__init__.py +10 -0
- src/core/config.py +242 -0
- src/core/config_storage.py +684 -0
- src/core/dto.py +50 -0
- src/core/errors.py +36 -0
- src/core/logging.py +128 -0
- src/integrations/__init__.py +8 -0
- src/integrations/case_management/__init__.py +5 -0
- src/integrations/case_management/iris/__init__.py +11 -0
- src/integrations/case_management/iris/iris_client.py +885 -0
- src/integrations/case_management/iris/iris_http.py +274 -0
- src/integrations/case_management/iris/iris_mapper.py +263 -0
- src/integrations/case_management/iris/iris_models.py +128 -0
- src/integrations/case_management/thehive/__init__.py +8 -0
- src/integrations/case_management/thehive/thehive_client.py +193 -0
- src/integrations/case_management/thehive/thehive_http.py +147 -0
- src/integrations/case_management/thehive/thehive_mapper.py +190 -0
- src/integrations/case_management/thehive/thehive_models.py +125 -0
- src/integrations/cti/__init__.py +6 -0
- src/integrations/cti/local_tip/__init__.py +10 -0
- src/integrations/cti/local_tip/local_tip_client.py +90 -0
- src/integrations/cti/local_tip/local_tip_http.py +110 -0
- src/integrations/cti/opencti/__init__.py +10 -0
- src/integrations/cti/opencti/opencti_client.py +101 -0
- src/integrations/cti/opencti/opencti_http.py +418 -0
- src/integrations/edr/__init__.py +6 -0
- src/integrations/edr/elastic_defend/__init__.py +6 -0
- src/integrations/edr/elastic_defend/elastic_defend_client.py +351 -0
- src/integrations/edr/elastic_defend/elastic_defend_http.py +162 -0
- src/integrations/eng/__init__.py +10 -0
- src/integrations/eng/clickup/__init__.py +8 -0
- src/integrations/eng/clickup/clickup_client.py +513 -0
- src/integrations/eng/clickup/clickup_http.py +156 -0
- src/integrations/eng/github/__init__.py +8 -0
- src/integrations/eng/github/github_client.py +169 -0
- src/integrations/eng/github/github_http.py +158 -0
- src/integrations/eng/trello/__init__.py +8 -0
- src/integrations/eng/trello/trello_client.py +207 -0
- src/integrations/eng/trello/trello_http.py +162 -0
- src/integrations/kb/__init__.py +12 -0
- src/integrations/kb/fs_kb_client.py +313 -0
- src/integrations/siem/__init__.py +6 -0
- src/integrations/siem/elastic/__init__.py +6 -0
- src/integrations/siem/elastic/elastic_client.py +3319 -0
- src/integrations/siem/elastic/elastic_http.py +165 -0
- src/mcp/README.md +183 -0
- src/mcp/TOOLS.md +2827 -0
- src/mcp/__init__.py +13 -0
- src/mcp/__main__.py +18 -0
- src/mcp/agent_profiles.py +408 -0
- src/mcp/flow_agent_profiles.py +424 -0
- src/mcp/mcp_server.py +4086 -0
- src/mcp/rules_engine.py +487 -0
- src/mcp/runbook_manager.py +264 -0
- src/orchestrator/__init__.py +11 -0
- src/orchestrator/incident_workflow.py +244 -0
- src/orchestrator/tools_case.py +1085 -0
- src/orchestrator/tools_cti.py +359 -0
- src/orchestrator/tools_edr.py +315 -0
- src/orchestrator/tools_eng.py +378 -0
- src/orchestrator/tools_kb.py +156 -0
- src/orchestrator/tools_siem.py +1709 -0
- src/web/__init__.py +8 -0
- src/web/config_server.py +511 -0
src/orchestrator/tools_cti.py
@@ -0,0 +1,359 @@
"""
LLM-callable tools for CTI (Cyber Threat Intelligence) operations.

These functions wrap the CTI client interface and provide
LLM-friendly error handling and return values.
"""

from __future__ import annotations

import concurrent.futures
from typing import Any, Dict, List, Optional

from ..core.errors import IntegrationError
from ..core.logging import get_logger

logger = get_logger("sami.orchestrator.tools_cti")


def lookup_hash_ti(
    hash_value: str,
    client=None,  # type: ignore
    clients: Optional[List] = None,  # type: ignore
) -> Dict[str, Any]:
    """
    Look up a hash in the threat intelligence platform(s).

    If multiple clients are provided, queries all platforms concurrently and merges results.

    Tool schema:
    - name: lookup_hash_ti
    - description: Look up a file hash (MD5, SHA1, SHA256, SHA512) in the threat intelligence platform to get threat intelligence information
    - parameters:
        - hash_value (str, required): The hash value to look up

    Args:
        hash_value: The hash value to look up.
        client: Single CTI client (for backward compatibility).
        clients: List of CTI clients (for multi-platform support).

    Returns:
        Dictionary containing merged threat intelligence information from all platforms.

    Raises:
        IntegrationError: If all lookups fail.
    """
    # Determine which clients to use
    cti_clients = clients if clients is not None else ([client] if client is not None else [])

    if not cti_clients:
        raise IntegrationError("No CTI client(s) provided")

    # If only one client, use simple path for backward compatibility
    if len(cti_clients) == 1:
        try:
            result = cti_clients[0].lookup_hash(hash_value)
            # Ensure result has the expected structure for threat assessment
            if not isinstance(result, dict):
                result = {"value": hash_value, "found": False, "indicators": []}

            # Generate threat assessment for single client too
            threat_assessment = _generate_threat_assessment(result)

            return {
                "success": True,
                "hash_value": hash_value,
                "threat_intelligence": result,
                "sources": [_get_client_type(cti_clients[0])],
                "sources_successful": [_get_client_type(cti_clients[0])],
                "threat_assessment": threat_assessment,
            }
        except Exception as e:
            raise IntegrationError(f"Failed to lookup hash in threat intelligence: {str(e)}") from e

    # Multiple clients - query concurrently and merge
    return _lookup_hash_ti_multi(hash_value, cti_clients)


def _get_client_type(client) -> str:
    """Get the type name of a CTI client."""
    client_class_name = client.__class__.__name__
    if "LocalTip" in client_class_name:
        return "local_tip"
    elif "OpenCTI" in client_class_name:
        return "opencti"
    return "unknown"


def _lookup_hash_ti_multi(hash_value: str, clients: List) -> Dict[str, Any]:
    """
    Query multiple CTI platforms concurrently and merge results.

    Args:
        hash_value: The hash value to look up.
        clients: List of CTI clients to query.

    Returns:
        Merged threat intelligence information from all platforms.
    """
    results = {}
    errors = {}
    sources = []

    def query_client(client):
        """Query a single client and return results."""
        client_type = _get_client_type(client)
        try:
            logger.debug(f"Querying {client_type} for hash {hash_value[:16]}...")
            result = client.lookup_hash(hash_value)
            return {
                "client_type": client_type,
                "success": True,
                "result": result,
                "error": None,
            }
        except Exception as e:
            logger.warning(f"Failed to query {client_type} for hash {hash_value[:16]}...: {e}")
            return {
                "client_type": client_type,
                "success": False,
                "result": None,
                "error": str(e),
            }

    # Query all clients concurrently
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(clients)) as executor:
        future_to_client = {executor.submit(query_client, client): client for client in clients}

        for future in concurrent.futures.as_completed(future_to_client):
            response = future.result()
            client_type = response["client_type"]
            sources.append(client_type)

            if response["success"]:
                results[client_type] = response["result"]
            else:
                errors[client_type] = response["error"]

    # Merge results
    merged = _merge_cti_results(results, hash_value)

    # Build response with clear threat assessment
    response = {
        "success": len(results) > 0,  # Success if at least one platform returned results
        "hash_value": hash_value,
        "threat_intelligence": merged,
        "sources": sources,
        "sources_successful": list(results.keys()),
        # Add clear threat assessment summary for LLM understanding
        "threat_assessment": _generate_threat_assessment(merged),
    }

    if errors:
        response["sources_failed"] = errors

    if not results:
        # All platforms failed
        error_msg = "; ".join([f"{k}: {v}" for k, v in errors.items()])
        raise IntegrationError(f"All CTI lookups failed: {error_msg}")

    return response


def _generate_threat_assessment(threat_intel: Dict[str, Any]) -> Dict[str, Any]:
    """
    Generate a clear threat assessment summary for LLM understanding.

    Args:
        threat_intel: Merged threat intelligence data

    Returns:
        Dictionary with clear threat assessment
    """
    assessment = {
        "is_malicious": False,
        "is_suspicious": False,
        "is_benign": False,
        "threat_level": "unknown",
        "confidence": "low",
        "summary": "",
    }

    # Check if hash was found
    if not threat_intel.get("found", False):
        assessment["is_benign"] = True
        assessment["threat_level"] = "benign"
        assessment["confidence"] = "medium"
        assessment["summary"] = "Hash not found in threat intelligence databases - likely benign or unknown"
        return assessment

    # Check classification
    classification = threat_intel.get("classification", "").lower()
    if classification == "malicious":
        assessment["is_malicious"] = True
        assessment["threat_level"] = "malicious"
        assessment["confidence"] = "high"
    elif classification == "suspicious":
        assessment["is_suspicious"] = True
        assessment["threat_level"] = "suspicious"
        assessment["confidence"] = "medium"
    elif classification == "benign":
        assessment["is_benign"] = True
        assessment["threat_level"] = "benign"
        assessment["confidence"] = "high"

    # Check threat score
    threat_score = threat_intel.get("threat_score")
    if threat_score is not None:
        if threat_score >= 70:
            assessment["is_malicious"] = True
            assessment["threat_level"] = "malicious"
            assessment["confidence"] = "high"
        elif threat_score >= 40:
            assessment["is_suspicious"] = True
            if assessment["threat_level"] == "unknown":
                assessment["threat_level"] = "suspicious"
                assessment["confidence"] = "medium"
        elif threat_score < 30:
            assessment["is_benign"] = True
            if assessment["threat_level"] == "unknown":
                assessment["threat_level"] = "benign"
                assessment["confidence"] = "medium"

    # Check labels for threat indicators
    labels = threat_intel.get("labels", [])
    malicious_labels = ["malware", "trojan", "ransomware", "virus", "backdoor", "rootkit", "spyware", "adware", "exploit"]
    suspicious_labels = ["suspicious", "potentially_unwanted", "phishing", "crypto_miner"]

    has_malicious_label = any(label.lower() in malicious_labels for label in labels)
    has_suspicious_label = any(label.lower() in suspicious_labels for label in labels)

    if has_malicious_label:
        assessment["is_malicious"] = True
        assessment["threat_level"] = "malicious"
        assessment["confidence"] = "high"
    elif has_suspicious_label:
        assessment["is_suspicious"] = True
        if assessment["threat_level"] == "unknown":
            assessment["threat_level"] = "suspicious"
            assessment["confidence"] = "medium"

    # Check if indicators exist
    indicators = threat_intel.get("indicators", [])
    if indicators:
        # Check indicator scores
        high_score_indicators = [ind for ind in indicators if ind.get("score", 0) >= 70]
        if high_score_indicators:
            assessment["is_malicious"] = True
            assessment["threat_level"] = "malicious"
            assessment["confidence"] = "high"
        elif not assessment["is_malicious"]:
            assessment["is_suspicious"] = True
            if assessment["threat_level"] == "unknown":
                assessment["threat_level"] = "suspicious"
                assessment["confidence"] = "medium"

    # Generate summary
    if assessment["is_malicious"]:
        assessment["summary"] = f"MALICIOUS: Hash is known malicious (threat_score: {threat_score}, classification: {classification}, labels: {labels}). Take immediate action."
    elif assessment["is_suspicious"]:
        assessment["summary"] = f"SUSPICIOUS: Hash shows suspicious indicators (threat_score: {threat_score}, labels: {labels}). Investigate further."
    elif assessment["is_benign"]:
        assessment["summary"] = f"BENIGN: Hash appears benign (threat_score: {threat_score}, classification: {classification}). Low risk."
    else:
        assessment["summary"] = f"UNKNOWN: Hash found in database but threat level unclear (threat_score: {threat_score}, indicators: {len(indicators)}). Review indicators."

    return assessment


def _merge_cti_results(results: Dict[str, Dict[str, Any]], hash_value: str) -> Dict[str, Any]:
    """
    Merge results from multiple CTI platforms into a unified format.

    Args:
        results: Dictionary mapping client_type to result data.
        hash_value: The hash value that was queried.

    Returns:
        Merged threat intelligence data.
    """
    merged = {
        "value": hash_value,
        "found": False,
        "platforms": {},
        "indicators": [],
        "threat_score": None,
        "classification": None,
        "labels": [],
        "kill_chain_phases": [],
    }

    # Process each platform's results
    for platform, data in results.items():
        if data is None:
            continue

        # Store platform-specific data
        merged["platforms"][platform] = data

        # Extract common fields
        if platform == "local_tip":
            # Local TIP format
            if data.get("threat_score") is not None:
                # Use highest threat score if multiple platforms
                if merged["threat_score"] is None or data["threat_score"] > merged["threat_score"]:
                    merged["threat_score"] = data["threat_score"]

            if data.get("classification"):
                # Prefer malicious > suspicious > benign
                if not merged["classification"] or data["classification"] == "malicious":
                    merged["classification"] = data["classification"]
                elif data["classification"] == "suspicious" and merged["classification"] != "malicious":
                    merged["classification"] = data["classification"]

            if data.get("value"):
                merged["found"] = True

        elif platform == "opencti":
            # OpenCTI format
            if data.get("found"):
                merged["found"] = True

            # Merge indicators
            if data.get("indicators"):
                for indicator in data["indicators"]:
                    # Check if we already have this indicator (by ID or pattern)
                    existing = next(
                        (ind for ind in merged["indicators"]
                         if ind.get("id") == indicator.get("id") or
                         ind.get("pattern") == indicator.get("pattern")),
                        None
                    )
                    if not existing:
                        merged["indicators"].append(indicator)

                    # Merge labels
                    if indicator.get("labels"):
                        for label in indicator["labels"]:
                            if label not in merged["labels"]:
                                merged["labels"].append(label)

                    # Merge kill chain phases
                    if indicator.get("kill_chain_phases"):
                        for phase in indicator["kill_chain_phases"]:
                            existing_phase = next(
                                (p for p in merged["kill_chain_phases"]
                                 if p.get("kill_chain_name") == phase.get("kill_chain_name") and
                                 p.get("phase_name") == phase.get("phase_name")),
                                None
                            )
                            if not existing_phase:
                                merged["kill_chain_phases"].append(phase)

                    # Use highest threat score
                    if indicator.get("score") is not None:
                        if merged["threat_score"] is None or indicator["score"] > merged["threat_score"]:
                            merged["threat_score"] = indicator["score"]

    return merged
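Because `lookup_hash_ti` only requires each client to expose a `lookup_hash(hash_value)` method returning a dict, the merge-and-assess path can be exercised without a live TIP. A minimal sketch, assuming two hypothetical stub clients whose class names satisfy the `_get_client_type` heuristic and an import path that matches the installed package root (both are assumptions, not part of this diff):

```python
# Hypothetical stubs; the import path below is an assumption about the installed layout.
from src.orchestrator.tools_cti import lookup_hash_ti

class LocalTipStubClient:
    """Returns the "local_tip" result shape consumed by _merge_cti_results."""
    def lookup_hash(self, hash_value):
        return {"value": hash_value, "threat_score": 85, "classification": "malicious"}

class OpenCTIStubClient:
    """Returns the "opencti" result shape: a found flag plus STIX-like indicators."""
    def lookup_hash(self, hash_value):
        return {
            "found": True,
            "indicators": [{
                "id": "indicator--0001",
                "pattern": f"[file:hashes.'SHA-256' = '{hash_value}']",
                "score": 90,
                "labels": ["ransomware"],
                "kill_chain_phases": [],
            }],
        }

result = lookup_hash_ti(
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
    clients=[LocalTipStubClient(), OpenCTIStubClient()],
)
print(result["threat_assessment"]["threat_level"])  # "malicious": score 90 and the ransomware label both trip the high band
print(result["sources_successful"])                 # e.g. ["local_tip", "opencti"] (order depends on completion)
```

The single-client path returns the same envelope, so callers do not need to care whether one or several platforms are configured.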
src/orchestrator/tools_edr.py
@@ -0,0 +1,315 @@
"""
LLM-callable tools for EDR operations.

These functions wrap the generic EDRClient interface and provide
LLM-friendly error handling and return values.
"""

from __future__ import annotations

from typing import Any, Dict, List, Optional

from ..api.edr import EDRClient
from ..core.errors import IntegrationError


def get_endpoint_summary(
    endpoint_id: str,
    client: EDRClient = None,  # type: ignore
) -> Dict[str, Any]:
    """
    Get a summary of an endpoint.

    Tool schema:
    - name: get_endpoint_summary
    - description: Retrieve summary information about an endpoint including
      hostname, platform, last seen time, primary user, and isolation status.
    - parameters:
        - endpoint_id (str, required): The endpoint ID.

    Args:
        endpoint_id: The endpoint ID.
        client: The EDR client.

    Returns:
        Dictionary containing endpoint summary.

    Raises:
        IntegrationError: If retrieving endpoint fails.
    """
    if client is None:
        raise IntegrationError("EDR client not provided")

    try:
        endpoint = client.get_endpoint_summary(endpoint_id)

        return {
            "success": True,
            "endpoint": {
                "id": endpoint.id,
                "hostname": endpoint.hostname,
                "platform": endpoint.platform.value,
                "last_seen": endpoint.last_seen.isoformat()
                if endpoint.last_seen
                else None,
                "primary_user": endpoint.primary_user,
                "is_isolated": endpoint.is_isolated,
            },
        }
    except Exception as e:
        raise IntegrationError(f"Failed to get endpoint summary for {endpoint_id}: {str(e)}") from e


def get_detection_details(
    detection_id: str,
    client: EDRClient = None,  # type: ignore
) -> Dict[str, Any]:
    """
    Get details of a detection.

    Tool schema:
    - name: get_detection_details
    - description: Retrieve detailed information about a specific detection
      including type, severity, description, associated file hash, and process.
    - parameters:
        - detection_id (str, required): The detection ID.

    Args:
        detection_id: The detection ID.
        client: The EDR client.

    Returns:
        Dictionary containing detection details.

    Raises:
        IntegrationError: If retrieving detection fails.
    """
    if client is None:
        raise IntegrationError("EDR client not provided")

    try:
        detection = client.get_detection_details(detection_id)

        return {
            "success": True,
            "detection": {
                "id": detection.id,
                "endpoint_id": detection.endpoint_id,
                "created_at": detection.created_at.isoformat(),
                "detection_type": detection.detection_type.value,
                "severity": detection.severity,
                "description": detection.description,
                "file_hash": detection.file_hash,
                "process": {
                    "pid": detection.process.pid,
                    "name": detection.process.name,
                    "path": detection.process.path,
                    "user": detection.process.user,
                    "command_line": detection.process.command_line,
                }
                if detection.process
                else None,
            },
        }
    except Exception as e:
        raise IntegrationError(f"Failed to get detection details for {detection_id}: {str(e)}") from e


def isolate_endpoint(
    endpoint_id: str,
    client: EDRClient = None,  # type: ignore
) -> Dict[str, Any]:
    """
    Isolate an endpoint from the network.

    Tool schema:
    - name: isolate_endpoint
    - description: Isolate an endpoint from the network to prevent further
      compromise or lateral movement. This is a critical response action.
    - parameters:
        - endpoint_id (str, required): The endpoint ID to isolate.

    Args:
        endpoint_id: The endpoint ID.
        client: The EDR client.

    Returns:
        Dictionary containing isolation action details.

    Raises:
        IntegrationError: If isolation fails.
    """
    if client is None:
        raise IntegrationError("EDR client not provided")

    try:
        action = client.isolate_endpoint(endpoint_id)

        return {
            "success": True,
            "action": {
                "endpoint_id": action.endpoint_id,
                "result": action.result.value,
                "requested_at": action.requested_at.isoformat(),
                "completed_at": action.completed_at.isoformat()
                if action.completed_at
                else None,
                "message": action.message,
            },
        }
    except Exception as e:
        raise IntegrationError(f"Failed to isolate endpoint {endpoint_id}: {str(e)}") from e


def release_endpoint_isolation(
    endpoint_id: str,
    client: EDRClient = None,  # type: ignore
) -> Dict[str, Any]:
    """
    Release an endpoint from isolation.

    Tool schema:
    - name: release_endpoint_isolation
    - description: Release an endpoint from network isolation, restoring
      normal network connectivity.
    - parameters:
        - endpoint_id (str, required): The endpoint ID to release.

    Args:
        endpoint_id: The endpoint ID.
        client: The EDR client.

    Returns:
        Dictionary containing release action details.

    Raises:
        IntegrationError: If release fails.
    """
    if client is None:
        raise IntegrationError("EDR client not provided")

    try:
        action = client.release_endpoint_isolation(endpoint_id)

        return {
            "success": True,
            "action": {
                "endpoint_id": action.endpoint_id,
                "result": action.result.value,
                "requested_at": action.requested_at.isoformat(),
                "completed_at": action.completed_at.isoformat()
                if action.completed_at
                else None,
                "message": action.message,
            },
        }
    except Exception as e:
        raise IntegrationError(
            f"Failed to release endpoint isolation for {endpoint_id}: {str(e)}"
        ) from e


def kill_process_on_endpoint(
    endpoint_id: str,
    pid: int,
    client: EDRClient = None,  # type: ignore
) -> Dict[str, Any]:
    """
    Kill a process on an endpoint.

    Tool schema:
    - name: kill_process_on_endpoint
    - description: Terminate a specific process running on an endpoint by
      its process ID. Use with caution as this is a disruptive action.
    - parameters:
        - endpoint_id (str, required): The endpoint ID.
        - pid (int, required): The process ID to kill.

    Args:
        endpoint_id: The endpoint ID.
        pid: The process ID.
        client: The EDR client.

    Returns:
        Dictionary containing kill action details.

    Raises:
        IntegrationError: If killing process fails.
    """
    if client is None:
        raise IntegrationError("EDR client not provided")

    try:
        action = client.kill_process_on_endpoint(endpoint_id, pid)

        return {
            "success": True,
            "action": {
                "endpoint_id": action.endpoint_id,
                "pid": action.pid,
                "result": action.result.value,
                "requested_at": action.requested_at.isoformat(),
                "completed_at": action.completed_at.isoformat()
                if action.completed_at
                else None,
                "message": action.message,
            },
        }
    except Exception as e:
        raise IntegrationError(
            f"Failed to kill process {pid} on endpoint {endpoint_id}: {str(e)}"
        ) from e


def collect_forensic_artifacts(
    endpoint_id: str,
    artifact_types: List[str],
    client: EDRClient = None,  # type: ignore
) -> Dict[str, Any]:
    """
    Collect forensic artifacts from an endpoint.

    Tool schema:
    - name: collect_forensic_artifacts
    - description: Initiate collection of forensic artifacts from an endpoint,
      such as process lists, network connections, file system artifacts, etc.
    - parameters:
        - endpoint_id (str, required): The endpoint ID.
        - artifact_types (list[str], required): List of artifact types to collect
          (e.g., ["processes", "network", "filesystem"]).

    Args:
        endpoint_id: The endpoint ID.
        artifact_types: List of artifact types to collect.
        client: The EDR client.

    Returns:
        Dictionary containing collection request details.

    Raises:
        IntegrationError: If collection request fails.
    """
    if client is None:
        raise IntegrationError("EDR client not provided")

    try:
        request = client.collect_forensic_artifacts(endpoint_id, artifact_types)

        return {
            "success": True,
            "request": {
                "endpoint_id": request.endpoint_id,
                "artifact_types": request.artifact_types,
                "result": request.result.value,
                "requested_at": request.requested_at.isoformat(),
                "completed_at": request.completed_at.isoformat()
                if request.completed_at
                else None,
                "message": request.message,
            },
        }
    except Exception as e:
        raise IntegrationError(
            f"Failed to collect forensic artifacts from endpoint {endpoint_id}: {str(e)}"
        ) from e
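The EDR wrappers are duck-typed in the same way: any object exposing the few attributes read above will do, which makes the response envelope easy to inspect offline. A minimal sketch, assuming a hypothetical in-memory client and the same caveat about the import path:

```python
# Hypothetical stand-in; attribute names mirror what isolate_endpoint() reads above.
from datetime import datetime, timezone
from types import SimpleNamespace

from src.orchestrator.tools_edr import isolate_endpoint  # import root is an assumption

class FakeEDRClient:
    def isolate_endpoint(self, endpoint_id):
        return SimpleNamespace(
            endpoint_id=endpoint_id,
            result=SimpleNamespace(value="pending"),  # stands in for an enum member with .value
            requested_at=datetime.now(timezone.utc),
            completed_at=None,
            message="isolation requested",
        )

out = isolate_endpoint("endpoint-123", client=FakeEDRClient())
print(out["action"]["result"])        # "pending"
print(out["action"]["completed_at"])  # None

# Calling any of these wrappers without a client fails fast with
# IntegrationError("EDR client not provided"), before any network activity.
```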