iflow-mcp-m507_ai-soc-agent 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/METADATA +410 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/RECORD +85 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/top_level.txt +1 -0
- src/__init__.py +8 -0
- src/ai_controller/README.md +139 -0
- src/ai_controller/__init__.py +12 -0
- src/ai_controller/agent_executor.py +596 -0
- src/ai_controller/cli/__init__.py +2 -0
- src/ai_controller/cli/main.py +243 -0
- src/ai_controller/session_manager.py +409 -0
- src/ai_controller/web/__init__.py +2 -0
- src/ai_controller/web/server.py +1181 -0
- src/ai_controller/web/static/css/README.md +102 -0
- src/api/__init__.py +13 -0
- src/api/case_management.py +271 -0
- src/api/edr.py +187 -0
- src/api/kb.py +136 -0
- src/api/siem.py +308 -0
- src/core/__init__.py +10 -0
- src/core/config.py +242 -0
- src/core/config_storage.py +684 -0
- src/core/dto.py +50 -0
- src/core/errors.py +36 -0
- src/core/logging.py +128 -0
- src/integrations/__init__.py +8 -0
- src/integrations/case_management/__init__.py +5 -0
- src/integrations/case_management/iris/__init__.py +11 -0
- src/integrations/case_management/iris/iris_client.py +885 -0
- src/integrations/case_management/iris/iris_http.py +274 -0
- src/integrations/case_management/iris/iris_mapper.py +263 -0
- src/integrations/case_management/iris/iris_models.py +128 -0
- src/integrations/case_management/thehive/__init__.py +8 -0
- src/integrations/case_management/thehive/thehive_client.py +193 -0
- src/integrations/case_management/thehive/thehive_http.py +147 -0
- src/integrations/case_management/thehive/thehive_mapper.py +190 -0
- src/integrations/case_management/thehive/thehive_models.py +125 -0
- src/integrations/cti/__init__.py +6 -0
- src/integrations/cti/local_tip/__init__.py +10 -0
- src/integrations/cti/local_tip/local_tip_client.py +90 -0
- src/integrations/cti/local_tip/local_tip_http.py +110 -0
- src/integrations/cti/opencti/__init__.py +10 -0
- src/integrations/cti/opencti/opencti_client.py +101 -0
- src/integrations/cti/opencti/opencti_http.py +418 -0
- src/integrations/edr/__init__.py +6 -0
- src/integrations/edr/elastic_defend/__init__.py +6 -0
- src/integrations/edr/elastic_defend/elastic_defend_client.py +351 -0
- src/integrations/edr/elastic_defend/elastic_defend_http.py +162 -0
- src/integrations/eng/__init__.py +10 -0
- src/integrations/eng/clickup/__init__.py +8 -0
- src/integrations/eng/clickup/clickup_client.py +513 -0
- src/integrations/eng/clickup/clickup_http.py +156 -0
- src/integrations/eng/github/__init__.py +8 -0
- src/integrations/eng/github/github_client.py +169 -0
- src/integrations/eng/github/github_http.py +158 -0
- src/integrations/eng/trello/__init__.py +8 -0
- src/integrations/eng/trello/trello_client.py +207 -0
- src/integrations/eng/trello/trello_http.py +162 -0
- src/integrations/kb/__init__.py +12 -0
- src/integrations/kb/fs_kb_client.py +313 -0
- src/integrations/siem/__init__.py +6 -0
- src/integrations/siem/elastic/__init__.py +6 -0
- src/integrations/siem/elastic/elastic_client.py +3319 -0
- src/integrations/siem/elastic/elastic_http.py +165 -0
- src/mcp/README.md +183 -0
- src/mcp/TOOLS.md +2827 -0
- src/mcp/__init__.py +13 -0
- src/mcp/__main__.py +18 -0
- src/mcp/agent_profiles.py +408 -0
- src/mcp/flow_agent_profiles.py +424 -0
- src/mcp/mcp_server.py +4086 -0
- src/mcp/rules_engine.py +487 -0
- src/mcp/runbook_manager.py +264 -0
- src/orchestrator/__init__.py +11 -0
- src/orchestrator/incident_workflow.py +244 -0
- src/orchestrator/tools_case.py +1085 -0
- src/orchestrator/tools_cti.py +359 -0
- src/orchestrator/tools_edr.py +315 -0
- src/orchestrator/tools_eng.py +378 -0
- src/orchestrator/tools_kb.py +156 -0
- src/orchestrator/tools_siem.py +1709 -0
- src/web/__init__.py +8 -0
- src/web/config_server.py +511 -0
@@ -0,0 +1,264 @@
+"""
+Runbook Manager for reading and parsing investigation runbooks.
+"""
+
+from __future__ import annotations
+
+import os
+import re
+from typing import Any, Dict, List, Optional
+
+from ..core.errors import IntegrationError
+
+
+class RunbookManager:
+    """Manages runbook discovery and parsing."""
+
+    def __init__(self, runbooks_dir: Optional[str] = None):
+        """
+        Initialize runbook manager.
+
+        Args:
+            runbooks_dir: Path to runbooks directory.
+        """
+        if runbooks_dir is None:
+            # Default to run_books/ relative to project root
+            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+            runbooks_dir = os.path.join(project_root, "run_books")
+
+        self.runbooks_dir = runbooks_dir
+
+    def find_runbook(self, runbook_name: str, soc_tier: Optional[str] = None) -> Optional[str]:
+        """
+        Find runbook file by name.
+
+        Args:
+            runbook_name: Name of runbook (e.g., "initial_alert_triage" or "soc1/triage/initial_alert_triage")
+            soc_tier: Optional SOC tier to limit search
+
+        Returns:
+            Path to runbook file, or None if not found
+        """
+        # If runbook_name already includes path, use it directly
+        if "/" in runbook_name or runbook_name.startswith("soc"):
+            # Try as-is first
+            runbook_path = os.path.join(self.runbooks_dir, f"{runbook_name}.md")
+            if os.path.exists(runbook_path):
+                return runbook_path
+
+            # Try without .md extension
+            runbook_path = os.path.join(self.runbooks_dir, runbook_name)
+            if os.path.exists(runbook_path):
+                return runbook_path
+
+        # Search for runbook
+        for root, dirs, files in os.walk(self.runbooks_dir):
+            # Filter by soc_tier if provided
+            if soc_tier and f"/{soc_tier}/" not in root:
+                continue
+
+            for file in files:
+                if file.endswith(".md"):
+                    # Check if filename matches (without extension)
+                    if file[:-3] == runbook_name or file[:-3].endswith(f"/{runbook_name}"):
+                        return os.path.join(root, file)
+
+                    # Check if filename contains runbook_name
+                    if runbook_name in file[:-3]:
+                        return os.path.join(root, file)
+
+        return None
+
+    def list_runbooks(
+        self,
+        soc_tier: Optional[str] = None,
+        category: Optional[str] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        List available runbooks.
+
+        Args:
+            soc_tier: Filter by SOC tier (soc1, soc2, soc3)
+            category: Filter by category (triage, investigation, response, forensics, correlation)
+
+        Returns:
+            List of runbook metadata dictionaries
+        """
+        runbooks = []
+
+        if not os.path.exists(self.runbooks_dir):
+            return runbooks
+
+        for root, dirs, files in os.walk(self.runbooks_dir):
+            # Filter by soc_tier
+            if soc_tier and f"/{soc_tier}/" not in root:
+                continue
+
+            # Filter by category
+            if category:
+                if f"/{category}/" not in root:
+                    continue
+
+            for file in files:
+                if file.endswith(".md") and file not in [
+                    "README.md",
+                    "index.md",
+                    "SOC_TIER_ORGANIZATION_PLAN.md",
+                    "IMPLEMENTATION_SUMMARY.md",
+                    "RUNBOOK_INTEGRATION_PROPOSAL.md",
+                    "AGENT_PROFILES_IMPLEMENTATION.md",
+                    "guidelines.md", # SOC tier guidelines are not executable runbooks
+                ]:
+                    runbook_path = os.path.join(root, file)
+                    runbook_meta = self.parse_runbook_metadata(runbook_path)
+
+                    # Get relative path from runbooks_dir
+                    rel_path = os.path.relpath(runbook_path, self.runbooks_dir)
+                    runbook_name = rel_path[:-3] # Remove .md extension
+
+                    runbooks.append({
+                        "name": runbook_name,
+                        "path": runbook_path,
+                        "soc_tier": runbook_meta.get("soc_tier"),
+                        "category": runbook_meta.get("category"),
+                        "objective": runbook_meta.get("objective", "")[:200],
+                        "description": runbook_meta.get("description", "")[:200]
+                    })
+
+        return runbooks
+
+    def read_runbook(self, runbook_path: str) -> str:
+        """
+        Read runbook content from file.
+
+        Args:
+            runbook_path: Path to runbook file
+
+        Returns:
+            Runbook content as string
+        """
+        if not os.path.exists(runbook_path):
+            raise IntegrationError(f"Runbook not found: {runbook_path}")
+
+        with open(runbook_path, "r", encoding="utf-8") as f:
+            return f.read()
+
+    def parse_runbook_metadata(self, runbook_path: str, content: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Parse metadata from runbook markdown.
+
+        Args:
+            runbook_path: Path to runbook file
+            content: Optional runbook content (if already loaded)
+
+        Returns:
+            Dictionary with parsed metadata
+        """
+        if content is None:
+            content = self.read_runbook(runbook_path)
+
+        metadata = {}
+
+        # Extract SOC tier from path
+        if "/soc1/" in runbook_path:
+            metadata["soc_tier"] = "soc1"
+        elif "/soc2/" in runbook_path:
+            metadata["soc_tier"] = "soc2"
+        elif "/soc3/" in runbook_path:
+            metadata["soc_tier"] = "soc3"
+
+        # Extract category from path
+        if "/triage/" in runbook_path:
+            metadata["category"] = "triage"
+        elif "/investigation/" in runbook_path:
+            metadata["category"] = "investigation"
+        elif "/response/" in runbook_path:
+            metadata["category"] = "response"
+        elif "/forensics/" in runbook_path:
+            metadata["category"] = "forensics"
+        elif "/correlation/" in runbook_path:
+            metadata["category"] = "correlation"
+        elif "/enrichment/" in runbook_path:
+            metadata["category"] = "enrichment"
+        elif "/remediation/" in runbook_path:
+            metadata["category"] = "remediation"
+        elif "/cases/" in runbook_path:
+            # Case-specific runbooks are sub-runbooks
+            metadata["category"] = "cases"
+
+        # Extract objective
+        obj_match = re.search(r"## Objective\s*\n\s*\n(.*?)(?=\n##|\Z)", content, re.DOTALL | re.IGNORECASE)
+        if obj_match:
+            metadata["objective"] = obj_match.group(1).strip()
+
+        # Extract scope
+        scope_match = re.search(r"## Scope\s*\n\s*\n(.*?)(?=\n##|\Z)", content, re.DOTALL | re.IGNORECASE)
+        if scope_match:
+            metadata["scope"] = scope_match.group(1).strip()
+
+        # Extract tools
+        tools_match = re.search(r"## Tools\s*\n\s*\n(.*?)(?=\n##|\Z)", content, re.DOTALL | re.IGNORECASE)
+        if tools_match:
+            tools_text = tools_match.group(1)
+            # Extract tool names (look for backtick-wrapped tool names)
+            tool_names = re.findall(r"`([a-z_]+)`", tools_text, re.IGNORECASE)
+            metadata["tools"] = list(set(tool_names)) # Remove duplicates
+
+        # Extract inputs
+        inputs_match = re.search(r"## Inputs\s*\n\s*\n(.*?)(?=\n##|\Z)", content, re.DOTALL | re.IGNORECASE)
+        if inputs_match:
+            inputs_text = inputs_match.group(1)
+            # Extract input variables
+            input_vars = re.findall(r"\$\{([A-Z_]+)\}", inputs_text)
+            metadata["inputs"] = list(set(input_vars))
+
+        # Extract workflow steps count
+        steps_match = re.search(r"## Workflow Steps", content, re.IGNORECASE)
+        if steps_match:
+            # Count numbered steps
+            step_count = len(re.findall(r"^\d+\.\s+\*\*", content, re.MULTILINE))
+            metadata["step_count"] = step_count
+
+        return metadata
+
+    def extract_workflow_steps(self, content: str) -> List[Dict[str, Any]]:
+        """
+        Extract workflow steps from runbook content.
+
+        Args:
+            content: Runbook content
+
+        Returns:
+            List of workflow step dictionaries
+        """
+        steps = []
+
+        # Find workflow steps section
+        steps_section_match = re.search(
+            r"## Workflow Steps.*?\n(.*?)(?=\n```|\n##|\Z)",
+            content,
+            re.DOTALL | re.IGNORECASE
+        )
+
+        if not steps_section_match:
+            return steps
+
+        steps_text = steps_section_match.group(1)
+
+        # Extract numbered steps
+        step_pattern = r"(\d+)\.\s+\*\*(.*?)\*\*:(.*?)(?=\d+\.\s+\*\*|\Z)"
+        step_matches = re.finditer(step_pattern, steps_text, re.DOTALL)
+
+        for match in step_matches:
+            step_num = int(match.group(1))
+            step_title = match.group(2).strip()
+            step_content = match.group(3).strip()
+
+            steps.append({
+                "step_number": step_num,
+                "title": step_title,
+                "content": step_content
+            })
+
+        return steps
+
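The 264-line hunk above corresponds to `src/mcp/runbook_manager.py` in the file list. A minimal usage sketch follows; the `./run_books` directory, the `soc1/triage` layout, and the runbook name `initial_alert_triage` are illustrative values taken from the docstrings above, and importing through the `src` top-level package is assumed from the wheel layout rather than documented.

```python
# Hedged sketch of driving RunbookManager; paths and runbook names are assumptions.
from src.mcp.runbook_manager import RunbookManager

manager = RunbookManager(runbooks_dir="./run_books")

# Discover soc1 triage runbooks and print their parsed metadata.
for meta in manager.list_runbooks(soc_tier="soc1", category="triage"):
    print(meta["name"], meta["soc_tier"], meta["category"])

# Resolve a runbook by name, then parse its sections and workflow steps.
path = manager.find_runbook("initial_alert_triage", soc_tier="soc1")
if path:
    content = manager.read_runbook(path)
    metadata = manager.parse_runbook_metadata(path, content)
    steps = manager.extract_workflow_steps(content)
    print(metadata.get("objective"), len(steps))
```

Note that `find_runbook` falls back to a substring match on filenames, so an ambiguous name may resolve to the first runbook that happens to contain it.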
@@ -0,0 +1,11 @@
+"""
+Orchestrator and LLM tools for SamiGPT.
+
+This package contains:
+- ``incident_workflow.py``: High-level workflows that coordinate between
+  case management, SIEM, and EDR clients.
+- ``tools_case.py``: LLM-callable tools for case management operations.
+- ``tools_siem.py``: LLM-callable tools for SIEM operations.
+- ``tools_edr.py``: LLM-callable tools for EDR operations.
+"""
+
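This 11-line hunk is the `src/orchestrator/__init__.py` docstring from the file list. For orientation, a sketch of how the modules it names would be imported, assuming the wheel's `src` top-level package:

```python
# Import sketch for the orchestrator package described above; the `src`
# top-level package name is an assumption based on the wheel's file layout.
from src.orchestrator import incident_workflow                   # high-level workflows
from src.orchestrator import tools_case, tools_edr, tools_siem   # LLM-callable tool modules

# The workflow functions (shown in the next hunk) are plain functions that
# take API client objects as arguments rather than holding global state.
print(incident_workflow.create_incident_from_alert.__doc__)
```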
@@ -0,0 +1,244 @@
+"""
+High-level incident response workflows for SamiGPT.
+
+This module provides orchestration functions that coordinate between
+case management, SIEM, and EDR clients to perform common incident
+response tasks.
+"""
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ..api.case_management import (
+    Case,
+    CaseManagementClient,
+    CaseObservable,
+    CaseStatus,
+)
+from ..api.edr import EDRClient, Endpoint
+from ..api.siem import SIEMClient, SiemAlert
+from ..core.errors import IntegrationError
+
+
+def create_incident_from_alert(
+    alert: SiemAlert,
+    case_client: CaseManagementClient,
+    title_prefix: Optional[str] = None,
+) -> Case:
+    """
+    Create a new case in the case management system from a SIEM alert.
+
+    Args:
+        alert: The SIEM alert to convert into a case.
+        case_client: The case management client to use.
+        title_prefix: Optional prefix for the case title.
+
+    Returns:
+        The created case.
+
+    Raises:
+        IntegrationError: If case creation fails.
+    """
+    title = f"{title_prefix + ': ' if title_prefix else ''}{alert.title or 'Security Alert'}"
+    description = f"Alert from SIEM: {alert.description or 'No description'}\n\n"
+    description += f"Severity: {alert.severity}\n"
+    description += f"Source: {alert.source}\n"
+    if alert.timestamp:
+        description += f"Timestamp: {alert.timestamp.isoformat()}\n"
+
+    case = Case(
+        title=title,
+        description=description,
+        status=CaseStatus.OPEN,
+        priority=alert.severity.value if hasattr(alert.severity, "value") else "medium",
+        tags=alert.tags or [],
+    )
+
+    try:
+        created_case = case_client.create_case(case)
+        return created_case
+    except Exception as e:
+        raise IntegrationError(f"Failed to create case from alert: {e}") from e
+
+
+def enrich_case_from_siem(
+    case_id: str,
+    case_client: CaseManagementClient,
+    siem_client: SIEMClient,
+    search_terms: Optional[List[str]] = None,
+) -> List[CaseObservable]:
+    """
+    Enrich a case by searching SIEM for related events and adding observables.
+
+    Args:
+        case_id: The ID of the case to enrich.
+        case_client: The case management client.
+        siem_client: The SIEM client to search.
+        search_terms: Optional list of terms to search for in SIEM.
+
+    Returns:
+        List of observables added to the case.
+
+    Raises:
+        IntegrationError: If enrichment fails.
+    """
+    try:
+        case = case_client.get_case(case_id)
+    except Exception as e:
+        raise IntegrationError(f"Failed to retrieve case {case_id}: {e}") from e
+
+    observables_added = []
+
+    # Extract potential observables from case title/description
+    search_terms = search_terms or []
+    if case.title:
+        search_terms.append(case.title)
+    if case.description:
+        # Simple extraction - in production, use more sophisticated parsing
+        search_terms.append(case.description[:100])
+
+    # Search SIEM for related events
+    try:
+        if search_terms:
+            query = " OR ".join(search_terms[:5]) # Limit to avoid huge queries
+            events = siem_client.search_security_events(
+                query=query,
+                limit=50,
+            )
+
+            # Extract unique observables from events
+            seen_observables = set()
+            for event in events.events:
+                # Add IP addresses as observables
+                if event.ip:
+                    key = f"ip:{event.ip}"
+                    if key not in seen_observables:
+                        observable = CaseObservable(
+                            type="ip",
+                            value=event.ip,
+                            description=f"Found in SIEM event: {event.id}",
+                        )
+                        case_client.add_case_observable(case_id, observable)
+                        observables_added.append(observable)
+                        seen_observables.add(key)
+
+                # Add file hashes if present
+                if event.file_hash:
+                    key = f"hash:{event.file_hash}"
+                    if key not in seen_observables:
+                        observable = CaseObservable(
+                            type="hash",
+                            value=event.file_hash,
+                            description=f"Found in SIEM event: {event.id}",
+                        )
+                        case_client.add_case_observable(case_id, observable)
+                        observables_added.append(observable)
+                        seen_observables.add(key)
+
+    except Exception as e:
+        raise IntegrationError(f"Failed to search SIEM or add observables: {e}") from e
+
+    return observables_added
+
+
+def enrich_case_from_edr(
+    case_id: str,
+    case_client: CaseManagementClient,
+    edr_client: EDRClient,
+    endpoint_id: Optional[str] = None,
+) -> List[CaseObservable]:
+    """
+    Enrich a case by querying EDR for endpoint information and adding observables.
+
+    Args:
+        case_id: The ID of the case to enrich.
+        case_client: The case management client.
+        edr_client: The EDR client to query.
+        endpoint_id: Optional endpoint ID to focus on. If not provided, uses
+            observables from the case to find relevant endpoints.
+
+    Returns:
+        List of observables added to the case.
+
+    Raises:
+        IntegrationError: If enrichment fails.
+    """
+    try:
+        case = case_client.get_case(case_id)
+    except Exception as e:
+        raise IntegrationError(f"Failed to retrieve case {case_id}: {e}") from e
+
+    observables_added = []
+
+    try:
+        # If endpoint_id provided, get that endpoint's details
+        if endpoint_id:
+            endpoint = edr_client.get_endpoint_summary(endpoint_id)
+            if endpoint:
+                # Add endpoint hostname as observable
+                if endpoint.hostname:
+                    observable = CaseObservable(
+                        type="hostname",
+                        value=endpoint.hostname,
+                        description=f"Endpoint from EDR: {endpoint_id}",
+                    )
+                    case_client.add_case_observable(case_id, observable)
+                    observables_added.append(observable)
+
+        # Get recent detections and add file hashes as observables
+        detections = edr_client.list_detections(limit=20)
+        seen_hashes = set()
+        for detection in detections:
+            if detection.file_hash:
+                key = f"hash:{detection.file_hash}"
+                if key not in seen_hashes:
+                    observable = CaseObservable(
+                        type="hash",
+                        value=detection.file_hash,
+                        description=f"EDR detection: {detection.id}",
+                    )
+                    case_client.add_case_observable(case_id, observable)
+                    observables_added.append(observable)
+                    seen_hashes.add(key)
+
+    except Exception as e:
+        raise IntegrationError(f"Failed to query EDR or add observables: {e}") from e
+
+    return observables_added
+
+
+def close_incident(
+    case_id: str,
+    case_client: CaseManagementClient,
+    resolution_notes: Optional[str] = None,
+) -> Case:
+    """
+    Close an incident case with optional resolution notes.
+
+    Args:
+        case_id: The ID of the case to close.
+        case_client: The case management client.
+        resolution_notes: Optional notes about the resolution.
+
+    Returns:
+        The updated case.
+
+    Raises:
+        IntegrationError: If closing the case fails.
+    """
+    try:
+        # Add resolution notes as a comment if provided
+        if resolution_notes:
+            case_client.add_case_comment(
+                case_id=case_id,
+                content=f"Resolution: {resolution_notes}",
+                author=None, # System-generated
+            )
+
+        # Update status to closed
+        updated_case = case_client.update_case_status(case_id, CaseStatus.CLOSED)
+        return updated_case
+    except Exception as e:
+        raise IntegrationError(f"Failed to close case {case_id}: {e}") from e
+
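The 244-line hunk above corresponds to `src/orchestrator/incident_workflow.py`. Taken together, its four functions form a create, enrich, and close loop driven entirely by the abstract clients from `src/api`. A hedged end-to-end sketch follows, assuming already-configured client objects and assuming the `Case` model exposes an `id` attribute (neither is shown in this diff):

```python
# End-to-end sketch of the workflow functions above. The three client arguments
# are placeholders for concrete CaseManagementClient, SIEMClient, and EDRClient
# implementations; how they are constructed is not shown in this diff and is
# assumed here.
from src.orchestrator.incident_workflow import (
    close_incident,
    create_incident_from_alert,
    enrich_case_from_edr,
    enrich_case_from_siem,
)


def triage_alert(alert, case_client, siem_client, edr_client):
    """Open a case from a SIEM alert, enrich it, then close it with a note."""
    case = create_incident_from_alert(alert, case_client, title_prefix="AUTO")

    # NOTE: case.id assumes the Case model exposes an `id` field.
    siem_obs = enrich_case_from_siem(case.id, case_client, siem_client)
    edr_obs = enrich_case_from_edr(case.id, case_client, edr_client)

    return close_incident(
        case.id,
        case_client,
        resolution_notes=f"Auto-triaged with {len(siem_obs) + len(edr_obs)} observables added",
    )
```

Each workflow function wraps failures in `IntegrationError`, so a caller only needs a single except clause around the whole sequence.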