iflow-mcp-m507_ai-soc-agent 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/METADATA +410 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/RECORD +85 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_m507_ai_soc_agent-1.0.0.dist-info/top_level.txt +1 -0
- src/__init__.py +8 -0
- src/ai_controller/README.md +139 -0
- src/ai_controller/__init__.py +12 -0
- src/ai_controller/agent_executor.py +596 -0
- src/ai_controller/cli/__init__.py +2 -0
- src/ai_controller/cli/main.py +243 -0
- src/ai_controller/session_manager.py +409 -0
- src/ai_controller/web/__init__.py +2 -0
- src/ai_controller/web/server.py +1181 -0
- src/ai_controller/web/static/css/README.md +102 -0
- src/api/__init__.py +13 -0
- src/api/case_management.py +271 -0
- src/api/edr.py +187 -0
- src/api/kb.py +136 -0
- src/api/siem.py +308 -0
- src/core/__init__.py +10 -0
- src/core/config.py +242 -0
- src/core/config_storage.py +684 -0
- src/core/dto.py +50 -0
- src/core/errors.py +36 -0
- src/core/logging.py +128 -0
- src/integrations/__init__.py +8 -0
- src/integrations/case_management/__init__.py +5 -0
- src/integrations/case_management/iris/__init__.py +11 -0
- src/integrations/case_management/iris/iris_client.py +885 -0
- src/integrations/case_management/iris/iris_http.py +274 -0
- src/integrations/case_management/iris/iris_mapper.py +263 -0
- src/integrations/case_management/iris/iris_models.py +128 -0
- src/integrations/case_management/thehive/__init__.py +8 -0
- src/integrations/case_management/thehive/thehive_client.py +193 -0
- src/integrations/case_management/thehive/thehive_http.py +147 -0
- src/integrations/case_management/thehive/thehive_mapper.py +190 -0
- src/integrations/case_management/thehive/thehive_models.py +125 -0
- src/integrations/cti/__init__.py +6 -0
- src/integrations/cti/local_tip/__init__.py +10 -0
- src/integrations/cti/local_tip/local_tip_client.py +90 -0
- src/integrations/cti/local_tip/local_tip_http.py +110 -0
- src/integrations/cti/opencti/__init__.py +10 -0
- src/integrations/cti/opencti/opencti_client.py +101 -0
- src/integrations/cti/opencti/opencti_http.py +418 -0
- src/integrations/edr/__init__.py +6 -0
- src/integrations/edr/elastic_defend/__init__.py +6 -0
- src/integrations/edr/elastic_defend/elastic_defend_client.py +351 -0
- src/integrations/edr/elastic_defend/elastic_defend_http.py +162 -0
- src/integrations/eng/__init__.py +10 -0
- src/integrations/eng/clickup/__init__.py +8 -0
- src/integrations/eng/clickup/clickup_client.py +513 -0
- src/integrations/eng/clickup/clickup_http.py +156 -0
- src/integrations/eng/github/__init__.py +8 -0
- src/integrations/eng/github/github_client.py +169 -0
- src/integrations/eng/github/github_http.py +158 -0
- src/integrations/eng/trello/__init__.py +8 -0
- src/integrations/eng/trello/trello_client.py +207 -0
- src/integrations/eng/trello/trello_http.py +162 -0
- src/integrations/kb/__init__.py +12 -0
- src/integrations/kb/fs_kb_client.py +313 -0
- src/integrations/siem/__init__.py +6 -0
- src/integrations/siem/elastic/__init__.py +6 -0
- src/integrations/siem/elastic/elastic_client.py +3319 -0
- src/integrations/siem/elastic/elastic_http.py +165 -0
- src/mcp/README.md +183 -0
- src/mcp/TOOLS.md +2827 -0
- src/mcp/__init__.py +13 -0
- src/mcp/__main__.py +18 -0
- src/mcp/agent_profiles.py +408 -0
- src/mcp/flow_agent_profiles.py +424 -0
- src/mcp/mcp_server.py +4086 -0
- src/mcp/rules_engine.py +487 -0
- src/mcp/runbook_manager.py +264 -0
- src/orchestrator/__init__.py +11 -0
- src/orchestrator/incident_workflow.py +244 -0
- src/orchestrator/tools_case.py +1085 -0
- src/orchestrator/tools_cti.py +359 -0
- src/orchestrator/tools_edr.py +315 -0
- src/orchestrator/tools_eng.py +378 -0
- src/orchestrator/tools_kb.py +156 -0
- src/orchestrator/tools_siem.py +1709 -0
- src/web/__init__.py +8 -0
- src/web/config_server.py +511 -0
src/mcp/mcp_server.py
ADDED
|
@@ -0,0 +1,4086 @@
|
|
|
1
|
+
"""
|
|
2
|
+
MCP (Model Context Protocol) server for SamiGPT.
|
|
3
|
+
|
|
4
|
+
This module implements an MCP server that exposes all investigation and
|
|
5
|
+
response skills as tools that can be invoked by LLM clients like Open WebUI,
|
|
6
|
+
Claude Desktop, Cline, etc.
|
|
7
|
+
|
|
8
|
+
The server implements JSON-RPC 2.0 over stdio as specified in the MCP protocol.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import asyncio
|
|
14
|
+
import json
|
|
15
|
+
import logging
|
|
16
|
+
import os
|
|
17
|
+
import sys
|
|
18
|
+
import warnings
|
|
19
|
+
from typing import Any, Dict, List, Optional, Union
|
|
20
|
+
|
|
21
|
+
# Suppress urllib3 warnings that go to stderr (which can confuse MCP clients)
|
|
22
|
+
warnings.filterwarnings("ignore", category=UserWarning, module="urllib3")
|
|
23
|
+
|
|
24
|
+
from ..api.case_management import CaseManagementClient, CaseSearchQuery
|
|
25
|
+
from ..api.edr import EDRClient
|
|
26
|
+
from ..api.siem import SIEMClient
|
|
27
|
+
from ..api.kb import KBClient
|
|
28
|
+
from ..core.config import SamiConfig
|
|
29
|
+
from ..core.logging import configure_logging, get_logger
|
|
30
|
+
from ..integrations.case_management.iris.iris_client import (
|
|
31
|
+
IRISCaseManagementClient,
|
|
32
|
+
)
|
|
33
|
+
from ..integrations.case_management.thehive.thehive_client import (
|
|
34
|
+
TheHiveCaseManagementClient,
|
|
35
|
+
)
|
|
36
|
+
from ..integrations.siem.elastic.elastic_client import ElasticSIEMClient
|
|
37
|
+
from ..integrations.edr.elastic_defend.elastic_defend_client import ElasticDefendEDRClient
|
|
38
|
+
from ..integrations.cti.local_tip.local_tip_client import LocalTipCTIClient
|
|
39
|
+
from ..integrations.cti.opencti.opencti_client import OpenCTIClient
|
|
40
|
+
from ..integrations.kb import FileSystemKBClient
|
|
41
|
+
from ..integrations.eng.trello.trello_client import TrelloClient
|
|
42
|
+
from ..integrations.eng.clickup.clickup_client import ClickUpClient
|
|
43
|
+
from ..integrations.eng.github.github_client import GitHubClient
|
|
44
|
+
from ..orchestrator import tools_case, tools_cti, tools_edr, tools_siem, tools_kb, tools_eng
|
|
45
|
+
from .rules_engine import RulesEngine
|
|
46
|
+
from .agent_profiles import AgentProfileManager
|
|
47
|
+
from .runbook_manager import RunbookManager
|
|
48
|
+
|
|
49
|
+
logger = get_logger(__name__)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def configure_mcp_logging(log_dir: str = "logs") -> None:
|
|
53
|
+
"""
|
|
54
|
+
Configure dedicated logging for the MCP server in its own directory.
|
|
55
|
+
|
|
56
|
+
Creates logs/mcp/ directory with:
|
|
57
|
+
- mcp_requests.log: All incoming requests
|
|
58
|
+
- mcp_responses.log: All outgoing responses
|
|
59
|
+
- mcp_errors.log: All errors
|
|
60
|
+
- mcp_all.log: Everything (for complete debugging)
|
|
61
|
+
|
|
62
|
+
Args:
|
|
63
|
+
log_dir: Base log directory (default: "logs")
|
|
64
|
+
"""
|
|
65
|
+
mcp_log_dir = os.path.join(log_dir, "mcp")
|
|
66
|
+
os.makedirs(mcp_log_dir, exist_ok=True)
|
|
67
|
+
|
|
68
|
+
# Get or create MCP-specific logger
|
|
69
|
+
mcp_logger = logging.getLogger("sami.mcp")
|
|
70
|
+
mcp_logger.setLevel(logging.DEBUG)
|
|
71
|
+
|
|
72
|
+
# Avoid duplicate handlers
|
|
73
|
+
if getattr(mcp_logger, "_mcp_logging_configured", False):
|
|
74
|
+
return
|
|
75
|
+
|
|
76
|
+
# Detailed formatter with more context
|
|
77
|
+
detailed_formatter = logging.Formatter(
|
|
78
|
+
fmt="%(asctime)s [%(levelname)s] %(name)s [%(funcName)s:%(lineno)d] %(message)s",
|
|
79
|
+
datefmt="%Y-%m-%d %H:%M:%S",
|
|
80
|
+
)
|
|
81
|
+
|
|
82
|
+
# All MCP logs (everything)
|
|
83
|
+
all_handler = logging.FileHandler(os.path.join(mcp_log_dir, "mcp_all.log"))
|
|
84
|
+
all_handler.setLevel(logging.DEBUG)
|
|
85
|
+
all_handler.setFormatter(detailed_formatter)
|
|
86
|
+
|
|
87
|
+
# Requests log
|
|
88
|
+
requests_handler = logging.FileHandler(os.path.join(mcp_log_dir, "mcp_requests.log"))
|
|
89
|
+
requests_handler.setLevel(logging.INFO)
|
|
90
|
+
requests_handler.setFormatter(detailed_formatter)
|
|
91
|
+
|
|
92
|
+
# Responses log
|
|
93
|
+
responses_handler = logging.FileHandler(os.path.join(mcp_log_dir, "mcp_responses.log"))
|
|
94
|
+
responses_handler.setLevel(logging.INFO)
|
|
95
|
+
responses_handler.setFormatter(detailed_formatter)
|
|
96
|
+
|
|
97
|
+
# Errors log
|
|
98
|
+
errors_handler = logging.FileHandler(os.path.join(mcp_log_dir, "mcp_errors.log"))
|
|
99
|
+
errors_handler.setLevel(logging.ERROR)
|
|
100
|
+
errors_handler.setFormatter(detailed_formatter)
|
|
101
|
+
|
|
102
|
+
# Add custom filters for requests/responses based on message prefixes
|
|
103
|
+
class RequestFilter(logging.Filter):
|
|
104
|
+
def filter(self, record):
|
|
105
|
+
msg = record.getMessage().upper()
|
|
106
|
+
return "REQUEST" in msg or "EXECUTING" in msg
|
|
107
|
+
|
|
108
|
+
class ResponseFilter(logging.Filter):
|
|
109
|
+
def filter(self, record):
|
|
110
|
+
msg = record.getMessage().upper()
|
|
111
|
+
return "RESPONSE" in msg or record.levelno >= logging.ERROR
|
|
112
|
+
|
|
113
|
+
requests_handler.addFilter(RequestFilter())
|
|
114
|
+
responses_handler.addFilter(ResponseFilter())
|
|
115
|
+
|
|
116
|
+
mcp_logger.addHandler(all_handler)
|
|
117
|
+
mcp_logger.addHandler(requests_handler)
|
|
118
|
+
mcp_logger.addHandler(responses_handler)
|
|
119
|
+
mcp_logger.addHandler(errors_handler)
|
|
120
|
+
|
|
121
|
+
# Mark as configured
|
|
122
|
+
mcp_logger._mcp_logging_configured = True # type: ignore[attr-defined]
|
|
123
|
+
|
|
124
|
+
logger.info(f"MCP dedicated logging configured in: {mcp_log_dir}")
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
class SamiGPTMCPServer:
    """
    MCP server that exposes SamiGPT investigation skills as tools.

    Implements the Model Context Protocol (MCP) specification using
    JSON-RPC 2.0 over stdio.
    """

    # MCP protocol version - support both old and new versions.
    # PROTOCOL_VERSION is the default; SUPPORTED_PROTOCOL_VERSIONS lists
    # every revision the server accepts (presumably checked during the
    # initialize handshake — confirm against the request handlers).
    PROTOCOL_VERSION = "2024-11-05"
    SUPPORTED_PROTOCOL_VERSIONS = ["2024-11-05", "2025-06-18"]

    # Server info (identity constants advertised to MCP clients)
    SERVER_NAME = "sami-gpt"
    SERVER_VERSION = "1.0.0"
|
|
142
|
+
|
|
143
|
+
def __init__(
|
|
144
|
+
self,
|
|
145
|
+
case_client: Optional[CaseManagementClient] = None,
|
|
146
|
+
siem_client: Optional[SIEMClient] = None,
|
|
147
|
+
edr_client: Optional[EDRClient] = None,
|
|
148
|
+
cti_client: Optional[Any] = None,
|
|
149
|
+
cti_clients: Optional[list] = None,
|
|
150
|
+
kb_client: Optional[KBClient] = None,
|
|
151
|
+
eng_client: Optional[Union[TrelloClient, ClickUpClient, GitHubClient]] = None,
|
|
152
|
+
):
|
|
153
|
+
"""
|
|
154
|
+
Initialize the MCP server.
|
|
155
|
+
|
|
156
|
+
Args:
|
|
157
|
+
case_client: Case management client.
|
|
158
|
+
siem_client: SIEM client.
|
|
159
|
+
edr_client: EDR client.
|
|
160
|
+
cti_client: CTI (Cyber Threat Intelligence) client (single, for backward compatibility).
|
|
161
|
+
cti_clients: List of CTI clients (for multi-platform support).
|
|
162
|
+
"""
|
|
163
|
+
self.case_client = case_client
|
|
164
|
+
self.siem_client = siem_client
|
|
165
|
+
self.edr_client = edr_client
|
|
166
|
+
# Support both single client (backward compat) and multiple clients
|
|
167
|
+
if cti_clients is not None:
|
|
168
|
+
self.cti_clients = cti_clients
|
|
169
|
+
self.cti_client = cti_clients[0] if cti_clients else None # For backward compatibility
|
|
170
|
+
else:
|
|
171
|
+
self.cti_clients = [cti_client] if cti_client else []
|
|
172
|
+
self.cti_client = cti_client
|
|
173
|
+
# KB client defaults to filesystem-based client so it is always available
|
|
174
|
+
self.kb_client: KBClient = kb_client or FileSystemKBClient()
|
|
175
|
+
self.eng_client = eng_client
|
|
176
|
+
self.rules_engine = RulesEngine(
|
|
177
|
+
case_client=case_client,
|
|
178
|
+
siem_client=siem_client,
|
|
179
|
+
edr_client=edr_client,
|
|
180
|
+
)
|
|
181
|
+
self.agent_profile_manager = AgentProfileManager()
|
|
182
|
+
self.runbook_manager = RunbookManager()
|
|
183
|
+
# Track which agent profiles have already shown their SOC tier guidelines
|
|
184
|
+
self._shown_agent_guidelines: Dict[str, bool] = {}
|
|
185
|
+
self._initialized = False
|
|
186
|
+
self._mcp_logger = logging.getLogger("sami.mcp")
|
|
187
|
+
self._register_tools()
|
|
188
|
+
|
|
189
|
+
def _register_tools(self) -> None:
|
|
190
|
+
"""Register all available tools."""
|
|
191
|
+
self.tools: Dict[str, Dict[str, Any]] = {}
|
|
192
|
+
|
|
193
|
+
# Case management tools
|
|
194
|
+
self._register_case_tools()
|
|
195
|
+
# SIEM tools
|
|
196
|
+
self._register_siem_tools()
|
|
197
|
+
# EDR tools
|
|
198
|
+
self._register_edr_tools()
|
|
199
|
+
# CTI tools
|
|
200
|
+
self._register_cti_tools()
|
|
201
|
+
# Rules engine tools
|
|
202
|
+
self._register_rules_tools()
|
|
203
|
+
# Runbook and agent profile tools
|
|
204
|
+
self._register_runbook_tools()
|
|
205
|
+
self._register_agent_profile_tools()
|
|
206
|
+
# Knowledge base tools (client infrastructure)
|
|
207
|
+
self._register_kb_tools()
|
|
208
|
+
# Engineering tools (Trello)
|
|
209
|
+
self._register_eng_tools()
|
|
210
|
+
|
|
211
|
+
def _register_kb_tools(self) -> None:
|
|
212
|
+
"""
|
|
213
|
+
Register knowledge base tools for client infrastructure.
|
|
214
|
+
|
|
215
|
+
Available tools:
|
|
216
|
+
- kb_list_clients: List available client environments based on client_env/*
|
|
217
|
+
- kb_get_client_infra: Load and summarize infrastructure for a given client.
|
|
218
|
+
"""
|
|
219
|
+
if not self.kb_client:
|
|
220
|
+
self._mcp_logger.warning(
|
|
221
|
+
"KB tools not registered: No KB client configured."
|
|
222
|
+
)
|
|
223
|
+
return
|
|
224
|
+
|
|
225
|
+
self._mcp_logger.info("Registering 2 KB tools (client infrastructure)")
|
|
226
|
+
|
|
227
|
+
self.tools["kb_list_clients"] = {
|
|
228
|
+
"name": "kb_list_clients",
|
|
229
|
+
"description": "List available client environments based on folders under client_env/*.",
|
|
230
|
+
"inputSchema": {
|
|
231
|
+
"type": "object",
|
|
232
|
+
"properties": {},
|
|
233
|
+
},
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
self.tools["kb_get_client_infra"] = {
|
|
237
|
+
"name": "kb_get_client_infra",
|
|
238
|
+
"description": "Load and summarize client infrastructure (subnets, servers, users, naming schemas, env rules) from client_env/*.",
|
|
239
|
+
"inputSchema": {
|
|
240
|
+
"type": "object",
|
|
241
|
+
"properties": {
|
|
242
|
+
"client_name": {
|
|
243
|
+
"type": "string",
|
|
244
|
+
"description": "Name of the client environment (e.g., 'acme_corp_client' or 'acme_corp').",
|
|
245
|
+
}
|
|
246
|
+
},
|
|
247
|
+
"required": ["client_name"],
|
|
248
|
+
},
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
def _register_eng_tools(self) -> None:
|
|
252
|
+
"""
|
|
253
|
+
Register engineering tools (Trello/ClickUp/GitHub).
|
|
254
|
+
|
|
255
|
+
Available tools:
|
|
256
|
+
- create_fine_tuning_recommendation: Create a fine-tuning recommendation (supports Trello, ClickUp, and GitHub)
|
|
257
|
+
- create_visibility_recommendation: Create a visibility/engineering recommendation (supports Trello, ClickUp, and GitHub)
|
|
258
|
+
- list_fine_tuning_recommendations: List all fine-tuning recommendations (ClickUp only)
|
|
259
|
+
- list_visibility_recommendations: List all visibility/engineering recommendations (ClickUp only)
|
|
260
|
+
- add_comment_to_fine_tuning_recommendation: Add a comment to a fine-tuning recommendation task (ClickUp only)
|
|
261
|
+
- add_comment_to_visibility_recommendation: Add a comment to a visibility recommendation task (ClickUp only)
|
|
262
|
+
"""
|
|
263
|
+
if not self.eng_client:
|
|
264
|
+
self._mcp_logger.warning(
|
|
265
|
+
"Engineering tools not registered: No engineering client configured. "
|
|
266
|
+
"Configure Trello, ClickUp, or GitHub in config.json to enable engineering tools."
|
|
267
|
+
)
|
|
268
|
+
return
|
|
269
|
+
|
|
270
|
+
self._mcp_logger.info("Registering 6 engineering tools (Trello/ClickUp/GitHub)")
|
|
271
|
+
|
|
272
|
+
self.tools["create_fine_tuning_recommendation"] = {
|
|
273
|
+
"name": "create_fine_tuning_recommendation",
|
|
274
|
+
"description": "Create a fine-tuning recommendation on the fine-tuning board (supports Trello, ClickUp, and GitHub)",
|
|
275
|
+
"inputSchema": {
|
|
276
|
+
"type": "object",
|
|
277
|
+
"properties": {
|
|
278
|
+
"title": {
|
|
279
|
+
"type": "string",
|
|
280
|
+
"description": "Task/card title"
|
|
281
|
+
},
|
|
282
|
+
"description": {
|
|
283
|
+
"type": "string",
|
|
284
|
+
"description": "Task/card description"
|
|
285
|
+
},
|
|
286
|
+
"list_name": {
|
|
287
|
+
"type": "string",
|
|
288
|
+
"description": "Optional list name (Trello only, defaults to first list on board)"
|
|
289
|
+
},
|
|
290
|
+
"labels": {
|
|
291
|
+
"type": "array",
|
|
292
|
+
"items": {"type": "string"},
|
|
293
|
+
"description": "Optional list of label names (Trello only)"
|
|
294
|
+
},
|
|
295
|
+
"status": {
|
|
296
|
+
"type": "string",
|
|
297
|
+
"description": "Optional status name (ClickUp only, defaults to first status in list)"
|
|
298
|
+
},
|
|
299
|
+
"tags": {
|
|
300
|
+
"type": "array",
|
|
301
|
+
"items": {"type": "string"},
|
|
302
|
+
"description": "Optional list of tag names (ClickUp only)"
|
|
303
|
+
}
|
|
304
|
+
},
|
|
305
|
+
"required": ["title", "description"]
|
|
306
|
+
}
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
self.tools["create_visibility_recommendation"] = {
|
|
310
|
+
"name": "create_visibility_recommendation",
|
|
311
|
+
"description": "Create a visibility/engineering recommendation on the engineering board (supports Trello, ClickUp, and GitHub)",
|
|
312
|
+
"inputSchema": {
|
|
313
|
+
"type": "object",
|
|
314
|
+
"properties": {
|
|
315
|
+
"title": {
|
|
316
|
+
"type": "string",
|
|
317
|
+
"description": "Task/card title"
|
|
318
|
+
},
|
|
319
|
+
"description": {
|
|
320
|
+
"type": "string",
|
|
321
|
+
"description": "Task/card description"
|
|
322
|
+
},
|
|
323
|
+
"list_name": {
|
|
324
|
+
"type": "string",
|
|
325
|
+
"description": "Optional list name (Trello only, defaults to first list on board)"
|
|
326
|
+
},
|
|
327
|
+
"labels": {
|
|
328
|
+
"type": "array",
|
|
329
|
+
"items": {"type": "string"},
|
|
330
|
+
"description": "Optional list of label names (Trello only)"
|
|
331
|
+
},
|
|
332
|
+
"status": {
|
|
333
|
+
"type": "string",
|
|
334
|
+
"description": "Optional status name (ClickUp only, defaults to first status in list)"
|
|
335
|
+
},
|
|
336
|
+
"tags": {
|
|
337
|
+
"type": "array",
|
|
338
|
+
"items": {"type": "string"},
|
|
339
|
+
"description": "Optional list of tag names (ClickUp only)"
|
|
340
|
+
}
|
|
341
|
+
},
|
|
342
|
+
"required": ["title", "description"]
|
|
343
|
+
}
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
self.tools["list_fine_tuning_recommendations"] = {
|
|
347
|
+
"name": "list_fine_tuning_recommendations",
|
|
348
|
+
"description": "List all fine-tuning recommendation tasks from the fine-tuning board (ClickUp only)",
|
|
349
|
+
"inputSchema": {
|
|
350
|
+
"type": "object",
|
|
351
|
+
"properties": {
|
|
352
|
+
"archived": {
|
|
353
|
+
"type": "boolean",
|
|
354
|
+
"description": "Include archived tasks (default: False)"
|
|
355
|
+
},
|
|
356
|
+
"include_closed": {
|
|
357
|
+
"type": "boolean",
|
|
358
|
+
"description": "Include closed tasks (default: True)"
|
|
359
|
+
},
|
|
360
|
+
"order_by": {
|
|
361
|
+
"type": "string",
|
|
362
|
+
"description": "Order tasks by field (e.g., 'created', 'updated', 'priority')"
|
|
363
|
+
},
|
|
364
|
+
"reverse": {
|
|
365
|
+
"type": "boolean",
|
|
366
|
+
"description": "Reverse the order (default: False)"
|
|
367
|
+
},
|
|
368
|
+
"subtasks": {
|
|
369
|
+
"type": "boolean",
|
|
370
|
+
"description": "Include subtasks (default: False)"
|
|
371
|
+
},
|
|
372
|
+
"statuses": {
|
|
373
|
+
"type": "array",
|
|
374
|
+
"items": {"type": "string"},
|
|
375
|
+
"description": "Filter by status names"
|
|
376
|
+
},
|
|
377
|
+
"include_markdown_description": {
|
|
378
|
+
"type": "boolean",
|
|
379
|
+
"description": "Include markdown in descriptions (default: False)"
|
|
380
|
+
}
|
|
381
|
+
},
|
|
382
|
+
"required": []
|
|
383
|
+
}
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
self.tools["list_visibility_recommendations"] = {
|
|
387
|
+
"name": "list_visibility_recommendations",
|
|
388
|
+
"description": "List all visibility/engineering recommendation tasks from the engineering board (ClickUp only)",
|
|
389
|
+
"inputSchema": {
|
|
390
|
+
"type": "object",
|
|
391
|
+
"properties": {
|
|
392
|
+
"archived": {
|
|
393
|
+
"type": "boolean",
|
|
394
|
+
"description": "Include archived tasks (default: False)"
|
|
395
|
+
},
|
|
396
|
+
"include_closed": {
|
|
397
|
+
"type": "boolean",
|
|
398
|
+
"description": "Include closed tasks (default: True)"
|
|
399
|
+
},
|
|
400
|
+
"order_by": {
|
|
401
|
+
"type": "string",
|
|
402
|
+
"description": "Order tasks by field (e.g., 'created', 'updated', 'priority')"
|
|
403
|
+
},
|
|
404
|
+
"reverse": {
|
|
405
|
+
"type": "boolean",
|
|
406
|
+
"description": "Reverse the order (default: False)"
|
|
407
|
+
},
|
|
408
|
+
"subtasks": {
|
|
409
|
+
"type": "boolean",
|
|
410
|
+
"description": "Include subtasks (default: False)"
|
|
411
|
+
},
|
|
412
|
+
"statuses": {
|
|
413
|
+
"type": "array",
|
|
414
|
+
"items": {"type": "string"},
|
|
415
|
+
"description": "Filter by status names"
|
|
416
|
+
},
|
|
417
|
+
"include_markdown_description": {
|
|
418
|
+
"type": "boolean",
|
|
419
|
+
"description": "Include markdown in descriptions (default: False)"
|
|
420
|
+
}
|
|
421
|
+
},
|
|
422
|
+
"required": []
|
|
423
|
+
}
|
|
424
|
+
}
|
|
425
|
+
|
|
426
|
+
self.tools["add_comment_to_fine_tuning_recommendation"] = {
|
|
427
|
+
"name": "add_comment_to_fine_tuning_recommendation",
|
|
428
|
+
"description": "Add a comment to a fine-tuning recommendation task (ClickUp only)",
|
|
429
|
+
"inputSchema": {
|
|
430
|
+
"type": "object",
|
|
431
|
+
"properties": {
|
|
432
|
+
"task_id": {
|
|
433
|
+
"type": "string",
|
|
434
|
+
"description": "ClickUp task ID"
|
|
435
|
+
},
|
|
436
|
+
"comment_text": {
|
|
437
|
+
"type": "string",
|
|
438
|
+
"description": "Comment text/content"
|
|
439
|
+
}
|
|
440
|
+
},
|
|
441
|
+
"required": ["task_id", "comment_text"]
|
|
442
|
+
}
|
|
443
|
+
}
|
|
444
|
+
|
|
445
|
+
self.tools["add_comment_to_visibility_recommendation"] = {
|
|
446
|
+
"name": "add_comment_to_visibility_recommendation",
|
|
447
|
+
"description": "Add a comment to a visibility/engineering recommendation task (ClickUp only)",
|
|
448
|
+
"inputSchema": {
|
|
449
|
+
"type": "object",
|
|
450
|
+
"properties": {
|
|
451
|
+
"task_id": {
|
|
452
|
+
"type": "string",
|
|
453
|
+
"description": "ClickUp task ID"
|
|
454
|
+
},
|
|
455
|
+
"comment_text": {
|
|
456
|
+
"type": "string",
|
|
457
|
+
"description": "Comment text/content"
|
|
458
|
+
}
|
|
459
|
+
},
|
|
460
|
+
"required": ["task_id", "comment_text"]
|
|
461
|
+
}
|
|
462
|
+
}
|
|
463
|
+
|
|
464
|
+
def _register_case_tools(self) -> None:
|
|
465
|
+
"""
|
|
466
|
+
Register case management tools.
|
|
467
|
+
|
|
468
|
+
Available tools:
|
|
469
|
+
- create_case: Create a new case for investigation
|
|
470
|
+
- review_case: Retrieve full case details including observables and timeline
|
|
471
|
+
- list_cases: List cases optionally filtered by status
|
|
472
|
+
- search_cases: Search cases using multiple filters (text, status, priority, tags, assignee)
|
|
473
|
+
- update_case: Update case with new information (title, description, priority, status, tags, assignee)
|
|
474
|
+
- add_case_comment: Add comments/notes to cases
|
|
475
|
+
- attach_observable_to_case: Attach IOCs (IPs, hashes, domains, URLs) to cases
|
|
476
|
+
- update_case_status: Update case status (open, in_progress, closed)
|
|
477
|
+
- assign_case: Assign cases to analysts
|
|
478
|
+
- get_case_timeline: Retrieve chronological timeline of case events
|
|
479
|
+
- add_case_timeline_event: Add an event to a case timeline
|
|
480
|
+
- list_case_timeline_events: List all timeline events for a case
|
|
481
|
+
- link_cases: Link two cases together
|
|
482
|
+
- add_case_task: Add a task to a case
|
|
483
|
+
- list_case_tasks: List all tasks for a case
|
|
484
|
+
- add_case_asset: Add an asset to a case
|
|
485
|
+
- list_case_assets: List all assets for a case
|
|
486
|
+
- add_case_evidence: Upload and attach evidence to a case
|
|
487
|
+
- list_case_evidence: List all evidence files for a case
|
|
488
|
+
|
|
489
|
+
See TOOLS.md for detailed documentation and usage examples.
|
|
490
|
+
"""
|
|
491
|
+
if not self.case_client:
|
|
492
|
+
self._mcp_logger.warning(
|
|
493
|
+
"Case management tools not registered: No case management client configured. "
|
|
494
|
+
"Configure TheHive or IRIS in config.json to enable case management tools."
|
|
495
|
+
)
|
|
496
|
+
return
|
|
497
|
+
self._mcp_logger.info(f"Registering {19} case management tools")
|
|
498
|
+
|
|
499
|
+
self.tools["create_case"] = {
|
|
500
|
+
"name": "create_case",
|
|
501
|
+
"description": "Create a new case for investigation. Follows the case standard format defined in standards/case_standard.md. Use this when triaging an alert and no case exists yet.",
|
|
502
|
+
"inputSchema": {
|
|
503
|
+
"type": "object",
|
|
504
|
+
"properties": {
|
|
505
|
+
"title": {
|
|
506
|
+
"type": "string",
|
|
507
|
+
"description": "Case title following format: [Alert Type] - [Primary Entity] - [Date/Time]. Example: 'Malware Detection - 10.10.1.2 - 2025-11-18'"
|
|
508
|
+
},
|
|
509
|
+
"description": {
|
|
510
|
+
"type": "string",
|
|
511
|
+
"description": "Comprehensive case description including alert details, initial assessment, key entities, and severity justification"
|
|
512
|
+
},
|
|
513
|
+
"priority": {
|
|
514
|
+
"type": "string",
|
|
515
|
+
"enum": ["low", "medium", "high", "critical"],
|
|
516
|
+
"description": "Case priority based on severity, impact, and IOC matches. Default: medium"
|
|
517
|
+
},
|
|
518
|
+
"status": {
|
|
519
|
+
"type": "string",
|
|
520
|
+
"enum": ["open", "in_progress", "closed"],
|
|
521
|
+
"description": "Case status. New cases should start as 'open'. Default: open"
|
|
522
|
+
},
|
|
523
|
+
"tags": {
|
|
524
|
+
"type": "array",
|
|
525
|
+
"items": {"type": "string"},
|
|
526
|
+
"description": "Tags for categorization (e.g., ['malware', 'suspicious-login', 'ioc-match', 'soc1-triage'])"
|
|
527
|
+
},
|
|
528
|
+
"alert_id": {
|
|
529
|
+
"type": "string",
|
|
530
|
+
"description": "Associated alert ID if case is created from an alert"
|
|
531
|
+
}
|
|
532
|
+
},
|
|
533
|
+
"required": ["title", "description"]
|
|
534
|
+
},
|
|
535
|
+
}
|
|
536
|
+
|
|
537
|
+
self.tools["review_case"] = {
|
|
538
|
+
"name": "review_case",
|
|
539
|
+
"description": "Retrieve and review the full details of a case including title, description, status, priority, observables, and comments.",
|
|
540
|
+
"inputSchema": {
|
|
541
|
+
"type": "object",
|
|
542
|
+
"properties": {
|
|
543
|
+
"case_id": {
|
|
544
|
+
"type": "string",
|
|
545
|
+
"description": "The ID of the case to review",
|
|
546
|
+
}
|
|
547
|
+
},
|
|
548
|
+
"required": ["case_id"],
|
|
549
|
+
},
|
|
550
|
+
}
|
|
551
|
+
|
|
552
|
+
self.tools["list_cases"] = {
|
|
553
|
+
"name": "list_cases",
|
|
554
|
+
"description": "List cases from the case management system, optionally filtered by status (open, in_progress, closed).",
|
|
555
|
+
"inputSchema": {
|
|
556
|
+
"type": "object",
|
|
557
|
+
"properties": {
|
|
558
|
+
"status": {
|
|
559
|
+
"type": "string",
|
|
560
|
+
"enum": ["open", "in_progress", "closed"],
|
|
561
|
+
"description": "Filter by status",
|
|
562
|
+
},
|
|
563
|
+
"limit": {
|
|
564
|
+
"type": "integer",
|
|
565
|
+
"description": "Maximum number of cases to return",
|
|
566
|
+
"default": 50,
|
|
567
|
+
},
|
|
568
|
+
},
|
|
569
|
+
},
|
|
570
|
+
}
|
|
571
|
+
|
|
572
|
+
self.tools["search_cases"] = {
|
|
573
|
+
"name": "search_cases",
|
|
574
|
+
"description": "Search for cases using text search, status, priority, tags, or assignee filters.",
|
|
575
|
+
"inputSchema": {
|
|
576
|
+
"type": "object",
|
|
577
|
+
"properties": {
|
|
578
|
+
"text": {"type": "string", "description": "Text to search for"},
|
|
579
|
+
"status": {
|
|
580
|
+
"type": "string",
|
|
581
|
+
"enum": ["open", "in_progress", "closed"],
|
|
582
|
+
},
|
|
583
|
+
"priority": {
|
|
584
|
+
"type": "string",
|
|
585
|
+
"enum": ["low", "medium", "high", "critical"],
|
|
586
|
+
},
|
|
587
|
+
"tags": {
|
|
588
|
+
"type": "array",
|
|
589
|
+
"items": {"type": "string"},
|
|
590
|
+
"description": "Tags to filter by",
|
|
591
|
+
},
|
|
592
|
+
"assignee": {"type": "string", "description": "Assignee to filter by"},
|
|
593
|
+
"limit": {"type": "integer", "default": 50},
|
|
594
|
+
},
|
|
595
|
+
},
|
|
596
|
+
}
|
|
597
|
+
|
|
598
|
+
self.tools["add_case_comment"] = {
|
|
599
|
+
"name": "add_case_comment",
|
|
600
|
+
"description": "Add a comment or note to a case.",
|
|
601
|
+
"inputSchema": {
|
|
602
|
+
"type": "object",
|
|
603
|
+
"properties": {
|
|
604
|
+
"case_id": {"type": "string", "description": "The ID of the case"},
|
|
605
|
+
"content": {
|
|
606
|
+
"type": "string",
|
|
607
|
+
"description": "The comment content",
|
|
608
|
+
},
|
|
609
|
+
"author": {"type": "string", "description": "The author of the comment"},
|
|
610
|
+
},
|
|
611
|
+
"required": ["case_id", "content"],
|
|
612
|
+
},
|
|
613
|
+
}
|
|
614
|
+
|
|
615
|
+
self.tools["attach_observable_to_case"] = {
|
|
616
|
+
"name": "attach_observable_to_case",
|
|
617
|
+
"description": "Attach an observable such as an IP address, file hash, domain, or URL to a case for tracking and analysis.",
|
|
618
|
+
"inputSchema": {
|
|
619
|
+
"type": "object",
|
|
620
|
+
"properties": {
|
|
621
|
+
"case_id": {"type": "string", "description": "The ID of the case"},
|
|
622
|
+
"observable_type": {
|
|
623
|
+
"type": "string",
|
|
624
|
+
"description": "Type of observable (ip, hash, domain, url, etc.)",
|
|
625
|
+
},
|
|
626
|
+
"observable_value": {
|
|
627
|
+
"type": "string",
|
|
628
|
+
"description": "The value of the observable",
|
|
629
|
+
},
|
|
630
|
+
"description": {
|
|
631
|
+
"type": "string",
|
|
632
|
+
"description": "Description of the observable",
|
|
633
|
+
},
|
|
634
|
+
"tags": {
|
|
635
|
+
"type": "array",
|
|
636
|
+
"items": {"type": "string"},
|
|
637
|
+
"description": "Tags for the observable",
|
|
638
|
+
},
|
|
639
|
+
},
|
|
640
|
+
"required": ["case_id", "observable_type", "observable_value"],
|
|
641
|
+
},
|
|
642
|
+
}
|
|
643
|
+
|
|
644
|
+
self.tools["update_case_status"] = {
|
|
645
|
+
"name": "update_case_status",
|
|
646
|
+
"description": "Update the status of a case (open, in_progress, closed).",
|
|
647
|
+
"inputSchema": {
|
|
648
|
+
"type": "object",
|
|
649
|
+
"properties": {
|
|
650
|
+
"case_id": {"type": "string", "description": "The ID of the case"},
|
|
651
|
+
"status": {
|
|
652
|
+
"type": "string",
|
|
653
|
+
"enum": ["open", "in_progress", "closed"],
|
|
654
|
+
"description": "New status",
|
|
655
|
+
},
|
|
656
|
+
},
|
|
657
|
+
"required": ["case_id", "status"],
|
|
658
|
+
},
|
|
659
|
+
}
|
|
660
|
+
|
|
661
|
+
self.tools["assign_case"] = {
|
|
662
|
+
"name": "assign_case",
|
|
663
|
+
"description": "Assign a case to a specific user or analyst.",
|
|
664
|
+
"inputSchema": {
|
|
665
|
+
"type": "object",
|
|
666
|
+
"properties": {
|
|
667
|
+
"case_id": {"type": "string", "description": "The ID of the case"},
|
|
668
|
+
"assignee": {
|
|
669
|
+
"type": "string",
|
|
670
|
+
"description": "The username or ID of the assignee",
|
|
671
|
+
},
|
|
672
|
+
},
|
|
673
|
+
"required": ["case_id", "assignee"],
|
|
674
|
+
},
|
|
675
|
+
}
|
|
676
|
+
|
|
677
|
+
self.tools["get_case_timeline"] = {
|
|
678
|
+
"name": "get_case_timeline",
|
|
679
|
+
"description": "Retrieve the timeline of comments and events for a case, ordered chronologically.",
|
|
680
|
+
"inputSchema": {
|
|
681
|
+
"type": "object",
|
|
682
|
+
"properties": {
|
|
683
|
+
"case_id": {
|
|
684
|
+
"type": "string",
|
|
685
|
+
"description": "The ID of the case",
|
|
686
|
+
}
|
|
687
|
+
},
|
|
688
|
+
"required": ["case_id"],
|
|
689
|
+
},
|
|
690
|
+
}
|
|
691
|
+
|
|
692
|
+
# Task management tools
|
|
693
|
+
self.tools["add_case_task"] = {
|
|
694
|
+
"name": "add_case_task",
|
|
695
|
+
"description": "Add a task to a case. Tasks represent actionable items for investigation and response, typically assigned to SOC2 or SOC3 tiers.",
|
|
696
|
+
"inputSchema": {
|
|
697
|
+
"type": "object",
|
|
698
|
+
"properties": {
|
|
699
|
+
"case_id": {
|
|
700
|
+
"type": "string",
|
|
701
|
+
"description": "The ID of the case"
|
|
702
|
+
},
|
|
703
|
+
"title": {
|
|
704
|
+
"type": "string",
|
|
705
|
+
"description": "Task title"
|
|
706
|
+
},
|
|
707
|
+
"description": {
|
|
708
|
+
"type": "string",
|
|
709
|
+
"description": "Task description"
|
|
710
|
+
},
|
|
711
|
+
"assignee": {
|
|
712
|
+
"type": "string",
|
|
713
|
+
"description": "Assignee ID or SOC tier (e.g., 'SOC2', 'SOC3')"
|
|
714
|
+
},
|
|
715
|
+
"priority": {
|
|
716
|
+
"type": "string",
|
|
717
|
+
"enum": ["low", "medium", "high", "critical"],
|
|
718
|
+
"description": "Task priority. Default: medium"
|
|
719
|
+
},
|
|
720
|
+
"status": {
|
|
721
|
+
"type": "string",
|
|
722
|
+
"enum": ["pending", "in_progress", "completed", "blocked"],
|
|
723
|
+
"description": "Task status. Default: pending"
|
|
724
|
+
}
|
|
725
|
+
},
|
|
726
|
+
"required": ["case_id", "title", "description"]
|
|
727
|
+
}
|
|
728
|
+
}
|
|
729
|
+
|
|
730
|
+
self.tools["list_case_tasks"] = {
|
|
731
|
+
"name": "list_case_tasks",
|
|
732
|
+
"description": "List all tasks associated with a case",
|
|
733
|
+
"inputSchema": {
|
|
734
|
+
"type": "object",
|
|
735
|
+
"properties": {
|
|
736
|
+
"case_id": {
|
|
737
|
+
"type": "string",
|
|
738
|
+
"description": "The ID of the case"
|
|
739
|
+
}
|
|
740
|
+
},
|
|
741
|
+
"required": ["case_id"]
|
|
742
|
+
}
|
|
743
|
+
}
|
|
744
|
+
|
|
745
|
+
self.tools["update_case_task_status"] = {
|
|
746
|
+
"name": "update_case_task_status",
|
|
747
|
+
"description": "Update the status of a task (pending, in_progress, completed, blocked). Use this to mark tasks as in-progress when starting work and completed when finishing.",
|
|
748
|
+
"inputSchema": {
|
|
749
|
+
"type": "object",
|
|
750
|
+
"properties": {
|
|
751
|
+
"case_id": {
|
|
752
|
+
"type": "string",
|
|
753
|
+
"description": "The ID of the case"
|
|
754
|
+
},
|
|
755
|
+
"task_id": {
|
|
756
|
+
"type": "string",
|
|
757
|
+
"description": "The ID of the task to update"
|
|
758
|
+
},
|
|
759
|
+
"status": {
|
|
760
|
+
"type": "string",
|
|
761
|
+
"enum": ["pending", "in_progress", "completed", "blocked"],
|
|
762
|
+
"description": "New task status"
|
|
763
|
+
}
|
|
764
|
+
},
|
|
765
|
+
"required": ["case_id", "task_id", "status"]
|
|
766
|
+
}
|
|
767
|
+
}
|
|
768
|
+
|
|
769
|
+
# Asset management tools
|
|
770
|
+
self.tools["add_case_asset"] = {
|
|
771
|
+
"name": "add_case_asset",
|
|
772
|
+
"description": "Add an asset (endpoint, server, network, user account, application) to a case",
|
|
773
|
+
"inputSchema": {
|
|
774
|
+
"type": "object",
|
|
775
|
+
"properties": {
|
|
776
|
+
"case_id": {
|
|
777
|
+
"type": "string",
|
|
778
|
+
"description": "The ID of the case"
|
|
779
|
+
},
|
|
780
|
+
"asset_name": {
|
|
781
|
+
"type": "string",
|
|
782
|
+
"description": "Asset name/identifier"
|
|
783
|
+
},
|
|
784
|
+
"asset_type": {
|
|
785
|
+
"type": "string",
|
|
786
|
+
"enum": ["endpoint", "server", "network", "user_account", "application"],
|
|
787
|
+
"description": "Asset type"
|
|
788
|
+
},
|
|
789
|
+
"description": {
|
|
790
|
+
"type": "string",
|
|
791
|
+
"description": "Asset description"
|
|
792
|
+
},
|
|
793
|
+
"ip_address": {
|
|
794
|
+
"type": "string",
|
|
795
|
+
"description": "IP address if applicable"
|
|
796
|
+
},
|
|
797
|
+
"hostname": {
|
|
798
|
+
"type": "string",
|
|
799
|
+
"description": "Hostname if applicable"
|
|
800
|
+
},
|
|
801
|
+
"tags": {
|
|
802
|
+
"type": "array",
|
|
803
|
+
"items": {"type": "string"},
|
|
804
|
+
"description": "Tags for the asset"
|
|
805
|
+
}
|
|
806
|
+
},
|
|
807
|
+
"required": ["case_id", "asset_name", "asset_type"]
|
|
808
|
+
}
|
|
809
|
+
}
|
|
810
|
+
|
|
811
|
+
self.tools["list_case_assets"] = {
|
|
812
|
+
"name": "list_case_assets",
|
|
813
|
+
"description": "List all assets associated with a case",
|
|
814
|
+
"inputSchema": {
|
|
815
|
+
"type": "object",
|
|
816
|
+
"properties": {
|
|
817
|
+
"case_id": {
|
|
818
|
+
"type": "string",
|
|
819
|
+
"description": "The ID of the case"
|
|
820
|
+
}
|
|
821
|
+
},
|
|
822
|
+
"required": ["case_id"]
|
|
823
|
+
}
|
|
824
|
+
}
|
|
825
|
+
|
|
826
|
+
# Evidence management tools
|
|
827
|
+
self.tools["add_case_evidence"] = {
|
|
828
|
+
"name": "add_case_evidence",
|
|
829
|
+
"description": "Upload and attach evidence (file, log, screenshot, network capture, etc.) to a case",
|
|
830
|
+
"inputSchema": {
|
|
831
|
+
"type": "object",
|
|
832
|
+
"properties": {
|
|
833
|
+
"case_id": {
|
|
834
|
+
"type": "string",
|
|
835
|
+
"description": "The ID of the case"
|
|
836
|
+
},
|
|
837
|
+
"file_path": {
|
|
838
|
+
"type": "string",
|
|
839
|
+
"description": "Path to the evidence file"
|
|
840
|
+
},
|
|
841
|
+
"description": {
|
|
842
|
+
"type": "string",
|
|
843
|
+
"description": "Description of the evidence"
|
|
844
|
+
},
|
|
845
|
+
"evidence_type": {
|
|
846
|
+
"type": "string",
|
|
847
|
+
"enum": ["file", "screenshot", "log", "network_capture", "memory_dump", "registry", "other"],
|
|
848
|
+
"description": "Type of evidence"
|
|
849
|
+
}
|
|
850
|
+
},
|
|
851
|
+
"required": ["case_id", "file_path"]
|
|
852
|
+
}
|
|
853
|
+
}
|
|
854
|
+
|
|
855
|
+
self.tools["list_case_evidence"] = {
|
|
856
|
+
"name": "list_case_evidence",
|
|
857
|
+
"description": "List all evidence files associated with a case",
|
|
858
|
+
"inputSchema": {
|
|
859
|
+
"type": "object",
|
|
860
|
+
"properties": {
|
|
861
|
+
"case_id": {
|
|
862
|
+
"type": "string",
|
|
863
|
+
"description": "The ID of the case"
|
|
864
|
+
}
|
|
865
|
+
},
|
|
866
|
+
"required": ["case_id"]
|
|
867
|
+
}
|
|
868
|
+
}
|
|
869
|
+
|
|
870
|
+
self.tools["update_case"] = {
|
|
871
|
+
"name": "update_case",
|
|
872
|
+
"description": "Update a case with new information (title, description, priority, status, tags, assignee)",
|
|
873
|
+
"inputSchema": {
|
|
874
|
+
"type": "object",
|
|
875
|
+
"properties": {
|
|
876
|
+
"case_id": {
|
|
877
|
+
"type": "string",
|
|
878
|
+
"description": "The ID of the case to update"
|
|
879
|
+
},
|
|
880
|
+
"title": {
|
|
881
|
+
"type": "string",
|
|
882
|
+
"description": "New case title"
|
|
883
|
+
},
|
|
884
|
+
"description": {
|
|
885
|
+
"type": "string",
|
|
886
|
+
"description": "New case description"
|
|
887
|
+
},
|
|
888
|
+
"priority": {
|
|
889
|
+
"type": "string",
|
|
890
|
+
"enum": ["low", "medium", "high", "critical"],
|
|
891
|
+
"description": "New priority"
|
|
892
|
+
},
|
|
893
|
+
"status": {
|
|
894
|
+
"type": "string",
|
|
895
|
+
"enum": ["open", "in_progress", "closed"],
|
|
896
|
+
"description": "New status"
|
|
897
|
+
},
|
|
898
|
+
"tags": {
|
|
899
|
+
"type": "array",
|
|
900
|
+
"items": {"type": "string"},
|
|
901
|
+
"description": "New tags list"
|
|
902
|
+
},
|
|
903
|
+
"assignee": {
|
|
904
|
+
"type": "string",
|
|
905
|
+
"description": "New assignee"
|
|
906
|
+
}
|
|
907
|
+
},
|
|
908
|
+
"required": ["case_id"]
|
|
909
|
+
}
|
|
910
|
+
}
|
|
911
|
+
|
|
912
|
+
self.tools["link_cases"] = {
|
|
913
|
+
"name": "link_cases",
|
|
914
|
+
"description": "Link two cases together to indicate a relationship (e.g., duplicate, related, escalated from)",
|
|
915
|
+
"inputSchema": {
|
|
916
|
+
"type": "object",
|
|
917
|
+
"properties": {
|
|
918
|
+
"source_case_id": {
|
|
919
|
+
"type": "string",
|
|
920
|
+
"description": "The ID of the source case"
|
|
921
|
+
},
|
|
922
|
+
"target_case_id": {
|
|
923
|
+
"type": "string",
|
|
924
|
+
"description": "The ID of the target case to link to"
|
|
925
|
+
},
|
|
926
|
+
"link_type": {
|
|
927
|
+
"type": "string",
|
|
928
|
+
"description": "Type of link (related_to, duplicate_of, escalated_from, child_of, blocked_by)",
|
|
929
|
+
"default": "related_to"
|
|
930
|
+
}
|
|
931
|
+
},
|
|
932
|
+
"required": ["source_case_id", "target_case_id"]
|
|
933
|
+
}
|
|
934
|
+
}
|
|
935
|
+
|
|
936
|
+
self.tools["add_case_timeline_event"] = {
|
|
937
|
+
"name": "add_case_timeline_event",
|
|
938
|
+
"description": "Add an event to a case timeline for tracking investigation activities and milestones",
|
|
939
|
+
"inputSchema": {
|
|
940
|
+
"type": "object",
|
|
941
|
+
"properties": {
|
|
942
|
+
"case_id": {
|
|
943
|
+
"type": "string",
|
|
944
|
+
"description": "The ID of the case"
|
|
945
|
+
},
|
|
946
|
+
"title": {
|
|
947
|
+
"type": "string",
|
|
948
|
+
"description": "Event title"
|
|
949
|
+
},
|
|
950
|
+
"content": {
|
|
951
|
+
"type": "string",
|
|
952
|
+
"description": "Event content/description"
|
|
953
|
+
},
|
|
954
|
+
"source": {
|
|
955
|
+
"type": "string",
|
|
956
|
+
"description": "Event source (e.g., 'SamiGPT', 'SIEM', 'EDR')"
|
|
957
|
+
},
|
|
958
|
+
"category_id": {
|
|
959
|
+
"type": "integer",
|
|
960
|
+
"description": "Event category ID"
|
|
961
|
+
},
|
|
962
|
+
"tags": {
|
|
963
|
+
"type": "array",
|
|
964
|
+
"items": {"type": "string"},
|
|
965
|
+
"description": "Event tags"
|
|
966
|
+
},
|
|
967
|
+
"color": {
|
|
968
|
+
"type": "string",
|
|
969
|
+
"description": "Event color (hex format, e.g., '#1572E899')"
|
|
970
|
+
},
|
|
971
|
+
"event_date": {
|
|
972
|
+
"type": "string",
|
|
973
|
+
"description": "Event date in ISO format (defaults to current time)"
|
|
974
|
+
},
|
|
975
|
+
"include_in_summary": {
|
|
976
|
+
"type": "boolean",
|
|
977
|
+
"description": "Include event in case summary",
|
|
978
|
+
"default": True
|
|
979
|
+
},
|
|
980
|
+
"include_in_graph": {
|
|
981
|
+
"type": "boolean",
|
|
982
|
+
"description": "Include event in case graph",
|
|
983
|
+
"default": True
|
|
984
|
+
},
|
|
985
|
+
"sync_iocs_assets": {
|
|
986
|
+
"type": "boolean",
|
|
987
|
+
"description": "Sync with IOCs and assets",
|
|
988
|
+
"default": True
|
|
989
|
+
},
|
|
990
|
+
"asset_ids": {
|
|
991
|
+
"type": "array",
|
|
992
|
+
"items": {"type": "integer"},
|
|
993
|
+
"description": "Related asset IDs"
|
|
994
|
+
},
|
|
995
|
+
"ioc_ids": {
|
|
996
|
+
"type": "array",
|
|
997
|
+
"items": {"type": "integer"},
|
|
998
|
+
"description": "Related IOC IDs"
|
|
999
|
+
},
|
|
1000
|
+
"custom_attributes": {
|
|
1001
|
+
"type": "object",
|
|
1002
|
+
"description": "Custom attributes"
|
|
1003
|
+
},
|
|
1004
|
+
"raw": {
|
|
1005
|
+
"type": "string",
|
|
1006
|
+
"description": "Raw event data"
|
|
1007
|
+
},
|
|
1008
|
+
"tz": {
|
|
1009
|
+
"type": "string",
|
|
1010
|
+
"description": "Timezone",
|
|
1011
|
+
"default": "+00:00"
|
|
1012
|
+
}
|
|
1013
|
+
},
|
|
1014
|
+
"required": ["case_id", "title", "content"]
|
|
1015
|
+
}
|
|
1016
|
+
}
|
|
1017
|
+
|
|
1018
|
+
self.tools["list_case_timeline_events"] = {
|
|
1019
|
+
"name": "list_case_timeline_events",
|
|
1020
|
+
"description": "List all timeline events associated with a case",
|
|
1021
|
+
"inputSchema": {
|
|
1022
|
+
"type": "object",
|
|
1023
|
+
"properties": {
|
|
1024
|
+
"case_id": {
|
|
1025
|
+
"type": "string",
|
|
1026
|
+
"description": "The ID of the case"
|
|
1027
|
+
}
|
|
1028
|
+
},
|
|
1029
|
+
"required": ["case_id"]
|
|
1030
|
+
}
|
|
1031
|
+
}
|
|
1032
|
+
|
|
1033
|
+
def _register_siem_tools(self) -> None:
|
|
1034
|
+
"""
|
|
1035
|
+
Register SIEM tools.
|
|
1036
|
+
|
|
1037
|
+
Available tools:
|
|
1038
|
+
- search_security_events: Search security events using vendor-specific query language
|
|
1039
|
+
- get_file_report: Get aggregated report about a file by hash
|
|
1040
|
+
- get_file_behavior_summary: Get behavior analysis (process trees, network activity, persistence)
|
|
1041
|
+
- get_entities_related_to_file: Get related entities (hosts, users, processes, alerts)
|
|
1042
|
+
- get_ip_address_report: Get IP reputation, geolocation, and related alerts
|
|
1043
|
+
- search_user_activity: Search security events related to a specific user
|
|
1044
|
+
- pivot_on_indicator: Search for all events related to an IOC (hash, IP, domain, etc.)
|
|
1045
|
+
- search_kql_query: Execute KQL or advanced queries for deeper investigations
|
|
1046
|
+
|
|
1047
|
+
See TOOLS.md for detailed documentation and usage examples.
|
|
1048
|
+
"""
|
|
1049
|
+
if not self.siem_client:
|
|
1050
|
+
self._mcp_logger.warning(
|
|
1051
|
+
"SIEM tools not registered: No SIEM client configured. "
|
|
1052
|
+
"Configure Elastic or other SIEM in config.json to enable SIEM tools."
|
|
1053
|
+
)
|
|
1054
|
+
return
|
|
1055
|
+
self._mcp_logger.info(f"Registering {26} SIEM tools")
|
|
1056
|
+
|
|
1057
|
+
self.tools["search_security_events"] = {
|
|
1058
|
+
"name": "search_security_events",
|
|
1059
|
+
"description": "Search security events and logs across all environments using a query string.",
|
|
1060
|
+
"inputSchema": {
|
|
1061
|
+
"type": "object",
|
|
1062
|
+
"properties": {
|
|
1063
|
+
"query": {
|
|
1064
|
+
"type": "string",
|
|
1065
|
+
"description": "Search query (vendor-specific query language)",
|
|
1066
|
+
},
|
|
1067
|
+
"limit": {
|
|
1068
|
+
"type": "integer",
|
|
1069
|
+
"description": "Maximum number of events to return",
|
|
1070
|
+
"default": 100,
|
|
1071
|
+
},
|
|
1072
|
+
},
|
|
1073
|
+
"required": ["query"],
|
|
1074
|
+
},
|
|
1075
|
+
}
|
|
1076
|
+
|
|
1077
|
+
self.tools["get_file_report"] = {
|
|
1078
|
+
"name": "get_file_report",
|
|
1079
|
+
"description": "Retrieve an aggregated report about a file identified by its hash.",
|
|
1080
|
+
"inputSchema": {
|
|
1081
|
+
"type": "object",
|
|
1082
|
+
"properties": {
|
|
1083
|
+
"file_hash": {
|
|
1084
|
+
"type": "string",
|
|
1085
|
+
"description": "The file hash (MD5, SHA256, etc.)",
|
|
1086
|
+
}
|
|
1087
|
+
},
|
|
1088
|
+
"required": ["file_hash"],
|
|
1089
|
+
},
|
|
1090
|
+
}
|
|
1091
|
+
|
|
1092
|
+
self.tools["get_file_behavior_summary"] = {
|
|
1093
|
+
"name": "get_file_behavior_summary",
|
|
1094
|
+
"description": "Retrieve a high-level behavior summary for a file, including process trees, network activity, and persistence mechanisms.",
|
|
1095
|
+
"inputSchema": {
|
|
1096
|
+
"type": "object",
|
|
1097
|
+
"properties": {
|
|
1098
|
+
"file_hash": {"type": "string", "description": "The file hash"}
|
|
1099
|
+
},
|
|
1100
|
+
"required": ["file_hash"],
|
|
1101
|
+
},
|
|
1102
|
+
}
|
|
1103
|
+
|
|
1104
|
+
self.tools["get_entities_related_to_file"] = {
|
|
1105
|
+
"name": "get_entities_related_to_file",
|
|
1106
|
+
"description": "Retrieve entities related to a file hash, such as hosts where it was seen, users who executed it, related processes, and alerts.",
|
|
1107
|
+
"inputSchema": {
|
|
1108
|
+
"type": "object",
|
|
1109
|
+
"properties": {
|
|
1110
|
+
"file_hash": {"type": "string", "description": "The file hash"}
|
|
1111
|
+
},
|
|
1112
|
+
"required": ["file_hash"],
|
|
1113
|
+
},
|
|
1114
|
+
}
|
|
1115
|
+
|
|
1116
|
+
self.tools["get_ip_address_report"] = {
|
|
1117
|
+
"name": "get_ip_address_report",
|
|
1118
|
+
"description": "Retrieve an aggregated report about an IP address, including reputation, geolocation, and related alerts.",
|
|
1119
|
+
"inputSchema": {
|
|
1120
|
+
"type": "object",
|
|
1121
|
+
"properties": {
|
|
1122
|
+
"ip": {"type": "string", "description": "The IP address"}
|
|
1123
|
+
},
|
|
1124
|
+
"required": ["ip"],
|
|
1125
|
+
},
|
|
1126
|
+
}
|
|
1127
|
+
|
|
1128
|
+
self.tools["search_user_activity"] = {
|
|
1129
|
+
"name": "search_user_activity",
|
|
1130
|
+
"description": "Search for security events related to a specific user, including authentication events, file access, and other activities.",
|
|
1131
|
+
"inputSchema": {
|
|
1132
|
+
"type": "object",
|
|
1133
|
+
"properties": {
|
|
1134
|
+
"username": {
|
|
1135
|
+
"type": "string",
|
|
1136
|
+
"description": "The username to search for",
|
|
1137
|
+
},
|
|
1138
|
+
"limit": {
|
|
1139
|
+
"type": "integer",
|
|
1140
|
+
"description": "Maximum number of events to return",
|
|
1141
|
+
"default": 100,
|
|
1142
|
+
},
|
|
1143
|
+
},
|
|
1144
|
+
"required": ["username"],
|
|
1145
|
+
},
|
|
1146
|
+
}
|
|
1147
|
+
|
|
1148
|
+
self.tools["pivot_on_indicator"] = {
|
|
1149
|
+
"name": "pivot_on_indicator",
|
|
1150
|
+
"description": "Given an IOC (file hash, IP address, domain, etc.), search for all related security events across environments for further investigation.",
|
|
1151
|
+
"inputSchema": {
|
|
1152
|
+
"type": "object",
|
|
1153
|
+
"properties": {
|
|
1154
|
+
"indicator": {
|
|
1155
|
+
"type": "string",
|
|
1156
|
+
"description": "The IOC (hash, IP, domain, etc.)",
|
|
1157
|
+
},
|
|
1158
|
+
"limit": {
|
|
1159
|
+
"type": "integer",
|
|
1160
|
+
"description": "Maximum number of events to return",
|
|
1161
|
+
"default": 200,
|
|
1162
|
+
},
|
|
1163
|
+
},
|
|
1164
|
+
"required": ["indicator"],
|
|
1165
|
+
},
|
|
1166
|
+
}
|
|
1167
|
+
|
|
1168
|
+
self.tools["search_kql_query"] = {
|
|
1169
|
+
"name": "search_kql_query",
|
|
1170
|
+
"description": "Execute a KQL (Kusto Query Language) or advanced query for deeper investigations. Supports complex queries including advanced filtering, aggregations, time-based analysis, cross-index searches, and complex joins. Supports both KQL syntax and vendor-specific query DSL (e.g., Elasticsearch Query DSL).",
|
|
1171
|
+
"inputSchema": {
|
|
1172
|
+
"type": "object",
|
|
1173
|
+
"properties": {
|
|
1174
|
+
"kql_query": {
|
|
1175
|
+
"type": "string",
|
|
1176
|
+
"description": "KQL query string or advanced query DSL (JSON for Elasticsearch)",
|
|
1177
|
+
},
|
|
1178
|
+
"limit": {
|
|
1179
|
+
"type": "integer",
|
|
1180
|
+
"description": "Maximum number of events to return",
|
|
1181
|
+
"default": 500,
|
|
1182
|
+
},
|
|
1183
|
+
"hours_back": {
|
|
1184
|
+
"type": "integer",
|
|
1185
|
+
"description": "Optional time window in hours to limit the search",
|
|
1186
|
+
},
|
|
1187
|
+
},
|
|
1188
|
+
"required": ["kql_query"],
|
|
1189
|
+
},
|
|
1190
|
+
}
|
|
1191
|
+
|
|
1192
|
+
# Alert summarization / grouping tool
|
|
1193
|
+
self.tools["get_recent_alerts"] = {
|
|
1194
|
+
"name": "get_recent_alerts",
|
|
1195
|
+
"description": "Get recent SIEM alerts (last N hours) and smart-group similar alerts together for AI triage.",
|
|
1196
|
+
"inputSchema": {
|
|
1197
|
+
"type": "object",
|
|
1198
|
+
"properties": {
|
|
1199
|
+
"hours_back": {
|
|
1200
|
+
"type": "integer",
|
|
1201
|
+
"description": "How many hours to look back for alerts",
|
|
1202
|
+
"default": 1,
|
|
1203
|
+
},
|
|
1204
|
+
"max_alerts": {
|
|
1205
|
+
"type": "integer",
|
|
1206
|
+
"description": "Maximum number of alerts to retrieve before grouping",
|
|
1207
|
+
"default": 100,
|
|
1208
|
+
},
|
|
1209
|
+
"status_filter": {
|
|
1210
|
+
"type": "string",
|
|
1211
|
+
"description": "Filter by alert status (implementation-specific string filter)",
|
|
1212
|
+
},
|
|
1213
|
+
"severity": {
|
|
1214
|
+
"type": "string",
|
|
1215
|
+
"description": "Filter by severity (low, medium, high, critical)",
|
|
1216
|
+
},
|
|
1217
|
+
"hostname": {
|
|
1218
|
+
"type": "string",
|
|
1219
|
+
"description": "Filter alerts by hostname (matches host.name field)",
|
|
1220
|
+
},
|
|
1221
|
+
},
|
|
1222
|
+
},
|
|
1223
|
+
}
|
|
1224
|
+
|
|
1225
|
+
# Network and DNS Event Tools
|
|
1226
|
+
self.tools["get_network_events"] = {
|
|
1227
|
+
"name": "get_network_events",
|
|
1228
|
+
"description": "Retrieve network traffic events (firewall, netflow, proxy logs) with structured fields for analysis. Returns network events with source/destination IPs, ports, protocols, bytes, packets, and connection duration.",
|
|
1229
|
+
"inputSchema": {
|
|
1230
|
+
"type": "object",
|
|
1231
|
+
"properties": {
|
|
1232
|
+
"source_ip": {
|
|
1233
|
+
"type": "string",
|
|
1234
|
+
"description": "Source IP address",
|
|
1235
|
+
},
|
|
1236
|
+
"destination_ip": {
|
|
1237
|
+
"type": "string",
|
|
1238
|
+
"description": "Destination IP address",
|
|
1239
|
+
},
|
|
1240
|
+
"port": {
|
|
1241
|
+
"type": "integer",
|
|
1242
|
+
"description": "Port number",
|
|
1243
|
+
},
|
|
1244
|
+
"protocol": {
|
|
1245
|
+
"type": "string",
|
|
1246
|
+
"description": "Protocol (tcp, udp, icmp, etc.)",
|
|
1247
|
+
},
|
|
1248
|
+
"hours_back": {
|
|
1249
|
+
"type": "integer",
|
|
1250
|
+
"description": "Time window in hours",
|
|
1251
|
+
"default": 24,
|
|
1252
|
+
},
|
|
1253
|
+
"limit": {
|
|
1254
|
+
"type": "integer",
|
|
1255
|
+
"description": "Maximum number of events to return",
|
|
1256
|
+
"default": 100,
|
|
1257
|
+
},
|
|
1258
|
+
"event_type": {
|
|
1259
|
+
"type": "string",
|
|
1260
|
+
"description": "Filter by event type (firewall, netflow, proxy, all)",
|
|
1261
|
+
},
|
|
1262
|
+
},
|
|
1263
|
+
},
|
|
1264
|
+
}
|
|
1265
|
+
|
|
1266
|
+
self.tools["get_dns_events"] = {
|
|
1267
|
+
"name": "get_dns_events",
|
|
1268
|
+
"description": "Retrieve DNS query and response events with structured fields for analysis. Returns DNS events with domain, query type, resolved IP, source IP, and response codes.",
|
|
1269
|
+
"inputSchema": {
|
|
1270
|
+
"type": "object",
|
|
1271
|
+
"properties": {
|
|
1272
|
+
"domain": {
|
|
1273
|
+
"type": "string",
|
|
1274
|
+
"description": "Domain name queried",
|
|
1275
|
+
},
|
|
1276
|
+
"ip_address": {
|
|
1277
|
+
"type": "string",
|
|
1278
|
+
"description": "IP that made the query",
|
|
1279
|
+
},
|
|
1280
|
+
"resolved_ip": {
|
|
1281
|
+
"type": "string",
|
|
1282
|
+
"description": "Resolved IP address",
|
|
1283
|
+
},
|
|
1284
|
+
"query_type": {
|
|
1285
|
+
"type": "string",
|
|
1286
|
+
"description": "DNS query type (A, AAAA, MX, TXT, etc.)",
|
|
1287
|
+
},
|
|
1288
|
+
"hours_back": {
|
|
1289
|
+
"type": "integer",
|
|
1290
|
+
"description": "Time window in hours",
|
|
1291
|
+
"default": 24,
|
|
1292
|
+
},
|
|
1293
|
+
"limit": {
|
|
1294
|
+
"type": "integer",
|
|
1295
|
+
"description": "Maximum number of events to return",
|
|
1296
|
+
"default": 100,
|
|
1297
|
+
},
|
|
1298
|
+
},
|
|
1299
|
+
},
|
|
1300
|
+
}
|
|
1301
|
+
|
|
1302
|
+
# Alert Correlation Tools
|
|
1303
|
+
self.tools["get_alerts_by_entity"] = {
|
|
1304
|
+
"name": "get_alerts_by_entity",
|
|
1305
|
+
"description": "Retrieve alerts filtered by specific entity (IP, user, host, domain, hash) for correlation analysis. Returns alerts that contain the specified entity.",
|
|
1306
|
+
"inputSchema": {
|
|
1307
|
+
"type": "object",
|
|
1308
|
+
"properties": {
|
|
1309
|
+
"entity_value": {
|
|
1310
|
+
"type": "string",
|
|
1311
|
+
"description": "Entity value (IP, user, hostname, domain, hash)",
|
|
1312
|
+
},
|
|
1313
|
+
"entity_type": {
|
|
1314
|
+
"type": "string",
|
|
1315
|
+
"description": "Entity type (auto-detected if not provided: ip, user, host, domain, hash)",
|
|
1316
|
+
},
|
|
1317
|
+
"hours_back": {
|
|
1318
|
+
"type": "integer",
|
|
1319
|
+
"description": "Lookback period in hours",
|
|
1320
|
+
"default": 24,
|
|
1321
|
+
},
|
|
1322
|
+
"limit": {
|
|
1323
|
+
"type": "integer",
|
|
1324
|
+
"description": "Maximum number of alerts to return",
|
|
1325
|
+
"default": 50,
|
|
1326
|
+
},
|
|
1327
|
+
"severity": {
|
|
1328
|
+
"type": "string",
|
|
1329
|
+
"description": "Filter by severity (low, medium, high, critical)",
|
|
1330
|
+
},
|
|
1331
|
+
},
|
|
1332
|
+
"required": ["entity_value"],
|
|
1333
|
+
},
|
|
1334
|
+
}
|
|
1335
|
+
|
|
1336
|
+
self.tools["get_alerts_by_time_window"] = {
|
|
1337
|
+
"name": "get_alerts_by_time_window",
|
|
1338
|
+
"description": "Retrieve alerts within a specific time window for temporal correlation. Returns alerts that occurred between start_time and end_time.",
|
|
1339
|
+
"inputSchema": {
|
|
1340
|
+
"type": "object",
|
|
1341
|
+
"properties": {
|
|
1342
|
+
"start_time": {
|
|
1343
|
+
"type": "string",
|
|
1344
|
+
"description": "Start time (ISO format)",
|
|
1345
|
+
},
|
|
1346
|
+
"end_time": {
|
|
1347
|
+
"type": "string",
|
|
1348
|
+
"description": "End time (ISO format)",
|
|
1349
|
+
},
|
|
1350
|
+
"limit": {
|
|
1351
|
+
"type": "integer",
|
|
1352
|
+
"description": "Maximum number of alerts to return",
|
|
1353
|
+
"default": 100,
|
|
1354
|
+
},
|
|
1355
|
+
"severity": {
|
|
1356
|
+
"type": "string",
|
|
1357
|
+
"description": "Filter by severity",
|
|
1358
|
+
},
|
|
1359
|
+
"alert_type": {
|
|
1360
|
+
"type": "string",
|
|
1361
|
+
"description": "Filter by alert type",
|
|
1362
|
+
},
|
|
1363
|
+
},
|
|
1364
|
+
"required": ["start_time", "end_time"],
|
|
1365
|
+
},
|
|
1366
|
+
}
|
|
1367
|
+
|
|
1368
|
+
self.tools["get_all_uncertain_alerts_for_host"] = {
|
|
1369
|
+
"name": "get_all_uncertain_alerts_for_host",
|
|
1370
|
+
"description": "Retrieve all alerts with verdict='uncertain' for a specific host. This is useful for pattern analysis when investigating uncertain alerts to determine if multiple uncertain alerts on the same host indicate a broader issue requiring case creation and escalation.",
|
|
1371
|
+
"inputSchema": {
|
|
1372
|
+
"type": "object",
|
|
1373
|
+
"properties": {
|
|
1374
|
+
"hostname": {
|
|
1375
|
+
"type": "string",
|
|
1376
|
+
"description": "The hostname to search for",
|
|
1377
|
+
},
|
|
1378
|
+
"hours_back": {
|
|
1379
|
+
"type": "integer",
|
|
1380
|
+
"description": "How many hours to look back (default: 168 = 7 days)",
|
|
1381
|
+
"default": 168,
|
|
1382
|
+
},
|
|
1383
|
+
"limit": {
|
|
1384
|
+
"type": "integer",
|
|
1385
|
+
"description": "Maximum number of alerts to return",
|
|
1386
|
+
"default": 100,
|
|
1387
|
+
},
|
|
1388
|
+
},
|
|
1389
|
+
"required": ["hostname"],
|
|
1390
|
+
},
|
|
1391
|
+
}
|
|
1392
|
+
|
|
1393
|
+
# Email Security Tools
|
|
1394
|
+
self.tools["get_email_events"] = {
|
|
1395
|
+
"name": "get_email_events",
|
|
1396
|
+
"description": "Retrieve email security events with structured fields for phishing analysis. Returns email events with sender, recipient, subject, headers, authentication, URLs, and attachments.",
|
|
1397
|
+
"inputSchema": {
|
|
1398
|
+
"type": "object",
|
|
1399
|
+
"properties": {
|
|
1400
|
+
"sender_email": {
|
|
1401
|
+
"type": "string",
|
|
1402
|
+
"description": "Sender email address",
|
|
1403
|
+
},
|
|
1404
|
+
"recipient_email": {
|
|
1405
|
+
"type": "string",
|
|
1406
|
+
"description": "Recipient email address",
|
|
1407
|
+
},
|
|
1408
|
+
"subject": {
|
|
1409
|
+
"type": "string",
|
|
1410
|
+
"description": "Email subject (partial match)",
|
|
1411
|
+
},
|
|
1412
|
+
"email_id": {
|
|
1413
|
+
"type": "string",
|
|
1414
|
+
"description": "Email message ID",
|
|
1415
|
+
},
|
|
1416
|
+
"hours_back": {
|
|
1417
|
+
"type": "integer",
|
|
1418
|
+
"description": "Time window in hours",
|
|
1419
|
+
"default": 24,
|
|
1420
|
+
},
|
|
1421
|
+
"limit": {
|
|
1422
|
+
"type": "integer",
|
|
1423
|
+
"description": "Maximum number of events to return",
|
|
1424
|
+
"default": 100,
|
|
1425
|
+
},
|
|
1426
|
+
"event_type": {
|
|
1427
|
+
"type": "string",
|
|
1428
|
+
"description": "Filter by event type (delivered, blocked, quarantined, all)",
|
|
1429
|
+
},
|
|
1430
|
+
},
|
|
1431
|
+
},
|
|
1432
|
+
}
|
|
1433
|
+
|
|
1434
|
+
# Alert Management Tools
|
|
1435
|
+
self.tools["get_security_alerts"] = {
|
|
1436
|
+
"name": "get_security_alerts",
|
|
1437
|
+
"description": "Get security alerts directly from the SIEM platform.",
|
|
1438
|
+
"inputSchema": {
|
|
1439
|
+
"type": "object",
|
|
1440
|
+
"properties": {
|
|
1441
|
+
"hours_back": {
|
|
1442
|
+
"type": "integer",
|
|
1443
|
+
"description": "How many hours to look back",
|
|
1444
|
+
"default": 24,
|
|
1445
|
+
},
|
|
1446
|
+
"max_alerts": {
|
|
1447
|
+
"type": "integer",
|
|
1448
|
+
"description": "Maximum number of alerts to return",
|
|
1449
|
+
"default": 10,
|
|
1450
|
+
},
|
|
1451
|
+
"status_filter": {
|
|
1452
|
+
"type": "string",
|
|
1453
|
+
"description": "Filter by status",
|
|
1454
|
+
},
|
|
1455
|
+
"severity": {
|
|
1456
|
+
"type": "string",
|
|
1457
|
+
"description": "Filter by severity (low, medium, high, critical)",
|
|
1458
|
+
},
|
|
1459
|
+
},
|
|
1460
|
+
},
|
|
1461
|
+
}
|
|
1462
|
+
|
|
1463
|
+
self.tools["get_security_alert_by_id"] = {
|
|
1464
|
+
"name": "get_security_alert_by_id",
|
|
1465
|
+
"description": "Get detailed information about a specific security alert by its ID.",
|
|
1466
|
+
"inputSchema": {
|
|
1467
|
+
"type": "object",
|
|
1468
|
+
"properties": {
|
|
1469
|
+
"alert_id": {
|
|
1470
|
+
"type": "string",
|
|
1471
|
+
"description": "The ID of the alert",
|
|
1472
|
+
},
|
|
1473
|
+
"include_detections": {
|
|
1474
|
+
"type": "boolean",
|
|
1475
|
+
"description": "Whether to include detection details",
|
|
1476
|
+
"default": True,
|
|
1477
|
+
},
|
|
1478
|
+
},
|
|
1479
|
+
"required": ["alert_id"],
|
|
1480
|
+
},
|
|
1481
|
+
}
|
|
1482
|
+
|
|
1483
|
+
self.tools["get_siem_event_by_id"] = {
|
|
1484
|
+
"name": "get_siem_event_by_id",
|
|
1485
|
+
"description": "Retrieve a specific security event by its unique identifier (event ID). This tool allows you to get the exact event details when you know the event ID.",
|
|
1486
|
+
"inputSchema": {
|
|
1487
|
+
"type": "object",
|
|
1488
|
+
"properties": {
|
|
1489
|
+
"event_id": {
|
|
1490
|
+
"type": "string",
|
|
1491
|
+
"description": "The unique identifier of the event to retrieve",
|
|
1492
|
+
},
|
|
1493
|
+
},
|
|
1494
|
+
"required": ["event_id"],
|
|
1495
|
+
},
|
|
1496
|
+
}
|
|
1497
|
+
|
|
1498
|
+
self.tools["close_alert"] = {
|
|
1499
|
+
"name": "close_alert",
|
|
1500
|
+
"description": "Close a security alert in the SIEM platform. Use this when an alert has been determined to be a false positive or benign true positive during triage.",
|
|
1501
|
+
"inputSchema": {
|
|
1502
|
+
"type": "object",
|
|
1503
|
+
"properties": {
|
|
1504
|
+
"alert_id": {
|
|
1505
|
+
"type": "string",
|
|
1506
|
+
"description": "The ID of the alert to close",
|
|
1507
|
+
},
|
|
1508
|
+
"reason": {
|
|
1509
|
+
"type": "string",
|
|
1510
|
+
"description": "Reason for closing (e.g., 'false_positive', 'benign_true_positive')",
|
|
1511
|
+
},
|
|
1512
|
+
"comment": {
|
|
1513
|
+
"type": "string",
|
|
1514
|
+
"description": "Comment explaining why the alert is being closed",
|
|
1515
|
+
},
|
|
1516
|
+
},
|
|
1517
|
+
"required": ["alert_id"],
|
|
1518
|
+
},
|
|
1519
|
+
}
|
|
1520
|
+
|
|
1521
|
+
self.tools["update_alert_verdict"] = {
|
|
1522
|
+
"name": "update_alert_verdict",
|
|
1523
|
+
"description": "Update the verdict for a security alert. Use this to set or update the verdict field (e.g., 'in-progress', 'false_positive', 'benign_true_positive', 'true_positive', 'uncertain'). This is the preferred method for setting verdicts as it clearly indicates the intent to update the verdict rather than close the alert.",
|
|
1524
|
+
"inputSchema": {
|
|
1525
|
+
"type": "object",
|
|
1526
|
+
"properties": {
|
|
1527
|
+
"alert_id": {
|
|
1528
|
+
"type": "string",
|
|
1529
|
+
"description": "The ID of the alert to update",
|
|
1530
|
+
},
|
|
1531
|
+
"verdict": {
|
|
1532
|
+
"type": "string",
|
|
1533
|
+
"description": "The verdict value. Valid values: 'in-progress', 'false_positive', 'benign_true_positive', 'true_positive', 'uncertain'",
|
|
1534
|
+
"enum": ["in-progress", "false_positive", "benign_true_positive", "true_positive", "uncertain"],
|
|
1535
|
+
},
|
|
1536
|
+
"comment": {
|
|
1537
|
+
"type": "string",
|
|
1538
|
+
"description": "Optional comment explaining the verdict",
|
|
1539
|
+
},
|
|
1540
|
+
},
|
|
1541
|
+
"required": ["alert_id", "verdict"],
|
|
1542
|
+
},
|
|
1543
|
+
}
|
|
1544
|
+
|
|
1545
|
+
self.tools["tag_alert"] = {
|
|
1546
|
+
"name": "tag_alert",
|
|
1547
|
+
"description": "Tag a security alert in the SIEM platform with a classification. Use this to mark alerts as FP (False Positive), TP (True Positive), or NMI (Need More Investigation).",
|
|
1548
|
+
"inputSchema": {
|
|
1549
|
+
"type": "object",
|
|
1550
|
+
"properties": {
|
|
1551
|
+
"alert_id": {
|
|
1552
|
+
"type": "string",
|
|
1553
|
+
"description": "The ID of the alert to tag",
|
|
1554
|
+
},
|
|
1555
|
+
"tag": {
|
|
1556
|
+
"type": "string",
|
|
1557
|
+
"description": "The tag to apply. Must be one of: 'FP' (False Positive), 'TP' (True Positive), or 'NMI' (Need More Investigation)",
|
|
1558
|
+
"enum": ["FP", "TP", "NMI"],
|
|
1559
|
+
},
|
|
1560
|
+
},
|
|
1561
|
+
"required": ["alert_id", "tag"],
|
|
1562
|
+
},
|
|
1563
|
+
}
|
|
1564
|
+
|
|
1565
|
+
self.tools["add_alert_note"] = {
|
|
1566
|
+
"name": "add_alert_note",
|
|
1567
|
+
"description": "Add a note or comment to a security alert in the SIEM platform. Use this to document investigation findings, recommendations for detection rule improvements, case numbers, or other relevant information about the alert.",
|
|
1568
|
+
"inputSchema": {
|
|
1569
|
+
"type": "object",
|
|
1570
|
+
"properties": {
|
|
1571
|
+
"alert_id": {
|
|
1572
|
+
"type": "string",
|
|
1573
|
+
"description": "The ID of the alert to add a note to",
|
|
1574
|
+
},
|
|
1575
|
+
"note": {
|
|
1576
|
+
"type": "string",
|
|
1577
|
+
"description": "The note/comment text to add. Should include investigation findings, case numbers (if applicable), and recommendations for detection rule improvements.",
|
|
1578
|
+
},
|
|
1579
|
+
},
|
|
1580
|
+
"required": ["alert_id", "note"],
|
|
1581
|
+
},
|
|
1582
|
+
}
|
|
1583
|
+
|
|
1584
|
+
# Entity & Intelligence Tools
|
|
1585
|
+
self.tools["lookup_entity"] = {
|
|
1586
|
+
"name": "lookup_entity",
|
|
1587
|
+
"description": "Look up an entity (IP address, domain, hash, user, etc.) in the SIEM for enrichment.",
|
|
1588
|
+
"inputSchema": {
|
|
1589
|
+
"type": "object",
|
|
1590
|
+
"properties": {
|
|
1591
|
+
"entity_value": {
|
|
1592
|
+
"type": "string",
|
|
1593
|
+
"description": "Value to look up",
|
|
1594
|
+
},
|
|
1595
|
+
"entity_type": {
|
|
1596
|
+
"type": "string",
|
|
1597
|
+
"description": "Type of entity (ip, domain, hash, user, etc.)",
|
|
1598
|
+
},
|
|
1599
|
+
"hours_back": {
|
|
1600
|
+
"type": "integer",
|
|
1601
|
+
"description": "How many hours of historical data",
|
|
1602
|
+
"default": 24,
|
|
1603
|
+
},
|
|
1604
|
+
},
|
|
1605
|
+
"required": ["entity_value"],
|
|
1606
|
+
},
|
|
1607
|
+
}
|
|
1608
|
+
|
|
1609
|
+
self.tools["get_ioc_matches"] = {
|
|
1610
|
+
"name": "get_ioc_matches",
|
|
1611
|
+
"description": "Get Indicators of Compromise (IoC) matches from the SIEM.",
|
|
1612
|
+
"inputSchema": {
|
|
1613
|
+
"type": "object",
|
|
1614
|
+
"properties": {
|
|
1615
|
+
"hours_back": {
|
|
1616
|
+
"type": "integer",
|
|
1617
|
+
"description": "How many hours back to look",
|
|
1618
|
+
"default": 24,
|
|
1619
|
+
},
|
|
1620
|
+
"max_matches": {
|
|
1621
|
+
"type": "integer",
|
|
1622
|
+
"description": "Maximum number of matches",
|
|
1623
|
+
"default": 20,
|
|
1624
|
+
},
|
|
1625
|
+
"ioc_type": {
|
|
1626
|
+
"type": "string",
|
|
1627
|
+
"description": "Filter by IoC type (ip, domain, hash, url, etc.)",
|
|
1628
|
+
},
|
|
1629
|
+
"severity": {
|
|
1630
|
+
"type": "string",
|
|
1631
|
+
"description": "Filter by severity level",
|
|
1632
|
+
},
|
|
1633
|
+
},
|
|
1634
|
+
},
|
|
1635
|
+
}
|
|
1636
|
+
|
|
1637
|
+
self.tools["get_threat_intel"] = {
|
|
1638
|
+
"name": "get_threat_intel",
|
|
1639
|
+
"description": "Get answers to security questions using integrated threat intelligence.",
|
|
1640
|
+
"inputSchema": {
|
|
1641
|
+
"type": "object",
|
|
1642
|
+
"properties": {
|
|
1643
|
+
"query": {
|
|
1644
|
+
"type": "string",
|
|
1645
|
+
"description": "The security or threat intelligence question",
|
|
1646
|
+
},
|
|
1647
|
+
"context": {
|
|
1648
|
+
"type": "object",
|
|
1649
|
+
"description": "Additional context (indicators, events, etc.)",
|
|
1650
|
+
},
|
|
1651
|
+
},
|
|
1652
|
+
"required": ["query"],
|
|
1653
|
+
},
|
|
1654
|
+
}
|
|
1655
|
+
|
|
1656
|
+
# Detection Rule Management
|
|
1657
|
+
self.tools["list_security_rules"] = {
|
|
1658
|
+
"name": "list_security_rules",
|
|
1659
|
+
"description": "List all security detection rules configured in the SIEM platform.",
|
|
1660
|
+
"inputSchema": {
|
|
1661
|
+
"type": "object",
|
|
1662
|
+
"properties": {
|
|
1663
|
+
"enabled_only": {
|
|
1664
|
+
"type": "boolean",
|
|
1665
|
+
"description": "Only return enabled rules",
|
|
1666
|
+
"default": False,
|
|
1667
|
+
},
|
|
1668
|
+
"limit": {
|
|
1669
|
+
"type": "integer",
|
|
1670
|
+
"description": "Maximum number of rules",
|
|
1671
|
+
"default": 100,
|
|
1672
|
+
},
|
|
1673
|
+
},
|
|
1674
|
+
},
|
|
1675
|
+
}
|
|
1676
|
+
|
|
1677
|
+
self.tools["search_security_rules"] = {
|
|
1678
|
+
"name": "search_security_rules",
|
|
1679
|
+
"description": "Search for security detection rules by name, description, or other criteria.",
|
|
1680
|
+
"inputSchema": {
|
|
1681
|
+
"type": "object",
|
|
1682
|
+
"properties": {
|
|
1683
|
+
"query": {
|
|
1684
|
+
"type": "string",
|
|
1685
|
+
"description": "Search query (supports regex patterns)",
|
|
1686
|
+
},
|
|
1687
|
+
"category": {
|
|
1688
|
+
"type": "string",
|
|
1689
|
+
"description": "Filter by rule category",
|
|
1690
|
+
},
|
|
1691
|
+
"enabled_only": {
|
|
1692
|
+
"type": "boolean",
|
|
1693
|
+
"description": "Only search enabled rules",
|
|
1694
|
+
"default": False,
|
|
1695
|
+
},
|
|
1696
|
+
},
|
|
1697
|
+
"required": ["query"],
|
|
1698
|
+
},
|
|
1699
|
+
}
|
|
1700
|
+
|
|
1701
|
+
self.tools["get_rule_detections"] = {
|
|
1702
|
+
"name": "get_rule_detections",
|
|
1703
|
+
"description": "Retrieve historical detections generated by a specific security detection rule.",
|
|
1704
|
+
"inputSchema": {
|
|
1705
|
+
"type": "object",
|
|
1706
|
+
"properties": {
|
|
1707
|
+
"rule_id": {
|
|
1708
|
+
"type": "string",
|
|
1709
|
+
"description": "Unique ID of the rule",
|
|
1710
|
+
},
|
|
1711
|
+
"alert_state": {
|
|
1712
|
+
"type": "string",
|
|
1713
|
+
"description": "Filter by alert state",
|
|
1714
|
+
},
|
|
1715
|
+
"hours_back": {
|
|
1716
|
+
"type": "integer",
|
|
1717
|
+
"description": "How many hours back",
|
|
1718
|
+
"default": 24,
|
|
1719
|
+
},
|
|
1720
|
+
"limit": {
|
|
1721
|
+
"type": "integer",
|
|
1722
|
+
"description": "Maximum number of detections",
|
|
1723
|
+
"default": 50,
|
|
1724
|
+
},
|
|
1725
|
+
},
|
|
1726
|
+
"required": ["rule_id"],
|
|
1727
|
+
},
|
|
1728
|
+
}
|
|
1729
|
+
|
|
1730
|
+
self.tools["list_rule_errors"] = {
|
|
1731
|
+
"name": "list_rule_errors",
|
|
1732
|
+
"description": "List execution errors for a specific security detection rule.",
|
|
1733
|
+
"inputSchema": {
|
|
1734
|
+
"type": "object",
|
|
1735
|
+
"properties": {
|
|
1736
|
+
"rule_id": {
|
|
1737
|
+
"type": "string",
|
|
1738
|
+
"description": "Unique ID of the rule",
|
|
1739
|
+
},
|
|
1740
|
+
"hours_back": {
|
|
1741
|
+
"type": "integer",
|
|
1742
|
+
"description": "How many hours back to look",
|
|
1743
|
+
"default": 24,
|
|
1744
|
+
},
|
|
1745
|
+
},
|
|
1746
|
+
"required": ["rule_id"],
|
|
1747
|
+
},
|
|
1748
|
+
}
|
|
1749
|
+
|
|
1750
|
+
def _register_edr_tools(self) -> None:
|
|
1751
|
+
"""
|
|
1752
|
+
Register EDR tools.
|
|
1753
|
+
|
|
1754
|
+
Available tools:
|
|
1755
|
+
- get_endpoint_summary: Get endpoint overview (hostname, platform, isolation status)
|
|
1756
|
+
- get_detection_details: Get detailed detection information
|
|
1757
|
+
- isolate_endpoint: Isolate endpoint from network (CRITICAL ACTION - use with caution)
|
|
1758
|
+
- release_endpoint_isolation: Release endpoint from isolation
|
|
1759
|
+
- kill_process_on_endpoint: Terminate process on endpoint (DISRUPTIVE - use with caution)
|
|
1760
|
+
- collect_forensic_artifacts: Initiate forensic artifact collection
|
|
1761
|
+
|
|
1762
|
+
See TOOLS.md for detailed documentation and usage examples.
|
|
1763
|
+
"""
|
|
1764
|
+
if not self.edr_client:
|
|
1765
|
+
self._mcp_logger.warning(
|
|
1766
|
+
"EDR tools not registered: No EDR client configured. "
|
|
1767
|
+
"Configure EDR platform in config.json to enable EDR tools."
|
|
1768
|
+
)
|
|
1769
|
+
return
|
|
1770
|
+
self._mcp_logger.info(f"Registering {6} EDR tools")
|
|
1771
|
+
|
|
1772
|
+
self.tools["get_endpoint_summary"] = {
|
|
1773
|
+
"name": "get_endpoint_summary",
|
|
1774
|
+
"description": "Retrieve summary information about an endpoint including hostname, platform, last seen time, primary user, and isolation status.",
|
|
1775
|
+
"inputSchema": {
|
|
1776
|
+
"type": "object",
|
|
1777
|
+
"properties": {
|
|
1778
|
+
"endpoint_id": {
|
|
1779
|
+
"type": "string",
|
|
1780
|
+
"description": "The endpoint ID",
|
|
1781
|
+
}
|
|
1782
|
+
},
|
|
1783
|
+
"required": ["endpoint_id"],
|
|
1784
|
+
},
|
|
1785
|
+
}
|
|
1786
|
+
|
|
1787
|
+
self.tools["get_detection_details"] = {
|
|
1788
|
+
"name": "get_detection_details",
|
|
1789
|
+
"description": "Retrieve detailed information about a specific detection including type, severity, description, associated file hash, and process.",
|
|
1790
|
+
"inputSchema": {
|
|
1791
|
+
"type": "object",
|
|
1792
|
+
"properties": {
|
|
1793
|
+
"detection_id": {
|
|
1794
|
+
"type": "string",
|
|
1795
|
+
"description": "The detection ID",
|
|
1796
|
+
}
|
|
1797
|
+
},
|
|
1798
|
+
"required": ["detection_id"],
|
|
1799
|
+
},
|
|
1800
|
+
}
|
|
1801
|
+
|
|
1802
|
+
self.tools["isolate_endpoint"] = {
|
|
1803
|
+
"name": "isolate_endpoint",
|
|
1804
|
+
"description": "Isolate an endpoint from the network to prevent further compromise or lateral movement. This is a critical response action.",
|
|
1805
|
+
"inputSchema": {
|
|
1806
|
+
"type": "object",
|
|
1807
|
+
"properties": {
|
|
1808
|
+
"endpoint_id": {
|
|
1809
|
+
"type": "string",
|
|
1810
|
+
"description": "The endpoint ID to isolate",
|
|
1811
|
+
}
|
|
1812
|
+
},
|
|
1813
|
+
"required": ["endpoint_id"],
|
|
1814
|
+
},
|
|
1815
|
+
}
|
|
1816
|
+
|
|
1817
|
+
self.tools["release_endpoint_isolation"] = {
|
|
1818
|
+
"name": "release_endpoint_isolation",
|
|
1819
|
+
"description": "Release an endpoint from network isolation, restoring normal network connectivity.",
|
|
1820
|
+
"inputSchema": {
|
|
1821
|
+
"type": "object",
|
|
1822
|
+
"properties": {
|
|
1823
|
+
"endpoint_id": {
|
|
1824
|
+
"type": "string",
|
|
1825
|
+
"description": "The endpoint ID to release",
|
|
1826
|
+
}
|
|
1827
|
+
},
|
|
1828
|
+
"required": ["endpoint_id"],
|
|
1829
|
+
},
|
|
1830
|
+
}
|
|
1831
|
+
|
|
1832
|
+
self.tools["kill_process_on_endpoint"] = {
|
|
1833
|
+
"name": "kill_process_on_endpoint",
|
|
1834
|
+
"description": "Terminate a specific process running on an endpoint by its process ID. Use with caution as this is a disruptive action.",
|
|
1835
|
+
"inputSchema": {
|
|
1836
|
+
"type": "object",
|
|
1837
|
+
"properties": {
|
|
1838
|
+
"endpoint_id": {
|
|
1839
|
+
"type": "string",
|
|
1840
|
+
"description": "The endpoint ID",
|
|
1841
|
+
},
|
|
1842
|
+
"pid": {
|
|
1843
|
+
"type": "integer",
|
|
1844
|
+
"description": "The process ID to kill",
|
|
1845
|
+
},
|
|
1846
|
+
},
|
|
1847
|
+
"required": ["endpoint_id", "pid"],
|
|
1848
|
+
},
|
|
1849
|
+
}
|
|
1850
|
+
|
|
1851
|
+
self.tools["collect_forensic_artifacts"] = {
|
|
1852
|
+
"name": "collect_forensic_artifacts",
|
|
1853
|
+
"description": "Initiate collection of forensic artifacts from an endpoint, such as process lists, network connections, file system artifacts, etc.",
|
|
1854
|
+
"inputSchema": {
|
|
1855
|
+
"type": "object",
|
|
1856
|
+
"properties": {
|
|
1857
|
+
"endpoint_id": {
|
|
1858
|
+
"type": "string",
|
|
1859
|
+
"description": "The endpoint ID",
|
|
1860
|
+
},
|
|
1861
|
+
"artifact_types": {
|
|
1862
|
+
"type": "array",
|
|
1863
|
+
"items": {"type": "string"},
|
|
1864
|
+
"description": "List of artifact types to collect (e.g., ['processes', 'network', 'filesystem'])",
|
|
1865
|
+
},
|
|
1866
|
+
},
|
|
1867
|
+
"required": ["endpoint_id", "artifact_types"],
|
|
1868
|
+
},
|
|
1869
|
+
}
|
|
1870
|
+
|
|
1871
|
+
def _register_cti_tools(self) -> None:
|
|
1872
|
+
"""
|
|
1873
|
+
Register CTI (Cyber Threat Intelligence) tools.
|
|
1874
|
+
|
|
1875
|
+
Available tools:
|
|
1876
|
+
- lookup_hash_ti: Look up a file hash in the threat intelligence platform
|
|
1877
|
+
|
|
1878
|
+
See TOOLS.md for detailed documentation and usage examples.
|
|
1879
|
+
"""
|
|
1880
|
+
if not self.cti_client:
|
|
1881
|
+
self._mcp_logger.warning(
|
|
1882
|
+
"CTI tools not registered: No CTI client configured. "
|
|
1883
|
+
"Configure CTI platform in config.json to enable CTI tools."
|
|
1884
|
+
)
|
|
1885
|
+
return
|
|
1886
|
+
self._mcp_logger.info(f"Registering {1} CTI tool")
|
|
1887
|
+
|
|
1888
|
+
self.tools["lookup_hash_ti"] = {
|
|
1889
|
+
"name": "lookup_hash_ti",
|
|
1890
|
+
"description": (
|
|
1891
|
+
"Look up a file hash (MD5, SHA1, SHA256, SHA512) in threat intelligence platforms to determine if it's malicious, suspicious, or benign. "
|
|
1892
|
+
"Returns threat scores, classifications, indicators, labels, and MITRE ATT&CK kill chain phases. "
|
|
1893
|
+
"Use this to: (1) Check if a hash is known malicious - look for 'classification: malicious', high threat_score (>70), or labels like 'malware', 'trojan', 'ransomware'. "
|
|
1894
|
+
"(2) Assess threat level - threat_score 0-30=benign, 31-60=suspicious, 61-100=malicious. "
|
|
1895
|
+
"(3) Understand attack context - review kill_chain_phases and labels to understand the threat type and attack stage. "
|
|
1896
|
+
"Response includes: found (boolean), threat_score (0-100), classification (malicious/suspicious/benign), labels (array of threat types), indicators (array with scores and patterns). "
|
|
1897
|
+
"If found=false and no indicators, the hash is likely benign/unknown. If found=true with indicators, analyze the threat_score and labels to determine severity."
|
|
1898
|
+
),
|
|
1899
|
+
"inputSchema": {
|
|
1900
|
+
"type": "object",
|
|
1901
|
+
"properties": {
|
|
1902
|
+
"hash_value": {
|
|
1903
|
+
"type": "string",
|
|
1904
|
+
"description": "The hash value to look up (MD5, SHA1, SHA256, SHA512). Use this to check if a file hash is known malicious.",
|
|
1905
|
+
}
|
|
1906
|
+
},
|
|
1907
|
+
"required": ["hash_value"],
|
|
1908
|
+
},
|
|
1909
|
+
}
|
|
1910
|
+
|
|
1911
|
+
def _register_rules_tools(self) -> None:
|
|
1912
|
+
"""
|
|
1913
|
+
Register rules engine tools.
|
|
1914
|
+
|
|
1915
|
+
Available tools:
|
|
1916
|
+
- list_rules: List all available investigation rules/workflows
|
|
1917
|
+
- execute_rule: Execute an automated investigation workflow that chains multiple skills
|
|
1918
|
+
|
|
1919
|
+
Rules enable automated playbooks that combine case management, SIEM, and EDR operations.
|
|
1920
|
+
See TOOLS.md for detailed documentation and usage examples.
|
|
1921
|
+
"""
|
|
1922
|
+
self._mcp_logger.info("Registering 2 rules engine tools")
|
|
1923
|
+
self.tools["list_rules"] = {
|
|
1924
|
+
"name": "list_rules",
|
|
1925
|
+
"description": "List all available investigation rules/workflows.",
|
|
1926
|
+
"inputSchema": {"type": "object", "properties": {}},
|
|
1927
|
+
}
|
|
1928
|
+
|
|
1929
|
+
self.tools["execute_rule"] = {
|
|
1930
|
+
"name": "execute_rule",
|
|
1931
|
+
"description": "Execute an investigation rule/workflow that chains together multiple skills.",
|
|
1932
|
+
"inputSchema": {
|
|
1933
|
+
"type": "object",
|
|
1934
|
+
"properties": {
|
|
1935
|
+
"rule_name": {
|
|
1936
|
+
"type": "string",
|
|
1937
|
+
"description": "Name of the rule to execute",
|
|
1938
|
+
},
|
|
1939
|
+
"context": {
|
|
1940
|
+
"type": "object",
|
|
1941
|
+
"description": "Optional context variables to pass to the rule",
|
|
1942
|
+
},
|
|
1943
|
+
},
|
|
1944
|
+
"required": ["rule_name"],
|
|
1945
|
+
},
|
|
1946
|
+
}
|
|
1947
|
+
|
|
1948
|
+
def _register_runbook_tools(self) -> None:
|
|
1949
|
+
"""
|
|
1950
|
+
Register runbook execution tools.
|
|
1951
|
+
|
|
1952
|
+
Available tools:
|
|
1953
|
+
- list_runbooks: List available investigation runbooks
|
|
1954
|
+
- get_runbook: Get details of a specific runbook
|
|
1955
|
+
- execute_runbook: Execute a runbook (provides runbook content as context for AI)
|
|
1956
|
+
|
|
1957
|
+
Runbooks provide structured investigation procedures organized by SOC tier.
|
|
1958
|
+
See run_books/ directory for available runbooks.
|
|
1959
|
+
"""
|
|
1960
|
+
self._mcp_logger.info("Registering 3 runbook tools")
|
|
1961
|
+
self.tools["list_runbooks"] = {
|
|
1962
|
+
"name": "list_runbooks",
|
|
1963
|
+
"description": "List available investigation runbooks, optionally filtered by SOC tier or category.",
|
|
1964
|
+
"inputSchema": {
|
|
1965
|
+
"type": "object",
|
|
1966
|
+
"properties": {
|
|
1967
|
+
"soc_tier": {
|
|
1968
|
+
"type": "string",
|
|
1969
|
+
"enum": ["soc1", "soc2", "soc3"],
|
|
1970
|
+
"description": "Filter by SOC tier"
|
|
1971
|
+
},
|
|
1972
|
+
"category": {
|
|
1973
|
+
"type": "string",
|
|
1974
|
+
"enum": ["triage", "investigation", "response", "forensics", "correlation", "enrichment", "remediation"],
|
|
1975
|
+
"description": "Filter by category"
|
|
1976
|
+
}
|
|
1977
|
+
}
|
|
1978
|
+
}
|
|
1979
|
+
}
|
|
1980
|
+
|
|
1981
|
+
self.tools["get_runbook"] = {
|
|
1982
|
+
"name": "get_runbook",
|
|
1983
|
+
"description": "Get details and content of a specific runbook.",
|
|
1984
|
+
"inputSchema": {
|
|
1985
|
+
"type": "object",
|
|
1986
|
+
"properties": {
|
|
1987
|
+
"runbook_name": {
|
|
1988
|
+
"type": "string",
|
|
1989
|
+
"description": "Name of the runbook (e.g., 'initial_alert_triage' or 'soc1/triage/initial_alert_triage')"
|
|
1990
|
+
}
|
|
1991
|
+
},
|
|
1992
|
+
"required": ["runbook_name"]
|
|
1993
|
+
}
|
|
1994
|
+
}
|
|
1995
|
+
|
|
1996
|
+
self.tools["execute_runbook"] = {
|
|
1997
|
+
"name": "execute_runbook",
|
|
1998
|
+
"description": "Execute an investigation runbook. The runbook content will be provided as context for you to follow step-by-step. Use the appropriate MCP tools for each step as specified in the runbook.",
|
|
1999
|
+
"inputSchema": {
|
|
2000
|
+
"type": "object",
|
|
2001
|
+
"properties": {
|
|
2002
|
+
"runbook_name": {
|
|
2003
|
+
"type": "string",
|
|
2004
|
+
"description": "Name of the runbook to execute (e.g., 'initial_alert_triage', 'malware_deep_analysis')"
|
|
2005
|
+
},
|
|
2006
|
+
"case_id": {
|
|
2007
|
+
"type": "string",
|
|
2008
|
+
"description": "Case ID for the investigation"
|
|
2009
|
+
},
|
|
2010
|
+
"alert_id": {
|
|
2011
|
+
"type": "string",
|
|
2012
|
+
"description": "Alert ID from SIEM"
|
|
2013
|
+
},
|
|
2014
|
+
"soc_tier": {
|
|
2015
|
+
"type": "string",
|
|
2016
|
+
"enum": ["soc1", "soc2", "soc3"],
|
|
2017
|
+
"description": "SOC tier (auto-detected from runbook if not provided)"
|
|
2018
|
+
}
|
|
2019
|
+
},
|
|
2020
|
+
"required": ["runbook_name"]
|
|
2021
|
+
}
|
|
2022
|
+
}
|
|
2023
|
+
|
|
2024
|
+
def _register_agent_profile_tools(self) -> None:
|
|
2025
|
+
"""
|
|
2026
|
+
Register agent profile tools.
|
|
2027
|
+
|
|
2028
|
+
Available tools:
|
|
2029
|
+
- list_agent_profiles: List all configured agent profiles
|
|
2030
|
+
- get_agent_profile: Get details of a specific agent profile
|
|
2031
|
+
- route_case_to_agent: Route a case/alert to the appropriate agent
|
|
2032
|
+
- execute_as_agent: Execute an investigation as a specific agent (auto-selects runbook)
|
|
2033
|
+
|
|
2034
|
+
Agent profiles define SOC tier capabilities and available runbooks for autonomous agents.
|
|
2035
|
+
"""
|
|
2036
|
+
self._mcp_logger.info("Registering 4 agent profile tools")
|
|
2037
|
+
self.tools["list_agent_profiles"] = {
|
|
2038
|
+
"name": "list_agent_profiles",
|
|
2039
|
+
"description": "List all configured agent profiles with their capabilities and runbooks.",
|
|
2040
|
+
"inputSchema": {
|
|
2041
|
+
"type": "object",
|
|
2042
|
+
"properties": {}
|
|
2043
|
+
}
|
|
2044
|
+
}
|
|
2045
|
+
|
|
2046
|
+
self.tools["get_agent_profile"] = {
|
|
2047
|
+
"name": "get_agent_profile",
|
|
2048
|
+
"description": "Get details of a specific agent profile including capabilities, runbooks, and decision authority.",
|
|
2049
|
+
"inputSchema": {
|
|
2050
|
+
"type": "object",
|
|
2051
|
+
"properties": {
|
|
2052
|
+
"agent_id": {
|
|
2053
|
+
"type": "string",
|
|
2054
|
+
"description": "Agent ID (e.g., 'soc1_triage_agent', 'soc2_investigation_agent', 'soc3_response_agent')"
|
|
2055
|
+
}
|
|
2056
|
+
},
|
|
2057
|
+
"required": ["agent_id"]
|
|
2058
|
+
}
|
|
2059
|
+
}
|
|
2060
|
+
|
|
2061
|
+
self.tools["route_case_to_agent"] = {
|
|
2062
|
+
"name": "route_case_to_agent",
|
|
2063
|
+
"description": "Route a case/alert to the appropriate agent based on routing rules and case characteristics.",
|
|
2064
|
+
"inputSchema": {
|
|
2065
|
+
"type": "object",
|
|
2066
|
+
"properties": {
|
|
2067
|
+
"case_id": {
|
|
2068
|
+
"type": "string",
|
|
2069
|
+
"description": "Case ID to route"
|
|
2070
|
+
},
|
|
2071
|
+
"alert_id": {
|
|
2072
|
+
"type": "string",
|
|
2073
|
+
"description": "Alert ID to route"
|
|
2074
|
+
},
|
|
2075
|
+
"alert_type": {
|
|
2076
|
+
"type": "string",
|
|
2077
|
+
"description": "Type of alert (e.g., 'suspicious_login', 'malware_detection')"
|
|
2078
|
+
},
|
|
2079
|
+
"case_status": {
|
|
2080
|
+
"type": "string",
|
|
2081
|
+
"description": "Current case status (used for routing decisions)"
|
|
2082
|
+
}
|
|
2083
|
+
}
|
|
2084
|
+
}
|
|
2085
|
+
}
|
|
2086
|
+
|
|
2087
|
+
self.tools["execute_as_agent"] = {
|
|
2088
|
+
"name": "execute_as_agent",
|
|
2089
|
+
"description": "Execute an investigation as a specific agent. The agent will automatically select and execute the appropriate runbook based on its profile and the case/alert type.",
|
|
2090
|
+
"inputSchema": {
|
|
2091
|
+
"type": "object",
|
|
2092
|
+
"properties": {
|
|
2093
|
+
"agent_id": {
|
|
2094
|
+
"type": "string",
|
|
2095
|
+
"description": "Agent ID to execute as (e.g., 'soc1_triage_agent', 'soc2_investigation_agent', 'soc3_response_agent')"
|
|
2096
|
+
},
|
|
2097
|
+
"case_id": {
|
|
2098
|
+
"type": "string",
|
|
2099
|
+
"description": "Case ID for investigation"
|
|
2100
|
+
},
|
|
2101
|
+
"alert_id": {
|
|
2102
|
+
"type": "string",
|
|
2103
|
+
"description": "Alert ID from SIEM"
|
|
2104
|
+
},
|
|
2105
|
+
"runbook_name": {
|
|
2106
|
+
"type": "string",
|
|
2107
|
+
"description": "Optional: Specific runbook to execute (overrides auto-selection)"
|
|
2108
|
+
}
|
|
2109
|
+
},
|
|
2110
|
+
"required": ["agent_id"]
|
|
2111
|
+
}
|
|
2112
|
+
}
|
|
2113
|
+
|
|
2114
|
+
def _handle_list_runbooks(
|
|
2115
|
+
self, soc_tier: Optional[str] = None, category: Optional[str] = None
|
|
2116
|
+
) -> Dict[str, Any]:
|
|
2117
|
+
"""Handle list_runbooks tool call."""
|
|
2118
|
+
runbooks = self.runbook_manager.list_runbooks(soc_tier=soc_tier, category=category)
|
|
2119
|
+
return {"runbooks": runbooks, "count": len(runbooks)}
|
|
2120
|
+
|
|
2121
|
+
def _handle_get_runbook(self, runbook_name: str) -> Dict[str, Any]:
|
|
2122
|
+
"""Handle get_runbook tool call."""
|
|
2123
|
+
runbook_path = self.runbook_manager.find_runbook(runbook_name)
|
|
2124
|
+
if not runbook_path:
|
|
2125
|
+
return {
|
|
2126
|
+
"success": False,
|
|
2127
|
+
"error": f"Runbook not found: {runbook_name}"
|
|
2128
|
+
}
|
|
2129
|
+
|
|
2130
|
+
content = self.runbook_manager.read_runbook(runbook_path)
|
|
2131
|
+
metadata = self.runbook_manager.parse_runbook_metadata(runbook_path, content)
|
|
2132
|
+
workflow_steps = self.runbook_manager.extract_workflow_steps(content)
|
|
2133
|
+
|
|
2134
|
+
# Get relative path
|
|
2135
|
+
rel_path = os.path.relpath(runbook_path, self.runbook_manager.runbooks_dir)
|
|
2136
|
+
runbook_id = rel_path[:-3] # Remove .md extension
|
|
2137
|
+
|
|
2138
|
+
return {
|
|
2139
|
+
"success": True,
|
|
2140
|
+
"runbook_name": runbook_id,
|
|
2141
|
+
"path": runbook_path,
|
|
2142
|
+
"soc_tier": metadata.get("soc_tier"),
|
|
2143
|
+
"category": metadata.get("category"),
|
|
2144
|
+
"objective": metadata.get("objective"),
|
|
2145
|
+
"scope": metadata.get("scope"),
|
|
2146
|
+
"tools": metadata.get("tools", []),
|
|
2147
|
+
"inputs": metadata.get("inputs", []),
|
|
2148
|
+
"step_count": metadata.get("step_count", len(workflow_steps)),
|
|
2149
|
+
"workflow_steps": workflow_steps,
|
|
2150
|
+
"content": content # Full markdown content
|
|
2151
|
+
}
|
|
2152
|
+
|
|
2153
|
+
def _handle_execute_runbook(
|
|
2154
|
+
self,
|
|
2155
|
+
runbook_name: str,
|
|
2156
|
+
case_id: Optional[str] = None,
|
|
2157
|
+
alert_id: Optional[str] = None,
|
|
2158
|
+
soc_tier: Optional[str] = None
|
|
2159
|
+
) -> Dict[str, Any]:
|
|
2160
|
+
"""Handle execute_runbook tool call."""
|
|
2161
|
+
# Find runbook
|
|
2162
|
+
runbook_path = self.runbook_manager.find_runbook(runbook_name, soc_tier=soc_tier)
|
|
2163
|
+
if not runbook_path:
|
|
2164
|
+
return {
|
|
2165
|
+
"success": False,
|
|
2166
|
+
"error": f"Runbook not found: {runbook_name}"
|
|
2167
|
+
}
|
|
2168
|
+
|
|
2169
|
+
# Read and parse runbook
|
|
2170
|
+
content = self.runbook_manager.read_runbook(runbook_path)
|
|
2171
|
+
metadata = self.runbook_manager.parse_runbook_metadata(runbook_path, content)
|
|
2172
|
+
workflow_steps = self.runbook_manager.extract_workflow_steps(content)
|
|
2173
|
+
|
|
2174
|
+
# Get relative path
|
|
2175
|
+
rel_path = os.path.relpath(runbook_path, self.runbook_manager.runbooks_dir)
|
|
2176
|
+
runbook_id = rel_path[:-3]
|
|
2177
|
+
|
|
2178
|
+
# Determine SOC tier
|
|
2179
|
+
detected_tier = metadata.get("soc_tier") or soc_tier
|
|
2180
|
+
|
|
2181
|
+
# Load and prepend SOC tier guidelines to runbook content if available
|
|
2182
|
+
if detected_tier:
|
|
2183
|
+
try:
|
|
2184
|
+
guidelines_runbook_name = f"{detected_tier}/guidelines"
|
|
2185
|
+
guidelines_path = self.runbook_manager.find_runbook(
|
|
2186
|
+
guidelines_runbook_name,
|
|
2187
|
+
soc_tier=detected_tier,
|
|
2188
|
+
)
|
|
2189
|
+
if guidelines_path:
|
|
2190
|
+
guidelines_content = self.runbook_manager.read_runbook(guidelines_path)
|
|
2191
|
+
# Prepend guidelines to runbook content so AI always sees them
|
|
2192
|
+
content = f"{guidelines_content}\n\n---\n\n{content}"
|
|
2193
|
+
except Exception:
|
|
2194
|
+
# Do not fail execution if guidelines cannot be loaded
|
|
2195
|
+
self._mcp_logger.debug(
|
|
2196
|
+
f"Could not load guidelines for tier {detected_tier}",
|
|
2197
|
+
)
|
|
2198
|
+
|
|
2199
|
+
# Check if case needs to be created
|
|
2200
|
+
# NOTE: For triage runbooks (especially initial_alert_triage), the runbook itself
|
|
2201
|
+
# will determine if a case should be created after quick assessment (Step 2a).
|
|
2202
|
+
# Only auto-create cases if:
|
|
2203
|
+
# 1. A case_id is explicitly provided (case already exists)
|
|
2204
|
+
# 2. The runbook is NOT a triage runbook that performs quick assessment
|
|
2205
|
+
case_created = False
|
|
2206
|
+
is_triage_runbook = "triage" in runbook_id.lower() or "initial_alert_triage" in runbook_id.lower()
|
|
2207
|
+
|
|
2208
|
+
if not case_id and alert_id and self.case_client:
|
|
2209
|
+
# Check if case exists for this alert
|
|
2210
|
+
try:
|
|
2211
|
+
search_results = self.case_client.search_cases(
|
|
2212
|
+
CaseSearchQuery(text=alert_id, limit=10)
|
|
2213
|
+
)
|
|
2214
|
+
if search_results:
|
|
2215
|
+
# Use existing case
|
|
2216
|
+
case_id = search_results[0].id
|
|
2217
|
+
elif not is_triage_runbook:
|
|
2218
|
+
# Only auto-create case for non-triage runbooks
|
|
2219
|
+
# Triage runbooks will create cases themselves after quick assessment
|
|
2220
|
+
# Create new case following case standard
|
|
2221
|
+
from datetime import datetime
|
|
2222
|
+
from ..api.case_management import Case, CaseStatus, CasePriority
|
|
2223
|
+
|
|
2224
|
+
# Get alert details if available
|
|
2225
|
+
alert_type = "Security Alert"
|
|
2226
|
+
primary_entity = alert_id[:20] + "..." if len(alert_id) > 20 else alert_id
|
|
2227
|
+
|
|
2228
|
+
if self.siem_client:
|
|
2229
|
+
try:
|
|
2230
|
+
alert_result = tools_siem.get_security_alert_by_id(
|
|
2231
|
+
alert_id=alert_id,
|
|
2232
|
+
client=self.siem_client
|
|
2233
|
+
)
|
|
2234
|
+
if alert_result and "alert" in alert_result:
|
|
2235
|
+
alert = alert_result["alert"]
|
|
2236
|
+
if isinstance(alert, dict):
|
|
2237
|
+
alert_type = alert.get("title") or alert.get("alert_type", "Security Alert")
|
|
2238
|
+
if alert.get("related_entities"):
|
|
2239
|
+
entities = alert["related_entities"]
|
|
2240
|
+
if entities:
|
|
2241
|
+
primary_entity = str(entities[0]).split(":")[-1] if ":" in str(entities[0]) else str(entities[0])
|
|
2242
|
+
except Exception:
|
|
2243
|
+
pass # Continue with defaults
|
|
2244
|
+
|
|
2245
|
+
# Generate title following case standard format
|
|
2246
|
+
date_str = datetime.utcnow().strftime("%Y-%m-%d")
|
|
2247
|
+
title = f"{alert_type} - {primary_entity} - {date_str}"
|
|
2248
|
+
|
|
2249
|
+
# Build description following case standard
|
|
2250
|
+
description = f"""**Alert ID**: {alert_id}
|
|
2251
|
+
**Alert Type**: {alert_type}
|
|
2252
|
+
**Created**: {datetime.utcnow().isoformat()}
|
|
2253
|
+
|
|
2254
|
+
## Initial Assessment
|
|
2255
|
+
Case created during runbook execution: {runbook_id}
|
|
2256
|
+
Initial triage in progress.
|
|
2257
|
+
|
|
2258
|
+
## Key Entities
|
|
2259
|
+
To be populated during investigation.
|
|
2260
|
+
|
|
2261
|
+
## Investigation Status
|
|
2262
|
+
- Status: Initial triage
|
|
2263
|
+
- SOC Tier: {detected_tier or 'unknown'}
|
|
2264
|
+
- Runbook: {runbook_id}
|
|
2265
|
+
"""
|
|
2266
|
+
|
|
2267
|
+
# Determine priority based on alert if available
|
|
2268
|
+
priority = CasePriority.MEDIUM
|
|
2269
|
+
if self.siem_client:
|
|
2270
|
+
try:
|
|
2271
|
+
alert_result = tools_siem.get_security_alert_by_id(
|
|
2272
|
+
alert_id=alert_id,
|
|
2273
|
+
client=self.siem_client
|
|
2274
|
+
)
|
|
2275
|
+
if alert_result and "alert" in alert_result:
|
|
2276
|
+
alert = alert_result["alert"]
|
|
2277
|
+
if isinstance(alert, dict):
|
|
2278
|
+
severity = alert.get("severity", "").lower()
|
|
2279
|
+
if severity in ["high", "critical"]:
|
|
2280
|
+
priority = CasePriority.HIGH
|
|
2281
|
+
elif severity == "low":
|
|
2282
|
+
priority = CasePriority.LOW
|
|
2283
|
+
except Exception:
|
|
2284
|
+
pass
|
|
2285
|
+
|
|
2286
|
+
# Create case
|
|
2287
|
+
new_case = Case(
|
|
2288
|
+
id=None,
|
|
2289
|
+
title=title,
|
|
2290
|
+
description=description,
|
|
2291
|
+
status=CaseStatus.OPEN,
|
|
2292
|
+
priority=priority,
|
|
2293
|
+
tags=[detected_tier + "-triage" if detected_tier else "triage", runbook_id.split("/")[-1]],
|
|
2294
|
+
observables=None,
|
|
2295
|
+
)
|
|
2296
|
+
|
|
2297
|
+
created_case = self.case_client.create_case(new_case)
|
|
2298
|
+
case_id = created_case.id or ""
|
|
2299
|
+
case_created = True
|
|
2300
|
+
|
|
2301
|
+
# Add initial note
|
|
2302
|
+
try:
|
|
2303
|
+
self.case_client.add_case_comment(
|
|
2304
|
+
case_id=case_id,
|
|
2305
|
+
content=f"Case created from alert {alert_id}. Executing runbook: {runbook_id}",
|
|
2306
|
+
author="SamiGPT Agent"
|
|
2307
|
+
)
|
|
2308
|
+
except Exception:
|
|
2309
|
+
pass
|
|
2310
|
+
except Exception as e:
|
|
2311
|
+
self._mcp_logger.warning(f"Failed to create/find case for alert {alert_id}: {e}")
|
|
2312
|
+
# Continue without case_id - runbook can still execute
|
|
2313
|
+
|
|
2314
|
+
# Prepare execution context
|
|
2315
|
+
execution_instructions = (
|
|
2316
|
+
f"You are executing the runbook: {runbook_id} (SOC Tier: {detected_tier or 'unknown'}). "
|
|
2317
|
+
f"Objective: {metadata.get('objective', 'Investigation')}. "
|
|
2318
|
+
)
|
|
2319
|
+
|
|
2320
|
+
if case_id:
|
|
2321
|
+
execution_instructions += f"A case has been {'created' if case_created else 'identified'} with ID: {case_id}. "
|
|
2322
|
+
elif alert_id:
|
|
2323
|
+
execution_instructions += f"Alert ID: {alert_id}. "
|
|
2324
|
+
if self.case_client:
|
|
2325
|
+
if is_triage_runbook:
|
|
2326
|
+
execution_instructions += (
|
|
2327
|
+
"IMPORTANT: Follow Step 2a (Quick Assessment) in the runbook FIRST. "
|
|
2328
|
+
"Only create a case using create_case tool if the quick assessment determines "
|
|
2329
|
+
"that case creation is needed (uncertain, suspicious, or requires tracking). "
|
|
2330
|
+
"If the alert is clearly FP/BTP with high confidence, close the alert directly "
|
|
2331
|
+
"using close_alert without creating a case. "
|
|
2332
|
+
)
|
|
2333
|
+
else:
|
|
2334
|
+
execution_instructions += "IMPORTANT: Create a case using create_case tool if one doesn't exist, following the case standard in standards/case_standard.md. "
|
|
2335
|
+
|
|
2336
|
+
execution_instructions += (
|
|
2337
|
+
f"Follow the workflow steps in the runbook below. Use the appropriate MCP tools for each step. "
|
|
2338
|
+
f"Document your progress and findings in case comments as specified in the runbook. "
|
|
2339
|
+
f"Attach all observables (IOCs) to the case using attach_observable_to_case. "
|
|
2340
|
+
f"Follow the case standard format for all documentation."
|
|
2341
|
+
)
|
|
2342
|
+
|
|
2343
|
+
return {
|
|
2344
|
+
"success": True,
|
|
2345
|
+
"runbook_name": runbook_id,
|
|
2346
|
+
"soc_tier": detected_tier,
|
|
2347
|
+
"objective": metadata.get("objective"),
|
|
2348
|
+
"execution_instructions": execution_instructions,
|
|
2349
|
+
"runbook_content": content, # Full markdown for AI context
|
|
2350
|
+
"workflow_summary": [
|
|
2351
|
+
{"step": step["step_number"], "title": step["title"]}
|
|
2352
|
+
for step in workflow_steps
|
|
2353
|
+
],
|
|
2354
|
+
"tools_available": metadata.get("tools", []),
|
|
2355
|
+
"inputs_provided": {
|
|
2356
|
+
"case_id": case_id,
|
|
2357
|
+
"alert_id": alert_id
|
|
2358
|
+
},
|
|
2359
|
+
"inputs_required": metadata.get("inputs", []),
|
|
2360
|
+
"case_created": case_created,
|
|
2361
|
+
"status": "ready_for_execution"
|
|
2362
|
+
}
|
|
2363
|
+
|
|
2364
|
+
def _handle_get_agent_profile(self, agent_id: str) -> Dict[str, Any]:
    """Serve the get_agent_profile tool call.

    Looks up the agent profile registered under ``agent_id`` and, when
    found, flattens it (including the nested decision-authority flags)
    into a plain dictionary for the MCP client. Returns a failure
    payload when no such profile exists.
    """
    profile = self.agent_profile_manager.get_profile(agent_id)
    if not profile:
        return {
            "success": False,
            "error": f"Agent profile not found: {agent_id}"
        }

    # Flatten the decision-authority object into a serializable mapping.
    authority = profile.decision_authority
    authority_payload = {
        "close_false_positives": authority.close_false_positives,
        "close_benign_true_positives": authority.close_benign_true_positives,
        "escalate_to_soc2": authority.escalate_to_soc2,
        "escalate_to_soc3": authority.escalate_to_soc3,
        "containment_actions": authority.containment_actions,
        "forensic_collection": authority.forensic_collection,
    }

    return {
        "success": True,
        "agent_id": agent_id,
        "name": profile.name,
        "tier": profile.tier,
        "description": profile.description,
        "capabilities": profile.capabilities,
        "runbooks": profile.runbooks,
        "decision_authority": authority_payload,
        "auto_select_runbook": profile.auto_select_runbook,
        "max_concurrent_cases": profile.max_concurrent_cases,
    }
|
|
2392
|
+
|
|
2393
|
+
def _handle_route_case_to_agent(
    self,
    case_id: Optional[str] = None,
    alert_id: Optional[str] = None,
    alert_type: Optional[str] = None,
    case_status: Optional[str] = None
) -> Dict[str, Any]:
    """Serve the route_case_to_agent tool call.

    Decides which agent profile should own the given case/alert. When a
    case id is supplied without an explicit status, the current status
    is fetched best-effort from the case-management client first; any
    lookup failure is silently ignored and routing proceeds without it.
    """
    if case_id and self.case_client and not case_status:
        try:
            case = self.case_client.get_case(case_id)
            status_value = case.status
            # Enum-like statuses expose .value; anything else is stringified.
            case_status = status_value.value if hasattr(status_value, 'value') else str(status_value)
        except Exception:
            pass  # Continue without case status

    agent_id = self.agent_profile_manager.route_to_agent(
        case_id=case_id,
        alert_id=alert_id,
        alert_type=alert_type,
        case_status=case_status
    )

    if not agent_id:
        return {
            "success": False,
            "error": "Could not determine appropriate agent"
        }

    profile = self.agent_profile_manager.get_profile(agent_id)
    return {
        "success": True,
        "agent_id": agent_id,
        "agent_name": profile.name if profile else None,
        "tier": profile.tier if profile else None,
        "routing_reason": "Based on routing rules and case/alert characteristics"
    }
|
|
2430
|
+
|
|
2431
|
+
def _handle_execute_as_agent(
    self,
    agent_id: str,
    case_id: Optional[str] = None,
    alert_id: Optional[str] = None,
    runbook_name: Optional[str] = None
) -> Dict[str, Any]:
    """Handle execute_as_agent tool call.

    Executes a runbook under the authority of the given agent profile:
    resolves the profile, best-effort enriches with alert details from
    the SIEM, auto-selects a runbook when permitted, verifies the
    profile may run it, loads the tier guidelines once per agent per
    server process, and finally delegates to ``_handle_execute_runbook``.

    Args:
        agent_id: Identifier of the agent profile to execute as.
        case_id: Optional case the runbook should operate on.
        alert_id: Optional alert used for enrichment and runbook selection.
        runbook_name: Explicit runbook to run; when omitted and the
            profile's ``auto_select_runbook`` flag is set, one is chosen
            from the alert type/details.

    Returns:
        Dict with ``success`` plus either an ``error`` message or the
        runbook result; on the agent's first execution in this process
        the tier guidelines are included under ``profile_guidelines``.
    """
    profile = self.agent_profile_manager.get_profile(agent_id)
    if not profile:
        return {
            "success": False,
            "error": f"Agent profile not found: {agent_id}"
        }

    # Get alert/case details if provided
    alert_details = {}
    alert_type = None

    if alert_id and self.siem_client:
        try:
            # Use the tool function which handles the client properly
            alert_result = tools_siem.get_security_alert_by_id(
                alert_id=alert_id,
                client=self.siem_client
            )
            if alert_result and "alert" in alert_result:
                alert = alert_result["alert"]
                # The alert may come back as a raw dict or a model object;
                # handle both shapes when extracting type/severity/title.
                if isinstance(alert, dict):
                    alert_type = alert.get("alert_type") or alert.get("title", "")
                    alert_details["severity"] = alert.get("severity")
                    alert_details["title"] = alert.get("title")
                else:
                    if hasattr(alert, 'alert_type'):
                        alert_type = alert.alert_type
                    if hasattr(alert, 'severity'):
                        alert_details["severity"] = alert.severity
                    if hasattr(alert, 'title'):
                        alert_details["title"] = alert.title
        except Exception:
            pass  # Continue without alert details

    # Auto-select runbook if not specified
    if not runbook_name and profile.auto_select_runbook:
        runbook_name = profile.select_runbook_for_alert(
            alert_type=alert_type or "",
            alert_details=alert_details
        )

    if not runbook_name:
        return {
            "success": False,
            "error": "Could not determine appropriate runbook. Please specify runbook_name."
        }

    # Verify agent can execute this runbook
    if not profile.can_execute_runbook(runbook_name):
        return {
            "success": False,
            "error": f"Agent {agent_id} (tier: {profile.tier}) cannot execute runbook {runbook_name}"
        }

    # Optionally load SOC tier guidelines for this agent on first execution
    guidelines_content: Optional[str] = None
    if not self._shown_agent_guidelines.get(agent_id):
        # Guidelines are looked up as "<tier>/guidelines" relative to the
        # runbook tree — presumably a per-tier markdown doc; TODO confirm.
        guidelines_runbook_name = f"{profile.tier}/guidelines"
        try:
            guidelines_path = self.runbook_manager.find_runbook(
                guidelines_runbook_name,
                soc_tier=profile.tier,
            )
            if guidelines_path:
                guidelines_content = self.runbook_manager.read_runbook(guidelines_path)
                # Mark as shown for this agent profile within this server process
                self._shown_agent_guidelines[agent_id] = True
        except Exception:
            # Do not fail execution if guidelines cannot be loaded
            self._mcp_logger.debug(
                f"Could not load guidelines for agent {agent_id} (tier: {profile.tier})",
            )

    # Execute runbook (reuse execute_runbook logic)
    runbook_result = self._handle_execute_runbook(
        runbook_name=runbook_name,
        case_id=case_id,
        alert_id=alert_id,
        soc_tier=profile.tier
    )

    # Propagate runbook failures unchanged so the caller sees the
    # underlying error payload.
    if not runbook_result.get("success"):
        return runbook_result

    result: Dict[str, Any] = {
        "success": True,
        "agent_id": agent_id,
        "agent_name": profile.name,
        "tier": profile.tier,
        "runbook_executed": runbook_name,
        "runbook_result": runbook_result,
    }

    # When guidelines are available and this is the first execution for the agent,
    # include them so MCP users see them before following the runbook.
    if guidelines_content is not None:
        result["profile_guidelines"] = {
            "agent_id": agent_id,
            "tier": profile.tier,
            "path": f"{profile.tier}/guidelines",
            "content": guidelines_content,
            "first_run_for_agent": True,
        }

    return result
|
|
2544
|
+
|
|
2545
|
+
def _create_response(
    self, request_id: Optional[Any], result: Optional[Any] = None, error: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Assemble a JSON-RPC 2.0 response envelope.

    Args:
        request_id: Id from the originating request; omitted from the
            envelope when None (notification-style).
        result: Payload for a success response.
        error: Error object for a failure response (takes precedence
            over ``result`` when truthy).

    Returns:
        JSON-RPC 2.0 response dictionary.
    """
    payload: Dict[str, Any] = {"jsonrpc": "2.0"}

    if error:
        payload["error"] = error
    elif result is not None:
        payload["result"] = result

    # Per JSON-RPC 2.0 the id is echoed back when the request carried a
    # valid one; some clients reject a null id, so None is dropped and
    # any exotic id type is coerced to a string as a fallback.
    if request_id is not None:
        payload["id"] = request_id if isinstance(request_id, (str, int, float)) else str(request_id)

    return payload
|
|
2578
|
+
|
|
2579
|
+
def _create_error_response(
    self, request_id: Optional[Any], code: int, message: str, data: Optional[Any] = None
) -> Dict[str, Any]:
    """
    Build a JSON-RPC 2.0 error response via ``_create_response``.

    Args:
        request_id: Id echoed from the original request.
        code: JSON-RPC 2.0 error code (e.g. -32601, -32603).
        message: Human-readable error message.
        data: Optional extra diagnostic payload; attached only when
            not None.

    Returns:
        Error response dictionary.
    """
    details: Dict[str, Any] = {"code": code, "message": message}
    if data is not None:
        details["data"] = data
    return self._create_response(request_id, error=details)
|
|
2598
|
+
|
|
2599
|
+
async def handle_request(self, request: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """
    Handle an MCP request.

    Dispatches JSON-RPC messages to the initialize / tools handlers.
    Messages without a valid id are treated as notifications and get
    no response.

    Args:
        request: MCP request dictionary.

    Returns:
        MCP response dictionary, or None for notifications (which per
        JSON-RPC 2.0 must not be answered).
    """
    # Log raw request for debugging
    self._mcp_logger.debug(f"Raw request received: {json.dumps(request)[:1000]}")

    method = request.get("method")
    params = request.get("params", {})

    # Get id, but only if it's present and valid (not null)
    # Use 'id' in request to check presence, then get value
    request_id = None
    if "id" in request:
        id_value = request["id"]
        # Only use id if it's a valid value (not None/null)
        if id_value is not None:
            request_id = id_value

    # Check if this is a notification (no id) or a request (has id)
    is_notification = "id" not in request or request.get("id") is None

    # Log request with full context
    if is_notification:
        self._mcp_logger.info(
            f"NOTIFICATION received: method={method}, params={json.dumps(params)[:500]}"
        )
    else:
        self._mcp_logger.info(
            f"REQUEST [id={request_id}] method={method}, params={json.dumps(params)[:500]}"
        )

    # Handle notifications (no response needed)
    if is_notification:
        if method == "notifications/initialized":
            self._mcp_logger.info("Client sent initialized notification - this is expected after server sends it")
            # Notifications don't get responses
            return None
        else:
            self._mcp_logger.warning(f"Unknown notification method: {method}")
            # Notifications don't get responses
            return None

    try:
        if method == "initialize":
            return await self._handle_initialize(request_id, params)
        elif method == "tools/list":
            return await self._handle_tools_list(request_id)
        elif method == "tools/call":
            return await self._handle_tools_call(request_id, params)
        else:
            self._mcp_logger.warning(f"Unknown method: {method}")
            # -32601: JSON-RPC "Method not found"
            return self._create_error_response(
                request_id,
                -32601,
                f"Method not found: {method}",
            )
    except Exception as e:
        # Log to both the MCP-specific logger and the module logger so
        # failures surface in either log stream.
        self._mcp_logger.error(
            f"RESPONSE [id={request_id}] Error handling request: {e}",
            exc_info=True,
        )
        logger.error(f"Error handling request: {e}", exc_info=True)
        # -32603: JSON-RPC "Internal error"
        return self._create_error_response(
            request_id,
            -32603,
            f"Internal error: {str(e)}",
        )
|
|
2673
|
+
|
|
2674
|
+
async def _handle_initialize(
    self, request_id: Optional[Any], params: Dict[str, Any]
) -> Dict[str, Any]:
    """Handle the JSON-RPC ``initialize`` request.

    Negotiates the protocol version (echoing the client's version when
    it is in the supported set, otherwise falling back to the server
    default), marks the server initialized, and returns the standard
    capabilities/serverInfo result.
    """
    client_protocol = params.get("protocolVersion", "unknown")
    self._mcp_logger.info(
        f"Initialize request: client protocol={client_protocol}, client_info={params.get('clientInfo', {})}"
    )

    # Prefer the client's version when we explicitly support it.
    supported = client_protocol in self.SUPPORTED_PROTOCOL_VERSIONS
    protocol_version = client_protocol if supported else self.PROTOCOL_VERSION
    if supported:
        self._mcp_logger.info(f"Using client's protocol version: {protocol_version}")
    else:
        self._mcp_logger.warning(
            f"Client protocol version {client_protocol} not explicitly supported, using {protocol_version}"
        )

    self._initialized = True
    self._mcp_logger.info(f"RESPONSE [id={request_id}] initialize successful")

    return self._create_response(
        request_id,
        result={
            "protocolVersion": protocol_version,
            "capabilities": {
                "tools": {},
            },
            "serverInfo": {
                "name": self.SERVER_NAME,
                "version": self.SERVER_VERSION,
            },
        },
    )
|
|
2710
|
+
|
|
2711
|
+
async def _handle_tools_list(self, request_id: Optional[Any]) -> Dict[str, Any]:
    """Handle the JSON-RPC ``tools/list`` request.

    Emits every registered tool definition; entries that are not
    already dicts are wrapped in a minimal MCP tool descriptor with an
    empty input schema. Initialization is not enforced — some clients
    list tools before completing the handshake — but a warning is
    logged.
    """
    # Allow tools/list even if not initialized (some clients do this)
    if not self._initialized:
        self._mcp_logger.warning(
            f"tools/list called before initialization complete (id={request_id})"
        )

    try:
        # Normalize the registry to a list of MCP tool descriptors.
        tools_list = [
            tool_def
            if isinstance(tool_def, dict)
            else {
                "name": tool_name,
                "description": str(tool_def),
                "inputSchema": {"type": "object", "properties": {}},
            }
            for tool_name, tool_def in self.tools.items()
        ]

        self._mcp_logger.info(
            f"RESPONSE [id={request_id}] tools/list: {len(tools_list)} tools available"
        )

        return self._create_response(
            request_id,
            result={"tools": tools_list},
        )
    except Exception as e:
        # Log to both loggers, then let the caller's error handling respond.
        self._mcp_logger.error(
            f"Error creating tools/list response: {e}", exc_info=True
        )
        logger.error(f"Error creating tools/list response: {e}", exc_info=True)
        raise
|
|
2747
|
+
|
|
2748
|
+
async def _handle_tools_call(
    self, request_id: Optional[Any], params: Dict[str, Any]
) -> Dict[str, Any]:
    """Handle the JSON-RPC ``tools/call`` request.

    Validates the tool name, dispatches to ``_execute_tool``, and wraps
    the JSON-serialized result in the MCP ``content`` array. Tool
    failures are converted into JSON-RPC internal-error responses
    rather than propagated.
    """
    tool_name = params.get("name")
    tool_args = params.get("arguments", {})

    # Guard: the MCP spec requires a tool name.
    if not tool_name:
        return self._create_error_response(
            request_id,
            -32602,
            "Invalid params: 'name' is required",
        )

    # Guard: reject tools that were never registered.
    if tool_name not in self.tools:
        self._mcp_logger.error(
            f"RESPONSE [id={request_id}] Tool not found: {tool_name}"
        )
        return self._create_error_response(
            request_id,
            -32601,
            f"Tool not found: {tool_name}",
        )

    # Execute the tool
    self._mcp_logger.info(
        f"EXECUTING [id={request_id}] tool={tool_name}, args={json.dumps(tool_args)[:500]}"
    )

    try:
        outcome = await self._execute_tool(tool_name, tool_args)

        # Format result according to MCP spec: content array with text items
        serialized = json.dumps(outcome, indent=2)
        preview = serialized[:500]

        self._mcp_logger.info(
            f"RESPONSE [id={request_id}] tool={tool_name} completed: {preview}"
        )

        content_item = {
            "type": "text",
            "text": serialized,
        }
        return self._create_response(
            request_id,
            result={"content": [content_item]},
        )
    except Exception as e:
        self._mcp_logger.error(
            f"Tool {tool_name} execution failed: {e}", exc_info=True
        )
        return self._create_error_response(
            request_id,
            -32603,
            f"Tool execution failed: {str(e)}",
        )
|
|
2808
|
+
|
|
2809
|
+
async def _execute_tool(self, tool_name: str, args: Dict[str, Any]) -> Any:
|
|
2810
|
+
"""
|
|
2811
|
+
Execute a tool by name.
|
|
2812
|
+
|
|
2813
|
+
Args:
|
|
2814
|
+
tool_name: Name of the tool to execute.
|
|
2815
|
+
args: Tool arguments.
|
|
2816
|
+
|
|
2817
|
+
Returns:
|
|
2818
|
+
Tool result.
|
|
2819
|
+
"""
|
|
2820
|
+
self._mcp_logger.debug(
|
|
2821
|
+
f"Executing tool: {tool_name} with args: {json.dumps(args)[:500]}"
|
|
2822
|
+
)
|
|
2823
|
+
|
|
2824
|
+
# Case management tools
|
|
2825
|
+
if tool_name == "create_case" and self.case_client:
|
|
2826
|
+
result = tools_case.create_case(
|
|
2827
|
+
title=args["title"],
|
|
2828
|
+
description=args["description"],
|
|
2829
|
+
priority=args.get("priority", "medium"),
|
|
2830
|
+
status=args.get("status", "open"),
|
|
2831
|
+
tags=args.get("tags"),
|
|
2832
|
+
alert_id=args.get("alert_id"),
|
|
2833
|
+
client=self.case_client,
|
|
2834
|
+
)
|
|
2835
|
+
self._mcp_logger.info(
|
|
2836
|
+
f"Tool {tool_name} executed: case created with ID {result.get('case', {}).get('id')}"
|
|
2837
|
+
)
|
|
2838
|
+
return result
|
|
2839
|
+
elif tool_name == "review_case" and self.case_client:
|
|
2840
|
+
result = tools_case.review_case(args["case_id"], self.case_client)
|
|
2841
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
2842
|
+
return result
|
|
2843
|
+
elif tool_name == "list_cases" and self.case_client:
|
|
2844
|
+
result = tools_case.list_cases(
|
|
2845
|
+
status=args.get("status"),
|
|
2846
|
+
limit=args.get("limit", 50),
|
|
2847
|
+
client=self.case_client,
|
|
2848
|
+
)
|
|
2849
|
+
self._mcp_logger.debug(
|
|
2850
|
+
f"Tool {tool_name} completed: {len(result.get('cases', []))} cases found"
|
|
2851
|
+
)
|
|
2852
|
+
return result
|
|
2853
|
+
elif tool_name == "search_cases" and self.case_client:
|
|
2854
|
+
result = tools_case.search_cases(
|
|
2855
|
+
text=args.get("text"),
|
|
2856
|
+
status=args.get("status"),
|
|
2857
|
+
priority=args.get("priority"),
|
|
2858
|
+
tags=args.get("tags"),
|
|
2859
|
+
assignee=args.get("assignee"),
|
|
2860
|
+
limit=args.get("limit", 50),
|
|
2861
|
+
client=self.case_client,
|
|
2862
|
+
)
|
|
2863
|
+
self._mcp_logger.debug(
|
|
2864
|
+
f"Tool {tool_name} completed: {len(result.get('cases', []))} cases found"
|
|
2865
|
+
)
|
|
2866
|
+
return result
|
|
2867
|
+
elif tool_name == "add_case_comment" and self.case_client:
|
|
2868
|
+
result = tools_case.add_case_comment(
|
|
2869
|
+
case_id=args["case_id"],
|
|
2870
|
+
content=args["content"],
|
|
2871
|
+
author=args.get("author"),
|
|
2872
|
+
client=self.case_client,
|
|
2873
|
+
)
|
|
2874
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
2875
|
+
return result
|
|
2876
|
+
elif tool_name == "attach_observable_to_case" and self.case_client:
|
|
2877
|
+
result = tools_case.attach_observable_to_case(
|
|
2878
|
+
case_id=args["case_id"],
|
|
2879
|
+
observable_type=args["observable_type"],
|
|
2880
|
+
observable_value=args["observable_value"],
|
|
2881
|
+
description=args.get("description"),
|
|
2882
|
+
tags=args.get("tags"),
|
|
2883
|
+
client=self.case_client,
|
|
2884
|
+
)
|
|
2885
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
2886
|
+
return result
|
|
2887
|
+
elif tool_name == "update_case_status" and self.case_client:
|
|
2888
|
+
result = tools_case.update_case_status(
|
|
2889
|
+
case_id=args["case_id"],
|
|
2890
|
+
status=args["status"],
|
|
2891
|
+
client=self.case_client,
|
|
2892
|
+
)
|
|
2893
|
+
self._mcp_logger.debug(
|
|
2894
|
+
f"Tool {tool_name} completed: case {args['case_id']} status updated to {args['status']}"
|
|
2895
|
+
)
|
|
2896
|
+
return result
|
|
2897
|
+
elif tool_name == "assign_case" and self.case_client:
|
|
2898
|
+
result = tools_case.assign_case(
|
|
2899
|
+
case_id=args["case_id"],
|
|
2900
|
+
assignee=args["assignee"],
|
|
2901
|
+
client=self.case_client,
|
|
2902
|
+
)
|
|
2903
|
+
self._mcp_logger.debug(
|
|
2904
|
+
f"Tool {tool_name} completed: case {args['case_id']} assigned to {args['assignee']}"
|
|
2905
|
+
)
|
|
2906
|
+
return result
|
|
2907
|
+
elif tool_name == "get_case_timeline" and self.case_client:
|
|
2908
|
+
result = tools_case.get_case_timeline(
|
|
2909
|
+
case_id=args["case_id"],
|
|
2910
|
+
client=self.case_client,
|
|
2911
|
+
)
|
|
2912
|
+
self._mcp_logger.debug(
|
|
2913
|
+
f"Tool {tool_name} completed: {len(result.get('timeline', []))} events found"
|
|
2914
|
+
)
|
|
2915
|
+
return result
|
|
2916
|
+
elif tool_name == "add_case_task" and self.case_client:
|
|
2917
|
+
result = tools_case.add_case_task(
|
|
2918
|
+
case_id=args["case_id"],
|
|
2919
|
+
title=args["title"],
|
|
2920
|
+
description=args["description"],
|
|
2921
|
+
assignee=args.get("assignee"),
|
|
2922
|
+
priority=args.get("priority", "medium"),
|
|
2923
|
+
status=args.get("status", "pending"),
|
|
2924
|
+
client=self.case_client,
|
|
2925
|
+
)
|
|
2926
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: task '{args['title']}' added to case {args['case_id']}")
|
|
2927
|
+
return result
|
|
2928
|
+
elif tool_name == "list_case_tasks" and self.case_client:
|
|
2929
|
+
result = tools_case.list_case_tasks(args["case_id"], self.case_client)
|
|
2930
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed: {result.get('count', 0)} tasks found")
|
|
2931
|
+
return result
|
|
2932
|
+
elif tool_name == "update_case_task_status" and self.case_client:
|
|
2933
|
+
result = tools_case.update_case_task_status(
|
|
2934
|
+
case_id=args["case_id"],
|
|
2935
|
+
task_id=args["task_id"],
|
|
2936
|
+
status=args["status"],
|
|
2937
|
+
client=self.case_client,
|
|
2938
|
+
)
|
|
2939
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: task {args['task_id']} status updated to '{args['status']}'")
|
|
2940
|
+
return result
|
|
2941
|
+
elif tool_name == "add_case_asset" and self.case_client:
|
|
2942
|
+
result = tools_case.add_case_asset(
|
|
2943
|
+
case_id=args["case_id"],
|
|
2944
|
+
asset_name=args["asset_name"],
|
|
2945
|
+
asset_type=args["asset_type"],
|
|
2946
|
+
description=args.get("description"),
|
|
2947
|
+
ip_address=args.get("ip_address"),
|
|
2948
|
+
hostname=args.get("hostname"),
|
|
2949
|
+
tags=args.get("tags"),
|
|
2950
|
+
client=self.case_client,
|
|
2951
|
+
)
|
|
2952
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: asset '{args['asset_name']}' added to case {args['case_id']}")
|
|
2953
|
+
return result
|
|
2954
|
+
elif tool_name == "list_case_assets" and self.case_client:
|
|
2955
|
+
result = tools_case.list_case_assets(args["case_id"], self.case_client)
|
|
2956
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed: {result.get('count', 0)} assets found")
|
|
2957
|
+
return result
|
|
2958
|
+
elif tool_name == "add_case_evidence" and self.case_client:
|
|
2959
|
+
result = tools_case.add_case_evidence(
|
|
2960
|
+
case_id=args["case_id"],
|
|
2961
|
+
file_path=args["file_path"],
|
|
2962
|
+
description=args.get("description"),
|
|
2963
|
+
evidence_type=args.get("evidence_type"),
|
|
2964
|
+
client=self.case_client,
|
|
2965
|
+
)
|
|
2966
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: evidence '{args['file_path']}' added to case {args['case_id']}")
|
|
2967
|
+
return result
|
|
2968
|
+
elif tool_name == "list_case_evidence" and self.case_client:
|
|
2969
|
+
result = tools_case.list_case_evidence(args["case_id"], self.case_client)
|
|
2970
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed: {result.get('count', 0)} evidence files found")
|
|
2971
|
+
return result
|
|
2972
|
+
elif tool_name == "update_case" and self.case_client:
|
|
2973
|
+
result = tools_case.update_case(
|
|
2974
|
+
case_id=args["case_id"],
|
|
2975
|
+
title=args.get("title"),
|
|
2976
|
+
description=args.get("description"),
|
|
2977
|
+
priority=args.get("priority"),
|
|
2978
|
+
status=args.get("status"),
|
|
2979
|
+
tags=args.get("tags"),
|
|
2980
|
+
assignee=args.get("assignee"),
|
|
2981
|
+
client=self.case_client,
|
|
2982
|
+
)
|
|
2983
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: case {args['case_id']} updated")
|
|
2984
|
+
return result
|
|
2985
|
+
elif tool_name == "link_cases" and self.case_client:
|
|
2986
|
+
result = tools_case.link_cases(
|
|
2987
|
+
source_case_id=args["source_case_id"],
|
|
2988
|
+
target_case_id=args["target_case_id"],
|
|
2989
|
+
link_type=args.get("link_type", "related_to"),
|
|
2990
|
+
client=self.case_client,
|
|
2991
|
+
)
|
|
2992
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: case {args['source_case_id']} linked to {args['target_case_id']}")
|
|
2993
|
+
return result
|
|
2994
|
+
elif tool_name == "add_case_timeline_event" and self.case_client:
|
|
2995
|
+
result = tools_case.add_case_timeline_event(
|
|
2996
|
+
case_id=args["case_id"],
|
|
2997
|
+
title=args["title"],
|
|
2998
|
+
content=args["content"],
|
|
2999
|
+
source=args.get("source"),
|
|
3000
|
+
category_id=args.get("category_id"),
|
|
3001
|
+
tags=args.get("tags"),
|
|
3002
|
+
color=args.get("color"),
|
|
3003
|
+
event_date=args.get("event_date"),
|
|
3004
|
+
include_in_summary=args.get("include_in_summary", True),
|
|
3005
|
+
include_in_graph=args.get("include_in_graph", True),
|
|
3006
|
+
sync_iocs_assets=args.get("sync_iocs_assets", True),
|
|
3007
|
+
asset_ids=args.get("asset_ids"),
|
|
3008
|
+
ioc_ids=args.get("ioc_ids"),
|
|
3009
|
+
custom_attributes=args.get("custom_attributes"),
|
|
3010
|
+
raw=args.get("raw"),
|
|
3011
|
+
tz=args.get("tz"),
|
|
3012
|
+
client=self.case_client,
|
|
3013
|
+
)
|
|
3014
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: timeline event '{args['title']}' added to case {args['case_id']}")
|
|
3015
|
+
return result
|
|
3016
|
+
elif tool_name == "list_case_timeline_events" and self.case_client:
|
|
3017
|
+
result = tools_case.list_case_timeline_events(args["case_id"], self.case_client)
|
|
3018
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed: {result.get('count', 0)} timeline events found")
|
|
3019
|
+
return result
|
|
3020
|
+
|
|
3021
|
+
# SIEM tools
|
|
3022
|
+
elif tool_name == "search_security_events" and self.siem_client:
|
|
3023
|
+
result = tools_siem.search_security_events(
|
|
3024
|
+
query=args["query"],
|
|
3025
|
+
limit=args.get("limit", 100),
|
|
3026
|
+
client=self.siem_client,
|
|
3027
|
+
)
|
|
3028
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3029
|
+
return result
|
|
3030
|
+
elif tool_name == "get_file_report" and self.siem_client:
|
|
3031
|
+
result = tools_siem.get_file_report(
|
|
3032
|
+
file_hash=args["file_hash"],
|
|
3033
|
+
client=self.siem_client,
|
|
3034
|
+
)
|
|
3035
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3036
|
+
return result
|
|
3037
|
+
elif tool_name == "get_file_behavior_summary" and self.siem_client:
|
|
3038
|
+
result = tools_siem.get_file_behavior_summary(
|
|
3039
|
+
file_hash=args["file_hash"],
|
|
3040
|
+
client=self.siem_client,
|
|
3041
|
+
)
|
|
3042
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3043
|
+
return result
|
|
3044
|
+
elif tool_name == "get_entities_related_to_file" and self.siem_client:
|
|
3045
|
+
result = tools_siem.get_entities_related_to_file(
|
|
3046
|
+
file_hash=args["file_hash"],
|
|
3047
|
+
client=self.siem_client,
|
|
3048
|
+
)
|
|
3049
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3050
|
+
return result
|
|
3051
|
+
elif tool_name == "get_ip_address_report" and self.siem_client:
|
|
3052
|
+
result = tools_siem.get_ip_address_report(
|
|
3053
|
+
ip=args["ip"],
|
|
3054
|
+
client=self.siem_client,
|
|
3055
|
+
)
|
|
3056
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3057
|
+
return result
|
|
3058
|
+
elif tool_name == "search_user_activity" and self.siem_client:
|
|
3059
|
+
result = tools_siem.search_user_activity(
|
|
3060
|
+
username=args["username"],
|
|
3061
|
+
limit=args.get("limit", 100),
|
|
3062
|
+
client=self.siem_client,
|
|
3063
|
+
)
|
|
3064
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3065
|
+
return result
|
|
3066
|
+
elif tool_name == "pivot_on_indicator" and self.siem_client:
|
|
3067
|
+
result = tools_siem.pivot_on_indicator(
|
|
3068
|
+
indicator=args["indicator"],
|
|
3069
|
+
limit=args.get("limit", 200),
|
|
3070
|
+
client=self.siem_client,
|
|
3071
|
+
)
|
|
3072
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3073
|
+
return result
|
|
3074
|
+
elif tool_name == "search_kql_query" and self.siem_client:
|
|
3075
|
+
result = tools_siem.search_kql_query(
|
|
3076
|
+
kql_query=args["kql_query"],
|
|
3077
|
+
limit=args.get("limit", 500),
|
|
3078
|
+
hours_back=args.get("hours_back"),
|
|
3079
|
+
client=self.siem_client,
|
|
3080
|
+
)
|
|
3081
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3082
|
+
return result
|
|
3083
|
+
elif tool_name == "get_recent_alerts" and self.siem_client:
|
|
3084
|
+
result = tools_siem.get_recent_alerts(
|
|
3085
|
+
hours_back=args.get("hours_back", 1),
|
|
3086
|
+
max_alerts=args.get("max_alerts", 100),
|
|
3087
|
+
status_filter=args.get("status_filter"),
|
|
3088
|
+
severity=args.get("severity"),
|
|
3089
|
+
hostname=args.get("hostname"),
|
|
3090
|
+
client=self.siem_client,
|
|
3091
|
+
)
|
|
3092
|
+
self._mcp_logger.debug(
|
|
3093
|
+
f"Tool {tool_name} completed: {result.get('group_count', 0)} groups from {result.get('total_alerts', 0)} alerts"
|
|
3094
|
+
)
|
|
3095
|
+
return result
|
|
3096
|
+
elif tool_name == "get_security_alerts" and self.siem_client:
|
|
3097
|
+
result = tools_siem.get_security_alerts(
|
|
3098
|
+
hours_back=args.get("hours_back", 24),
|
|
3099
|
+
max_alerts=args.get("max_alerts", 10),
|
|
3100
|
+
status_filter=args.get("status_filter"),
|
|
3101
|
+
severity=args.get("severity"),
|
|
3102
|
+
client=self.siem_client,
|
|
3103
|
+
)
|
|
3104
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3105
|
+
return result
|
|
3106
|
+
elif tool_name == "get_security_alert_by_id" and self.siem_client:
|
|
3107
|
+
result = tools_siem.get_security_alert_by_id(
|
|
3108
|
+
alert_id=args["alert_id"],
|
|
3109
|
+
include_detections=args.get("include_detections", True),
|
|
3110
|
+
client=self.siem_client,
|
|
3111
|
+
)
|
|
3112
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3113
|
+
return result
|
|
3114
|
+
elif tool_name == "get_siem_event_by_id" and self.siem_client:
|
|
3115
|
+
result = tools_siem.get_siem_event_by_id(
|
|
3116
|
+
event_id=args["event_id"],
|
|
3117
|
+
client=self.siem_client,
|
|
3118
|
+
)
|
|
3119
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3120
|
+
return result
|
|
3121
|
+
elif tool_name == "close_alert" and self.siem_client:
|
|
3122
|
+
result = tools_siem.close_alert(
|
|
3123
|
+
alert_id=args["alert_id"],
|
|
3124
|
+
reason=args.get("reason"),
|
|
3125
|
+
comment=args.get("comment"),
|
|
3126
|
+
client=self.siem_client,
|
|
3127
|
+
)
|
|
3128
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3129
|
+
return result
|
|
3130
|
+
elif tool_name == "update_alert_verdict" and self.siem_client:
|
|
3131
|
+
result = tools_siem.update_alert_verdict(
|
|
3132
|
+
alert_id=args["alert_id"],
|
|
3133
|
+
verdict=args["verdict"],
|
|
3134
|
+
comment=args.get("comment"),
|
|
3135
|
+
client=self.siem_client,
|
|
3136
|
+
)
|
|
3137
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3138
|
+
return result
|
|
3139
|
+
elif tool_name == "tag_alert" and self.siem_client:
|
|
3140
|
+
result = tools_siem.tag_alert(
|
|
3141
|
+
alert_id=args["alert_id"],
|
|
3142
|
+
tag=args["tag"],
|
|
3143
|
+
client=self.siem_client,
|
|
3144
|
+
)
|
|
3145
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3146
|
+
return result
|
|
3147
|
+
elif tool_name == "add_alert_note" and self.siem_client:
|
|
3148
|
+
result = tools_siem.add_alert_note(
|
|
3149
|
+
alert_id=args["alert_id"],
|
|
3150
|
+
note=args["note"],
|
|
3151
|
+
client=self.siem_client,
|
|
3152
|
+
)
|
|
3153
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3154
|
+
return result
|
|
3155
|
+
elif tool_name == "lookup_entity" and self.siem_client:
|
|
3156
|
+
result = tools_siem.lookup_entity(
|
|
3157
|
+
entity_value=args["entity_value"],
|
|
3158
|
+
entity_type=args.get("entity_type"),
|
|
3159
|
+
hours_back=args.get("hours_back", 24),
|
|
3160
|
+
client=self.siem_client,
|
|
3161
|
+
)
|
|
3162
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3163
|
+
return result
|
|
3164
|
+
elif tool_name == "get_ioc_matches" and self.siem_client:
|
|
3165
|
+
result = tools_siem.get_ioc_matches(
|
|
3166
|
+
hours_back=args.get("hours_back", 24),
|
|
3167
|
+
max_matches=args.get("max_matches", 20),
|
|
3168
|
+
ioc_type=args.get("ioc_type"),
|
|
3169
|
+
severity=args.get("severity"),
|
|
3170
|
+
client=self.siem_client,
|
|
3171
|
+
)
|
|
3172
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3173
|
+
return result
|
|
3174
|
+
elif tool_name == "get_threat_intel" and self.siem_client:
|
|
3175
|
+
result = tools_siem.get_threat_intel(
|
|
3176
|
+
query=args["query"],
|
|
3177
|
+
context=args.get("context"),
|
|
3178
|
+
client=self.siem_client,
|
|
3179
|
+
)
|
|
3180
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3181
|
+
return result
|
|
3182
|
+
elif tool_name == "list_security_rules" and self.siem_client:
|
|
3183
|
+
result = tools_siem.list_security_rules(
|
|
3184
|
+
enabled_only=args.get("enabled_only", False),
|
|
3185
|
+
limit=args.get("limit", 100),
|
|
3186
|
+
client=self.siem_client,
|
|
3187
|
+
)
|
|
3188
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3189
|
+
return result
|
|
3190
|
+
elif tool_name == "search_security_rules" and self.siem_client:
|
|
3191
|
+
result = tools_siem.search_security_rules(
|
|
3192
|
+
query=args["query"],
|
|
3193
|
+
category=args.get("category"),
|
|
3194
|
+
enabled_only=args.get("enabled_only", False),
|
|
3195
|
+
client=self.siem_client,
|
|
3196
|
+
)
|
|
3197
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3198
|
+
return result
|
|
3199
|
+
elif tool_name == "get_rule_detections" and self.siem_client:
|
|
3200
|
+
result = tools_siem.get_rule_detections(
|
|
3201
|
+
rule_id=args["rule_id"],
|
|
3202
|
+
alert_state=args.get("alert_state"),
|
|
3203
|
+
hours_back=args.get("hours_back", 24),
|
|
3204
|
+
limit=args.get("limit", 50),
|
|
3205
|
+
client=self.siem_client,
|
|
3206
|
+
)
|
|
3207
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3208
|
+
return result
|
|
3209
|
+
elif tool_name == "list_rule_errors" and self.siem_client:
|
|
3210
|
+
result = tools_siem.list_rule_errors(
|
|
3211
|
+
rule_id=args["rule_id"],
|
|
3212
|
+
hours_back=args.get("hours_back", 24),
|
|
3213
|
+
client=self.siem_client,
|
|
3214
|
+
)
|
|
3215
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3216
|
+
return result
|
|
3217
|
+
elif tool_name == "get_network_events" and self.siem_client:
|
|
3218
|
+
result = tools_siem.get_network_events(
|
|
3219
|
+
source_ip=args.get("source_ip"),
|
|
3220
|
+
destination_ip=args.get("destination_ip"),
|
|
3221
|
+
port=args.get("port"),
|
|
3222
|
+
protocol=args.get("protocol"),
|
|
3223
|
+
hours_back=args.get("hours_back", 24),
|
|
3224
|
+
limit=args.get("limit", 100),
|
|
3225
|
+
event_type=args.get("event_type"),
|
|
3226
|
+
client=self.siem_client,
|
|
3227
|
+
)
|
|
3228
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3229
|
+
return result
|
|
3230
|
+
elif tool_name == "get_dns_events" and self.siem_client:
|
|
3231
|
+
result = tools_siem.get_dns_events(
|
|
3232
|
+
domain=args.get("domain"),
|
|
3233
|
+
ip_address=args.get("ip_address"),
|
|
3234
|
+
resolved_ip=args.get("resolved_ip"),
|
|
3235
|
+
query_type=args.get("query_type"),
|
|
3236
|
+
hours_back=args.get("hours_back", 24),
|
|
3237
|
+
limit=args.get("limit", 100),
|
|
3238
|
+
client=self.siem_client,
|
|
3239
|
+
)
|
|
3240
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3241
|
+
return result
|
|
3242
|
+
elif tool_name == "get_alerts_by_entity" and self.siem_client:
|
|
3243
|
+
result = tools_siem.get_alerts_by_entity(
|
|
3244
|
+
entity_value=args["entity_value"],
|
|
3245
|
+
entity_type=args.get("entity_type"),
|
|
3246
|
+
hours_back=args.get("hours_back", 24),
|
|
3247
|
+
limit=args.get("limit", 50),
|
|
3248
|
+
severity=args.get("severity"),
|
|
3249
|
+
client=self.siem_client,
|
|
3250
|
+
)
|
|
3251
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3252
|
+
return result
|
|
3253
|
+
elif tool_name == "get_alerts_by_time_window" and self.siem_client:
|
|
3254
|
+
result = tools_siem.get_alerts_by_time_window(
|
|
3255
|
+
start_time=args["start_time"],
|
|
3256
|
+
end_time=args["end_time"],
|
|
3257
|
+
limit=args.get("limit", 100),
|
|
3258
|
+
severity=args.get("severity"),
|
|
3259
|
+
alert_type=args.get("alert_type"),
|
|
3260
|
+
client=self.siem_client,
|
|
3261
|
+
)
|
|
3262
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3263
|
+
return result
|
|
3264
|
+
elif tool_name == "get_all_uncertain_alerts_for_host" and self.siem_client:
|
|
3265
|
+
result = tools_siem.get_all_uncertain_alerts_for_host(
|
|
3266
|
+
hostname=args["hostname"],
|
|
3267
|
+
hours_back=args.get("hours_back", 168),
|
|
3268
|
+
limit=args.get("limit", 100),
|
|
3269
|
+
client=self.siem_client,
|
|
3270
|
+
)
|
|
3271
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3272
|
+
return result
|
|
3273
|
+
elif tool_name == "get_email_events" and self.siem_client:
|
|
3274
|
+
result = tools_siem.get_email_events(
|
|
3275
|
+
sender_email=args.get("sender_email"),
|
|
3276
|
+
recipient_email=args.get("recipient_email"),
|
|
3277
|
+
subject=args.get("subject"),
|
|
3278
|
+
email_id=args.get("email_id"),
|
|
3279
|
+
hours_back=args.get("hours_back", 24),
|
|
3280
|
+
limit=args.get("limit", 100),
|
|
3281
|
+
event_type=args.get("event_type"),
|
|
3282
|
+
client=self.siem_client,
|
|
3283
|
+
)
|
|
3284
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3285
|
+
return result
|
|
3286
|
+
|
|
3287
|
+
# EDR tools
|
|
3288
|
+
elif tool_name == "get_endpoint_summary" and self.edr_client:
|
|
3289
|
+
result = tools_edr.get_endpoint_summary(
|
|
3290
|
+
endpoint_id=args["endpoint_id"],
|
|
3291
|
+
client=self.edr_client,
|
|
3292
|
+
)
|
|
3293
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3294
|
+
return result
|
|
3295
|
+
elif tool_name == "get_detection_details" and self.edr_client:
|
|
3296
|
+
result = tools_edr.get_detection_details(
|
|
3297
|
+
detection_id=args["detection_id"],
|
|
3298
|
+
client=self.edr_client,
|
|
3299
|
+
)
|
|
3300
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3301
|
+
return result
|
|
3302
|
+
elif tool_name == "isolate_endpoint" and self.edr_client:
|
|
3303
|
+
result = tools_edr.isolate_endpoint(
|
|
3304
|
+
endpoint_id=args["endpoint_id"],
|
|
3305
|
+
client=self.edr_client,
|
|
3306
|
+
)
|
|
3307
|
+
self._mcp_logger.warning(
|
|
3308
|
+
f"Tool {tool_name} executed: endpoint {args['endpoint_id']} isolated"
|
|
3309
|
+
)
|
|
3310
|
+
return result
|
|
3311
|
+
elif tool_name == "release_endpoint_isolation" and self.edr_client:
|
|
3312
|
+
result = tools_edr.release_endpoint_isolation(
|
|
3313
|
+
endpoint_id=args["endpoint_id"],
|
|
3314
|
+
client=self.edr_client,
|
|
3315
|
+
)
|
|
3316
|
+
self._mcp_logger.info(
|
|
3317
|
+
f"Tool {tool_name} executed: endpoint {args['endpoint_id']} isolation released"
|
|
3318
|
+
)
|
|
3319
|
+
return result
|
|
3320
|
+
elif tool_name == "kill_process_on_endpoint" and self.edr_client:
|
|
3321
|
+
result = tools_edr.kill_process_on_endpoint(
|
|
3322
|
+
endpoint_id=args["endpoint_id"],
|
|
3323
|
+
pid=args["pid"],
|
|
3324
|
+
client=self.edr_client,
|
|
3325
|
+
)
|
|
3326
|
+
self._mcp_logger.warning(
|
|
3327
|
+
f"Tool {tool_name} executed: process {args['pid']} killed on endpoint {args['endpoint_id']}"
|
|
3328
|
+
)
|
|
3329
|
+
return result
|
|
3330
|
+
elif tool_name == "collect_forensic_artifacts" and self.edr_client:
|
|
3331
|
+
result = tools_edr.collect_forensic_artifacts(
|
|
3332
|
+
endpoint_id=args["endpoint_id"],
|
|
3333
|
+
artifact_types=args["artifact_types"],
|
|
3334
|
+
client=self.edr_client,
|
|
3335
|
+
)
|
|
3336
|
+
self._mcp_logger.info(
|
|
3337
|
+
f"Tool {tool_name} executed: collecting {args['artifact_types']} from endpoint {args['endpoint_id']}"
|
|
3338
|
+
)
|
|
3339
|
+
return result
|
|
3340
|
+
|
|
3341
|
+
# CTI tools
|
|
3342
|
+
elif tool_name == "lookup_hash_ti" and (self.cti_client or self.cti_clients):
|
|
3343
|
+
# Use multiple clients if available, otherwise fall back to single client
|
|
3344
|
+
result = tools_cti.lookup_hash_ti(
|
|
3345
|
+
hash_value=args["hash_value"],
|
|
3346
|
+
client=self.cti_client, # For backward compatibility
|
|
3347
|
+
clients=self.cti_clients if self.cti_clients else None, # Pass list of clients
|
|
3348
|
+
)
|
|
3349
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed successfully")
|
|
3350
|
+
return result
|
|
3351
|
+
|
|
3352
|
+
# Rules engine tools
|
|
3353
|
+
elif tool_name == "list_rules":
|
|
3354
|
+
result = {"rules": self.rules_engine.list_rules()}
|
|
3355
|
+
self._mcp_logger.debug(
|
|
3356
|
+
f"Tool {tool_name} completed: {len(result['rules'])} rules found"
|
|
3357
|
+
)
|
|
3358
|
+
return result
|
|
3359
|
+
elif tool_name == "execute_rule":
|
|
3360
|
+
result = self.rules_engine.execute_rule(
|
|
3361
|
+
rule_name=args["rule_name"],
|
|
3362
|
+
context=args.get("context"),
|
|
3363
|
+
)
|
|
3364
|
+
self._mcp_logger.info(
|
|
3365
|
+
f"Tool {tool_name} executed: rule '{args['rule_name']}' completed"
|
|
3366
|
+
)
|
|
3367
|
+
return result
|
|
3368
|
+
|
|
3369
|
+
# Knowledge base tools (client infrastructure)
|
|
3370
|
+
elif tool_name == "kb_list_clients" and self.kb_client:
|
|
3371
|
+
result = tools_kb.list_kb_clients(client=self.kb_client)
|
|
3372
|
+
self._mcp_logger.debug(
|
|
3373
|
+
f"Tool {tool_name} completed: {result.get('count', 0)} clients found"
|
|
3374
|
+
)
|
|
3375
|
+
return result
|
|
3376
|
+
elif tool_name == "kb_get_client_infra" and self.kb_client:
|
|
3377
|
+
result = tools_kb.get_client_infra(
|
|
3378
|
+
client_name=args["client_name"],
|
|
3379
|
+
client=self.kb_client,
|
|
3380
|
+
)
|
|
3381
|
+
self._mcp_logger.debug(
|
|
3382
|
+
f"Tool {tool_name} completed for client {result.get('client_name')}"
|
|
3383
|
+
)
|
|
3384
|
+
return result
|
|
3385
|
+
|
|
3386
|
+
# Engineering tools (Trello/ClickUp)
|
|
3387
|
+
elif tool_name == "create_fine_tuning_recommendation" and self.eng_client:
|
|
3388
|
+
result = tools_eng.create_fine_tuning_recommendation(
|
|
3389
|
+
title=args["title"],
|
|
3390
|
+
description=args["description"],
|
|
3391
|
+
list_name=args.get("list_name"),
|
|
3392
|
+
labels=args.get("labels"),
|
|
3393
|
+
status=args.get("status"),
|
|
3394
|
+
tags=args.get("tags"),
|
|
3395
|
+
client=self.eng_client,
|
|
3396
|
+
)
|
|
3397
|
+
provider = result.get("provider", "unknown")
|
|
3398
|
+
self._mcp_logger.info(
|
|
3399
|
+
f"Tool {tool_name} executed: fine-tuning recommendation '{args['title']}' created ({provider})"
|
|
3400
|
+
)
|
|
3401
|
+
return result
|
|
3402
|
+
elif tool_name == "create_visibility_recommendation" and self.eng_client:
|
|
3403
|
+
result = tools_eng.create_visibility_recommendation(
|
|
3404
|
+
title=args["title"],
|
|
3405
|
+
description=args["description"],
|
|
3406
|
+
list_name=args.get("list_name"),
|
|
3407
|
+
labels=args.get("labels"),
|
|
3408
|
+
status=args.get("status"),
|
|
3409
|
+
tags=args.get("tags"),
|
|
3410
|
+
client=self.eng_client,
|
|
3411
|
+
)
|
|
3412
|
+
provider = result.get("provider", "unknown")
|
|
3413
|
+
self._mcp_logger.info(
|
|
3414
|
+
f"Tool {tool_name} executed: visibility recommendation '{args['title']}' created ({provider})"
|
|
3415
|
+
)
|
|
3416
|
+
return result
|
|
3417
|
+
elif tool_name == "list_fine_tuning_recommendations" and self.eng_client:
|
|
3418
|
+
result = tools_eng.list_fine_tuning_recommendations(
|
|
3419
|
+
archived=args.get("archived", False),
|
|
3420
|
+
include_closed=args.get("include_closed", True),
|
|
3421
|
+
order_by=args.get("order_by"),
|
|
3422
|
+
reverse=args.get("reverse", False),
|
|
3423
|
+
subtasks=args.get("subtasks", False),
|
|
3424
|
+
statuses=args.get("statuses"),
|
|
3425
|
+
include_markdown_description=args.get("include_markdown_description", False),
|
|
3426
|
+
client=self.eng_client,
|
|
3427
|
+
)
|
|
3428
|
+
count = result.get("count", 0)
|
|
3429
|
+
self._mcp_logger.info(
|
|
3430
|
+
f"Tool {tool_name} executed: found {count} fine-tuning recommendations"
|
|
3431
|
+
)
|
|
3432
|
+
return result
|
|
3433
|
+
elif tool_name == "list_visibility_recommendations" and self.eng_client:
|
|
3434
|
+
result = tools_eng.list_visibility_recommendations(
|
|
3435
|
+
archived=args.get("archived", False),
|
|
3436
|
+
include_closed=args.get("include_closed", True),
|
|
3437
|
+
order_by=args.get("order_by"),
|
|
3438
|
+
reverse=args.get("reverse", False),
|
|
3439
|
+
subtasks=args.get("subtasks", False),
|
|
3440
|
+
statuses=args.get("statuses"),
|
|
3441
|
+
include_markdown_description=args.get("include_markdown_description", False),
|
|
3442
|
+
client=self.eng_client,
|
|
3443
|
+
)
|
|
3444
|
+
count = result.get("count", 0)
|
|
3445
|
+
self._mcp_logger.info(
|
|
3446
|
+
f"Tool {tool_name} executed: found {count} visibility recommendations"
|
|
3447
|
+
)
|
|
3448
|
+
return result
|
|
3449
|
+
elif tool_name == "add_comment_to_fine_tuning_recommendation" and self.eng_client:
|
|
3450
|
+
result = tools_eng.add_comment_to_fine_tuning_recommendation(
|
|
3451
|
+
task_id=args["task_id"],
|
|
3452
|
+
comment_text=args["comment_text"],
|
|
3453
|
+
client=self.eng_client,
|
|
3454
|
+
)
|
|
3455
|
+
self._mcp_logger.info(
|
|
3456
|
+
f"Tool {tool_name} executed: comment added to fine-tuning recommendation task {args['task_id']}"
|
|
3457
|
+
)
|
|
3458
|
+
return result
|
|
3459
|
+
elif tool_name == "add_comment_to_visibility_recommendation" and self.eng_client:
|
|
3460
|
+
result = tools_eng.add_comment_to_visibility_recommendation(
|
|
3461
|
+
task_id=args["task_id"],
|
|
3462
|
+
comment_text=args["comment_text"],
|
|
3463
|
+
client=self.eng_client,
|
|
3464
|
+
)
|
|
3465
|
+
self._mcp_logger.info(
|
|
3466
|
+
f"Tool {tool_name} executed: comment added to visibility recommendation task {args['task_id']}"
|
|
3467
|
+
)
|
|
3468
|
+
return result
|
|
3469
|
+
|
|
3470
|
+
# Runbook tools
|
|
3471
|
+
elif tool_name == "list_runbooks":
|
|
3472
|
+
result = self._handle_list_runbooks(
|
|
3473
|
+
soc_tier=args.get("soc_tier"),
|
|
3474
|
+
category=args.get("category")
|
|
3475
|
+
)
|
|
3476
|
+
self._mcp_logger.debug(
|
|
3477
|
+
f"Tool {tool_name} completed: {result.get('count', 0)} runbooks found"
|
|
3478
|
+
)
|
|
3479
|
+
return result
|
|
3480
|
+
elif tool_name == "get_runbook":
|
|
3481
|
+
result = self._handle_get_runbook(runbook_name=args["runbook_name"])
|
|
3482
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed: {args['runbook_name']}")
|
|
3483
|
+
return result
|
|
3484
|
+
elif tool_name == "execute_runbook":
|
|
3485
|
+
result = self._handle_execute_runbook(
|
|
3486
|
+
runbook_name=args["runbook_name"],
|
|
3487
|
+
case_id=args.get("case_id"),
|
|
3488
|
+
alert_id=args.get("alert_id"),
|
|
3489
|
+
soc_tier=args.get("soc_tier")
|
|
3490
|
+
)
|
|
3491
|
+
self._mcp_logger.info(
|
|
3492
|
+
f"Tool {tool_name} executed: runbook '{args['runbook_name']}' provided for execution"
|
|
3493
|
+
)
|
|
3494
|
+
return result
|
|
3495
|
+
|
|
3496
|
+
# Agent profile tools
|
|
3497
|
+
elif tool_name == "list_agent_profiles":
|
|
3498
|
+
result = {"profiles": self.agent_profile_manager.list_profiles(), "count": len(self.agent_profile_manager.profiles)}
|
|
3499
|
+
self._mcp_logger.debug(
|
|
3500
|
+
f"Tool {tool_name} completed: {result['count']} agent profiles found"
|
|
3501
|
+
)
|
|
3502
|
+
return result
|
|
3503
|
+
elif tool_name == "get_agent_profile":
|
|
3504
|
+
result = self._handle_get_agent_profile(agent_id=args["agent_id"])
|
|
3505
|
+
self._mcp_logger.debug(f"Tool {tool_name} completed: {args['agent_id']}")
|
|
3506
|
+
return result
|
|
3507
|
+
elif tool_name == "route_case_to_agent":
|
|
3508
|
+
result = self._handle_route_case_to_agent(
|
|
3509
|
+
case_id=args.get("case_id"),
|
|
3510
|
+
alert_id=args.get("alert_id"),
|
|
3511
|
+
alert_type=args.get("alert_type"),
|
|
3512
|
+
case_status=args.get("case_status")
|
|
3513
|
+
)
|
|
3514
|
+
self._mcp_logger.info(f"Tool {tool_name} executed: routed to {result.get('agent_id')}")
|
|
3515
|
+
return result
|
|
3516
|
+
elif tool_name == "execute_as_agent":
|
|
3517
|
+
result = self._handle_execute_as_agent(
|
|
3518
|
+
agent_id=args["agent_id"],
|
|
3519
|
+
case_id=args.get("case_id"),
|
|
3520
|
+
alert_id=args.get("alert_id"),
|
|
3521
|
+
runbook_name=args.get("runbook_name")
|
|
3522
|
+
)
|
|
3523
|
+
self._mcp_logger.info(
|
|
3524
|
+
f"Tool {tool_name} executed: agent '{args['agent_id']}' executing runbook"
|
|
3525
|
+
)
|
|
3526
|
+
return result
|
|
3527
|
+
else:
|
|
3528
|
+
self._mcp_logger.error(
|
|
3529
|
+
f"Tool not available or client not configured: {tool_name}"
|
|
3530
|
+
)
|
|
3531
|
+
raise ValueError(f"Tool not available: {tool_name}")
|
|
3532
|
+
|
|
3533
|
+
|
|
3534
|
+
async def _read_stdio():
|
|
3535
|
+
"""
|
|
3536
|
+
Read lines from stdin asynchronously.
|
|
3537
|
+
|
|
3538
|
+
Uses a thread-based approach that works with both pipes and TTY stdin,
|
|
3539
|
+
which is required for MCP server compatibility.
|
|
3540
|
+
"""
|
|
3541
|
+
import queue
|
|
3542
|
+
import threading
|
|
3543
|
+
|
|
3544
|
+
mcp_logger = logging.getLogger("sami.mcp")
|
|
3545
|
+
mcp_logger.debug("Starting stdin reader")
|
|
3546
|
+
|
|
3547
|
+
line_queue: queue.Queue = queue.Queue()
|
|
3548
|
+
|
|
3549
|
+
def read_stdin():
|
|
3550
|
+
"""Read from stdin synchronously in a background thread."""
|
|
3551
|
+
try:
|
|
3552
|
+
while True:
|
|
3553
|
+
line = sys.stdin.readline()
|
|
3554
|
+
if not line:
|
|
3555
|
+
line_queue.put(None) # Signal EOF
|
|
3556
|
+
break
|
|
3557
|
+
line_queue.put(line.rstrip('\n\r'))
|
|
3558
|
+
except Exception as e:
|
|
3559
|
+
line_queue.put(e)
|
|
3560
|
+
except (EOFError, BrokenPipeError):
|
|
3561
|
+
line_queue.put(None) # Signal EOF
|
|
3562
|
+
|
|
3563
|
+
thread = threading.Thread(target=read_stdin, daemon=True)
|
|
3564
|
+
thread.start()
|
|
3565
|
+
|
|
3566
|
+
while True:
|
|
3567
|
+
try:
|
|
3568
|
+
# Poll the queue for items
|
|
3569
|
+
try:
|
|
3570
|
+
item = line_queue.get_nowait()
|
|
3571
|
+
except queue.Empty:
|
|
3572
|
+
# Check if thread is still alive
|
|
3573
|
+
if not thread.is_alive():
|
|
3574
|
+
mcp_logger.debug("stdin reader thread died")
|
|
3575
|
+
break
|
|
3576
|
+
# Wait a bit before checking again
|
|
3577
|
+
await asyncio.sleep(0.01)
|
|
3578
|
+
continue
|
|
3579
|
+
|
|
3580
|
+
if item is None: # EOF
|
|
3581
|
+
mcp_logger.debug("stdin closed (EOF)")
|
|
3582
|
+
break
|
|
3583
|
+
if isinstance(item, Exception):
|
|
3584
|
+
raise item
|
|
3585
|
+
if item: # Skip empty lines
|
|
3586
|
+
mcp_logger.debug(f"Received line from stdin ({len(item)} chars): {item[:200]}...")
|
|
3587
|
+
# Try to parse and log structure if it's JSON
|
|
3588
|
+
try:
|
|
3589
|
+
parsed = json.loads(item)
|
|
3590
|
+
mcp_logger.debug(f"Parsed JSON structure - method: {parsed.get('method')}, has_id: {'id' in parsed}, id_value: {parsed.get('id')}")
|
|
3591
|
+
except:
|
|
3592
|
+
pass # Not JSON, that's okay
|
|
3593
|
+
yield item
|
|
3594
|
+
except asyncio.CancelledError:
|
|
3595
|
+
mcp_logger.debug("stdin reader cancelled")
|
|
3596
|
+
break
|
|
3597
|
+
except Exception as e:
|
|
3598
|
+
mcp_logger.error(f"Error reading from stdin: {e}", exc_info=True)
|
|
3599
|
+
logger.error(f"Error reading from stdin: {e}", exc_info=True)
|
|
3600
|
+
break
|
|
3601
|
+
|
|
3602
|
+
|
|
3603
|
+
async def main() -> None:
|
|
3604
|
+
"""Main entry point for the MCP server."""
|
|
3605
|
+
# Load configuration from JSON file only (ignore .env)
|
|
3606
|
+
import json
|
|
3607
|
+
from pathlib import Path
|
|
3608
|
+
from ..core.config_storage import _dict_to_config
|
|
3609
|
+
|
|
3610
|
+
# Find config.json relative to project root (where this file is located)
|
|
3611
|
+
# This ensures it works regardless of the current working directory
|
|
3612
|
+
# Path: src/mcp/mcp_server.py -> src/mcp/ -> src/ -> project root
|
|
3613
|
+
project_root = Path(__file__).parent.parent.parent
|
|
3614
|
+
config_file = project_root / "config.json"
|
|
3615
|
+
config = None
|
|
3616
|
+
|
|
3617
|
+
try:
|
|
3618
|
+
if config_file.exists():
|
|
3619
|
+
with open(config_file, "r") as f:
|
|
3620
|
+
data = json.load(f)
|
|
3621
|
+
config = _dict_to_config(data)
|
|
3622
|
+
logger.info(f"Configuration loaded successfully from {config_file}")
|
|
3623
|
+
else:
|
|
3624
|
+
logger.warning(f"config.json not found at {config_file}, using defaults")
|
|
3625
|
+
from ..core.config import LoggingConfig
|
|
3626
|
+
config = SamiConfig(
|
|
3627
|
+
thehive=None,
|
|
3628
|
+
iris=None,
|
|
3629
|
+
elastic=None,
|
|
3630
|
+
edr=None,
|
|
3631
|
+
logging=LoggingConfig(),
|
|
3632
|
+
)
|
|
3633
|
+
except json.JSONDecodeError as e:
|
|
3634
|
+
logger.error(f"Invalid JSON in {config_file}: {e}", exc_info=True)
|
|
3635
|
+
from ..core.config import LoggingConfig
|
|
3636
|
+
config = SamiConfig(
|
|
3637
|
+
thehive=None,
|
|
3638
|
+
iris=None,
|
|
3639
|
+
elastic=None,
|
|
3640
|
+
edr=None,
|
|
3641
|
+
logging=LoggingConfig(),
|
|
3642
|
+
)
|
|
3643
|
+
except Exception as e:
|
|
3644
|
+
logger.error(f"Failed to load {config_file}, using defaults: {e}", exc_info=True)
|
|
3645
|
+
from ..core.config import LoggingConfig
|
|
3646
|
+
config = SamiConfig(
|
|
3647
|
+
thehive=None,
|
|
3648
|
+
iris=None,
|
|
3649
|
+
elastic=None,
|
|
3650
|
+
edr=None,
|
|
3651
|
+
logging=LoggingConfig(),
|
|
3652
|
+
)
|
|
3653
|
+
|
|
3654
|
+
configure_logging(config.logging)
|
|
3655
|
+
|
|
3656
|
+
# Configure dedicated MCP logging
|
|
3657
|
+
mcp_log_dir = config.logging.log_dir if config.logging else "logs"
|
|
3658
|
+
configure_mcp_logging(mcp_log_dir)
|
|
3659
|
+
|
|
3660
|
+
logger.info("Starting SamiGPT MCP Server...")
|
|
3661
|
+
mcp_logger = logging.getLogger("sami.mcp")
|
|
3662
|
+
mcp_logger.info("=" * 80)
|
|
3663
|
+
mcp_logger.info("MCP Server Starting")
|
|
3664
|
+
mcp_logger.info("=" * 80)
|
|
3665
|
+
|
|
3666
|
+
# Initialize clients
|
|
3667
|
+
case_client = None
|
|
3668
|
+
|
|
3669
|
+
# Log configuration status
|
|
3670
|
+
mcp_logger.info("Configuration Status:")
|
|
3671
|
+
mcp_logger.info(f" IRIS configured: {config.iris is not None}")
|
|
3672
|
+
if config.iris:
|
|
3673
|
+
mcp_logger.info(f" IRIS URL: {config.iris.base_url}")
|
|
3674
|
+
mcp_logger.info(f" IRIS API key: {'*' * 20}...{config.iris.api_key[-10:] if len(config.iris.api_key) > 10 else '***'}")
|
|
3675
|
+
mcp_logger.info(f" TheHive configured: {config.thehive is not None}")
|
|
3676
|
+
mcp_logger.info(f" Elastic configured: {config.elastic is not None}")
|
|
3677
|
+
mcp_logger.info(f" EDR configured: {config.edr is not None}")
|
|
3678
|
+
mcp_logger.info(f" CTI configured: {config.cti is not None}")
|
|
3679
|
+
|
|
3680
|
+
# Prioritize IRIS if both are configured
|
|
3681
|
+
if config.iris:
|
|
3682
|
+
try:
|
|
3683
|
+
mcp_logger.info("Attempting to initialize IRIS case management client...")
|
|
3684
|
+
case_client = IRISCaseManagementClient.from_config(config)
|
|
3685
|
+
logger.info("IRIS case management client initialized")
|
|
3686
|
+
mcp_logger.info("✓ IRIS case management client initialized successfully")
|
|
3687
|
+
except Exception as e:
|
|
3688
|
+
logger.error(f"Failed to initialize IRIS client: {e}")
|
|
3689
|
+
mcp_logger.error(f"✗ Failed to initialize IRIS client: {e}", exc_info=True)
|
|
3690
|
+
elif config.thehive:
|
|
3691
|
+
try:
|
|
3692
|
+
mcp_logger.info("Attempting to initialize TheHive case management client...")
|
|
3693
|
+
case_client = TheHiveCaseManagementClient.from_config(config)
|
|
3694
|
+
logger.info("TheHive case management client initialized")
|
|
3695
|
+
mcp_logger.info("✓ TheHive case management client initialized successfully")
|
|
3696
|
+
except Exception as e:
|
|
3697
|
+
logger.error(f"Failed to initialize TheHive client: {e}")
|
|
3698
|
+
mcp_logger.error(f"✗ Failed to initialize TheHive client: {e}", exc_info=True)
|
|
3699
|
+
else:
|
|
3700
|
+
mcp_logger.warning("No case management system configured (neither IRIS nor TheHive)")
|
|
3701
|
+
|
|
3702
|
+
# Initialize SIEM client
|
|
3703
|
+
siem_client = None
|
|
3704
|
+
if config.elastic:
|
|
3705
|
+
try:
|
|
3706
|
+
mcp_logger.info("Attempting to initialize Elastic SIEM client...")
|
|
3707
|
+
siem_client = ElasticSIEMClient.from_config(config)
|
|
3708
|
+
logger.info("Elastic SIEM client initialized")
|
|
3709
|
+
mcp_logger.info("✓ Elastic SIEM client initialized successfully")
|
|
3710
|
+
if config.elastic:
|
|
3711
|
+
mcp_logger.info(f" Elastic URL: {config.elastic.base_url}")
|
|
3712
|
+
mcp_logger.info(f" Elastic API key: {'*' * 20}...{config.elastic.api_key[-10:] if config.elastic.api_key and len(config.elastic.api_key) > 10 else '***'}")
|
|
3713
|
+
except Exception as e:
|
|
3714
|
+
logger.error(f"Failed to initialize Elastic SIEM client: {e}")
|
|
3715
|
+
mcp_logger.error(f"✗ Failed to initialize Elastic SIEM client: {e}", exc_info=True)
|
|
3716
|
+
|
|
3717
|
+
# Initialize EDR client
|
|
3718
|
+
edr_client = None
|
|
3719
|
+
if config.edr:
|
|
3720
|
+
if config.edr.edr_type == "elastic_defend":
|
|
3721
|
+
try:
|
|
3722
|
+
mcp_logger.info("Attempting to initialize Elastic Defend EDR client...")
|
|
3723
|
+
edr_client = ElasticDefendEDRClient.from_config(config)
|
|
3724
|
+
logger.info("Elastic Defend EDR client initialized")
|
|
3725
|
+
mcp_logger.info("✓ Elastic Defend EDR client initialized successfully")
|
|
3726
|
+
if config.edr:
|
|
3727
|
+
mcp_logger.info(f" EDR URL: {config.edr.base_url}")
|
|
3728
|
+
mcp_logger.info(f" EDR Type: {config.edr.edr_type}")
|
|
3729
|
+
mcp_logger.info(f" EDR API key: {'*' * 20}...{config.edr.api_key[-10:] if config.edr.api_key and len(config.edr.api_key) > 10 else '***'}")
|
|
3730
|
+
except Exception as e:
|
|
3731
|
+
logger.error(f"Failed to initialize Elastic Defend EDR client: {e}")
|
|
3732
|
+
mcp_logger.error(f"✗ Failed to initialize Elastic Defend EDR client: {e}", exc_info=True)
|
|
3733
|
+
else:
|
|
3734
|
+
logger.info(
|
|
3735
|
+
f"EDR configuration found ({config.edr.edr_type}), but integration not yet implemented"
|
|
3736
|
+
)
|
|
3737
|
+
mcp_logger.warning(
|
|
3738
|
+
f"EDR type '{config.edr.edr_type}' is not yet implemented. Only 'elastic_defend' is supported."
|
|
3739
|
+
)
|
|
3740
|
+
|
|
3741
|
+
# Initialize CTI client(s) - support both single and multiple platforms
|
|
3742
|
+
cti_clients = []
|
|
3743
|
+
cti_client = None # For backward compatibility
|
|
3744
|
+
|
|
3745
|
+
# Check for main CTI config
|
|
3746
|
+
if config.cti:
|
|
3747
|
+
if config.cti.cti_type == "local_tip":
|
|
3748
|
+
try:
|
|
3749
|
+
mcp_logger.info("Attempting to initialize Local TIP CTI client...")
|
|
3750
|
+
local_tip_client = LocalTipCTIClient.from_config(config)
|
|
3751
|
+
cti_clients.append(local_tip_client)
|
|
3752
|
+
cti_client = local_tip_client # For backward compatibility
|
|
3753
|
+
logger.info("Local TIP CTI client initialized")
|
|
3754
|
+
mcp_logger.info("✓ Local TIP CTI client initialized successfully")
|
|
3755
|
+
mcp_logger.info(f" CTI URL: {config.cti.base_url}")
|
|
3756
|
+
mcp_logger.info(f" CTI Type: {config.cti.cti_type}")
|
|
3757
|
+
except Exception as e:
|
|
3758
|
+
logger.error(f"Failed to initialize Local TIP CTI client: {e}")
|
|
3759
|
+
mcp_logger.error(f"✗ Failed to initialize Local TIP CTI client: {e}", exc_info=True)
|
|
3760
|
+
elif config.cti.cti_type == "opencti":
|
|
3761
|
+
try:
|
|
3762
|
+
mcp_logger.info("Attempting to initialize OpenCTI client...")
|
|
3763
|
+
opencti_client = OpenCTIClient.from_config(config)
|
|
3764
|
+
cti_clients.append(opencti_client)
|
|
3765
|
+
cti_client = opencti_client # For backward compatibility
|
|
3766
|
+
logger.info("OpenCTI client initialized")
|
|
3767
|
+
mcp_logger.info("✓ OpenCTI client initialized successfully")
|
|
3768
|
+
mcp_logger.info(f" CTI URL: {config.cti.base_url}")
|
|
3769
|
+
mcp_logger.info(f" CTI Type: {config.cti.cti_type}")
|
|
3770
|
+
except Exception as e:
|
|
3771
|
+
logger.error(f"Failed to initialize OpenCTI client: {e}")
|
|
3772
|
+
mcp_logger.error(f"✗ Failed to initialize OpenCTI client: {e}", exc_info=True)
|
|
3773
|
+
else:
|
|
3774
|
+
logger.info(
|
|
3775
|
+
f"CTI configuration found ({config.cti.cti_type}), but integration not yet implemented"
|
|
3776
|
+
)
|
|
3777
|
+
mcp_logger.warning(
|
|
3778
|
+
f"CTI type '{config.cti.cti_type}' is not yet implemented. Supported types: 'local_tip', 'opencti'."
|
|
3779
|
+
)
|
|
3780
|
+
|
|
3781
|
+
# Check for additional CTI config (cti_opencti) to support both platforms
|
|
3782
|
+
# This allows config.json to have both "cti" (local_tip) and "cti_opencti" (opencti)
|
|
3783
|
+
config_dict = None
|
|
3784
|
+
try:
|
|
3785
|
+
from ..core.config_storage import load_config_from_file
|
|
3786
|
+
import json
|
|
3787
|
+
import os
|
|
3788
|
+
config_file = os.getenv("SAMIGPT_CONFIG_FILE", "config.json")
|
|
3789
|
+
if os.path.exists(config_file):
|
|
3790
|
+
with open(config_file, "r") as f:
|
|
3791
|
+
config_dict = json.load(f)
|
|
3792
|
+
except Exception:
|
|
3793
|
+
pass # If we can't load config dict, that's okay
|
|
3794
|
+
|
|
3795
|
+
if config_dict and "cti_opencti" in config_dict:
|
|
3796
|
+
cti_opencti_config = config_dict["cti_opencti"]
|
|
3797
|
+
if cti_opencti_config.get("cti_type") == "opencti":
|
|
3798
|
+
try:
|
|
3799
|
+
# Create a temporary config with OpenCTI settings
|
|
3800
|
+
from ..core.config import CTIConfig, SamiConfig
|
|
3801
|
+
opencti_config = CTIConfig(
|
|
3802
|
+
cti_type="opencti",
|
|
3803
|
+
base_url=cti_opencti_config.get("base_url"),
|
|
3804
|
+
api_key=cti_opencti_config.get("api_key"),
|
|
3805
|
+
timeout_seconds=cti_opencti_config.get("timeout_seconds", 30),
|
|
3806
|
+
verify_ssl=cti_opencti_config.get("verify_ssl", True),
|
|
3807
|
+
)
|
|
3808
|
+
temp_config = SamiConfig(cti=opencti_config)
|
|
3809
|
+
|
|
3810
|
+
mcp_logger.info("Attempting to initialize additional OpenCTI client...")
|
|
3811
|
+
opencti_client = OpenCTIClient.from_config(temp_config)
|
|
3812
|
+
# Only add if we don't already have an OpenCTI client
|
|
3813
|
+
if not any("OpenCTI" in c.__class__.__name__ for c in cti_clients):
|
|
3814
|
+
cti_clients.append(opencti_client)
|
|
3815
|
+
logger.info("Additional OpenCTI client initialized")
|
|
3816
|
+
mcp_logger.info("✓ Additional OpenCTI client initialized successfully")
|
|
3817
|
+
mcp_logger.info(f" CTI URL: {opencti_config.base_url}")
|
|
3818
|
+
except Exception as e:
|
|
3819
|
+
logger.error(f"Failed to initialize additional OpenCTI client: {e}")
|
|
3820
|
+
mcp_logger.error(f"✗ Failed to initialize additional OpenCTI client: {e}", exc_info=True)
|
|
3821
|
+
|
|
3822
|
+
# Also check for cti_local_tip if main cti is opencti
|
|
3823
|
+
if config_dict and "cti_local_tip" in config_dict:
|
|
3824
|
+
cti_local_tip_config = config_dict["cti_local_tip"]
|
|
3825
|
+
if cti_local_tip_config.get("cti_type") == "local_tip":
|
|
3826
|
+
try:
|
|
3827
|
+
from ..core.config import CTIConfig, SamiConfig
|
|
3828
|
+
local_tip_config = CTIConfig(
|
|
3829
|
+
cti_type="local_tip",
|
|
3830
|
+
base_url=cti_local_tip_config.get("base_url"),
|
|
3831
|
+
api_key=cti_local_tip_config.get("api_key"),
|
|
3832
|
+
timeout_seconds=cti_local_tip_config.get("timeout_seconds", 30),
|
|
3833
|
+
verify_ssl=cti_local_tip_config.get("verify_ssl", False),
|
|
3834
|
+
)
|
|
3835
|
+
temp_config = SamiConfig(cti=local_tip_config)
|
|
3836
|
+
|
|
3837
|
+
mcp_logger.info("Attempting to initialize additional Local TIP client...")
|
|
3838
|
+
local_tip_client = LocalTipCTIClient.from_config(temp_config)
|
|
3839
|
+
# Only add if we don't already have a Local TIP client
|
|
3840
|
+
if not any("LocalTip" in c.__class__.__name__ for c in cti_clients):
|
|
3841
|
+
cti_clients.append(local_tip_client)
|
|
3842
|
+
logger.info("Additional Local TIP client initialized")
|
|
3843
|
+
mcp_logger.info("✓ Additional Local TIP client initialized successfully")
|
|
3844
|
+
mcp_logger.info(f" CTI URL: {local_tip_config.base_url}")
|
|
3845
|
+
except Exception as e:
|
|
3846
|
+
logger.error(f"Failed to initialize additional Local TIP client: {e}")
|
|
3847
|
+
mcp_logger.error(f"✗ Failed to initialize additional Local TIP client: {e}", exc_info=True)
|
|
3848
|
+
|
|
3849
|
+
if len(cti_clients) > 1:
|
|
3850
|
+
mcp_logger.info(f"✓ Multiple CTI platforms configured: {len(cti_clients)} platforms will be queried concurrently")
|
|
3851
|
+
|
|
3852
|
+
# Initialize Engineering client (Trello, ClickUp, or GitHub)
|
|
3853
|
+
eng_client = None
|
|
3854
|
+
if config.eng:
|
|
3855
|
+
provider = config.eng.provider.lower() if config.eng.provider else "trello"
|
|
3856
|
+
|
|
3857
|
+
if provider == "github" and config.eng.github:
|
|
3858
|
+
try:
|
|
3859
|
+
eng_client = GitHubClient.from_config(config)
|
|
3860
|
+
mcp_logger.info("✓ GitHub (Engineering) client initialized")
|
|
3861
|
+
except Exception as e:
|
|
3862
|
+
mcp_logger.warning(f"Failed to initialize GitHub client: {e}")
|
|
3863
|
+
elif provider == "clickup" and config.eng.clickup:
|
|
3864
|
+
try:
|
|
3865
|
+
eng_client = ClickUpClient.from_config(config)
|
|
3866
|
+
mcp_logger.info("✓ ClickUp (Engineering) client initialized")
|
|
3867
|
+
except Exception as e:
|
|
3868
|
+
mcp_logger.warning(f"Failed to initialize ClickUp client: {e}")
|
|
3869
|
+
elif provider == "trello" and config.eng.trello:
|
|
3870
|
+
try:
|
|
3871
|
+
eng_client = TrelloClient.from_config(config)
|
|
3872
|
+
mcp_logger.info("✓ Trello (Engineering) client initialized")
|
|
3873
|
+
except Exception as e:
|
|
3874
|
+
mcp_logger.warning(f"Failed to initialize Trello client: {e}")
|
|
3875
|
+
else:
|
|
3876
|
+
# Try to auto-detect based on what's configured (priority: GitHub > ClickUp > Trello)
|
|
3877
|
+
if config.eng.github:
|
|
3878
|
+
try:
|
|
3879
|
+
eng_client = GitHubClient.from_config(config)
|
|
3880
|
+
mcp_logger.info("✓ GitHub (Engineering) client initialized (auto-detected)")
|
|
3881
|
+
except Exception as e:
|
|
3882
|
+
mcp_logger.warning(f"Failed to initialize GitHub client: {e}")
|
|
3883
|
+
elif config.eng.clickup:
|
|
3884
|
+
try:
|
|
3885
|
+
eng_client = ClickUpClient.from_config(config)
|
|
3886
|
+
mcp_logger.info("✓ ClickUp (Engineering) client initialized (auto-detected)")
|
|
3887
|
+
except Exception as e:
|
|
3888
|
+
mcp_logger.warning(f"Failed to initialize ClickUp client: {e}")
|
|
3889
|
+
elif config.eng.trello:
|
|
3890
|
+
try:
|
|
3891
|
+
eng_client = TrelloClient.from_config(config)
|
|
3892
|
+
mcp_logger.info("✓ Trello (Engineering) client initialized (auto-detected)")
|
|
3893
|
+
except Exception as e:
|
|
3894
|
+
mcp_logger.warning(f"Failed to initialize Trello client: {e}")
|
|
3895
|
+
|
|
3896
|
+
# Create MCP server
|
|
3897
|
+
server = SamiGPTMCPServer(
|
|
3898
|
+
case_client=case_client,
|
|
3899
|
+
siem_client=siem_client,
|
|
3900
|
+
edr_client=edr_client,
|
|
3901
|
+
cti_client=cti_client, # For backward compatibility
|
|
3902
|
+
cti_clients=cti_clients if len(cti_clients) > 0 else None, # Pass list of clients
|
|
3903
|
+
eng_client=eng_client,
|
|
3904
|
+
)
|
|
3905
|
+
|
|
3906
|
+
# Log tool registration summary
|
|
3907
|
+
total_tools = len(server.tools)
|
|
3908
|
+
logger.info(f"MCP server initialized with {total_tools} tools")
|
|
3909
|
+
mcp_logger.info("=" * 80)
|
|
3910
|
+
mcp_logger.info(f"Tool Registration Summary:")
|
|
3911
|
+
mcp_logger.info(f" Total tools available: {total_tools}")
|
|
3912
|
+
mcp_logger.info(f" Case Management: {'✓ Configured' if case_client else '✗ Not configured (8 tools unavailable)'}")
|
|
3913
|
+
mcp_logger.info(f" SIEM: {'✓ Configured' if siem_client else '✗ Not configured (16 tools unavailable)'}")
|
|
3914
|
+
mcp_logger.info(f" EDR: {'✓ Configured' if edr_client else '✗ Not configured (6 tools unavailable)'}")
|
|
3915
|
+
mcp_logger.info(f" CTI: {'✓ Configured' if cti_client else '✗ Not configured (1 tool unavailable)'}")
|
|
3916
|
+
eng_provider = "None"
|
|
3917
|
+
if eng_client:
|
|
3918
|
+
if isinstance(eng_client, GitHubClient):
|
|
3919
|
+
eng_provider = "GitHub"
|
|
3920
|
+
elif isinstance(eng_client, ClickUpClient):
|
|
3921
|
+
eng_provider = "ClickUp"
|
|
3922
|
+
elif isinstance(eng_client, TrelloClient):
|
|
3923
|
+
eng_provider = "Trello"
|
|
3924
|
+
mcp_logger.info(f" Engineering ({eng_provider}): {'✓ Configured' if eng_client else '✗ Not configured (2 tools unavailable)'}")
|
|
3925
|
+
mcp_logger.info(f" Rules Engine: ✓ Always available (2 tools)")
|
|
3926
|
+
mcp_logger.info("=" * 80)
|
|
3927
|
+
|
|
3928
|
+
if total_tools == 2:
|
|
3929
|
+
mcp_logger.warning(
|
|
3930
|
+
"⚠️ Only rules engine tools are available. "
|
|
3931
|
+
"Configure integrations in config.json to enable case management, SIEM, and EDR tools. "
|
|
3932
|
+
"Use the web configuration UI: python -m src.web.config_server"
|
|
3933
|
+
)
|
|
3934
|
+
|
|
3935
|
+
# Run MCP server (stdio mode)
|
|
3936
|
+
try:
|
|
3937
|
+
async for line in _read_stdio():
|
|
3938
|
+
try:
|
|
3939
|
+
if not line or not line.strip():
|
|
3940
|
+
continue
|
|
3941
|
+
|
|
3942
|
+
# Parse JSON request
|
|
3943
|
+
try:
|
|
3944
|
+
request = json.loads(line)
|
|
3945
|
+
except json.JSONDecodeError as e:
|
|
3946
|
+
mcp_logger.error(
|
|
3947
|
+
f"Invalid JSON received: {line[:200]}... Error: {e}"
|
|
3948
|
+
)
|
|
3949
|
+
logger.error(f"Invalid JSON: {line[:100]}... Error: {e}")
|
|
3950
|
+
# Send error response for parse errors
|
|
3951
|
+
error_response = {
|
|
3952
|
+
"jsonrpc": "2.0",
|
|
3953
|
+
"error": {
|
|
3954
|
+
"code": -32700,
|
|
3955
|
+
"message": "Parse error",
|
|
3956
|
+
},
|
|
3957
|
+
}
|
|
3958
|
+
sys.stdout.write(json.dumps(error_response, ensure_ascii=False) + "\n")
|
|
3959
|
+
sys.stdout.flush()
|
|
3960
|
+
continue
|
|
3961
|
+
|
|
3962
|
+
# Log what we're about to process
|
|
3963
|
+
mcp_logger.debug(f"Processing request: {json.dumps(request)[:500]}")
|
|
3964
|
+
|
|
3965
|
+
response = await server.handle_request(request)
|
|
3966
|
+
|
|
3967
|
+
# Handle notifications (they don't get responses)
|
|
3968
|
+
if response is None:
|
|
3969
|
+
mcp_logger.debug("Request was a notification, no response sent")
|
|
3970
|
+
continue
|
|
3971
|
+
|
|
3972
|
+
# Ensure response is properly formatted and sent
|
|
3973
|
+
if response:
|
|
3974
|
+
try:
|
|
3975
|
+
# Log response before sending
|
|
3976
|
+
response_preview = json.dumps(response, ensure_ascii=False)[:500]
|
|
3977
|
+
mcp_logger.debug(f"Preparing to send response: {response_preview}")
|
|
3978
|
+
|
|
3979
|
+
# Serialize response to JSON (ensure_ascii=False for proper Unicode)
|
|
3980
|
+
response_json = json.dumps(response, ensure_ascii=False)
|
|
3981
|
+
# Write directly to stdout with explicit newline and flush
|
|
3982
|
+
sys.stdout.write(response_json + "\n")
|
|
3983
|
+
sys.stdout.flush()
|
|
3984
|
+
|
|
3985
|
+
# Log successful response sending
|
|
3986
|
+
# Extract request_id properly (only if valid)
|
|
3987
|
+
request_id = None
|
|
3988
|
+
if "id" in request and request["id"] is not None:
|
|
3989
|
+
request_id = request["id"]
|
|
3990
|
+
method = request.get("method")
|
|
3991
|
+
|
|
3992
|
+
mcp_logger.info(
|
|
3993
|
+
f"RESPONSE [id={request_id}] {method} sent successfully: {len(response_json)} bytes"
|
|
3994
|
+
)
|
|
3995
|
+
|
|
3996
|
+
if method == "tools/list":
|
|
3997
|
+
tools_count = len(response.get("result", {}).get("tools", []))
|
|
3998
|
+
mcp_logger.info(
|
|
3999
|
+
f"RESPONSE [id={request_id}] tools/list sent successfully with {tools_count} tools"
|
|
4000
|
+
)
|
|
4001
|
+
|
|
4002
|
+
# After initialize response, send initialized notification
|
|
4003
|
+
if method == "initialize":
|
|
4004
|
+
initialized_notification = {
|
|
4005
|
+
"jsonrpc": "2.0",
|
|
4006
|
+
"method": "notifications/initialized",
|
|
4007
|
+
"params": {}
|
|
4008
|
+
}
|
|
4009
|
+
notification_json = json.dumps(initialized_notification, ensure_ascii=False)
|
|
4010
|
+
mcp_logger.info(
|
|
4011
|
+
f"Sending initialized notification (no id field): {notification_json}"
|
|
4012
|
+
)
|
|
4013
|
+
sys.stdout.write(notification_json + "\n")
|
|
4014
|
+
sys.stdout.flush()
|
|
4015
|
+
mcp_logger.debug("Initialized notification sent successfully")
|
|
4016
|
+
except (TypeError, ValueError) as json_error:
|
|
4017
|
+
# JSON serialization error
|
|
4018
|
+
mcp_logger.error(
|
|
4019
|
+
f"JSON serialization error for response: {json_error}",
|
|
4020
|
+
exc_info=True,
|
|
4021
|
+
)
|
|
4022
|
+
logger.error(f"JSON serialization error: {json_error}", exc_info=True)
|
|
4023
|
+
# Send error response
|
|
4024
|
+
# Extract request_id properly (only if valid)
|
|
4025
|
+
request_id = None
|
|
4026
|
+
if isinstance(request, dict) and "id" in request and request["id"] is not None:
|
|
4027
|
+
request_id = request["id"]
|
|
4028
|
+
error_response = server._create_error_response(
|
|
4029
|
+
request_id,
|
|
4030
|
+
-32603,
|
|
4031
|
+
f"Internal error: Failed to serialize response: {str(json_error)}",
|
|
4032
|
+
)
|
|
4033
|
+
sys.stdout.write(
|
|
4034
|
+
json.dumps(error_response, ensure_ascii=False) + "\n"
|
|
4035
|
+
)
|
|
4036
|
+
sys.stdout.flush()
|
|
4037
|
+
else:
|
|
4038
|
+
# No response returned - should not happen
|
|
4039
|
+
mcp_logger.warning(
|
|
4040
|
+
f"No response returned for request: {request.get('method')}"
|
|
4041
|
+
)
|
|
4042
|
+
|
|
4043
|
+
except Exception as e:
|
|
4044
|
+
mcp_logger.error(
|
|
4045
|
+
f"Error processing request: {e}", exc_info=True
|
|
4046
|
+
)
|
|
4047
|
+
logger.error(f"Error processing request: {e}", exc_info=True)
|
|
4048
|
+
# Send error response
|
|
4049
|
+
# Extract request_id properly (only if valid)
|
|
4050
|
+
request_id = None
|
|
4051
|
+
if isinstance(request, dict) and "id" in request:
|
|
4052
|
+
req_id = request["id"]
|
|
4053
|
+
# Only include id if it's a valid value (string or number, not None/null)
|
|
4054
|
+
if req_id is not None:
|
|
4055
|
+
request_id = req_id
|
|
4056
|
+
|
|
4057
|
+
error_response = server._create_error_response(
|
|
4058
|
+
request_id,
|
|
4059
|
+
-32603,
|
|
4060
|
+
f"Internal error: {str(e)}",
|
|
4061
|
+
)
|
|
4062
|
+
|
|
4063
|
+
try:
|
|
4064
|
+
sys.stdout.write(
|
|
4065
|
+
json.dumps(error_response, ensure_ascii=False) + "\n"
|
|
4066
|
+
)
|
|
4067
|
+
sys.stdout.flush()
|
|
4068
|
+
except Exception as print_error:
|
|
4069
|
+
# Last resort - write raw error
|
|
4070
|
+
mcp_logger.critical(f"Failed to send error response: {print_error}")
|
|
4071
|
+
logger.critical(f"Failed to send error response: {print_error}")
|
|
4072
|
+
except KeyboardInterrupt:
|
|
4073
|
+
mcp_logger = logging.getLogger("sami.mcp")
|
|
4074
|
+
mcp_logger.info("=" * 80)
|
|
4075
|
+
mcp_logger.info("MCP Server Shutting Down (KeyboardInterrupt)")
|
|
4076
|
+
mcp_logger.info("=" * 80)
|
|
4077
|
+
logger.info("MCP server shutting down...")
|
|
4078
|
+
except Exception as e:
|
|
4079
|
+
mcp_logger = logging.getLogger("sami.mcp")
|
|
4080
|
+
mcp_logger.critical(f"FATAL ERROR in MCP server: {e}", exc_info=True)
|
|
4081
|
+
logger.error(f"Fatal error in MCP server: {e}", exc_info=True)
|
|
4082
|
+
sys.exit(1)
|
|
4083
|
+
|
|
4084
|
+
|
|
4085
|
+
# Script entry point: run the MCP stdio server's top-level coroutine.
# `asyncio.run` creates a fresh event loop, runs `main()` to completion
# (or until KeyboardInterrupt / fatal error, both handled inside main),
# and then closes the loop.
if __name__ == "__main__":
    asyncio.run(main())
|