suprema-biostar-mcp 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- biostar_x_mcp_server/__init__.py +25 -0
- biostar_x_mcp_server/__main__.py +15 -0
- biostar_x_mcp_server/config.py +87 -0
- biostar_x_mcp_server/handlers/__init__.py +35 -0
- biostar_x_mcp_server/handlers/access_handler.py +2162 -0
- biostar_x_mcp_server/handlers/audit_handler.py +489 -0
- biostar_x_mcp_server/handlers/auth_handler.py +216 -0
- biostar_x_mcp_server/handlers/base_handler.py +228 -0
- biostar_x_mcp_server/handlers/card_handler.py +746 -0
- biostar_x_mcp_server/handlers/device_handler.py +4344 -0
- biostar_x_mcp_server/handlers/door_handler.py +3969 -0
- biostar_x_mcp_server/handlers/event_handler.py +1331 -0
- biostar_x_mcp_server/handlers/file_handler.py +212 -0
- biostar_x_mcp_server/handlers/help_web_handler.py +379 -0
- biostar_x_mcp_server/handlers/log_handler.py +1051 -0
- biostar_x_mcp_server/handlers/navigation_handler.py +109 -0
- biostar_x_mcp_server/handlers/occupancy_handler.py +541 -0
- biostar_x_mcp_server/handlers/user_handler.py +3568 -0
- biostar_x_mcp_server/schemas/__init__.py +21 -0
- biostar_x_mcp_server/schemas/access.py +158 -0
- biostar_x_mcp_server/schemas/audit.py +73 -0
- biostar_x_mcp_server/schemas/auth.py +24 -0
- biostar_x_mcp_server/schemas/cards.py +128 -0
- biostar_x_mcp_server/schemas/devices.py +496 -0
- biostar_x_mcp_server/schemas/doors.py +306 -0
- biostar_x_mcp_server/schemas/events.py +104 -0
- biostar_x_mcp_server/schemas/files.py +7 -0
- biostar_x_mcp_server/schemas/help.py +29 -0
- biostar_x_mcp_server/schemas/logs.py +33 -0
- biostar_x_mcp_server/schemas/occupancy.py +19 -0
- biostar_x_mcp_server/schemas/tool_response.py +29 -0
- biostar_x_mcp_server/schemas/users.py +166 -0
- biostar_x_mcp_server/server.py +335 -0
- biostar_x_mcp_server/session.py +221 -0
- biostar_x_mcp_server/tool_manager.py +172 -0
- biostar_x_mcp_server/tools/__init__.py +45 -0
- biostar_x_mcp_server/tools/access.py +510 -0
- biostar_x_mcp_server/tools/audit.py +227 -0
- biostar_x_mcp_server/tools/auth.py +59 -0
- biostar_x_mcp_server/tools/cards.py +269 -0
- biostar_x_mcp_server/tools/categories.py +197 -0
- biostar_x_mcp_server/tools/devices.py +1552 -0
- biostar_x_mcp_server/tools/doors.py +865 -0
- biostar_x_mcp_server/tools/events.py +305 -0
- biostar_x_mcp_server/tools/files.py +28 -0
- biostar_x_mcp_server/tools/help.py +80 -0
- biostar_x_mcp_server/tools/logs.py +123 -0
- biostar_x_mcp_server/tools/navigation.py +89 -0
- biostar_x_mcp_server/tools/occupancy.py +91 -0
- biostar_x_mcp_server/tools/users.py +1113 -0
- biostar_x_mcp_server/utils/__init__.py +31 -0
- biostar_x_mcp_server/utils/category_mapper.py +206 -0
- biostar_x_mcp_server/utils/decorators.py +101 -0
- biostar_x_mcp_server/utils/language_detector.py +51 -0
- biostar_x_mcp_server/utils/search.py +42 -0
- biostar_x_mcp_server/utils/timezone.py +122 -0
- suprema_biostar_mcp-1.0.1.dist-info/METADATA +163 -0
- suprema_biostar_mcp-1.0.1.dist-info/RECORD +61 -0
- suprema_biostar_mcp-1.0.1.dist-info/WHEEL +4 -0
- suprema_biostar_mcp-1.0.1.dist-info/entry_points.txt +2 -0
- suprema_biostar_mcp-1.0.1.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
"""
|
|
2
|
+
File Handler - Handle uploaded files from requests
|
|
3
|
+
"""
|
|
4
|
+
import logging
|
|
5
|
+
import base64
|
|
6
|
+
import io
|
|
7
|
+
from typing import Dict, Any, List, Sequence
|
|
8
|
+
from mcp.types import TextContent
|
|
9
|
+
from biostar_x_mcp_server.handlers.base_handler import BaseHandler
|
|
10
|
+
|
|
11
|
+
# OCR is optional: pytesseract + Pillow may not be installed, and even when
# they are, the Tesseract binary itself may be missing. OCR_AVAILABLE records
# whether end-to-end OCR actually works on this machine.
try:
    import pytesseract
    from PIL import Image
    import os

    # On Windows, Tesseract is usually not on PATH; probe the common install
    # locations and point pytesseract at the first one that exists.
    if os.name == 'nt':  # Windows
        tesseract_paths = [
            r'C:\Program Files\Tesseract-OCR\tesseract.exe',
            r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe',
            r'C:\Users\{}\AppData\Local\Programs\Tesseract-OCR\tesseract.exe'.format(os.getenv('USERNAME', ''))
        ]

        for path in tesseract_paths:
            if os.path.exists(path):
                pytesseract.pytesseract.tesseract_cmd = path
                # Tesseract path configured
                break

    # Verify the Tesseract binary is actually callable, not merely importable.
    # (The returned version value is not needed, only that the call succeeds.)
    try:
        pytesseract.get_tesseract_version()
        OCR_AVAILABLE = True
    except Exception:
        # Binary missing or broken; OCR features will report unavailability.
        OCR_AVAILABLE = False

except ImportError:
    # pytesseract / Pillow not installed.
    OCR_AVAILABLE = False
|
|
43
|
+
|
|
44
|
+
logger = logging.getLogger(__name__)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class FileHandler(BaseHandler):
    """Handler for reading files that clients uploaded alongside a request.

    Uploaded files are held in memory, keyed by client session id. No BioStar
    API calls are made, so the handler is constructed without an API session.
    """

    def __init__(self):
        """Initialize FileHandler without session (no API calls needed)"""
        super().__init__(session=None)
        # session_id -> list of file dicts
        # (observed keys: filename, type, content, base64, preview, size,
        #  original_format, error)
        self.uploaded_files = {}  # Store uploaded files per session

    def set_uploaded_files(self, session_id: str, files: List[Dict[str, Any]]):
        """Store uploaded files for a session, replacing any previous set."""
        self.uploaded_files[session_id] = files
        logger.info(f" Stored {len(files)} files for session {session_id}")

    def _extract_text_from_image(self, base64_content: str) -> str:
        """Extract text from a base64-encoded image using Tesseract OCR.

        Returns the extracted text, or one of the sentinel messages
        ("No text found in image." / " Error occurred during OCR processing: ...")
        that read_uploaded_file inspects to tailor its response.
        """
        logger.info(" OCR started")

        if not OCR_AVAILABLE:
            logger.error(" OCR library not available")
            return " OCR library is not installed. Please install pytesseract and Pillow."

        try:
            # Decode the base64 payload and load it as a PIL image.
            image_bytes = base64.b64decode(base64_content)
            image = Image.open(io.BytesIO(image_bytes))

            # Run Tesseract OCR (English).
            text = pytesseract.image_to_string(image, lang='eng')

            result = text.strip() if text.strip() else "No text found in image."
            logger.info(" OCR completed")
            return result

        except Exception as e:
            logger.error(f" OCR error: {e}")
            return f" Error occurred during OCR processing: {str(e)}"

    async def read_uploaded_file(self, args: dict) -> "Sequence[TextContent]":
        """
        Read the content of an uploaded file

        Args:
            filename: Name of the uploaded file (matched case-insensitively)

        Returns:
            File content as text or base64, formatted per file type
        """
        filename = args.get("filename", "").strip()
        # BUG FIX: the filename placeholder was missing from the log and
        # response messages (they rendered the literal text "(unknown)");
        # interpolate the requested filename so output is meaningful.
        logger.info(f" Reading file: {filename}")

        if not filename:
            return [TextContent(type="text", text=" filename parameter is required")]

        # Session id is set externally on this handler; fall back to 'default'.
        session_id = getattr(self, 'client_session_id', 'default')

        # Check if files exist for this session
        if session_id not in self.uploaded_files:
            return [TextContent(type="text", text=" No uploaded files found for this session")]

        files = self.uploaded_files[session_id]

        # Find the file (case-insensitive filename match)
        file_data = None
        for f in files:
            if f.get("filename", "").lower() == filename.lower():
                file_data = f
                break

        if not file_data:
            available = [f.get("filename") for f in files]
            return [TextContent(
                type="text",
                text=f" File '{filename}' not found.\n"
                     f"Available files: {', '.join(available)}"
            )]

        # Return file content based on type
        file_type = file_data.get("type")

        if file_type == "csv":
            content = file_data.get("content", "")
            base64_content = file_data.get("base64", "")
            original_format = file_data.get("original_format", "csv")

            # XLSX uploads were converted to CSV upstream; label accordingly.
            file_type_label = "XLSX" if original_format == "xlsx" else "CSV"
            format_note = "(Converted from XLSX to CSV format)\n\n" if original_format == "xlsx" else ""

            return [TextContent(
                type="text",
                text=f" {file_type_label} file read successfully: {filename}\n\n"
                     f"File size: {file_data.get('size', 0)} bytes\n"
                     f"{format_note}"
                     f"Preview:\n```csv\n{file_data.get('preview', '')}\n```\n\n"
                     f"Full content:\n```csv\n{content}\n```\n\n"
                     f"Base64 (for import tools):\n{base64_content[:100]}... (total {len(base64_content)} chars)\n\n"
                     f" To add users, call import-users-csv-smart tool with:\n"
                     f"- file_text: (full content above)\n"
                     f"- original_file_name: {filename}"
            )]

        elif file_type == "pdf":
            base64_content = file_data.get("base64", "")
            return [TextContent(
                type="text",
                text=f" PDF file read successfully: {filename}\n\n"
                     f"File size: {file_data.get('size', 0)} bytes\n"
                     f"Base64 content:\n{base64_content[:100]}... (total {len(base64_content)} chars)\n\n"
                     f" PDF file requires text extraction."
            )]

        elif file_type == "image":
            base64_content = file_data.get("base64", "")

            # Extract text using OCR
            extracted_text = self._extract_text_from_image(base64_content)

            # Branch on the sentinel messages produced by
            # _extract_text_from_image to phrase a natural response.
            if "No text found in image." in extracted_text:
                response_text = f" Image file confirmed: {filename}\n\n"
                response_text += f"File size: {file_data.get('size', 0)} bytes\n\n"
                response_text += "This image appears to be primarily graphics or icons with unclear text.\n"
                response_text += "Please describe the image content directly for more accurate assistance."
            elif "Error occurred during OCR processing" in extracted_text:
                response_text = f" Image file confirmed: {filename}\n\n"
                response_text += f"File size: {file_data.get('size', 0)} bytes\n\n"
                response_text += "There was a temporary issue with image analysis.\n"
                response_text += "Please briefly describe the main content of the image for assistance."
            else:
                response_text = f" Image analysis completed: {filename}\n\n"
                response_text += f"File size: {file_data.get('size', 0)} bytes\n\n"
                response_text += f" Extracted text:\n```\n{extracted_text}\n```\n\n"
                response_text += " Please let me know what assistance you need based on the above content."

            return [TextContent(type="text", text=response_text)]

        elif file_type == "text":
            content = file_data.get("content", "")
            return [TextContent(
                type="text",
                text=f" Text file read successfully: {filename}\n\n"
                     f"File size: {file_data.get('size', 0)} bytes\n\n"
                     f"Content:\n```\n{content}\n```"
            )]

        elif file_type == "binary":
            base64_content = file_data.get("base64", "")
            return [TextContent(
                type="text",
                text=f" Binary file read successfully: {filename}\n\n"
                     f"File size: {file_data.get('size', 0)} bytes\n"
                     f"Base64 content:\n{base64_content[:100]}... (total {len(base64_content)} chars)"
            )]

        elif file_type == "error":
            error = file_data.get("error", "Unknown error")
            return [TextContent(
                type="text",
                text=f" File read error: {filename}\n\n"
                     f"Error: {error}"
            )]

        return [TextContent(type="text", text=f" Unknown file type: {file_type}")]
|
|
212
|
+
|
|
@@ -0,0 +1,379 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Web-based Help Handler for BioStar X MCP Server
|
|
3
|
+
Fetches documentation directly from docs.supremainc.com instead of using vector DB
|
|
4
|
+
"""
|
|
5
|
+
import logging
|
|
6
|
+
import json
|
|
7
|
+
import re
|
|
8
|
+
from typing import Sequence, Dict, Any, Optional, List
|
|
9
|
+
from datetime import datetime, timedelta
|
|
10
|
+
from mcp.types import TextContent
|
|
11
|
+
import httpx
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class HelpWebHandler:
    """Web-based documentation search handler using docs.supremainc.com.

    Maps query keywords to known documentation paths, fetches the matching
    pages over HTTP, strips them down to plain text, and caches page text
    in memory with a TTL.
    """

    DOCS_BASE = "https://docs.supremainc.com"

    # BioStar X documentation section mapping (keyword -> URL path).
    # NOTE: lookup is a substring match against the query, in insertion order.
    DOC_SECTIONS = {
        # Getting Started
        "overview": "/en/platform/biostar_x/overview",
        "install": "/en/platform/biostar_x/express-install",
        "custom install": "/en/platform/biostar_x/custom-install",
        "setup": "/en/platform/biostar_x/initial-setup-guide",
        "launcher": "/en/platform/biostar_x/ui-launcher",
        "server management": "/en/platform/biostar_x/server-management",

        # User Management
        "user": "/en/platform/biostar_x/user",
        "add user": "/en/platform/biostar_x/user/add-user",
        "edit user": "/en/platform/biostar_x/user/editing-user",
        "delete user": "/en/platform/biostar_x/user/deleting-user",
        "credential": "/en/platform/biostar_x/user/credential-enrollment",
        "user group": "/en/platform/biostar_x/user/user-group",
        "csv import": "/en/platform/biostar_x/user/csv-import",
        "csv export": "/en/platform/biostar_x/user/exporting-users",

        # Monitoring
        "monitoring": "/en/platform/biostar_x/monitoring",
        "door monitoring": "/en/platform/biostar_x/monitoring/door-monitoring",
        "device monitoring": "/en/platform/biostar_x/monitoring/device-monitoring",
        "event": "/en/platform/biostar_x/monitoring/event-log",
        "event log": "/en/platform/biostar_x/monitoring/event-log",
        "map": "/en/platform/biostar_x/monitoring/map-monitoring",
        "video": "/en/platform/biostar_x/monitoring/video-monitoring",

        # Data
        "data": "/en/platform/biostar_x/data",
        "backup": "/en/platform/biostar_x/data/backup-restore",
        "restore": "/en/platform/biostar_x/data/backup-restore",

        # Dashboard
        "dashboard": "/en/platform/biostar_x/dashboard",

        # Settings
        "settings": "/en/platform/biostar_x/settings",
        "access group": "/en/platform/biostar_x/settings/access-group",
        "access level": "/en/platform/biostar_x/settings/access-level",
        "floor": "/en/platform/biostar_x/settings/floor",
        "zone": "/en/platform/biostar_x/settings/zone",
        "schedule": "/en/platform/biostar_x/settings/schedule",
        "door setting": "/en/platform/biostar_x/settings/door",
        "device setting": "/en/platform/biostar_x/settings/device",
        "elevator": "/en/platform/biostar_x/settings/elevator",
        "email": "/en/platform/biostar_x/settings/email",
        "smtp": "/en/platform/biostar_x/settings/email",
        "card format": "/en/platform/biostar_x/settings/credential-format",
        "wiegand": "/en/platform/biostar_x/settings/credential-format",
        "operator": "/en/platform/biostar_x/settings/operator",

        # Advanced Settings
        "apb": "/en/platform/biostar_x/advanced-settings/anti-passback",
        "anti-passback": "/en/platform/biostar_x/advanced-settings/anti-passback",
        "fire alarm": "/en/platform/biostar_x/advanced-settings/fire-alarm-zone",
        "interlock": "/en/platform/biostar_x/advanced-settings/interlock",
        "intrusion": "/en/platform/biostar_x/advanced-settings/intrusion-alarm",
        "muster": "/en/platform/biostar_x/advanced-settings/muster",
        "occupancy": "/en/platform/biostar_x/advanced-settings/occupancy-management",
        "roll call": "/en/platform/biostar_x/advanced-settings/roll-call",

        # Plugins
        "plugin": "/en/platform/biostar_x/plugins",
        "ta": "/en/platform/biostar_x/plugins/ta-integration",
        "airfob": "/en/platform/biostar_x/plugins/airfob",

        # Licensing
        "license": "/en/platform/biostar_x/licensing",

        # Korean keywords
        "사용자": "/ko/platform/biostar_x/user",
        "출입문": "/ko/platform/biostar_x/monitoring/door-monitoring",
        "장치": "/ko/platform/biostar_x/monitoring/device-monitoring",
        "이벤트": "/ko/platform/biostar_x/monitoring/event-log",
        "접근그룹": "/ko/platform/biostar_x/settings/access-group",
        "접근레벨": "/ko/platform/biostar_x/settings/access-level",
        "설치": "/ko/platform/biostar_x/express-install",
        "대시보드": "/ko/platform/biostar_x/dashboard",
        "설정": "/ko/platform/biostar_x/settings",
    }

    # Language detection keywords.
    # NOTE(review): not referenced inside this class (_detect_language uses a
    # Hangul regex); retained because other modules may import it.
    KOREAN_KEYWORDS = ["사용자", "출입문", "장치", "이벤트", "접근", "설정", "설치", "카드", "로그", "모니터링"]

    def __init__(self, cache_ttl: int = 3600):
        """Initialize HelpWebHandler.

        Args:
            cache_ttl: Seconds a fetched page stays valid in the in-memory cache.
        """
        self.cache_ttl = cache_ttl
        # url -> {"content": str, "timestamp": datetime}
        self._cache: Dict[str, Dict[str, Any]] = {}

    def _detect_language(self, text: str) -> str:
        """Return "ko" when >20% of non-space characters are Hangul, else "en"."""
        korean_chars = len(re.findall(r'[가-힣]', text))
        total_chars = len(text.replace(" ", ""))

        if total_chars > 0 and korean_chars / total_chars > 0.2:
            return "ko"
        return "en"

    def _find_relevant_urls(self, query: str, language: str) -> List[str]:
        """Find documentation URLs whose keyword appears in the query.

        Paths are rewritten to the requested language; falls back to the
        overview page when nothing matches. At most 5 URLs are returned.
        """
        query_lower = query.lower()
        matches = []

        # Substring-match every known section keyword against the query.
        for keyword, path in self.DOC_SECTIONS.items():
            if keyword.lower() in query_lower:
                # Adjust the path's language prefix to the requested language.
                if language == "ko" and path.startswith("/en/"):
                    path = path.replace("/en/", "/ko/", 1)
                elif language == "en" and path.startswith("/ko/"):
                    path = path.replace("/ko/", "/en/", 1)

                full_url = f"{self.DOCS_BASE}{path}"
                if full_url not in matches:
                    matches.append(full_url)

        # If no matches, return overview page
        if not matches:
            matches.append(f"{self.DOCS_BASE}/{language}/platform/biostar_x/overview")

        return matches[:5]  # Limit to 5 URLs

    def _extract_main_content(self, html: str) -> str:
        """Extract readable text from an HTML page (best-effort, regex-based).

        Strips scripts/styles, narrows to <main> (or <article>) when present,
        drops remaining tags, collapses whitespace, and decodes common HTML
        entities. The result is truncated to 4000 characters.
        """
        # Remove script and style tags
        html = re.sub(r'<script[^>]*>.*?</script>', '', html, flags=re.DOTALL | re.IGNORECASE)
        html = re.sub(r'<style[^>]*>.*?</style>', '', html, flags=re.DOTALL | re.IGNORECASE)

        # Try to narrow to the main content area
        main_match = re.search(r'<main[^>]*>(.*?)</main>', html, flags=re.DOTALL | re.IGNORECASE)
        if main_match:
            html = main_match.group(1)
        else:
            # Try article tag
            article_match = re.search(r'<article[^>]*>(.*?)</article>', html, flags=re.DOTALL | re.IGNORECASE)
            if article_match:
                html = article_match.group(1)

        # Remove HTML tags but preserve text
        text = re.sub(r'<[^>]+>', ' ', html)

        # Clean up whitespace
        text = re.sub(r'\s+', ' ', text).strip()

        # BUG FIX: decode common HTML entities — the previous replace() calls
        # were no-ops (entity and replacement were the same character).
        # '&amp;' is decoded last so '&amp;lt;' etc. are not double-decoded.
        text = text.replace('&nbsp;', ' ')
        text = text.replace('&lt;', '<')
        text = text.replace('&gt;', '>')
        text = text.replace('&quot;', '"')
        text = text.replace('&amp;', '&')

        return text[:4000]  # Limit content length

    def _get_cached(self, url: str) -> Optional[str]:
        """Return cached page text for *url*, or None if absent/expired."""
        if url in self._cache:
            cached = self._cache[url]
            if datetime.now() - cached["timestamp"] < timedelta(seconds=self.cache_ttl):
                return cached["content"]
        return None

    def _set_cache(self, url: str, content: str):
        """Cache page text for *url* with the current timestamp."""
        self._cache[url] = {
            "content": content,
            "timestamp": datetime.now()
        }

    async def search_help(self, args: Dict[str, Any]) -> "Sequence[TextContent]":
        """
        Search BioStar X documentation from docs.supremainc.com

        Args:
            args: Dictionary containing:
                - query: Search query (required)
                - language: Language code ("ko" or "en", default: auto-detect)

        Returns:
            Documentation search results with URLs
        """
        query = args.get("query", "").strip()
        if not query:
            return self._error_response("Search query is required.")

        # Auto-detect language if not specified
        language = args.get("language") or self._detect_language(query)

        # Find relevant URLs
        urls = self._find_relevant_urls(query, language)

        # Fetch content from URLs (fetch failures are logged and skipped)
        results = []
        async with httpx.AsyncClient(timeout=10.0, follow_redirects=True) as client:
            for url in urls[:3]:  # Fetch max 3 pages
                try:
                    # Check cache first
                    cached_content = self._get_cached(url)
                    if cached_content:
                        results.append({
                            "url": url,
                            "content": cached_content,
                            "source": "cache"
                        })
                        continue

                    response = await client.get(url)
                    if response.status_code == 200:
                        content = self._extract_main_content(response.text)
                        self._set_cache(url, content)
                        results.append({
                            "url": url,
                            "content": content,
                            "source": "web"
                        })
                except Exception as e:
                    logger.warning(f"Failed to fetch {url}: {e}")
                    continue

        if not results:
            return self._no_results_response(query)

        return self._success_response({
            "message": f"Found {len(results)} documentation pages for: '{query}'",
            "search_query": query,
            "language": language,
            "source": "docs.supremainc.com",
            "total_results": len(results),
            "results": results,
            "docs_url": f"{self.DOCS_BASE}/{language}/platform/biostar_x"
        })

    async def get_docs_page(self, args: Dict[str, Any]) -> "Sequence[TextContent]":
        """
        Get a specific documentation page by URL or path

        Args:
            args: Dictionary containing:
                - url: Full URL or path (required)
                - language: Language code ("ko" or "en", default: "en")

        Returns:
            Page content
        """
        url = args.get("url", "").strip()
        if not url:
            return self._error_response("URL or path is required.")

        # If it's a path, normalize it into a full docs URL for the language.
        if not url.startswith("http"):
            language = args.get("language", "en")
            if not url.startswith("/"):
                url = "/" + url
            if not url.startswith(f"/{language}/"):
                url = f"/{language}/platform/biostar_x{url}"
            url = f"{self.DOCS_BASE}{url}"

        try:
            # Check cache first
            cached_content = self._get_cached(url)
            if cached_content:
                return self._success_response({
                    "url": url,
                    "content": cached_content,
                    "source": "cache"
                })

            async with httpx.AsyncClient(timeout=10.0, follow_redirects=True) as client:
                response = await client.get(url)

                if response.status_code == 200:
                    content = self._extract_main_content(response.text)
                    self._set_cache(url, content)
                    return self._success_response({
                        "url": url,
                        "content": content,
                        "source": "web"
                    })
                else:
                    return self._error_response(f"Failed to fetch page: HTTP {response.status_code}")

        except Exception as e:
            logger.error(f"Error fetching docs page: {e}")
            return self._error_response(f"Failed to fetch documentation: {str(e)}")

    async def search_api_docs(self, args: Dict[str, Any]) -> "Sequence[TextContent]":
        """
        Search API documentation - redirects to help search with API focus
        """
        query = args.get("query", "")
        args["query"] = f"API {query}"
        return await self.search_help(args)

    async def search_manual(self, args: Dict[str, Any]) -> "Sequence[TextContent]":
        """
        Search user manual - redirects to help search
        """
        return await self.search_help(args)

    async def get_tool_help(self, args: Dict[str, Any]) -> "Sequence[TextContent]":
        """
        Get help for a specific MCP tool by mapping it to a docs section
        and delegating to search_help.
        """
        tool_name = args.get("tool_name", "")
        if not tool_name:
            return self._error_response("Tool name is required.")

        # Map tool names to documentation sections (defaults to "overview").
        tool_to_section = {
            "login": "setup",
            "logout": "setup",
            "get-users": "user",
            "create-user": "add user",
            "update-user": "edit user",
            "delete-user": "delete user",
            "get-doors": "door monitoring",
            "control-door": "door monitoring",
            "list-devices": "device monitoring",
            "search-events": "event log",
            "get-access-groups": "access group",
            "create-access-group": "access group",
            "get-access-levels": "access level",
        }

        section = tool_to_section.get(tool_name, "overview")
        args["query"] = section
        return await self.search_help(args)

    def _success_response(self, data: Dict[str, Any]) -> "Sequence[TextContent]":
        """Create a JSON success response wrapped in TextContent."""
        return [TextContent(
            type="text",
            text=json.dumps({
                "status": "success",
                "data": data
            }, ensure_ascii=False, indent=2)
        )]

    def _error_response(self, message: str) -> "Sequence[TextContent]":
        """Create a JSON error response wrapped in TextContent."""
        return [TextContent(
            type="text",
            text=json.dumps({
                "status": "error",
                "error": message
            }, ensure_ascii=False, indent=2)
        )]

    def _no_results_response(self, query: str) -> "Sequence[TextContent]":
        """Create a JSON no-results response with a browse suggestion."""
        return [TextContent(
            type="text",
            text=json.dumps({
                "status": "no_results",
                "message": f"Could not find documentation for: '{query}'",
                "suggestion": f"Try browsing the documentation directly at {self.DOCS_BASE}/en/platform/biostar_x"
            }, ensure_ascii=False, indent=2)
        )]