tunnel-manager 1.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scripts/validate_a2a_agent.py +148 -0
- scripts/validate_agent.py +67 -0
- tests/test_tunnel.py +76 -0
- tunnel_manager/__init__.py +66 -0
- tunnel_manager/__main__.py +6 -0
- tunnel_manager/mcp_config.json +8 -0
- tunnel_manager/middlewares.py +53 -0
- tunnel_manager/skills/tunnel-manager-remote-access/SKILL.md +51 -0
- tunnel_manager/tunnel_manager.py +990 -0
- tunnel_manager/tunnel_manager_agent.py +350 -0
- tunnel_manager/tunnel_manager_mcp.py +2600 -0
- tunnel_manager/utils.py +110 -0
- tunnel_manager-1.0.9.dist-info/METADATA +565 -0
- tunnel_manager-1.0.9.dist-info/RECORD +18 -0
- tunnel_manager-1.0.9.dist-info/WHEEL +5 -0
- tunnel_manager-1.0.9.dist-info/entry_points.txt +4 -0
- tunnel_manager-1.0.9.dist-info/licenses/LICENSE +20 -0
- tunnel_manager-1.0.9.dist-info/top_level.txt +3 -0
|
@@ -0,0 +1,2600 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# coding: utf-8
|
|
3
|
+
import os
|
|
4
|
+
import sys
|
|
5
|
+
import argparse
|
|
6
|
+
import logging
|
|
7
|
+
import concurrent.futures
|
|
8
|
+
import yaml
|
|
9
|
+
import asyncio
|
|
10
|
+
from typing import Optional, Dict, List, Union
|
|
11
|
+
import requests
|
|
12
|
+
from eunomia_mcp.middleware import EunomiaMcpMiddleware
|
|
13
|
+
from pydantic import Field
|
|
14
|
+
from fastmcp import FastMCP
|
|
15
|
+
from fastmcp.server.auth.oidc_proxy import OIDCProxy
|
|
16
|
+
from fastmcp.server.auth import OAuthProxy, RemoteAuthProvider
|
|
17
|
+
from fastmcp.server.auth.providers.jwt import JWTVerifier, StaticTokenVerifier
|
|
18
|
+
from fastmcp.server.middleware.logging import LoggingMiddleware
|
|
19
|
+
from fastmcp.server.middleware.timing import TimingMiddleware
|
|
20
|
+
from fastmcp.server.middleware.rate_limiting import RateLimitingMiddleware
|
|
21
|
+
from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
|
|
22
|
+
from fastmcp import Context
|
|
23
|
+
from fastmcp.utilities.logging import get_logger
|
|
24
|
+
from tunnel_manager.tunnel_manager import Tunnel
|
|
25
|
+
from tunnel_manager.utils import (
|
|
26
|
+
to_boolean,
|
|
27
|
+
to_integer,
|
|
28
|
+
)
|
|
29
|
+
from tunnel_manager.middlewares import UserTokenMiddleware, JWTClaimsLoggingMiddleware
|
|
30
|
+
|
|
31
|
+
# Root logger at DEBUG is very verbose and will echo hostnames/paths into logs;
# NOTE(review): consider INFO for production deployments — confirm intent.
logging.basicConfig(
    level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
# Module-level logger used by all tool functions below.
logger = get_logger("TunnelManager")

# Server-wide auth/delegation settings, sourced entirely from environment
# variables so the deployment environment controls behavior.
config = {
    # Whether to exchange the caller's token for a delegated one.
    "enable_delegation": to_boolean(os.environ.get("ENABLE_DELEGATION", "False")),
    "audience": os.environ.get("AUDIENCE", None),
    "delegated_scopes": os.environ.get("DELEGATED_SCOPES", "api"),
    "token_endpoint": None,  # Will be fetched dynamically from OIDC config
    "oidc_client_id": os.environ.get("OIDC_CLIENT_ID", None),
    "oidc_client_secret": os.environ.get("OIDC_CLIENT_SECRET", None),
    "oidc_config_url": os.environ.get("OIDC_CONFIG_URL", None),
    # JWT verification settings follow FastMCP's FASTMCP_SERVER_AUTH_JWT_* naming.
    "jwt_jwks_uri": os.getenv("FASTMCP_SERVER_AUTH_JWT_JWKS_URI", None),
    "jwt_issuer": os.getenv("FASTMCP_SERVER_AUTH_JWT_ISSUER", None),
    "jwt_audience": os.getenv("FASTMCP_SERVER_AUTH_JWT_AUDIENCE", None),
    "jwt_algorithm": os.getenv("FASTMCP_SERVER_AUTH_JWT_ALGORITHM", None),
    # NOTE(review): key name says "secret" but the env var is a PUBLIC key — confirm.
    "jwt_secret": os.getenv("FASTMCP_SERVER_AUTH_JWT_PUBLIC_KEY", None),
    "jwt_required_scopes": os.getenv("FASTMCP_SERVER_AUTH_JWT_REQUIRED_SCOPES", None),
}

# Transport/bind defaults; "stdio" keeps the server local unless overridden.
DEFAULT_TRANSPORT = os.environ.get("TRANSPORT", "stdio")
DEFAULT_HOST = os.environ.get("HOST", "0.0.0.0")  # all interfaces for HTTP transports
DEFAULT_PORT = to_integer(os.environ.get("PORT", "8000"))
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class ResponseBuilder:
    """Builds the uniform response payload returned by every MCP tool in this module."""

    @staticmethod
    def build(
        status: int,
        msg: str,
        details: Dict,
        error: str = "",
        stdout: str = "",
        files: Optional[List] = None,
        locations: Optional[List] = None,
        errors: Optional[List] = None,
    ) -> Dict:
        """Assemble a standard tool-response dict.

        Args:
            status: HTTP-style status code (200, 400, 500, ...).
            msg: Human-readable summary of the outcome.
            details: Tool-specific context (host, paths, flags, ...).
            error: Single stderr/error string, also mirrored into ``errors``
                when no explicit error list is supplied.
            stdout: Captured standard output, if any.
            files: Files that were copied (source side).
            locations: Destinations the files were copied to.
            errors: Explicit list of error strings; overrides the ``error``
                fallback when provided.

        Returns:
            Dict with the fixed keys status_code/message/stdout/stderr/
            files_copied/locations_copied_to/details/errors.
        """
        return {
            "status_code": status,
            "message": msg,
            "stdout": stdout,
            "stderr": error,
            "files_copied": files or [],
            "locations_copied_to": locations or [],
            "details": details,
            # Promote the single error string when no explicit list is given.
            "errors": errors or ([error] if error else []),
        }
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def load_inventory(
    inventory: str, group: str, logger: logging.Logger
) -> tuple[List[Dict], Dict]:
    """Load SSH connection entries for *group* from an Ansible-style YAML inventory.

    Args:
        inventory: Path to the YAML inventory file.
        group: Inventory group whose ``hosts`` mapping should be read.
        logger: Logger used for per-host warnings and failure reporting.

    Returns:
        ``(hosts, error)``: on success, ``hosts`` is a list of dicts with
        ``hostname``/``username``/``password``/``key_path`` keys and ``error``
        is an empty dict; on failure, ``hosts`` is empty and ``error`` is a
        ResponseBuilder payload describing the problem.
    """
    try:
        with open(inventory, "r") as f:
            inv = yaml.safe_load(f)
        hosts = []
        if group in inv and isinstance(inv[group], dict) and "hosts" in inv[group]:
            # Renamed from `vars` to avoid shadowing the builtin.
            for host, host_vars in inv[group]["hosts"].items():
                entry = {
                    # Prefer an explicit ansible_host; fall back to the inventory key.
                    "hostname": host_vars.get("ansible_host", host),
                    "username": host_vars.get("ansible_user"),
                    "password": host_vars.get("ansible_ssh_pass"),
                    "key_path": host_vars.get("ansible_ssh_private_key_file"),
                }
                if not entry["username"]:
                    # A username is mandatory for SSH; skip hosts without one.
                    logger.error(f"Skip {entry['hostname']}: no username")
                    continue
                hosts.append(entry)
        else:
            return [], ResponseBuilder.build(
                400,
                f"Group '{group}' invalid",
                {"inventory": inventory, "group": group},
                errors=[f"Group '{group}' invalid"],
            )
        if not hosts:
            return [], ResponseBuilder.build(
                400,
                f"No hosts in group '{group}'",
                {"inventory": inventory, "group": group},
                errors=[f"No hosts in group '{group}'"],
            )
        return hosts, {}
    except Exception as e:
        # Covers missing file, malformed YAML, and non-mapping inventories.
        logger.error(f"Load inv fail: {e}")
        return [], ResponseBuilder.build(
            500,
            f"Load inv fail: {e}",
            {"inventory": inventory, "group": group},
            str(e),
        )
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def register_tools(mcp: FastMCP):
|
|
126
|
+
@mcp.tool(
    annotations={
        "title": "Run Command on Remote Host",
        # NOTE(review): readOnlyHint=True contradicts destructiveHint=True for a
        # tool that runs arbitrary shell commands — confirm intended metadata.
        "readOnlyHint": True,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def run_command_on_remote_host(
    # All defaults are read from TUNNEL_* env vars at import time.
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    password: Optional[str] = Field(
        description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    cmd: str = Field(description="Shell command.", default=None),
    id_file: Optional[str] = Field(
        description="Private key path.",
        default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
    ),
    certificate: Optional[str] = Field(
        description="Teleport certificate.",
        default=os.environ.get("TUNNEL_CERTIFICATE", None),
    ),
    proxy: Optional[str] = Field(
        description="Teleport proxy.",
        default=os.environ.get("TUNNEL_PROXY_COMMAND", None),
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Run shell command on remote host. Expected return object type: dict"""
    logger.debug(f"Run cmd: host={host}, cmd={cmd}")
    # Both host and command are mandatory; fail fast with a 400 payload.
    if not host or not cmd:
        logger.error("Need host, cmd")
        return ResponseBuilder.build(
            400,
            "Need host, cmd",
            {"host": host, "cmd": cmd},
            errors=["Need host, cmd"],
        )
    try:
        # Tunnel encapsulates the SSH/Teleport connection details.
        t = Tunnel(
            remote_host=host,
            username=user,
            password=password,
            port=port,
            identity_file=id_file,
            certificate_file=certificate,
            proxy_command=proxy,
            ssh_config_file=cfg,
        )
        # Progress reporting is best-effort and only done when a context exists.
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        t.connect()
        # run_command returns a (stdout, stderr) pair — established by the
        # unpacking here; exact semantics live in tunnel_manager.Tunnel.
        out, error = t.run_command(cmd)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"Cmd out: {out}, error: {error}")
        # A nonzero remote exit status still yields a 200 here; stderr is
        # carried in the payload instead.
        return ResponseBuilder.build(
            200,
            f"Cmd '{cmd}' done on {host}",
            {"host": host, "cmd": cmd},
            error,
            stdout=out,
            files=[],
            locations=[],
            errors=[],
        )
    except Exception as e:
        logger.error(f"Cmd fail: {e}")
        return ResponseBuilder.build(
            500, f"Cmd fail: {e}", {"host": host, "cmd": cmd}, str(e)
        )
    finally:
        # Close the tunnel only if construction succeeded.
        if "t" in locals():
            t.close()
|
|
219
|
+
|
|
220
|
+
@mcp.tool(
    annotations={
        # Fixed: this tool uploads TO the remote host (sftp.put), the old
        # title said "from Remote Host".
        "title": "Send File to Remote Host",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def send_file_to_remote_host(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    password: Optional[str] = Field(
        description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    lpath: str = Field(description="Local file path.", default=None),
    rpath: str = Field(description="Remote path.", default=None),
    id_file: Optional[str] = Field(
        description="Private key path.",
        default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
    ),
    certificate: Optional[str] = Field(
        description="Teleport certificate.",
        default=os.environ.get("TUNNEL_CERTIFICATE", None),
    ),
    proxy: Optional[str] = Field(
        description="Teleport proxy.",
        default=os.environ.get("TUNNEL_PROXY_COMMAND", None),
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Upload file to remote host. Expected return object type: dict"""
    # Uses the module-level logger; the previous local
    # logging.getLogger("TunnelServer") shadowed it inconsistently with the
    # other tools in this file.
    logger.debug(f"Upload: host={host}, local={lpath}, remote={rpath}")
    # Validate BEFORE normalizing: os.path.expanduser(None) raises TypeError,
    # which previously bypassed this friendly 400 response.
    if not host or not lpath or not rpath:
        logger.error("Need host, lpath, rpath")
        return ResponseBuilder.build(
            400,
            "Need host, lpath, rpath",
            {"host": host, "lpath": lpath, "rpath": rpath},
            errors=["Need host, lpath, rpath"],
        )
    lpath = os.path.abspath(os.path.expanduser(lpath))  # normalize once
    # NOTE(review): expanduser on the REMOTE path expands the LOCAL user's
    # home directory — confirm this is intended for remote targets.
    rpath = os.path.expanduser(rpath)
    logger.debug(
        f"Normalized: lpath={lpath} (exists={os.path.exists(lpath)}, isfile={os.path.isfile(lpath)}), rpath={rpath}, CWD={os.getcwd()}"
    )
    if not os.path.exists(lpath) or not os.path.isfile(lpath):
        logger.error(
            f"Invalid file: {lpath} (exists={os.path.exists(lpath)}, isfile={os.path.isfile(lpath)})"
        )
        return ResponseBuilder.build(
            400,
            f"Invalid file: {lpath}",
            {"host": host, "lpath": lpath, "rpath": rpath},
            errors=[f"Invalid file: {lpath}"],
        )
    try:
        t = Tunnel(
            remote_host=host,
            username=user,
            password=password,
            port=port,
            identity_file=id_file,
            certificate_file=certificate,
            proxy_command=proxy,
            ssh_config_file=cfg,
        )
        t.connect()
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        sftp = t.ssh_client.open_sftp()
        transferred = 0

        def progress_callback(transf, total):
            # Paramiko invokes this from its worker thread.
            # NOTE(review): asyncio.ensure_future from a non-event-loop thread
            # is not thread-safe — consider run_coroutine_threadsafe; kept
            # as-is to preserve existing behavior.
            nonlocal transferred
            transferred = transf
            if ctx:
                asyncio.ensure_future(
                    ctx.report_progress(progress=transf, total=total)
                )

        sftp.put(lpath, rpath, callback=progress_callback)
        sftp.close()
        # Final progress tick, for consistency with the download tool.
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"Uploaded: {lpath} -> {rpath}")
        return ResponseBuilder.build(
            200,
            f"Uploaded to {rpath}",
            {"host": host, "lpath": lpath, "rpath": rpath},
            files=[lpath],
            locations=[rpath],
            errors=[],
        )
    except Exception as e:
        logger.error(f"Unexpected error during file transfer: {str(e)}")
        return ResponseBuilder.build(
            500,
            f"Upload fail: {str(e)}",
            {"host": host, "lpath": lpath, "rpath": rpath},
            str(e),
            errors=[f"Unexpected error: {str(e)}"],
        )
    finally:
        if "t" in locals():
            t.close()
|
|
343
|
+
|
|
344
|
+
@mcp.tool(
    annotations={
        "title": "Receive File from Remote Host",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": True,
    },
    tags={"remote_access"},
)
async def receive_file_from_remote_host(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    password: Optional[str] = Field(
        description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    rpath: str = Field(description="Remote file path.", default=None),
    lpath: str = Field(description="Local file path.", default=None),
    id_file: Optional[str] = Field(
        description="Private key path.",
        default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
    ),
    certificate: Optional[str] = Field(
        description="Teleport certificate.",
        default=os.environ.get("TUNNEL_CERTIFICATE", None),
    ),
    proxy: Optional[str] = Field(
        description="Teleport proxy.",
        default=os.environ.get("TUNNEL_PROXY_COMMAND", None),
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Download file from remote host. Expected return object type: dict"""
    # Validate BEFORE normalizing: os.path.expanduser(None) raises TypeError,
    # which previously escaped before this friendly 400 response was reached.
    if not host or not rpath or not lpath:
        logger.error("Need host, rpath, lpath")
        return ResponseBuilder.build(
            400,
            "Need host, rpath, lpath",
            {"host": host, "rpath": rpath, "lpath": lpath},
            errors=["Need host, rpath, lpath"],
        )
    lpath = os.path.abspath(os.path.expanduser(lpath))
    logger.debug(f"Download: host={host}, remote={rpath}, local={lpath}")
    try:
        t = Tunnel(
            remote_host=host,
            username=user,
            password=password,
            port=port,
            identity_file=id_file,
            certificate_file=certificate,
            proxy_command=proxy,
            ssh_config_file=cfg,
        )
        t.connect()
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        sftp = t.ssh_client.open_sftp()
        # stat() raises early (caught below) if the remote file does not exist.
        sftp.stat(rpath)
        transferred = 0

        def progress_callback(transf, total):
            # Paramiko invokes this from its worker thread.
            # NOTE(review): asyncio.ensure_future from a non-event-loop thread
            # is not thread-safe — kept as-is to preserve existing behavior.
            nonlocal transferred
            transferred = transf
            if ctx:
                asyncio.ensure_future(
                    ctx.report_progress(progress=transf, total=total)
                )

        sftp.get(rpath, lpath, callback=progress_callback)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        sftp.close()
        logger.debug(f"Downloaded: {rpath} -> {lpath}")
        return ResponseBuilder.build(
            200,
            f"Downloaded to {lpath}",
            {"host": host, "rpath": rpath, "lpath": lpath},
            files=[rpath],
            locations=[lpath],
            errors=[],
        )
    except Exception as e:
        logger.error(f"Download fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Download fail: {e}",
            {"host": host, "rpath": rpath, "lpath": lpath},
            str(e),
        )
    finally:
        if "t" in locals():
            t.close()
|
|
453
|
+
|
|
454
|
+
@mcp.tool(
    annotations={
        "title": "Check SSH Server",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
    },
    tags={"remote_access"},
)
async def check_ssh_server(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    password: Optional[str] = Field(
        description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    id_file: Optional[str] = Field(
        description="Private key path.",
        default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
    ),
    certificate: Optional[str] = Field(
        description="Teleport certificate.",
        default=os.environ.get("TUNNEL_CERTIFICATE", None),
    ),
    proxy: Optional[str] = Field(
        description="Teleport proxy.",
        default=os.environ.get("TUNNEL_PROXY_COMMAND", None),
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Check SSH server status. Expected return object type: dict"""
    logger.debug(f"Check SSH: host={host}")
    # Guard clause: a target host is the only hard requirement.
    if not host:
        logger.error("Need host")
        return ResponseBuilder.build(
            400, "Need host", {"host": host}, errors=["Need host"]
        )
    # Sentinel instead of probing locals() in the cleanup below.
    tunnel = None
    try:
        tunnel = Tunnel(
            remote_host=host,
            username=user,
            password=password,
            port=port,
            identity_file=id_file,
            certificate_file=certificate,
            proxy_command=proxy,
            ssh_config_file=cfg,
        )
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        # Delegate the actual probe to the Tunnel helper; it returns a
        # (bool, message) pair.
        reachable, detail = tunnel.check_ssh_server()
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"SSH check: {detail}")
        status_code = 200 if reachable else 400
        problems = [] if reachable else [detail]
        return ResponseBuilder.build(
            status_code,
            f"SSH check: {detail}",
            {"host": host, "success": reachable},
            files=[],
            locations=[],
            errors=problems,
        )
    except Exception as exc:
        logger.error(f"Check fail: {exc}")
        return ResponseBuilder.build(
            500, f"Check fail: {exc}", {"host": host}, str(exc)
        )
    finally:
        if tunnel is not None:
            tunnel.close()
|
|
540
|
+
|
|
541
|
+
@mcp.tool(
    annotations={
        "title": "Test Key Authentication",
        "readOnlyHint": True,
        "destructiveHint": False,
        "idempotentHint": True,
    },
    tags={"remote_access"},
)
async def test_key_auth(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    key: str = Field(
        description="Private key path.",
        default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Test key-based auth. Expected return object type: dict"""
    logger.debug(f"Test key: host={host}, key={key}")
    if not host or not key:
        logger.error("Need host, key")
        return ResponseBuilder.build(
            400,
            "Need host, key",
            {"host": host, "key": key},
            errors=["Need host, key"],
        )
    try:
        t = Tunnel(remote_host=host, username=user, port=port, ssh_config_file=cfg)
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        # Returns a (bool, message) pair describing the auth attempt.
        success, msg = t.test_key_auth(key)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"Key test: {msg}")
        return ResponseBuilder.build(
            200 if success else 400,
            f"Key test: {msg}",
            {"host": host, "key": key, "success": success},
            files=[],
            locations=[],
            errors=[] if success else [msg],
        )
    except Exception as e:
        logger.error(f"Key test fail: {e}")
        return ResponseBuilder.build(
            500, f"Key test fail: {e}", {"host": host, "key": key}, str(e)
        )
    finally:
        # Fix: this was the only tool that never closed its Tunnel, leaking
        # the connection; now consistent with the sibling tools.
        if "t" in locals():
            t.close()
|
|
607
|
+
|
|
608
|
+
@mcp.tool(
    annotations={
        "title": "Setup Passwordless SSH",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def setup_passwordless_ssh(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    password: Optional[str] = Field(
        description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    # NOTE(review): default path says id_rsa while key_type defaults to
    # ed25519 — confirm the intended default pairing.
    key: str = Field(
        description="Private key path.", default=os.path.expanduser("~/.ssh/id_rsa")
    ),
    key_type: str = Field(
        description="Key type to generate (rsa or ed25519).", default="ed25519"
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Setup passwordless SSH. Expected return object type: dict"""
    import subprocess  # local import: only needed for key generation

    logger.debug(f"Setup SSH: host={host}, key={key}, key_type={key_type}")
    if not host or not password:
        logger.error("Need host, password")
        return ResponseBuilder.build(
            400,
            "Need host, password",
            {"host": host, "key": key, "key_type": key_type},
            errors=["Need host, password"],
        )
    if key_type not in ["rsa", "ed25519"]:
        logger.error(f"Invalid key_type: {key_type}")
        return ResponseBuilder.build(
            400,
            f"Invalid key_type: {key_type}",
            {"host": host, "key": key, "key_type": key_type},
            errors=["key_type must be 'rsa' or 'ed25519'"],
        )
    try:
        t = Tunnel(
            remote_host=host,
            username=user,
            password=password,
            port=port,
            ssh_config_file=cfg,
        )
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        key = os.path.expanduser(key)
        pub_key = key + ".pub"
        if not os.path.exists(pub_key):
            # Security fix: the previous os.system(f"ssh-keygen ... -f {key} ...")
            # interpolated a user-supplied path into a shell string (command
            # injection / broken on spaces). Argument-list subprocess avoids the
            # shell entirely, and check=True surfaces keygen failures via the
            # except branch instead of silently continuing.
            if key_type == "rsa":
                keygen_args = [
                    "ssh-keygen", "-t", "rsa", "-b", "4096", "-f", key, "-N", "",
                ]
            else:  # ed25519
                keygen_args = ["ssh-keygen", "-t", "ed25519", "-f", key, "-N", ""]
            subprocess.run(keygen_args, check=True)
            logger.info(f"Generated {key_type} key: {key}, {pub_key}")
        # Delegates pushing the public key to the remote authorized_keys.
        t.setup_passwordless_ssh(local_key_path=key, key_type=key_type)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"SSH setup for {user}@{host}")
        return ResponseBuilder.build(
            200,
            f"SSH setup for {user}@{host}",
            {"host": host, "key": key, "user": user, "key_type": key_type},
            files=[pub_key],
            locations=[f"~/.ssh/authorized_keys on {host}"],
            errors=[],
        )
    except Exception as e:
        logger.error(f"SSH setup fail: {e}")
        return ResponseBuilder.build(
            500,
            f"SSH setup fail: {e}",
            {"host": host, "key": key, "key_type": key_type},
            str(e),
        )
    finally:
        if "t" in locals():
            t.close()
|
|
707
|
+
|
|
708
|
+
@mcp.tool(
    annotations={
        "title": "Copy SSH Config",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def copy_ssh_config(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    password: Optional[str] = Field(
        description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    lcfg: str = Field(description="Local SSH config.", default=None),
    # NOTE(review): default expands the LOCAL user's home for a REMOTE
    # destination path — confirm this matches the remote layout.
    rcfg: str = Field(
        description="Remote SSH config.",
        default=os.path.expanduser("~/.ssh/config"),
    ),
    id_file: Optional[str] = Field(
        description="Private key path.",
        default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
    ),
    certificate: Optional[str] = Field(
        description="Teleport certificate.",
        default=os.environ.get("TUNNEL_CERTIFICATE", None),
    ),
    proxy: Optional[str] = Field(
        description="Teleport proxy.",
        default=os.environ.get("TUNNEL_PROXY_COMMAND", None),
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Copy SSH config to remote host. Expected return object type: dict"""
    logger.debug(f"Copy cfg: host={host}, local={lcfg}, remote={rcfg}")
    # Host and the local config path are mandatory; rcfg has a default.
    if not host or not lcfg:
        logger.error("Need host, lcfg")
        return ResponseBuilder.build(
            400,
            "Need host, lcfg",
            {"host": host, "lcfg": lcfg, "rcfg": rcfg},
            errors=["Need host, lcfg"],
        )
    try:
        t = Tunnel(
            remote_host=host,
            username=user,
            password=password,
            port=port,
            identity_file=id_file,
            certificate_file=certificate,
            proxy_command=proxy,
            ssh_config_file=cfg,
        )
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        # No explicit t.connect() here — presumably copy_ssh_config connects
        # internally; verify against tunnel_manager.Tunnel.
        t.copy_ssh_config(lcfg, rcfg)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"Copied cfg to {rcfg} on {host}")
        return ResponseBuilder.build(
            200,
            f"Copied cfg to {rcfg} on {host}",
            {"host": host, "lcfg": lcfg, "rcfg": rcfg},
            files=[lcfg],
            locations=[rcfg],
            errors=[],
        )
    except Exception as e:
        logger.error(f"Copy cfg fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Copy cfg fail: {e}",
            {"host": host, "lcfg": lcfg, "rcfg": rcfg},
            str(e),
        )
    finally:
        if "t" in locals():
            t.close()
|
|
805
|
+
|
|
806
|
+
@mcp.tool(
    annotations={
        "title": "Rotate SSH Key",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def rotate_ssh_key(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    user: Optional[str] = Field(
        description="Username.", default=os.environ.get("TUNNEL_USERNAME", None)
    ),
    password: Optional[str] = Field(
        description="Password.", default=os.environ.get("TUNNEL_PASSWORD", None)
    ),
    port: int = Field(
        description="Port.",
        default=to_integer(os.environ.get("TUNNEL_REMOTE_PORT", "22")),
    ),
    new_key: str = Field(description="New private key path.", default=None),
    key_type: str = Field(
        description="Key type to generate (rsa or ed25519).", default="ed25519"
    ),
    id_file: Optional[str] = Field(
        description="Current key path.",
        default=os.environ.get("TUNNEL_IDENTITY_FILE", None),
    ),
    certificate: Optional[str] = Field(
        description="Teleport certificate.",
        default=os.environ.get("TUNNEL_CERTIFICATE", None),
    ),
    proxy: Optional[str] = Field(
        description="Teleport proxy.",
        default=os.environ.get("TUNNEL_PROXY_COMMAND", None),
    ),
    cfg: str = Field(
        description="SSH config path.", default=os.path.expanduser("~/.ssh/config")
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Rotate SSH key on remote host. Expected return object type: dict

    Generates a new local key pair at ``new_key`` (if one does not already
    exist) and delegates the actual rotation to ``Tunnel.rotate_ssh_key``.

    Returns a ``ResponseBuilder.build`` dict:
        400 when ``host``/``new_key`` are missing or ``key_type`` is invalid,
        200 on success, 500 on any exception raised during rotation.

    NOTE: ``log`` is accepted for interface parity with the sibling tools but
    is not referenced in this body.
    """
    # Local import: keeps the module-level import block untouched.
    import subprocess

    logger.debug(f"Rotate key: host={host}, new_key={new_key}, key_type={key_type}")
    if not host or not new_key:
        logger.error("Need host, new_key")
        return ResponseBuilder.build(
            400,
            "Need host, new_key",
            {"host": host, "new_key": new_key, "key_type": key_type},
            errors=["Need host, new_key"],
        )
    if key_type not in ["rsa", "ed25519"]:
        logger.error(f"Invalid key_type: {key_type}")
        return ResponseBuilder.build(
            400,
            f"Invalid key_type: {key_type}",
            {"host": host, "new_key": new_key, "key_type": key_type},
            errors=["key_type must be 'rsa' or 'ed25519'"],
        )
    try:
        t = Tunnel(
            remote_host=host,
            username=user,
            password=password,
            port=port,
            identity_file=id_file,
            certificate_file=certificate,
            proxy_command=proxy,
            ssh_config_file=cfg,
        )
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        new_key = os.path.expanduser(new_key)
        new_public_key = new_key + ".pub"
        if not os.path.exists(new_key):
            # Build argv as a list (shell=False): a caller-supplied path with
            # spaces or shell metacharacters can no longer break/inject into a
            # shell command (previously an os.system f-string). check=True
            # routes keygen failure through the 500 handler below instead of
            # silently continuing with no key on disk.
            if key_type == "rsa":
                keygen_cmd = [
                    "ssh-keygen", "-t", "rsa", "-b", "4096", "-f", new_key, "-N", "",
                ]
            else:  # ed25519
                keygen_cmd = ["ssh-keygen", "-t", "ed25519", "-f", new_key, "-N", ""]
            subprocess.run(keygen_cmd, check=True)
            logger.info(f"Generated {key_type} key: {new_key}")
        t.rotate_ssh_key(new_key, key_type=key_type)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"Rotated {key_type} key to {new_key} on {host}")
        return ResponseBuilder.build(
            200,
            f"Rotated {key_type} key to {new_key} on {host}",
            {
                "host": host,
                "new_key": new_key,
                "old_key": id_file,
                "key_type": key_type,
            },
            files=[new_public_key],
            locations=[f"~/.ssh/authorized_keys on {host}"],
            errors=[],
        )
    except Exception as e:
        logger.error(f"Rotate fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Rotate fail: {e}",
            {"host": host, "new_key": new_key, "key_type": key_type},
            str(e),
        )
    finally:
        # Tunnel may not exist if its constructor raised; guard before closing.
        if "t" in locals():
            t.close()
|
|
923
|
+
|
|
924
|
+
@mcp.tool(
    annotations={
        "title": "Remove Host Key",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": True,
    },
    tags={"remote_access"},
)
async def remove_host_key(
    host: str = Field(
        description="Remote host.",
        default=os.environ.get("TUNNEL_REMOTE_HOST", None),
    ),
    known_hosts: str = Field(
        description="Known hosts path.",
        default=os.path.expanduser("~/.ssh/known_hosts"),
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Remove host key from known_hosts. Expected return object type: dict

    Delegates to ``Tunnel.remove_host_key``; the status code is inferred from
    the returned message (200 only when it contains "Removed").

    Returns a ``ResponseBuilder.build`` dict: 400 when ``host`` is missing or
    the key was not removed, 200 on success, 500 on exception.

    NOTE: ``log`` is accepted for interface parity with the sibling tools but
    is not referenced in this body.
    """
    logger.debug(f"Remove key: host={host}, known_hosts={known_hosts}")
    if not host:
        logger.error("Need host")
        return ResponseBuilder.build(
            400,
            "Need host",
            {"host": host, "known_hosts": known_hosts},
            errors=["Need host"],
        )
    try:
        t = Tunnel(remote_host=host)
        if ctx:
            await ctx.report_progress(progress=0, total=100)
            logger.debug("Progress: 0/100")
        known_hosts = os.path.expanduser(known_hosts)
        msg = t.remove_host_key(known_hosts_path=known_hosts)
        if ctx:
            await ctx.report_progress(progress=100, total=100)
            logger.debug("Progress: 100/100")
        logger.debug(f"Remove result: {msg}")
        return ResponseBuilder.build(
            # Success is signalled by the message text from Tunnel.
            200 if "Removed" in msg else 400,
            msg,
            {"host": host, "known_hosts": known_hosts},
            files=[],
            locations=[],
            errors=[] if "Removed" in msg else [msg],
        )
    except Exception as e:
        logger.error(f"Remove fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Remove fail: {e}",
            {"host": host, "known_hosts": known_hosts},
            str(e),
        )
    finally:
        # Fix: previously the Tunnel was never closed here, unlike every
        # sibling tool — close it on all paths (guarded, since the
        # constructor itself may have raised).
        if "t" in locals():
            t.close()
|
|
984
|
+
|
|
985
|
+
@mcp.tool(
    annotations={
        "title": "Setup Passwordless SSH for All",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def configure_key_auth_on_inventory(
    inventory: str = Field(
        description="YAML inventory path.",
        default=os.environ.get("TUNNEL_INVENTORY", None),
    ),
    key: str = Field(
        description="Shared key path.",
        default=os.environ.get(
            "TUNNEL_IDENTITY_FILE", os.path.expanduser("~/.ssh/id_shared")
        ),
    ),
    key_type: str = Field(
        description="Key type to generate (rsa or ed25519).", default="ed25519"
    ),
    group: str = Field(
        description="Target group.",
        default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
    ),
    parallel: bool = Field(
        description="Run parallel.",
        default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
    ),
    max_threads: int = Field(
        description="Max threads.",
        default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
    ),
    log: Optional[str] = Field(description="Log file.", default=None),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Setup passwordless SSH for all hosts in group. Expected return object type: dict

    Generates one shared key pair (if ``key`` does not exist), then appends
    its public key to ``~/.ssh/authorized_keys`` on every host of ``group``
    in the YAML inventory, either serially or via a thread pool.

    Returns a ``ResponseBuilder.build`` dict: 400 on missing/invalid input,
    200 when every host succeeded, 500 when any host reported errors or an
    unexpected exception occurred. Per-host outcomes are in ``host_results``.

    NOTE: ``log`` is accepted for interface parity but is not referenced in
    this body.
    """
    logger.debug(
        f"Setup SSH all: inv={inventory}, group={group}, key_type={key_type}"
    )
    if not inventory:
        logger.error("Need inventory")
        return ResponseBuilder.build(
            400,
            "Need inventory",
            {"inventory": inventory, "group": group, "key_type": key_type},
            errors=["Need inventory"],
        )
    if key_type not in ["rsa", "ed25519"]:
        logger.error(f"Invalid key_type: {key_type}")
        return ResponseBuilder.build(
            400,
            f"Invalid key_type: {key_type}",
            {"inventory": inventory, "group": group, "key_type": key_type},
            errors=["key_type must be 'rsa' or 'ed25519'"],
        )
    try:
        key = os.path.expanduser(key)
        pub_key = key + ".pub"
        if not os.path.exists(key):
            # NOTE(review): os.system with an f-string-built command — a key
            # path containing spaces/shell metacharacters breaks or injects
            # into the shell. Consider subprocess.run([...], shell=False).
            if key_type == "rsa":
                os.system(f"ssh-keygen -t rsa -b 4096 -f {key} -N ''")
            else:  # ed25519
                os.system(f"ssh-keygen -t ed25519 -f {key} -N ''")
            logger.info(f"Generated {key_type} key: {key}, {pub_key}")
        with open(pub_key, "r") as f:
            pub = f.read().strip()
        hosts, error = load_inventory(inventory, group, logger)
        if error:
            # load_inventory returns a ready-made error response dict.
            return error
        total = len(hosts)
        if ctx:
            await ctx.report_progress(progress=0, total=total)
            logger.debug(f"Progress: 0/{total}")

        async def setup_host(h: Dict, ctx: Context) -> Dict:
            # One inventory entry: push the shared public key and verify
            # key-based auth. Always returns a result dict; never raises.
            # NOTE(review): h["password"] is indexed directly (KeyError if
            # absent), while other tools use h.get("password") — confirm the
            # inventory schema guarantees a password field here.
            host, user, password = h["hostname"], h["username"], h["password"]
            kpath = h.get("key_path", key)
            logger.info(f"Setup {user}@{host}")
            try:
                t = Tunnel(remote_host=host, username=user, password=password)
                t.remove_host_key()
                t.setup_passwordless_ssh(local_key_path=kpath, key_type=key_type)
                t.connect()
                # NOTE(review): pub is single-quoted inside a remote shell
                # command; a quote character in the key material would break
                # the echo. OpenSSH public keys normally contain none.
                t.run_command(f"echo '{pub}' >> ~/.ssh/authorized_keys")
                t.run_command("chmod 600 ~/.ssh/authorized_keys")
                logger.info(f"Added {key_type} key to {user}@{host}")
                res, msg = t.test_key_auth(kpath)
                return {
                    "hostname": host,
                    "status": "success",
                    "message": f"SSH setup for {user}@{host} with {key_type} key",
                    "errors": [] if res else [msg],
                }
            except Exception as e:
                logger.error(f"Setup fail {user}@{host}: {e}")
                return {
                    "hostname": host,
                    "status": "failed",
                    "message": f"Setup fail: {e}",
                    "errors": [str(e)],
                }
            finally:
                # Tunnel may not exist if its constructor raised.
                if "t" in locals():
                    t.close()

        results, files, locations, errors = [], [], [], []
        if parallel:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=max_threads
            ) as ex:
                # Each worker thread gets its own event loop via asyncio.run;
                # the h=... argument to submit binds the host per future.
                futures = [
                    ex.submit(lambda h: asyncio.run(setup_host(h, ctx)), h)
                    for h in hosts
                ]
                for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
                    try:
                        r = f.result()
                        results.append(r)
                        if r["status"] == "success":
                            files.append(pub_key)
                            locations.append(
                                f"~/.ssh/authorized_keys on {r['hostname']}"
                            )
                        else:
                            errors.extend(r["errors"])
                        if ctx:
                            await ctx.report_progress(progress=i, total=total)
                            logger.debug(f"Progress: {i}/{total}")
                    except Exception as e:
                        # f.result() raised: record a placeholder result so
                        # host_results still has one entry per future.
                        logger.error(f"Parallel error: {e}")
                        results.append(
                            {
                                "hostname": "unknown",
                                "status": "failed",
                                "message": f"Parallel error: {e}",
                                "errors": [str(e)],
                            }
                        )
                        errors.append(str(e))
        else:
            # Serial path: await each host on the current event loop.
            for i, h in enumerate(hosts, 1):
                r = await setup_host(h, ctx)
                results.append(r)
                if r["status"] == "success":
                    files.append(pub_key)
                    locations.append(f"~/.ssh/authorized_keys on {r['hostname']}")
                else:
                    errors.extend(r["errors"])
                if ctx:
                    await ctx.report_progress(progress=i, total=total)
                    logger.debug(f"Progress: {i}/{total}")
        logger.debug(f"Done SSH setup for {group}")
        msg = (
            f"SSH setup done for {group}"
            if not errors
            else f"SSH setup failed for some in {group}"
        )
        return ResponseBuilder.build(
            200 if not errors else 500,
            msg,
            {
                "inventory": inventory,
                "group": group,
                "key_type": key_type,
                "host_results": results,
            },
            "; ".join(errors),
            files,
            locations,
            errors,
        )
    except Exception as e:
        logger.error(f"Setup all fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Setup all fail: {e}",
            {"inventory": inventory, "group": group, "key_type": key_type},
            str(e),
        )
|
|
1167
|
+
|
|
1168
|
+
@mcp.tool(
    annotations={
        # NOTE(review): readOnlyHint=True looks inconsistent with running an
        # arbitrary shell command (and with destructiveHint=True) — confirm.
        "title": "Run Command on All Hosts",
        "readOnlyHint": True,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def run_command_on_inventory(
    inventory: str = Field(
        description="YAML inventory path.",
        default=os.environ.get("TUNNEL_INVENTORY", None),
    ),
    cmd: str = Field(description="Shell command.", default=None),
    group: str = Field(
        description="Target group.",
        default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
    ),
    parallel: bool = Field(
        description="Run parallel.",
        default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
    ),
    max_threads: int = Field(
        description="Max threads.",
        default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
    ),
    log: Optional[str] = Field(description="Log file.", default=None),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Run command on all hosts in group. Expected return object type: dict

    Executes ``cmd`` on every host of ``group`` in the YAML inventory,
    serially or via a thread pool, collecting per-host stdout/stderr.

    Returns a ``ResponseBuilder.build`` dict: 400 on missing input, 200 when
    every host succeeded, 500 otherwise; per-host outcomes (including
    ``stdout``/``stderr``) are in ``host_results``.

    NOTE: ``log`` is accepted for interface parity but is not referenced in
    this body.
    """
    logger.debug(f"Run cmd all: inv={inventory}, group={group}, cmd={cmd}")
    if not inventory or not cmd:
        logger.error("Need inventory, cmd")
        return ResponseBuilder.build(
            400,
            "Need inventory, cmd",
            {"inventory": inventory, "group": group, "cmd": cmd},
            errors=["Need inventory, cmd"],
        )
    try:
        hosts, error = load_inventory(inventory, group, logger)
        if error:
            # load_inventory returns a ready-made error response dict.
            return error
        total = len(hosts)
        if ctx:
            await ctx.report_progress(progress=0, total=total)
            logger.debug(f"Progress: 0/{total}")

        async def run_host(h: Dict, ctx: Context) -> Dict:
            # Run cmd on one inventory entry. Always returns a result dict;
            # never raises.
            host = h["hostname"]
            try:
                t = Tunnel(
                    remote_host=host,
                    username=h["username"],
                    password=h.get("password"),
                    identity_file=h.get("key_path"),
                )
                # run_command returns (stdout, stderr); `error` here shadows
                # the outer load_inventory error deliberately/locally.
                out, error = t.run_command(cmd)
                logger.info(f"Host {host}: Out: {out}, Err: {error}")
                return {
                    "hostname": host,
                    "status": "success",
                    "message": f"Cmd '{cmd}' done on {host}",
                    "stdout": out,
                    "stderr": error,
                    "errors": [],
                }
            except Exception as e:
                logger.error(f"Cmd fail {host}: {e}")
                return {
                    "hostname": host,
                    "status": "failed",
                    "message": f"Cmd fail: {e}",
                    "stdout": "",
                    "stderr": str(e),
                    "errors": [str(e)],
                }
            finally:
                # Tunnel may not exist if its constructor raised.
                if "t" in locals():
                    t.close()

        results, errors = [], []
        if parallel:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=max_threads
            ) as ex:
                # Each worker thread runs its own event loop via asyncio.run;
                # the trailing h argument binds the host per future.
                futures = [
                    ex.submit(lambda h: asyncio.run(run_host(h, ctx)), h)
                    for h in hosts
                ]
                for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
                    try:
                        r = f.result()
                        results.append(r)
                        errors.extend(r["errors"])
                        if ctx:
                            await ctx.report_progress(progress=i, total=total)
                            logger.debug(f"Progress: {i}/{total}")
                    except Exception as e:
                        # f.result() raised: keep one placeholder entry per
                        # future so host_results stays complete.
                        logger.error(f"Parallel error: {e}")
                        results.append(
                            {
                                "hostname": "unknown",
                                "status": "failed",
                                "message": f"Parallel error: {e}",
                                "stdout": "",
                                "stderr": str(e),
                                "errors": [str(e)],
                            }
                        )
                        errors.append(str(e))
        else:
            # Serial path: await each host on the current event loop.
            for i, h in enumerate(hosts, 1):
                r = await run_host(h, ctx)
                results.append(r)
                errors.extend(r["errors"])
                if ctx:
                    await ctx.report_progress(progress=i, total=total)
                    logger.debug(f"Progress: {i}/{total}")
        logger.debug(f"Done cmd for {group}")
        msg = (
            f"Cmd '{cmd}' done on {group}"
            if not errors
            else f"Cmd '{cmd}' failed for some in {group}"
        )
        return ResponseBuilder.build(
            200 if not errors else 500,
            msg,
            {
                "inventory": inventory,
                "group": group,
                "cmd": cmd,
                "host_results": results,
            },
            "; ".join(errors),
            [],
            [],
            errors,
        )
    except Exception as e:
        logger.error(f"Cmd all fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Cmd all fail: {e}",
            {"inventory": inventory, "group": group, "cmd": cmd},
            str(e),
        )
|
|
1316
|
+
|
|
1317
|
+
@mcp.tool(
    annotations={
        "title": "Copy SSH Config to All",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def copy_ssh_config_on_inventory(
    inventory: str = Field(
        description="YAML inventory path.",
        default=os.environ.get("TUNNEL_INVENTORY", None),
    ),
    cfg: str = Field(description="Local SSH config path.", default=None),
    rmt_cfg: str = Field(
        description="Remote path.", default=os.path.expanduser("~/.ssh/config")
    ),
    group: str = Field(
        description="Target group.",
        default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
    ),
    parallel: bool = Field(
        description="Run parallel.",
        default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
    ),
    max_threads: int = Field(
        description="Max threads.",
        default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Copy SSH config to all hosts in YAML group. Expected return object type: dict

    Pushes the local ``cfg`` file to ``rmt_cfg`` on every host of ``group``,
    serially or via a thread pool.

    Returns a ``ResponseBuilder.build`` dict: 400 when input is missing or
    ``cfg`` does not exist locally, 200 when every host succeeded, 500
    otherwise; per-host outcomes are in ``host_results``.

    NOTE: ``log`` is accepted for interface parity but is not referenced in
    this body.
    """
    logger.debug(f"Copy SSH config: inv={inventory}, group={group}")

    if not inventory or not cfg:
        logger.error("Need inventory, cfg")
        return ResponseBuilder.build(
            400,
            "Need inventory, cfg",
            {
                "inventory": inventory,
                "group": group,
                "cfg": cfg,
                "rmt_cfg": rmt_cfg,
            },
            errors=["Need inventory, cfg"],
        )

    # Validate the local source file up front, before touching any host.
    if not os.path.exists(cfg):
        logger.error(f"No cfg file: {cfg}")
        return ResponseBuilder.build(
            400,
            f"No cfg file: {cfg}",
            {
                "inventory": inventory,
                "group": group,
                "cfg": cfg,
                "rmt_cfg": rmt_cfg,
            },
            errors=[f"No cfg file: {cfg}"],
        )

    try:
        hosts, error = load_inventory(inventory, group, logger)
        if error:
            # load_inventory returns a ready-made error response dict.
            return error

        total = len(hosts)
        if ctx:
            await ctx.report_progress(progress=0, total=total)
            logger.debug(f"Progress: 0/{total}")

        results, files, locations, errors = [], [], [], []

        async def copy_host(h: Dict) -> Dict:
            # Copy cfg to one inventory entry. Always returns a result dict;
            # never raises.
            try:
                t = Tunnel(
                    remote_host=h["hostname"],
                    username=h["username"],
                    password=h.get("password"),
                    identity_file=h.get("key_path"),
                )
                t.copy_ssh_config(cfg, rmt_cfg)
                logger.info(f"Copied cfg to {rmt_cfg} on {h['hostname']}")
                return {
                    "hostname": h["hostname"],
                    "status": "success",
                    "message": f"Copied cfg to {rmt_cfg}",
                    "errors": [],
                }
            except Exception as e:
                logger.error(f"Copy fail {h['hostname']}: {e}")
                return {
                    "hostname": h["hostname"],
                    "status": "failed",
                    "message": f"Copy fail: {e}",
                    "errors": [str(e)],
                }
            finally:
                # Tunnel may not exist if its constructor raised.
                if "t" in locals():
                    t.close()

        if parallel:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=max_threads
            ) as ex:
                # Each worker thread runs its own event loop via asyncio.run;
                # the trailing h argument binds the host per future.
                futures = [
                    ex.submit(lambda h: asyncio.run(copy_host(h)), h) for h in hosts
                ]
                for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
                    try:
                        r = f.result()
                        results.append(r)
                        if r["status"] == "success":
                            files.append(cfg)
                            locations.append(f"{rmt_cfg} on {r['hostname']}")
                        else:
                            errors.extend(r["errors"])
                        if ctx:
                            await ctx.report_progress(progress=i, total=total)
                            logger.debug(f"Progress: {i}/{total}")
                    except Exception as e:
                        # f.result() raised: keep one placeholder entry per
                        # future so host_results stays complete.
                        logger.error(f"Parallel error: {e}")
                        results.append(
                            {
                                "hostname": "unknown",
                                "status": "failed",
                                "message": f"Parallel error: {e}",
                                "errors": [str(e)],
                            }
                        )
                        errors.append(str(e))
        else:
            # Serial path: await each host on the current event loop.
            for i, h in enumerate(hosts, 1):
                r = await copy_host(h)
                results.append(r)
                if r["status"] == "success":
                    files.append(cfg)
                    locations.append(f"{rmt_cfg} on {r['hostname']}")
                else:
                    errors.extend(r["errors"])
                if ctx:
                    await ctx.report_progress(progress=i, total=total)
                    logger.debug(f"Progress: {i}/{total}")

        logger.debug(f"Done SSH config copy for {group}")
        msg = (
            f"Copied cfg to {group}"
            if not errors
            else f"Copy failed for some in {group}"
        )
        return ResponseBuilder.build(
            200 if not errors else 500,
            msg,
            {
                "inventory": inventory,
                "group": group,
                "cfg": cfg,
                "rmt_cfg": rmt_cfg,
                "host_results": results,
            },
            "; ".join(errors),
            files,
            locations,
            errors,
        )

    except Exception as e:
        logger.error(f"Copy all fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Copy all fail: {e}",
            {
                "inventory": inventory,
                "group": group,
                "cfg": cfg,
                "rmt_cfg": rmt_cfg,
            },
            str(e),
        )
|
|
1501
|
+
|
|
1502
|
+
@mcp.tool(
    annotations={
        "title": "Rotate SSH Keys for All",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def rotate_ssh_key_on_inventory(
    inventory: str = Field(
        description="YAML inventory path.",
        default=os.environ.get("TUNNEL_INVENTORY", None),
    ),
    key_pfx: str = Field(
        description="Prefix for new keys.", default=os.path.expanduser("~/.ssh/id_")
    ),
    key_type: str = Field(
        description="Key type to generate (rsa or ed25519).", default="ed25519"
    ),
    group: str = Field(
        description="Target group.",
        default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
    ),
    parallel: bool = Field(
        description="Run parallel.",
        default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
    ),
    max_threads: int = Field(
        description="Max threads.",
        default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "6")),
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Rotate SSH keys for all hosts in YAML group. Expected return object type: dict

    Rotates the key on every host of ``group``; each host gets its own new
    key at ``key_pfx + hostname`` (via ``Tunnel.rotate_ssh_key``), serially
    or via a thread pool.

    Returns a ``ResponseBuilder.build`` dict: 400 on missing/invalid input,
    200 when every host succeeded, 500 otherwise; per-host outcomes
    (including ``new_key_path``) are in ``host_results``.

    NOTE: ``log`` is accepted for interface parity but is not referenced in
    this body.
    """
    logger.debug(
        f"Rotate SSH keys: inv={inventory}, group={group}, key_type={key_type}"
    )

    if not inventory:
        logger.error("Need inventory")
        return ResponseBuilder.build(
            400,
            "Need inventory",
            {
                "inventory": inventory,
                "group": group,
                "key_pfx": key_pfx,
                "key_type": key_type,
            },
            errors=["Need inventory"],
        )
    if key_type not in ["rsa", "ed25519"]:
        logger.error(f"Invalid key_type: {key_type}")
        return ResponseBuilder.build(
            400,
            f"Invalid key_type: {key_type}",
            {
                "inventory": inventory,
                "group": group,
                "key_pfx": key_pfx,
                "key_type": key_type,
            },
            errors=["key_type must be 'rsa' or 'ed25519'"],
        )

    try:
        hosts, error = load_inventory(inventory, group, logger)
        if error:
            # load_inventory returns a ready-made error response dict.
            return error

        total = len(hosts)
        if ctx:
            await ctx.report_progress(progress=0, total=total)
            logger.debug(f"Progress: 0/{total}")

        results, files, locations, errors = [], [], [], []

        async def rotate_host(h: Dict) -> Dict:
            # Rotate the key for one inventory entry. Always returns a
            # result dict; never raises.
            key = os.path.expanduser(key_pfx + h["hostname"])
            try:
                t = Tunnel(
                    remote_host=h["hostname"],
                    username=h["username"],
                    password=h.get("password"),
                    identity_file=h.get("key_path"),
                )
                t.rotate_ssh_key(key, key_type=key_type)
                logger.info(f"Rotated {key_type} key for {h['hostname']}: {key}")
                return {
                    "hostname": h["hostname"],
                    "status": "success",
                    "message": f"Rotated {key_type} key to {key}",
                    "errors": [],
                    "new_key_path": key,
                }
            except Exception as e:
                logger.error(f"Rotate fail {h['hostname']}: {e}")
                return {
                    "hostname": h["hostname"],
                    "status": "failed",
                    "message": f"Rotate fail: {e}",
                    "errors": [str(e)],
                    "new_key_path": key,
                }
            finally:
                # Tunnel may not exist if its constructor raised.
                if "t" in locals():
                    t.close()

        if parallel:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=max_threads
            ) as ex:
                # Each worker thread runs its own event loop via asyncio.run;
                # the trailing h argument binds the host per future.
                futures = [
                    ex.submit(lambda h: asyncio.run(rotate_host(h)), h)
                    for h in hosts
                ]
                # Fix: was `concurrent.fences.as_completed`, which raised
                # AttributeError and made the parallel path unusable.
                for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
                    try:
                        r = f.result()
                        results.append(r)
                        if r["status"] == "success":
                            files.append(r["new_key_path"] + ".pub")
                            locations.append(
                                f"~/.ssh/authorized_keys on {r['hostname']}"
                            )
                        else:
                            errors.extend(r["errors"])
                        if ctx:
                            await ctx.report_progress(progress=i, total=total)
                            logger.debug(f"Progress: {i}/{total}")
                    except Exception as e:
                        # f.result() raised: keep one placeholder entry per
                        # future so host_results stays complete.
                        logger.error(f"Parallel error: {e}")
                        results.append(
                            {
                                "hostname": "unknown",
                                "status": "failed",
                                "message": f"Parallel error: {e}",
                                "errors": [str(e)],
                                "new_key_path": None,
                            }
                        )
                        errors.append(str(e))
        else:
            # Serial path: await each host on the current event loop.
            for i, h in enumerate(hosts, 1):
                r = await rotate_host(h)
                results.append(r)
                if r["status"] == "success":
                    files.append(r["new_key_path"] + ".pub")
                    locations.append(f"~/.ssh/authorized_keys on {r['hostname']}")
                else:
                    errors.extend(r["errors"])
                if ctx:
                    await ctx.report_progress(progress=i, total=total)
                    logger.debug(f"Progress: {i}/{total}")

        logger.debug(f"Done SSH key rotate for {group}")
        msg = (
            f"Rotated {key_type} keys for {group}"
            if not errors
            else f"Rotate failed for some in {group}"
        )
        return ResponseBuilder.build(
            200 if not errors else 500,
            msg,
            {
                "inventory": inventory,
                "group": group,
                "key_pfx": key_pfx,
                "key_type": key_type,
                "host_results": results,
            },
            "; ".join(errors),
            files,
            locations,
            errors,
        )

    except Exception as e:
        logger.error(f"Rotate all fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Rotate all fail: {e}",
            {
                "inventory": inventory,
                "group": group,
                "key_pfx": key_pfx,
                "key_type": key_type,
            },
            str(e),
        )
|
|
1696
|
+
|
|
1697
|
+
@mcp.tool(
    annotations={
        "title": "Upload File to All Hosts",
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
    },
    tags={"remote_access"},
)
async def send_file_to_inventory(
    inventory: str = Field(
        description="YAML inventory path.",
        default=os.environ.get("TUNNEL_INVENTORY", None),
    ),
    lpath: str = Field(description="Local file path.", default=None),
    rpath: str = Field(description="Remote destination path.", default=None),
    group: str = Field(
        description="Target group.",
        default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
    ),
    parallel: bool = Field(
        description="Run parallel.",
        default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
    ),
    max_threads: int = Field(
        description="Max threads.",
        default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "5")),
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Upload a file to all hosts in the specified inventory group. Expected return object type: dict

    Opens an SFTP session to each host in ``group`` (via ``Tunnel``) and
    uploads ``lpath`` to ``rpath``.  When ``parallel`` is true the per-host
    uploads run on a thread pool (each worker drives its own event loop via
    ``asyncio.run``); otherwise hosts are processed sequentially.  Progress is
    reported through ``ctx.report_progress`` when an MCP context is supplied.

    Returns a ``ResponseBuilder.build`` dict with status 400 on bad input,
    200 when every host succeeded, or 500 when any host failed.
    """
    logger.debug(
        f"Upload file all: inv={inventory}, group={group}, local={lpath}, remote={rpath}"
    )
    # BUGFIX: validate required arguments BEFORE normalizing the paths.
    # Previously os.path.expanduser(lpath)/expanduser(rpath) ran first, so a
    # missing lpath/rpath raised TypeError instead of the intended 400 reply.
    if not inventory or not lpath or not rpath:
        logger.error("Need inventory, lpath, rpath")
        return ResponseBuilder.build(
            400,
            "Need inventory, lpath, rpath",
            {
                "inventory": inventory,
                "group": group,
                "lpath": lpath,
                "rpath": rpath,
            },
            errors=["Need inventory, lpath, rpath"],
        )
    # Normalize: absolute local path, ~-expanded remote path.
    lpath = os.path.abspath(os.path.expanduser(lpath))
    rpath = os.path.expanduser(rpath)
    logger.debug(
        f"Normalized: lpath={lpath} (exists={os.path.exists(lpath)}, isfile={os.path.isfile(lpath)}), rpath={rpath}, CWD={os.getcwd()}"
    )
    if not os.path.exists(lpath) or not os.path.isfile(lpath):
        logger.error(f"Invalid file: {lpath}")
        return ResponseBuilder.build(
            400,
            f"Invalid file: {lpath}",
            {
                "inventory": inventory,
                "group": group,
                "lpath": lpath,
                "rpath": rpath,
            },
            errors=[f"Invalid file: {lpath}"],
        )
    try:
        hosts, error = load_inventory(inventory, group, logger)
        if error:
            # load_inventory returns a ready-made error response dict.
            return error
        total = len(hosts)
        if ctx:
            await ctx.report_progress(progress=0, total=total)
            logger.debug(f"Progress: 0/{total}")

        async def send_host(h: Dict) -> Dict:
            """Upload the file to a single inventory host; never raises."""
            host = h["hostname"]
            try:
                t = Tunnel(
                    remote_host=host,
                    username=h["username"],
                    password=h.get("password"),
                    identity_file=h.get("key_path"),
                )
                t.connect()
                sftp = t.ssh_client.open_sftp()
                transferred = 0

                def progress_callback(transf, total):
                    # Called synchronously by paramiko during sftp.put;
                    # schedule (not await) the MCP progress coroutine.
                    nonlocal transferred
                    transferred = transf
                    if ctx:
                        asyncio.ensure_future(
                            ctx.report_progress(progress=transf, total=total)
                        )

                sftp.put(lpath, rpath, callback=progress_callback)
                sftp.close()
                logger.info(f"Host {host}: Uploaded {lpath} to {rpath}")
                return {
                    "hostname": host,
                    "status": "success",
                    "message": f"Uploaded {lpath} to {rpath}",
                    "errors": [],
                }
            except Exception as e:
                logger.error(f"Upload fail {host}: {e}")
                return {
                    "hostname": host,
                    "status": "failed",
                    "message": f"Upload fail: {e}",
                    "errors": [str(e)],
                }
            finally:
                # Tunnel() itself may have raised, in which case t is unbound.
                if "t" in locals():
                    t.close()

        results, files, locations, errors = [], [lpath], [], []
        if parallel:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=max_threads
            ) as ex:
                # Each worker thread runs send_host on its own event loop.
                futures = [
                    ex.submit(lambda h: asyncio.run(send_host(h)), h) for h in hosts
                ]
                for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
                    try:
                        r = f.result()
                        results.append(r)
                        if r["status"] == "success":
                            locations.append(f"{rpath} on {r['hostname']}")
                        else:
                            errors.extend(r["errors"])
                        if ctx:
                            await ctx.report_progress(progress=i, total=total)
                            logger.debug(f"Progress: {i}/{total}")
                    except Exception as e:
                        logger.error(f"Parallel error: {e}")
                        results.append(
                            {
                                "hostname": "unknown",
                                "status": "failed",
                                "message": f"Parallel error: {e}",
                                "errors": [str(e)],
                            }
                        )
                        errors.append(str(e))
        else:
            for i, h in enumerate(hosts, 1):
                r = await send_host(h)
                results.append(r)
                if r["status"] == "success":
                    locations.append(f"{rpath} on {r['hostname']}")
                else:
                    errors.extend(r["errors"])
                if ctx:
                    await ctx.report_progress(progress=i, total=total)
                    logger.debug(f"Progress: {i}/{total}")

        logger.debug(f"Done file upload for {group}")
        msg = (
            f"Uploaded {lpath} to {group}"
            if not errors
            else f"Upload failed for some in {group}"
        )
        return ResponseBuilder.build(
            200 if not errors else 500,
            msg,
            {
                "inventory": inventory,
                "group": group,
                "lpath": lpath,
                "rpath": rpath,
                "host_results": results,
            },
            "; ".join(errors),
            files,
            locations,
            errors,
        )
    except Exception as e:
        logger.error(f"Upload all fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Upload all fail: {e}",
            {
                "inventory": inventory,
                "group": group,
                "lpath": lpath,
                "rpath": rpath,
            },
            str(e),
        )
|
|
1892
|
+
|
|
1893
|
+
@mcp.tool(
    annotations={
        "title": "Download File from All Hosts",
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": True,
    },
    tags={"remote_access"},
)
async def receive_file_from_inventory(
    inventory: str = Field(
        description="YAML inventory path.",
        default=os.environ.get("TUNNEL_INVENTORY", None),
    ),
    rpath: str = Field(description="Remote file path to download.", default=None),
    lpath_prefix: str = Field(
        description="Local directory path prefix to save files.", default=None
    ),
    group: str = Field(
        description="Target group.",
        default=os.environ.get("TUNNEL_INVENTORY_GROUP", "all"),
    ),
    parallel: bool = Field(
        description="Run parallel.",
        default=to_boolean(os.environ.get("TUNNEL_PARALLEL", False)),
    ),
    max_threads: int = Field(
        description="Max threads.",
        default=to_integer(os.environ.get("TUNNEL_MAX_THREADS", "5")),
    ),
    log: Optional[str] = Field(
        description="Log file.", default=os.environ.get("TUNNEL_LOG_FILE", None)
    ),
    ctx: Context = Field(description="MCP context.", default=None),
) -> Dict:
    """Download a file from all hosts in the specified inventory group. Expected return object type: dict"""
    # Downloads rpath from every host in `group` into
    # <lpath_prefix>/<hostname>/<basename(rpath)> via SFTP, optionally in
    # parallel on a thread pool.  Returns a ResponseBuilder dict: 400 on bad
    # input, 200 if every host succeeded, 500 if any failed.
    logger.debug(
        f"Download file all: inv={inventory}, group={group}, remote={rpath}, local_prefix={lpath_prefix}"
    )
    if not inventory or not rpath or not lpath_prefix:
        logger.error("Need inventory, rpath, lpath_prefix")
        return ResponseBuilder.build(
            400,
            "Need inventory, rpath, lpath_prefix",
            {
                "inventory": inventory,
                "group": group,
                "rpath": rpath,
                "lpath_prefix": lpath_prefix,
            },
            errors=["Need inventory, rpath, lpath_prefix"],
        )
    try:
        # Ensure the destination root exists before any transfer starts.
        os.makedirs(lpath_prefix, exist_ok=True)
        hosts, error = load_inventory(inventory, group, logger)
        if error:
            # load_inventory returns a ready-made error response dict.
            return error
        total = len(hosts)
        if ctx:
            await ctx.report_progress(progress=0, total=total)
            logger.debug(f"Progress: 0/{total}")

        async def receive_host(h: Dict) -> Dict:
            # Download from one host into a per-host subdirectory; never raises.
            host = h["hostname"]
            lpath = os.path.join(lpath_prefix, host, os.path.basename(rpath))
            os.makedirs(os.path.dirname(lpath), exist_ok=True)
            try:
                t = Tunnel(
                    remote_host=host,
                    username=h["username"],
                    password=h.get("password"),
                    identity_file=h.get("key_path"),
                )
                t.connect()
                sftp = t.ssh_client.open_sftp()
                # stat() first so a missing remote file fails fast with a
                # clear error instead of a partial get().
                sftp.stat(rpath)
                transferred = 0

                def progress_callback(transf, total):
                    # Invoked synchronously by paramiko during sftp.get;
                    # schedule (not await) the MCP progress coroutine.
                    nonlocal transferred
                    transferred = transf
                    if ctx:
                        asyncio.ensure_future(
                            ctx.report_progress(progress=transf, total=total)
                        )

                sftp.get(rpath, lpath, callback=progress_callback)
                sftp.close()
                logger.info(f"Host {host}: Downloaded {rpath} to {lpath}")
                return {
                    "hostname": host,
                    "status": "success",
                    "message": f"Downloaded {rpath} to {lpath}",
                    "errors": [],
                    "local_path": lpath,
                }
            except Exception as e:
                logger.error(f"Download fail {host}: {e}")
                return {
                    "hostname": host,
                    "status": "failed",
                    "message": f"Download fail: {e}",
                    "errors": [str(e)],
                    "local_path": lpath,
                }
            finally:
                # Tunnel() itself may have raised, in which case t is unbound.
                if "t" in locals():
                    t.close()

        results, files, locations, errors = [], [], [], []
        if parallel:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=max_threads
            ) as ex:
                # Each worker thread drives receive_host on its own event loop.
                futures = [
                    ex.submit(lambda h: asyncio.run(receive_host(h)), h)
                    for h in hosts
                ]
                for i, f in enumerate(concurrent.futures.as_completed(futures), 1):
                    try:
                        r = f.result()
                        results.append(r)
                        if r["status"] == "success":
                            files.append(rpath)
                            locations.append(r["local_path"])
                        else:
                            errors.extend(r["errors"])
                        if ctx:
                            await ctx.report_progress(progress=i, total=total)
                            logger.debug(f"Progress: {i}/{total}")
                    except Exception as e:
                        # Failure retrieving the future itself (not a per-host
                        # error dict) — record it against an unknown host.
                        logger.error(f"Parallel error: {e}")
                        results.append(
                            {
                                "hostname": "unknown",
                                "status": "failed",
                                "message": f"Parallel error: {e}",
                                "errors": [str(e)],
                                "local_path": None,
                            }
                        )
                        errors.append(str(e))
        else:
            for i, h in enumerate(hosts, 1):
                r = await receive_host(h)
                results.append(r)
                if r["status"] == "success":
                    files.append(rpath)
                    locations.append(r["local_path"])
                else:
                    errors.extend(r["errors"])
                if ctx:
                    await ctx.report_progress(progress=i, total=total)
                    logger.debug(f"Progress: {i}/{total}")

        logger.debug(f"Done file download for {group}")
        msg = (
            f"Downloaded {rpath} from {group}"
            if not errors
            else f"Download failed for some in {group}"
        )
        return ResponseBuilder.build(
            200 if not errors else 500,
            msg,
            {
                "inventory": inventory,
                "group": group,
                "rpath": rpath,
                "lpath_prefix": lpath_prefix,
                "host_results": results,
            },
            "; ".join(errors),
            files,
            locations,
            errors,
        )
    except Exception as e:
        logger.error(f"Download all fail: {e}")
        return ResponseBuilder.build(
            500,
            f"Download all fail: {e}",
            {
                "inventory": inventory,
                "group": group,
                "rpath": rpath,
                "lpath_prefix": lpath_prefix,
            },
            str(e),
        )
|
|
2082
|
+
|
|
2083
|
+
|
|
2084
|
+
def tunnel_manager_mcp():
    """CLI entry point: parse arguments, configure auth and middleware, and run the MCP server.

    Reads defaults from environment variables (TUNNEL_*, FASTMCP_SERVER_AUTH_JWT_*,
    OPENAPI_*, ENABLE_DELEGATION, ...), mutates the module-level ``config`` dict,
    builds the selected auth provider and middleware stack, registers the tools on a
    ``FastMCP`` instance, and starts it on the chosen transport.  Exits the process
    (``sys.exit(1)``) on any configuration error.
    """
    parser = argparse.ArgumentParser(
        description="Tunnel MCP Server for remote SSH and file operations",
    )
    parser.add_argument(
        "-t",
        "--transport",
        default=DEFAULT_TRANSPORT,
        choices=["stdio", "streamable-http", "sse"],
        help="Transport method: 'stdio', 'streamable-http', or 'sse' [legacy] (default: stdio)",
    )
    parser.add_argument(
        "-s",
        "--host",
        default=DEFAULT_HOST,
        help="Host address for HTTP transport (default: 0.0.0.0)",
    )
    parser.add_argument(
        "-p",
        "--port",
        type=int,
        default=DEFAULT_PORT,
        help="Port number for HTTP transport (default: 8000)",
    )
    parser.add_argument(
        "--auth-type",
        default="none",
        choices=["none", "static", "jwt", "oauth-proxy", "oidc-proxy", "remote-oauth"],
        help="Authentication type for MCP server: 'none' (disabled), 'static' (internal), 'jwt' (external token verification), 'oauth-proxy', 'oidc-proxy', 'remote-oauth' (external) (default: none)",
    )
    # JWT/Token params
    parser.add_argument(
        "--token-jwks-uri", default=None, help="JWKS URI for JWT verification"
    )
    parser.add_argument(
        "--token-issuer", default=None, help="Issuer for JWT verification"
    )
    parser.add_argument(
        "--token-audience", default=None, help="Audience for JWT verification"
    )
    parser.add_argument(
        "--token-algorithm",
        default=os.getenv("FASTMCP_SERVER_AUTH_JWT_ALGORITHM"),
        choices=[
            "HS256",
            "HS384",
            "HS512",
            "RS256",
            "RS384",
            "RS512",
            "ES256",
            "ES384",
            "ES512",
        ],
        help="JWT signing algorithm (required for HMAC or static key). Auto-detected for JWKS.",
    )
    # NOTE(review): --token-secret and --token-public-key share the same env
    # default (FASTMCP_SERVER_AUTH_JWT_PUBLIC_KEY) — confirm that is intended.
    parser.add_argument(
        "--token-secret",
        default=os.getenv("FASTMCP_SERVER_AUTH_JWT_PUBLIC_KEY"),
        help="Shared secret for HMAC (HS*) or PEM public key for static asymmetric verification.",
    )
    parser.add_argument(
        "--token-public-key",
        default=os.getenv("FASTMCP_SERVER_AUTH_JWT_PUBLIC_KEY"),
        help="Path to PEM public key file or inline PEM string (for static asymmetric keys).",
    )
    parser.add_argument(
        "--required-scopes",
        default=os.getenv("FASTMCP_SERVER_AUTH_JWT_REQUIRED_SCOPES"),
        help="Comma-separated list of required scopes (e.g., gitlab.read,gitlab.write).",
    )
    # OAuth Proxy params
    parser.add_argument(
        "--oauth-upstream-auth-endpoint",
        default=None,
        help="Upstream authorization endpoint for OAuth Proxy",
    )
    parser.add_argument(
        "--oauth-upstream-token-endpoint",
        default=None,
        help="Upstream token endpoint for OAuth Proxy",
    )
    parser.add_argument(
        "--oauth-upstream-client-id",
        default=None,
        help="Upstream client ID for OAuth Proxy",
    )
    parser.add_argument(
        "--oauth-upstream-client-secret",
        default=None,
        help="Upstream client secret for OAuth Proxy",
    )
    parser.add_argument(
        "--oauth-base-url", default=None, help="Base URL for OAuth Proxy"
    )
    # OIDC Proxy params
    parser.add_argument(
        "--oidc-config-url", default=None, help="OIDC configuration URL"
    )
    parser.add_argument("--oidc-client-id", default=None, help="OIDC client ID")
    parser.add_argument("--oidc-client-secret", default=None, help="OIDC client secret")
    parser.add_argument("--oidc-base-url", default=None, help="Base URL for OIDC Proxy")
    # Remote OAuth params
    parser.add_argument(
        "--remote-auth-servers",
        default=None,
        help="Comma-separated list of authorization servers for Remote OAuth",
    )
    parser.add_argument(
        "--remote-base-url", default=None, help="Base URL for Remote OAuth"
    )
    # Common
    parser.add_argument(
        "--allowed-client-redirect-uris",
        default=None,
        help="Comma-separated list of allowed client redirect URIs",
    )
    # Eunomia params
    parser.add_argument(
        "--eunomia-type",
        default="none",
        choices=["none", "embedded", "remote"],
        help="Eunomia authorization type: 'none' (disabled), 'embedded' (built-in), 'remote' (external) (default: none)",
    )
    parser.add_argument(
        "--eunomia-policy-file",
        default="mcp_policies.json",
        help="Policy file for embedded Eunomia (default: mcp_policies.json)",
    )
    parser.add_argument(
        "--eunomia-remote-url", default=None, help="URL for remote Eunomia server"
    )
    # Delegation params
    parser.add_argument(
        "--enable-delegation",
        action="store_true",
        default=to_boolean(os.environ.get("ENABLE_DELEGATION", "False")),
        help="Enable OIDC token delegation",
    )
    parser.add_argument(
        "--audience",
        default=os.environ.get("AUDIENCE", None),
        help="Audience for the delegated token",
    )
    parser.add_argument(
        "--delegated-scopes",
        default=os.environ.get("DELEGATED_SCOPES", "api"),
        help="Scopes for the delegated token (space-separated)",
    )
    parser.add_argument(
        "--openapi-file",
        default=None,
        help="Path to the OpenAPI JSON file to import additional tools from",
    )
    parser.add_argument(
        "--openapi-base-url",
        default=None,
        help="Base URL for the OpenAPI client (overrides instance URL)",
    )
    parser.add_argument(
        "--openapi-use-token",
        action="store_true",
        help="Use the incoming Bearer token (from MCP request) to authenticate OpenAPI import",
    )

    parser.add_argument(
        "--openapi-username",
        default=os.getenv("OPENAPI_USERNAME"),
        help="Username for basic auth during OpenAPI import",
    )

    parser.add_argument(
        "--openapi-password",
        default=os.getenv("OPENAPI_PASSWORD"),
        help="Password for basic auth during OpenAPI import",
    )

    parser.add_argument(
        "--openapi-client-id",
        default=os.getenv("OPENAPI_CLIENT_ID"),
        help="OAuth client ID for OpenAPI import",
    )

    parser.add_argument(
        "--openapi-client-secret",
        default=os.getenv("OPENAPI_CLIENT_SECRET"),
        help="OAuth client secret for OpenAPI import",
    )

    args = parser.parse_args()

    if args.port < 0 or args.port > 65535:
        print(f"Error: Port {args.port} is out of valid range (0-65535).")
        sys.exit(1)

    # Update config with CLI arguments
    # (CLI values win over pre-existing config values when provided.)
    config["enable_delegation"] = args.enable_delegation
    config["audience"] = args.audience or config["audience"]
    config["delegated_scopes"] = args.delegated_scopes or config["delegated_scopes"]
    config["oidc_config_url"] = args.oidc_config_url or config["oidc_config_url"]
    config["oidc_client_id"] = args.oidc_client_id or config["oidc_client_id"]
    config["oidc_client_secret"] = (
        args.oidc_client_secret or config["oidc_client_secret"]
    )

    # Configure delegation if enabled
    if config["enable_delegation"]:
        # Delegation only makes sense behind the OIDC proxy auth type.
        if args.auth_type != "oidc-proxy":
            logger.error("Token delegation requires auth-type=oidc-proxy")
            sys.exit(1)
        if not config["audience"]:
            logger.error("audience is required for delegation")
            sys.exit(1)
        if not all(
            [
                config["oidc_config_url"],
                config["oidc_client_id"],
                config["oidc_client_secret"],
            ]
        ):
            logger.error(
                "Delegation requires complete OIDC configuration (oidc-config-url, oidc-client-id, oidc-client-secret)"
            )
            sys.exit(1)

        # Fetch OIDC configuration to get token_endpoint
        try:
            logger.info(
                "Fetching OIDC configuration",
                extra={"oidc_config_url": config["oidc_config_url"]},
            )
            oidc_config_resp = requests.get(config["oidc_config_url"])
            oidc_config_resp.raise_for_status()
            oidc_config = oidc_config_resp.json()
            config["token_endpoint"] = oidc_config.get("token_endpoint")
            if not config["token_endpoint"]:
                logger.error("No token_endpoint found in OIDC configuration")
                raise ValueError("No token_endpoint found in OIDC configuration")
            logger.info(
                "OIDC configuration fetched successfully",
                extra={"token_endpoint": config["token_endpoint"]},
            )
        except Exception as e:
            print(f"Failed to fetch OIDC configuration: {e}")
            logger.error(
                "Failed to fetch OIDC configuration",
                extra={"error_type": type(e).__name__, "error_message": str(e)},
            )
            sys.exit(1)

    # Set auth based on type
    auth = None
    allowed_uris = (
        args.allowed_client_redirect_uris.split(",")
        if args.allowed_client_redirect_uris
        else None
    )

    if args.auth_type == "none":
        auth = None
    elif args.auth_type == "static":
        # Hard-coded development tokens; not for production use.
        auth = StaticTokenVerifier(
            tokens={
                "test-token": {"client_id": "test-user", "scopes": ["read", "write"]},
                "admin-token": {"client_id": "admin", "scopes": ["admin"]},
            }
        )
    elif args.auth_type == "jwt":
        # Fallback to env vars if not provided via CLI
        jwks_uri = args.token_jwks_uri or os.getenv("FASTMCP_SERVER_AUTH_JWT_JWKS_URI")
        issuer = args.token_issuer or os.getenv("FASTMCP_SERVER_AUTH_JWT_ISSUER")
        audience = args.token_audience or os.getenv("FASTMCP_SERVER_AUTH_JWT_AUDIENCE")
        algorithm = args.token_algorithm
        secret_or_key = args.token_secret or args.token_public_key
        public_key_pem = None

        if not (jwks_uri or secret_or_key):
            logger.error(
                "JWT auth requires either --token-jwks-uri or --token-secret/--token-public-key"
            )
            sys.exit(1)
        if not (issuer and audience):
            logger.error("JWT requires --token-issuer and --token-audience")
            sys.exit(1)

        # Load static public key from file if path is given
        if args.token_public_key and os.path.isfile(args.token_public_key):
            try:
                with open(args.token_public_key, "r") as f:
                    public_key_pem = f.read()
                logger.info(f"Loaded static public key from {args.token_public_key}")
            except Exception as e:
                print(f"Failed to read public key file: {e}")
                logger.error(f"Failed to read public key file: {e}")
                sys.exit(1)
        elif args.token_public_key:
            public_key_pem = args.token_public_key  # Inline PEM

        # Validation: Conflicting options
        if jwks_uri and (algorithm or secret_or_key):
            logger.warning(
                "JWKS mode ignores --token-algorithm and --token-secret/--token-public-key"
            )

        # HMAC mode
        if algorithm and algorithm.startswith("HS"):
            if not secret_or_key:
                logger.error(f"HMAC algorithm {algorithm} requires --token-secret")
                sys.exit(1)
            if jwks_uri:
                logger.error("Cannot use --token-jwks-uri with HMAC")
                sys.exit(1)
            public_key = secret_or_key
        else:
            public_key = public_key_pem

        # Required scopes
        required_scopes = None
        if args.required_scopes:
            required_scopes = [
                s.strip() for s in args.required_scopes.split(",") if s.strip()
            ]

        try:
            auth = JWTVerifier(
                jwks_uri=jwks_uri,
                public_key=public_key,
                issuer=issuer,
                audience=audience,
                algorithm=(
                    algorithm if algorithm and algorithm.startswith("HS") else None
                ),
                required_scopes=required_scopes,
            )
            logger.info(
                "JWTVerifier configured",
                extra={
                    "mode": (
                        "JWKS"
                        if jwks_uri
                        else (
                            "HMAC"
                            if algorithm and algorithm.startswith("HS")
                            else "Static Key"
                        )
                    ),
                    "algorithm": algorithm,
                    "required_scopes": required_scopes,
                },
            )
        except Exception as e:
            print(f"Failed to initialize JWTVerifier: {e}")
            logger.error(f"Failed to initialize JWTVerifier: {e}")
            sys.exit(1)
    elif args.auth_type == "oauth-proxy":
        if not (
            args.oauth_upstream_auth_endpoint
            and args.oauth_upstream_token_endpoint
            and args.oauth_upstream_client_id
            and args.oauth_upstream_client_secret
            and args.oauth_base_url
            and args.token_jwks_uri
            and args.token_issuer
            and args.token_audience
        ):
            print(
                "oauth-proxy requires oauth-upstream-auth-endpoint, oauth-upstream-token-endpoint, "
                "oauth-upstream-client-id, oauth-upstream-client-secret, oauth-base-url, token-jwks-uri, "
                "token-issuer, token-audience"
            )
            logger.error(
                "oauth-proxy requires oauth-upstream-auth-endpoint, oauth-upstream-token-endpoint, "
                "oauth-upstream-client-id, oauth-upstream-client-secret, oauth-base-url, token-jwks-uri, "
                "token-issuer, token-audience",
                extra={
                    "auth_endpoint": args.oauth_upstream_auth_endpoint,
                    "token_endpoint": args.oauth_upstream_token_endpoint,
                    "client_id": args.oauth_upstream_client_id,
                    "base_url": args.oauth_base_url,
                    "jwks_uri": args.token_jwks_uri,
                    "issuer": args.token_issuer,
                    "audience": args.token_audience,
                },
            )
            sys.exit(1)
        token_verifier = JWTVerifier(
            jwks_uri=args.token_jwks_uri,
            issuer=args.token_issuer,
            audience=args.token_audience,
        )
        auth = OAuthProxy(
            upstream_authorization_endpoint=args.oauth_upstream_auth_endpoint,
            upstream_token_endpoint=args.oauth_upstream_token_endpoint,
            upstream_client_id=args.oauth_upstream_client_id,
            upstream_client_secret=args.oauth_upstream_client_secret,
            token_verifier=token_verifier,
            base_url=args.oauth_base_url,
            allowed_client_redirect_uris=allowed_uris,
        )
    elif args.auth_type == "oidc-proxy":
        if not (
            args.oidc_config_url
            and args.oidc_client_id
            and args.oidc_client_secret
            and args.oidc_base_url
        ):
            logger.error(
                "oidc-proxy requires oidc-config-url, oidc-client-id, oidc-client-secret, oidc-base-url",
                extra={
                    "config_url": args.oidc_config_url,
                    "client_id": args.oidc_client_id,
                    "base_url": args.oidc_base_url,
                },
            )
            sys.exit(1)
        auth = OIDCProxy(
            config_url=args.oidc_config_url,
            client_id=args.oidc_client_id,
            client_secret=args.oidc_client_secret,
            base_url=args.oidc_base_url,
            allowed_client_redirect_uris=allowed_uris,
        )
    elif args.auth_type == "remote-oauth":
        if not (
            args.remote_auth_servers
            and args.remote_base_url
            and args.token_jwks_uri
            and args.token_issuer
            and args.token_audience
        ):
            logger.error(
                "remote-oauth requires remote-auth-servers, remote-base-url, token-jwks-uri, token-issuer, token-audience",
                extra={
                    "auth_servers": args.remote_auth_servers,
                    "base_url": args.remote_base_url,
                    "jwks_uri": args.token_jwks_uri,
                    "issuer": args.token_issuer,
                    "audience": args.token_audience,
                },
            )
            sys.exit(1)
        auth_servers = [url.strip() for url in args.remote_auth_servers.split(",")]
        token_verifier = JWTVerifier(
            jwks_uri=args.token_jwks_uri,
            issuer=args.token_issuer,
            audience=args.token_audience,
        )
        auth = RemoteAuthProvider(
            token_verifier=token_verifier,
            authorization_servers=auth_servers,
            base_url=args.remote_base_url,
        )

    # === 2. Build Middleware List ===
    middlewares: List[
        Union[
            UserTokenMiddleware,
            ErrorHandlingMiddleware,
            RateLimitingMiddleware,
            TimingMiddleware,
            LoggingMiddleware,
            JWTClaimsLoggingMiddleware,
            EunomiaMcpMiddleware,
        ]
    ] = [
        ErrorHandlingMiddleware(include_traceback=True, transform_errors=True),
        RateLimitingMiddleware(max_requests_per_second=10.0, burst_capacity=20),
        TimingMiddleware(),
        LoggingMiddleware(),
        JWTClaimsLoggingMiddleware(),
    ]
    if config["enable_delegation"] or args.auth_type == "jwt":
        middlewares.insert(0, UserTokenMiddleware(config=config))  # Must be first

    if args.eunomia_type in ["embedded", "remote"]:
        try:
            # Imported lazily so eunomia_mcp is only required when enabled.
            from eunomia_mcp import create_eunomia_middleware

            policy_file = args.eunomia_policy_file or "mcp_policies.json"
            eunomia_endpoint = (
                args.eunomia_remote_url if args.eunomia_type == "remote" else None
            )
            eunomia_mw = create_eunomia_middleware(
                policy_file=policy_file, eunomia_endpoint=eunomia_endpoint
            )
            middlewares.append(eunomia_mw)
            logger.info(f"Eunomia middleware enabled ({args.eunomia_type})")
        except Exception as e:
            print(f"Failed to load Eunomia middleware: {e}")
            logger.error("Failed to load Eunomia middleware", extra={"error": str(e)})
            sys.exit(1)

    # Build the server, register all tools, attach middleware in order.
    mcp = FastMCP(name="TunnelManagerMCP", auth=auth)
    register_tools(mcp)

    for mw in middlewares:
        mcp.add_middleware(mw)

    print("\nStarting Tunnel Manager MCP Server")
    print(f" Transport: {args.transport.upper()}")
    print(f" Auth: {args.auth_type}")
    print(f" Delegation: {'ON' if config['enable_delegation'] else 'OFF'}")
    print(f" Eunomia: {args.eunomia_type}")

    # argparse choices already restrict transport, so the final else is a
    # defensive fallback only.
    if args.transport == "stdio":
        mcp.run(transport="stdio")
    elif args.transport == "streamable-http":
        mcp.run(transport="streamable-http", host=args.host, port=args.port)
    elif args.transport == "sse":
        mcp.run(transport="sse", host=args.host, port=args.port)
    else:
        logger.error("Invalid transport", extra={"transport": args.transport})
        sys.exit(1)
|
|
2597
|
+
|
|
2598
|
+
|
|
2599
|
+
# Script entry point: run the CLI/server bootstrap when executed directly.
if __name__ == "__main__":
    tunnel_manager_mcp()
|